/*
 * linux/percpu.h -- per-cpu variable definitions and dynamic per-cpu
 * allocation interfaces.
 */
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/string.h> /* For memset() */
#include <linux/cpumask.h>

#include <asm/percpu.h>

5280e004 | 12 | #ifdef CONFIG_SMP |
13 | #define DEFINE_PER_CPU(type, name) \ | |
14 | __attribute__((__section__(".data.percpu"))) \ | |
15 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | |
16 | ||
17 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | |
18 | __attribute__((__section__(".data.percpu.shared_aligned"))) \ | |
19 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \ | |
20 | ____cacheline_aligned_in_smp | |
21 | #else | |
22 | #define DEFINE_PER_CPU(type, name) \ | |
23 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | |
24 | ||
25 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | |
26 | DEFINE_PER_CPU(type, name) | |
27 | #endif | |
28 | ||
29 | #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) | |
30 | #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) | |
31 | ||
1da177e4 LT |
32 | /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */ |
33 | #ifndef PERCPU_ENOUGH_ROOM | |
b00742d3 JF |
34 | #ifdef CONFIG_MODULES |
35 | #define PERCPU_MODULE_RESERVE 8192 | |
36 | #else | |
37 | #define PERCPU_MODULE_RESERVE 0 | |
1da177e4 LT |
38 | #endif |
39 | ||
b00742d3 JF |
40 | #define PERCPU_ENOUGH_ROOM \ |
41 | (__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE) | |
42 | #endif /* PERCPU_ENOUGH_ROOM */ | |
43 | ||
632bbfee JB |
44 | /* |
45 | * Must be an lvalue. Since @var must be a simple identifier, | |
46 | * we force a syntax error here if it isn't. | |
47 | */ | |
48 | #define get_cpu_var(var) (*({ \ | |
a666ecfb | 49 | extern int simple_identifier_##var(void); \ |
632bbfee JB |
50 | preempt_disable(); \ |
51 | &__get_cpu_var(var); })) | |
1da177e4 LT |
52 | #define put_cpu_var(var) preempt_enable() |
53 | ||
54 | #ifdef CONFIG_SMP | |
55 | ||
56 | struct percpu_data { | |
b3242151 | 57 | void *ptrs[1]; |
1da177e4 LT |
58 | }; |
59 | ||
7ff6f082 | 60 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) |
1da177e4 | 61 | /* |
7ff6f082 MP |
62 | * Use this to get to a cpu's version of the per-cpu object dynamically |
63 | * allocated. Non-atomic access to the current CPU's version should | |
1da177e4 LT |
64 | * probably be combined with get_cpu()/put_cpu(). |
65 | */ | |
7ff6f082 MP |
66 | #define percpu_ptr(ptr, cpu) \ |
67 | ({ \ | |
68 | struct percpu_data *__p = __percpu_disguise(ptr); \ | |
69 | (__typeof__(ptr))__p->ptrs[(cpu)]; \ | |
1da177e4 LT |
70 | }) |
71 | ||
7ff6f082 MP |
72 | extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu); |
73 | extern void percpu_depopulate(void *__pdata, int cpu); | |
74 | extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, | |
75 | cpumask_t *mask); | |
76 | extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask); | |
77 | extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); | |
78 | extern void percpu_free(void *__pdata); | |
1da177e4 LT |
79 | |
80 | #else /* CONFIG_SMP */ | |
81 | ||
7ff6f082 MP |
82 | #define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) |
83 | ||
84 | static inline void percpu_depopulate(void *__pdata, int cpu) | |
85 | { | |
86 | } | |
87 | ||
88 | static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) | |
89 | { | |
90 | } | |
1da177e4 | 91 | |
7ff6f082 MP |
92 | static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, |
93 | int cpu) | |
1da177e4 | 94 | { |
7ff6f082 | 95 | return percpu_ptr(__pdata, cpu); |
1da177e4 | 96 | } |
7ff6f082 MP |
97 | |
98 | static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, | |
99 | cpumask_t *mask) | |
100 | { | |
101 | return 0; | |
102 | } | |
103 | ||
0891a8d7 | 104 | static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) |
7ff6f082 MP |
105 | { |
106 | return kzalloc(size, gfp); | |
107 | } | |
108 | ||
109 | static inline void percpu_free(void *__pdata) | |
110 | { | |
111 | kfree(__pdata); | |
1da177e4 LT |
112 | } |
113 | ||
114 | #endif /* CONFIG_SMP */ | |
115 | ||
7ff6f082 MP |
116 | #define percpu_populate_mask(__pdata, size, gfp, mask) \ |
117 | __percpu_populate_mask((__pdata), (size), (gfp), &(mask)) | |
118 | #define percpu_depopulate_mask(__pdata, mask) \ | |
119 | __percpu_depopulate_mask((__pdata), &(mask)) | |
120 | #define percpu_alloc_mask(size, gfp, mask) \ | |
121 | __percpu_alloc_mask((size), (gfp), &(mask)) | |
122 | ||
123 | #define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map) | |
124 | ||
125 | /* (legacy) interface for use without CPU hotplug handling */ | |
126 | ||
127 | #define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \ | |
128 | cpu_possible_map) | |
129 | #define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type)) | |
130 | #define free_percpu(ptr) percpu_free((ptr)) | |
131 | #define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu)) | |

#endif /* __LINUX_PERCPU_H */