#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields, and obj_offset
	 * below contains the offset from the start of that area to the
	 * user-visible object.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init()).
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 *
	 * We also need to guarantee that the list is able to accommodate a
	 * pointer for each node since "nodelists" uses the remainder of
	 * available pointers.
	 */
	struct kmem_list3 **nodelists;
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Do not add fields after array[].
	 */
};

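/*
 * Sketch, for orientation only: reciprocal_buffer_size caches a
 * reciprocal_divide() constant so the hot path can turn an object's
 * byte offset inside a slab into an object index without an integer
 * division, roughly as mm/slab.c does in obj_to_index():
 *
 *	u32 offset = obj - slab->s_mem;
 *	unsigned int idx = reciprocal_divide(offset,
 *					     cache->reciprocal_buffer_size);
 */
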
/* Size description struct for general caches. */
struct cache_sizes {
	size_t cs_size;
	struct kmem_cache *cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache *cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];

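/*
 * Illustration, assuming the mm/slab.c definition: malloc_sizes[] is
 * generated from the same CACHE() list used by the inlines below, so
 * its entries are sorted by ascending cs_size and the index i computed
 * in kmalloc()/kmalloc_node() lines up with the matching cache:
 *
 *	struct cache_sizes malloc_sizes[] = {
 *	#define CACHE(x) { .cs_size = (x) },
 *	#include <linux/kmalloc_sizes.h>
 *		CACHE(ULONG_MAX)
 *	#undef CACHE
 *	};
 */
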
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

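/*
 * With CONFIG_TRACING the _trace variants are real functions in
 * mm/slab.c that record the requested size at the tracepoint; without
 * it they collapse to plain kmem_cache_alloc() calls.
 */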
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_trace(cachep, flags, size);

		return ret;
	}
	return __kmalloc(size, flags);
}
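
/*
 * Worked example (with a typical <linux/kmalloc_sizes.h>): a constant
 * kmalloc(100, GFP_KERNEL) makes the CACHE() block above unroll to
 *
 *	if (size <= 32) goto found; else i++;
 *	if (size <= 64) goto found; else i++;
 *	if (size <= 128) goto found; else i++;
 *	...
 *
 * which the compiler folds at build time, leaving i as the index of
 * the 128-byte general cache when control reaches "found:".
 */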

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid,
					 size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid,
			    size_t size)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}

#endif	/* CONFIG_NUMA */
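
/*
 * Usage sketch (struct foo and the surrounding error handling are
 * hypothetical): callers pick a NUMA node explicitly, and the
 * constant-size fast path applies exactly as in kmalloc():
 *
 *	struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL,
 *				     cpu_to_node(cpu));
 *	if (!f)
 *		return -ENOMEM;
 */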

#endif	/* _LINUX_SLAB_DEF_H */