#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB: A slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing of last object removes slab from partial list */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	NR_SLUB_STAT_ITEMS };

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to the first free per-cpu object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
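
/*
 * Fastpath sketch (the real code lives in mm/slub.c): an allocation
 * pops the first object off ->freelist, and a free to the currently
 * active cpu slab pushes the object back on.  Free objects are chained
 * through a free pointer stored at kmem_cache->offset within each
 * object.
 */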

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
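
/*
 * A minimal decode sketch, assuming the packing used by mm/slub.c
 * (order in the high bits, object count in the low bits; the OO_SHIFT
 * and OO_MASK names come from the .c file, not from this header):
 *
 *	unsigned int order   = x.x >> OO_SHIFT;   with OO_SHIFT == 16
 *	unsigned int objects = x.x & OO_MASK;     with OO_MASK == (1 << OO_SHIFT) - 1
 */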

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu *cpu_slab;
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	unsigned long min_partial;
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#else
	/* Avoid an extra cache line for UP */
	struct kmem_cache_node local_node;
#endif
};
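
/*
 * Rough roles of the order/objects fields above, as assigned in
 * mm/slub.c: oo is the preferred slab order and object count, min is
 * the fallback used when a higher-order page cannot be allocated
 * (counted by the ORDER_FALLBACK statistic), and max records the
 * largest order/object pair the cache has used.
 */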

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Maximum kmalloc object size handled by SLUB. Larger object allocations
 * are passed through to the page allocator. The page allocator "fastpath"
 * is relatively slow, so we need this value to be high enough that
 * performance-critical objects are still allocated through the SLUB
 * fastpath.
 *
 * This should be dropped to PAGE_SIZE / 2 once the page allocator
 * "fastpath" becomes competitive with the slab allocator fastpaths.
 */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
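
/*
 * The largest general cache covers objects of up to SLUB_MAX_SIZE ==
 * 2 * PAGE_SIZE == 1 << (PAGE_SHIFT + 1) bytes, so kmalloc cache
 * indices run from 0 to PAGE_SHIFT + 1 and the cache array needs
 * PAGE_SHIFT + 2 slots per variant.
 */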
#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
/* Reserve extra caches for potential DMA use */
#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#define KMALLOC_CACHES SLUB_PAGE_SHIFT
#endif

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];

/*
 * Sorry that the following has to be that ugly, but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
/*
 * The following is only needed to support architectures with a page size
 * larger than 4k.
 */
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
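
/*
 * Illustrative values, assuming KMALLOC_MIN_SIZE == 8:
 *
 *	kmalloc_index(8)   == 3		the 8-byte cache
 *	kmalloc_index(100) == 7		the 128-byte cache
 *	kmalloc_index(192) == 2		the special 192-byte cache
 *	kmalloc_index(0)   == 0		no cache; callers return ZERO_SIZE_PTR
 */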

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return &kmalloc_caches[index];
}
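
/*
 * Note that for sizes above SLUB_MAX_SIZE, kmalloc_index() yields an
 * index beyond the statically allocated array (and eventually -1), so
 * callers must check the size first, as kmalloc() and kmalloc_node()
 * below do.
 */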

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
	return kmem_cache_alloc(s, gfpflags);
}
#endif
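
/*
 * The _notrace variant lets the inline kmalloc() below emit a single
 * trace_kmalloc() event carrying the caller's requested size, instead
 * of the generic kmem_cache_alloc() event.
 */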

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);

	kmemleak_alloc(ret, size, 1, flags);
	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);

	return ret;
}
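
/*
 * Example: with 4k pages, kmalloc_large(10000, GFP_KERNEL) computes
 * get_order(10000) == 2 and hands back a physically contiguous 16k
 * (order-2) block straight from the page allocator.
 */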

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	void *ret;

	if (__builtin_constant_p(size)) {
		if (size > SLUB_MAX_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			ret = kmem_cache_alloc_notrace(s, flags);

			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);

			return ret;
		}
	}
	return __kmalloc(size, flags);
}
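
/*
 * A minimal usage sketch (illustrative only):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *		constant size: resolved at compile time to a direct
 *		kmem_cache_alloc_notrace() on the matching cache
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *		variable size: falls through to __kmalloc()
 *
 * Both are freed with kfree(), declared in linux/slab.h.
 */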

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	void *ret;

	if (__builtin_constant_p(size) &&
	    size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		ret = kmem_cache_alloc_node_notrace(s, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, s->size, flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
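
/*
 * Usage sketch: kmalloc_node(len, GFP_KERNEL, node) allocates from a
 * specific NUMA node, e.g. dev_to_node(dev) for device-local buffers
 * or numa_node_id() for the current cpu's node.
 */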
#endif

#endif /* _LINUX_SLUB_DEF_H */