/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using it is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
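
/*
 * Illustrative sketch (not itself part of the API): a zero-byte request
 * yields ZERO_SIZE_PTR rather than NULL, and both values are safe to pass
 * to kfree():
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	// returns ZERO_SIZE_PTR
 *
 *	if (ZERO_OR_NULL_PTR(p))		// true for NULL and ZERO_SIZE_PTR
 *		;				// do not dereference p
 *	kfree(p);				// no-op in both cases
 */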

#include <linux/kmemleak.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
#ifdef CONFIG_MEMCG_KMEM
struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *,
					   struct kmem_cache *,
					   const char *);
#endif
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
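
/*
 * Example use of KMEM_CACHE() (an illustrative sketch; "struct foo" and
 * foo_cachep are hypothetical, not defined by this header):
 *
 *	struct foo {
 *		int id;
 *		struct list_head link;
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *
 * which expands to
 *
 *	kmem_cache_create("foo", sizeof(struct foo), __alignof__(struct foo),
 *			  (SLAB_HWCACHE_ALIGN | SLAB_PANIC), NULL);
 *
 * Objects are then obtained with kmem_cache_alloc(foo_cachep, GFP_KERNEL)
 * and released with kmem_cache_free(foo_cachep, obj).
 */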

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	30
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
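
/*
 * Worked example (a sketch, assuming CONFIG_SLUB and 4KB pages, i.e.
 * PAGE_SHIFT == 12): KMALLOC_SHIFT_HIGH = PAGE_SHIFT + 1 = 13, so
 * KMALLOC_MAX_CACHE_SIZE = 8KB; constant-size kmalloc() requests up to 8KB
 * are served from the kmalloc slab caches, while larger ones fall through
 * to the page allocator via kmalloc_large() below.
 */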

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up on using a byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1) .. 2^n - 1
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
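
/*
 * For illustration (assuming KMALLOC_MIN_SIZE <= 32): kmalloc_index(90)
 * returns 1, so a constant 90-byte request is served from the special
 * 96-byte cache kmalloc_caches[1], while kmalloc_index(100) returns 7,
 * the 128-byte cache (kmalloc_size(7) == 128, see kmalloc_size() below).
 */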
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	return kmem_cache_alloc(s, flags);
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif /* CONFIG_TRACING */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user. May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
					flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}
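
/*
 * Typical kmalloc() usage (an illustrative sketch; "struct foo" and the
 * error handling shown are hypothetical, not part of this header):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 */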

/*
 * Determine the size used for the nth kmalloc cache.
 * Return the size or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
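
/*
 * Sketch of a node-aware allocation (illustrative only; "buf", "size" and
 * "cpu" are hypothetical): data that should live on a CPU's local memory
 * node might be allocated with
 *
 *	buf = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
 *
 * On !CONFIG_NUMA kernels the node argument is effectively ignored, as the
 * fallbacks above show.
 */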

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * struct kmem_cache will hold a pointer to it, so the memory cost while
 * disabled is 1 pointer. The runtime cost while enabled gets bigger than it
 * would otherwise be if it were bundled in kmem_cache: we'll need an
 * extra pointer chase. But the trade-off clearly lies in favor of not
 * penalizing non-users.
 *
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @list: list_head for the list of all caches in this memcg
 * @root_cache: pointer to the global, root cache, this cache was derived from
 * @nr_pages: number of pages that belong to this cache.
 */
struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct {
			struct rcu_head rcu_head;
			struct kmem_cache *memcg_caches[0];
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			atomic_t nr_pages;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
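
/*
 * Example (an illustrative sketch; "struct entry" and nr_entries are
 * hypothetical): allocating a zeroed array with overflow checking:
 *
 *	struct entry *tbl = kcalloc(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *
 * This is equivalent to kmalloc_array(nr_entries, sizeof(*tbl),
 * GFP_KERNEL | __GFP_ZERO) and returns NULL if nr_entries * sizeof(*tbl)
 * would overflow SIZE_MAX.
 */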

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */
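
/*
 * Sketch of kmalloc_track_caller() use (illustrative; my_strdup() is a
 * hypothetical helper modelled on kstrdup()): a widely-used wrapper wants
 * leak reports to point at its caller rather than at the wrapper itself:
 *
 *	char *my_strdup(const char *s, gfp_t gfp)
 *	{
 *		size_t len = strlen(s) + 1;
 *		char *buf = kmalloc_track_caller(len, gfp);
 *
 *		if (buf)
 *			memcpy(buf, s, len);
 *		return buf;
 *	}
 */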

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}
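
/*
 * Example shortcut usage (an illustrative sketch; "struct foo" and "node"
 * are hypothetical): a zeroed, node-local allocation from atomic context:
 *
 *	struct foo *f = kzalloc_node(sizeof(*f), GFP_ATOMIC, node);
 *
 * which is equivalent to kmalloc_node(sizeof(*f), GFP_ATOMIC | __GFP_ZERO,
 * node).
 */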

/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */