/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * but only if we can be sure that the memory has not meanwhile been reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)
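/*
 * A minimal usage sketch (illustrative only; "struct foo" and "foo_cache"
 * are hypothetical names, not defined by this header). It shows how a cache
 * is typically created with SLAB_TYPESAFE_BY_RCU so that the lockless
 * validation loop above never dereferences unmapped memory:
 *
 *	struct foo {
 *		int key;
 *		refcount_t ref;
 *		spinlock_t lock;
 *	};
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_TYPESAFE_BY_RCU, NULL);
 *
 * Objects released with kmem_cache_free() may be reused for new "foo"
 * instances right away, but the backing slab page is only returned to the
 * page allocator after an RCU grace period.
 */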
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS 0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB 0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT 0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN 0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)

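/*
 * A usage sketch (the "my_request" structure and cache below are
 * hypothetical, shown only to illustrate the macro):
 *
 *	struct my_request {
 *		struct list_head list;
 *		unsigned long flags;
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *my_request_cache;
 *
 *	my_request_cache = KMEM_CACHE(my_request, SLAB_HWCACHE_ALIGN);
 *	if (!my_request_cache)
 *		return -ENOMEM;
 *
 * kmem_cache_create() returns NULL on failure (unless SLAB_PANIC is
 * passed), so callers are expected to check the result as above.
 */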
/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)

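/*
 * A whitelisting sketch (hypothetical "struct my_msg" with an inline "data"
 * buffer that is copied to/from user space; the rest of the object stays
 * outside the whitelist when CONFIG_HARDENED_USERCOPY is enabled):
 *
 *	struct my_msg {
 *		struct list_head list;
 *		size_t len;
 *		char data[128];
 *	};
 *
 *	my_msg_cache = KMEM_CACHE_USERCOPY(my_msg, 0, data);
 */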
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum kmalloc size is less than 16, we use it as the minimum
 * object size and give up on using a byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)


/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for both flags.
	 */
	if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. If both are, __GFP_DMA
	 * is more important.
	 */
	return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
	return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

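/*
 * A bulk-allocation sketch (the "skb_cache" name and batch size are
 * hypothetical). kmem_cache_alloc_bulk() returns the number of objects
 * placed into the array, or 0 on failure:
 *
 *	void *objs[16];
 *	int n;
 *
 *	n = kmem_cache_alloc_bulk(skb_cache, GFP_KERNEL, 16, objs);
 *	if (!n)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(skb_cache, n, objs);
 */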
/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user. May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel RAM. May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail
 *   eventually.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
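/*
 * A usage sketch for kmalloc() (the "struct my_ctx" type is hypothetical).
 * Because sizeof(*ctx) is a compile-time constant no larger than
 * KMALLOC_MAX_CACHE_SIZE, the inline above typically folds the lookup into a
 * direct kmem_cache_alloc_trace() on the matching kmalloc cache (with SLAB
 * or SLUB); otherwise it falls back to __kmalloc():
 *
 *	struct my_ctx *ctx;
 *
 *	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 *	if (!ctx)
 *		return -ENOMEM;
 *	...
 *	kfree(ctx);
 */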

/*
 * Determine the size used for the nth kmalloc cache.
 * Returns the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1U << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches. NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches. This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches. While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;

			void (*deact_fn)(struct kmem_cache *);
			union {
				struct rcu_head deact_rcu_head;
				struct work_struct deact_work;
			};
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

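/*
 * An array-allocation sketch (hypothetical "entries" table of "nr" elements).
 * kmalloc_array()/kcalloc() check n * size for multiplication overflow and
 * return NULL instead of silently allocating a short buffer, which is why
 * they are preferred over an open-coded kmalloc(nr * sizeof(...), ...):
 *
 *	struct my_entry *entries;
 *
 *	entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
 *	if (!entries)
 *		return -ENOMEM;
 *	...
 *	kfree(entries);
 */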
/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */