/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}

#if !defined(CONFIG_SLUB)
		if (!strcmp(s->name, name)) {
			pr_err("%s (%s): Cache name already exists.\n",
			       __func__, name);
			dump_stack();
			s = NULL;
			return -EINVAL;
		}
#endif
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif
#ifdef CONFIG_MEMCG_KMEM
int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		ret = memcg_update_cache_size(s, num_memcgs);
		/*
		 * See comment in memcontrol.c, memcg_update_cache_size:
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			goto out;
	}

	memcg_update_array_size(num_memcgs);
out:
	mutex_unlock(&slab_mutex);
	return ret;
}
#endif
/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();

		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
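
/*
 * Worked example (illustrative only, assuming a 64 byte cache line and an
 * 8 byte ARCH_SLAB_MINALIGN): a 100 byte object created with
 * SLAB_HWCACHE_ALIGN and align == 0 keeps ralign at 64, since
 * 100 > 64 / 2, and the result is ALIGN(64, 8) == 64. A 24 byte object
 * with the same flags has ralign halved once to 32, so it gets 32 byte
 * alignment instead of a full cache line.
 */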
static struct kmem_cache *
do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
		     unsigned long flags, void (*ctor)(void *),
		     struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = memcg_alloc_cache_params(memcg, s, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	memcg_free_cache_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}
/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;
	char *cache_name;
	int err;

	get_online_cpus();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err)
		goto out_unlock;

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = do_kmem_cache_create(cache_name, size, size,
				 calculate_alignment(flags, align, size),
				 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
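
/*
 * Typical usage (illustrative sketch; "struct foo" and the calling code are
 * hypothetical, not part of this file):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
 *				       0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 *	kmem_cache_destroy(foo_cachep);
 */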
#ifdef CONFIG_MEMCG_KMEM
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 * @memcg_name: The name of the memory cgroup (used for naming the new cache).
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
					   struct kmem_cache *root_cache,
					   const char *memcg_name)
{
	struct kmem_cache *s = NULL;
	char *cache_name;

	mutex_lock(&slab_mutex);

	cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
			       memcg_cache_id(memcg), memcg_name);
	if (!cache_name)
		goto out_unlock;

	s = do_kmem_cache_create(cache_name, root_cache->object_size,
				 root_cache->size, root_cache->align,
				 root_cache->flags, root_cache->ctor,
				 memcg, root_cache);
	if (IS_ERR(s)) {
		kfree(cache_name);
		s = NULL;
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	return s;
}
static int memcg_cleanup_cache_params(struct kmem_cache *s)
{
	int rc;

	if (!s->memcg_params ||
	    !s->memcg_params->is_root_cache)
		return 0;

	mutex_unlock(&slab_mutex);
	rc = __memcg_cleanup_cache_params(s);
	mutex_lock(&slab_mutex);

	return rc;
}
#else
static int memcg_cleanup_cache_params(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_MEMCG_KMEM */
void slab_kmem_cache_release(struct kmem_cache *s)
{
	kmem_cache_free(kmem_cache, s);
}
void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	if (memcg_cleanup_cache_params(s) != 0)
		goto out_unlock;

	if (__kmem_cache_shutdown(s) != 0) {
		printk(KERN_ERR "kmem_cache_destroy %s: "
		       "Slab cache still has objects\n", s->name);
		dump_stack();
		goto out_unlock;
	}

	list_del(&s->list);

	mutex_unlock(&slab_mutex);
	if (s->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();

	memcg_free_cache_params(s);
#ifdef SLAB_SUPPORTS_SYSFS
	sysfs_slab_remove(s);
#else
	slab_kmem_cache_release(s);
#endif
	goto out;

out_unlock:
	mutex_unlock(&slab_mutex);
out:
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
/*
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	ret = __kmem_cache_shrink(cachep);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);
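
/*
 * Illustrative use (hypothetical caller, not part of this file): after a
 * subsystem drops a large batch of objects it can try to return the
 * now-empty slabs to the page allocator:
 *
 *	if (kmem_cache_shrink(foo_cachep) == 0)
 *		pr_debug("foo_cache: all partial slabs released\n");
 */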
int slab_is_available(void)
{
	return slab_state >= UP;
}
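
/*
 * Illustrative use (hypothetical early-boot caller): code that may run
 * before the slab allocator is initialised can fall back to a boot-time
 * allocator:
 *
 *	if (slab_is_available())
 *		ptr = kmalloc(size, GFP_NOWAIT);
 *	else
 *		ptr = memblock_virt_alloc(size, 0);
 */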
#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}
struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}
struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif
/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}
/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];
#endif
	return kmalloc_caches[index];
}
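
/*
 * Worked example (illustrative, using the stock size_index table): a
 * 100 byte request falls in the <= 192 range, so
 * size_index_elem(100) == (100 - 1) / 8 == 12 and the table maps it to the
 * kmalloc-128 cache. A 500 byte request instead uses fls(499) == 9, i.e.
 * kmalloc_caches[9], the kmalloc-512 cache.
 */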
/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	/*
	 * Patch up the size_index table if we have strange large alignment
	 * requirements for the kmalloc array. This is only the case for
	 * MIPS it seems. The standard arches will not generate any code here.
	 *
	 * Largest permitted alignment is 256 bytes due to the way we
	 * handle the index determination for the smaller caches.
	 *
	 * Make sure that nothing crazy happens if someone starts tinkering
	 * around with ARCH_KMALLOC_MINALIGN
	 */
	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;
	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i]) {
			kmalloc_caches[i] = create_kmalloc_cache(NULL,
							1 << i, flags);
		}

		/*
		 * Caches that are not of a power-of-two size.
		 * These have to be created immediately after the
		 * earlier power-of-two caches.
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);

		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];
		char *n;

		if (s) {
			n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));

			BUG_ON(!n);
			s->name = n;
		}
	}

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */
/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_kmem_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);
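
/*
 * Illustrative path (sketch, not a literal call chain): a
 * kmalloc(128 * 1024, GFP_KERNEL) request is larger than the largest
 * kmalloc cache on typical SLUB configurations, so the slab front end
 * forwards it to the page allocator roughly as
 *
 *	kmalloc_order(128 * 1024, GFP_KERNEL, get_order(128 * 1024));
 *
 * i.e. an order-5 compound page allocation with 4 KiB pages; kfree() later
 * reads the compound page order to free it correctly.
 */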
#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);

	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif
#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif
void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	mutex_lock(&slab_mutex);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}
static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;
	int i;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache_index(i) {
		c = cache_from_memcg_idx(s, i);
		if (!c)
			continue;

		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}
int cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
	return 0;
}
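
/*
 * Example of a resulting /proc/slabinfo line (illustrative values only):
 *
 *	kmalloc-256     1040   1056    256   16    1 : tunables  120   60    8 : slabdata     66     66      0
 *
 * i.e. cache name, active/total objects, object size, objects per slab and
 * pages per slab, followed by the tunables and slabdata columns printed
 * by the seq_printf() calls above.
 */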
static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (!is_root_cache(s))
		return 0;
	return cache_show(s, m);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = s_show,
};
static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write		= slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */
static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);
	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);
	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);
/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
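
/*
 * Illustrative use (hypothetical caller): growing a dynamically sized array
 * while keeping its existing contents. On failure the original buffer is
 * left untouched, so keep the old pointer until krealloc() succeeds:
 *
 *	new_buf = krealloc(buf, new_count * sizeof(*buf), GFP_KERNEL);
 *	if (!new_buf)
 *		return -ENOMEM;
 *	buf = new_buf;
 */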
/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before being freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
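
/*
 * Illustrative use (hypothetical caller): freeing a buffer that held key
 * material, so its contents do not linger in freed slab memory:
 *
 *	key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kzfree(key);
 *
 * Note that the whole ksize() region is cleared, not just key_len bytes.
 */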
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);