/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB)

#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
		SLAB_CACHE_DMA | SLAB_NOTRACK)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = 1;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
	    size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG_KMEM
static int memcg_alloc_cache_params(struct mem_cgroup *memcg,
		struct kmem_cache *s, struct kmem_cache *root_cache)
{
	size_t size;

	if (!memcg_kmem_enabled())
		return 0;

	if (!memcg) {
		size = offsetof(struct memcg_cache_params, memcg_caches);
		size += memcg_limited_groups_array_size * sizeof(void *);
	} else
		size = sizeof(struct memcg_cache_params);

	s->memcg_params = kzalloc(size, GFP_KERNEL);
	if (!s->memcg_params)
		return -ENOMEM;

	if (memcg) {
		s->memcg_params->memcg = memcg;
		s->memcg_params->root_cache = root_cache;
	} else
		s->memcg_params->is_root_cache = true;

	return 0;
}

static void memcg_free_cache_params(struct kmem_cache *s)
{
	kfree(s->memcg_params);
}

static int memcg_update_cache_params(struct kmem_cache *s, int num_memcgs)
{
	int size;
	struct memcg_cache_params *new_params, *cur_params;

	BUG_ON(!is_root_cache(s));

	size = offsetof(struct memcg_cache_params, memcg_caches);
	size += num_memcgs * sizeof(void *);

	new_params = kzalloc(size, GFP_KERNEL);
	if (!new_params)
		return -ENOMEM;

	cur_params = s->memcg_params;
	memcpy(new_params->memcg_caches, cur_params->memcg_caches,
	       memcg_limited_groups_array_size * sizeof(void *));

	new_params->is_root_cache = true;

	rcu_assign_pointer(s->memcg_params, new_params);
	if (cur_params)
		kfree_rcu(cur_params, rcu_head);

	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;
	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		ret = memcg_update_cache_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			goto out;
	}

	memcg_update_array_size(num_memcgs);
out:
	mutex_unlock(&slab_mutex);
	return ret;
}
#else
static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
		struct kmem_cache *s, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void memcg_free_cache_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
		    (align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
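
/*
 * Worked example (assuming a 64-byte cache line and an 8-byte
 * ARCH_SLAB_MINALIGN; both are arch-dependent):
 *
 *	calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 20)
 *
 * starts with ralign == 64 and halves it while the object still fits in
 * half of it (20 <= 32, but 20 > 16), ending up with a 32-byte alignment.
 * Small objects thus share a cache line instead of wasting one each,
 * while a 100-byte object would keep the full 64-byte alignment.
 */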

static struct kmem_cache *
do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
		     unsigned long flags, void (*ctor)(void *),
		     struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = memcg_alloc_cache_params(memcg, s, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	memcg_free_cache_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;
	char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		s = NULL;	/* suppress uninit var warning */
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = do_kmem_cache_create(cache_name, size, size,
				 calculate_alignment(flags, align, size),
				 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
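
/*
 * Illustrative use of the API above (the "foo" names are made up for the
 * example; kmem_cache_create(), kmem_cache_alloc(), kmem_cache_free() and
 * kmem_cache_destroy() are the real entry points):
 *
 *	struct foo {
 *		int id;
 *		struct list_head node;
 *	};
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 */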

static int do_kmem_cache_shutdown(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	if (__kmem_cache_shutdown(s) != 0) {
		printk(KERN_ERR "kmem_cache_destroy %s: "
		       "Slab cache still has objects\n", s->name);
		dump_stack();
		return -EBUSY;
	}

	if (s->flags & SLAB_DESTROY_BY_RCU)
		*need_rcu_barrier = true;

#ifdef CONFIG_MEMCG_KMEM
	if (!is_root_cache(s)) {
		struct kmem_cache *root_cache = s->memcg_params->root_cache;
		int memcg_id = memcg_cache_id(s->memcg_params->memcg);

		BUG_ON(root_cache->memcg_params->memcg_caches[memcg_id] != s);
		root_cache->memcg_params->memcg_caches[memcg_id] = NULL;
	}
#endif
	list_move(&s->list, release);
	return 0;
}

static void do_kmem_cache_release(struct list_head *release,
				  bool need_rcu_barrier)
{
	struct kmem_cache *s, *s2;

	if (need_rcu_barrier)
		rcu_barrier();

	list_for_each_entry_safe(s, s2, release, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_remove(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
			     struct kmem_cache *root_cache)
{
	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
	int memcg_id = memcg_cache_id(memcg);
	struct kmem_cache *s = NULL;
	char *cache_name;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	/*
	 * Since per-memcg caches are created asynchronously on first
	 * allocation (see memcg_kmem_get_cache()), several threads can try to
	 * create the same cache, but only one of them may succeed.
	 */
	if (cache_from_memcg_idx(root_cache, memcg_id))
		goto out_unlock;

	cgroup_name(mem_cgroup_css(memcg)->cgroup,
		    memcg_name_buf, sizeof(memcg_name_buf));
	cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
			       memcg_cache_id(memcg), memcg_name_buf);
	if (!cache_name)
		goto out_unlock;

	s = do_kmem_cache_create(cache_name, root_cache->object_size,
				 root_cache->size, root_cache->align,
				 root_cache->flags, root_cache->ctor,
				 memcg, root_cache);
	/*
	 * If we could not create a memcg cache, do not complain, because
	 * that's not critical at all as we can always proceed with the root
	 * cache.
	 */
	if (IS_ERR(s)) {
		kfree(cache_name);
		goto out_unlock;
	}

	/*
	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
	 * barrier here to ensure nobody will see the kmem_cache partially
	 * initialized.
	 */
	smp_wmb();
	root_cache->memcg_params->memcg_caches[memcg_id] = s;

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}
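
/*
 * The "%s(%d:%s)" format above yields per-memcg cache names such as
 * "radix_tree_node(5:background)", i.e. the root cache name followed by
 * the memcg cache id and the cgroup name (the id "5" and the cgroup
 * "background" are only an illustration).
 */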

void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	struct kmem_cache *s, *s2;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry_safe(s, s2, &slab_caches, list) {
		if (is_root_cache(s) || s->memcg_params->memcg != memcg)
			continue;
		/*
		 * The cgroup is about to be freed and therefore has no charges
		 * left. Hence, all its caches must be empty by now.
		 */
		BUG_ON(do_kmem_cache_shutdown(s, &release, &need_rcu_barrier));
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	do_kmem_cache_release(&release, need_rcu_barrier);
}
#endif /* CONFIG_MEMCG_KMEM */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	memcg_free_cache_params(s);
	kfree(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int i;
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	bool busy = false;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	for_each_memcg_cache_index(i) {
		struct kmem_cache *c = cache_from_memcg_idx(s, i);

		if (c && do_kmem_cache_shutdown(c, &release, &need_rcu_barrier))
			busy = true;
	}

	if (!busy)
		do_kmem_cache_shutdown(s, &release, &need_rcu_barrier);

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	do_kmem_cache_release(&release, need_rcu_barrier);
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	ret = __kmem_cache_shrink(cachep);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
			name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}
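
/*
 * Worked example of the lookup above: a 56-byte request falls in the
 * size_index range, size_index_elem(56) == 6 and size_index[6] == 6, so it
 * is served from kmalloc_caches[6] ("kmalloc-64").  A 300-byte request
 * takes the fls() path, fls(299) == 9, so it maps to kmalloc_caches[9]
 * ("kmalloc-512").  The cache names assume the default naming set up by
 * create_kmalloc_caches() below.
 */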

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	/*
	 * Patch up the size_index table if we have strange large alignment
	 * requirements for the kmalloc array. This is only the case for
	 * MIPS it seems. The standard arches will not generate any code here.
	 *
	 * Largest permitted alignment is 256 bytes due to the way we
	 * handle the index determination for the smaller caches.
	 *
	 * Make sure that nothing crazy happens if someone starts tinkering
	 * around with ARCH_KMALLOC_MINALIGN
	 */
	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i]) {
			kmalloc_caches[i] = create_kmalloc_cache(NULL,
							1 << i, flags);
		}

		/*
		 * Caches that are not of a power-of-two size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);

		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];
		char *n;

		if (s) {
			n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));

			BUG_ON(!n);
			s->name = n;
		}
	}

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
					    "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
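
/*
 * With the common values KMALLOC_MIN_SIZE == 8 and KMALLOC_SHIFT_LOW == 3
 * (an assumption; both are arch-dependent), the loop above ends up
 * populating the array as:
 *
 *	kmalloc_caches[3..7]	-> kmalloc-8, -16, -32, -64, -128
 *	kmalloc_caches[1]	-> kmalloc-96  (created once i == 6)
 *	kmalloc_caches[2]	-> kmalloc-192 (created once i == 7)
 *	kmalloc_caches[8..]	-> kmalloc-256, -512, ... up to
 *				   1 << KMALLOC_SHIFT_HIGH
 */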
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_kmem_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);
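
/*
 * For example (assuming 4 KiB pages), a 64 KiB request that reaches this
 * path is rounded up to order get_order(65536) == 4, i.e. 16 contiguous
 * pages.  Because the pages are allocated with __GFP_COMP, kfree() can
 * later recover that order from the compound page and return the whole
 * block to the page allocator.
 */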

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;
	int i;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache_index(i) {
		c = cache_from_memcg_idx(s, i);
		if (!c)
			continue;

		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (is_root_cache(s))
		cache_show(s, m);
	return 0;
}

#ifdef CONFIG_MEMCG_KMEM
int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (!is_root_cache(s) && s->memcg_params->memcg == memcg)
		cache_show(s, m);
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	.write = slabinfo_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want the buffer freed immediately,
 * for example when the old buffer may still be read under RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);

}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
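
/*
 * Typical use: growing a buffer while keeping its contents (the "buf" and
 * "count" names below are only illustrative):
 *
 *	int *bigger = krealloc(buf, count * 2 * sizeof(*buf), GFP_KERNEL);
 *	if (!bigger)
 *		return -ENOMEM;		(buf is still valid and unchanged)
 *	buf = bigger;
 *
 * As with realloc(), the old pointer must not be used once krealloc() has
 * returned a different, non-NULL pointer.
 */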

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
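
/*
 * kzfree() is meant for buffers that held sensitive data, e.g. key
 * material, so the contents do not linger in freed slab memory.  A sketch
 * ("key" and "key_len" are hypothetical):
 *
 *	u8 *key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kzfree(key);
 */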

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);