mm/slab_common.c
1 /*
2 * Slab allocator functions that are independent of the allocator strategy
3 *
4 * (C) 2012 Christoph Lameter <cl@linux.com>
5 */
6 #include <linux/slab.h>
7
8 #include <linux/mm.h>
9 #include <linux/poison.h>
10 #include <linux/interrupt.h>
11 #include <linux/memory.h>
12 #include <linux/compiler.h>
13 #include <linux/module.h>
14 #include <linux/cpu.h>
15 #include <linux/uaccess.h>
16 #include <linux/seq_file.h>
17 #include <linux/proc_fs.h>
18 #include <asm/cacheflush.h>
19 #include <asm/tlbflush.h>
20 #include <asm/page.h>
21 #include <linux/memcontrol.h>
22
23 #define CREATE_TRACE_POINTS
24 #include <trace/events/kmem.h>
25
26 #include "slab.h"
27
28 enum slab_state slab_state;
29 LIST_HEAD(slab_caches);
30 DEFINE_MUTEX(slab_mutex);
31 struct kmem_cache *kmem_cache;
32
33 static LIST_HEAD(slab_caches_to_rcu_destroy);
34 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
35 static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
36 slab_caches_to_rcu_destroy_workfn);
37
38 /*
39 * Set of flags that will prevent slab merging
40 */
41 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
42 SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
43 SLAB_FAILSLAB | SLAB_KASAN)
44
45 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
46 SLAB_NOTRACK | SLAB_ACCOUNT)
47
48 /*
49 * Merge control. If this is set then no merging of slab caches will occur.
50 */
51 static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
52
53 static int __init setup_slab_nomerge(char *str)
54 {
55 slab_nomerge = true;
56 return 1;
57 }
58
59 #ifdef CONFIG_SLUB
60 __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
61 #endif
62
63 __setup("slab_nomerge", setup_slab_nomerge);
64
65 /*
66 * Determine the size of a slab object
67 */
68 unsigned int kmem_cache_size(struct kmem_cache *s)
69 {
70 return s->object_size;
71 }
72 EXPORT_SYMBOL(kmem_cache_size);
73
74 #ifdef CONFIG_DEBUG_VM
75 static int kmem_cache_sanity_check(const char *name, size_t size)
76 {
77 struct kmem_cache *s = NULL;
78
79 if (!name || in_interrupt() || size < sizeof(void *) ||
80 size > KMALLOC_MAX_SIZE) {
81 pr_err("kmem_cache_create(%s) integrity check failed\n", name);
82 return -EINVAL;
83 }
84
85 list_for_each_entry(s, &slab_caches, list) {
86 char tmp;
87 int res;
88
89 /*
90 * This happens when the module gets unloaded and doesn't
91 * destroy its slab cache and no-one else reuses the vmalloc
92 * area of the module. Print a warning.
93 */
94 res = probe_kernel_address(s->name, tmp);
95 if (res) {
96 pr_err("Slab cache with size %d has lost its name\n",
97 s->object_size);
98 continue;
99 }
100 }
101
102 WARN_ON(strchr(name, ' ')); /* It confuses parsers */
103 return 0;
104 }
105 #else
106 static inline int kmem_cache_sanity_check(const char *name, size_t size)
107 {
108 return 0;
109 }
110 #endif
111
112 void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
113 {
114 size_t i;
115
116 for (i = 0; i < nr; i++) {
117 if (s)
118 kmem_cache_free(s, p[i]);
119 else
120 kfree(p[i]);
121 }
122 }
123
124 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
125 void **p)
126 {
127 size_t i;
128
129 for (i = 0; i < nr; i++) {
130 void *x = p[i] = kmem_cache_alloc(s, flags);
131 if (!x) {
132 __kmem_cache_free_bulk(s, i, p);
133 return 0;
134 }
135 }
136 return i;
137 }
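/*
 * Example (sketch): a caller of the bulk API that the generic loops above
 * implement as a fallback; "foo_cache" is a placeholder cache pointer.
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *		... use all sixteen objects ...
 *		kmem_cache_free_bulk(foo_cache, ARRAY_SIZE(objs), objs);
 *	}
 *
 * A nonzero return means every slot in the array was filled; a return of 0
 * means nothing is left allocated, as __kmem_cache_free_bulk() above ensures.
 */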
138
139 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
140
141 LIST_HEAD(slab_root_caches);
142
143 void slab_init_memcg_params(struct kmem_cache *s)
144 {
145 s->memcg_params.root_cache = NULL;
146 RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
147 INIT_LIST_HEAD(&s->memcg_params.children);
148 }
149
150 static int init_memcg_params(struct kmem_cache *s,
151 struct mem_cgroup *memcg, struct kmem_cache *root_cache)
152 {
153 struct memcg_cache_array *arr;
154
155 if (root_cache) {
156 s->memcg_params.root_cache = root_cache;
157 s->memcg_params.memcg = memcg;
158 INIT_LIST_HEAD(&s->memcg_params.children_node);
159 INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
160 return 0;
161 }
162
163 slab_init_memcg_params(s);
164
165 if (!memcg_nr_cache_ids)
166 return 0;
167
168 arr = kzalloc(sizeof(struct memcg_cache_array) +
169 memcg_nr_cache_ids * sizeof(void *),
170 GFP_KERNEL);
171 if (!arr)
172 return -ENOMEM;
173
174 RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
175 return 0;
176 }
177
178 static void destroy_memcg_params(struct kmem_cache *s)
179 {
180 if (is_root_cache(s))
181 kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
182 }
183
184 static int update_memcg_params(struct kmem_cache *s, int new_array_size)
185 {
186 struct memcg_cache_array *old, *new;
187
188 new = kzalloc(sizeof(struct memcg_cache_array) +
189 new_array_size * sizeof(void *), GFP_KERNEL);
190 if (!new)
191 return -ENOMEM;
192
193 old = rcu_dereference_protected(s->memcg_params.memcg_caches,
194 lockdep_is_held(&slab_mutex));
195 if (old)
196 memcpy(new->entries, old->entries,
197 memcg_nr_cache_ids * sizeof(void *));
198
199 rcu_assign_pointer(s->memcg_params.memcg_caches, new);
200 if (old)
201 kfree_rcu(old, rcu);
202 return 0;
203 }
204
205 int memcg_update_all_caches(int num_memcgs)
206 {
207 struct kmem_cache *s;
208 int ret = 0;
209
210 mutex_lock(&slab_mutex);
211 list_for_each_entry(s, &slab_root_caches, root_caches_node) {
212 ret = update_memcg_params(s, num_memcgs);
213 /*
214 * Instead of freeing the memory, we'll just leave the caches
215 * up to this point in an updated state.
216 */
217 if (ret)
218 break;
219 }
220 mutex_unlock(&slab_mutex);
221 return ret;
222 }
223
224 void memcg_link_cache(struct kmem_cache *s)
225 {
226 if (is_root_cache(s)) {
227 list_add(&s->root_caches_node, &slab_root_caches);
228 } else {
229 list_add(&s->memcg_params.children_node,
230 &s->memcg_params.root_cache->memcg_params.children);
231 list_add(&s->memcg_params.kmem_caches_node,
232 &s->memcg_params.memcg->kmem_caches);
233 }
234 }
235
236 static void memcg_unlink_cache(struct kmem_cache *s)
237 {
238 if (is_root_cache(s)) {
239 list_del(&s->root_caches_node);
240 } else {
241 list_del(&s->memcg_params.children_node);
242 list_del(&s->memcg_params.kmem_caches_node);
243 }
244 }
245 #else
246 static inline int init_memcg_params(struct kmem_cache *s,
247 struct mem_cgroup *memcg, struct kmem_cache *root_cache)
248 {
249 return 0;
250 }
251
252 static inline void destroy_memcg_params(struct kmem_cache *s)
253 {
254 }
255
256 static inline void memcg_unlink_cache(struct kmem_cache *s)
257 {
258 }
259 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
260
261 /*
262 * Find a mergeable slab cache
263 */
264 int slab_unmergeable(struct kmem_cache *s)
265 {
266 if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
267 return 1;
268
269 if (!is_root_cache(s))
270 return 1;
271
272 if (s->ctor)
273 return 1;
274
275 /*
276 * We may have set a slab to be unmergeable during bootstrap.
277 */
278 if (s->refcount < 0)
279 return 1;
280
281 return 0;
282 }
283
284 struct kmem_cache *find_mergeable(size_t size, size_t align,
285 unsigned long flags, const char *name, void (*ctor)(void *))
286 {
287 struct kmem_cache *s;
288
289 if (slab_nomerge)
290 return NULL;
291
292 if (ctor)
293 return NULL;
294
295 size = ALIGN(size, sizeof(void *));
296 align = calculate_alignment(flags, align, size);
297 size = ALIGN(size, align);
298 flags = kmem_cache_flags(size, flags, name, NULL);
299
300 if (flags & SLAB_NEVER_MERGE)
301 return NULL;
302
303 list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
304 if (slab_unmergeable(s))
305 continue;
306
307 if (size > s->size)
308 continue;
309
310 if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
311 continue;
312 /*
313 * Check if alignment is compatible.
314 * Courtesy of Adrian Drzewiecki
315 */
316 if ((s->size & ~(align - 1)) != s->size)
317 continue;
318
319 if (s->size - size >= sizeof(void *))
320 continue;
321
322 if (IS_ENABLED(CONFIG_SLAB) && align &&
323 (align > s->align || s->align % align))
324 continue;
325
326 return s;
327 }
328 return NULL;
329 }
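/*
 * Example: with 8 byte pointers, caches requested for 44 and 46 byte objects
 * both round up to 48 byte slots here, so (absent a ctor, debug flags or
 * differing SLAB_MERGE_SAME bits) the second request may simply reuse the
 * first cache instead of creating a new one.
 */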
330
331 /*
332 * Figure out what the alignment of the objects will be given a set of
333 * flags, a user specified alignment and the size of the objects.
334 */
335 unsigned long calculate_alignment(unsigned long flags,
336 unsigned long align, unsigned long size)
337 {
338 /*
339 * If the user wants hardware cache aligned objects then follow that
340 * suggestion if the object is sufficiently large.
341 *
342 * The hardware cache alignment cannot override the specified
343 * alignment though. If that is greater, then use it.
344 */
345 if (flags & SLAB_HWCACHE_ALIGN) {
346 unsigned long ralign = cache_line_size();
347 while (size <= ralign / 2)
348 ralign /= 2;
349 align = max(align, ralign);
350 }
351
352 if (align < ARCH_SLAB_MINALIGN)
353 align = ARCH_SLAB_MINALIGN;
354
355 return ALIGN(align, sizeof(void *));
356 }
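/*
 * Example: for SLAB_HWCACHE_ALIGN, a 64 byte cache line and a 20 byte
 * object, ralign is halved 64 -> 32 (since 20 <= 32) and stops there
 * (20 > 16), so the objects end up 32 byte aligned instead of each
 * burning a whole cache line.
 */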
357
358 static struct kmem_cache *create_cache(const char *name,
359 size_t object_size, size_t size, size_t align,
360 unsigned long flags, void (*ctor)(void *),
361 struct mem_cgroup *memcg, struct kmem_cache *root_cache)
362 {
363 struct kmem_cache *s;
364 int err;
365
366 err = -ENOMEM;
367 s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
368 if (!s)
369 goto out;
370
371 s->name = name;
372 s->object_size = object_size;
373 s->size = size;
374 s->align = align;
375 s->ctor = ctor;
376
377 err = init_memcg_params(s, memcg, root_cache);
378 if (err)
379 goto out_free_cache;
380
381 err = __kmem_cache_create(s, flags);
382 if (err)
383 goto out_free_cache;
384
385 s->refcount = 1;
386 list_add(&s->list, &slab_caches);
387 memcg_link_cache(s);
388 out:
389 if (err)
390 return ERR_PTR(err);
391 return s;
392
393 out_free_cache:
394 destroy_memcg_params(s);
395 kmem_cache_free(kmem_cache, s);
396 goto out;
397 }
398
399 /*
400 * kmem_cache_create - Create a cache.
401 * @name: A string which is used in /proc/slabinfo to identify this cache.
402 * @size: The size of objects to be created in this cache.
403 * @align: The required alignment for the objects.
404 * @flags: SLAB flags
405 * @ctor: A constructor for the objects.
406 *
407 * Returns a ptr to the cache on success, NULL on failure.
408 * Cannot be called within an interrupt, but can be interrupted.
409 * The @ctor is run when new pages are allocated by the cache.
410 *
411 * The flags are
412 *
413 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
414 * to catch references to uninitialised memory.
415 *
416 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
417 * for buffer overruns.
418 *
419 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
420 * cacheline. This can be beneficial if you're counting cycles as closely
421 * as davem.
422 */
423 struct kmem_cache *
424 kmem_cache_create(const char *name, size_t size, size_t align,
425 unsigned long flags, void (*ctor)(void *))
426 {
427 struct kmem_cache *s = NULL;
428 const char *cache_name;
429 int err;
430
431 get_online_cpus();
432 get_online_mems();
433 memcg_get_cache_ids();
434
435 mutex_lock(&slab_mutex);
436
437 err = kmem_cache_sanity_check(name, size);
438 if (err) {
439 goto out_unlock;
440 }
441
442 /* Refuse requests with allocator specific flags */
443 if (flags & ~SLAB_FLAGS_PERMITTED) {
444 err = -EINVAL;
445 goto out_unlock;
446 }
447
448 /*
449 * Some allocators will constrain the set of valid flags to a subset
450 * of all flags. We expect them to define CACHE_CREATE_MASK in this
451 * case, and we'll just provide them with a sanitized version of the
452 * passed flags.
453 */
454 flags &= CACHE_CREATE_MASK;
455
456 s = __kmem_cache_alias(name, size, align, flags, ctor);
457 if (s)
458 goto out_unlock;
459
460 cache_name = kstrdup_const(name, GFP_KERNEL);
461 if (!cache_name) {
462 err = -ENOMEM;
463 goto out_unlock;
464 }
465
466 s = create_cache(cache_name, size, size,
467 calculate_alignment(flags, align, size),
468 flags, ctor, NULL, NULL);
469 if (IS_ERR(s)) {
470 err = PTR_ERR(s);
471 kfree_const(cache_name);
472 }
473
474 out_unlock:
475 mutex_unlock(&slab_mutex);
476
477 memcg_put_cache_ids();
478 put_online_mems();
479 put_online_cpus();
480
481 if (err) {
482 if (flags & SLAB_PANIC)
483 panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
484 name, err);
485 else {
486 pr_warn("kmem_cache_create(%s) failed with error %d\n",
487 name, err);
488 dump_stack();
489 }
490 return NULL;
491 }
492 return s;
493 }
494 EXPORT_SYMBOL(kmem_cache_create);
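/*
 * Example (sketch): the usual lifecycle built on top of this interface;
 * "struct foo" and foo_cache are placeholder names.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, obj);
 *	kmem_cache_destroy(foo_cache);
 */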
495
496 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
497 {
498 LIST_HEAD(to_destroy);
499 struct kmem_cache *s, *s2;
500
501 /*
502 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
503 * @slab_caches_to_rcu_destroy list. The slab pages are freed
504 * through RCU and the associated kmem_cache is dereferenced
505 * while freeing the pages, so the kmem_caches should be freed only
506 * after the pending RCU operations are finished. As rcu_barrier()
507 * is a pretty slow operation, we batch all pending destructions
508 * asynchronously.
509 */
510 mutex_lock(&slab_mutex);
511 list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
512 mutex_unlock(&slab_mutex);
513
514 if (list_empty(&to_destroy))
515 return;
516
517 rcu_barrier();
518
519 list_for_each_entry_safe(s, s2, &to_destroy, list) {
520 #ifdef SLAB_SUPPORTS_SYSFS
521 sysfs_slab_release(s);
522 #else
523 slab_kmem_cache_release(s);
524 #endif
525 }
526 }
527
528 static int shutdown_cache(struct kmem_cache *s)
529 {
530 /* free asan quarantined objects */
531 kasan_cache_shutdown(s);
532
533 if (__kmem_cache_shutdown(s) != 0)
534 return -EBUSY;
535
536 memcg_unlink_cache(s);
537 list_del(&s->list);
538
539 if (s->flags & SLAB_TYPESAFE_BY_RCU) {
540 list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
541 schedule_work(&slab_caches_to_rcu_destroy_work);
542 } else {
543 #ifdef SLAB_SUPPORTS_SYSFS
544 sysfs_slab_release(s);
545 #else
546 slab_kmem_cache_release(s);
547 #endif
548 }
549
550 return 0;
551 }
552
553 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
554 /*
555 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
556 * @memcg: The memory cgroup the new cache is for.
557 * @root_cache: The parent of the new cache.
558 *
559 * This function attempts to create a kmem cache that will serve allocation
560 * requests going from @memcg to @root_cache. The new cache inherits properties
561 * from its parent.
562 */
563 void memcg_create_kmem_cache(struct mem_cgroup *memcg,
564 struct kmem_cache *root_cache)
565 {
566 static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
567 struct cgroup_subsys_state *css = &memcg->css;
568 struct memcg_cache_array *arr;
569 struct kmem_cache *s = NULL;
570 char *cache_name;
571 int idx;
572
573 get_online_cpus();
574 get_online_mems();
575
576 mutex_lock(&slab_mutex);
577
578 /*
579 * The memory cgroup could have been offlined while the cache
580 * creation work was pending.
581 */
582 if (memcg->kmem_state != KMEM_ONLINE)
583 goto out_unlock;
584
585 idx = memcg_cache_id(memcg);
586 arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
587 lockdep_is_held(&slab_mutex));
588
589 /*
590 * Since per-memcg caches are created asynchronously on first
591 * allocation (see memcg_kmem_get_cache()), several threads can try to
592 * create the same cache, but only one of them may succeed.
593 */
594 if (arr->entries[idx])
595 goto out_unlock;
596
597 cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
598 cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
599 css->serial_nr, memcg_name_buf);
600 if (!cache_name)
601 goto out_unlock;
602
603 s = create_cache(cache_name, root_cache->object_size,
604 root_cache->size, root_cache->align,
605 root_cache->flags & CACHE_CREATE_MASK,
606 root_cache->ctor, memcg, root_cache);
607 /*
608 * If we could not create a memcg cache, do not complain, because
609 * that's not critical at all as we can always proceed with the root
610 * cache.
611 */
612 if (IS_ERR(s)) {
613 kfree(cache_name);
614 goto out_unlock;
615 }
616
617 /*
618 * Since readers won't lock (see cache_from_memcg_idx()), we need a
619 * barrier here to ensure nobody will see the kmem_cache partially
620 * initialized.
621 */
622 smp_wmb();
623 arr->entries[idx] = s;
624
625 out_unlock:
626 mutex_unlock(&slab_mutex);
627
628 put_online_mems();
629 put_online_cpus();
630 }
631
632 static void kmemcg_deactivate_workfn(struct work_struct *work)
633 {
634 struct kmem_cache *s = container_of(work, struct kmem_cache,
635 memcg_params.deact_work);
636
637 get_online_cpus();
638 get_online_mems();
639
640 mutex_lock(&slab_mutex);
641
642 s->memcg_params.deact_fn(s);
643
644 mutex_unlock(&slab_mutex);
645
646 put_online_mems();
647 put_online_cpus();
648
649 /* done, put the ref from slab_deactivate_memcg_cache_rcu_sched() */
650 css_put(&s->memcg_params.memcg->css);
651 }
652
653 static void kmemcg_deactivate_rcufn(struct rcu_head *head)
654 {
655 struct kmem_cache *s = container_of(head, struct kmem_cache,
656 memcg_params.deact_rcu_head);
657
658 /*
659 * We need to grab blocking locks. Bounce to ->deact_work. The
660 * work item shares the space with the RCU head and can't be
661 * initialized earlier.
662 */
663 INIT_WORK(&s->memcg_params.deact_work, kmemcg_deactivate_workfn);
664 queue_work(memcg_kmem_cache_wq, &s->memcg_params.deact_work);
665 }
666
667 /**
668 * slab_deactivate_memcg_cache_rcu_sched - schedule deactivation after a
669 * sched RCU grace period
670 * @s: target kmem_cache
671 * @deact_fn: deactivation function to call
672 *
673 * Schedule @deact_fn to be invoked with online cpus, mems and slab_mutex
674 * held after a sched RCU grace period. The slab is guaranteed to stay
675 * alive until @deact_fn is finished. This is to be used from
676 * __kmemcg_cache_deactivate().
677 */
678 void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
679 void (*deact_fn)(struct kmem_cache *))
680 {
681 if (WARN_ON_ONCE(is_root_cache(s)) ||
682 WARN_ON_ONCE(s->memcg_params.deact_fn))
683 return;
684
685 /* pin memcg so that @s doesn't get destroyed in the middle */
686 css_get(&s->memcg_params.memcg->css);
687
688 s->memcg_params.deact_fn = deact_fn;
689 call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
690 }
691
692 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
693 {
694 int idx;
695 struct memcg_cache_array *arr;
696 struct kmem_cache *s, *c;
697
698 idx = memcg_cache_id(memcg);
699
700 get_online_cpus();
701 get_online_mems();
702
703 mutex_lock(&slab_mutex);
704 list_for_each_entry(s, &slab_root_caches, root_caches_node) {
705 arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
706 lockdep_is_held(&slab_mutex));
707 c = arr->entries[idx];
708 if (!c)
709 continue;
710
711 __kmemcg_cache_deactivate(c);
712 arr->entries[idx] = NULL;
713 }
714 mutex_unlock(&slab_mutex);
715
716 put_online_mems();
717 put_online_cpus();
718 }
719
720 void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
721 {
722 struct kmem_cache *s, *s2;
723
724 get_online_cpus();
725 get_online_mems();
726
727 mutex_lock(&slab_mutex);
728 list_for_each_entry_safe(s, s2, &memcg->kmem_caches,
729 memcg_params.kmem_caches_node) {
730 /*
731 * The cgroup is about to be freed and therefore has no charges
732 * left. Hence, all its caches must be empty by now.
733 */
734 BUG_ON(shutdown_cache(s));
735 }
736 mutex_unlock(&slab_mutex);
737
738 put_online_mems();
739 put_online_cpus();
740 }
741
742 static int shutdown_memcg_caches(struct kmem_cache *s)
743 {
744 struct memcg_cache_array *arr;
745 struct kmem_cache *c, *c2;
746 LIST_HEAD(busy);
747 int i;
748
749 BUG_ON(!is_root_cache(s));
750
751 /*
752 * First, shutdown active caches, i.e. caches that belong to online
753 * memory cgroups.
754 */
755 arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
756 lockdep_is_held(&slab_mutex));
757 for_each_memcg_cache_index(i) {
758 c = arr->entries[i];
759 if (!c)
760 continue;
761 if (shutdown_cache(c))
762 /*
763 * The cache still has objects. Move it to a temporary
764 * list so as not to try to destroy it for a second
765 * time while iterating over inactive caches below.
766 */
767 list_move(&c->memcg_params.children_node, &busy);
768 else
769 /*
770 * The cache is empty and will be destroyed soon. Clear
771 * the pointer to it in the memcg_caches array so that
772 * it will never be accessed even if the root cache
773 * stays alive.
774 */
775 arr->entries[i] = NULL;
776 }
777
778 /*
779 * Second, shutdown all caches left from memory cgroups that are now
780 * offline.
781 */
782 list_for_each_entry_safe(c, c2, &s->memcg_params.children,
783 memcg_params.children_node)
784 shutdown_cache(c);
785
786 list_splice(&busy, &s->memcg_params.children);
787
788 /*
789 * A cache being destroyed must be empty. In particular, this means
790 * that all per memcg caches attached to it must be empty too.
791 */
792 if (!list_empty(&s->memcg_params.children))
793 return -EBUSY;
794 return 0;
795 }
796 #else
797 static inline int shutdown_memcg_caches(struct kmem_cache *s)
798 {
799 return 0;
800 }
801 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
802
803 void slab_kmem_cache_release(struct kmem_cache *s)
804 {
805 __kmem_cache_release(s);
806 destroy_memcg_params(s);
807 kfree_const(s->name);
808 kmem_cache_free(kmem_cache, s);
809 }
810
811 void kmem_cache_destroy(struct kmem_cache *s)
812 {
813 int err;
814
815 if (unlikely(!s))
816 return;
817
818 get_online_cpus();
819 get_online_mems();
820
821 mutex_lock(&slab_mutex);
822
823 s->refcount--;
824 if (s->refcount)
825 goto out_unlock;
826
827 err = shutdown_memcg_caches(s);
828 if (!err)
829 err = shutdown_cache(s);
830
831 if (err) {
832 pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
833 s->name);
834 dump_stack();
835 }
836 out_unlock:
837 mutex_unlock(&slab_mutex);
838
839 put_online_mems();
840 put_online_cpus();
841 }
842 EXPORT_SYMBOL(kmem_cache_destroy);
843
844 /**
845 * kmem_cache_shrink - Shrink a cache.
846 * @cachep: The cache to shrink.
847 *
848 * Releases as many slabs as possible for a cache.
849 * To help debugging, a zero exit status indicates all slabs were released.
850 */
851 int kmem_cache_shrink(struct kmem_cache *cachep)
852 {
853 int ret;
854
855 get_online_cpus();
856 get_online_mems();
857 kasan_cache_shrink(cachep);
858 ret = __kmem_cache_shrink(cachep);
859 put_online_mems();
860 put_online_cpus();
861 return ret;
862 }
863 EXPORT_SYMBOL(kmem_cache_shrink);
864
865 bool slab_is_available(void)
866 {
867 return slab_state >= UP;
868 }
869
870 #ifndef CONFIG_SLOB
871 /* Create a cache during boot when no slab services are available yet */
872 void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
873 unsigned long flags)
874 {
875 int err;
876
877 s->name = name;
878 s->size = s->object_size = size;
879 s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
880
881 slab_init_memcg_params(s);
882
883 err = __kmem_cache_create(s, flags);
884
885 if (err)
886 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
887 name, size, err);
888
889 s->refcount = -1; /* Exempt from merging for now */
890 }
891
892 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
893 unsigned long flags)
894 {
895 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
896
897 if (!s)
898 panic("Out of memory when creating slab %s\n", name);
899
900 create_boot_cache(s, name, size, flags);
901 list_add(&s->list, &slab_caches);
902 memcg_link_cache(s);
903 s->refcount = 1;
904 return s;
905 }
906
907 struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
908 EXPORT_SYMBOL(kmalloc_caches);
909
910 #ifdef CONFIG_ZONE_DMA
911 struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
912 EXPORT_SYMBOL(kmalloc_dma_caches);
913 #endif
914
915 /*
916 * Conversion table for small slab sizes / 8 to the index in the
917 * kmalloc array. This is necessary for slabs < 192 since we have non power
918 * of two cache sizes there. The size of larger slabs can be determined using
919 * fls.
920 */
921 static s8 size_index[24] = {
922 3, /* 8 */
923 4, /* 16 */
924 5, /* 24 */
925 5, /* 32 */
926 6, /* 40 */
927 6, /* 48 */
928 6, /* 56 */
929 6, /* 64 */
930 1, /* 72 */
931 1, /* 80 */
932 1, /* 88 */
933 1, /* 96 */
934 7, /* 104 */
935 7, /* 112 */
936 7, /* 120 */
937 7, /* 128 */
938 2, /* 136 */
939 2, /* 144 */
940 2, /* 152 */
941 2, /* 160 */
942 2, /* 168 */
943 2, /* 176 */
944 2, /* 184 */
945 2 /* 192 */
946 };
947
948 static inline int size_index_elem(size_t bytes)
949 {
950 return (bytes - 1) / 8;
951 }
952
953 /*
954 * Find the kmem_cache structure that serves a given size of
955 * allocation
956 */
957 struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
958 {
959 int index;
960
961 if (unlikely(size > KMALLOC_MAX_SIZE)) {
962 WARN_ON_ONCE(!(flags & __GFP_NOWARN));
963 return NULL;
964 }
965
966 if (size <= 192) {
967 if (!size)
968 return ZERO_SIZE_PTR;
969
970 index = size_index[size_index_elem(size)];
971 } else
972 index = fls(size - 1);
973
974 #ifdef CONFIG_ZONE_DMA
975 if (unlikely((flags & GFP_DMA)))
976 return kmalloc_dma_caches[index];
977
978 #endif
979 return kmalloc_caches[index];
980 }
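/*
 * Example: a 100 byte request stays in the table path,
 * size_index[(100 - 1) / 8] = size_index[12] = 7, i.e. kmalloc-128;
 * a 300 byte request takes the fls() path, fls(299) = 9, i.e. kmalloc-512.
 */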
981
982 /*
983 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
984 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
985 * kmalloc-67108864.
986 */
987 const struct kmalloc_info_struct kmalloc_info[] __initconst = {
988 {NULL, 0}, {"kmalloc-96", 96},
989 {"kmalloc-192", 192}, {"kmalloc-8", 8},
990 {"kmalloc-16", 16}, {"kmalloc-32", 32},
991 {"kmalloc-64", 64}, {"kmalloc-128", 128},
992 {"kmalloc-256", 256}, {"kmalloc-512", 512},
993 {"kmalloc-1024", 1024}, {"kmalloc-2048", 2048},
994 {"kmalloc-4096", 4096}, {"kmalloc-8192", 8192},
995 {"kmalloc-16384", 16384}, {"kmalloc-32768", 32768},
996 {"kmalloc-65536", 65536}, {"kmalloc-131072", 131072},
997 {"kmalloc-262144", 262144}, {"kmalloc-524288", 524288},
998 {"kmalloc-1048576", 1048576}, {"kmalloc-2097152", 2097152},
999 {"kmalloc-4194304", 4194304}, {"kmalloc-8388608", 8388608},
1000 {"kmalloc-16777216", 16777216}, {"kmalloc-33554432", 33554432},
1001 {"kmalloc-67108864", 67108864}
1002 };
1003
1004 /*
1005 * Patch up the size_index table if we have strange large alignment
1006 * requirements for the kmalloc array. This is only the case for
1007 * MIPS, it seems. The standard arches will not generate any code here.
1008 *
1009 * Largest permitted alignment is 256 bytes due to the way we
1010 * handle the index determination for the smaller caches.
1011 *
1012 * Make sure that nothing crazy happens if someone starts tinkering
1013 * around with ARCH_KMALLOC_MINALIGN
1014 */
1015 void __init setup_kmalloc_cache_index_table(void)
1016 {
1017 int i;
1018
1019 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
1020 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
1021
1022 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
1023 int elem = size_index_elem(i);
1024
1025 if (elem >= ARRAY_SIZE(size_index))
1026 break;
1027 size_index[elem] = KMALLOC_SHIFT_LOW;
1028 }
1029
1030 if (KMALLOC_MIN_SIZE >= 64) {
1031 /*
1032 * The 96 byte sized cache is not used if the alignment
1033 * is 64 bytes.
1034 */
1035 for (i = 64 + 8; i <= 96; i += 8)
1036 size_index[size_index_elem(i)] = 7;
1037
1038 }
1039
1040 if (KMALLOC_MIN_SIZE >= 128) {
1041 /*
1042 * The 192 byte sized cache is not used if the alignment
1043 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
1044 * instead.
1045 */
1046 for (i = 128 + 8; i <= 192; i += 8)
1047 size_index[size_index_elem(i)] = 8;
1048 }
1049 }
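/*
 * Example: with KMALLOC_MIN_SIZE = 64 (KMALLOC_SHIFT_LOW = 6), the loops
 * above send every request of 8..64 bytes to index 6 (kmalloc-64) and
 * 72..96 bytes to index 7 (kmalloc-128), since a 96 byte slot is not a
 * multiple of the 64 byte minimum alignment.
 */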
1050
1051 static void __init new_kmalloc_cache(int idx, unsigned long flags)
1052 {
1053 kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
1054 kmalloc_info[idx].size, flags);
1055 }
1056
1057 /*
1058 * Create the kmalloc array. Some of the regular kmalloc arrays
1059 * may already have been created because they were needed to
1060 * enable allocations for slab creation.
1061 */
1062 void __init create_kmalloc_caches(unsigned long flags)
1063 {
1064 int i;
1065
1066 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
1067 if (!kmalloc_caches[i])
1068 new_kmalloc_cache(i, flags);
1069
1070 /*
1071 * Caches that are not power-of-two sized. These have to
1072 * be created immediately after the earlier power of two
1073 * caches.
1074 */
1075 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
1076 new_kmalloc_cache(1, flags);
1077 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
1078 new_kmalloc_cache(2, flags);
1079 }
1080
1081 /* Kmalloc array is now usable */
1082 slab_state = UP;
1083
1084 #ifdef CONFIG_ZONE_DMA
1085 for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
1086 struct kmem_cache *s = kmalloc_caches[i];
1087
1088 if (s) {
1089 int size = kmalloc_size(i);
1090 char *n = kasprintf(GFP_NOWAIT,
1091 "dma-kmalloc-%d", size);
1092
1093 BUG_ON(!n);
1094 kmalloc_dma_caches[i] = create_kmalloc_cache(n,
1095 size, SLAB_CACHE_DMA | flags);
1096 }
1097 }
1098 #endif
1099 }
1100 #endif /* !CONFIG_SLOB */
1101
1102 /*
1103 * To avoid unnecessary overhead, we pass through large allocation requests
1104 * directly to the page allocator. We use __GFP_COMP, because we will need to
1105 * know the allocation order to free the pages properly in kfree.
1106 */
1107 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
1108 {
1109 void *ret;
1110 struct page *page;
1111
1112 flags |= __GFP_COMP;
1113 page = alloc_pages(flags, order);
1114 ret = page ? page_address(page) : NULL;
1115 kmemleak_alloc(ret, size, 1, flags);
1116 kasan_kmalloc_large(ret, size, flags);
1117 return ret;
1118 }
1119 EXPORT_SYMBOL(kmalloc_order);
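/*
 * Example: with 4K pages, a 70000 byte allocation routed here is rounded
 * up to an order-5 (128K) compound allocation; the __GFP_COMP above is
 * what lets kfree() recover the order from the page when freeing it.
 */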
1120
1121 #ifdef CONFIG_TRACING
1122 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
1123 {
1124 void *ret = kmalloc_order(size, flags, order);
1125 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
1126 return ret;
1127 }
1128 EXPORT_SYMBOL(kmalloc_order_trace);
1129 #endif
1130
1131 #ifdef CONFIG_SLAB_FREELIST_RANDOM
1132 /* Randomize a generic freelist */
1133 static void freelist_randomize(struct rnd_state *state, unsigned int *list,
1134 size_t count)
1135 {
1136 size_t i;
1137 unsigned int rand;
1138
1139 for (i = 0; i < count; i++)
1140 list[i] = i;
1141
1142 /* Fisher-Yates shuffle */
1143 for (i = count - 1; i > 0; i--) {
1144 rand = prandom_u32_state(state);
1145 rand %= (i + 1);
1146 swap(list[i], list[rand]);
1147 }
1148 }
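/*
 * Example: for count = 8 the list starts as 0..7 and ends as a pseudo-random
 * permutation, e.g. {5, 2, 7, 0, 3, 6, 1, 4}; the allocator then hands out
 * the objects of a fresh slab in that order rather than sequentially.
 */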
1149
1150 /* Create a random sequence per cache */
1151 int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
1152 gfp_t gfp)
1153 {
1154 struct rnd_state state;
1155
1156 if (count < 2 || cachep->random_seq)
1157 return 0;
1158
1159 cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
1160 if (!cachep->random_seq)
1161 return -ENOMEM;
1162
1163 /* Get best entropy at this stage of boot */
1164 prandom_seed_state(&state, get_random_long());
1165
1166 freelist_randomize(&state, cachep->random_seq, count);
1167 return 0;
1168 }
1169
1170 /* Destroy the per-cache random freelist sequence */
1171 void cache_random_seq_destroy(struct kmem_cache *cachep)
1172 {
1173 kfree(cachep->random_seq);
1174 cachep->random_seq = NULL;
1175 }
1176 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
1177
1178 #ifdef CONFIG_SLABINFO
1179
1180 #ifdef CONFIG_SLAB
1181 #define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
1182 #else
1183 #define SLABINFO_RIGHTS S_IRUSR
1184 #endif
1185
1186 static void print_slabinfo_header(struct seq_file *m)
1187 {
1188 /*
1189 * Output format version, so at least we can change it
1190 * without _too_ many complaints.
1191 */
1192 #ifdef CONFIG_DEBUG_SLAB
1193 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
1194 #else
1195 seq_puts(m, "slabinfo - version: 2.1\n");
1196 #endif
1197 seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
1198 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
1199 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1200 #ifdef CONFIG_DEBUG_SLAB
1201 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
1202 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
1203 #endif
1204 seq_putc(m, '\n');
1205 }
1206
1207 void *slab_start(struct seq_file *m, loff_t *pos)
1208 {
1209 mutex_lock(&slab_mutex);
1210 return seq_list_start(&slab_root_caches, *pos);
1211 }
1212
1213 void *slab_next(struct seq_file *m, void *p, loff_t *pos)
1214 {
1215 return seq_list_next(p, &slab_root_caches, pos);
1216 }
1217
1218 void slab_stop(struct seq_file *m, void *p)
1219 {
1220 mutex_unlock(&slab_mutex);
1221 }
1222
1223 static void
1224 memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
1225 {
1226 struct kmem_cache *c;
1227 struct slabinfo sinfo;
1228
1229 if (!is_root_cache(s))
1230 return;
1231
1232 for_each_memcg_cache(c, s) {
1233 memset(&sinfo, 0, sizeof(sinfo));
1234 get_slabinfo(c, &sinfo);
1235
1236 info->active_slabs += sinfo.active_slabs;
1237 info->num_slabs += sinfo.num_slabs;
1238 info->shared_avail += sinfo.shared_avail;
1239 info->active_objs += sinfo.active_objs;
1240 info->num_objs += sinfo.num_objs;
1241 }
1242 }
1243
1244 static void cache_show(struct kmem_cache *s, struct seq_file *m)
1245 {
1246 struct slabinfo sinfo;
1247
1248 memset(&sinfo, 0, sizeof(sinfo));
1249 get_slabinfo(s, &sinfo);
1250
1251 memcg_accumulate_slabinfo(s, &sinfo);
1252
1253 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
1254 cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
1255 sinfo.objects_per_slab, (1 << sinfo.cache_order));
1256
1257 seq_printf(m, " : tunables %4u %4u %4u",
1258 sinfo.limit, sinfo.batchcount, sinfo.shared);
1259 seq_printf(m, " : slabdata %6lu %6lu %6lu",
1260 sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
1261 slabinfo_show_stats(m, s);
1262 seq_putc(m, '\n');
1263 }
1264
1265 static int slab_show(struct seq_file *m, void *p)
1266 {
1267 struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);
1268
1269 if (p == slab_root_caches.next)
1270 print_slabinfo_header(m);
1271 cache_show(s, m);
1272 return 0;
1273 }
1274
1275 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
1276 void *memcg_slab_start(struct seq_file *m, loff_t *pos)
1277 {
1278 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
1279
1280 mutex_lock(&slab_mutex);
1281 return seq_list_start(&memcg->kmem_caches, *pos);
1282 }
1283
1284 void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
1285 {
1286 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
1287
1288 return seq_list_next(p, &memcg->kmem_caches, pos);
1289 }
1290
1291 void memcg_slab_stop(struct seq_file *m, void *p)
1292 {
1293 mutex_unlock(&slab_mutex);
1294 }
1295
1296 int memcg_slab_show(struct seq_file *m, void *p)
1297 {
1298 struct kmem_cache *s = list_entry(p, struct kmem_cache,
1299 memcg_params.kmem_caches_node);
1300 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
1301
1302 if (p == memcg->kmem_caches.next)
1303 print_slabinfo_header(m);
1304 cache_show(s, m);
1305 return 0;
1306 }
1307 #endif
1308
1309 /*
1310 * slabinfo_op - iterator that generates /proc/slabinfo
1311 *
1312 * Output layout:
1313 * cache-name
1314 * num-active-objs
1315 * total-objs
1316 * object size
1317 * num-active-slabs
1318 * total-slabs
1319 * num-pages-per-slab
1320 * + further values on SMP and with statistics enabled
1321 */
1322 static const struct seq_operations slabinfo_op = {
1323 .start = slab_start,
1324 .next = slab_next,
1325 .stop = slab_stop,
1326 .show = slab_show,
1327 };
1328
1329 static int slabinfo_open(struct inode *inode, struct file *file)
1330 {
1331 return seq_open(file, &slabinfo_op);
1332 }
1333
1334 static const struct file_operations proc_slabinfo_operations = {
1335 .open = slabinfo_open,
1336 .read = seq_read,
1337 .write = slabinfo_write,
1338 .llseek = seq_lseek,
1339 .release = seq_release,
1340 };
1341
1342 static int __init slab_proc_init(void)
1343 {
1344 proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
1345 &proc_slabinfo_operations);
1346 return 0;
1347 }
1348 module_init(slab_proc_init);
1349 #endif /* CONFIG_SLABINFO */
1350
1351 static __always_inline void *__do_krealloc(const void *p, size_t new_size,
1352 gfp_t flags)
1353 {
1354 void *ret;
1355 size_t ks = 0;
1356
1357 if (p)
1358 ks = ksize(p);
1359
1360 if (ks >= new_size) {
1361 kasan_krealloc((void *)p, new_size, flags);
1362 return (void *)p;
1363 }
1364
1365 ret = kmalloc_track_caller(new_size, flags);
1366 if (ret && p)
1367 memcpy(ret, p, ks);
1368
1369 return ret;
1370 }
1371
1372 /**
1373 * __krealloc - like krealloc() but don't free @p.
1374 * @p: object to reallocate memory for.
1375 * @new_size: how many bytes of memory are required.
1376 * @flags: the type of memory to allocate.
1377 *
1378 * This function is like krealloc() except it never frees the originally
1379 * allocated buffer. Use this if you don't want to free the buffer immediately,
1380 * for example because it is still being used under RCU.
1381 */
1382 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
1383 {
1384 if (unlikely(!new_size))
1385 return ZERO_SIZE_PTR;
1386
1387 return __do_krealloc(p, new_size, flags);
1388
1389 }
1390 EXPORT_SYMBOL(__krealloc);
1391
1392 /**
1393 * krealloc - reallocate memory. The contents will remain unchanged.
1394 * @p: object to reallocate memory for.
1395 * @new_size: how many bytes of memory are required.
1396 * @flags: the type of memory to allocate.
1397 *
1398 * The contents of the object pointed to are preserved up to the
1399 * lesser of the new and old sizes. If @p is %NULL, krealloc()
1400 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
1401 * %NULL pointer, the object pointed to is freed.
1402 */
1403 void *krealloc(const void *p, size_t new_size, gfp_t flags)
1404 {
1405 void *ret;
1406
1407 if (unlikely(!new_size)) {
1408 kfree(p);
1409 return ZERO_SIZE_PTR;
1410 }
1411
1412 ret = __do_krealloc(p, new_size, flags);
1413 if (ret && p != ret)
1414 kfree(p);
1415
1416 return ret;
1417 }
1418 EXPORT_SYMBOL(krealloc);
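/*
 * Example (sketch): growing a buffer while preserving its contents; "buf"
 * and new_len are placeholder names. On failure the old buffer is left
 * intact, so don't assign the result over the only copy of the pointer:
 *
 *	tmp = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;
 *	buf = tmp;
 */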
1419
1420 /**
1421 * kzfree - like kfree but zero memory
1422 * @p: object to free memory of
1423 *
1424 * The memory of the object @p points to is zeroed before it is freed.
1425 * If @p is %NULL, kzfree() does nothing.
1426 *
1427 * Note: this function zeroes the whole allocated buffer, which can be a good
1428 * deal bigger than the requested buffer size passed to kmalloc(). So be
1429 * careful when using this function in performance sensitive code.
1430 */
1431 void kzfree(const void *p)
1432 {
1433 size_t ks;
1434 void *mem = (void *)p;
1435
1436 if (unlikely(ZERO_OR_NULL_PTR(mem)))
1437 return;
1438 ks = ksize(mem);
1439 memset(mem, 0, ks);
1440 kfree(mem);
1441 }
1442 EXPORT_SYMBOL(kzfree);
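/*
 * Example (sketch): kzfree() is intended for buffers that held sensitive
 * data, e.g. key material that should not linger in freed memory:
 *
 *	key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kzfree(key);
 */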
1443
1444 /* Tracepoints definitions. */
1445 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
1446 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
1447 EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
1448 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
1449 EXPORT_TRACEPOINT_SYMBOL(kfree);
1450 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);