1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * SLUB: A slab allocator that limits cache line use instead of queuing
4 * objects in per cpu and per node lists.
5 *
6 * The allocator synchronizes using per slab locks or atomic operations
7 * and only uses a centralized lock to manage a pool of partial slabs.
8 *
9 * (C) 2007 SGI, Christoph Lameter
10 * (C) 2011 Linux Foundation, Christoph Lameter
11 */
12
13 #include <linux/mm.h>
14 #include <linux/swap.h> /* struct reclaim_state */
15 #include <linux/module.h>
16 #include <linux/bit_spinlock.h>
17 #include <linux/interrupt.h>
18 #include <linux/bitops.h>
19 #include <linux/slab.h>
20 #include "slab.h"
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/kasan.h>
24 #include <linux/cpu.h>
25 #include <linux/cpuset.h>
26 #include <linux/mempolicy.h>
27 #include <linux/ctype.h>
28 #include <linux/debugobjects.h>
29 #include <linux/kallsyms.h>
30 #include <linux/memory.h>
31 #include <linux/math64.h>
32 #include <linux/fault-inject.h>
33 #include <linux/stacktrace.h>
34 #include <linux/prefetch.h>
35 #include <linux/memcontrol.h>
36 #include <linux/random.h>
37
38 #include <trace/events/kmem.h>
39
40 #include "internal.h"
41
42 /*
43 * Lock order:
44 * 1. slab_mutex (Global Mutex)
45 * 2. node->list_lock
46 * 3. slab_lock(page) (Only on some arches and for debugging)
47 *
48 * slab_mutex
49 *
50 * The role of the slab_mutex is to protect the list of all the slabs
51 * and to synchronize major metadata changes to slab cache structures.
52 *
53 * The slab_lock is only used for debugging and on arches that do not
54 * have the ability to do a cmpxchg_double. It only protects:
55 * A. page->freelist -> List of free objects in a page
56 * B. page->inuse -> Number of objects in use
57 * C. page->objects -> Number of objects in page
58 * D. page->frozen -> frozen state
59 *
60 * If a slab is frozen then it is exempt from list management. It is not
61 * on any list except per cpu partial list. The processor that froze the
62 * slab is the one that can perform list operations on the page. Other
63 * processors may put objects onto the freelist but the processor that
64 * froze the slab is the only one that can retrieve the objects from the
65 * page's freelist.
66 *
67 * The list_lock protects the partial and full list on each node and
68 * the partial slab counter. If taken then no new slabs may be added or
69 * removed from the lists, nor may the number of partial slabs be modified.
70 * (Note that the total number of slabs is an atomic value that may be
71 * modified without taking the list lock).
72 *
73 * The list_lock is a centralized lock and thus we avoid taking it as
74 * much as possible. As long as SLUB does not have to handle partial
75 * slabs, operations can continue without any centralized lock. F.e.
76 * allocating a long series of objects that fill up slabs does not require
77 * the list lock.
78 * Interrupts are disabled during allocation and deallocation in order to
79 * make the slab allocator safe to use in the context of an irq. In addition
80 * interrupts are disabled to ensure that the processor does not change
81 * while handling per_cpu slabs, due to kernel preemption.
82 *
83 * SLUB assigns one slab for allocation to each processor.
84 * Allocations only occur from these slabs called cpu slabs.
85 *
86 * Slabs with free elements are kept on a partial list and during regular
87 * operations no list for full slabs is used. If an object in a full slab is
88 * freed then the slab will show up again on the partial lists.
89 * We track full slabs for debugging purposes though because otherwise we
90 * cannot scan all objects.
91 *
92 * Slabs are freed when they become empty. Teardown and setup is
93 * minimal so we rely on the page allocators per cpu caches for
94 * fast frees and allocs.
95 *
96 * page->frozen The slab is frozen and exempt from list processing.
97 * This means that the slab is dedicated to a purpose
98 * such as satisfying allocations for a specific
99 * processor. Objects may be freed in the slab while
100 * it is frozen but slab_free will then skip the usual
101 * list operations. It is up to the processor holding
102 * the slab to integrate the slab into the slab lists
103 * when the slab is no longer needed.
104 *
105 * One use of this flag is to mark slabs that are
106 * used for allocations. Then such a slab becomes a cpu
107 * slab. The cpu slab may be equipped with an additional
108 * freelist that allows lockless access to
109 * free objects in addition to the regular freelist
110 * that requires the slab lock.
111 *
112 * SLAB_DEBUG_FLAGS Slab requires special handling due to debug
113 * options set. This moves slab handling out of
114 * the fast path and disables lockless freelists.
115 */
116
117 #ifdef CONFIG_SLUB_DEBUG
118 #ifdef CONFIG_SLUB_DEBUG_ON
119 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
120 #else
121 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
122 #endif
123 #endif
124
125 static inline bool kmem_cache_debug(struct kmem_cache *s)
126 {
127 return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
128 }
129
130 void *fixup_red_left(struct kmem_cache *s, void *p)
131 {
132 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
133 p += s->red_left_pad;
134
135 return p;
136 }
137
138 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
139 {
140 #ifdef CONFIG_SLUB_CPU_PARTIAL
141 return !kmem_cache_debug(s);
142 #else
143 return false;
144 #endif
145 }
146
147 /*
148 * Issues still to be resolved:
149 *
150 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
151 *
152 * - Variable sizing of the per node arrays
153 */
154
155 /* Enable to test recovery from slab corruption on boot */
156 #undef SLUB_RESILIENCY_TEST
157
158 /* Enable to log cmpxchg failures */
159 #undef SLUB_DEBUG_CMPXCHG
160
161 /*
162 * Minimum number of partial slabs. These will be left on the partial
163 * lists even if they are empty. kmem_cache_shrink may reclaim them.
164 */
165 #define MIN_PARTIAL 5
166
167 /*
168 * Maximum number of desirable partial slabs.
169 * The existence of more partial slabs makes kmem_cache_shrink
170 * sort the partial list by the number of objects in use.
171 */
172 #define MAX_PARTIAL 10
173
174 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
175 SLAB_POISON | SLAB_STORE_USER)
176
177 /*
178 * These debug flags cannot use CMPXCHG because there might be consistency
179 * issues when checking or reading debug information
180 */
181 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
182 SLAB_TRACE)
183
184
185 /*
186 * Debugging flags that require metadata to be stored in the slab. These get
187 * disabled when slub_debug=O is used and a cache's min order increases with
188 * metadata.
189 */
190 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
191
192 #define OO_SHIFT 16
193 #define OO_MASK ((1 << OO_SHIFT) - 1)
194 #define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
195
196 /* Internal SLUB flags */
197 /* Poison object */
198 #define __OBJECT_POISON ((slab_flags_t __force)0x80000000U)
199 /* Use cmpxchg_double */
200 #define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U)
201
202 /*
203 * Tracking user of a slab.
204 */
205 #define TRACK_ADDRS_COUNT 16
206 struct track {
207 unsigned long addr; /* Called from address */
208 #ifdef CONFIG_STACKTRACE
209 unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
210 #endif
211 int cpu; /* Was running on cpu */
212 int pid; /* Pid context */
213 unsigned long when; /* When did the operation occur */
214 };
215
216 enum track_item { TRACK_ALLOC, TRACK_FREE };
217
218 #ifdef CONFIG_SYSFS
219 static int sysfs_slab_add(struct kmem_cache *);
220 static int sysfs_slab_alias(struct kmem_cache *, const char *);
221 #else
222 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
223 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
224 { return 0; }
225 #endif
226
227 static inline void stat(const struct kmem_cache *s, enum stat_item si)
228 {
229 #ifdef CONFIG_SLUB_STATS
230 /*
231 * The rmw is racy on a preemptible kernel but this is acceptable, so
232 * avoid this_cpu_add()'s irq-disable overhead.
233 */
234 raw_cpu_inc(s->cpu_slab->stat[si]);
235 #endif
236 }
237
238 /********************************************************************
239 * Core slab cache functions
240 *******************************************************************/
241
242 /*
243 * Returns freelist pointer (ptr). With hardening, this is obfuscated
244 * with an XOR of the address where the pointer is held and a per-cache
245 * random number.
246 */
247 static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
248 unsigned long ptr_addr)
249 {
250 #ifdef CONFIG_SLAB_FREELIST_HARDENED
251 /*
252 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
253 * Normally, this doesn't cause any issues, as both set_freepointer()
254 * and get_freepointer() are called with a pointer with the same tag.
255 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
256 * example, when __free_slab() iterates over objects in a cache, it
257 * passes untagged pointers to check_object(). check_object() in turn
258 * calls get_freepointer() with an untagged pointer, which causes the
259 * freepointer to be restored incorrectly.
260 */
261 return (void *)((unsigned long)ptr ^ s->random ^
262 swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
263 #else
264 return ptr;
265 #endif
266 }
267
268 /* Returns the freelist pointer recorded at location ptr_addr. */
269 static inline void *freelist_dereference(const struct kmem_cache *s,
270 void *ptr_addr)
271 {
272 return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
273 (unsigned long)ptr_addr);
274 }
275
276 static inline void *get_freepointer(struct kmem_cache *s, void *object)
277 {
278 object = kasan_reset_tag(object);
279 return freelist_dereference(s, object + s->offset);
280 }
281
282 static void prefetch_freepointer(const struct kmem_cache *s, void *object)
283 {
284 prefetch(object + s->offset);
285 }
286
287 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
288 {
289 unsigned long freepointer_addr;
290 void *p;
291
292 if (!debug_pagealloc_enabled_static())
293 return get_freepointer(s, object);
294
295 freepointer_addr = (unsigned long)object + s->offset;
296 copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
297 return freelist_ptr(s, p, freepointer_addr);
298 }
299
300 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
301 {
302 unsigned long freeptr_addr = (unsigned long)object + s->offset;
303
304 #ifdef CONFIG_SLAB_FREELIST_HARDENED
305 BUG_ON(object == fp); /* naive detection of double free or corruption */
306 #endif
307
308 freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
309 *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
310 }
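/*
 * Illustration only (not part of the allocator; the expression is a
 * simplified sketch): with CONFIG_SLAB_FREELIST_HARDENED,
 * set_freepointer() stores roughly
 *
 *	*(void **)ptr_addr = fp ^ s->random ^ swab(ptr_addr)
 *
 * and freelist_dereference() applies the same XOR/swab transformation
 * again, which cancels out and recovers fp. A leaked freelist word is
 * therefore obfuscated by a per-cache secret and the byte-swapped slot
 * address, and a corrupted word decodes to a wild pointer that the
 * pointer validity checks below can catch.
 */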
311
312 /* Loop over all objects in a slab */
313 #define for_each_object(__p, __s, __addr, __objects) \
314 for (__p = fixup_red_left(__s, __addr); \
315 __p < (__addr) + (__objects) * (__s)->size; \
316 __p += (__s)->size)
317
318 static inline unsigned int order_objects(unsigned int order, unsigned int size)
319 {
320 return ((unsigned int)PAGE_SIZE << order) / size;
321 }
322
323 static inline struct kmem_cache_order_objects oo_make(unsigned int order,
324 unsigned int size)
325 {
326 struct kmem_cache_order_objects x = {
327 (order << OO_SHIFT) + order_objects(order, size)
328 };
329
330 return x;
331 }
332
333 static inline unsigned int oo_order(struct kmem_cache_order_objects x)
334 {
335 return x.x >> OO_SHIFT;
336 }
337
338 static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
339 {
340 return x.x & OO_MASK;
341 }
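/*
 * Worked example of the order/objects packing above, assuming a 4K
 * PAGE_SIZE and a 256-byte object size (illustrative values only):
 *
 *	order_objects(1, 256)	= (4096 << 1) / 256	= 32
 *	oo_make(1, 256).x	= (1 << OO_SHIFT) + 32	= 0x10020
 *	oo_order(oo)		= 0x10020 >> OO_SHIFT	= 1
 *	oo_objects(oo)		= 0x10020 & OO_MASK	= 32
 *
 * i.e. the page order lives in the bits above OO_SHIFT and the object
 * count in the low OO_SHIFT bits of a single word.
 */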
342
343 /*
344 * Per slab locking using the pagelock
345 */
346 static __always_inline void slab_lock(struct page *page)
347 {
348 VM_BUG_ON_PAGE(PageTail(page), page);
349 bit_spin_lock(PG_locked, &page->flags);
350 }
351
352 static __always_inline void slab_unlock(struct page *page)
353 {
354 VM_BUG_ON_PAGE(PageTail(page), page);
355 __bit_spin_unlock(PG_locked, &page->flags);
356 }
357
358 /* Interrupts must be disabled (for the fallback code to work right) */
359 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
360 void *freelist_old, unsigned long counters_old,
361 void *freelist_new, unsigned long counters_new,
362 const char *n)
363 {
364 VM_BUG_ON(!irqs_disabled());
365 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
366 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
367 if (s->flags & __CMPXCHG_DOUBLE) {
368 if (cmpxchg_double(&page->freelist, &page->counters,
369 freelist_old, counters_old,
370 freelist_new, counters_new))
371 return true;
372 } else
373 #endif
374 {
375 slab_lock(page);
376 if (page->freelist == freelist_old &&
377 page->counters == counters_old) {
378 page->freelist = freelist_new;
379 page->counters = counters_new;
380 slab_unlock(page);
381 return true;
382 }
383 slab_unlock(page);
384 }
385
386 cpu_relax();
387 stat(s, CMPXCHG_DOUBLE_FAIL);
388
389 #ifdef SLUB_DEBUG_CMPXCHG
390 pr_info("%s %s: cmpxchg double redo ", n, s->name);
391 #endif
392
393 return false;
394 }
395
396 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
397 void *freelist_old, unsigned long counters_old,
398 void *freelist_new, unsigned long counters_new,
399 const char *n)
400 {
401 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
402 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
403 if (s->flags & __CMPXCHG_DOUBLE) {
404 if (cmpxchg_double(&page->freelist, &page->counters,
405 freelist_old, counters_old,
406 freelist_new, counters_new))
407 return true;
408 } else
409 #endif
410 {
411 unsigned long flags;
412
413 local_irq_save(flags);
414 slab_lock(page);
415 if (page->freelist == freelist_old &&
416 page->counters == counters_old) {
417 page->freelist = freelist_new;
418 page->counters = counters_new;
419 slab_unlock(page);
420 local_irq_restore(flags);
421 return true;
422 }
423 slab_unlock(page);
424 local_irq_restore(flags);
425 }
426
427 cpu_relax();
428 stat(s, CMPXCHG_DOUBLE_FAIL);
429
430 #ifdef SLUB_DEBUG_CMPXCHG
431 pr_info("%s %s: cmpxchg double redo ", n, s->name);
432 #endif
433
434 return false;
435 }
436
437 #ifdef CONFIG_SLUB_DEBUG
438 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
439 static DEFINE_SPINLOCK(object_map_lock);
440
441 /*
442 * Determine a map of objects in use on a page.
443 *
444 * Node listlock must be held to guarantee that the page does
445 * not vanish from under us.
446 */
447 static unsigned long *get_map(struct kmem_cache *s, struct page *page)
448 __acquires(&object_map_lock)
449 {
450 void *p;
451 void *addr = page_address(page);
452
453 VM_BUG_ON(!irqs_disabled());
454
455 spin_lock(&object_map_lock);
456
457 bitmap_zero(object_map, page->objects);
458
459 for (p = page->freelist; p; p = get_freepointer(s, p))
460 set_bit(__obj_to_index(s, addr, p), object_map);
461
462 return object_map;
463 }
464
465 static void put_map(unsigned long *map) __releases(&object_map_lock)
466 {
467 VM_BUG_ON(map != object_map);
468 spin_unlock(&object_map_lock);
469 }
470
471 static inline unsigned int size_from_object(struct kmem_cache *s)
472 {
473 if (s->flags & SLAB_RED_ZONE)
474 return s->size - s->red_left_pad;
475
476 return s->size;
477 }
478
479 static inline void *restore_red_left(struct kmem_cache *s, void *p)
480 {
481 if (s->flags & SLAB_RED_ZONE)
482 p -= s->red_left_pad;
483
484 return p;
485 }
486
487 /*
488 * Debug settings:
489 */
490 #if defined(CONFIG_SLUB_DEBUG_ON)
491 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
492 #else
493 static slab_flags_t slub_debug;
494 #endif
495
496 static char *slub_debug_string;
497 static int disable_higher_order_debug;
498
499 /*
500 * slub is about to manipulate internal object metadata. This memory lies
501 * outside the range of the allocated object, so accessing it would normally
502 * be reported by kasan as a bounds error. metadata_access_enable() is used
503 * to tell kasan that these accesses are OK.
504 */
505 static inline void metadata_access_enable(void)
506 {
507 kasan_disable_current();
508 }
509
510 static inline void metadata_access_disable(void)
511 {
512 kasan_enable_current();
513 }
514
515 /*
516 * Object debugging
517 */
518
519 /* Verify that a pointer has an address that is valid within a slab page */
520 static inline int check_valid_pointer(struct kmem_cache *s,
521 struct page *page, void *object)
522 {
523 void *base;
524
525 if (!object)
526 return 1;
527
528 base = page_address(page);
529 object = kasan_reset_tag(object);
530 object = restore_red_left(s, object);
531 if (object < base || object >= base + page->objects * s->size ||
532 (object - base) % s->size) {
533 return 0;
534 }
535
536 return 1;
537 }
538
539 static void print_section(char *level, char *text, u8 *addr,
540 unsigned int length)
541 {
542 metadata_access_enable();
543 print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
544 16, 1, addr, length, 1);
545 metadata_access_disable();
546 }
547
548 /*
549 * See comment in calculate_sizes().
550 */
551 static inline bool freeptr_outside_object(struct kmem_cache *s)
552 {
553 return s->offset >= s->inuse;
554 }
555
556 /*
557 * Return the offset of the end of the info block, which is inuse + free
558 * pointer if the free pointer does not overlap the object.
559 */
560 static inline unsigned int get_info_end(struct kmem_cache *s)
561 {
562 if (freeptr_outside_object(s))
563 return s->inuse + sizeof(void *);
564 else
565 return s->inuse;
566 }
567
568 static struct track *get_track(struct kmem_cache *s, void *object,
569 enum track_item alloc)
570 {
571 struct track *p;
572
573 p = object + get_info_end(s);
574
575 return kasan_reset_tag(p + alloc);
576 }
577
578 static void set_track(struct kmem_cache *s, void *object,
579 enum track_item alloc, unsigned long addr)
580 {
581 struct track *p = get_track(s, object, alloc);
582
583 if (addr) {
584 #ifdef CONFIG_STACKTRACE
585 unsigned int nr_entries;
586
587 metadata_access_enable();
588 nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
589 TRACK_ADDRS_COUNT, 3);
590 metadata_access_disable();
591
592 if (nr_entries < TRACK_ADDRS_COUNT)
593 p->addrs[nr_entries] = 0;
594 #endif
595 p->addr = addr;
596 p->cpu = smp_processor_id();
597 p->pid = current->pid;
598 p->when = jiffies;
599 } else {
600 memset(p, 0, sizeof(struct track));
601 }
602 }
603
604 static void init_tracking(struct kmem_cache *s, void *object)
605 {
606 if (!(s->flags & SLAB_STORE_USER))
607 return;
608
609 set_track(s, object, TRACK_FREE, 0UL);
610 set_track(s, object, TRACK_ALLOC, 0UL);
611 }
612
613 static void print_track(const char *s, struct track *t, unsigned long pr_time)
614 {
615 if (!t->addr)
616 return;
617
618 pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
619 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
620 #ifdef CONFIG_STACKTRACE
621 {
622 int i;
623 for (i = 0; i < TRACK_ADDRS_COUNT; i++)
624 if (t->addrs[i])
625 pr_err("\t%pS\n", (void *)t->addrs[i]);
626 else
627 break;
628 }
629 #endif
630 }
631
632 void print_tracking(struct kmem_cache *s, void *object)
633 {
634 unsigned long pr_time = jiffies;
635 if (!(s->flags & SLAB_STORE_USER))
636 return;
637
638 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
639 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
640 }
641
642 static void print_page_info(struct page *page)
643 {
644 pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
645 page, page->objects, page->inuse, page->freelist, page->flags);
646
647 }
648
649 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
650 {
651 struct va_format vaf;
652 va_list args;
653
654 va_start(args, fmt);
655 vaf.fmt = fmt;
656 vaf.va = &args;
657 pr_err("=============================================================================\n");
658 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
659 pr_err("-----------------------------------------------------------------------------\n\n");
660
661 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
662 va_end(args);
663 }
664
665 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
666 {
667 struct va_format vaf;
668 va_list args;
669
670 va_start(args, fmt);
671 vaf.fmt = fmt;
672 vaf.va = &args;
673 pr_err("FIX %s: %pV\n", s->name, &vaf);
674 va_end(args);
675 }
676
677 static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
678 void **freelist, void *nextfree)
679 {
680 if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
681 !check_valid_pointer(s, page, nextfree) && freelist) {
682 object_err(s, page, *freelist, "Freechain corrupt");
683 *freelist = NULL;
684 slab_fix(s, "Isolate corrupted freechain");
685 return true;
686 }
687
688 return false;
689 }
690
691 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
692 {
693 unsigned int off; /* Offset of last byte */
694 u8 *addr = page_address(page);
695
696 print_tracking(s, p);
697
698 print_page_info(page);
699
700 pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
701 p, p - addr, get_freepointer(s, p));
702
703 if (s->flags & SLAB_RED_ZONE)
704 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
705 s->red_left_pad);
706 else if (p > addr + 16)
707 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
708
709 print_section(KERN_ERR, "Object ", p,
710 min_t(unsigned int, s->object_size, PAGE_SIZE));
711 if (s->flags & SLAB_RED_ZONE)
712 print_section(KERN_ERR, "Redzone ", p + s->object_size,
713 s->inuse - s->object_size);
714
715 off = get_info_end(s);
716
717 if (s->flags & SLAB_STORE_USER)
718 off += 2 * sizeof(struct track);
719
720 off += kasan_metadata_size(s);
721
722 if (off != size_from_object(s))
723 /* Beginning of the filler is the free pointer */
724 print_section(KERN_ERR, "Padding ", p + off,
725 size_from_object(s) - off);
726
727 dump_stack();
728 }
729
730 void object_err(struct kmem_cache *s, struct page *page,
731 u8 *object, char *reason)
732 {
733 slab_bug(s, "%s", reason);
734 print_trailer(s, page, object);
735 }
736
737 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
738 const char *fmt, ...)
739 {
740 va_list args;
741 char buf[100];
742
743 va_start(args, fmt);
744 vsnprintf(buf, sizeof(buf), fmt, args);
745 va_end(args);
746 slab_bug(s, "%s", buf);
747 print_page_info(page);
748 dump_stack();
749 }
750
751 static void init_object(struct kmem_cache *s, void *object, u8 val)
752 {
753 u8 *p = kasan_reset_tag(object);
754
755 if (s->flags & SLAB_RED_ZONE)
756 memset(p - s->red_left_pad, val, s->red_left_pad);
757
758 if (s->flags & __OBJECT_POISON) {
759 memset(p, POISON_FREE, s->object_size - 1);
760 p[s->object_size - 1] = POISON_END;
761 }
762
763 if (s->flags & SLAB_RED_ZONE)
764 memset(p + s->object_size, val, s->inuse - s->object_size);
765 }
766
767 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
768 void *from, void *to)
769 {
770 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
771 memset(from, data, to - from);
772 }
773
774 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
775 u8 *object, char *what,
776 u8 *start, unsigned int value, unsigned int bytes)
777 {
778 u8 *fault;
779 u8 *end;
780 u8 *addr = page_address(page);
781
782 metadata_access_enable();
783 fault = memchr_inv(kasan_reset_tag(start), value, bytes);
784 metadata_access_disable();
785 if (!fault)
786 return 1;
787
788 end = start + bytes;
789 while (end > fault && end[-1] == value)
790 end--;
791
792 slab_bug(s, "%s overwritten", what);
793 pr_err("INFO: 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
794 fault, end - 1, fault - addr,
795 fault[0], value);
796 print_trailer(s, page, object);
797
798 restore_bytes(s, what, value, fault, end);
799 return 0;
800 }
801
802 /*
803 * Object layout:
804 *
805 * object address
806 * Bytes of the object to be managed.
807 * If the freepointer may overlay the object then the free
808 * pointer is at the middle of the object.
809 *
810 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
811 * 0xa5 (POISON_END)
812 *
813 * object + s->object_size
814 * Padding to reach word boundary. This is also used for Redzoning.
815 * Padding is extended by another word if Redzoning is enabled and
816 * object_size == inuse.
817 *
818 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
819 * 0xcc (RED_ACTIVE) for objects in use.
820 *
821 * object + s->inuse
822 * Meta data starts here.
823 *
824 * A. Free pointer (if we cannot overwrite object on free)
825 * B. Tracking data for SLAB_STORE_USER
826 * C. Padding to reach required alignment boundary or at minimum
827 * one word if debugging is on to be able to detect writes
828 * before the word boundary.
829 *
830 * Padding is done using 0x5a (POISON_INUSE)
831 *
832 * object + s->size
833 * Nothing is used beyond s->size.
834 *
835 * If slabcaches are merged then the object_size and inuse boundaries are mostly
836 * ignored, and therefore no slab options that rely on these boundaries
837 * may be used with merged slabcaches.
838 */
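/*
 * Purely illustrative walk through the layout above for one object
 * (symbolic offsets, not taken from any real cache):
 *
 *	object - red_left_pad	left red zone (SLAB_RED_ZONE only)
 *	object			address handed out to the caller
 *	object + object_size	right red zone / padding up to s->inuse
 *	object + inuse		free pointer, when it cannot overlap the
 *				object (see freeptr_outside_object())
 *	 + 2 * struct track	alloc/free tracking (SLAB_STORE_USER)
 *	 + kasan metadata	see kasan_metadata_size()
 *	 ... up to s->size	POISON_INUSE filler
 *
 * check_pad_bytes() and check_object() below verify these regions.
 */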
839
840 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
841 {
842 unsigned long off = get_info_end(s); /* The end of info */
843
844 if (s->flags & SLAB_STORE_USER)
845 /* We also have user information there */
846 off += 2 * sizeof(struct track);
847
848 off += kasan_metadata_size(s);
849
850 if (size_from_object(s) == off)
851 return 1;
852
853 return check_bytes_and_report(s, page, p, "Object padding",
854 p + off, POISON_INUSE, size_from_object(s) - off);
855 }
856
857 /* Check the pad bytes at the end of a slab page */
858 static int slab_pad_check(struct kmem_cache *s, struct page *page)
859 {
860 u8 *start;
861 u8 *fault;
862 u8 *end;
863 u8 *pad;
864 int length;
865 int remainder;
866
867 if (!(s->flags & SLAB_POISON))
868 return 1;
869
870 start = page_address(page);
871 length = page_size(page);
872 end = start + length;
873 remainder = length % s->size;
874 if (!remainder)
875 return 1;
876
877 pad = end - remainder;
878 metadata_access_enable();
879 fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
880 metadata_access_disable();
881 if (!fault)
882 return 1;
883 while (end > fault && end[-1] == POISON_INUSE)
884 end--;
885
886 slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu",
887 fault, end - 1, fault - start);
888 print_section(KERN_ERR, "Padding ", pad, remainder);
889
890 restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
891 return 0;
892 }
893
894 static int check_object(struct kmem_cache *s, struct page *page,
895 void *object, u8 val)
896 {
897 u8 *p = object;
898 u8 *endobject = object + s->object_size;
899
900 if (s->flags & SLAB_RED_ZONE) {
901 if (!check_bytes_and_report(s, page, object, "Redzone",
902 object - s->red_left_pad, val, s->red_left_pad))
903 return 0;
904
905 if (!check_bytes_and_report(s, page, object, "Redzone",
906 endobject, val, s->inuse - s->object_size))
907 return 0;
908 } else {
909 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
910 check_bytes_and_report(s, page, p, "Alignment padding",
911 endobject, POISON_INUSE,
912 s->inuse - s->object_size);
913 }
914 }
915
916 if (s->flags & SLAB_POISON) {
917 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
918 (!check_bytes_and_report(s, page, p, "Poison", p,
919 POISON_FREE, s->object_size - 1) ||
920 !check_bytes_and_report(s, page, p, "Poison",
921 p + s->object_size - 1, POISON_END, 1)))
922 return 0;
923 /*
924 * check_pad_bytes cleans up on its own.
925 */
926 check_pad_bytes(s, page, p);
927 }
928
929 if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
930 /*
931 * Object and freepointer overlap. Cannot check
932 * freepointer while object is allocated.
933 */
934 return 1;
935
936 /* Check free pointer validity */
937 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
938 object_err(s, page, p, "Freepointer corrupt");
939 /*
940 * No choice but to zap it and thus lose the remainder
941 * of the free objects in this slab. May cause
942 * another error because the object count is now wrong.
943 */
944 set_freepointer(s, p, NULL);
945 return 0;
946 }
947 return 1;
948 }
949
950 static int check_slab(struct kmem_cache *s, struct page *page)
951 {
952 int maxobj;
953
954 VM_BUG_ON(!irqs_disabled());
955
956 if (!PageSlab(page)) {
957 slab_err(s, page, "Not a valid slab page");
958 return 0;
959 }
960
961 maxobj = order_objects(compound_order(page), s->size);
962 if (page->objects > maxobj) {
963 slab_err(s, page, "objects %u > max %u",
964 page->objects, maxobj);
965 return 0;
966 }
967 if (page->inuse > page->objects) {
968 slab_err(s, page, "inuse %u > max %u",
969 page->inuse, page->objects);
970 return 0;
971 }
972 /* slab_pad_check() fixes things up after itself */
973 slab_pad_check(s, page);
974 return 1;
975 }
976
977 /*
978 * Determine if a certain object on a page is on the freelist. Must hold the
979 * slab lock to guarantee that the chains are in a consistent state.
980 */
981 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
982 {
983 int nr = 0;
984 void *fp;
985 void *object = NULL;
986 int max_objects;
987
988 fp = page->freelist;
989 while (fp && nr <= page->objects) {
990 if (fp == search)
991 return 1;
992 if (!check_valid_pointer(s, page, fp)) {
993 if (object) {
994 object_err(s, page, object,
995 "Freechain corrupt");
996 set_freepointer(s, object, NULL);
997 } else {
998 slab_err(s, page, "Freepointer corrupt");
999 page->freelist = NULL;
1000 page->inuse = page->objects;
1001 slab_fix(s, "Freelist cleared");
1002 return 0;
1003 }
1004 break;
1005 }
1006 object = fp;
1007 fp = get_freepointer(s, object);
1008 nr++;
1009 }
1010
1011 max_objects = order_objects(compound_order(page), s->size);
1012 if (max_objects > MAX_OBJS_PER_PAGE)
1013 max_objects = MAX_OBJS_PER_PAGE;
1014
1015 if (page->objects != max_objects) {
1016 slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
1017 page->objects, max_objects);
1018 page->objects = max_objects;
1019 slab_fix(s, "Number of objects adjusted.");
1020 }
1021 if (page->inuse != page->objects - nr) {
1022 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
1023 page->inuse, page->objects - nr);
1024 page->inuse = page->objects - nr;
1025 slab_fix(s, "Object count adjusted.");
1026 }
1027 return search == NULL;
1028 }
1029
1030 static void trace(struct kmem_cache *s, struct page *page, void *object,
1031 int alloc)
1032 {
1033 if (s->flags & SLAB_TRACE) {
1034 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1035 s->name,
1036 alloc ? "alloc" : "free",
1037 object, page->inuse,
1038 page->freelist);
1039
1040 if (!alloc)
1041 print_section(KERN_INFO, "Object ", (void *)object,
1042 s->object_size);
1043
1044 dump_stack();
1045 }
1046 }
1047
1048 /*
1049 * Tracking of fully allocated slabs for debugging purposes.
1050 */
1051 static void add_full(struct kmem_cache *s,
1052 struct kmem_cache_node *n, struct page *page)
1053 {
1054 if (!(s->flags & SLAB_STORE_USER))
1055 return;
1056
1057 lockdep_assert_held(&n->list_lock);
1058 list_add(&page->slab_list, &n->full);
1059 }
1060
1061 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1062 {
1063 if (!(s->flags & SLAB_STORE_USER))
1064 return;
1065
1066 lockdep_assert_held(&n->list_lock);
1067 list_del(&page->slab_list);
1068 }
1069
1070 /* Tracking of the number of slabs for debugging purposes */
1071 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1072 {
1073 struct kmem_cache_node *n = get_node(s, node);
1074
1075 return atomic_long_read(&n->nr_slabs);
1076 }
1077
1078 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1079 {
1080 return atomic_long_read(&n->nr_slabs);
1081 }
1082
1083 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1084 {
1085 struct kmem_cache_node *n = get_node(s, node);
1086
1087 /*
1088 * May be called early in order to allocate a slab for the
1089 * kmem_cache_node structure. Solve the chicken-egg
1090 * dilemma by deferring the increment of the count during
1091 * bootstrap (see early_kmem_cache_node_alloc).
1092 */
1093 if (likely(n)) {
1094 atomic_long_inc(&n->nr_slabs);
1095 atomic_long_add(objects, &n->total_objects);
1096 }
1097 }
1098 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1099 {
1100 struct kmem_cache_node *n = get_node(s, node);
1101
1102 atomic_long_dec(&n->nr_slabs);
1103 atomic_long_sub(objects, &n->total_objects);
1104 }
1105
1106 /* Object debug checks for alloc/free paths */
1107 static void setup_object_debug(struct kmem_cache *s, struct page *page,
1108 void *object)
1109 {
1110 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
1111 return;
1112
1113 init_object(s, object, SLUB_RED_INACTIVE);
1114 init_tracking(s, object);
1115 }
1116
1117 static
1118 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
1119 {
1120 if (!kmem_cache_debug_flags(s, SLAB_POISON))
1121 return;
1122
1123 metadata_access_enable();
1124 memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page));
1125 metadata_access_disable();
1126 }
1127
1128 static inline int alloc_consistency_checks(struct kmem_cache *s,
1129 struct page *page, void *object)
1130 {
1131 if (!check_slab(s, page))
1132 return 0;
1133
1134 if (!check_valid_pointer(s, page, object)) {
1135 object_err(s, page, object, "Freelist Pointer check fails");
1136 return 0;
1137 }
1138
1139 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1140 return 0;
1141
1142 return 1;
1143 }
1144
1145 static noinline int alloc_debug_processing(struct kmem_cache *s,
1146 struct page *page,
1147 void *object, unsigned long addr)
1148 {
1149 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1150 if (!alloc_consistency_checks(s, page, object))
1151 goto bad;
1152 }
1153
1154 /* Success. Perform special debug activities for allocs. */
1155 if (s->flags & SLAB_STORE_USER)
1156 set_track(s, object, TRACK_ALLOC, addr);
1157 trace(s, page, object, 1);
1158 init_object(s, object, SLUB_RED_ACTIVE);
1159 return 1;
1160
1161 bad:
1162 if (PageSlab(page)) {
1163 /*
1164 * If this is a slab page then let's do the best we can
1165 * to avoid issues in the future. Marking all objects
1166 * as used avoids touching the remaining objects.
1167 */
1168 slab_fix(s, "Marking all objects used");
1169 page->inuse = page->objects;
1170 page->freelist = NULL;
1171 }
1172 return 0;
1173 }
1174
1175 static inline int free_consistency_checks(struct kmem_cache *s,
1176 struct page *page, void *object, unsigned long addr)
1177 {
1178 if (!check_valid_pointer(s, page, object)) {
1179 slab_err(s, page, "Invalid object pointer 0x%p", object);
1180 return 0;
1181 }
1182
1183 if (on_freelist(s, page, object)) {
1184 object_err(s, page, object, "Object already free");
1185 return 0;
1186 }
1187
1188 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1189 return 0;
1190
1191 if (unlikely(s != page->slab_cache)) {
1192 if (!PageSlab(page)) {
1193 slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1194 object);
1195 } else if (!page->slab_cache) {
1196 pr_err("SLUB <none>: no slab for object 0x%p.\n",
1197 object);
1198 dump_stack();
1199 } else
1200 object_err(s, page, object,
1201 "page slab pointer corrupt.");
1202 return 0;
1203 }
1204 return 1;
1205 }
1206
1207 /* Supports checking bulk free of a constructed freelist */
1208 static noinline int free_debug_processing(
1209 struct kmem_cache *s, struct page *page,
1210 void *head, void *tail, int bulk_cnt,
1211 unsigned long addr)
1212 {
1213 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1214 void *object = head;
1215 int cnt = 0;
1216 unsigned long flags;
1217 int ret = 0;
1218
1219 spin_lock_irqsave(&n->list_lock, flags);
1220 slab_lock(page);
1221
1222 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1223 if (!check_slab(s, page))
1224 goto out;
1225 }
1226
1227 next_object:
1228 cnt++;
1229
1230 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1231 if (!free_consistency_checks(s, page, object, addr))
1232 goto out;
1233 }
1234
1235 if (s->flags & SLAB_STORE_USER)
1236 set_track(s, object, TRACK_FREE, addr);
1237 trace(s, page, object, 0);
1238 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1239 init_object(s, object, SLUB_RED_INACTIVE);
1240
1241 /* Reached end of constructed freelist yet? */
1242 if (object != tail) {
1243 object = get_freepointer(s, object);
1244 goto next_object;
1245 }
1246 ret = 1;
1247
1248 out:
1249 if (cnt != bulk_cnt)
1250 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1251 bulk_cnt, cnt);
1252
1253 slab_unlock(page);
1254 spin_unlock_irqrestore(&n->list_lock, flags);
1255 if (!ret)
1256 slab_fix(s, "Object at 0x%p not freed", object);
1257 return ret;
1258 }
1259
1260 /*
1261 * Parse a block of slub_debug options. Blocks are delimited by ';'
1262 *
1263 * @str: start of block
1264 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
1265 * @slabs: return start of list of slabs, or NULL when there's no list
1266 * @init: assume this is initial parsing and not per-kmem-create parsing
1267 *
1268 * returns the start of next block if there's any, or NULL
1269 */
1270 static char *
1271 parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
1272 {
1273 bool higher_order_disable = false;
1274
1275 /* Skip any completely empty blocks */
1276 while (*str && *str == ';')
1277 str++;
1278
1279 if (*str == ',') {
1280 /*
1281 * No options but restriction on slabs. This means full
1282 * debugging for slabs matching a pattern.
1283 */
1284 *flags = DEBUG_DEFAULT_FLAGS;
1285 goto check_slabs;
1286 }
1287 *flags = 0;
1288
1289 /* Determine which debug features should be switched on */
1290 for (; *str && *str != ',' && *str != ';'; str++) {
1291 switch (tolower(*str)) {
1292 case '-':
1293 *flags = 0;
1294 break;
1295 case 'f':
1296 *flags |= SLAB_CONSISTENCY_CHECKS;
1297 break;
1298 case 'z':
1299 *flags |= SLAB_RED_ZONE;
1300 break;
1301 case 'p':
1302 *flags |= SLAB_POISON;
1303 break;
1304 case 'u':
1305 *flags |= SLAB_STORE_USER;
1306 break;
1307 case 't':
1308 *flags |= SLAB_TRACE;
1309 break;
1310 case 'a':
1311 *flags |= SLAB_FAILSLAB;
1312 break;
1313 case 'o':
1314 /*
1315 * Avoid enabling debugging on caches if its minimum
1316 * order would increase as a result.
1317 */
1318 higher_order_disable = true;
1319 break;
1320 default:
1321 if (init)
1322 pr_err("slub_debug option '%c' unknown. skipped\n", *str);
1323 }
1324 }
1325 check_slabs:
1326 if (*str == ',')
1327 *slabs = ++str;
1328 else
1329 *slabs = NULL;
1330
1331 /* Skip over the slab list */
1332 while (*str && *str != ';')
1333 str++;
1334
1335 /* Skip any completely empty blocks */
1336 while (*str && *str == ';')
1337 str++;
1338
1339 if (init && higher_order_disable)
1340 disable_higher_order_debug = 1;
1341
1342 if (*str)
1343 return str;
1344 else
1345 return NULL;
1346 }
1347
1348 static int __init setup_slub_debug(char *str)
1349 {
1350 slab_flags_t flags;
1351 char *saved_str;
1352 char *slab_list;
1353 bool global_slub_debug_changed = false;
1354 bool slab_list_specified = false;
1355
1356 slub_debug = DEBUG_DEFAULT_FLAGS;
1357 if (*str++ != '=' || !*str)
1358 /*
1359 * No options specified. Switch on full debugging.
1360 */
1361 goto out;
1362
1363 saved_str = str;
1364 while (str) {
1365 str = parse_slub_debug_flags(str, &flags, &slab_list, true);
1366
1367 if (!slab_list) {
1368 slub_debug = flags;
1369 global_slub_debug_changed = true;
1370 } else {
1371 slab_list_specified = true;
1372 }
1373 }
1374
1375 /*
1376 * For backwards compatibility, a single list of flags with list of
1377 * slabs means debugging is only enabled for those slabs, so the global
1378 * slub_debug should be 0. We can extend that to multiple lists as
1379 * long as there is no option specifying flags without a slab list.
1380 */
1381 if (slab_list_specified) {
1382 if (!global_slub_debug_changed)
1383 slub_debug = 0;
1384 slub_debug_string = saved_str;
1385 }
1386 out:
1387 if (slub_debug != 0 || slub_debug_string)
1388 static_branch_enable(&slub_debug_enabled);
1389 if ((static_branch_unlikely(&init_on_alloc) ||
1390 static_branch_unlikely(&init_on_free)) &&
1391 (slub_debug & SLAB_POISON))
1392 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1393 return 1;
1394 }
1395
1396 __setup("slub_debug", setup_slub_debug);
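/*
 * A few illustrative boot strings and how the parser above treats them
 * (the cache names are only examples):
 *
 *	slub_debug		all caches get DEBUG_DEFAULT_FLAGS
 *	slub_debug=FZ		consistency checks + red zoning, globally
 *	slub_debug=,dentry	full debugging, but only for caches named
 *				"dentry"; the global slub_debug stays 0
 *	slub_debug=FZ;P,kmalloc-*
 *				F and Z for every cache except those matching
 *				"kmalloc-*", which instead get only P: a cache
 *				that matches a named block takes that block's
 *				flags in kmem_cache_flags() and skips the
 *				global ones
 */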
1397
1398 /*
1399 * kmem_cache_flags - apply debugging options to the cache
1400 * @object_size: the size of an object without meta data
1401 * @flags: flags to set
1402 * @name: name of the cache
1403 * @ctor: constructor function
1404 *
1405 * Debug option(s) are applied to @flags. In addition to the debug
1406 * option(s), if a slab name (or multiple) is specified i.e.
1407 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1408 * then only the select slabs will receive the debug option(s).
1409 */
1410 slab_flags_t kmem_cache_flags(unsigned int object_size,
1411 slab_flags_t flags, const char *name,
1412 void (*ctor)(void *))
1413 {
1414 char *iter;
1415 size_t len;
1416 char *next_block;
1417 slab_flags_t block_flags;
1418
1419 len = strlen(name);
1420 next_block = slub_debug_string;
1421 /* Go through all blocks of debug options, see if any matches our slab's name */
1422 while (next_block) {
1423 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
1424 if (!iter)
1425 continue;
1426 /* Found a block that has a slab list, search it */
1427 while (*iter) {
1428 char *end, *glob;
1429 size_t cmplen;
1430
1431 end = strchrnul(iter, ',');
1432 if (next_block && next_block < end)
1433 end = next_block - 1;
1434
1435 glob = strnchr(iter, end - iter, '*');
1436 if (glob)
1437 cmplen = glob - iter;
1438 else
1439 cmplen = max_t(size_t, len, (end - iter));
1440
1441 if (!strncmp(name, iter, cmplen)) {
1442 flags |= block_flags;
1443 return flags;
1444 }
1445
1446 if (!*end || *end == ';')
1447 break;
1448 iter = end + 1;
1449 }
1450 }
1451
1452 return flags | slub_debug;
1453 }
1454 #else /* !CONFIG_SLUB_DEBUG */
1455 static inline void setup_object_debug(struct kmem_cache *s,
1456 struct page *page, void *object) {}
1457 static inline
1458 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
1459
1460 static inline int alloc_debug_processing(struct kmem_cache *s,
1461 struct page *page, void *object, unsigned long addr) { return 0; }
1462
1463 static inline int free_debug_processing(
1464 struct kmem_cache *s, struct page *page,
1465 void *head, void *tail, int bulk_cnt,
1466 unsigned long addr) { return 0; }
1467
1468 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1469 { return 1; }
1470 static inline int check_object(struct kmem_cache *s, struct page *page,
1471 void *object, u8 val) { return 1; }
1472 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1473 struct page *page) {}
1474 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1475 struct page *page) {}
1476 slab_flags_t kmem_cache_flags(unsigned int object_size,
1477 slab_flags_t flags, const char *name,
1478 void (*ctor)(void *))
1479 {
1480 return flags;
1481 }
1482 #define slub_debug 0
1483
1484 #define disable_higher_order_debug 0
1485
1486 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1487 { return 0; }
1488 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1489 { return 0; }
1490 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1491 int objects) {}
1492 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1493 int objects) {}
1494
1495 static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
1496 void **freelist, void *nextfree)
1497 {
1498 return false;
1499 }
1500 #endif /* CONFIG_SLUB_DEBUG */
1501
1502 /*
1503 * Hooks for other subsystems that check memory allocations. In a typical
1504 * production configuration these hooks all should produce no code at all.
1505 */
1506 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1507 {
1508 ptr = kasan_kmalloc_large(ptr, size, flags);
1509 /* As ptr might get tagged, call kmemleak hook after KASAN. */
1510 kmemleak_alloc(ptr, size, 1, flags);
1511 return ptr;
1512 }
1513
1514 static __always_inline void kfree_hook(void *x)
1515 {
1516 kmemleak_free(x);
1517 kasan_kfree_large(x, _RET_IP_);
1518 }
1519
1520 static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
1521 {
1522 kmemleak_free_recursive(x, s->flags);
1523
1524 /*
1525 * Trouble is that we may no longer disable interrupts in the fast path.
1526 * So in order to make the debug calls that expect irqs to be
1527 * disabled, we need to disable interrupts temporarily.
1528 */
1529 #ifdef CONFIG_LOCKDEP
1530 {
1531 unsigned long flags;
1532
1533 local_irq_save(flags);
1534 debug_check_no_locks_freed(x, s->object_size);
1535 local_irq_restore(flags);
1536 }
1537 #endif
1538 if (!(s->flags & SLAB_DEBUG_OBJECTS))
1539 debug_check_no_obj_freed(x, s->object_size);
1540
1541 /* Use KCSAN to help debug racy use-after-free. */
1542 if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
1543 __kcsan_check_access(x, s->object_size,
1544 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
1545
1546 /* KASAN might put x into memory quarantine, delaying its reuse */
1547 return kasan_slab_free(s, x, _RET_IP_);
1548 }
1549
1550 static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1551 void **head, void **tail)
1552 {
1553
1554 void *object;
1555 void *next = *head;
1556 void *old_tail = *tail ? *tail : *head;
1557 int rsize;
1558
1559 /* Head and tail of the reconstructed freelist */
1560 *head = NULL;
1561 *tail = NULL;
1562
1563 do {
1564 object = next;
1565 next = get_freepointer(s, object);
1566
1567 if (slab_want_init_on_free(s)) {
1568 /*
1569 * Clear the object and the metadata, but don't touch
1570 * the redzone.
1571 */
1572 memset(kasan_reset_tag(object), 0, s->object_size);
1573 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
1574 : 0;
1575 memset((char *)kasan_reset_tag(object) + s->inuse, 0,
1576 s->size - s->inuse - rsize);
1577
1578 }
1579 /* If object's reuse doesn't have to be delayed */
1580 if (!slab_free_hook(s, object)) {
1581 /* Move object to the new freelist */
1582 set_freepointer(s, object, *head);
1583 *head = object;
1584 if (!*tail)
1585 *tail = object;
1586 }
1587 } while (object != old_tail);
1588
1589 if (*head == *tail)
1590 *tail = NULL;
1591
1592 return *head != NULL;
1593 }
1594
1595 static void *setup_object(struct kmem_cache *s, struct page *page,
1596 void *object)
1597 {
1598 setup_object_debug(s, page, object);
1599 object = kasan_init_slab_obj(s, object);
1600 if (unlikely(s->ctor)) {
1601 kasan_unpoison_object_data(s, object);
1602 s->ctor(object);
1603 kasan_poison_object_data(s, object);
1604 }
1605 return object;
1606 }
1607
1608 /*
1609 * Slab allocation and freeing
1610 */
1611 static inline struct page *alloc_slab_page(struct kmem_cache *s,
1612 gfp_t flags, int node, struct kmem_cache_order_objects oo)
1613 {
1614 struct page *page;
1615 unsigned int order = oo_order(oo);
1616
1617 if (node == NUMA_NO_NODE)
1618 page = alloc_pages(flags, order);
1619 else
1620 page = __alloc_pages_node(node, flags, order);
1621
1622 return page;
1623 }
1624
1625 #ifdef CONFIG_SLAB_FREELIST_RANDOM
1626 /* Pre-initialize the random sequence cache */
1627 static int init_cache_random_seq(struct kmem_cache *s)
1628 {
1629 unsigned int count = oo_objects(s->oo);
1630 int err;
1631
1632 /* Bailout if already initialised */
1633 if (s->random_seq)
1634 return 0;
1635
1636 err = cache_random_seq_create(s, count, GFP_KERNEL);
1637 if (err) {
1638 pr_err("SLUB: Unable to initialize free list for %s\n",
1639 s->name);
1640 return err;
1641 }
1642
1643 /* Transform to an offset on the set of pages */
1644 if (s->random_seq) {
1645 unsigned int i;
1646
1647 for (i = 0; i < count; i++)
1648 s->random_seq[i] *= s->size;
1649 }
1650 return 0;
1651 }
1652
1653 /* Initialize each random sequence freelist per cache */
1654 static void __init init_freelist_randomization(void)
1655 {
1656 struct kmem_cache *s;
1657
1658 mutex_lock(&slab_mutex);
1659
1660 list_for_each_entry(s, &slab_caches, list)
1661 init_cache_random_seq(s);
1662
1663 mutex_unlock(&slab_mutex);
1664 }
1665
1666 /* Get the next entry on the pre-computed freelist randomized */
1667 static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1668 unsigned long *pos, void *start,
1669 unsigned long page_limit,
1670 unsigned long freelist_count)
1671 {
1672 unsigned int idx;
1673
1674 /*
1675 * If the target page allocation failed, the number of objects on the
1676 * page might be smaller than the usual size defined by the cache.
1677 */
1678 do {
1679 idx = s->random_seq[*pos];
1680 *pos += 1;
1681 if (*pos >= freelist_count)
1682 *pos = 0;
1683 } while (unlikely(idx >= page_limit));
1684
1685 return (char *)start + idx;
1686 }
1687
1688 /* Shuffle the single linked freelist based on a random pre-computed sequence */
1689 static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1690 {
1691 void *start;
1692 void *cur;
1693 void *next;
1694 unsigned long idx, pos, page_limit, freelist_count;
1695
1696 if (page->objects < 2 || !s->random_seq)
1697 return false;
1698
1699 freelist_count = oo_objects(s->oo);
1700 pos = get_random_int() % freelist_count;
1701
1702 page_limit = page->objects * s->size;
1703 start = fixup_red_left(s, page_address(page));
1704
1705 /* First entry is used as the base of the freelist */
1706 cur = next_freelist_entry(s, page, &pos, start, page_limit,
1707 freelist_count);
1708 cur = setup_object(s, page, cur);
1709 page->freelist = cur;
1710
1711 for (idx = 1; idx < page->objects; idx++) {
1712 next = next_freelist_entry(s, page, &pos, start, page_limit,
1713 freelist_count);
1714 next = setup_object(s, page, next);
1715 set_freepointer(s, cur, next);
1716 cur = next;
1717 }
1718 set_freepointer(s, cur, NULL);
1719
1720 return true;
1721 }
1722 #else
1723 static inline int init_cache_random_seq(struct kmem_cache *s)
1724 {
1725 return 0;
1726 }
1727 static inline void init_freelist_randomization(void) { }
1728 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1729 {
1730 return false;
1731 }
1732 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
1733
1734 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1735 {
1736 struct page *page;
1737 struct kmem_cache_order_objects oo = s->oo;
1738 gfp_t alloc_gfp;
1739 void *start, *p, *next;
1740 int idx;
1741 bool shuffle;
1742
1743 flags &= gfp_allowed_mask;
1744
1745 if (gfpflags_allow_blocking(flags))
1746 local_irq_enable();
1747
1748 flags |= s->allocflags;
1749
1750 /*
1751 * Let the initial higher-order allocation fail under memory pressure
1752 * so we fall-back to the minimum order allocation.
1753 */
1754 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1755 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
1756 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
1757
1758 page = alloc_slab_page(s, alloc_gfp, node, oo);
1759 if (unlikely(!page)) {
1760 oo = s->min;
1761 alloc_gfp = flags;
1762 /*
1763 * Allocation may have failed due to fragmentation.
1764 * Try a lower order alloc if possible
1765 */
1766 page = alloc_slab_page(s, alloc_gfp, node, oo);
1767 if (unlikely(!page))
1768 goto out;
1769 stat(s, ORDER_FALLBACK);
1770 }
1771
1772 page->objects = oo_objects(oo);
1773
1774 account_slab_page(page, oo_order(oo), s);
1775
1776 page->slab_cache = s;
1777 __SetPageSlab(page);
1778 if (page_is_pfmemalloc(page))
1779 SetPageSlabPfmemalloc(page);
1780
1781 kasan_poison_slab(page);
1782
1783 start = page_address(page);
1784
1785 setup_page_debug(s, page, start);
1786
1787 shuffle = shuffle_freelist(s, page);
1788
1789 if (!shuffle) {
1790 start = fixup_red_left(s, start);
1791 start = setup_object(s, page, start);
1792 page->freelist = start;
1793 for (idx = 0, p = start; idx < page->objects - 1; idx++) {
1794 next = p + s->size;
1795 next = setup_object(s, page, next);
1796 set_freepointer(s, p, next);
1797 p = next;
1798 }
1799 set_freepointer(s, p, NULL);
1800 }
1801
1802 page->inuse = page->objects;
1803 page->frozen = 1;
1804
1805 out:
1806 if (gfpflags_allow_blocking(flags))
1807 local_irq_disable();
1808 if (!page)
1809 return NULL;
1810
1811 inc_slabs_node(s, page_to_nid(page), page->objects);
1812
1813 return page;
1814 }
1815
1816 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1817 {
1818 if (unlikely(flags & GFP_SLAB_BUG_MASK))
1819 flags = kmalloc_fix_flags(flags);
1820
1821 return allocate_slab(s,
1822 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1823 }
1824
1825 static void __free_slab(struct kmem_cache *s, struct page *page)
1826 {
1827 int order = compound_order(page);
1828 int pages = 1 << order;
1829
1830 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
1831 void *p;
1832
1833 slab_pad_check(s, page);
1834 for_each_object(p, s, page_address(page),
1835 page->objects)
1836 check_object(s, page, p, SLUB_RED_INACTIVE);
1837 }
1838
1839 __ClearPageSlabPfmemalloc(page);
1840 __ClearPageSlab(page);
1841 /* In union with page->mapping where page allocator expects NULL */
1842 page->slab_cache = NULL;
1843 if (current->reclaim_state)
1844 current->reclaim_state->reclaimed_slab += pages;
1845 unaccount_slab_page(page, order, s);
1846 __free_pages(page, order);
1847 }
1848
1849 static void rcu_free_slab(struct rcu_head *h)
1850 {
1851 struct page *page = container_of(h, struct page, rcu_head);
1852
1853 __free_slab(page->slab_cache, page);
1854 }
1855
1856 static void free_slab(struct kmem_cache *s, struct page *page)
1857 {
1858 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
1859 call_rcu(&page->rcu_head, rcu_free_slab);
1860 } else
1861 __free_slab(s, page);
1862 }
1863
1864 static void discard_slab(struct kmem_cache *s, struct page *page)
1865 {
1866 dec_slabs_node(s, page_to_nid(page), page->objects);
1867 free_slab(s, page);
1868 }
1869
1870 /*
1871 * Management of partially allocated slabs.
1872 */
1873 static inline void
1874 __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1875 {
1876 n->nr_partial++;
1877 if (tail == DEACTIVATE_TO_TAIL)
1878 list_add_tail(&page->slab_list, &n->partial);
1879 else
1880 list_add(&page->slab_list, &n->partial);
1881 }
1882
1883 static inline void add_partial(struct kmem_cache_node *n,
1884 struct page *page, int tail)
1885 {
1886 lockdep_assert_held(&n->list_lock);
1887 __add_partial(n, page, tail);
1888 }
1889
1890 static inline void remove_partial(struct kmem_cache_node *n,
1891 struct page *page)
1892 {
1893 lockdep_assert_held(&n->list_lock);
1894 list_del(&page->slab_list);
1895 n->nr_partial--;
1896 }
1897
1898 /*
1899 * Remove slab from the partial list, freeze it and
1900 * return the pointer to the freelist.
1901 *
1902 * Returns a list of objects or NULL if it fails.
1903 */
1904 static inline void *acquire_slab(struct kmem_cache *s,
1905 struct kmem_cache_node *n, struct page *page,
1906 int mode, int *objects)
1907 {
1908 void *freelist;
1909 unsigned long counters;
1910 struct page new;
1911
1912 lockdep_assert_held(&n->list_lock);
1913
1914 /*
1915 * Zap the freelist and set the frozen bit.
1916 * The old freelist is the list of objects for the
1917 * per cpu allocation list.
1918 */
1919 freelist = page->freelist;
1920 counters = page->counters;
1921 new.counters = counters;
1922 *objects = new.objects - new.inuse;
1923 if (mode) {
1924 new.inuse = page->objects;
1925 new.freelist = NULL;
1926 } else {
1927 new.freelist = freelist;
1928 }
1929
1930 VM_BUG_ON(new.frozen);
1931 new.frozen = 1;
1932
1933 if (!__cmpxchg_double_slab(s, page,
1934 freelist, counters,
1935 new.freelist, new.counters,
1936 "acquire_slab"))
1937 return NULL;
1938
1939 remove_partial(n, page);
1940 WARN_ON(!freelist);
1941 return freelist;
1942 }
1943
1944 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1945 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1946
1947 /*
1948 * Try to allocate a partial slab from a specific node.
1949 */
1950 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1951 struct kmem_cache_cpu *c, gfp_t flags)
1952 {
1953 struct page *page, *page2;
1954 void *object = NULL;
1955 unsigned int available = 0;
1956 int objects;
1957
1958 /*
1959 * Racy check. If we mistakenly see no partial slabs then we
1960 * just allocate an empty slab. If we mistakenly try to get a
1961 * partial slab and there is none available then get_partial()
1962 * will return NULL.
1963 */
1964 if (!n || !n->nr_partial)
1965 return NULL;
1966
1967 spin_lock(&n->list_lock);
1968 list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
1969 void *t;
1970
1971 if (!pfmemalloc_match(page, flags))
1972 continue;
1973
1974 t = acquire_slab(s, n, page, object == NULL, &objects);
1975 if (!t)
1976 break;
1977
1978 available += objects;
1979 if (!object) {
1980 c->page = page;
1981 stat(s, ALLOC_FROM_PARTIAL);
1982 object = t;
1983 } else {
1984 put_cpu_partial(s, page, 0);
1985 stat(s, CPU_PARTIAL_NODE);
1986 }
1987 if (!kmem_cache_has_cpu_partial(s)
1988 || available > slub_cpu_partial(s) / 2)
1989 break;
1990
1991 }
1992 spin_unlock(&n->list_lock);
1993 return object;
1994 }
1995
1996 /*
1997 * Get a page from somewhere. Search in increasing NUMA distances.
1998 */
1999 static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
2000 struct kmem_cache_cpu *c)
2001 {
2002 #ifdef CONFIG_NUMA
2003 struct zonelist *zonelist;
2004 struct zoneref *z;
2005 struct zone *zone;
2006 enum zone_type highest_zoneidx = gfp_zone(flags);
2007 void *object;
2008 unsigned int cpuset_mems_cookie;
2009
2010 /*
2011 * The defrag ratio allows a configuration of the tradeoffs between
2012 * inter node defragmentation and node local allocations. A lower
2013 * defrag_ratio increases the tendency to do local allocations
2014 * instead of attempting to obtain partial slabs from other nodes.
2015 *
2016 * If the defrag_ratio is set to 0 then kmalloc() always
2017 * returns node local objects. If the ratio is higher then kmalloc()
2018 * may return off node objects because partial slabs are obtained
2019 * from other nodes and filled up.
2020 *
2021 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
2022 * (which makes defrag_ratio = 1000) then every (well almost)
2023 * allocation will first attempt to defrag slab caches on other nodes.
2024 * This means scanning over all nodes to look for partial slabs which
2025 * may be expensive if we do it every time we are trying to find a slab
2026 * with available objects.
2027 */
2028 if (!s->remote_node_defrag_ratio ||
2029 get_cycles() % 1024 > s->remote_node_defrag_ratio)
2030 return NULL;
2031
2032 do {
2033 cpuset_mems_cookie = read_mems_allowed_begin();
2034 zonelist = node_zonelist(mempolicy_slab_node(), flags);
2035 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
2036 struct kmem_cache_node *n;
2037
2038 n = get_node(s, zone_to_nid(zone));
2039
2040 if (n && cpuset_zone_allowed(zone, flags) &&
2041 n->nr_partial > s->min_partial) {
2042 object = get_partial_node(s, n, c, flags);
2043 if (object) {
2044 /*
2045 * Don't check read_mems_allowed_retry()
2046 * here - if mems_allowed was updated in
2047 * parallel, that was a harmless race
2048 * between allocation and the cpuset
2049 * update
2050 */
2051 return object;
2052 }
2053 }
2054 }
2055 } while (read_mems_allowed_retry(cpuset_mems_cookie));
2056 #endif /* CONFIG_NUMA */
2057 return NULL;
2058 }
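
/*
 * Worked example (illustrative only, assuming get_cycles() is roughly
 * uniform modulo 1024): writing 20 to
 * /sys/kernel/slab/<cache>/remote_node_defrag_ratio stores 200 in
 * s->remote_node_defrag_ratio, so the ratio check at the top of
 * get_any_partial() lets roughly 200/1024 (about 20%) of these slow-path
 * attempts go on to scan remote nodes for partial slabs, while the rest
 * return NULL and fall back to allocating a new slab.
 */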
2059
2060 /*
2061 * Get a partial page, lock it and return it.
2062 */
2063 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
2064 struct kmem_cache_cpu *c)
2065 {
2066 void *object;
2067 int searchnode = node;
2068
2069 if (node == NUMA_NO_NODE)
2070 searchnode = numa_mem_id();
2071
2072 object = get_partial_node(s, get_node(s, searchnode), c, flags);
2073 if (object || node != NUMA_NO_NODE)
2074 return object;
2075
2076 return get_any_partial(s, flags, c);
2077 }
2078
2079 #ifdef CONFIG_PREEMPTION
2080 /*
2081 * Calculate the next globally unique transaction for disambiguation
2082 * during cmpxchg. The transactions start with the cpu number and are then
2083 * incremented by CONFIG_NR_CPUS.
2084 */
2085 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
2086 #else
2087 /*
2088 * No preemption is supported, therefore there is also no need to
2089 * check for different cpus.
2090 */
2091 #define TID_STEP 1
2092 #endif
2093
2094 static inline unsigned long next_tid(unsigned long tid)
2095 {
2096 return tid + TID_STEP;
2097 }
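
/*
 * Worked example (illustrative only): with CONFIG_PREEMPTION and
 * CONFIG_NR_CPUS=6, TID_STEP is roundup_pow_of_two(6) = 8. CPU 2 then
 * uses the tid sequence 2, 10, 18, ... so that tid_to_cpu(18) = 18 % 8 = 2
 * and tid_to_event(18) = 18 / 8 = 2. A tid therefore encodes both which
 * cpu issued it and how many operations that cpu has completed, which is
 * what lets the fastpath cmpxchg detect migration to another cpu as well
 * as intervening allocs/frees on the same cpu.
 */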
2098
2099 #ifdef SLUB_DEBUG_CMPXCHG
2100 static inline unsigned int tid_to_cpu(unsigned long tid)
2101 {
2102 return tid % TID_STEP;
2103 }
2104
2105 static inline unsigned long tid_to_event(unsigned long tid)
2106 {
2107 return tid / TID_STEP;
2108 }
2109 #endif
2110
2111 static inline unsigned int init_tid(int cpu)
2112 {
2113 return cpu;
2114 }
2115
2116 static inline void note_cmpxchg_failure(const char *n,
2117 const struct kmem_cache *s, unsigned long tid)
2118 {
2119 #ifdef SLUB_DEBUG_CMPXCHG
2120 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
2121
2122 pr_info("%s %s: cmpxchg redo ", n, s->name);
2123
2124 #ifdef CONFIG_PREEMPTION
2125 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
2126 pr_warn("due to cpu change %d -> %d\n",
2127 tid_to_cpu(tid), tid_to_cpu(actual_tid));
2128 else
2129 #endif
2130 if (tid_to_event(tid) != tid_to_event(actual_tid))
2131 pr_warn("due to cpu running other code. Event %ld->%ld\n",
2132 tid_to_event(tid), tid_to_event(actual_tid));
2133 else
2134 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2135 actual_tid, tid, next_tid(tid));
2136 #endif
2137 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2138 }
2139
2140 static void init_kmem_cache_cpus(struct kmem_cache *s)
2141 {
2142 int cpu;
2143
2144 for_each_possible_cpu(cpu)
2145 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
2146 }
2147
2148 /*
2149 * Remove the cpu slab
2150 */
2151 static void deactivate_slab(struct kmem_cache *s, struct page *page,
2152 void *freelist, struct kmem_cache_cpu *c)
2153 {
2154 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2155 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2156 int lock = 0;
2157 enum slab_modes l = M_NONE, m = M_NONE;
2158 void *nextfree;
2159 int tail = DEACTIVATE_TO_HEAD;
2160 struct page new;
2161 struct page old;
2162
2163 if (page->freelist) {
2164 stat(s, DEACTIVATE_REMOTE_FREES);
2165 tail = DEACTIVATE_TO_TAIL;
2166 }
2167
2168 /*
2169 * Stage one: Free all available per cpu objects back
2170 * to the page freelist while it is still frozen. Leave the
2171 * last one.
2172 *
2173 * There is no need to take the list->lock because the page
2174 * is still frozen.
2175 */
2176 while (freelist && (nextfree = get_freepointer(s, freelist))) {
2177 void *prior;
2178 unsigned long counters;
2179
2180 /*
2181 * If 'nextfree' is invalid, it is possible that the object at
2182 * 'freelist' is already corrupted. So isolate all objects
2183 * starting at 'freelist'.
2184 */
2185 if (freelist_corrupted(s, page, &freelist, nextfree))
2186 break;
2187
2188 do {
2189 prior = page->freelist;
2190 counters = page->counters;
2191 set_freepointer(s, freelist, prior);
2192 new.counters = counters;
2193 new.inuse--;
2194 VM_BUG_ON(!new.frozen);
2195
2196 } while (!__cmpxchg_double_slab(s, page,
2197 prior, counters,
2198 freelist, new.counters,
2199 "drain percpu freelist"));
2200
2201 freelist = nextfree;
2202 }
2203
2204 /*
2205 * Stage two: Ensure that the page is unfrozen while the
2206 * list presence reflects the actual number of objects
2207 * during unfreeze.
2208 *
2209 * We set up the list membership and then perform a cmpxchg
2210 * with the count. If there is a mismatch then the page
2211 * is not unfrozen but it is on the wrong list.
2212 *
2213 * In that case we restart the process, which may have to
2214 * remove the page from the list that we just put it on,
2215 * because the number of objects in the slab may have
2216 * changed.
2217 */
2218 redo:
2219
2220 old.freelist = page->freelist;
2221 old.counters = page->counters;
2222 VM_BUG_ON(!old.frozen);
2223
2224 /* Determine target state of the slab */
2225 new.counters = old.counters;
2226 if (freelist) {
2227 new.inuse--;
2228 set_freepointer(s, freelist, old.freelist);
2229 new.freelist = freelist;
2230 } else
2231 new.freelist = old.freelist;
2232
2233 new.frozen = 0;
2234
2235 if (!new.inuse && n->nr_partial >= s->min_partial)
2236 m = M_FREE;
2237 else if (new.freelist) {
2238 m = M_PARTIAL;
2239 if (!lock) {
2240 lock = 1;
2241 /*
2242 * Taking the spinlock removes the possibility
2243 * that acquire_slab() will see a slab page that
2244 * is frozen
2245 */
2246 spin_lock(&n->list_lock);
2247 }
2248 } else {
2249 m = M_FULL;
2250 if (kmem_cache_debug_flags(s, SLAB_STORE_USER) && !lock) {
2251 lock = 1;
2252 /*
2253 * This also ensures that the scanning of full
2254 * slabs from diagnostic functions will not see
2255 * any frozen slabs.
2256 */
2257 spin_lock(&n->list_lock);
2258 }
2259 }
2260
2261 if (l != m) {
2262 if (l == M_PARTIAL)
2263 remove_partial(n, page);
2264 else if (l == M_FULL)
2265 remove_full(s, n, page);
2266
2267 if (m == M_PARTIAL)
2268 add_partial(n, page, tail);
2269 else if (m == M_FULL)
2270 add_full(s, n, page);
2271 }
2272
2273 l = m;
2274 if (!__cmpxchg_double_slab(s, page,
2275 old.freelist, old.counters,
2276 new.freelist, new.counters,
2277 "unfreezing slab"))
2278 goto redo;
2279
2280 if (lock)
2281 spin_unlock(&n->list_lock);
2282
2283 if (m == M_PARTIAL)
2284 stat(s, tail);
2285 else if (m == M_FULL)
2286 stat(s, DEACTIVATE_FULL);
2287 else if (m == M_FREE) {
2288 stat(s, DEACTIVATE_EMPTY);
2289 discard_slab(s, page);
2290 stat(s, FREE_SLAB);
2291 }
2292
2293 c->page = NULL;
2294 c->freelist = NULL;
2295 }
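
/*
 * For example (illustrative only): a cpu slab whose per cpu free objects
 * were drained back in stage one and that still has a few objects in use
 * ends up with a non-empty page freelist, so it is unfrozen as M_PARTIAL
 * and added at the head of the node partial list (or at the tail, if
 * other cpus freed objects into it while it was frozen). If instead every
 * object is free and the node already holds at least s->min_partial
 * partial slabs, it is unfrozen as M_FREE and discarded outright.
 */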
2296
2297 /*
2298 * Unfreeze all the cpu partial slabs.
2299 *
2300 * This function must be called with interrupts disabled
2301 * for the cpu using c (or some other guarantee must be there
2302 * to guarantee no concurrent accesses).
2303 */
2304 static void unfreeze_partials(struct kmem_cache *s,
2305 struct kmem_cache_cpu *c)
2306 {
2307 #ifdef CONFIG_SLUB_CPU_PARTIAL
2308 struct kmem_cache_node *n = NULL, *n2 = NULL;
2309 struct page *page, *discard_page = NULL;
2310
2311 while ((page = slub_percpu_partial(c))) {
2312 struct page new;
2313 struct page old;
2314
2315 slub_set_percpu_partial(c, page);
2316
2317 n2 = get_node(s, page_to_nid(page));
2318 if (n != n2) {
2319 if (n)
2320 spin_unlock(&n->list_lock);
2321
2322 n = n2;
2323 spin_lock(&n->list_lock);
2324 }
2325
2326 do {
2327
2328 old.freelist = page->freelist;
2329 old.counters = page->counters;
2330 VM_BUG_ON(!old.frozen);
2331
2332 new.counters = old.counters;
2333 new.freelist = old.freelist;
2334
2335 new.frozen = 0;
2336
2337 } while (!__cmpxchg_double_slab(s, page,
2338 old.freelist, old.counters,
2339 new.freelist, new.counters,
2340 "unfreezing slab"));
2341
2342 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
2343 page->next = discard_page;
2344 discard_page = page;
2345 } else {
2346 add_partial(n, page, DEACTIVATE_TO_TAIL);
2347 stat(s, FREE_ADD_PARTIAL);
2348 }
2349 }
2350
2351 if (n)
2352 spin_unlock(&n->list_lock);
2353
2354 while (discard_page) {
2355 page = discard_page;
2356 discard_page = discard_page->next;
2357
2358 stat(s, DEACTIVATE_EMPTY);
2359 discard_slab(s, page);
2360 stat(s, FREE_SLAB);
2361 }
2362 #endif /* CONFIG_SLUB_CPU_PARTIAL */
2363 }
2364
2365 /*
2366 * Put a page that was just frozen (in __slab_free|get_partial_node) into a
2367 * partial page slot if available.
2368 *
2369 * If we did not find a slot then simply move all the partials to the
2370 * per node partial list.
2371 */
2372 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2373 {
2374 #ifdef CONFIG_SLUB_CPU_PARTIAL
2375 struct page *oldpage;
2376 int pages;
2377 int pobjects;
2378
2379 preempt_disable();
2380 do {
2381 pages = 0;
2382 pobjects = 0;
2383 oldpage = this_cpu_read(s->cpu_slab->partial);
2384
2385 if (oldpage) {
2386 pobjects = oldpage->pobjects;
2387 pages = oldpage->pages;
2388 if (drain && pobjects > slub_cpu_partial(s)) {
2389 unsigned long flags;
2390 /*
2391 * partial array is full. Move the existing
2392 * set to the per node partial list.
2393 */
2394 local_irq_save(flags);
2395 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2396 local_irq_restore(flags);
2397 oldpage = NULL;
2398 pobjects = 0;
2399 pages = 0;
2400 stat(s, CPU_PARTIAL_DRAIN);
2401 }
2402 }
2403
2404 pages++;
2405 pobjects += page->objects - page->inuse;
2406
2407 page->pages = pages;
2408 page->pobjects = pobjects;
2409 page->next = oldpage;
2410
2411 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2412 != oldpage);
2413 if (unlikely(!slub_cpu_partial(s))) {
2414 unsigned long flags;
2415
2416 local_irq_save(flags);
2417 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2418 local_irq_restore(flags);
2419 }
2420 preempt_enable();
2421 #endif /* CONFIG_SLUB_CPU_PARTIAL */
2422 }
2423
2424 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2425 {
2426 stat(s, CPUSLAB_FLUSH);
2427 deactivate_slab(s, c->page, c->freelist, c);
2428
2429 c->tid = next_tid(c->tid);
2430 }
2431
2432 /*
2433 * Flush cpu slab.
2434 *
2435 * Called from IPI handler with interrupts disabled.
2436 */
2437 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2438 {
2439 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2440
2441 if (c->page)
2442 flush_slab(s, c);
2443
2444 unfreeze_partials(s, c);
2445 }
2446
2447 static void flush_cpu_slab(void *d)
2448 {
2449 struct kmem_cache *s = d;
2450
2451 __flush_cpu_slab(s, smp_processor_id());
2452 }
2453
2454 static bool has_cpu_slab(int cpu, void *info)
2455 {
2456 struct kmem_cache *s = info;
2457 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2458
2459 return c->page || slub_percpu_partial(c);
2460 }
2461
2462 static void flush_all(struct kmem_cache *s)
2463 {
2464 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
2465 }
2466
2467 /*
2468 * Use the cpu notifier to ensure that the cpu slabs are flushed when
2469 * necessary.
2470 */
2471 static int slub_cpu_dead(unsigned int cpu)
2472 {
2473 struct kmem_cache *s;
2474 unsigned long flags;
2475
2476 mutex_lock(&slab_mutex);
2477 list_for_each_entry(s, &slab_caches, list) {
2478 local_irq_save(flags);
2479 __flush_cpu_slab(s, cpu);
2480 local_irq_restore(flags);
2481 }
2482 mutex_unlock(&slab_mutex);
2483 return 0;
2484 }
2485
2486 /*
2487 * Check if the objects in a per cpu structure fit numa
2488 * locality expectations.
2489 */
2490 static inline int node_match(struct page *page, int node)
2491 {
2492 #ifdef CONFIG_NUMA
2493 if (node != NUMA_NO_NODE && page_to_nid(page) != node)
2494 return 0;
2495 #endif
2496 return 1;
2497 }
2498
2499 #ifdef CONFIG_SLUB_DEBUG
2500 static int count_free(struct page *page)
2501 {
2502 return page->objects - page->inuse;
2503 }
2504
2505 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2506 {
2507 return atomic_long_read(&n->total_objects);
2508 }
2509 #endif /* CONFIG_SLUB_DEBUG */
2510
2511 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
2512 static unsigned long count_partial(struct kmem_cache_node *n,
2513 int (*get_count)(struct page *))
2514 {
2515 unsigned long flags;
2516 unsigned long x = 0;
2517 struct page *page;
2518
2519 spin_lock_irqsave(&n->list_lock, flags);
2520 list_for_each_entry(page, &n->partial, slab_list)
2521 x += get_count(page);
2522 spin_unlock_irqrestore(&n->list_lock, flags);
2523 return x;
2524 }
2525 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
2526
2527 static noinline void
2528 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2529 {
2530 #ifdef CONFIG_SLUB_DEBUG
2531 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2532 DEFAULT_RATELIMIT_BURST);
2533 int node;
2534 struct kmem_cache_node *n;
2535
2536 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2537 return;
2538
2539 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2540 nid, gfpflags, &gfpflags);
2541 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
2542 s->name, s->object_size, s->size, oo_order(s->oo),
2543 oo_order(s->min));
2544
2545 if (oo_order(s->min) > get_order(s->object_size))
2546 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
2547 s->name);
2548
2549 for_each_kmem_cache_node(s, node, n) {
2550 unsigned long nr_slabs;
2551 unsigned long nr_objs;
2552 unsigned long nr_free;
2553
2554 nr_free = count_partial(n, count_free);
2555 nr_slabs = node_nr_slabs(n);
2556 nr_objs = node_nr_objs(n);
2557
2558 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
2559 node, nr_slabs, nr_objs, nr_free);
2560 }
2561 #endif
2562 }
2563
2564 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2565 int node, struct kmem_cache_cpu **pc)
2566 {
2567 void *freelist;
2568 struct kmem_cache_cpu *c = *pc;
2569 struct page *page;
2570
2571 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
2572
2573 freelist = get_partial(s, flags, node, c);
2574
2575 if (freelist)
2576 return freelist;
2577
2578 page = new_slab(s, flags, node);
2579 if (page) {
2580 c = raw_cpu_ptr(s->cpu_slab);
2581 if (c->page)
2582 flush_slab(s, c);
2583
2584 /*
2585 * No other reference to the page yet so we can
2586 * muck around with it freely without cmpxchg
2587 */
2588 freelist = page->freelist;
2589 page->freelist = NULL;
2590
2591 stat(s, ALLOC_SLAB);
2592 c->page = page;
2593 *pc = c;
2594 }
2595
2596 return freelist;
2597 }
2598
2599 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2600 {
2601 if (unlikely(PageSlabPfmemalloc(page)))
2602 return gfp_pfmemalloc_allowed(gfpflags);
2603
2604 return true;
2605 }
2606
2607 /*
2608 * Check the page->freelist of a page and either transfer the freelist to the
2609 * per cpu freelist or deactivate the page.
2610 *
2611 * The page is still frozen if the return value is not NULL.
2612 *
2613 * If this function returns NULL then the page has been unfrozen.
2614 *
2615 * This function must be called with interrupts disabled.
2616 */
2617 static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2618 {
2619 struct page new;
2620 unsigned long counters;
2621 void *freelist;
2622
2623 do {
2624 freelist = page->freelist;
2625 counters = page->counters;
2626
2627 new.counters = counters;
2628 VM_BUG_ON(!new.frozen);
2629
2630 new.inuse = page->objects;
2631 new.frozen = freelist != NULL;
2632
2633 } while (!__cmpxchg_double_slab(s, page,
2634 freelist, counters,
2635 NULL, new.counters,
2636 "get_freelist"));
2637
2638 return freelist;
2639 }
2640
2641 /*
2642 * Slow path. The lockless freelist is empty or we need to perform
2643 * debugging duties.
2644 *
2645 * Processing is still very fast if new objects have been freed to the
2646 * regular freelist. In that case we simply take over the regular freelist
2647 * as the lockless freelist and zap the regular freelist.
2648 *
2649 * If that is not working then we fall back to the partial lists. We take the
2650 * first element of the freelist as the object to allocate now and move the
2651 * rest of the freelist to the lockless freelist.
2652 *
2653 * And if we were unable to get a new slab from the partial slab lists then
2654 * we need to allocate a new slab. This is the slowest path since it involves
2655 * a call to the page allocator and the setup of a new slab.
2656 *
2657 * Version of __slab_alloc to use when we know that interrupts are
2658 * already disabled (which is the case for bulk allocation).
2659 */
2660 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2661 unsigned long addr, struct kmem_cache_cpu *c)
2662 {
2663 void *freelist;
2664 struct page *page;
2665
2666 stat(s, ALLOC_SLOWPATH);
2667
2668 page = c->page;
2669 if (!page) {
2670 /*
2671 * if the node is not online or has no normal memory, just
2672 * ignore the node constraint
2673 */
2674 if (unlikely(node != NUMA_NO_NODE &&
2675 !node_state(node, N_NORMAL_MEMORY)))
2676 node = NUMA_NO_NODE;
2677 goto new_slab;
2678 }
2679 redo:
2680
2681 if (unlikely(!node_match(page, node))) {
2682 /*
2683 * same as above but node_match() being false already
2684 * implies node != NUMA_NO_NODE
2685 */
2686 if (!node_state(node, N_NORMAL_MEMORY)) {
2687 node = NUMA_NO_NODE;
2688 goto redo;
2689 } else {
2690 stat(s, ALLOC_NODE_MISMATCH);
2691 deactivate_slab(s, page, c->freelist, c);
2692 goto new_slab;
2693 }
2694 }
2695
2696 /*
2697 * By rights, we should be searching for a slab page that was
2698 * PFMEMALLOC but right now, we are losing the pfmemalloc
2699 * information when the page leaves the per-cpu allocator
2700 */
2701 if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2702 deactivate_slab(s, page, c->freelist, c);
2703 goto new_slab;
2704 }
2705
2706 /* must check again c->freelist in case of cpu migration or IRQ */
2707 freelist = c->freelist;
2708 if (freelist)
2709 goto load_freelist;
2710
2711 freelist = get_freelist(s, page);
2712
2713 if (!freelist) {
2714 c->page = NULL;
2715 stat(s, DEACTIVATE_BYPASS);
2716 goto new_slab;
2717 }
2718
2719 stat(s, ALLOC_REFILL);
2720
2721 load_freelist:
2722 /*
2723 * freelist is pointing to the list of objects to be used.
2724 * page is pointing to the page from which the objects are obtained.
2725 * That page must be frozen for per cpu allocations to work.
2726 */
2727 VM_BUG_ON(!c->page->frozen);
2728 c->freelist = get_freepointer(s, freelist);
2729 c->tid = next_tid(c->tid);
2730 return freelist;
2731
2732 new_slab:
2733
2734 if (slub_percpu_partial(c)) {
2735 page = c->page = slub_percpu_partial(c);
2736 slub_set_percpu_partial(c, page);
2737 stat(s, CPU_PARTIAL_ALLOC);
2738 goto redo;
2739 }
2740
2741 freelist = new_slab_objects(s, gfpflags, node, &c);
2742
2743 if (unlikely(!freelist)) {
2744 slab_out_of_memory(s, gfpflags, node);
2745 return NULL;
2746 }
2747
2748 page = c->page;
2749 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2750 goto load_freelist;
2751
2752 /* Only entered in the debug case */
2753 if (kmem_cache_debug(s) &&
2754 !alloc_debug_processing(s, page, freelist, addr))
2755 goto new_slab; /* Slab failed checks. Next slab needed */
2756
2757 deactivate_slab(s, page, get_freepointer(s, freelist), c);
2758 return freelist;
2759 }
2760
2761 /*
2762 * Another variant that disables interrupts and compensates for possible
2763 * cpu changes by refetching the per cpu area pointer.
2764 */
2765 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2766 unsigned long addr, struct kmem_cache_cpu *c)
2767 {
2768 void *p;
2769 unsigned long flags;
2770
2771 local_irq_save(flags);
2772 #ifdef CONFIG_PREEMPTION
2773 /*
2774 * We may have been preempted and rescheduled on a different
2775 * cpu before disabling interrupts. Need to reload cpu area
2776 * pointer.
2777 */
2778 c = this_cpu_ptr(s->cpu_slab);
2779 #endif
2780
2781 p = ___slab_alloc(s, gfpflags, node, addr, c);
2782 local_irq_restore(flags);
2783 return p;
2784 }
2785
2786 /*
2787 * If the object has been wiped upon free, make sure it's fully initialized by
2788 * zeroing out freelist pointer.
2789 */
2790 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
2791 void *obj)
2792 {
2793 if (unlikely(slab_want_init_on_free(s)) && obj)
2794 memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
2795 0, sizeof(void *));
2796 }
2797
2798 /*
2799 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2800 * have the fastpath folded into their functions. So no function call
2801 * overhead for requests that can be satisfied on the fastpath.
2802 *
2803 * The fastpath works by first checking if the lockless freelist can be used.
2804 * If not then __slab_alloc is called for slow processing.
2805 *
2806 * Otherwise we can simply pick the next object from the lockless free list.
2807 */
2808 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2809 gfp_t gfpflags, int node, unsigned long addr)
2810 {
2811 void *object;
2812 struct kmem_cache_cpu *c;
2813 struct page *page;
2814 unsigned long tid;
2815 struct obj_cgroup *objcg = NULL;
2816
2817 s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
2818 if (!s)
2819 return NULL;
2820 redo:
2821 /*
2822 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2823 * enabled. We may switch back and forth between cpus while
2824 * reading from one cpu area. That does not matter as long
2825 * as we end up on the original cpu again when doing the cmpxchg.
2826 *
2827 * We should guarantee that tid and kmem_cache are retrieved on
2828 * the same cpu. They could come from different cpus if CONFIG_PREEMPTION
2829 * is enabled, so we need to check that they match.
2830 */
2831 do {
2832 tid = this_cpu_read(s->cpu_slab->tid);
2833 c = raw_cpu_ptr(s->cpu_slab);
2834 } while (IS_ENABLED(CONFIG_PREEMPTION) &&
2835 unlikely(tid != READ_ONCE(c->tid)));
2836
2837 /*
2838 * The irqless object alloc/free algorithm used here depends on the order
2839 * in which cpu_slab's data is fetched. tid must be fetched before anything
2840 * else on c, to guarantee that an object and page associated with a
2841 * previous tid won't be used with the current tid. Because tid is fetched
2842 * first, the object and page could be ones associated with the next tid;
2843 * in that case the cmpxchg simply fails and we retry. So, no problem.
2844 */
2845 barrier();
2846
2847 /*
2848 * The transaction ids are globally unique per cpu and per operation on
2849 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
2850 * occurs on the right processor and that there was no operation on the
2851 * linked list in between.
2852 */
2853
2854 object = c->freelist;
2855 page = c->page;
2856 if (unlikely(!object || !page || !node_match(page, node))) {
2857 object = __slab_alloc(s, gfpflags, node, addr, c);
2858 } else {
2859 void *next_object = get_freepointer_safe(s, object);
2860
2861 /*
2862 * The cmpxchg will only match if there was no additional
2863 * operation and if we are on the right processor.
2864 *
2865 * The cmpxchg does the following atomically (without lock
2866 * semantics!)
2867 * 1. Relocate first pointer to the current per cpu area.
2868 * 2. Verify that tid and freelist have not been changed
2869 * 3. If they were not changed replace tid and freelist
2870 *
2871 * Since this is without lock semantics the protection is only
2872 * against code executing on this cpu *not* from access by
2873 * other cpus.
2874 */
2875 if (unlikely(!this_cpu_cmpxchg_double(
2876 s->cpu_slab->freelist, s->cpu_slab->tid,
2877 object, tid,
2878 next_object, next_tid(tid)))) {
2879
2880 note_cmpxchg_failure("slab_alloc", s, tid);
2881 goto redo;
2882 }
2883 prefetch_freepointer(s, next_object);
2884 stat(s, ALLOC_FASTPATH);
2885 }
2886
2887 maybe_wipe_obj_freeptr(s, object);
2888
2889 if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
2890 memset(kasan_reset_tag(object), 0, s->object_size);
2891
2892 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
2893
2894 return object;
2895 }
2896
2897 static __always_inline void *slab_alloc(struct kmem_cache *s,
2898 gfp_t gfpflags, unsigned long addr)
2899 {
2900 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2901 }
2902
2903 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2904 {
2905 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2906
2907 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2908 s->size, gfpflags);
2909
2910 return ret;
2911 }
2912 EXPORT_SYMBOL(kmem_cache_alloc);
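
/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical and
 * not part of this file): a typical caller creates a cache once and then
 * allocates and frees objects from it, hitting the fastpath above in the
 * common case.
 *
 *	struct foo { int a, b; };
 *	static struct kmem_cache *foo_cachep;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *					       SLAB_HWCACHE_ALIGN, NULL);
 *		return foo_cachep ? 0 : -ENOMEM;
 *	}
 *
 *	static void foo_use(void)
 *	{
 *		struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *
 *		if (f)
 *			kmem_cache_free(foo_cachep, f);
 *	}
 */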
2913
2914 #ifdef CONFIG_TRACING
2915 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2916 {
2917 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2918 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2919 ret = kasan_kmalloc(s, ret, size, gfpflags);
2920 return ret;
2921 }
2922 EXPORT_SYMBOL(kmem_cache_alloc_trace);
2923 #endif
2924
2925 #ifdef CONFIG_NUMA
2926 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2927 {
2928 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2929
2930 trace_kmem_cache_alloc_node(_RET_IP_, ret,
2931 s->object_size, s->size, gfpflags, node);
2932
2933 return ret;
2934 }
2935 EXPORT_SYMBOL(kmem_cache_alloc_node);
2936
2937 #ifdef CONFIG_TRACING
2938 void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2939 gfp_t gfpflags,
2940 int node, size_t size)
2941 {
2942 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2943
2944 trace_kmalloc_node(_RET_IP_, ret,
2945 size, s->size, gfpflags, node);
2946
2947 ret = kasan_kmalloc(s, ret, size, gfpflags);
2948 return ret;
2949 }
2950 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2951 #endif
2952 #endif /* CONFIG_NUMA */
2953
2954 /*
2955 * Slow path handling. This may still be called frequently since objects
2956 * have a longer lifetime than the cpu slabs in most processing loads.
2957 *
2958 * So we still attempt to reduce cache line usage. Just take the slab
2959 * lock and free the item. If there is no additional partial page
2960 * handling required then we can return immediately.
2961 */
2962 static void __slab_free(struct kmem_cache *s, struct page *page,
2963 void *head, void *tail, int cnt,
2964 unsigned long addr)
2965
2966 {
2967 void *prior;
2968 int was_frozen;
2969 struct page new;
2970 unsigned long counters;
2971 struct kmem_cache_node *n = NULL;
2972 unsigned long flags;
2973
2974 stat(s, FREE_SLOWPATH);
2975
2976 if (kmem_cache_debug(s) &&
2977 !free_debug_processing(s, page, head, tail, cnt, addr))
2978 return;
2979
2980 do {
2981 if (unlikely(n)) {
2982 spin_unlock_irqrestore(&n->list_lock, flags);
2983 n = NULL;
2984 }
2985 prior = page->freelist;
2986 counters = page->counters;
2987 set_freepointer(s, tail, prior);
2988 new.counters = counters;
2989 was_frozen = new.frozen;
2990 new.inuse -= cnt;
2991 if ((!new.inuse || !prior) && !was_frozen) {
2992
2993 if (kmem_cache_has_cpu_partial(s) && !prior) {
2994
2995 /*
2996 * Slab was on no list before and will be
2997 * partially empty
2998 * We can defer the list move and instead
2999 * freeze it.
3000 */
3001 new.frozen = 1;
3002
3003 } else { /* Needs to be taken off a list */
3004
3005 n = get_node(s, page_to_nid(page));
3006 /*
3007 * Speculatively acquire the list_lock.
3008 * If the cmpxchg does not succeed then we may
3009 * drop the list_lock without any processing.
3010 *
3011 * Otherwise the list_lock will synchronize with
3012 * other processors updating the list of slabs.
3013 */
3014 spin_lock_irqsave(&n->list_lock, flags);
3015
3016 }
3017 }
3018
3019 } while (!cmpxchg_double_slab(s, page,
3020 prior, counters,
3021 head, new.counters,
3022 "__slab_free"));
3023
3024 if (likely(!n)) {
3025
3026 if (likely(was_frozen)) {
3027 /*
3028 * The list lock was not taken therefore no list
3029 * activity can be necessary.
3030 */
3031 stat(s, FREE_FROZEN);
3032 } else if (new.frozen) {
3033 /*
3034 * If we just froze the page then put it onto the
3035 * per cpu partial list.
3036 */
3037 put_cpu_partial(s, page, 1);
3038 stat(s, CPU_PARTIAL_FREE);
3039 }
3040
3041 return;
3042 }
3043
3044 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
3045 goto slab_empty;
3046
3047 /*
3048 * Objects left in the slab. If it was not on the partial list before
3049 * then add it.
3050 */
3051 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
3052 remove_full(s, n, page);
3053 add_partial(n, page, DEACTIVATE_TO_TAIL);
3054 stat(s, FREE_ADD_PARTIAL);
3055 }
3056 spin_unlock_irqrestore(&n->list_lock, flags);
3057 return;
3058
3059 slab_empty:
3060 if (prior) {
3061 /*
3062 * Slab on the partial list.
3063 */
3064 remove_partial(n, page);
3065 stat(s, FREE_REMOVE_PARTIAL);
3066 } else {
3067 /* Slab must be on the full list */
3068 remove_full(s, n, page);
3069 }
3070
3071 spin_unlock_irqrestore(&n->list_lock, flags);
3072 stat(s, FREE_SLAB);
3073 discard_slab(s, page);
3074 }
3075
3076 /*
3077 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
3078 * can perform fastpath freeing without additional function calls.
3079 *
3080 * The fastpath is only possible if we are freeing to the current cpu slab
3081 * of this processor. This is typically the case if we have just allocated
3082 * the item before.
3083 *
3084 * If fastpath is not possible then fall back to __slab_free where we deal
3085 * with all sorts of special processing.
3086 *
3087 * Bulk free of a freelist with several objects (all pointing to the
3088 * same page) is possible by specifying a head and tail ptr, plus an
3089 * objects count (cnt). A bulk free is indicated by the tail pointer being set.
3090 */
3091 static __always_inline void do_slab_free(struct kmem_cache *s,
3092 struct page *page, void *head, void *tail,
3093 int cnt, unsigned long addr)
3094 {
3095 void *tail_obj = tail ? : head;
3096 struct kmem_cache_cpu *c;
3097 unsigned long tid;
3098
3099 memcg_slab_free_hook(s, &head, 1);
3100 redo:
3101 /*
3102 * Determine the current cpu's per cpu slab.
3103 * The cpu may change afterward. However that does not matter since
3104 * data is retrieved via this pointer. If we are on the same cpu
3105 * during the cmpxchg then the free will succeed.
3106 */
3107 do {
3108 tid = this_cpu_read(s->cpu_slab->tid);
3109 c = raw_cpu_ptr(s->cpu_slab);
3110 } while (IS_ENABLED(CONFIG_PREEMPTION) &&
3111 unlikely(tid != READ_ONCE(c->tid)));
3112
3113 /* Same with comment on barrier() in slab_alloc_node() */
3114 barrier();
3115
3116 if (likely(page == c->page)) {
3117 void **freelist = READ_ONCE(c->freelist);
3118
3119 set_freepointer(s, tail_obj, freelist);
3120
3121 if (unlikely(!this_cpu_cmpxchg_double(
3122 s->cpu_slab->freelist, s->cpu_slab->tid,
3123 freelist, tid,
3124 head, next_tid(tid)))) {
3125
3126 note_cmpxchg_failure("slab_free", s, tid);
3127 goto redo;
3128 }
3129 stat(s, FREE_FASTPATH);
3130 } else
3131 __slab_free(s, page, head, tail_obj, cnt, addr);
3132
3133 }
3134
3135 static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
3136 void *head, void *tail, int cnt,
3137 unsigned long addr)
3138 {
3139 /*
3140 * With KASAN enabled slab_free_freelist_hook modifies the freelist
3141 * to remove objects, whose reuse must be delayed.
3142 */
3143 if (slab_free_freelist_hook(s, &head, &tail))
3144 do_slab_free(s, page, head, tail, cnt, addr);
3145 }
3146
3147 #ifdef CONFIG_KASAN_GENERIC
3148 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
3149 {
3150 do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
3151 }
3152 #endif
3153
3154 void kmem_cache_free(struct kmem_cache *s, void *x)
3155 {
3156 s = cache_from_obj(s, x);
3157 if (!s)
3158 return;
3159 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
3160 trace_kmem_cache_free(_RET_IP_, x);
3161 }
3162 EXPORT_SYMBOL(kmem_cache_free);
3163
3164 struct detached_freelist {
3165 struct page *page;
3166 void *tail;
3167 void *freelist;
3168 int cnt;
3169 struct kmem_cache *s;
3170 };
3171
3172 /*
3173 * This function progressively scans the array of free objects (with
3174 * a limited look ahead) and extracts the objects belonging to the same
3175 * page. It builds a detached freelist directly within the given
3176 * page/objects. This can happen without any need for
3177 * synchronization, because the objects are owned by the running process.
3178 * The freelist is built up as a single linked list in the objects.
3179 * The idea is that this detached freelist can then be bulk
3180 * transferred to the real freelist(s), requiring only a single
3181 * synchronization primitive. Look ahead in the array is limited for
3182 * performance reasons.
3183 */
3184 static inline
3185 int build_detached_freelist(struct kmem_cache *s, size_t size,
3186 void **p, struct detached_freelist *df)
3187 {
3188 size_t first_skipped_index = 0;
3189 int lookahead = 3;
3190 void *object;
3191 struct page *page;
3192
3193 /* Always re-init detached_freelist */
3194 df->page = NULL;
3195
3196 do {
3197 object = p[--size];
3198 /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
3199 } while (!object && size);
3200
3201 if (!object)
3202 return 0;
3203
3204 page = virt_to_head_page(object);
3205 if (!s) {
3206 /* Handle kmalloc'ed objects */
3207 if (unlikely(!PageSlab(page))) {
3208 BUG_ON(!PageCompound(page));
3209 kfree_hook(object);
3210 __free_pages(page, compound_order(page));
3211 p[size] = NULL; /* mark object processed */
3212 return size;
3213 }
3214 /* Derive kmem_cache from object */
3215 df->s = page->slab_cache;
3216 } else {
3217 df->s = cache_from_obj(s, object); /* Support for memcg */
3218 }
3219
3220 /* Start new detached freelist */
3221 df->page = page;
3222 set_freepointer(df->s, object, NULL);
3223 df->tail = object;
3224 df->freelist = object;
3225 p[size] = NULL; /* mark object processed */
3226 df->cnt = 1;
3227
3228 while (size) {
3229 object = p[--size];
3230 if (!object)
3231 continue; /* Skip processed objects */
3232
3233 /* df->page is always set at this point */
3234 if (df->page == virt_to_head_page(object)) {
3235 /* Opportunistically build the freelist */
3236 set_freepointer(df->s, object, df->freelist);
3237 df->freelist = object;
3238 df->cnt++;
3239 p[size] = NULL; /* mark object processed */
3240
3241 continue;
3242 }
3243
3244 /* Limit look ahead search */
3245 if (!--lookahead)
3246 break;
3247
3248 if (!first_skipped_index)
3249 first_skipped_index = size + 1;
3250 }
3251
3252 return first_skipped_index;
3253 }
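
/*
 * Worked example (illustrative only, with hypothetical objects): for
 * p[] = { A1, B1, A2, A3, B2 }, where the A objects live on page A and the
 * B objects on page B, the first call scans from the end, starts a detached
 * freelist for page B at B2, links in B1, and stops once the lookahead
 * budget of 3 mismatches is spent, returning first_skipped_index = 4.
 * kmem_cache_free_bulk() then frees B2 and B1 with a single slab_free()
 * call and re-invokes this function with size = 4, which collects A3, A2
 * and A1 (skipping the already NULLed slot) into a second detached
 * freelist for page A.
 */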
3254
3255 /* Note that interrupts must be enabled when calling this function. */
3256 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3257 {
3258 if (WARN_ON(!size))
3259 return;
3260
3261 memcg_slab_free_hook(s, p, size);
3262 do {
3263 struct detached_freelist df;
3264
3265 size = build_detached_freelist(s, size, p, &df);
3266 if (!df.page)
3267 continue;
3268
3269 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
3270 } while (likely(size));
3271 }
3272 EXPORT_SYMBOL(kmem_cache_free_bulk);
3273
3274 /* Note that interrupts must be enabled when calling this function. */
3275 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3276 void **p)
3277 {
3278 struct kmem_cache_cpu *c;
3279 int i;
3280 struct obj_cgroup *objcg = NULL;
3281
3282 /* memcg and kmem_cache debug support */
3283 s = slab_pre_alloc_hook(s, &objcg, size, flags);
3284 if (unlikely(!s))
3285 return false;
3286 /*
3287 * Drain objects in the per cpu slab, while disabling local
3288 * IRQs, which protects against PREEMPT and interrupts
3289 * handlers invoking normal fastpath.
3290 */
3291 local_irq_disable();
3292 c = this_cpu_ptr(s->cpu_slab);
3293
3294 for (i = 0; i < size; i++) {
3295 void *object = c->freelist;
3296
3297 if (unlikely(!object)) {
3298 /*
3299 * We may have removed an object from c->freelist using
3300 * the fastpath in the previous iteration; in that case,
3301 * c->tid has not been bumped yet.
3302 * Since ___slab_alloc() may reenable interrupts while
3303 * allocating memory, we should bump c->tid now.
3304 */
3305 c->tid = next_tid(c->tid);
3306
3307 /*
3308 * Invoking the slow path likely has the side effect
3309 * of re-populating the per CPU c->freelist.
3310 */
3311 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
3312 _RET_IP_, c);
3313 if (unlikely(!p[i]))
3314 goto error;
3315
3316 c = this_cpu_ptr(s->cpu_slab);
3317 maybe_wipe_obj_freeptr(s, p[i]);
3318
3319 continue; /* goto for-loop */
3320 }
3321 c->freelist = get_freepointer(s, object);
3322 p[i] = object;
3323 maybe_wipe_obj_freeptr(s, p[i]);
3324 }
3325 c->tid = next_tid(c->tid);
3326 local_irq_enable();
3327
3328 /* Clear memory outside IRQ disabled fastpath loop */
3329 if (unlikely(slab_want_init_on_alloc(flags, s))) {
3330 int j;
3331
3332 for (j = 0; j < i; j++)
3333 memset(kasan_reset_tag(p[j]), 0, s->object_size);
3334 }
3335
3336 /* memcg and kmem_cache debug support */
3337 slab_post_alloc_hook(s, objcg, flags, size, p);
3338 return i;
3339 error:
3340 local_irq_enable();
3341 slab_post_alloc_hook(s, objcg, flags, i, p);
3342 __kmem_cache_free_bulk(s, i, p);
3343 return 0;
3344 }
3345 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
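
/*
 * Usage sketch for the bulk API (illustrative only; "foo_cachep" and
 * "objs" are hypothetical): callers that need many objects at once can
 * amortize the fastpath overhead by allocating and freeing them in one
 * call.
 *
 *	void *objs[16];
 *	int got = kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL,
 *					ARRAY_SIZE(objs), objs);
 *
 *	if (got)
 *		kmem_cache_free_bulk(foo_cachep, got, objs);
 *
 * kmem_cache_alloc_bulk() either fills all requested slots or returns 0
 * after freeing anything it had already allocated, so the caller does not
 * need to handle partial results.
 */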
3346
3347
3348 /*
3349 * Object placement in a slab is made very easy because we always start at
3350 * offset 0. If we tune the size of the object to the alignment then we can
3351 * get the required alignment by putting one properly sized object after
3352 * another.
3353 *
3354 * Notice that the allocation order determines the sizes of the per cpu
3355 * caches. Each processor always has one slab available for allocations.
3356 * Increasing the allocation order reduces the number of times that slabs
3357 * must be moved on and off the partial lists and is therefore a factor in
3358 * locking overhead.
3359 */
3360
3361 /*
3362 * Minimum / Maximum order of slab pages. This influences locking overhead
3363 * and slab fragmentation. A higher order reduces the number of partial slabs
3364 * and increases the number of allocations possible without having to
3365 * take the list_lock.
3366 */
3367 static unsigned int slub_min_order;
3368 static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3369 static unsigned int slub_min_objects;
3370
3371 /*
3372 * Calculate the order of allocation given a slab object size.
3373 *
3374 * The order of allocation has significant impact on performance and other
3375 * system components. Generally order 0 allocations should be preferred since
3376 * order 0 does not cause fragmentation in the page allocator. Larger objects
3377 * can be problematic to put into order 0 slabs because there may be too much
3378 * unused space left. We go to a higher order if more than 1/16th of the slab
3379 * would be wasted.
3380 *
3381 * In order to reach satisfactory performance we must ensure that a minimum
3382 * number of objects is in one slab. Otherwise we may generate too much
3383 * activity on the partial lists which requires taking the list_lock. This is
3384 * less of a concern for large slabs, though, as they are rarely used.
3385 *
3386 * slub_max_order specifies the order where we begin to stop considering the
3387 * number of objects in a slab as critical. If we reach slub_max_order then
3388 * we try to keep the page order as low as possible. So we accept more waste
3389 * of space in favor of a small page order.
3390 *
3391 * Higher order allocations also allow the placement of more objects in a
3392 * slab and thereby reduce object handling overhead. If the user has
3393 * requested a higher minimum order then we start with that one instead of
3394 * the smallest order which will fit the object.
3395 */
3396 static inline unsigned int slab_order(unsigned int size,
3397 unsigned int min_objects, unsigned int max_order,
3398 unsigned int fract_leftover)
3399 {
3400 unsigned int min_order = slub_min_order;
3401 unsigned int order;
3402
3403 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
3404 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3405
3406 for (order = max(min_order, (unsigned int)get_order(min_objects * size));
3407 order <= max_order; order++) {
3408
3409 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3410 unsigned int rem;
3411
3412 rem = slab_size % size;
3413
3414 if (rem <= slab_size / fract_leftover)
3415 break;
3416 }
3417
3418 return order;
3419 }
3420
3421 static inline int calculate_order(unsigned int size)
3422 {
3423 unsigned int order;
3424 unsigned int min_objects;
3425 unsigned int max_objects;
3426 unsigned int nr_cpus;
3427
3428 /*
3429 * Attempt to find best configuration for a slab. This
3430 * works by first attempting to generate a layout with
3431 * the best configuration and backing off gradually.
3432 *
3433 * First we increase the acceptable waste in a slab. Then
3434 * we reduce the minimum objects required in a slab.
3435 */
3436 min_objects = slub_min_objects;
3437 if (!min_objects) {
3438 /*
3439 * Some architectures will only update present cpus when
3440 * onlining them, so don't trust the number if it's just 1. But
3441 * we also don't want to use nr_cpu_ids always, as on some other
3442 * architectures, there can be many possible cpus, but never
3443 * onlined. Here we compromise between trying to avoid too high
3444 * order on systems that appear larger than they are, and too
3445 * low order on systems that appear smaller than they are.
3446 */
3447 nr_cpus = num_present_cpus();
3448 if (nr_cpus <= 1)
3449 nr_cpus = nr_cpu_ids;
3450 min_objects = 4 * (fls(nr_cpus) + 1);
3451 }
3452 max_objects = order_objects(slub_max_order, size);
3453 min_objects = min(min_objects, max_objects);
3454
3455 while (min_objects > 1) {
3456 unsigned int fraction;
3457
3458 fraction = 16;
3459 while (fraction >= 4) {
3460 order = slab_order(size, min_objects,
3461 slub_max_order, fraction);
3462 if (order <= slub_max_order)
3463 return order;
3464 fraction /= 2;
3465 }
3466 min_objects--;
3467 }
3468
3469 /*
3470 * We were unable to place multiple objects in a slab. Now
3471 * let's see if we can place a single object there.
3472 */
3473 order = slab_order(size, 1, slub_max_order, 1);
3474 if (order <= slub_max_order)
3475 return order;
3476
3477 /*
3478 * Doh, this slab cannot be placed using slub_max_order.
3479 */
3480 order = slab_order(size, 1, MAX_ORDER, 1);
3481 if (order < MAX_ORDER)
3482 return order;
3483 return -ENOSYS;
3484 }
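
/*
 * Worked example (illustrative numbers only): with 4 KiB pages, four
 * present cpus and no slub_* boot options, a 700 byte object gives
 * min_objects = 4 * (fls(4) + 1) = 16. slab_order() then starts at
 * get_order(16 * 700) = 2; an order-2 slab holds 16384 / 700 = 23 objects
 * and wastes 16384 % 700 = 284 bytes, which is below the 1/16th limit of
 * 1024 bytes, so calculate_order() settles on order 2.
 */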
3485
3486 static void
3487 init_kmem_cache_node(struct kmem_cache_node *n)
3488 {
3489 n->nr_partial = 0;
3490 spin_lock_init(&n->list_lock);
3491 INIT_LIST_HEAD(&n->partial);
3492 #ifdef CONFIG_SLUB_DEBUG
3493 atomic_long_set(&n->nr_slabs, 0);
3494 atomic_long_set(&n->total_objects, 0);
3495 INIT_LIST_HEAD(&n->full);
3496 #endif
3497 }
3498
3499 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
3500 {
3501 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
3502 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
3503
3504 /*
3505 * Must align to double word boundary for the double cmpxchg
3506 * instructions to work; see __pcpu_double_call_return_bool().
3507 */
3508 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3509 2 * sizeof(void *));
3510
3511 if (!s->cpu_slab)
3512 return 0;
3513
3514 init_kmem_cache_cpus(s);
3515
3516 return 1;
3517 }
3518
3519 static struct kmem_cache *kmem_cache_node;
3520
3521 /*
3522 * No kmalloc_node yet so do it by hand. We know that this is the first
3523 * slab on the node for this slabcache. There are no concurrent accesses
3524 * possible.
3525 *
3526 * Note that this function only works on the kmem_cache_node
3527 * when allocating for the kmem_cache_node. This is used for bootstrapping
3528 * memory on a fresh node that has no slab structures yet.
3529 */
3530 static void early_kmem_cache_node_alloc(int node)
3531 {
3532 struct page *page;
3533 struct kmem_cache_node *n;
3534
3535 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
3536
3537 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3538
3539 BUG_ON(!page);
3540 if (page_to_nid(page) != node) {
3541 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3542 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3543 }
3544
3545 n = page->freelist;
3546 BUG_ON(!n);
3547 #ifdef CONFIG_SLUB_DEBUG
3548 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
3549 init_tracking(kmem_cache_node, n);
3550 #endif
3551 n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
3552 GFP_KERNEL);
3553 page->freelist = get_freepointer(kmem_cache_node, n);
3554 page->inuse = 1;
3555 page->frozen = 0;
3556 kmem_cache_node->node[node] = n;
3557 init_kmem_cache_node(n);
3558 inc_slabs_node(kmem_cache_node, node, page->objects);
3559
3560 /*
3561 * No locks need to be taken here as it has just been
3562 * initialized and there is no concurrent access.
3563 */
3564 __add_partial(n, page, DEACTIVATE_TO_HEAD);
3565 }
3566
3567 static void free_kmem_cache_nodes(struct kmem_cache *s)
3568 {
3569 int node;
3570 struct kmem_cache_node *n;
3571
3572 for_each_kmem_cache_node(s, node, n) {
3573 s->node[node] = NULL;
3574 kmem_cache_free(kmem_cache_node, n);
3575 }
3576 }
3577
3578 void __kmem_cache_release(struct kmem_cache *s)
3579 {
3580 cache_random_seq_destroy(s);
3581 free_percpu(s->cpu_slab);
3582 free_kmem_cache_nodes(s);
3583 }
3584
3585 static int init_kmem_cache_nodes(struct kmem_cache *s)
3586 {
3587 int node;
3588
3589 for_each_node_state(node, N_NORMAL_MEMORY) {
3590 struct kmem_cache_node *n;
3591
3592 if (slab_state == DOWN) {
3593 early_kmem_cache_node_alloc(node);
3594 continue;
3595 }
3596 n = kmem_cache_alloc_node(kmem_cache_node,
3597 GFP_KERNEL, node);
3598
3599 if (!n) {
3600 free_kmem_cache_nodes(s);
3601 return 0;
3602 }
3603
3604 init_kmem_cache_node(n);
3605 s->node[node] = n;
3606 }
3607 return 1;
3608 }
3609
3610 static void set_min_partial(struct kmem_cache *s, unsigned long min)
3611 {
3612 if (min < MIN_PARTIAL)
3613 min = MIN_PARTIAL;
3614 else if (min > MAX_PARTIAL)
3615 min = MAX_PARTIAL;
3616 s->min_partial = min;
3617 }
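
/*
 * For example (illustrative, using the MIN_PARTIAL/MAX_PARTIAL clamps
 * defined earlier in this file): kmem_cache_open() below passes
 * ilog2(s->size) / 2, so a 256 byte cache asks for 8 / 2 = 4 and is
 * clamped up to MIN_PARTIAL, while a 4096 byte cache gets a floor of
 * ilog2(4096) / 2 = 6, meaning up to that many empty slabs may be kept
 * on each node's partial list instead of being freed.
 */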
3618
3619 static void set_cpu_partial(struct kmem_cache *s)
3620 {
3621 #ifdef CONFIG_SLUB_CPU_PARTIAL
3622 /*
3623 * cpu_partial determines the maximum number of objects kept in the
3624 * per cpu partial lists of a processor.
3625 *
3626 * Per cpu partial lists mainly contain slabs that just have one
3627 * object freed. If they are used for allocation then they can be
3628 * filled up again with minimal effort. The slab will never hit the
3629 * per node partial lists and therefore no locking will be required.
3630 *
3631 * This setting also determines
3632 *
3633 * A) The number of objects from per cpu partial slabs dumped to the
3634 * per node list when we reach the limit.
3635 * B) The number of objects in cpu partial slabs to extract from the
3636 * per node list when we run out of per cpu objects. We only fetch
3637 * 50% to keep some capacity around for frees.
3638 */
3639 if (!kmem_cache_has_cpu_partial(s))
3640 slub_set_cpu_partial(s, 0);
3641 else if (s->size >= PAGE_SIZE)
3642 slub_set_cpu_partial(s, 2);
3643 else if (s->size >= 1024)
3644 slub_set_cpu_partial(s, 6);
3645 else if (s->size >= 256)
3646 slub_set_cpu_partial(s, 13);
3647 else
3648 slub_set_cpu_partial(s, 30);
3649 #endif
3650 }
3651
3652 /*
3653 * calculate_sizes() determines the order and the distribution of data within
3654 * a slab object.
3655 */
3656 static int calculate_sizes(struct kmem_cache *s, int forced_order)
3657 {
3658 slab_flags_t flags = s->flags;
3659 unsigned int size = s->object_size;
3660 unsigned int freepointer_area;
3661 unsigned int order;
3662
3663 /*
3664 * Round up object size to the next word boundary. We can only
3665 * place the free pointer at word boundaries and this determines
3666 * the possible location of the free pointer.
3667 */
3668 size = ALIGN(size, sizeof(void *));
3669 /*
3670 * This is the area of the object where a freepointer can be
3671 * safely written. If redzoning adds more to the inuse size, we
3672 * can't use that portion for writing the freepointer, so
3673 * s->offset must be limited within this for the general case.
3674 */
3675 freepointer_area = size;
3676
3677 #ifdef CONFIG_SLUB_DEBUG
3678 /*
3679 * Determine if we can poison the object itself. If the user of
3680 * the slab may touch the object after free or before allocation
3681 * then we should never poison the object itself.
3682 */
3683 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
3684 !s->ctor)
3685 s->flags |= __OBJECT_POISON;
3686 else
3687 s->flags &= ~__OBJECT_POISON;
3688
3689
3690 /*
3691 * If we are Redzoning then check if there is some space between the
3692 * end of the object and the free pointer. If not then add an
3693 * additional word to have some bytes to store Redzone information.
3694 */
3695 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
3696 size += sizeof(void *);
3697 #endif
3698
3699 /*
3700 * With that we have determined the number of bytes in actual use
3701 * by the object. This is the potential offset to the free pointer.
3702 */
3703 s->inuse = size;
3704
3705 if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
3706 s->ctor)) {
3707 /*
3708 * Relocate free pointer after the object if it is not
3709 * permitted to overwrite the first word of the object on
3710 * kmem_cache_free.
3711 *
3712 * This is the case if we do RCU, have a constructor or
3713 * destructor or are poisoning the objects.
3714 *
3715 * The assumption that s->offset >= s->inuse means free
3716 * pointer is outside of the object is used in the
3717 * freeptr_outside_object() function. If that is no
3718 * longer true, the function needs to be modified.
3719 */
3720 s->offset = size;
3721 size += sizeof(void *);
3722 } else if (freepointer_area > sizeof(void *)) {
3723 /*
3724 * Store freelist pointer near middle of object to keep
3725 * it away from the edges of the object to avoid small
3726 * sized over/underflows from neighboring allocations.
3727 */
3728 s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
3729 }
3730
3731 #ifdef CONFIG_SLUB_DEBUG
3732 if (flags & SLAB_STORE_USER)
3733 /*
3734 * Need to store information about allocs and frees after
3735 * the object.
3736 */
3737 size += 2 * sizeof(struct track);
3738 #endif
3739
3740 kasan_cache_create(s, &size, &s->flags);
3741 #ifdef CONFIG_SLUB_DEBUG
3742 if (flags & SLAB_RED_ZONE) {
3743 /*
3744 * Add some empty padding so that we can catch
3745 * overwrites from earlier objects rather than let
3746 * tracking information or the free pointer be
3747 * corrupted if a user writes before the start
3748 * of the object.
3749 */
3750 size += sizeof(void *);
3751
3752 s->red_left_pad = sizeof(void *);
3753 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3754 size += s->red_left_pad;
3755 }
3756 #endif
3757
3758 /*
3759 * SLUB stores one object immediately after another beginning from
3760 * offset 0. In order to align the objects we have to simply size
3761 * each object to conform to the alignment.
3762 */
3763 size = ALIGN(size, s->align);
3764 s->size = size;
3765 s->reciprocal_size = reciprocal_value(size);
3766 if (forced_order >= 0)
3767 order = forced_order;
3768 else
3769 order = calculate_order(size);
3770
3771 if ((int)order < 0)
3772 return 0;
3773
3774 s->allocflags = 0;
3775 if (order)
3776 s->allocflags |= __GFP_COMP;
3777
3778 if (s->flags & SLAB_CACHE_DMA)
3779 s->allocflags |= GFP_DMA;
3780
3781 if (s->flags & SLAB_CACHE_DMA32)
3782 s->allocflags |= GFP_DMA32;
3783
3784 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3785 s->allocflags |= __GFP_RECLAIMABLE;
3786
3787 /*
3788 * Determine the number of objects per slab
3789 */
3790 s->oo = oo_make(order, size);
3791 s->min = oo_make(get_order(size), size);
3792 if (oo_objects(s->oo) > oo_objects(s->max))
3793 s->max = s->oo;
3794
3795 return !!oo_objects(s->oo);
3796 }
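
/*
 * Layout example (illustrative only; 64-bit, no debug flags, no ctor and
 * no SLAB_TYPESAFE_BY_RCU): a 24 byte object is already a multiple of
 * sizeof(void *), so size = inuse = 24 and the free pointer is stored
 * inside the object at s->offset = ALIGN(24 / 2, 8) = 16, overlapping the
 * object only while it sits on a freelist. With SLAB_TYPESAFE_BY_RCU, a
 * constructor or poisoning, the free pointer is instead placed after the
 * object at offset 24, growing s->size to 32.
 */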
3797
3798 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
3799 {
3800 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
3801 #ifdef CONFIG_SLAB_FREELIST_HARDENED
3802 s->random = get_random_long();
3803 #endif
3804
3805 if (!calculate_sizes(s, -1))
3806 goto error;
3807 if (disable_higher_order_debug) {
3808 /*
3809 * Disable debugging flags that store metadata if the min slab
3810 * order increased.
3811 */
3812 if (get_order(s->size) > get_order(s->object_size)) {
3813 s->flags &= ~DEBUG_METADATA_FLAGS;
3814 s->offset = 0;
3815 if (!calculate_sizes(s, -1))
3816 goto error;
3817 }
3818 }
3819
3820 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3821 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3822 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
3823 /* Enable fast mode */
3824 s->flags |= __CMPXCHG_DOUBLE;
3825 #endif
3826
3827 /*
3828 * The larger the object size is, the more pages we want on the partial
3829 * list to avoid pounding the page allocator excessively.
3830 */
3831 set_min_partial(s, ilog2(s->size) / 2);
3832
3833 set_cpu_partial(s);
3834
3835 #ifdef CONFIG_NUMA
3836 s->remote_node_defrag_ratio = 1000;
3837 #endif
3838
3839 /* Initialize the pre-computed randomized freelist if slab is up */
3840 if (slab_state >= UP) {
3841 if (init_cache_random_seq(s))
3842 goto error;
3843 }
3844
3845 if (!init_kmem_cache_nodes(s))
3846 goto error;
3847
3848 if (alloc_kmem_cache_cpus(s))
3849 return 0;
3850
3851 free_kmem_cache_nodes(s);
3852 error:
3853 return -EINVAL;
3854 }
3855
3856 static void list_slab_objects(struct kmem_cache *s, struct page *page,
3857 const char *text)
3858 {
3859 #ifdef CONFIG_SLUB_DEBUG
3860 void *addr = page_address(page);
3861 unsigned long *map;
3862 void *p;
3863
3864 slab_err(s, page, text, s->name);
3865 slab_lock(page);
3866
3867 map = get_map(s, page);
3868 for_each_object(p, s, addr, page->objects) {
3869
3870 if (!test_bit(__obj_to_index(s, addr, p), map)) {
3871 pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
3872 print_tracking(s, p);
3873 }
3874 }
3875 put_map(map);
3876 slab_unlock(page);
3877 #endif
3878 }
3879
3880 /*
3881 * Attempt to free all partial slabs on a node.
3882 * This is called from __kmem_cache_shutdown(). We must take list_lock
3883 * because sysfs file might still access partial list after the shutdowning.
3884 */
3885 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3886 {
3887 LIST_HEAD(discard);
3888 struct page *page, *h;
3889
3890 BUG_ON(irqs_disabled());
3891 spin_lock_irq(&n->list_lock);
3892 list_for_each_entry_safe(page, h, &n->partial, slab_list) {
3893 if (!page->inuse) {
3894 remove_partial(n, page);
3895 list_add(&page->slab_list, &discard);
3896 } else {
3897 list_slab_objects(s, page,
3898 "Objects remaining in %s on __kmem_cache_shutdown()");
3899 }
3900 }
3901 spin_unlock_irq(&n->list_lock);
3902
3903 list_for_each_entry_safe(page, h, &discard, slab_list)
3904 discard_slab(s, page);
3905 }
3906
3907 bool __kmem_cache_empty(struct kmem_cache *s)
3908 {
3909 int node;
3910 struct kmem_cache_node *n;
3911
3912 for_each_kmem_cache_node(s, node, n)
3913 if (n->nr_partial || slabs_node(s, node))
3914 return false;
3915 return true;
3916 }
3917
3918 /*
3919 * Release all resources used by a slab cache.
3920 */
3921 int __kmem_cache_shutdown(struct kmem_cache *s)
3922 {
3923 int node;
3924 struct kmem_cache_node *n;
3925
3926 flush_all(s);
3927 /* Attempt to free all objects */
3928 for_each_kmem_cache_node(s, node, n) {
3929 free_partial(s, n);
3930 if (n->nr_partial || slabs_node(s, node))
3931 return 1;
3932 }
3933 return 0;
3934 }
3935
3936 /********************************************************************
3937 * Kmalloc subsystem
3938 *******************************************************************/
3939
3940 static int __init setup_slub_min_order(char *str)
3941 {
3942 get_option(&str, (int *)&slub_min_order);
3943
3944 return 1;
3945 }
3946
3947 __setup("slub_min_order=", setup_slub_min_order);
3948
3949 static int __init setup_slub_max_order(char *str)
3950 {
3951 get_option(&str, (int *)&slub_max_order);
3952 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
3953
3954 return 1;
3955 }
3956
3957 __setup("slub_max_order=", setup_slub_max_order);
3958
3959 static int __init setup_slub_min_objects(char *str)
3960 {
3961 get_option(&str, (int *)&slub_min_objects);
3962
3963 return 1;
3964 }
3965
3966 __setup("slub_min_objects=", setup_slub_min_objects);
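/*
 * Usage sketch (values are illustrative): these tunables are passed on
 * the kernel command line, e.g.
 *
 *	slub_min_order=1 slub_max_order=3 slub_min_objects=16
 *
 * to bias calculate_order() towards larger slabs with more objects.
 */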
3967
3968 void *__kmalloc(size_t size, gfp_t flags)
3969 {
3970 struct kmem_cache *s;
3971 void *ret;
3972
3973 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3974 return kmalloc_large(size, flags);
3975
3976 s = kmalloc_slab(size, flags);
3977
3978 if (unlikely(ZERO_OR_NULL_PTR(s)))
3979 return s;
3980
3981 ret = slab_alloc(s, flags, _RET_IP_);
3982
3983 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3984
3985 ret = kasan_kmalloc(s, ret, size, flags);
3986
3987 return ret;
3988 }
3989 EXPORT_SYMBOL(__kmalloc);
3990
3991 #ifdef CONFIG_NUMA
3992 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3993 {
3994 struct page *page;
3995 void *ptr = NULL;
3996 unsigned int order = get_order(size);
3997
3998 flags |= __GFP_COMP;
3999 page = alloc_pages_node(node, flags, order);
4000 if (page) {
4001 ptr = page_address(page);
4002 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
4003 PAGE_SIZE << order);
4004 }
4005
4006 return kmalloc_large_node_hook(ptr, size, flags);
4007 }
4008
4009 void *__kmalloc_node(size_t size, gfp_t flags, int node)
4010 {
4011 struct kmem_cache *s;
4012 void *ret;
4013
4014 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4015 ret = kmalloc_large_node(size, flags, node);
4016
4017 trace_kmalloc_node(_RET_IP_, ret,
4018 size, PAGE_SIZE << get_order(size),
4019 flags, node);
4020
4021 return ret;
4022 }
4023
4024 s = kmalloc_slab(size, flags);
4025
4026 if (unlikely(ZERO_OR_NULL_PTR(s)))
4027 return s;
4028
4029 ret = slab_alloc_node(s, flags, node, _RET_IP_);
4030
4031 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
4032
4033 ret = kasan_kmalloc(s, ret, size, flags);
4034
4035 return ret;
4036 }
4037 EXPORT_SYMBOL(__kmalloc_node);
4038 #endif /* CONFIG_NUMA */
4039
4040 #ifdef CONFIG_HARDENED_USERCOPY
4041 /*
4042 * Rejects incorrectly sized objects and objects that are to be copied
4043 * to/from userspace but do not fall entirely within the containing slab
4044 * cache's usercopy region.
4045 *
4046 * Returns normally if the check passes; otherwise the violation is
4047 * reported via usercopy_warn() or usercopy_abort().
4048 */
4049 void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4050 bool to_user)
4051 {
4052 struct kmem_cache *s;
4053 unsigned int offset;
4054 size_t object_size;
4055
4056 ptr = kasan_reset_tag(ptr);
4057
4058 /* Find object and usable object size. */
4059 s = page->slab_cache;
4060
4061 /* Reject impossible pointers. */
4062 if (ptr < page_address(page))
4063 usercopy_abort("SLUB object not in SLUB page?!", NULL,
4064 to_user, 0, n);
4065
4066 /* Find offset within object. */
4067 offset = (ptr - page_address(page)) % s->size;
4068
4069 /* Adjust for redzone and reject if within the redzone. */
4070 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
4071 if (offset < s->red_left_pad)
4072 usercopy_abort("SLUB object in left red zone",
4073 s->name, to_user, offset, n);
4074 offset -= s->red_left_pad;
4075 }
4076
4077 /* Allow address range falling entirely within usercopy region. */
4078 if (offset >= s->useroffset &&
4079 offset - s->useroffset <= s->usersize &&
4080 n <= s->useroffset - offset + s->usersize)
4081 return;
4082
4083 /*
4084 * If the copy is still within the allocated object, produce
4085 * a warning instead of rejecting the copy. This is intended
4086 * to be a temporary method to find any missing usercopy
4087 * whitelists.
4088 */
4089 object_size = slab_ksize(s);
4090 if (usercopy_fallback &&
4091 offset <= object_size && n <= object_size - offset) {
4092 usercopy_warn("SLUB object", s->name, to_user, offset, n);
4093 return;
4094 }
4095
4096 usercopy_abort("SLUB object", s->name, to_user, offset, n);
4097 }
4098 #endif /* CONFIG_HARDENED_USERCOPY */
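/*
 * Illustrative sketch (not part of this file): a cache whose objects may
 * be partially copied to/from user space declares the permitted window at
 * creation time with kmem_cache_create_usercopy(); only bytes inside that
 * window pass the __check_heap_object() test above. The struct and names
 * below are made up for the example.
 *
 *	struct foo {
 *		spinlock_t lock;
 *		char payload[128];
 *	};
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *			0, SLAB_HWCACHE_ALIGN,
 *			offsetof(struct foo, payload),
 *			sizeof_field(struct foo, payload), NULL);
 */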
4099
4100 size_t __ksize(const void *object)
4101 {
4102 struct page *page;
4103
4104 if (unlikely(object == ZERO_SIZE_PTR))
4105 return 0;
4106
4107 page = virt_to_head_page(object);
4108
4109 if (unlikely(!PageSlab(page))) {
4110 WARN_ON(!PageCompound(page));
4111 return page_size(page);
4112 }
4113
4114 return slab_ksize(page->slab_cache);
4115 }
4116 EXPORT_SYMBOL(__ksize);
4117
4118 void kfree(const void *x)
4119 {
4120 struct page *page;
4121 void *object = (void *)x;
4122
4123 trace_kfree(_RET_IP_, x);
4124
4125 if (unlikely(ZERO_OR_NULL_PTR(x)))
4126 return;
4127
4128 page = virt_to_head_page(x);
4129 if (unlikely(!PageSlab(page))) {
4130 unsigned int order = compound_order(page);
4131
4132 BUG_ON(!PageCompound(page));
4133 kfree_hook(object);
4134 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
4135 -(PAGE_SIZE << order));
4136 __free_pages(page, order);
4137 return;
4138 }
4139 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
4140 }
4141 EXPORT_SYMBOL(kfree);
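/*
 * Usage sketch (illustrative): a simple kmalloc()/kfree() round trip.
 * Requests larger than KMALLOC_MAX_CACHE_SIZE bypass the kmalloc caches
 * entirely; kfree() recognises the resulting !PageSlab compound page
 * above and hands it straight back to the page allocator.
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);	// served from kmalloc-64
 *	if (buf) {
 *		pr_debug("usable size: %zu\n", ksize(buf));
 *		kfree(buf);
 *	}
 */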
4142
4143 #define SHRINK_PROMOTE_MAX 32
4144
4145 /*
4146 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
4147 * up most to the head of the partial lists. New allocations will then
4148 * fill those up and thus they can be removed from the partial lists.
4149 *
4150 * The slabs with the least items are placed last. This results in them
4151 * being allocated from last, which increases the chance that the
4152 * remaining objects in them are freed.
4153 */
4154 int __kmem_cache_shrink(struct kmem_cache *s)
4155 {
4156 int node;
4157 int i;
4158 struct kmem_cache_node *n;
4159 struct page *page;
4160 struct page *t;
4161 struct list_head discard;
4162 struct list_head promote[SHRINK_PROMOTE_MAX];
4163 unsigned long flags;
4164 int ret = 0;
4165
4166 flush_all(s);
4167 for_each_kmem_cache_node(s, node, n) {
4168 INIT_LIST_HEAD(&discard);
4169 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
4170 INIT_LIST_HEAD(promote + i);
4171
4172 spin_lock_irqsave(&n->list_lock, flags);
4173
4174 /*
4175 * Build lists of slabs to discard or promote.
4176 *
4177 * Note that concurrent frees may occur while we hold the
4178 * list_lock. page->inuse here is the upper limit.
4179 */
4180 list_for_each_entry_safe(page, t, &n->partial, slab_list) {
4181 int free = page->objects - page->inuse;
4182
4183 /* Do not reread page->inuse */
4184 barrier();
4185
4186 /* We do not keep full slabs on the list */
4187 BUG_ON(free <= 0);
4188
4189 if (free == page->objects) {
4190 list_move(&page->slab_list, &discard);
4191 n->nr_partial--;
4192 } else if (free <= SHRINK_PROMOTE_MAX)
4193 list_move(&page->slab_list, promote + free - 1);
4194 }
4195
4196 /*
4197 * Promote the slabs filled up most to the head of the
4198 * partial list.
4199 */
4200 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4201 list_splice(promote + i, &n->partial);
4202
4203 spin_unlock_irqrestore(&n->list_lock, flags);
4204
4205 /* Release empty slabs */
4206 list_for_each_entry_safe(page, t, &discard, slab_list)
4207 discard_slab(s, page);
4208
4209 if (slabs_node(s, node))
4210 ret = 1;
4211 }
4212
4213 return ret;
4214 }
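/*
 * Example (derived from the code above): a partial slab with 3 free
 * objects is moved to promote[2]. The splice loop walks the buckets from
 * promote[SHRINK_PROMOTE_MAX - 1] down to promote[0], each time adding at
 * the head, so the fullest slabs (fewest free objects) end up first on
 * the partial list.
 */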
4215
4216 static int slab_mem_going_offline_callback(void *arg)
4217 {
4218 struct kmem_cache *s;
4219
4220 mutex_lock(&slab_mutex);
4221 list_for_each_entry(s, &slab_caches, list)
4222 __kmem_cache_shrink(s);
4223 mutex_unlock(&slab_mutex);
4224
4225 return 0;
4226 }
4227
4228 static void slab_mem_offline_callback(void *arg)
4229 {
4230 struct kmem_cache_node *n;
4231 struct kmem_cache *s;
4232 struct memory_notify *marg = arg;
4233 int offline_node;
4234
4235 offline_node = marg->status_change_nid_normal;
4236
4237 /*
4238 * If the node still has available memory, we still need its
4239 * kmem_cache_node, so there is nothing to do here.
4240 */
4241 if (offline_node < 0)
4242 return;
4243
4244 mutex_lock(&slab_mutex);
4245 list_for_each_entry(s, &slab_caches, list) {
4246 n = get_node(s, offline_node);
4247 if (n) {
4248 /*
4249 * if n->nr_slabs > 0, slabs still exist on the node
4250 * that is going down. We were unable to free them,
4251 * and offline_pages() function shouldn't call this
4252 * callback. So, we must fail.
4253 */
4254 BUG_ON(slabs_node(s, offline_node));
4255
4256 s->node[offline_node] = NULL;
4257 kmem_cache_free(kmem_cache_node, n);
4258 }
4259 }
4260 mutex_unlock(&slab_mutex);
4261 }
4262
4263 static int slab_mem_going_online_callback(void *arg)
4264 {
4265 struct kmem_cache_node *n;
4266 struct kmem_cache *s;
4267 struct memory_notify *marg = arg;
4268 int nid = marg->status_change_nid_normal;
4269 int ret = 0;
4270
4271 /*
4272 * If the node's memory is already available, then kmem_cache_node is
4273 * already created. Nothing to do.
4274 */
4275 if (nid < 0)
4276 return 0;
4277
4278 /*
4279 * We are bringing a node online. No memory is available yet. We must
4280 * allocate a kmem_cache_node structure in order to bring the node
4281 * online.
4282 */
4283 mutex_lock(&slab_mutex);
4284 list_for_each_entry(s, &slab_caches, list) {
4285 /*
4286 * XXX: kmem_cache_alloc_node() will fall back to other nodes
4287 * since memory is not yet available from the node that
4288 * is being brought up.
4289 */
4290 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4291 if (!n) {
4292 ret = -ENOMEM;
4293 goto out;
4294 }
4295 init_kmem_cache_node(n);
4296 s->node[nid] = n;
4297 }
4298 out:
4299 mutex_unlock(&slab_mutex);
4300 return ret;
4301 }
4302
4303 static int slab_memory_callback(struct notifier_block *self,
4304 unsigned long action, void *arg)
4305 {
4306 int ret = 0;
4307
4308 switch (action) {
4309 case MEM_GOING_ONLINE:
4310 ret = slab_mem_going_online_callback(arg);
4311 break;
4312 case MEM_GOING_OFFLINE:
4313 ret = slab_mem_going_offline_callback(arg);
4314 break;
4315 case MEM_OFFLINE:
4316 case MEM_CANCEL_ONLINE:
4317 slab_mem_offline_callback(arg);
4318 break;
4319 case MEM_ONLINE:
4320 case MEM_CANCEL_OFFLINE:
4321 break;
4322 }
4323 if (ret)
4324 ret = notifier_from_errno(ret);
4325 else
4326 ret = NOTIFY_OK;
4327 return ret;
4328 }
4329
4330 static struct notifier_block slab_memory_callback_nb = {
4331 .notifier_call = slab_memory_callback,
4332 .priority = SLAB_CALLBACK_PRI,
4333 };
4334
4335 /********************************************************************
4336 * Basic setup of slabs
4337 *******************************************************************/
4338
4339 /*
4340 * Used for early kmem_cache structures that were allocated using
4341 * the page allocator. Allocate them properly then fix up the pointers
4342 * that may be pointing to the wrong kmem_cache structure.
4343 */
4344
4345 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
4346 {
4347 int node;
4348 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
4349 struct kmem_cache_node *n;
4350
4351 memcpy(s, static_cache, kmem_cache->object_size);
4352
4353 /*
4354 * This runs very early, and only the boot processor is supposed to be
4355 * up. Even if that were not true, IRQs are not yet enabled, so we
4356 * could not send IPIs anyway.
4357 */
4358 __flush_cpu_slab(s, smp_processor_id());
4359 for_each_kmem_cache_node(s, node, n) {
4360 struct page *p;
4361
4362 list_for_each_entry(p, &n->partial, slab_list)
4363 p->slab_cache = s;
4364
4365 #ifdef CONFIG_SLUB_DEBUG
4366 list_for_each_entry(p, &n->full, slab_list)
4367 p->slab_cache = s;
4368 #endif
4369 }
4370 list_add(&s->list, &slab_caches);
4371 return s;
4372 }
4373
4374 void __init kmem_cache_init(void)
4375 {
4376 static __initdata struct kmem_cache boot_kmem_cache,
4377 boot_kmem_cache_node;
4378
4379 if (debug_guardpage_minorder())
4380 slub_max_order = 0;
4381
4382 kmem_cache_node = &boot_kmem_cache_node;
4383 kmem_cache = &boot_kmem_cache;
4384
4385 create_boot_cache(kmem_cache_node, "kmem_cache_node",
4386 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
4387
4388 register_hotmemory_notifier(&slab_memory_callback_nb);
4389
4390 /* Able to allocate the per node structures */
4391 slab_state = PARTIAL;
4392
4393 create_boot_cache(kmem_cache, "kmem_cache",
4394 offsetof(struct kmem_cache, node) +
4395 nr_node_ids * sizeof(struct kmem_cache_node *),
4396 SLAB_HWCACHE_ALIGN, 0, 0);
4397
4398 kmem_cache = bootstrap(&boot_kmem_cache);
4399 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
4400
4401 /* Now we can use the kmem_cache to allocate kmalloc slabs */
4402 setup_kmalloc_cache_index_table();
4403 create_kmalloc_caches(0);
4404
4405 /* Setup random freelists for each cache */
4406 init_freelist_randomization();
4407
4408 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4409 slub_cpu_dead);
4410
4411 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
4412 cache_line_size(),
4413 slub_min_order, slub_max_order, slub_min_objects,
4414 nr_cpu_ids, nr_node_ids);
4415 }
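/*
 * Example of the resulting boot message (illustrative values only):
 *
 *	SLUB: HWalign=64, Order=0-3, MinObjects=0, CPUs=8, Nodes=1
 */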
4416
4417 void __init kmem_cache_init_late(void)
4418 {
4419 }
4420
4421 struct kmem_cache *
4422 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
4423 slab_flags_t flags, void (*ctor)(void *))
4424 {
4425 struct kmem_cache *s;
4426
4427 s = find_mergeable(size, align, flags, name, ctor);
4428 if (s) {
4429 s->refcount++;
4430
4431 /*
4432 * Adjust the object sizes so that we clear
4433 * the complete object on kzalloc.
4434 */
4435 s->object_size = max(s->object_size, size);
4436 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
4437
4438 if (sysfs_slab_alias(s, name)) {
4439 s->refcount--;
4440 s = NULL;
4441 }
4442 }
4443
4444 return s;
4445 }
4446
4447 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
4448 {
4449 int err;
4450
4451 err = kmem_cache_open(s, flags);
4452 if (err)
4453 return err;
4454
4455 /* Mutex is not taken during early boot */
4456 if (slab_state <= UP)
4457 return 0;
4458
4459 err = sysfs_slab_add(s);
4460 if (err)
4461 __kmem_cache_release(s);
4462
4463 return err;
4464 }
4465
4466 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4467 {
4468 struct kmem_cache *s;
4469 void *ret;
4470
4471 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4472 return kmalloc_large(size, gfpflags);
4473
4474 s = kmalloc_slab(size, gfpflags);
4475
4476 if (unlikely(ZERO_OR_NULL_PTR(s)))
4477 return s;
4478
4479 ret = slab_alloc(s, gfpflags, caller);
4480
4481 /* Honor the call site pointer we received. */
4482 trace_kmalloc(caller, ret, size, s->size, gfpflags);
4483
4484 return ret;
4485 }
4486 EXPORT_SYMBOL(__kmalloc_track_caller);
4487
4488 #ifdef CONFIG_NUMA
4489 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4490 int node, unsigned long caller)
4491 {
4492 struct kmem_cache *s;
4493 void *ret;
4494
4495 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4496 ret = kmalloc_large_node(size, gfpflags, node);
4497
4498 trace_kmalloc_node(caller, ret,
4499 size, PAGE_SIZE << get_order(size),
4500 gfpflags, node);
4501
4502 return ret;
4503 }
4504
4505 s = kmalloc_slab(size, gfpflags);
4506
4507 if (unlikely(ZERO_OR_NULL_PTR(s)))
4508 return s;
4509
4510 ret = slab_alloc_node(s, gfpflags, node, caller);
4511
4512 /* Honor the call site pointer we received. */
4513 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4514
4515 return ret;
4516 }
4517 EXPORT_SYMBOL(__kmalloc_node_track_caller);
4518 #endif
4519
4520 #ifdef CONFIG_SYSFS
4521 static int count_inuse(struct page *page)
4522 {
4523 return page->inuse;
4524 }
4525
4526 static int count_total(struct page *page)
4527 {
4528 return page->objects;
4529 }
4530 #endif
4531
4532 #ifdef CONFIG_SLUB_DEBUG
4533 static void validate_slab(struct kmem_cache *s, struct page *page)
4534 {
4535 void *p;
4536 void *addr = page_address(page);
4537 unsigned long *map;
4538
4539 slab_lock(page);
4540
4541 if (!check_slab(s, page) || !on_freelist(s, page, NULL))
4542 goto unlock;
4543
4544 /* Now we know that a valid freelist exists */
4545 map = get_map(s, page);
4546 for_each_object(p, s, addr, page->objects) {
4547 u8 val = test_bit(__obj_to_index(s, addr, p), map) ?
4548 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
4549
4550 if (!check_object(s, page, p, val))
4551 break;
4552 }
4553 put_map(map);
4554 unlock:
4555 slab_unlock(page);
4556 }
4557
4558 static int validate_slab_node(struct kmem_cache *s,
4559 struct kmem_cache_node *n)
4560 {
4561 unsigned long count = 0;
4562 struct page *page;
4563 unsigned long flags;
4564
4565 spin_lock_irqsave(&n->list_lock, flags);
4566
4567 list_for_each_entry(page, &n->partial, slab_list) {
4568 validate_slab(s, page);
4569 count++;
4570 }
4571 if (count != n->nr_partial)
4572 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4573 s->name, count, n->nr_partial);
4574
4575 if (!(s->flags & SLAB_STORE_USER))
4576 goto out;
4577
4578 list_for_each_entry(page, &n->full, slab_list) {
4579 validate_slab(s, page);
4580 count++;
4581 }
4582 if (count != atomic_long_read(&n->nr_slabs))
4583 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4584 s->name, count, atomic_long_read(&n->nr_slabs));
4585
4586 out:
4587 spin_unlock_irqrestore(&n->list_lock, flags);
4588 return count;
4589 }
4590
4591 static long validate_slab_cache(struct kmem_cache *s)
4592 {
4593 int node;
4594 unsigned long count = 0;
4595 struct kmem_cache_node *n;
4596
4597 flush_all(s);
4598 for_each_kmem_cache_node(s, node, n)
4599 count += validate_slab_node(s, n);
4600
4601 return count;
4602 }
4603 /*
4604 * Generate lists of code addresses where slabcache objects are allocated
4605 * and freed.
4606 */
4607
4608 struct location {
4609 unsigned long count;
4610 unsigned long addr;
4611 long long sum_time;
4612 long min_time;
4613 long max_time;
4614 long min_pid;
4615 long max_pid;
4616 DECLARE_BITMAP(cpus, NR_CPUS);
4617 nodemask_t nodes;
4618 };
4619
4620 struct loc_track {
4621 unsigned long max;
4622 unsigned long count;
4623 struct location *loc;
4624 };
4625
4626 static void free_loc_track(struct loc_track *t)
4627 {
4628 if (t->max)
4629 free_pages((unsigned long)t->loc,
4630 get_order(sizeof(struct location) * t->max));
4631 }
4632
4633 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4634 {
4635 struct location *l;
4636 int order;
4637
4638 order = get_order(sizeof(struct location) * max);
4639
4640 l = (void *)__get_free_pages(flags, order);
4641 if (!l)
4642 return 0;
4643
4644 if (t->count) {
4645 memcpy(l, t->loc, sizeof(struct location) * t->count);
4646 free_loc_track(t);
4647 }
4648 t->max = max;
4649 t->loc = l;
4650 return 1;
4651 }
4652
4653 static int add_location(struct loc_track *t, struct kmem_cache *s,
4654 const struct track *track)
4655 {
4656 long start, end, pos;
4657 struct location *l;
4658 unsigned long caddr;
4659 unsigned long age = jiffies - track->when;
4660
4661 start = -1;
4662 end = t->count;
4663
4664 for ( ; ; ) {
4665 pos = start + (end - start + 1) / 2;
4666
4667 /*
4668 * There is nothing at "end". If we end up there
4669 * we need to insert the new element before "end".
4670 */
4671 if (pos == end)
4672 break;
4673
4674 caddr = t->loc[pos].addr;
4675 if (track->addr == caddr) {
4676
4677 l = &t->loc[pos];
4678 l->count++;
4679 if (track->when) {
4680 l->sum_time += age;
4681 if (age < l->min_time)
4682 l->min_time = age;
4683 if (age > l->max_time)
4684 l->max_time = age;
4685
4686 if (track->pid < l->min_pid)
4687 l->min_pid = track->pid;
4688 if (track->pid > l->max_pid)
4689 l->max_pid = track->pid;
4690
4691 cpumask_set_cpu(track->cpu,
4692 to_cpumask(l->cpus));
4693 }
4694 node_set(page_to_nid(virt_to_page(track)), l->nodes);
4695 return 1;
4696 }
4697
4698 if (track->addr < caddr)
4699 end = pos;
4700 else
4701 start = pos;
4702 }
4703
4704 /*
4705 * Not found. Insert new tracking element.
4706 */
4707 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4708 return 0;
4709
4710 l = t->loc + pos;
4711 if (pos < t->count)
4712 memmove(l + 1, l,
4713 (t->count - pos) * sizeof(struct location));
4714 t->count++;
4715 l->count = 1;
4716 l->addr = track->addr;
4717 l->sum_time = age;
4718 l->min_time = age;
4719 l->max_time = age;
4720 l->min_pid = track->pid;
4721 l->max_pid = track->pid;
4722 cpumask_clear(to_cpumask(l->cpus));
4723 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4724 nodes_clear(l->nodes);
4725 node_set(page_to_nid(virt_to_page(track)), l->nodes);
4726 return 1;
4727 }
4728
4729 static void process_slab(struct loc_track *t, struct kmem_cache *s,
4730 struct page *page, enum track_item alloc)
4731 {
4732 void *addr = page_address(page);
4733 void *p;
4734 unsigned long *map;
4735
4736 map = get_map(s, page);
4737 for_each_object(p, s, addr, page->objects)
4738 if (!test_bit(__obj_to_index(s, addr, p), map))
4739 add_location(t, s, get_track(s, p, alloc));
4740 put_map(map);
4741 }
4742
4743 static int list_locations(struct kmem_cache *s, char *buf,
4744 enum track_item alloc)
4745 {
4746 int len = 0;
4747 unsigned long i;
4748 struct loc_track t = { 0, 0, NULL };
4749 int node;
4750 struct kmem_cache_node *n;
4751
4752 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4753 GFP_KERNEL)) {
4754 return sysfs_emit(buf, "Out of memory\n");
4755 }
4756 /* Push back cpu slabs */
4757 flush_all(s);
4758
4759 for_each_kmem_cache_node(s, node, n) {
4760 unsigned long flags;
4761 struct page *page;
4762
4763 if (!atomic_long_read(&n->nr_slabs))
4764 continue;
4765
4766 spin_lock_irqsave(&n->list_lock, flags);
4767 list_for_each_entry(page, &n->partial, slab_list)
4768 process_slab(&t, s, page, alloc);
4769 list_for_each_entry(page, &n->full, slab_list)
4770 process_slab(&t, s, page, alloc);
4771 spin_unlock_irqrestore(&n->list_lock, flags);
4772 }
4773
4774 for (i = 0; i < t.count; i++) {
4775 struct location *l = &t.loc[i];
4776
4777 len += sysfs_emit_at(buf, len, "%7ld ", l->count);
4778
4779 if (l->addr)
4780 len += sysfs_emit_at(buf, len, "%pS", (void *)l->addr);
4781 else
4782 len += sysfs_emit_at(buf, len, "<not-available>");
4783
4784 if (l->sum_time != l->min_time)
4785 len += sysfs_emit_at(buf, len, " age=%ld/%ld/%ld",
4786 l->min_time,
4787 (long)div_u64(l->sum_time,
4788 l->count),
4789 l->max_time);
4790 else
4791 len += sysfs_emit_at(buf, len, " age=%ld", l->min_time);
4792
4793 if (l->min_pid != l->max_pid)
4794 len += sysfs_emit_at(buf, len, " pid=%ld-%ld",
4795 l->min_pid, l->max_pid);
4796 else
4797 len += sysfs_emit_at(buf, len, " pid=%ld",
4798 l->min_pid);
4799
4800 if (num_online_cpus() > 1 &&
4801 !cpumask_empty(to_cpumask(l->cpus)))
4802 len += sysfs_emit_at(buf, len, " cpus=%*pbl",
4803 cpumask_pr_args(to_cpumask(l->cpus)));
4804
4805 if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
4806 len += sysfs_emit_at(buf, len, " nodes=%*pbl",
4807 nodemask_pr_args(&l->nodes));
4808
4809 len += sysfs_emit_at(buf, len, "\n");
4810 }
4811
4812 free_loc_track(&t);
4813 if (!t.count)
4814 len += sysfs_emit_at(buf, len, "No data\n");
4815
4816 return len;
4817 }
4818 #endif /* CONFIG_SLUB_DEBUG */
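/*
 * Reading these lists from user space (illustrative): list_locations() is
 * exported through sysfs and needs SLAB_STORE_USER tracking, e.g. after
 * booting with slub_debug=U,kmalloc-512:
 *
 *	# cat /sys/kernel/slab/kmalloc-512/alloc_calls
 *	# cat /sys/kernel/slab/kmalloc-512/free_calls
 */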
4819
4820 #ifdef SLUB_RESILIENCY_TEST
4821 static void __init resiliency_test(void)
4822 {
4823 u8 *p;
4824 int type = KMALLOC_NORMAL;
4825
4826 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
4827
4828 pr_err("SLUB resiliency testing\n");
4829 pr_err("-----------------------\n");
4830 pr_err("A. Corruption after allocation\n");
4831
4832 p = kzalloc(16, GFP_KERNEL);
4833 p[16] = 0x12;
4834 pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4835 p + 16);
4836
4837 validate_slab_cache(kmalloc_caches[type][4]);
4838
4839 /* Hmmm... The next two are dangerous */
4840 p = kzalloc(32, GFP_KERNEL);
4841 p[32 + sizeof(void *)] = 0x34;
4842 pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
4843 p);
4844 pr_err("If allocated object is overwritten then not detectable\n\n");
4845
4846 validate_slab_cache(kmalloc_caches[type][5]);
4847 p = kzalloc(64, GFP_KERNEL);
4848 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4849 *p = 0x56;
4850 pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4851 p);
4852 pr_err("If allocated object is overwritten then not detectable\n\n");
4853 validate_slab_cache(kmalloc_caches[type][6]);
4854
4855 pr_err("\nB. Corruption after free\n");
4856 p = kzalloc(128, GFP_KERNEL);
4857 kfree(p);
4858 *p = 0x78;
4859 pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4860 validate_slab_cache(kmalloc_caches[type][7]);
4861
4862 p = kzalloc(256, GFP_KERNEL);
4863 kfree(p);
4864 p[50] = 0x9a;
4865 pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
4866 validate_slab_cache(kmalloc_caches[type][8]);
4867
4868 p = kzalloc(512, GFP_KERNEL);
4869 kfree(p);
4870 p[512] = 0xab;
4871 pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4872 validate_slab_cache(kmalloc_caches[type][9]);
4873 }
4874 #else
4875 #ifdef CONFIG_SYSFS
4876 static void resiliency_test(void) {};
4877 #endif
4878 #endif /* SLUB_RESILIENCY_TEST */
4879
4880 #ifdef CONFIG_SYSFS
4881 enum slab_stat_type {
4882 SL_ALL, /* All slabs */
4883 SL_PARTIAL, /* Only partially allocated slabs */
4884 SL_CPU, /* Only slabs used for cpu caches */
4885 SL_OBJECTS, /* Determine allocated objects not slabs */
4886 SL_TOTAL /* Determine object capacity not slabs */
4887 };
4888
4889 #define SO_ALL (1 << SL_ALL)
4890 #define SO_PARTIAL (1 << SL_PARTIAL)
4891 #define SO_CPU (1 << SL_CPU)
4892 #define SO_OBJECTS (1 << SL_OBJECTS)
4893 #define SO_TOTAL (1 << SL_TOTAL)
4894
4895 #ifdef CONFIG_MEMCG
4896 static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
4897
4898 static int __init setup_slub_memcg_sysfs(char *str)
4899 {
4900 int v;
4901
4902 if (get_option(&str, &v) > 0)
4903 memcg_sysfs_enabled = v;
4904
4905 return 1;
4906 }
4907
4908 __setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
4909 #endif
4910
4911 static ssize_t show_slab_objects(struct kmem_cache *s,
4912 char *buf, unsigned long flags)
4913 {
4914 unsigned long total = 0;
4915 int node;
4916 int x;
4917 unsigned long *nodes;
4918 int len = 0;
4919
4920 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
4921 if (!nodes)
4922 return -ENOMEM;
4923
4924 if (flags & SO_CPU) {
4925 int cpu;
4926
4927 for_each_possible_cpu(cpu) {
4928 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4929 cpu);
4930 int node;
4931 struct page *page;
4932
4933 page = READ_ONCE(c->page);
4934 if (!page)
4935 continue;
4936
4937 node = page_to_nid(page);
4938 if (flags & SO_TOTAL)
4939 x = page->objects;
4940 else if (flags & SO_OBJECTS)
4941 x = page->inuse;
4942 else
4943 x = 1;
4944
4945 total += x;
4946 nodes[node] += x;
4947
4948 page = slub_percpu_partial_read_once(c);
4949 if (page) {
4950 node = page_to_nid(page);
4951 if (flags & SO_TOTAL)
4952 WARN_ON_ONCE(1);
4953 else if (flags & SO_OBJECTS)
4954 WARN_ON_ONCE(1);
4955 else
4956 x = page->pages;
4957 total += x;
4958 nodes[node] += x;
4959 }
4960 }
4961 }
4962
4963 /*
4964 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
4965 * already held, as that would conflict with an existing lock order:
4966 *
4967 * mem_hotplug_lock->slab_mutex->kernfs_mutex
4968 *
4969 * We don't really need mem_hotplug_lock (to hold off
4970 * slab_mem_going_offline_callback) here because slab's memory hot
4971 * unplug code doesn't destroy the kmem_cache->node[] data.
4972 */
4973
4974 #ifdef CONFIG_SLUB_DEBUG
4975 if (flags & SO_ALL) {
4976 struct kmem_cache_node *n;
4977
4978 for_each_kmem_cache_node(s, node, n) {
4979
4980 if (flags & SO_TOTAL)
4981 x = atomic_long_read(&n->total_objects);
4982 else if (flags & SO_OBJECTS)
4983 x = atomic_long_read(&n->total_objects) -
4984 count_partial(n, count_free);
4985 else
4986 x = atomic_long_read(&n->nr_slabs);
4987 total += x;
4988 nodes[node] += x;
4989 }
4990
4991 } else
4992 #endif
4993 if (flags & SO_PARTIAL) {
4994 struct kmem_cache_node *n;
4995
4996 for_each_kmem_cache_node(s, node, n) {
4997 if (flags & SO_TOTAL)
4998 x = count_partial(n, count_total);
4999 else if (flags & SO_OBJECTS)
5000 x = count_partial(n, count_inuse);
5001 else
5002 x = n->nr_partial;
5003 total += x;
5004 nodes[node] += x;
5005 }
5006 }
5007
5008 len += sysfs_emit_at(buf, len, "%lu", total);
5009 #ifdef CONFIG_NUMA
5010 for (node = 0; node < nr_node_ids; node++) {
5011 if (nodes[node])
5012 len += sysfs_emit_at(buf, len, " N%d=%lu",
5013 node, nodes[node]);
5014 }
5015 #endif
5016 len += sysfs_emit_at(buf, len, "\n");
5017 kfree(nodes);
5018
5019 return len;
5020 }
5021
5022 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
5023 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
5024
5025 struct slab_attribute {
5026 struct attribute attr;
5027 ssize_t (*show)(struct kmem_cache *s, char *buf);
5028 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
5029 };
5030
5031 #define SLAB_ATTR_RO(_name) \
5032 static struct slab_attribute _name##_attr = \
5033 __ATTR(_name, 0400, _name##_show, NULL)
5034
5035 #define SLAB_ATTR(_name) \
5036 static struct slab_attribute _name##_attr = \
5037 __ATTR(_name, 0600, _name##_show, _name##_store)
5038
5039 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
5040 {
5041 return sysfs_emit(buf, "%u\n", s->size);
5042 }
5043 SLAB_ATTR_RO(slab_size);
5044
5045 static ssize_t align_show(struct kmem_cache *s, char *buf)
5046 {
5047 return sysfs_emit(buf, "%u\n", s->align);
5048 }
5049 SLAB_ATTR_RO(align);
5050
5051 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
5052 {
5053 return sysfs_emit(buf, "%u\n", s->object_size);
5054 }
5055 SLAB_ATTR_RO(object_size);
5056
5057 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
5058 {
5059 return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
5060 }
5061 SLAB_ATTR_RO(objs_per_slab);
5062
5063 static ssize_t order_show(struct kmem_cache *s, char *buf)
5064 {
5065 return sysfs_emit(buf, "%u\n", oo_order(s->oo));
5066 }
5067 SLAB_ATTR_RO(order);
5068
5069 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
5070 {
5071 return sysfs_emit(buf, "%lu\n", s->min_partial);
5072 }
5073
5074 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
5075 size_t length)
5076 {
5077 unsigned long min;
5078 int err;
5079
5080 err = kstrtoul(buf, 10, &min);
5081 if (err)
5082 return err;
5083
5084 set_min_partial(s, min);
5085 return length;
5086 }
5087 SLAB_ATTR(min_partial);
5088
5089 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
5090 {
5091 return sysfs_emit(buf, "%u\n", slub_cpu_partial(s));
5092 }
5093
5094 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
5095 size_t length)
5096 {
5097 unsigned int objects;
5098 int err;
5099
5100 err = kstrtouint(buf, 10, &objects);
5101 if (err)
5102 return err;
5103 if (objects && !kmem_cache_has_cpu_partial(s))
5104 return -EINVAL;
5105
5106 slub_set_cpu_partial(s, objects);
5107 flush_all(s);
5108 return length;
5109 }
5110 SLAB_ATTR(cpu_partial);
5111
5112 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
5113 {
5114 if (!s->ctor)
5115 return 0;
5116 return sysfs_emit(buf, "%pS\n", s->ctor);
5117 }
5118 SLAB_ATTR_RO(ctor);
5119
5120 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
5121 {
5122 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
5123 }
5124 SLAB_ATTR_RO(aliases);
5125
5126 static ssize_t partial_show(struct kmem_cache *s, char *buf)
5127 {
5128 return show_slab_objects(s, buf, SO_PARTIAL);
5129 }
5130 SLAB_ATTR_RO(partial);
5131
5132 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5133 {
5134 return show_slab_objects(s, buf, SO_CPU);
5135 }
5136 SLAB_ATTR_RO(cpu_slabs);
5137
5138 static ssize_t objects_show(struct kmem_cache *s, char *buf)
5139 {
5140 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
5141 }
5142 SLAB_ATTR_RO(objects);
5143
5144 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5145 {
5146 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5147 }
5148 SLAB_ATTR_RO(objects_partial);
5149
5150 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5151 {
5152 int objects = 0;
5153 int pages = 0;
5154 int cpu;
5155 int len = 0;
5156
5157 for_each_online_cpu(cpu) {
5158 struct page *page;
5159
5160 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5161
5162 if (page) {
5163 pages += page->pages;
5164 objects += page->pobjects;
5165 }
5166 }
5167
5168 len += sysfs_emit_at(buf, len, "%d(%d)", objects, pages);
5169
5170 #ifdef CONFIG_SMP
5171 for_each_online_cpu(cpu) {
5172 struct page *page;
5173
5174 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5175 if (page)
5176 len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
5177 cpu, page->pobjects, page->pages);
5178 }
5179 #endif
5180 len += sysfs_emit_at(buf, len, "\n");
5181
5182 return len;
5183 }
5184 SLAB_ATTR_RO(slabs_cpu_partial);
5185
5186 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5187 {
5188 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
5189 }
5190 SLAB_ATTR_RO(reclaim_account);
5191
5192 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5193 {
5194 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5195 }
5196 SLAB_ATTR_RO(hwcache_align);
5197
5198 #ifdef CONFIG_ZONE_DMA
5199 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5200 {
5201 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5202 }
5203 SLAB_ATTR_RO(cache_dma);
5204 #endif
5205
5206 static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5207 {
5208 return sysfs_emit(buf, "%u\n", s->usersize);
5209 }
5210 SLAB_ATTR_RO(usersize);
5211
5212 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5213 {
5214 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
5215 }
5216 SLAB_ATTR_RO(destroy_by_rcu);
5217
5218 #ifdef CONFIG_SLUB_DEBUG
5219 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5220 {
5221 return show_slab_objects(s, buf, SO_ALL);
5222 }
5223 SLAB_ATTR_RO(slabs);
5224
5225 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5226 {
5227 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5228 }
5229 SLAB_ATTR_RO(total_objects);
5230
5231 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5232 {
5233 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
5234 }
5235 SLAB_ATTR_RO(sanity_checks);
5236
5237 static ssize_t trace_show(struct kmem_cache *s, char *buf)
5238 {
5239 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5240 }
5241 SLAB_ATTR_RO(trace);
5242
5243 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5244 {
5245 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5246 }
5247
5248 SLAB_ATTR_RO(red_zone);
5249
5250 static ssize_t poison_show(struct kmem_cache *s, char *buf)
5251 {
5252 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
5253 }
5254
5255 SLAB_ATTR_RO(poison);
5256
5257 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5258 {
5259 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5260 }
5261
5262 SLAB_ATTR_RO(store_user);
5263
5264 static ssize_t validate_show(struct kmem_cache *s, char *buf)
5265 {
5266 return 0;
5267 }
5268
5269 static ssize_t validate_store(struct kmem_cache *s,
5270 const char *buf, size_t length)
5271 {
5272 int ret = -EINVAL;
5273
5274 if (buf[0] == '1') {
5275 ret = validate_slab_cache(s);
5276 if (ret >= 0)
5277 ret = length;
5278 }
5279 return ret;
5280 }
5281 SLAB_ATTR(validate);
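/*
 * Triggering a validation pass from user space (the cache name is just an
 * example):
 *
 *	# echo 1 > /sys/kernel/slab/kmalloc-64/validate
 *
 * Any inconsistencies found by validate_slab_cache() are reported to the
 * kernel log.
 */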
5282
5283 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
5284 {
5285 if (!(s->flags & SLAB_STORE_USER))
5286 return -ENOSYS;
5287 return list_locations(s, buf, TRACK_ALLOC);
5288 }
5289 SLAB_ATTR_RO(alloc_calls);
5290
5291 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
5292 {
5293 if (!(s->flags & SLAB_STORE_USER))
5294 return -ENOSYS;
5295 return list_locations(s, buf, TRACK_FREE);
5296 }
5297 SLAB_ATTR_RO(free_calls);
5298 #endif /* CONFIG_SLUB_DEBUG */
5299
5300 #ifdef CONFIG_FAILSLAB
5301 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5302 {
5303 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
5304 }
5305 SLAB_ATTR_RO(failslab);
5306 #endif
5307
5308 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5309 {
5310 return 0;
5311 }
5312
5313 static ssize_t shrink_store(struct kmem_cache *s,
5314 const char *buf, size_t length)
5315 {
5316 if (buf[0] == '1')
5317 kmem_cache_shrink(s);
5318 else
5319 return -EINVAL;
5320 return length;
5321 }
5322 SLAB_ATTR(shrink);
5323
5324 #ifdef CONFIG_NUMA
5325 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
5326 {
5327 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
5328 }
5329
5330 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
5331 const char *buf, size_t length)
5332 {
5333 unsigned int ratio;
5334 int err;
5335
5336 err = kstrtouint(buf, 10, &ratio);
5337 if (err)
5338 return err;
5339 if (ratio > 100)
5340 return -ERANGE;
5341
5342 s->remote_node_defrag_ratio = ratio * 10;
5343
5344 return length;
5345 }
5346 SLAB_ATTR(remote_node_defrag_ratio);
5347 #endif
5348
5349 #ifdef CONFIG_SLUB_STATS
5350 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5351 {
5352 unsigned long sum = 0;
5353 int cpu;
5354 int len = 0;
5355 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
5356
5357 if (!data)
5358 return -ENOMEM;
5359
5360 for_each_online_cpu(cpu) {
5361 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5362
5363 data[cpu] = x;
5364 sum += x;
5365 }
5366
5367 len += sysfs_emit_at(buf, len, "%lu", sum);
5368
5369 #ifdef CONFIG_SMP
5370 for_each_online_cpu(cpu) {
5371 if (data[cpu])
5372 len += sysfs_emit_at(buf, len, " C%d=%u",
5373 cpu, data[cpu]);
5374 }
5375 #endif
5376 kfree(data);
5377 len += sysfs_emit_at(buf, len, "\n");
5378
5379 return len;
5380 }
5381
5382 static void clear_stat(struct kmem_cache *s, enum stat_item si)
5383 {
5384 int cpu;
5385
5386 for_each_online_cpu(cpu)
5387 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5388 }
5389
5390 #define STAT_ATTR(si, text) \
5391 static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5392 { \
5393 return show_stat(s, buf, si); \
5394 } \
5395 static ssize_t text##_store(struct kmem_cache *s, \
5396 const char *buf, size_t length) \
5397 { \
5398 if (buf[0] != '0') \
5399 return -EINVAL; \
5400 clear_stat(s, si); \
5401 return length; \
5402 } \
5403 SLAB_ATTR(text); \
5404
5405 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5406 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5407 STAT_ATTR(FREE_FASTPATH, free_fastpath);
5408 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5409 STAT_ATTR(FREE_FROZEN, free_frozen);
5410 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5411 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5412 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5413 STAT_ATTR(ALLOC_SLAB, alloc_slab);
5414 STAT_ATTR(ALLOC_REFILL, alloc_refill);
5415 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5416 STAT_ATTR(FREE_SLAB, free_slab);
5417 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5418 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5419 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5420 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5421 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5422 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5423 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5424 STAT_ATTR(ORDER_FALLBACK, order_fallback);
5425 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5426 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5427 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5428 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5429 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5430 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5431 #endif /* CONFIG_SLUB_STATS */
5432
5433 static struct attribute *slab_attrs[] = {
5434 &slab_size_attr.attr,
5435 &object_size_attr.attr,
5436 &objs_per_slab_attr.attr,
5437 &order_attr.attr,
5438 &min_partial_attr.attr,
5439 &cpu_partial_attr.attr,
5440 &objects_attr.attr,
5441 &objects_partial_attr.attr,
5442 &partial_attr.attr,
5443 &cpu_slabs_attr.attr,
5444 &ctor_attr.attr,
5445 &aliases_attr.attr,
5446 &align_attr.attr,
5447 &hwcache_align_attr.attr,
5448 &reclaim_account_attr.attr,
5449 &destroy_by_rcu_attr.attr,
5450 &shrink_attr.attr,
5451 &slabs_cpu_partial_attr.attr,
5452 #ifdef CONFIG_SLUB_DEBUG
5453 &total_objects_attr.attr,
5454 &slabs_attr.attr,
5455 &sanity_checks_attr.attr,
5456 &trace_attr.attr,
5457 &red_zone_attr.attr,
5458 &poison_attr.attr,
5459 &store_user_attr.attr,
5460 &validate_attr.attr,
5461 &alloc_calls_attr.attr,
5462 &free_calls_attr.attr,
5463 #endif
5464 #ifdef CONFIG_ZONE_DMA
5465 &cache_dma_attr.attr,
5466 #endif
5467 #ifdef CONFIG_NUMA
5468 &remote_node_defrag_ratio_attr.attr,
5469 #endif
5470 #ifdef CONFIG_SLUB_STATS
5471 &alloc_fastpath_attr.attr,
5472 &alloc_slowpath_attr.attr,
5473 &free_fastpath_attr.attr,
5474 &free_slowpath_attr.attr,
5475 &free_frozen_attr.attr,
5476 &free_add_partial_attr.attr,
5477 &free_remove_partial_attr.attr,
5478 &alloc_from_partial_attr.attr,
5479 &alloc_slab_attr.attr,
5480 &alloc_refill_attr.attr,
5481 &alloc_node_mismatch_attr.attr,
5482 &free_slab_attr.attr,
5483 &cpuslab_flush_attr.attr,
5484 &deactivate_full_attr.attr,
5485 &deactivate_empty_attr.attr,
5486 &deactivate_to_head_attr.attr,
5487 &deactivate_to_tail_attr.attr,
5488 &deactivate_remote_frees_attr.attr,
5489 &deactivate_bypass_attr.attr,
5490 &order_fallback_attr.attr,
5491 &cmpxchg_double_fail_attr.attr,
5492 &cmpxchg_double_cpu_fail_attr.attr,
5493 &cpu_partial_alloc_attr.attr,
5494 &cpu_partial_free_attr.attr,
5495 &cpu_partial_node_attr.attr,
5496 &cpu_partial_drain_attr.attr,
5497 #endif
5498 #ifdef CONFIG_FAILSLAB
5499 &failslab_attr.attr,
5500 #endif
5501 &usersize_attr.attr,
5502
5503 NULL
5504 };
5505
5506 static const struct attribute_group slab_attr_group = {
5507 .attrs = slab_attrs,
5508 };
5509
5510 static ssize_t slab_attr_show(struct kobject *kobj,
5511 struct attribute *attr,
5512 char *buf)
5513 {
5514 struct slab_attribute *attribute;
5515 struct kmem_cache *s;
5516 int err;
5517
5518 attribute = to_slab_attr(attr);
5519 s = to_slab(kobj);
5520
5521 if (!attribute->show)
5522 return -EIO;
5523
5524 err = attribute->show(s, buf);
5525
5526 return err;
5527 }
5528
5529 static ssize_t slab_attr_store(struct kobject *kobj,
5530 struct attribute *attr,
5531 const char *buf, size_t len)
5532 {
5533 struct slab_attribute *attribute;
5534 struct kmem_cache *s;
5535 int err;
5536
5537 attribute = to_slab_attr(attr);
5538 s = to_slab(kobj);
5539
5540 if (!attribute->store)
5541 return -EIO;
5542
5543 err = attribute->store(s, buf, len);
5544 return err;
5545 }
5546
5547 static void kmem_cache_release(struct kobject *k)
5548 {
5549 slab_kmem_cache_release(to_slab(k));
5550 }
5551
5552 static const struct sysfs_ops slab_sysfs_ops = {
5553 .show = slab_attr_show,
5554 .store = slab_attr_store,
5555 };
5556
5557 static struct kobj_type slab_ktype = {
5558 .sysfs_ops = &slab_sysfs_ops,
5559 .release = kmem_cache_release,
5560 };
5561
5562 static struct kset *slab_kset;
5563
5564 static inline struct kset *cache_kset(struct kmem_cache *s)
5565 {
5566 return slab_kset;
5567 }
5568
5569 #define ID_STR_LENGTH 64
5570
5571 /* Create a unique string id for a slab cache:
5572 *
5573 * Format :[flags-]size
5574 */
5575 static char *create_unique_id(struct kmem_cache *s)
5576 {
5577 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5578 char *p = name;
5579
5580 BUG_ON(!name);
5581
5582 *p++ = ':';
5583 /*
5584 * First come the flags that affect slabcache operations. We will only
5585 * get here for aliasable slabs so we do not need to support
5586 * too many flags. The flags here must cover all flags that
5587 * are matched during merging to guarantee that the id is
5588 * unique.
5589 */
5590 if (s->flags & SLAB_CACHE_DMA)
5591 *p++ = 'd';
5592 if (s->flags & SLAB_CACHE_DMA32)
5593 *p++ = 'D';
5594 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5595 *p++ = 'a';
5596 if (s->flags & SLAB_CONSISTENCY_CHECKS)
5597 *p++ = 'F';
5598 if (s->flags & SLAB_ACCOUNT)
5599 *p++ = 'A';
5600 if (p != name + 1)
5601 *p++ = '-';
5602 p += sprintf(p, "%07u", s->size);
5603
5604 BUG_ON(p > name + ID_STR_LENGTH - 1);
5605 return name;
5606 }
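/*
 * Example output (illustrative): a 192-byte cache created with
 * SLAB_RECLAIM_ACCOUNT gets the id ":a-0000192", while a 1024-byte cache
 * with none of the flags above gets ":0001024".
 */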
5607
5608 static int sysfs_slab_add(struct kmem_cache *s)
5609 {
5610 int err;
5611 const char *name;
5612 struct kset *kset = cache_kset(s);
5613 int unmergeable = slab_unmergeable(s);
5614
5615 if (!kset) {
5616 kobject_init(&s->kobj, &slab_ktype);
5617 return 0;
5618 }
5619
5620 if (!unmergeable && disable_higher_order_debug &&
5621 (slub_debug & DEBUG_METADATA_FLAGS))
5622 unmergeable = 1;
5623
5624 if (unmergeable) {
5625 /*
5626 * Slabcache can never be merged so we can use the name proper.
5627 * This is typically the case for debug situations. In that
5628 * case we can catch duplicate names easily.
5629 */
5630 sysfs_remove_link(&slab_kset->kobj, s->name);
5631 name = s->name;
5632 } else {
5633 /*
5634 * Create a unique name for the slab as a target
5635 * for the symlinks.
5636 */
5637 name = create_unique_id(s);
5638 }
5639
5640 s->kobj.kset = kset;
5641 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
5642 if (err)
5643 goto out;
5644
5645 err = sysfs_create_group(&s->kobj, &slab_attr_group);
5646 if (err)
5647 goto out_del_kobj;
5648
5649 if (!unmergeable) {
5650 /* Setup first alias */
5651 sysfs_slab_alias(s, s->name);
5652 }
5653 out:
5654 if (!unmergeable)
5655 kfree(name);
5656 return err;
5657 out_del_kobj:
5658 kobject_del(&s->kobj);
5659 goto out;
5660 }
5661
5662 void sysfs_slab_unlink(struct kmem_cache *s)
5663 {
5664 if (slab_state >= FULL)
5665 kobject_del(&s->kobj);
5666 }
5667
5668 void sysfs_slab_release(struct kmem_cache *s)
5669 {
5670 if (slab_state >= FULL)
5671 kobject_put(&s->kobj);
5672 }
5673
5674 /*
5675 * Need to buffer aliases during bootup until sysfs becomes
5676 * available lest we lose that information.
5677 */
5678 struct saved_alias {
5679 struct kmem_cache *s;
5680 const char *name;
5681 struct saved_alias *next;
5682 };
5683
5684 static struct saved_alias *alias_list;
5685
5686 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5687 {
5688 struct saved_alias *al;
5689
5690 if (slab_state == FULL) {
5691 /*
5692 * If we have a leftover link then remove it.
5693 */
5694 sysfs_remove_link(&slab_kset->kobj, name);
5695 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5696 }
5697
5698 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5699 if (!al)
5700 return -ENOMEM;
5701
5702 al->s = s;
5703 al->name = name;
5704 al->next = alias_list;
5705 alias_list = al;
5706 return 0;
5707 }
5708
5709 static int __init slab_sysfs_init(void)
5710 {
5711 struct kmem_cache *s;
5712 int err;
5713
5714 mutex_lock(&slab_mutex);
5715
5716 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
5717 if (!slab_kset) {
5718 mutex_unlock(&slab_mutex);
5719 pr_err("Cannot register slab subsystem.\n");
5720 return -ENOSYS;
5721 }
5722
5723 slab_state = FULL;
5724
5725 list_for_each_entry(s, &slab_caches, list) {
5726 err = sysfs_slab_add(s);
5727 if (err)
5728 pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
5729 s->name);
5730 }
5731
5732 while (alias_list) {
5733 struct saved_alias *al = alias_list;
5734
5735 alias_list = alias_list->next;
5736 err = sysfs_slab_alias(al->s, al->name);
5737 if (err)
5738 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
5739 al->name);
5740 kfree(al);
5741 }
5742
5743 mutex_unlock(&slab_mutex);
5744 resiliency_test();
5745 return 0;
5746 }
5747
5748 __initcall(slab_sysfs_init);
5749 #endif /* CONFIG_SYSFS */
5750
5751 /*
5752 * The /proc/slabinfo ABI
5753 */
5754 #ifdef CONFIG_SLUB_DEBUG
5755 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
5756 {
5757 unsigned long nr_slabs = 0;
5758 unsigned long nr_objs = 0;
5759 unsigned long nr_free = 0;
5760 int node;
5761 struct kmem_cache_node *n;
5762
5763 for_each_kmem_cache_node(s, node, n) {
5764 nr_slabs += node_nr_slabs(n);
5765 nr_objs += node_nr_objs(n);
5766 nr_free += count_partial(n, count_free);
5767 }
5768
5769 sinfo->active_objs = nr_objs - nr_free;
5770 sinfo->num_objs = nr_objs;
5771 sinfo->active_slabs = nr_slabs;
5772 sinfo->num_slabs = nr_slabs;
5773 sinfo->objects_per_slab = oo_objects(s->oo);
5774 sinfo->cache_order = oo_order(s->oo);
5775 }
5776
5777 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
5778 {
5779 }
5780
5781 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
5782 size_t count, loff_t *ppos)
5783 {
5784 return -EIO;
5785 }
5786 #endif /* CONFIG_SLUB_DEBUG */