// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <kunit/test.h>
#include <linux/debugfs.h>
#include <trace/events/kmem.h>
/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects:
 *      A. page->freelist  -> List of free objects in a page
 *      B. page->inuse     -> Number of objects in use
 *      C. page->objects   -> Number of objects in page
 *      D. page->frozen    -> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list except the per cpu partial list. The processor that froze the
 *   slab is the one who can perform list operations on the page. Other
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
 *   page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added to or
 *   removed from the lists, nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * page->frozen         The slab is frozen and exempt from list processing.
 *                      This means that the slab is dedicated to a purpose
 *                      such as satisfying allocations for a specific
 *                      processor. Objects may be freed in the slab while
 *                      it is frozen but slab_free will then skip the usual
 *                      list operations. It is up to the processor holding
 *                      the slab to integrate the slab into the slab lists
 *                      when the slab is no longer needed.
 *
 *                      One use of this flag is to mark slabs that are
 *                      used for allocations. Then such a slab becomes a cpu
 *                      slab. The cpu slab may be equipped with an additional
 *                      freelist that allows lockless access to
 *                      free objects in addition to the regular freelist
 *                      that requires the slab lock.
 *
 * SLAB_DEBUG_FLAGS     Slab requires special handling due to debug
 *                      options set. This moves slab handling out of
 *                      the fast path and disables lockless freelists.
 */
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif		/* CONFIG_SLUB_DEBUG */

static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}
/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information.
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)

/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
/* Use cmpxchg_double */
#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)
/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };
#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;
/********************************************************************
 * 			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/*
	 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
	 * Normally, this doesn't cause any issues, as both set_freepointer()
	 * and get_freepointer() are called with a pointer with the same tag.
	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
	 * example, when __free_slub() iterates over objects in a cache, it
	 * passes untagged pointers to check_object(). check_object() in turn
	 * calls get_freepointer() with an untagged pointer, which causes the
	 * freepointer to be restored incorrectly.
	 */
	return (void *)((unsigned long)ptr ^ s->random ^
			swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
#else
	return ptr;
#endif
}
/* Returns the freelist pointer recorded at location ptr_addr. */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
			    (unsigned long)ptr_addr);
}

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	object = kasan_reset_tag(object);
	return freelist_dereference(s, object + s->offset);
}
static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetch(object + s->offset);
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	object = kasan_reset_tag(object);
	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}
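/*
 * Illustrative note (not in the original source): the BUG_ON above catches
 * the simplest double free. After kfree(x) the object x sits at the head of
 * the per-cpu freelist, so an immediate second kfree(x) would ask
 * set_freepointer() to link x to itself, tripping the object == fp check
 * when CONFIG_SLAB_FREELIST_HARDENED is enabled.
 */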
/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
		unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
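/*
 * Worked example (illustrative, not in the original source): with
 * PAGE_SIZE == 4096, order == 1 and size == 256, order_objects() yields
 * (4096 << 1) / 256 == 32, so oo_make() packs x.x == (1 << OO_SHIFT) + 32,
 * from which oo_order() recovers 1 and oo_objects() recovers 32.
 */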
/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}
/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			return true;
		}
		slab_unlock(page);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			local_irq_restore(flags);
			return true;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);

#if IS_ENABLED(CONFIG_KUNIT)
static bool slab_add_kunit_errors(void)
{
	struct kunit_resource *resource;

	if (likely(!current->kunit_test))
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);
	return true;
}
#else
static inline bool slab_add_kunit_errors(void) { return false; }
#endif
/*
 * Determine a map of objects in use on a page.
 *
 * Node listlock must be held to guarantee that the page does
 * not vanish from under us.
 */
static unsigned long *get_map(struct kmem_cache *s, struct page *page)
	__acquires(&object_map_lock)
{
	void *p;
	void *addr = page_address(page);

	VM_BUG_ON(!irqs_disabled());

	spin_lock(&object_map_lock);

	bitmap_zero(object_map, page->objects);

	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(__obj_to_index(s, addr, p), object_map);

	return object_map;
}

static void put_map(unsigned long *map) __releases(&object_map_lock)
{
	VM_BUG_ON(map != object_map);
	spin_unlock(&object_map_lock);
}
static inline unsigned int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

static char *slub_debug_string;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error. metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}
555 /* Verify that a pointer has an address that is valid within a slab page */
556 static inline int check_valid_pointer(struct kmem_cache
*s
,
557 struct page
*page
, void *object
)
564 base
= page_address(page
);
565 object
= kasan_reset_tag(object
);
566 object
= restore_red_left(s
, object
);
567 if (object
< base
|| object
>= base
+ page
->objects
* s
->size
||
568 (object
- base
) % s
->size
) {
575 static void print_section(char *level
, char *text
, u8
*addr
,
578 metadata_access_enable();
579 print_hex_dump(level
, text
, DUMP_PREFIX_ADDRESS
,
580 16, 1, kasan_reset_tag((void *)addr
), length
, 1);
581 metadata_access_disable();
585 * See comment in calculate_sizes().
587 static inline bool freeptr_outside_object(struct kmem_cache
*s
)
589 return s
->offset
>= s
->inuse
;
593 * Return offset of the end of info block which is inuse + free pointer if
594 * not overlapping with object.
596 static inline unsigned int get_info_end(struct kmem_cache
*s
)
598 if (freeptr_outside_object(s
))
599 return s
->inuse
+ sizeof(void *);
604 static struct track
*get_track(struct kmem_cache
*s
, void *object
,
605 enum track_item alloc
)
609 p
= object
+ get_info_end(s
);
611 return kasan_reset_tag(p
+ alloc
);
614 static void set_track(struct kmem_cache
*s
, void *object
,
615 enum track_item alloc
, unsigned long addr
)
617 struct track
*p
= get_track(s
, object
, alloc
);
620 #ifdef CONFIG_STACKTRACE
621 unsigned int nr_entries
;
623 metadata_access_enable();
624 nr_entries
= stack_trace_save(kasan_reset_tag(p
->addrs
),
625 TRACK_ADDRS_COUNT
, 3);
626 metadata_access_disable();
628 if (nr_entries
< TRACK_ADDRS_COUNT
)
629 p
->addrs
[nr_entries
] = 0;
632 p
->cpu
= smp_processor_id();
633 p
->pid
= current
->pid
;
636 memset(p
, 0, sizeof(struct track
));
640 static void init_tracking(struct kmem_cache
*s
, void *object
)
642 if (!(s
->flags
& SLAB_STORE_USER
))
645 set_track(s
, object
, TRACK_FREE
, 0UL);
646 set_track(s
, object
, TRACK_ALLOC
, 0UL);
649 static void print_track(const char *s
, struct track
*t
, unsigned long pr_time
)
654 pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
655 s
, (void *)t
->addr
, pr_time
- t
->when
, t
->cpu
, t
->pid
);
656 #ifdef CONFIG_STACKTRACE
659 for (i
= 0; i
< TRACK_ADDRS_COUNT
; i
++)
661 pr_err("\t%pS\n", (void *)t
->addrs
[i
]);
668 void print_tracking(struct kmem_cache
*s
, void *object
)
670 unsigned long pr_time
= jiffies
;
671 if (!(s
->flags
& SLAB_STORE_USER
))
674 print_track("Allocated", get_track(s
, object
, TRACK_ALLOC
), pr_time
);
675 print_track("Freed", get_track(s
, object
, TRACK_FREE
), pr_time
);
678 static void print_page_info(struct page
*page
)
680 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%#lx(%pGp)\n",
681 page
, page
->objects
, page
->inuse
, page
->freelist
,
682 page
->flags
, &page
->flags
);
686 static void slab_bug(struct kmem_cache
*s
, char *fmt
, ...)
688 struct va_format vaf
;
694 pr_err("=============================================================================\n");
695 pr_err("BUG %s (%s): %pV\n", s
->name
, print_tainted(), &vaf
);
696 pr_err("-----------------------------------------------------------------------------\n\n");
701 static void slab_fix(struct kmem_cache
*s
, char *fmt
, ...)
703 struct va_format vaf
;
706 if (slab_add_kunit_errors())
712 pr_err("FIX %s: %pV\n", s
->name
, &vaf
);
716 static bool freelist_corrupted(struct kmem_cache
*s
, struct page
*page
,
717 void **freelist
, void *nextfree
)
719 if ((s
->flags
& SLAB_CONSISTENCY_CHECKS
) &&
720 !check_valid_pointer(s
, page
, nextfree
) && freelist
) {
721 object_err(s
, page
, *freelist
, "Freechain corrupt");
723 slab_fix(s
, "Isolate corrupted freechain");
730 static void print_trailer(struct kmem_cache
*s
, struct page
*page
, u8
*p
)
732 unsigned int off
; /* Offset of last byte */
733 u8
*addr
= page_address(page
);
735 print_tracking(s
, p
);
737 print_page_info(page
);
739 pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
740 p
, p
- addr
, get_freepointer(s
, p
));
742 if (s
->flags
& SLAB_RED_ZONE
)
743 print_section(KERN_ERR
, "Redzone ", p
- s
->red_left_pad
,
745 else if (p
> addr
+ 16)
746 print_section(KERN_ERR
, "Bytes b4 ", p
- 16, 16);
748 print_section(KERN_ERR
, "Object ", p
,
749 min_t(unsigned int, s
->object_size
, PAGE_SIZE
));
750 if (s
->flags
& SLAB_RED_ZONE
)
751 print_section(KERN_ERR
, "Redzone ", p
+ s
->object_size
,
752 s
->inuse
- s
->object_size
);
754 off
= get_info_end(s
);
756 if (s
->flags
& SLAB_STORE_USER
)
757 off
+= 2 * sizeof(struct track
);
759 off
+= kasan_metadata_size(s
);
761 if (off
!= size_from_object(s
))
762 /* Beginning of the filler is the free pointer */
763 print_section(KERN_ERR
, "Padding ", p
+ off
,
764 size_from_object(s
) - off
);
769 void object_err(struct kmem_cache
*s
, struct page
*page
,
770 u8
*object
, char *reason
)
772 if (slab_add_kunit_errors())
775 slab_bug(s
, "%s", reason
);
776 print_trailer(s
, page
, object
);
777 add_taint(TAINT_BAD_PAGE
, LOCKDEP_NOW_UNRELIABLE
);
780 static __printf(3, 4) void slab_err(struct kmem_cache
*s
, struct page
*page
,
781 const char *fmt
, ...)
786 if (slab_add_kunit_errors())
790 vsnprintf(buf
, sizeof(buf
), fmt
, args
);
792 slab_bug(s
, "%s", buf
);
793 print_page_info(page
);
795 add_taint(TAINT_BAD_PAGE
, LOCKDEP_NOW_UNRELIABLE
);
798 static void init_object(struct kmem_cache
*s
, void *object
, u8 val
)
800 u8
*p
= kasan_reset_tag(object
);
802 if (s
->flags
& SLAB_RED_ZONE
)
803 memset(p
- s
->red_left_pad
, val
, s
->red_left_pad
);
805 if (s
->flags
& __OBJECT_POISON
) {
806 memset(p
, POISON_FREE
, s
->object_size
- 1);
807 p
[s
->object_size
- 1] = POISON_END
;
810 if (s
->flags
& SLAB_RED_ZONE
)
811 memset(p
+ s
->object_size
, val
, s
->inuse
- s
->object_size
);
814 static void restore_bytes(struct kmem_cache
*s
, char *message
, u8 data
,
815 void *from
, void *to
)
817 slab_fix(s
, "Restoring %s 0x%p-0x%p=0x%x", message
, from
, to
- 1, data
);
818 memset(from
, data
, to
- from
);
821 static int check_bytes_and_report(struct kmem_cache
*s
, struct page
*page
,
822 u8
*object
, char *what
,
823 u8
*start
, unsigned int value
, unsigned int bytes
)
827 u8
*addr
= page_address(page
);
829 metadata_access_enable();
830 fault
= memchr_inv(kasan_reset_tag(start
), value
, bytes
);
831 metadata_access_disable();
836 while (end
> fault
&& end
[-1] == value
)
839 if (slab_add_kunit_errors())
842 slab_bug(s
, "%s overwritten", what
);
843 pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
844 fault
, end
- 1, fault
- addr
,
846 print_trailer(s
, page
, object
);
847 add_taint(TAINT_BAD_PAGE
, LOCKDEP_NOW_UNRELIABLE
);
850 restore_bytes(s
, what
, value
, fault
, end
);
/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 *	pointer is at the middle of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 *	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
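/*
 * Illustrative example (hypothetical numbers, not from the original source):
 * on a 64-bit build, a cache with object_size == 100, SLAB_RED_ZONE and
 * SLAB_STORE_USER might round the object up to inuse == 104 (the padding
 * doubling as the right redzone), place the free pointer at offset 104,
 * follow it with two struct track records, and put red_left_pad in front of
 * the object; all of that together makes up s->size.
 */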
892 static int check_pad_bytes(struct kmem_cache
*s
, struct page
*page
, u8
*p
)
894 unsigned long off
= get_info_end(s
); /* The end of info */
896 if (s
->flags
& SLAB_STORE_USER
)
897 /* We also have user information there */
898 off
+= 2 * sizeof(struct track
);
900 off
+= kasan_metadata_size(s
);
902 if (size_from_object(s
) == off
)
905 return check_bytes_and_report(s
, page
, p
, "Object padding",
906 p
+ off
, POISON_INUSE
, size_from_object(s
) - off
);
909 /* Check the pad bytes at the end of a slab page */
910 static int slab_pad_check(struct kmem_cache
*s
, struct page
*page
)
919 if (!(s
->flags
& SLAB_POISON
))
922 start
= page_address(page
);
923 length
= page_size(page
);
924 end
= start
+ length
;
925 remainder
= length
% s
->size
;
929 pad
= end
- remainder
;
930 metadata_access_enable();
931 fault
= memchr_inv(kasan_reset_tag(pad
), POISON_INUSE
, remainder
);
932 metadata_access_disable();
935 while (end
> fault
&& end
[-1] == POISON_INUSE
)
938 slab_err(s
, page
, "Padding overwritten. 0x%p-0x%p @offset=%tu",
939 fault
, end
- 1, fault
- start
);
940 print_section(KERN_ERR
, "Padding ", pad
, remainder
);
942 restore_bytes(s
, "slab padding", POISON_INUSE
, fault
, end
);
946 static int check_object(struct kmem_cache
*s
, struct page
*page
,
947 void *object
, u8 val
)
950 u8
*endobject
= object
+ s
->object_size
;
952 if (s
->flags
& SLAB_RED_ZONE
) {
953 if (!check_bytes_and_report(s
, page
, object
, "Left Redzone",
954 object
- s
->red_left_pad
, val
, s
->red_left_pad
))
957 if (!check_bytes_and_report(s
, page
, object
, "Right Redzone",
958 endobject
, val
, s
->inuse
- s
->object_size
))
961 if ((s
->flags
& SLAB_POISON
) && s
->object_size
< s
->inuse
) {
962 check_bytes_and_report(s
, page
, p
, "Alignment padding",
963 endobject
, POISON_INUSE
,
964 s
->inuse
- s
->object_size
);
968 if (s
->flags
& SLAB_POISON
) {
969 if (val
!= SLUB_RED_ACTIVE
&& (s
->flags
& __OBJECT_POISON
) &&
970 (!check_bytes_and_report(s
, page
, p
, "Poison", p
,
971 POISON_FREE
, s
->object_size
- 1) ||
972 !check_bytes_and_report(s
, page
, p
, "End Poison",
973 p
+ s
->object_size
- 1, POISON_END
, 1)))
976 * check_pad_bytes cleans up on its own.
978 check_pad_bytes(s
, page
, p
);
981 if (!freeptr_outside_object(s
) && val
== SLUB_RED_ACTIVE
)
983 * Object and freepointer overlap. Cannot check
984 * freepointer while object is allocated.
988 /* Check free pointer validity */
989 if (!check_valid_pointer(s
, page
, get_freepointer(s
, p
))) {
990 object_err(s
, page
, p
, "Freepointer corrupt");
992 * No choice but to zap it and thus lose the remainder
993 * of the free objects in this slab. May cause
994 * another error because the object count is now wrong.
996 set_freepointer(s
, p
, NULL
);
1002 static int check_slab(struct kmem_cache
*s
, struct page
*page
)
1006 VM_BUG_ON(!irqs_disabled());
1008 if (!PageSlab(page
)) {
1009 slab_err(s
, page
, "Not a valid slab page");
1013 maxobj
= order_objects(compound_order(page
), s
->size
);
1014 if (page
->objects
> maxobj
) {
1015 slab_err(s
, page
, "objects %u > max %u",
1016 page
->objects
, maxobj
);
1019 if (page
->inuse
> page
->objects
) {
1020 slab_err(s
, page
, "inuse %u > max %u",
1021 page
->inuse
, page
->objects
);
1024 /* Slab_pad_check fixes things up after itself */
1025 slab_pad_check(s
, page
);
1030 * Determine if a certain object on a page is on the freelist. Must hold the
1031 * slab lock to guarantee that the chains are in a consistent state.
1033 static int on_freelist(struct kmem_cache
*s
, struct page
*page
, void *search
)
1037 void *object
= NULL
;
1040 fp
= page
->freelist
;
1041 while (fp
&& nr
<= page
->objects
) {
1044 if (!check_valid_pointer(s
, page
, fp
)) {
1046 object_err(s
, page
, object
,
1047 "Freechain corrupt");
1048 set_freepointer(s
, object
, NULL
);
1050 slab_err(s
, page
, "Freepointer corrupt");
1051 page
->freelist
= NULL
;
1052 page
->inuse
= page
->objects
;
1053 slab_fix(s
, "Freelist cleared");
1059 fp
= get_freepointer(s
, object
);
1063 max_objects
= order_objects(compound_order(page
), s
->size
);
1064 if (max_objects
> MAX_OBJS_PER_PAGE
)
1065 max_objects
= MAX_OBJS_PER_PAGE
;
1067 if (page
->objects
!= max_objects
) {
1068 slab_err(s
, page
, "Wrong number of objects. Found %d but should be %d",
1069 page
->objects
, max_objects
);
1070 page
->objects
= max_objects
;
1071 slab_fix(s
, "Number of objects adjusted");
1073 if (page
->inuse
!= page
->objects
- nr
) {
1074 slab_err(s
, page
, "Wrong object count. Counter is %d but counted were %d",
1075 page
->inuse
, page
->objects
- nr
);
1076 page
->inuse
= page
->objects
- nr
;
1077 slab_fix(s
, "Object count adjusted");
1079 return search
== NULL
;
1082 static void trace(struct kmem_cache
*s
, struct page
*page
, void *object
,
1085 if (s
->flags
& SLAB_TRACE
) {
1086 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1088 alloc
? "alloc" : "free",
1089 object
, page
->inuse
,
1093 print_section(KERN_INFO
, "Object ", (void *)object
,
1101 * Tracking of fully allocated slabs for debugging purposes.
1103 static void add_full(struct kmem_cache
*s
,
1104 struct kmem_cache_node
*n
, struct page
*page
)
1106 if (!(s
->flags
& SLAB_STORE_USER
))
1109 lockdep_assert_held(&n
->list_lock
);
1110 list_add(&page
->slab_list
, &n
->full
);
1113 static void remove_full(struct kmem_cache
*s
, struct kmem_cache_node
*n
, struct page
*page
)
1115 if (!(s
->flags
& SLAB_STORE_USER
))
1118 lockdep_assert_held(&n
->list_lock
);
1119 list_del(&page
->slab_list
);
1122 /* Tracking of the number of slabs for debugging purposes */
1123 static inline unsigned long slabs_node(struct kmem_cache
*s
, int node
)
1125 struct kmem_cache_node
*n
= get_node(s
, node
);
1127 return atomic_long_read(&n
->nr_slabs
);
1130 static inline unsigned long node_nr_slabs(struct kmem_cache_node
*n
)
1132 return atomic_long_read(&n
->nr_slabs
);
1135 static inline void inc_slabs_node(struct kmem_cache
*s
, int node
, int objects
)
1137 struct kmem_cache_node
*n
= get_node(s
, node
);
1140 * May be called early in order to allocate a slab for the
1141 * kmem_cache_node structure. Solve the chicken-egg
1142 * dilemma by deferring the increment of the count during
1143 * bootstrap (see early_kmem_cache_node_alloc).
1146 atomic_long_inc(&n
->nr_slabs
);
1147 atomic_long_add(objects
, &n
->total_objects
);
1150 static inline void dec_slabs_node(struct kmem_cache
*s
, int node
, int objects
)
1152 struct kmem_cache_node
*n
= get_node(s
, node
);
1154 atomic_long_dec(&n
->nr_slabs
);
1155 atomic_long_sub(objects
, &n
->total_objects
);
1158 /* Object debug checks for alloc/free paths */
1159 static void setup_object_debug(struct kmem_cache
*s
, struct page
*page
,
1162 if (!kmem_cache_debug_flags(s
, SLAB_STORE_USER
|SLAB_RED_ZONE
|__OBJECT_POISON
))
1165 init_object(s
, object
, SLUB_RED_INACTIVE
);
1166 init_tracking(s
, object
);
1170 void setup_page_debug(struct kmem_cache
*s
, struct page
*page
, void *addr
)
1172 if (!kmem_cache_debug_flags(s
, SLAB_POISON
))
1175 metadata_access_enable();
1176 memset(kasan_reset_tag(addr
), POISON_INUSE
, page_size(page
));
1177 metadata_access_disable();
1180 static inline int alloc_consistency_checks(struct kmem_cache
*s
,
1181 struct page
*page
, void *object
)
1183 if (!check_slab(s
, page
))
1186 if (!check_valid_pointer(s
, page
, object
)) {
1187 object_err(s
, page
, object
, "Freelist Pointer check fails");
1191 if (!check_object(s
, page
, object
, SLUB_RED_INACTIVE
))
1197 static noinline
int alloc_debug_processing(struct kmem_cache
*s
,
1199 void *object
, unsigned long addr
)
1201 if (s
->flags
& SLAB_CONSISTENCY_CHECKS
) {
1202 if (!alloc_consistency_checks(s
, page
, object
))
1206 /* Success perform special debug activities for allocs */
1207 if (s
->flags
& SLAB_STORE_USER
)
1208 set_track(s
, object
, TRACK_ALLOC
, addr
);
1209 trace(s
, page
, object
, 1);
1210 init_object(s
, object
, SLUB_RED_ACTIVE
);
1214 if (PageSlab(page
)) {
1216 * If this is a slab page then lets do the best we can
1217 * to avoid issues in the future. Marking all objects
1218 * as used avoids touching the remaining objects.
1220 slab_fix(s
, "Marking all objects used");
1221 page
->inuse
= page
->objects
;
1222 page
->freelist
= NULL
;
1227 static inline int free_consistency_checks(struct kmem_cache
*s
,
1228 struct page
*page
, void *object
, unsigned long addr
)
1230 if (!check_valid_pointer(s
, page
, object
)) {
1231 slab_err(s
, page
, "Invalid object pointer 0x%p", object
);
1235 if (on_freelist(s
, page
, object
)) {
1236 object_err(s
, page
, object
, "Object already free");
1240 if (!check_object(s
, page
, object
, SLUB_RED_ACTIVE
))
1243 if (unlikely(s
!= page
->slab_cache
)) {
1244 if (!PageSlab(page
)) {
1245 slab_err(s
, page
, "Attempt to free object(0x%p) outside of slab",
1247 } else if (!page
->slab_cache
) {
1248 pr_err("SLUB <none>: no slab for object 0x%p.\n",
1252 object_err(s
, page
, object
,
1253 "page slab pointer corrupt.");
1259 /* Supports checking bulk free of a constructed freelist */
1260 static noinline
int free_debug_processing(
1261 struct kmem_cache
*s
, struct page
*page
,
1262 void *head
, void *tail
, int bulk_cnt
,
1265 struct kmem_cache_node
*n
= get_node(s
, page_to_nid(page
));
1266 void *object
= head
;
1268 unsigned long flags
;
1271 spin_lock_irqsave(&n
->list_lock
, flags
);
1274 if (s
->flags
& SLAB_CONSISTENCY_CHECKS
) {
1275 if (!check_slab(s
, page
))
1282 if (s
->flags
& SLAB_CONSISTENCY_CHECKS
) {
1283 if (!free_consistency_checks(s
, page
, object
, addr
))
1287 if (s
->flags
& SLAB_STORE_USER
)
1288 set_track(s
, object
, TRACK_FREE
, addr
);
1289 trace(s
, page
, object
, 0);
1290 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1291 init_object(s
, object
, SLUB_RED_INACTIVE
);
1293 /* Reached end of constructed freelist yet? */
1294 if (object
!= tail
) {
1295 object
= get_freepointer(s
, object
);
1301 if (cnt
!= bulk_cnt
)
1302 slab_err(s
, page
, "Bulk freelist count(%d) invalid(%d)\n",
1306 spin_unlock_irqrestore(&n
->list_lock
, flags
);
1308 slab_fix(s
, "Object at 0x%p not freed", object
);
1313 * Parse a block of slub_debug options. Blocks are delimited by ';'
1315 * @str: start of block
1316 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
1317 * @slabs: return start of list of slabs, or NULL when there's no list
1318 * @init: assume this is initial parsing and not per-kmem-create parsing
1320 * returns the start of next block if there's any, or NULL
1323 parse_slub_debug_flags(char *str
, slab_flags_t
*flags
, char **slabs
, bool init
)
1325 bool higher_order_disable
= false;
1327 /* Skip any completely empty blocks */
1328 while (*str
&& *str
== ';')
1333 * No options but restriction on slabs. This means full
1334 * debugging for slabs matching a pattern.
1336 *flags
= DEBUG_DEFAULT_FLAGS
;
1341 /* Determine which debug features should be switched on */
1342 for (; *str
&& *str
!= ',' && *str
!= ';'; str
++) {
1343 switch (tolower(*str
)) {
1348 *flags
|= SLAB_CONSISTENCY_CHECKS
;
1351 *flags
|= SLAB_RED_ZONE
;
1354 *flags
|= SLAB_POISON
;
1357 *flags
|= SLAB_STORE_USER
;
1360 *flags
|= SLAB_TRACE
;
1363 *flags
|= SLAB_FAILSLAB
;
1367 * Avoid enabling debugging on caches if its minimum
1368 * order would increase as a result.
1370 higher_order_disable
= true;
1374 pr_err("slub_debug option '%c' unknown. skipped\n", *str
);
1383 /* Skip over the slab list */
1384 while (*str
&& *str
!= ';')
1387 /* Skip any completely empty blocks */
1388 while (*str
&& *str
== ';')
1391 if (init
&& higher_order_disable
)
1392 disable_higher_order_debug
= 1;
1400 static int __init
setup_slub_debug(char *str
)
1403 slab_flags_t global_flags
;
1406 bool global_slub_debug_changed
= false;
1407 bool slab_list_specified
= false;
1409 global_flags
= DEBUG_DEFAULT_FLAGS
;
1410 if (*str
++ != '=' || !*str
)
1412 * No options specified. Switch on full debugging.
1418 str
= parse_slub_debug_flags(str
, &flags
, &slab_list
, true);
1421 global_flags
= flags
;
1422 global_slub_debug_changed
= true;
1424 slab_list_specified
= true;
1429 * For backwards compatibility, a single list of flags with list of
1430 * slabs means debugging is only changed for those slabs, so the global
1431 * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
1432 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
1433 * long as there is no option specifying flags without a slab list.
1435 if (slab_list_specified
) {
1436 if (!global_slub_debug_changed
)
1437 global_flags
= slub_debug
;
1438 slub_debug_string
= saved_str
;
1441 slub_debug
= global_flags
;
1442 if (slub_debug
!= 0 || slub_debug_string
)
1443 static_branch_enable(&slub_debug_enabled
);
1445 static_branch_disable(&slub_debug_enabled
);
1446 if ((static_branch_unlikely(&init_on_alloc
) ||
1447 static_branch_unlikely(&init_on_free
)) &&
1448 (slub_debug
& SLAB_POISON
))
1449 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1453 __setup("slub_debug", setup_slub_debug
);
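/*
 * Illustrative examples (assumed, see Documentation/vm/slub.rst for the
 * authoritative syntax): booting with "slub_debug=FZ" enables sanity checks
 * and red zoning for every cache, while "slub_debug=P,kmalloc-64" poisons
 * only the kmalloc-64 cache. Multiple blocks can be chained with ';', e.g.
 * "slub_debug=F,dentry;Z,kmalloc-*".
 */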
1456 * kmem_cache_flags - apply debugging options to the cache
1457 * @object_size: the size of an object without meta data
1458 * @flags: flags to set
1459 * @name: name of the cache
1461 * Debug option(s) are applied to @flags. In addition to the debug
1462 * option(s), if a slab name (or multiple) is specified i.e.
1463 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1464 * then only the select slabs will receive the debug option(s).
1466 slab_flags_t
kmem_cache_flags(unsigned int object_size
,
1467 slab_flags_t flags
, const char *name
)
1472 slab_flags_t block_flags
;
1473 slab_flags_t slub_debug_local
= slub_debug
;
1476 * If the slab cache is for debugging (e.g. kmemleak) then
1477 * don't store user (stack trace) information by default,
1478 * but let the user enable it via the command line below.
1480 if (flags
& SLAB_NOLEAKTRACE
)
1481 slub_debug_local
&= ~SLAB_STORE_USER
;
1484 next_block
= slub_debug_string
;
1485 /* Go through all blocks of debug options, see if any matches our slab's name */
1486 while (next_block
) {
1487 next_block
= parse_slub_debug_flags(next_block
, &block_flags
, &iter
, false);
1490 /* Found a block that has a slab list, search it */
1495 end
= strchrnul(iter
, ',');
1496 if (next_block
&& next_block
< end
)
1497 end
= next_block
- 1;
1499 glob
= strnchr(iter
, end
- iter
, '*');
1501 cmplen
= glob
- iter
;
1503 cmplen
= max_t(size_t, len
, (end
- iter
));
1505 if (!strncmp(name
, iter
, cmplen
)) {
1506 flags
|= block_flags
;
1510 if (!*end
|| *end
== ';')
1516 return flags
| slub_debug_local
;
1518 #else /* !CONFIG_SLUB_DEBUG */
1519 static inline void setup_object_debug(struct kmem_cache
*s
,
1520 struct page
*page
, void *object
) {}
1522 void setup_page_debug(struct kmem_cache
*s
, struct page
*page
, void *addr
) {}
1524 static inline int alloc_debug_processing(struct kmem_cache
*s
,
1525 struct page
*page
, void *object
, unsigned long addr
) { return 0; }
1527 static inline int free_debug_processing(
1528 struct kmem_cache
*s
, struct page
*page
,
1529 void *head
, void *tail
, int bulk_cnt
,
1530 unsigned long addr
) { return 0; }
1532 static inline int slab_pad_check(struct kmem_cache
*s
, struct page
*page
)
1534 static inline int check_object(struct kmem_cache
*s
, struct page
*page
,
1535 void *object
, u8 val
) { return 1; }
1536 static inline void add_full(struct kmem_cache
*s
, struct kmem_cache_node
*n
,
1537 struct page
*page
) {}
1538 static inline void remove_full(struct kmem_cache
*s
, struct kmem_cache_node
*n
,
1539 struct page
*page
) {}
1540 slab_flags_t
kmem_cache_flags(unsigned int object_size
,
1541 slab_flags_t flags
, const char *name
)
1545 #define slub_debug 0
1547 #define disable_higher_order_debug 0
1549 static inline unsigned long slabs_node(struct kmem_cache
*s
, int node
)
1551 static inline unsigned long node_nr_slabs(struct kmem_cache_node
*n
)
1553 static inline void inc_slabs_node(struct kmem_cache
*s
, int node
,
1555 static inline void dec_slabs_node(struct kmem_cache
*s
, int node
,
1558 static bool freelist_corrupted(struct kmem_cache
*s
, struct page
*page
,
1559 void **freelist
, void *nextfree
)
1563 #endif /* CONFIG_SLUB_DEBUG */
1566 * Hooks for other subsystems that check memory allocations. In a typical
1567 * production configuration these hooks all should produce no code at all.
1569 static inline void *kmalloc_large_node_hook(void *ptr
, size_t size
, gfp_t flags
)
1571 ptr
= kasan_kmalloc_large(ptr
, size
, flags
);
1572 /* As ptr might get tagged, call kmemleak hook after KASAN. */
1573 kmemleak_alloc(ptr
, size
, 1, flags
);
1577 static __always_inline
void kfree_hook(void *x
)
1580 kasan_kfree_large(x
);
1583 static __always_inline
bool slab_free_hook(struct kmem_cache
*s
,
1586 kmemleak_free_recursive(x
, s
->flags
);
1589 * Trouble is that we may no longer disable interrupts in the fast path
1590 * So in order to make the debug calls that expect irqs to be
1591 * disabled we need to disable interrupts temporarily.
1593 #ifdef CONFIG_LOCKDEP
1595 unsigned long flags
;
1597 local_irq_save(flags
);
1598 debug_check_no_locks_freed(x
, s
->object_size
);
1599 local_irq_restore(flags
);
1602 if (!(s
->flags
& SLAB_DEBUG_OBJECTS
))
1603 debug_check_no_obj_freed(x
, s
->object_size
);
1605 /* Use KCSAN to help debug racy use-after-free. */
1606 if (!(s
->flags
& SLAB_TYPESAFE_BY_RCU
))
1607 __kcsan_check_access(x
, s
->object_size
,
1608 KCSAN_ACCESS_WRITE
| KCSAN_ACCESS_ASSERT
);
1611 * As memory initialization might be integrated into KASAN,
1612 * kasan_slab_free and initialization memset's must be
1613 * kept together to avoid discrepancies in behavior.
1615 * The initialization memset's clear the object and the metadata,
1616 * but don't touch the SLAB redzone.
1621 if (!kasan_has_integrated_init())
1622 memset(kasan_reset_tag(x
), 0, s
->object_size
);
1623 rsize
= (s
->flags
& SLAB_RED_ZONE
) ? s
->red_left_pad
: 0;
1624 memset((char *)kasan_reset_tag(x
) + s
->inuse
, 0,
1625 s
->size
- s
->inuse
- rsize
);
1627 /* KASAN might put x into memory quarantine, delaying its reuse. */
1628 return kasan_slab_free(s
, x
, init
);
1631 static inline bool slab_free_freelist_hook(struct kmem_cache
*s
,
1632 void **head
, void **tail
)
1637 void *old_tail
= *tail
? *tail
: *head
;
1639 if (is_kfence_address(next
)) {
1640 slab_free_hook(s
, next
, false);
1644 /* Head and tail of the reconstructed freelist */
1650 next
= get_freepointer(s
, object
);
1652 /* If object's reuse doesn't have to be delayed */
1653 if (!slab_free_hook(s
, object
, slab_want_init_on_free(s
))) {
1654 /* Move object to the new freelist */
1655 set_freepointer(s
, object
, *head
);
1660 } while (object
!= old_tail
);
1665 return *head
!= NULL
;
1668 static void *setup_object(struct kmem_cache
*s
, struct page
*page
,
1671 setup_object_debug(s
, page
, object
);
1672 object
= kasan_init_slab_obj(s
, object
);
1673 if (unlikely(s
->ctor
)) {
1674 kasan_unpoison_object_data(s
, object
);
1676 kasan_poison_object_data(s
, object
);
1682 * Slab allocation and freeing
1684 static inline struct page
*alloc_slab_page(struct kmem_cache
*s
,
1685 gfp_t flags
, int node
, struct kmem_cache_order_objects oo
)
1688 unsigned int order
= oo_order(oo
);
1690 if (node
== NUMA_NO_NODE
)
1691 page
= alloc_pages(flags
, order
);
1693 page
= __alloc_pages_node(node
, flags
, order
);
1698 #ifdef CONFIG_SLAB_FREELIST_RANDOM
1699 /* Pre-initialize the random sequence cache */
1700 static int init_cache_random_seq(struct kmem_cache
*s
)
1702 unsigned int count
= oo_objects(s
->oo
);
1705 /* Bailout if already initialised */
1709 err
= cache_random_seq_create(s
, count
, GFP_KERNEL
);
1711 pr_err("SLUB: Unable to initialize free list for %s\n",
1716 /* Transform to an offset on the set of pages */
1717 if (s
->random_seq
) {
1720 for (i
= 0; i
< count
; i
++)
1721 s
->random_seq
[i
] *= s
->size
;
1726 /* Initialize each random sequence freelist per cache */
1727 static void __init
init_freelist_randomization(void)
1729 struct kmem_cache
*s
;
1731 mutex_lock(&slab_mutex
);
1733 list_for_each_entry(s
, &slab_caches
, list
)
1734 init_cache_random_seq(s
);
1736 mutex_unlock(&slab_mutex
);
1739 /* Get the next entry on the pre-computed freelist randomized */
1740 static void *next_freelist_entry(struct kmem_cache
*s
, struct page
*page
,
1741 unsigned long *pos
, void *start
,
1742 unsigned long page_limit
,
1743 unsigned long freelist_count
)
1748 * If the target page allocation failed, the number of objects on the
1749 * page might be smaller than the usual size defined by the cache.
1752 idx
= s
->random_seq
[*pos
];
1754 if (*pos
>= freelist_count
)
1756 } while (unlikely(idx
>= page_limit
));
1758 return (char *)start
+ idx
;
1761 /* Shuffle the single linked freelist based on a random pre-computed sequence */
1762 static bool shuffle_freelist(struct kmem_cache
*s
, struct page
*page
)
1767 unsigned long idx
, pos
, page_limit
, freelist_count
;
1769 if (page
->objects
< 2 || !s
->random_seq
)
1772 freelist_count
= oo_objects(s
->oo
);
1773 pos
= get_random_int() % freelist_count
;
1775 page_limit
= page
->objects
* s
->size
;
1776 start
= fixup_red_left(s
, page_address(page
));
1778 /* First entry is used as the base of the freelist */
1779 cur
= next_freelist_entry(s
, page
, &pos
, start
, page_limit
,
1781 cur
= setup_object(s
, page
, cur
);
1782 page
->freelist
= cur
;
1784 for (idx
= 1; idx
< page
->objects
; idx
++) {
1785 next
= next_freelist_entry(s
, page
, &pos
, start
, page_limit
,
1787 next
= setup_object(s
, page
, next
);
1788 set_freepointer(s
, cur
, next
);
1791 set_freepointer(s
, cur
, NULL
);
1796 static inline int init_cache_random_seq(struct kmem_cache
*s
)
1800 static inline void init_freelist_randomization(void) { }
1801 static inline bool shuffle_freelist(struct kmem_cache
*s
, struct page
*page
)
1805 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
1807 static struct page
*allocate_slab(struct kmem_cache
*s
, gfp_t flags
, int node
)
1810 struct kmem_cache_order_objects oo
= s
->oo
;
1812 void *start
, *p
, *next
;
1816 flags
&= gfp_allowed_mask
;
1818 if (gfpflags_allow_blocking(flags
))
1821 flags
|= s
->allocflags
;
1824 * Let the initial higher-order allocation fail under memory pressure
1825 * so we fall-back to the minimum order allocation.
1827 alloc_gfp
= (flags
| __GFP_NOWARN
| __GFP_NORETRY
) & ~__GFP_NOFAIL
;
1828 if ((alloc_gfp
& __GFP_DIRECT_RECLAIM
) && oo_order(oo
) > oo_order(s
->min
))
1829 alloc_gfp
= (alloc_gfp
| __GFP_NOMEMALLOC
) & ~(__GFP_RECLAIM
|__GFP_NOFAIL
);
1831 page
= alloc_slab_page(s
, alloc_gfp
, node
, oo
);
1832 if (unlikely(!page
)) {
1836 * Allocation may have failed due to fragmentation.
1837 * Try a lower order alloc if possible
1839 page
= alloc_slab_page(s
, alloc_gfp
, node
, oo
);
1840 if (unlikely(!page
))
1842 stat(s
, ORDER_FALLBACK
);
1845 page
->objects
= oo_objects(oo
);
1847 account_slab_page(page
, oo_order(oo
), s
, flags
);
1849 page
->slab_cache
= s
;
1850 __SetPageSlab(page
);
1851 if (page_is_pfmemalloc(page
))
1852 SetPageSlabPfmemalloc(page
);
1854 kasan_poison_slab(page
);
1856 start
= page_address(page
);
1858 setup_page_debug(s
, page
, start
);
1860 shuffle
= shuffle_freelist(s
, page
);
1863 start
= fixup_red_left(s
, start
);
1864 start
= setup_object(s
, page
, start
);
1865 page
->freelist
= start
;
1866 for (idx
= 0, p
= start
; idx
< page
->objects
- 1; idx
++) {
1868 next
= setup_object(s
, page
, next
);
1869 set_freepointer(s
, p
, next
);
1872 set_freepointer(s
, p
, NULL
);
1875 page
->inuse
= page
->objects
;
1879 if (gfpflags_allow_blocking(flags
))
1880 local_irq_disable();
1884 inc_slabs_node(s
, page_to_nid(page
), page
->objects
);
1889 static struct page
*new_slab(struct kmem_cache
*s
, gfp_t flags
, int node
)
1891 if (unlikely(flags
& GFP_SLAB_BUG_MASK
))
1892 flags
= kmalloc_fix_flags(flags
);
1894 return allocate_slab(s
,
1895 flags
& (GFP_RECLAIM_MASK
| GFP_CONSTRAINT_MASK
), node
);
1898 static void __free_slab(struct kmem_cache
*s
, struct page
*page
)
1900 int order
= compound_order(page
);
1901 int pages
= 1 << order
;
1903 if (kmem_cache_debug_flags(s
, SLAB_CONSISTENCY_CHECKS
)) {
1906 slab_pad_check(s
, page
);
1907 for_each_object(p
, s
, page_address(page
),
1909 check_object(s
, page
, p
, SLUB_RED_INACTIVE
);
1912 __ClearPageSlabPfmemalloc(page
);
1913 __ClearPageSlab(page
);
1914 /* In union with page->mapping where page allocator expects NULL */
1915 page
->slab_cache
= NULL
;
1916 if (current
->reclaim_state
)
1917 current
->reclaim_state
->reclaimed_slab
+= pages
;
1918 unaccount_slab_page(page
, order
, s
);
1919 __free_pages(page
, order
);
1922 static void rcu_free_slab(struct rcu_head
*h
)
1924 struct page
*page
= container_of(h
, struct page
, rcu_head
);
1926 __free_slab(page
->slab_cache
, page
);
1929 static void free_slab(struct kmem_cache
*s
, struct page
*page
)
1931 if (unlikely(s
->flags
& SLAB_TYPESAFE_BY_RCU
)) {
1932 call_rcu(&page
->rcu_head
, rcu_free_slab
);
1934 __free_slab(s
, page
);
1937 static void discard_slab(struct kmem_cache
*s
, struct page
*page
)
1939 dec_slabs_node(s
, page_to_nid(page
), page
->objects
);
1944 * Management of partially allocated slabs.
1947 __add_partial(struct kmem_cache_node
*n
, struct page
*page
, int tail
)
1950 if (tail
== DEACTIVATE_TO_TAIL
)
1951 list_add_tail(&page
->slab_list
, &n
->partial
);
1953 list_add(&page
->slab_list
, &n
->partial
);
1956 static inline void add_partial(struct kmem_cache_node
*n
,
1957 struct page
*page
, int tail
)
1959 lockdep_assert_held(&n
->list_lock
);
1960 __add_partial(n
, page
, tail
);
1963 static inline void remove_partial(struct kmem_cache_node
*n
,
1966 lockdep_assert_held(&n
->list_lock
);
1967 list_del(&page
->slab_list
);
1972 * Remove slab from the partial list, freeze it and
1973 * return the pointer to the freelist.
1975 * Returns a list of objects or NULL if it fails.
1977 static inline void *acquire_slab(struct kmem_cache
*s
,
1978 struct kmem_cache_node
*n
, struct page
*page
,
1979 int mode
, int *objects
)
1982 unsigned long counters
;
1985 lockdep_assert_held(&n
->list_lock
);
1988 * Zap the freelist and set the frozen bit.
1989 * The old freelist is the list of objects for the
1990 * per cpu allocation list.
1992 freelist
= page
->freelist
;
1993 counters
= page
->counters
;
1994 new.counters
= counters
;
1995 *objects
= new.objects
- new.inuse
;
1997 new.inuse
= page
->objects
;
1998 new.freelist
= NULL
;
2000 new.freelist
= freelist
;
2003 VM_BUG_ON(new.frozen
);
2006 if (!__cmpxchg_double_slab(s
, page
,
2008 new.freelist
, new.counters
,
2012 remove_partial(n
, page
);
2017 static void put_cpu_partial(struct kmem_cache
*s
, struct page
*page
, int drain
);
2018 static inline bool pfmemalloc_match(struct page
*page
, gfp_t gfpflags
);
2021 * Try to allocate a partial slab from a specific node.
2023 static void *get_partial_node(struct kmem_cache
*s
, struct kmem_cache_node
*n
,
2024 struct kmem_cache_cpu
*c
, gfp_t flags
)
2026 struct page
*page
, *page2
;
2027 void *object
= NULL
;
2028 unsigned int available
= 0;
2032 * Racy check. If we mistakenly see no partial slabs then we
2033 * just allocate an empty slab. If we mistakenly try to get a
2034 * partial slab and there is none available then get_partial()
2037 if (!n
|| !n
->nr_partial
)
2040 spin_lock(&n
->list_lock
);
2041 list_for_each_entry_safe(page
, page2
, &n
->partial
, slab_list
) {
2044 if (!pfmemalloc_match(page
, flags
))
2047 t
= acquire_slab(s
, n
, page
, object
== NULL
, &objects
);
2051 available
+= objects
;
2054 stat(s
, ALLOC_FROM_PARTIAL
);
2057 put_cpu_partial(s
, page
, 0);
2058 stat(s
, CPU_PARTIAL_NODE
);
2060 if (!kmem_cache_has_cpu_partial(s
)
2061 || available
> slub_cpu_partial(s
) / 2)
2065 spin_unlock(&n
->list_lock
);
2070 * Get a page from somewhere. Search in increasing NUMA distances.
2072 static void *get_any_partial(struct kmem_cache
*s
, gfp_t flags
,
2073 struct kmem_cache_cpu
*c
)
2076 struct zonelist
*zonelist
;
2079 enum zone_type highest_zoneidx
= gfp_zone(flags
);
2081 unsigned int cpuset_mems_cookie
;
2084 * The defrag ratio allows a configuration of the tradeoffs between
2085 * inter node defragmentation and node local allocations. A lower
2086 * defrag_ratio increases the tendency to do local allocations
2087 * instead of attempting to obtain partial slabs from other nodes.
2089 * If the defrag_ratio is set to 0 then kmalloc() always
2090 * returns node local objects. If the ratio is higher then kmalloc()
2091 * may return off node objects because partial slabs are obtained
2092 * from other nodes and filled up.
2094 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
2095 * (which makes defrag_ratio = 1000) then every (well almost)
2096 * allocation will first attempt to defrag slab caches on other nodes.
2097 * This means scanning over all nodes to look for partial slabs which
2098 * may be expensive if we do it every time we are trying to find a slab
2099 * with available objects.
2101 if (!s
->remote_node_defrag_ratio
||
2102 get_cycles() % 1024 > s
->remote_node_defrag_ratio
)
2106 cpuset_mems_cookie
= read_mems_allowed_begin();
2107 zonelist
= node_zonelist(mempolicy_slab_node(), flags
);
2108 for_each_zone_zonelist(zone
, z
, zonelist
, highest_zoneidx
) {
2109 struct kmem_cache_node
*n
;
2111 n
= get_node(s
, zone_to_nid(zone
));
2113 if (n
&& cpuset_zone_allowed(zone
, flags
) &&
2114 n
->nr_partial
> s
->min_partial
) {
2115 object
= get_partial_node(s
, n
, c
, flags
);
2118 * Don't check read_mems_allowed_retry()
2119 * here - if mems_allowed was updated in
2120 * parallel, that was a harmless race
2121 * between allocation and the cpuset
2128 } while (read_mems_allowed_retry(cpuset_mems_cookie
));
2129 #endif /* CONFIG_NUMA */
2134 * Get a partial page, lock it and return it.
2136 static void *get_partial(struct kmem_cache
*s
, gfp_t flags
, int node
,
2137 struct kmem_cache_cpu
*c
)
2140 int searchnode
= node
;
2142 if (node
== NUMA_NO_NODE
)
2143 searchnode
= numa_mem_id();
2145 object
= get_partial_node(s
, get_node(s
, searchnode
), c
, flags
);
2146 if (object
|| node
!= NUMA_NO_NODE
)
2149 return get_any_partial(s
, flags
, c
);
2152 #ifdef CONFIG_PREEMPTION
2154 * Calculate the next globally unique transaction for disambiguation
2155 * during cmpxchg. The transactions start with the cpu number and are then
2156 * incremented by CONFIG_NR_CPUS.
2158 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
2161 * No preemption supported therefore also no need to check for
2167 static inline unsigned long next_tid(unsigned long tid
)
2169 return tid
+ TID_STEP
;
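/*
 * Illustrative example (not in the original source): if TID_STEP is 4,
 * CPU 1 walks the tid sequence 1, 5, 9, ... so tid % TID_STEP recovers the
 * CPU number and tid / TID_STEP gives a per-CPU event count, both of which
 * are only used for the debug reporting below.
 */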
2172 #ifdef SLUB_DEBUG_CMPXCHG
2173 static inline unsigned int tid_to_cpu(unsigned long tid
)
2175 return tid
% TID_STEP
;
2178 static inline unsigned long tid_to_event(unsigned long tid
)
2180 return tid
/ TID_STEP
;
2184 static inline unsigned int init_tid(int cpu
)
2189 static inline void note_cmpxchg_failure(const char *n
,
2190 const struct kmem_cache
*s
, unsigned long tid
)
2192 #ifdef SLUB_DEBUG_CMPXCHG
2193 unsigned long actual_tid
= __this_cpu_read(s
->cpu_slab
->tid
);
2195 pr_info("%s %s: cmpxchg redo ", n
, s
->name
);
2197 #ifdef CONFIG_PREEMPTION
2198 if (tid_to_cpu(tid
) != tid_to_cpu(actual_tid
))
2199 pr_warn("due to cpu change %d -> %d\n",
2200 tid_to_cpu(tid
), tid_to_cpu(actual_tid
));
2203 if (tid_to_event(tid
) != tid_to_event(actual_tid
))
2204 pr_warn("due to cpu running other code. Event %ld->%ld\n",
2205 tid_to_event(tid
), tid_to_event(actual_tid
));
2207 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2208 actual_tid
, tid
, next_tid(tid
));
2210 stat(s
, CMPXCHG_DOUBLE_CPU_FAIL
);
2213 static void init_kmem_cache_cpus(struct kmem_cache
*s
)
2217 for_each_possible_cpu(cpu
)
2218 per_cpu_ptr(s
->cpu_slab
, cpu
)->tid
= init_tid(cpu
);
/*
 * Remove the cpu slab
 */
static void deactivate_slab(struct kmem_cache *s, struct page *page,
				void *freelist, struct kmem_cache_cpu *c)
{
	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	int lock = 0, free_delta = 0;
	enum slab_modes l = M_NONE, m = M_NONE;
	void *nextfree, *freelist_iter, *freelist_tail;
	int tail = DEACTIVATE_TO_HEAD;
	struct page new;
	struct page old;

	if (page->freelist) {
		stat(s, DEACTIVATE_REMOTE_FREES);
		tail = DEACTIVATE_TO_TAIL;
	}

	/*
	 * Stage one: Count the objects on cpu's freelist as free_delta and
	 * remember the last object in freelist_tail for later splicing.
	 */
	freelist_tail = NULL;
	freelist_iter = freelist;
	while (freelist_iter) {
		nextfree = get_freepointer(s, freelist_iter);

		/*
		 * If 'nextfree' is invalid, it is possible that the object at
		 * 'freelist_iter' is already corrupted. So isolate all objects
		 * starting at 'freelist_iter' by skipping them.
		 */
		if (freelist_corrupted(s, page, &freelist_iter, nextfree))
			break;

		freelist_tail = freelist_iter;
		free_delta++;

		freelist_iter = nextfree;
	}

	/*
	 * Stage two: Unfreeze the page while splicing the per-cpu
	 * freelist to the head of page's freelist.
	 *
	 * Ensure that the page is unfrozen while the list presence
	 * reflects the actual number of objects during unfreeze.
	 *
	 * We set up the list membership and then perform a cmpxchg
	 * with the count. If there is a mismatch then the page
	 * is not unfrozen but the page is on the wrong list.
	 *
	 * Then we restart the process which may have to remove
	 * the page from the list that we just put it on again
	 * because the number of objects in the slab may have
	 * changed.
	 */
redo:

	old.freelist = READ_ONCE(page->freelist);
	old.counters = READ_ONCE(page->counters);
	VM_BUG_ON(!old.frozen);

	/* Determine target state of the slab */
	new.counters = old.counters;
	if (freelist_tail) {
		new.inuse -= free_delta;
		set_freepointer(s, freelist_tail, old.freelist);
		new.freelist = freelist;
	} else
		new.freelist = old.freelist;

	new.frozen = 0;

	if (!new.inuse && n->nr_partial >= s->min_partial)
		m = M_FREE;
	else if (new.freelist) {
		m = M_PARTIAL;
		if (!lock) {
			lock = 1;
			/*
			 * Taking the spinlock removes the possibility
			 * that acquire_slab() will see a slab page that
			 * is frozen
			 */
			spin_lock(&n->list_lock);
		}
	} else {
		m = M_FULL;
		if (kmem_cache_debug_flags(s, SLAB_STORE_USER) && !lock) {
			lock = 1;
			/*
			 * This also ensures that the scanning of full
			 * slabs from diagnostic functions will not see
			 * any frozen slabs.
			 */
			spin_lock(&n->list_lock);
		}
	}

	if (l != m) {
		if (l == M_PARTIAL)
			remove_partial(n, page);
		else if (l == M_FULL)
			remove_full(s, n, page);

		if (m == M_PARTIAL)
			add_partial(n, page, tail);
		else if (m == M_FULL)
			add_full(s, n, page);
	}

	l = m;
	if (!__cmpxchg_double_slab(s, page,
				old.freelist, old.counters,
				new.freelist, new.counters,
				"unfreezing slab"))
		goto redo;

	if (lock)
		spin_unlock(&n->list_lock);

	if (m == M_PARTIAL)
		stat(s, tail);
	else if (m == M_FULL)
		stat(s, DEACTIVATE_FULL);
	else if (m == M_FREE) {
		stat(s, DEACTIVATE_EMPTY);
		discard_slab(s, page);
		stat(s, FREE_SLAB);
	}

	c->page = NULL;
	c->freelist = NULL;
}
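/*
 * For illustration: suppose the cpu freelist handed to deactivate_slab()
 * is A -> B -> C and two objects (D, E) were freed remotely, so
 * page->freelist already holds D -> E.  Stage one finds freelist_tail == C
 * and free_delta == 3.  Stage two links C to D via set_freepointer() and
 * installs A as the new page->freelist, reducing inuse by 3 and clearing
 * the frozen bit; the slab is then filed on the node partial list, the
 * full list, or discarded depending on new.inuse and n->nr_partial.
 */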
/*
 * Unfreeze all the cpu partial slabs.
 *
 * This function must be called with interrupts disabled
 * for the cpu using c (or some other guarantee must be there
 * to guarantee no concurrent accesses).
 */
static void unfreeze_partials(struct kmem_cache *s,
		struct kmem_cache_cpu *c)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct kmem_cache_node *n = NULL, *n2 = NULL;
	struct page *page, *discard_page = NULL;

	while ((page = slub_percpu_partial(c))) {
		struct page new;
		struct page old;

		slub_set_percpu_partial(c, page);

		n2 = get_node(s, page_to_nid(page));
		if (n != n2) {
			if (n)
				spin_unlock(&n->list_lock);

			n = n2;
			spin_lock(&n->list_lock);
		}

		do {

			old.freelist = page->freelist;
			old.counters = page->counters;
			VM_BUG_ON(!old.frozen);

			new.counters = old.counters;
			new.freelist = old.freelist;

			new.frozen = 0;

		} while (!__cmpxchg_double_slab(s, page,
				old.freelist, old.counters,
				new.freelist, new.counters,
				"unfreezing slab"));

		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
			page->next = discard_page;
			discard_page = page;
		} else {
			add_partial(n, page, DEACTIVATE_TO_TAIL);
			stat(s, FREE_ADD_PARTIAL);
		}
	}

	if (n)
		spin_unlock(&n->list_lock);

	while (discard_page) {
		page = discard_page;
		discard_page = discard_page->next;

		stat(s, DEACTIVATE_EMPTY);
		discard_slab(s, page);
		stat(s, FREE_SLAB);
	}
#endif	/* CONFIG_SLUB_CPU_PARTIAL */
}
/*
 * Put a page that was just frozen (in __slab_free|get_partial_node) into a
 * partial page slot if available.
 *
 * If we did not find a slot then simply move all the partials to the
 * per node partial list.
 */
static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *oldpage;
	int pages;
	int pobjects;

	preempt_disable();
	do {
		pages = 0;
		pobjects = 0;
		oldpage = this_cpu_read(s->cpu_slab->partial);

		if (oldpage) {
			pobjects = oldpage->pobjects;
			pages = oldpage->pages;
			if (drain && pobjects > slub_cpu_partial(s)) {
				unsigned long flags;
				/*
				 * partial array is full. Move the existing
				 * set to the per node partial list.
				 */
				local_irq_save(flags);
				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
				local_irq_restore(flags);
				oldpage = NULL;
				pobjects = 0;
				pages = 0;
				stat(s, CPU_PARTIAL_DRAIN);
			}
		}

		pages++;
		pobjects += page->objects - page->inuse;

		page->pages = pages;
		page->pobjects = pobjects;
		page->next = oldpage;

	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
								!= oldpage);
	if (unlikely(!slub_cpu_partial(s))) {
		unsigned long flags;

		local_irq_save(flags);
		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
		local_irq_restore(flags);
	}
	preempt_enable();
#endif	/* CONFIG_SLUB_CPU_PARTIAL */
}
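/*
 * Rough bookkeeping example: each stashed page adds its current number of
 * free objects (page->objects - page->inuse) to pobjects.  A page frozen
 * by __slab_free() was full a moment earlier and so typically contributes
 * a single free object, while one taken over from the node partial list in
 * get_partial_node() may contribute many.  Once a drain-capable call sees
 * pobjects above slub_cpu_partial(s) (e.g. 30 for caches with objects
 * smaller than 256 bytes, see set_cpu_partial() below), the whole per-cpu
 * set is spliced back to the node via unfreeze_partials() and the counters
 * restart from this page alone.
 */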
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
	stat(s, CPUSLAB_FLUSH);
	deactivate_slab(s, c->page, c->freelist, c);

	c->tid = next_tid(c->tid);
}

/*
 * Called from IPI handler with interrupts disabled.
 */
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

	if (c->page)
		flush_slab(s, c);

	unfreeze_partials(s, c);
}

static void flush_cpu_slab(void *d)
{
	struct kmem_cache *s = d;

	__flush_cpu_slab(s, smp_processor_id());
}

static bool has_cpu_slab(int cpu, void *info)
{
	struct kmem_cache *s = info;
	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

	return c->page || slub_percpu_partial(c);
}

static void flush_all(struct kmem_cache *s)
{
	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
}

/*
 * Use the cpu notifier to ensure that the cpu slabs are flushed when
 * necessary.
 */
static int slub_cpu_dead(unsigned int cpu)
{
	struct kmem_cache *s;
	unsigned long flags;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		local_irq_save(flags);
		__flush_cpu_slab(s, cpu);
		local_irq_restore(flags);
	}
	mutex_unlock(&slab_mutex);
	return 0;
}
/*
 * Check if the objects in a per cpu structure fit numa
 * locality expectations.
 */
static inline int node_match(struct page *page, int node)
{
#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
		return 0;
#endif
	return 1;
}

#ifdef CONFIG_SLUB_DEBUG
static int count_free(struct page *page)
{
	return page->objects - page->inuse;
}

static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->total_objects);
}
#endif /* CONFIG_SLUB_DEBUG */
#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
static unsigned long count_partial(struct kmem_cache_node *n,
					int (*get_count)(struct page *))
{
	unsigned long flags;
	unsigned long x = 0;
	struct page *page;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->partial, slab_list)
		x += get_count(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
static noinline void
slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
{
#ifdef CONFIG_SLUB_DEBUG
	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	int node;
	struct kmem_cache_node *n;

	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
		return;

	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
		nid, gfpflags, &gfpflags);
	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
		s->name, s->object_size, s->size, oo_order(s->oo),
		oo_order(s->min));

	if (oo_order(s->min) > get_order(s->object_size))
		pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
			s->name);

	for_each_kmem_cache_node(s, node, n) {
		unsigned long nr_slabs;
		unsigned long nr_objs;
		unsigned long nr_free;

		nr_free  = count_partial(n, count_free);
		nr_slabs = node_nr_slabs(n);
		nr_objs  = node_nr_objs(n);

		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
			node, nr_slabs, nr_objs, nr_free);
	}
#endif
}
static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
			int node, struct kmem_cache_cpu **pc)
{
	void *freelist;
	struct kmem_cache_cpu *c = *pc;
	struct page *page;

	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));

	freelist = get_partial(s, flags, node, c);

	if (freelist)
		return freelist;

	page = new_slab(s, flags, node);
	if (page) {
		c = raw_cpu_ptr(s->cpu_slab);
		if (c->page)
			flush_slab(s, c);

		/*
		 * No other reference to the page yet so we can
		 * muck around with it freely without cmpxchg
		 */
		freelist = page->freelist;
		page->freelist = NULL;

		stat(s, ALLOC_SLAB);
		c->page = page;
		*pc = c;
	}

	return freelist;
}

static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
{
	if (unlikely(PageSlabPfmemalloc(page)))
		return gfp_pfmemalloc_allowed(gfpflags);

	return true;
}
/*
 * Check the page->freelist of a page and either transfer the freelist to the
 * per cpu freelist or deactivate the page.
 *
 * The page is still frozen if the return value is not NULL.
 *
 * If this function returns NULL then the page has been unfrozen.
 *
 * This function must be called with interrupts disabled.
 */
static inline void *get_freelist(struct kmem_cache *s, struct page *page)
{
	struct page new;
	unsigned long counters;
	void *freelist;

	do {
		freelist = page->freelist;
		counters = page->counters;

		new.counters = counters;
		VM_BUG_ON(!new.frozen);

		new.inuse = page->objects;
		new.frozen = freelist != NULL;

	} while (!__cmpxchg_double_slab(s, page,
		freelist, counters,
		NULL, new.counters,
		"get_freelist"));

	return freelist;
}
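/*
 * Illustrative use of get_freelist(); this roughly mirrors the refill step
 * of the slow path below, using only identifiers from this file:
 *
 *	freelist = get_freelist(s, page);
 *	if (!freelist) {
 *		c->page = NULL;
 *		stat(s, DEACTIVATE_BYPASS);
 *		// page was unfrozen, a new slab must be obtained
 *	} else {
 *		c->freelist = get_freepointer(s, freelist);
 *		c->tid = next_tid(c->tid);
 *		// the first object in 'freelist' is handed to the caller
 *	}
 */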
2704 * Slow path. The lockless freelist is empty or we need to perform
2707 * Processing is still very fast if new objects have been freed to the
2708 * regular freelist. In that case we simply take over the regular freelist
2709 * as the lockless freelist and zap the regular freelist.
2711 * If that is not working then we fall back to the partial lists. We take the
2712 * first element of the freelist as the object to allocate now and move the
2713 * rest of the freelist to the lockless freelist.
2715 * And if we were unable to get a new slab from the partial slab lists then
2716 * we need to allocate a new slab. This is the slowest path since it involves
2717 * a call to the page allocator and the setup of a new slab.
2719 * Version of __slab_alloc to use when we know that interrupts are
2720 * already disabled (which is the case for bulk allocation).
2722 static void *___slab_alloc(struct kmem_cache
*s
, gfp_t gfpflags
, int node
,
2723 unsigned long addr
, struct kmem_cache_cpu
*c
)
2728 stat(s
, ALLOC_SLOWPATH
);
2733 * if the node is not online or has no normal memory, just
2734 * ignore the node constraint
2736 if (unlikely(node
!= NUMA_NO_NODE
&&
2737 !node_isset(node
, slab_nodes
)))
2738 node
= NUMA_NO_NODE
;
2743 if (unlikely(!node_match(page
, node
))) {
2745 * same as above but node_match() being false already
2746 * implies node != NUMA_NO_NODE
2748 if (!node_isset(node
, slab_nodes
)) {
2749 node
= NUMA_NO_NODE
;
2752 stat(s
, ALLOC_NODE_MISMATCH
);
2753 deactivate_slab(s
, page
, c
->freelist
, c
);
2759 * By rights, we should be searching for a slab page that was
2760 * PFMEMALLOC but right now, we are losing the pfmemalloc
2761 * information when the page leaves the per-cpu allocator
2763 if (unlikely(!pfmemalloc_match(page
, gfpflags
))) {
2764 deactivate_slab(s
, page
, c
->freelist
, c
);
2768 /* must check again c->freelist in case of cpu migration or IRQ */
2769 freelist
= c
->freelist
;
2773 freelist
= get_freelist(s
, page
);
2777 stat(s
, DEACTIVATE_BYPASS
);
2781 stat(s
, ALLOC_REFILL
);
2785 * freelist is pointing to the list of objects to be used.
2786 * page is pointing to the page from which the objects are obtained.
2787 * That page must be frozen for per cpu allocations to work.
2789 VM_BUG_ON(!c
->page
->frozen
);
2790 c
->freelist
= get_freepointer(s
, freelist
);
2791 c
->tid
= next_tid(c
->tid
);
2796 if (slub_percpu_partial(c
)) {
2797 page
= c
->page
= slub_percpu_partial(c
);
2798 slub_set_percpu_partial(c
, page
);
2799 stat(s
, CPU_PARTIAL_ALLOC
);
2803 freelist
= new_slab_objects(s
, gfpflags
, node
, &c
);
2805 if (unlikely(!freelist
)) {
2806 slab_out_of_memory(s
, gfpflags
, node
);
2811 if (likely(!kmem_cache_debug(s
) && pfmemalloc_match(page
, gfpflags
)))
2814 /* Only entered in the debug case */
2815 if (kmem_cache_debug(s
) &&
2816 !alloc_debug_processing(s
, page
, freelist
, addr
))
2817 goto new_slab
; /* Slab failed checks. Next slab needed */
2819 deactivate_slab(s
, page
, get_freepointer(s
, freelist
), c
);
2824 * Another one that disabled interrupt and compensates for possible
2825 * cpu changes by refetching the per cpu area pointer.
2827 static void *__slab_alloc(struct kmem_cache
*s
, gfp_t gfpflags
, int node
,
2828 unsigned long addr
, struct kmem_cache_cpu
*c
)
2831 unsigned long flags
;
2833 local_irq_save(flags
);
2834 #ifdef CONFIG_PREEMPTION
2836 * We may have been preempted and rescheduled on a different
2837 * cpu before disabling interrupts. Need to reload cpu area
2840 c
= this_cpu_ptr(s
->cpu_slab
);
2843 p
= ___slab_alloc(s
, gfpflags
, node
, addr
, c
);
2844 local_irq_restore(flags
);
2849 * If the object has been wiped upon free, make sure it's fully initialized by
2850 * zeroing out freelist pointer.
2852 static __always_inline
void maybe_wipe_obj_freeptr(struct kmem_cache
*s
,
2855 if (unlikely(slab_want_init_on_free(s
)) && obj
)
2856 memset((void *)((char *)kasan_reset_tag(obj
) + s
->offset
),
2861 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2862 * have the fastpath folded into their functions. So no function call
2863 * overhead for requests that can be satisfied on the fastpath.
2865 * The fastpath works by first checking if the lockless freelist can be used.
2866 * If not then __slab_alloc is called for slow processing.
2868 * Otherwise we can simply pick the next object from the lockless free list.
2870 static __always_inline
void *slab_alloc_node(struct kmem_cache
*s
,
2871 gfp_t gfpflags
, int node
, unsigned long addr
, size_t orig_size
)
2874 struct kmem_cache_cpu
*c
;
2877 struct obj_cgroup
*objcg
= NULL
;
2880 s
= slab_pre_alloc_hook(s
, &objcg
, 1, gfpflags
);
2884 object
= kfence_alloc(s
, orig_size
, gfpflags
);
2885 if (unlikely(object
))
2890 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2891 * enabled. We may switch back and forth between cpus while
2892 * reading from one cpu area. That does not matter as long
2893 * as we end up on the original cpu again when doing the cmpxchg.
2895 * We should guarantee that tid and kmem_cache are retrieved on
2896 * the same cpu. It could be different if CONFIG_PREEMPTION so we need
2897 * to check if it is matched or not.
2900 tid
= this_cpu_read(s
->cpu_slab
->tid
);
2901 c
= raw_cpu_ptr(s
->cpu_slab
);
2902 } while (IS_ENABLED(CONFIG_PREEMPTION
) &&
2903 unlikely(tid
!= READ_ONCE(c
->tid
)));
2906 * Irqless object alloc/free algorithm used here depends on sequence
2907 * of fetching cpu_slab's data. tid should be fetched before anything
2908 * on c to guarantee that object and page associated with previous tid
2909 * won't be used with current tid. If we fetch tid first, object and
2910 * page could be one associated with next tid and our alloc/free
2911 * request will be failed. In this case, we will retry. So, no problem.
2916 * The transaction ids are globally unique per cpu and per operation on
2917 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
2918 * occurs on the right processor and that there was no operation on the
2919 * linked list in between.
2922 object
= c
->freelist
;
2924 if (unlikely(!object
|| !page
|| !node_match(page
, node
))) {
2925 object
= __slab_alloc(s
, gfpflags
, node
, addr
, c
);
2927 void *next_object
= get_freepointer_safe(s
, object
);
2930 * The cmpxchg will only match if there was no additional
2931 * operation and if we are on the right processor.
2933 * The cmpxchg does the following atomically (without lock
2935 * 1. Relocate first pointer to the current per cpu area.
2936 * 2. Verify that tid and freelist have not been changed
2937 * 3. If they were not changed replace tid and freelist
2939 * Since this is without lock semantics the protection is only
2940 * against code executing on this cpu *not* from access by
2943 if (unlikely(!this_cpu_cmpxchg_double(
2944 s
->cpu_slab
->freelist
, s
->cpu_slab
->tid
,
2946 next_object
, next_tid(tid
)))) {
2948 note_cmpxchg_failure("slab_alloc", s
, tid
);
2951 prefetch_freepointer(s
, next_object
);
2952 stat(s
, ALLOC_FASTPATH
);
2955 maybe_wipe_obj_freeptr(s
, object
);
2956 init
= slab_want_init_on_alloc(gfpflags
, s
);
2959 slab_post_alloc_hook(s
, objcg
, gfpflags
, 1, &object
, init
);
2964 static __always_inline
void *slab_alloc(struct kmem_cache
*s
,
2965 gfp_t gfpflags
, unsigned long addr
, size_t orig_size
)
2967 return slab_alloc_node(s
, gfpflags
, NUMA_NO_NODE
, addr
, orig_size
);
2970 void *kmem_cache_alloc(struct kmem_cache
*s
, gfp_t gfpflags
)
2972 void *ret
= slab_alloc(s
, gfpflags
, _RET_IP_
, s
->object_size
);
2974 trace_kmem_cache_alloc(_RET_IP_
, ret
, s
->object_size
,
2979 EXPORT_SYMBOL(kmem_cache_alloc
);
2981 #ifdef CONFIG_TRACING
2982 void *kmem_cache_alloc_trace(struct kmem_cache
*s
, gfp_t gfpflags
, size_t size
)
2984 void *ret
= slab_alloc(s
, gfpflags
, _RET_IP_
, size
);
2985 trace_kmalloc(_RET_IP_
, ret
, size
, s
->size
, gfpflags
);
2986 ret
= kasan_kmalloc(s
, ret
, size
, gfpflags
);
2989 EXPORT_SYMBOL(kmem_cache_alloc_trace
);
2993 void *kmem_cache_alloc_node(struct kmem_cache
*s
, gfp_t gfpflags
, int node
)
2995 void *ret
= slab_alloc_node(s
, gfpflags
, node
, _RET_IP_
, s
->object_size
);
2997 trace_kmem_cache_alloc_node(_RET_IP_
, ret
,
2998 s
->object_size
, s
->size
, gfpflags
, node
);
3002 EXPORT_SYMBOL(kmem_cache_alloc_node
);
3004 #ifdef CONFIG_TRACING
3005 void *kmem_cache_alloc_node_trace(struct kmem_cache
*s
,
3007 int node
, size_t size
)
3009 void *ret
= slab_alloc_node(s
, gfpflags
, node
, _RET_IP_
, size
);
3011 trace_kmalloc_node(_RET_IP_
, ret
,
3012 size
, s
->size
, gfpflags
, node
);
3014 ret
= kasan_kmalloc(s
, ret
, size
, gfpflags
);
3017 EXPORT_SYMBOL(kmem_cache_alloc_node_trace
);
3019 #endif /* CONFIG_NUMA */
3022 * Slow path handling. This may still be called frequently since objects
3023 * have a longer lifetime than the cpu slabs in most processing loads.
3025 * So we still attempt to reduce cache line usage. Just take the slab
3026 * lock and free the item. If there is no additional partial page
3027 * handling required then we can return immediately.
3029 static void __slab_free(struct kmem_cache
*s
, struct page
*page
,
3030 void *head
, void *tail
, int cnt
,
3037 unsigned long counters
;
3038 struct kmem_cache_node
*n
= NULL
;
3039 unsigned long flags
;
3041 stat(s
, FREE_SLOWPATH
);
3043 if (kfence_free(head
))
3046 if (kmem_cache_debug(s
) &&
3047 !free_debug_processing(s
, page
, head
, tail
, cnt
, addr
))
3052 spin_unlock_irqrestore(&n
->list_lock
, flags
);
3055 prior
= page
->freelist
;
3056 counters
= page
->counters
;
3057 set_freepointer(s
, tail
, prior
);
3058 new.counters
= counters
;
3059 was_frozen
= new.frozen
;
3061 if ((!new.inuse
|| !prior
) && !was_frozen
) {
3063 if (kmem_cache_has_cpu_partial(s
) && !prior
) {
3066 * Slab was on no list before and will be
3068 * We can defer the list move and instead
3073 } else { /* Needs to be taken off a list */
3075 n
= get_node(s
, page_to_nid(page
));
3077 * Speculatively acquire the list_lock.
3078 * If the cmpxchg does not succeed then we may
3079 * drop the list_lock without any processing.
3081 * Otherwise the list_lock will synchronize with
3082 * other processors updating the list of slabs.
3084 spin_lock_irqsave(&n
->list_lock
, flags
);
3089 } while (!cmpxchg_double_slab(s
, page
,
3096 if (likely(was_frozen
)) {
3098 * The list lock was not taken therefore no list
3099 * activity can be necessary.
3101 stat(s
, FREE_FROZEN
);
3102 } else if (new.frozen
) {
3104 * If we just froze the page then put it onto the
3105 * per cpu partial list.
3107 put_cpu_partial(s
, page
, 1);
3108 stat(s
, CPU_PARTIAL_FREE
);
3114 if (unlikely(!new.inuse
&& n
->nr_partial
>= s
->min_partial
))
3118 * Objects left in the slab. If it was not on the partial list before
3121 if (!kmem_cache_has_cpu_partial(s
) && unlikely(!prior
)) {
3122 remove_full(s
, n
, page
);
3123 add_partial(n
, page
, DEACTIVATE_TO_TAIL
);
3124 stat(s
, FREE_ADD_PARTIAL
);
3126 spin_unlock_irqrestore(&n
->list_lock
, flags
);
3132 * Slab on the partial list.
3134 remove_partial(n
, page
);
3135 stat(s
, FREE_REMOVE_PARTIAL
);
3137 /* Slab must be on the full list */
3138 remove_full(s
, n
, page
);
3141 spin_unlock_irqrestore(&n
->list_lock
, flags
);
3143 discard_slab(s
, page
);
3147 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
3148 * can perform fastpath freeing without additional function calls.
3150 * The fastpath is only possible if we are freeing to the current cpu slab
3151 * of this processor. This typically the case if we have just allocated
3154 * If fastpath is not possible then fall back to __slab_free where we deal
3155 * with all sorts of special processing.
3157 * Bulk free of a freelist with several objects (all pointing to the
3158 * same page) possible by specifying head and tail ptr, plus objects
3159 * count (cnt). Bulk free indicated by tail pointer being set.
3161 static __always_inline
void do_slab_free(struct kmem_cache
*s
,
3162 struct page
*page
, void *head
, void *tail
,
3163 int cnt
, unsigned long addr
)
3165 void *tail_obj
= tail
? : head
;
3166 struct kmem_cache_cpu
*c
;
3169 memcg_slab_free_hook(s
, &head
, 1);
3172 * Determine the currently cpus per cpu slab.
3173 * The cpu may change afterward. However that does not matter since
3174 * data is retrieved via this pointer. If we are on the same cpu
3175 * during the cmpxchg then the free will succeed.
3178 tid
= this_cpu_read(s
->cpu_slab
->tid
);
3179 c
= raw_cpu_ptr(s
->cpu_slab
);
3180 } while (IS_ENABLED(CONFIG_PREEMPTION
) &&
3181 unlikely(tid
!= READ_ONCE(c
->tid
)));
3183 /* Same with comment on barrier() in slab_alloc_node() */
3186 if (likely(page
== c
->page
)) {
3187 void **freelist
= READ_ONCE(c
->freelist
);
3189 set_freepointer(s
, tail_obj
, freelist
);
3191 if (unlikely(!this_cpu_cmpxchg_double(
3192 s
->cpu_slab
->freelist
, s
->cpu_slab
->tid
,
3194 head
, next_tid(tid
)))) {
3196 note_cmpxchg_failure("slab_free", s
, tid
);
3199 stat(s
, FREE_FASTPATH
);
3201 __slab_free(s
, page
, head
, tail_obj
, cnt
, addr
);
3205 static __always_inline
void slab_free(struct kmem_cache
*s
, struct page
*page
,
3206 void *head
, void *tail
, int cnt
,
3210 * With KASAN enabled slab_free_freelist_hook modifies the freelist
3211 * to remove objects, whose reuse must be delayed.
3213 if (slab_free_freelist_hook(s
, &head
, &tail
))
3214 do_slab_free(s
, page
, head
, tail
, cnt
, addr
);
3217 #ifdef CONFIG_KASAN_GENERIC
3218 void ___cache_free(struct kmem_cache
*cache
, void *x
, unsigned long addr
)
3220 do_slab_free(cache
, virt_to_head_page(x
), x
, NULL
, 1, addr
);
3224 void kmem_cache_free(struct kmem_cache
*s
, void *x
)
3226 s
= cache_from_obj(s
, x
);
3229 slab_free(s
, virt_to_head_page(x
), x
, NULL
, 1, _RET_IP_
);
3230 trace_kmem_cache_free(_RET_IP_
, x
, s
->name
);
3232 EXPORT_SYMBOL(kmem_cache_free
);
3234 struct detached_freelist
{
3239 struct kmem_cache
*s
;
3242 static inline void free_nonslab_page(struct page
*page
, void *object
)
3244 unsigned int order
= compound_order(page
);
3246 VM_BUG_ON_PAGE(!PageCompound(page
), page
);
3248 mod_lruvec_page_state(page
, NR_SLAB_UNRECLAIMABLE_B
, -(PAGE_SIZE
<< order
));
3249 __free_pages(page
, order
);
3253 * This function progressively scans the array with free objects (with
3254 * a limited look ahead) and extract objects belonging to the same
3255 * page. It builds a detached freelist directly within the given
3256 * page/objects. This can happen without any need for
3257 * synchronization, because the objects are owned by running process.
3258 * The freelist is build up as a single linked list in the objects.
3259 * The idea is, that this detached freelist can then be bulk
3260 * transferred to the real freelist(s), but only requiring a single
3261 * synchronization primitive. Look ahead in the array is limited due
3262 * to performance reasons.
3265 int build_detached_freelist(struct kmem_cache
*s
, size_t size
,
3266 void **p
, struct detached_freelist
*df
)
3268 size_t first_skipped_index
= 0;
3273 /* Always re-init detached_freelist */
3278 /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
3279 } while (!object
&& size
);
3284 page
= virt_to_head_page(object
);
3286 /* Handle kalloc'ed objects */
3287 if (unlikely(!PageSlab(page
))) {
3288 free_nonslab_page(page
, object
);
3289 p
[size
] = NULL
; /* mark object processed */
3292 /* Derive kmem_cache from object */
3293 df
->s
= page
->slab_cache
;
3295 df
->s
= cache_from_obj(s
, object
); /* Support for memcg */
3298 if (is_kfence_address(object
)) {
3299 slab_free_hook(df
->s
, object
, false);
3300 __kfence_free(object
);
3301 p
[size
] = NULL
; /* mark object processed */
3305 /* Start new detached freelist */
3307 set_freepointer(df
->s
, object
, NULL
);
3309 df
->freelist
= object
;
3310 p
[size
] = NULL
; /* mark object processed */
3316 continue; /* Skip processed objects */
3318 /* df->page is always set at this point */
3319 if (df
->page
== virt_to_head_page(object
)) {
3320 /* Opportunity build freelist */
3321 set_freepointer(df
->s
, object
, df
->freelist
);
3322 df
->freelist
= object
;
3324 p
[size
] = NULL
; /* mark object processed */
3329 /* Limit look ahead search */
3333 if (!first_skipped_index
)
3334 first_skipped_index
= size
+ 1;
3337 return first_skipped_index
;
3340 /* Note that interrupts must be enabled when calling this function. */
3341 void kmem_cache_free_bulk(struct kmem_cache
*s
, size_t size
, void **p
)
3346 memcg_slab_free_hook(s
, p
, size
);
3348 struct detached_freelist df
;
3350 size
= build_detached_freelist(s
, size
, p
, &df
);
3354 slab_free(df
.s
, df
.page
, df
.freelist
, df
.tail
, df
.cnt
, _RET_IP_
);
3355 } while (likely(size
));
3357 EXPORT_SYMBOL(kmem_cache_free_bulk
);
3359 /* Note that interrupts must be enabled when calling this function. */
3360 int kmem_cache_alloc_bulk(struct kmem_cache
*s
, gfp_t flags
, size_t size
,
3363 struct kmem_cache_cpu
*c
;
3365 struct obj_cgroup
*objcg
= NULL
;
3367 /* memcg and kmem_cache debug support */
3368 s
= slab_pre_alloc_hook(s
, &objcg
, size
, flags
);
3372 * Drain objects in the per cpu slab, while disabling local
3373 * IRQs, which protects against PREEMPT and interrupts
3374 * handlers invoking normal fastpath.
3376 local_irq_disable();
3377 c
= this_cpu_ptr(s
->cpu_slab
);
3379 for (i
= 0; i
< size
; i
++) {
3380 void *object
= kfence_alloc(s
, s
->object_size
, flags
);
3382 if (unlikely(object
)) {
3387 object
= c
->freelist
;
3388 if (unlikely(!object
)) {
3390 * We may have removed an object from c->freelist using
3391 * the fastpath in the previous iteration; in that case,
3392 * c->tid has not been bumped yet.
3393 * Since ___slab_alloc() may reenable interrupts while
3394 * allocating memory, we should bump c->tid now.
3396 c
->tid
= next_tid(c
->tid
);
3399 * Invoking slow path likely have side-effect
3400 * of re-populating per CPU c->freelist
3402 p
[i
] = ___slab_alloc(s
, flags
, NUMA_NO_NODE
,
3404 if (unlikely(!p
[i
]))
3407 c
= this_cpu_ptr(s
->cpu_slab
);
3408 maybe_wipe_obj_freeptr(s
, p
[i
]);
3410 continue; /* goto for-loop */
3412 c
->freelist
= get_freepointer(s
, object
);
3414 maybe_wipe_obj_freeptr(s
, p
[i
]);
3416 c
->tid
= next_tid(c
->tid
);
3420 * memcg and kmem_cache debug support and memory initialization.
3421 * Done outside of the IRQ disabled fastpath loop.
3423 slab_post_alloc_hook(s
, objcg
, flags
, size
, p
,
3424 slab_want_init_on_alloc(flags
, s
));
3428 slab_post_alloc_hook(s
, objcg
, flags
, i
, p
, false);
3429 __kmem_cache_free_bulk(s
, i
, p
);
3432 EXPORT_SYMBOL(kmem_cache_alloc_bulk
);
3436 * Object placement in a slab is made very easy because we always start at
3437 * offset 0. If we tune the size of the object to the alignment then we can
3438 * get the required alignment by putting one properly sized object after
3441 * Notice that the allocation order determines the sizes of the per cpu
3442 * caches. Each processor has always one slab available for allocations.
3443 * Increasing the allocation order reduces the number of times that slabs
3444 * must be moved on and off the partial lists and is therefore a factor in
3449 * Minimum / Maximum order of slab pages. This influences locking overhead
3450 * and slab fragmentation. A higher order reduces the number of partial slabs
3451 * and increases the number of allocations possible without having to
3452 * take the list_lock.
3454 static unsigned int slub_min_order
;
3455 static unsigned int slub_max_order
= PAGE_ALLOC_COSTLY_ORDER
;
3456 static unsigned int slub_min_objects
;
3459 * Calculate the order of allocation given an slab object size.
3461 * The order of allocation has significant impact on performance and other
3462 * system components. Generally order 0 allocations should be preferred since
3463 * order 0 does not cause fragmentation in the page allocator. Larger objects
3464 * be problematic to put into order 0 slabs because there may be too much
3465 * unused space left. We go to a higher order if more than 1/16th of the slab
3468 * In order to reach satisfactory performance we must ensure that a minimum
3469 * number of objects is in one slab. Otherwise we may generate too much
3470 * activity on the partial lists which requires taking the list_lock. This is
3471 * less a concern for large slabs though which are rarely used.
3473 * slub_max_order specifies the order where we begin to stop considering the
3474 * number of objects in a slab as critical. If we reach slub_max_order then
3475 * we try to keep the page order as low as possible. So we accept more waste
3476 * of space in favor of a small page order.
3478 * Higher order allocations also allow the placement of more objects in a
3479 * slab and thereby reduce object handling overhead. If the user has
3480 * requested a higher minimum order then we start with that one instead of
3481 * the smallest order which will fit the object.
3483 static inline unsigned int slab_order(unsigned int size
,
3484 unsigned int min_objects
, unsigned int max_order
,
3485 unsigned int fract_leftover
)
3487 unsigned int min_order
= slub_min_order
;
3490 if (order_objects(min_order
, size
) > MAX_OBJS_PER_PAGE
)
3491 return get_order(size
* MAX_OBJS_PER_PAGE
) - 1;
3493 for (order
= max(min_order
, (unsigned int)get_order(min_objects
* size
));
3494 order
<= max_order
; order
++) {
3496 unsigned int slab_size
= (unsigned int)PAGE_SIZE
<< order
;
3499 rem
= slab_size
% size
;
3501 if (rem
<= slab_size
/ fract_leftover
)
3508 static inline int calculate_order(unsigned int size
)
3511 unsigned int min_objects
;
3512 unsigned int max_objects
;
3513 unsigned int nr_cpus
;
3516 * Attempt to find best configuration for a slab. This
3517 * works by first attempting to generate a layout with
3518 * the best configuration and backing off gradually.
3520 * First we increase the acceptable waste in a slab. Then
3521 * we reduce the minimum objects required in a slab.
3523 min_objects
= slub_min_objects
;
3526 * Some architectures will only update present cpus when
3527 * onlining them, so don't trust the number if it's just 1. But
3528 * we also don't want to use nr_cpu_ids always, as on some other
3529 * architectures, there can be many possible cpus, but never
3530 * onlined. Here we compromise between trying to avoid too high
3531 * order on systems that appear larger than they are, and too
3532 * low order on systems that appear smaller than they are.
3534 nr_cpus
= num_present_cpus();
3536 nr_cpus
= nr_cpu_ids
;
3537 min_objects
= 4 * (fls(nr_cpus
) + 1);
3539 max_objects
= order_objects(slub_max_order
, size
);
3540 min_objects
= min(min_objects
, max_objects
);
3542 while (min_objects
> 1) {
3543 unsigned int fraction
;
3546 while (fraction
>= 4) {
3547 order
= slab_order(size
, min_objects
,
3548 slub_max_order
, fraction
);
3549 if (order
<= slub_max_order
)
3557 * We were unable to place multiple objects in a slab. Now
3558 * lets see if we can place a single object there.
3560 order
= slab_order(size
, 1, slub_max_order
, 1);
3561 if (order
<= slub_max_order
)
3565 * Doh this slab cannot be placed using slub_max_order.
3567 order
= slab_order(size
, 1, MAX_ORDER
, 1);
3568 if (order
< MAX_ORDER
)
3574 init_kmem_cache_node(struct kmem_cache_node
*n
)
3577 spin_lock_init(&n
->list_lock
);
3578 INIT_LIST_HEAD(&n
->partial
);
3579 #ifdef CONFIG_SLUB_DEBUG
3580 atomic_long_set(&n
->nr_slabs
, 0);
3581 atomic_long_set(&n
->total_objects
, 0);
3582 INIT_LIST_HEAD(&n
->full
);
3586 static inline int alloc_kmem_cache_cpus(struct kmem_cache
*s
)
3588 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE
<
3589 KMALLOC_SHIFT_HIGH
* sizeof(struct kmem_cache_cpu
));
3592 * Must align to double word boundary for the double cmpxchg
3593 * instructions to work; see __pcpu_double_call_return_bool().
3595 s
->cpu_slab
= __alloc_percpu(sizeof(struct kmem_cache_cpu
),
3596 2 * sizeof(void *));
3601 init_kmem_cache_cpus(s
);
3606 static struct kmem_cache
*kmem_cache_node
;
3609 * No kmalloc_node yet so do it by hand. We know that this is the first
3610 * slab on the node for this slabcache. There are no concurrent accesses
3613 * Note that this function only works on the kmem_cache_node
3614 * when allocating for the kmem_cache_node. This is used for bootstrapping
3615 * memory on a fresh node that has no slab structures yet.
3617 static void early_kmem_cache_node_alloc(int node
)
3620 struct kmem_cache_node
*n
;
3622 BUG_ON(kmem_cache_node
->size
< sizeof(struct kmem_cache_node
));
3624 page
= new_slab(kmem_cache_node
, GFP_NOWAIT
, node
);
3627 if (page_to_nid(page
) != node
) {
3628 pr_err("SLUB: Unable to allocate memory from node %d\n", node
);
3629 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3634 #ifdef CONFIG_SLUB_DEBUG
3635 init_object(kmem_cache_node
, n
, SLUB_RED_ACTIVE
);
3636 init_tracking(kmem_cache_node
, n
);
3638 n
= kasan_slab_alloc(kmem_cache_node
, n
, GFP_KERNEL
, false);
3639 page
->freelist
= get_freepointer(kmem_cache_node
, n
);
3642 kmem_cache_node
->node
[node
] = n
;
3643 init_kmem_cache_node(n
);
3644 inc_slabs_node(kmem_cache_node
, node
, page
->objects
);
3647 * No locks need to be taken here as it has just been
3648 * initialized and there is no concurrent access.
3650 __add_partial(n
, page
, DEACTIVATE_TO_HEAD
);
3653 static void free_kmem_cache_nodes(struct kmem_cache
*s
)
3656 struct kmem_cache_node
*n
;
3658 for_each_kmem_cache_node(s
, node
, n
) {
3659 s
->node
[node
] = NULL
;
3660 kmem_cache_free(kmem_cache_node
, n
);
3664 void __kmem_cache_release(struct kmem_cache
*s
)
3666 cache_random_seq_destroy(s
);
3667 free_percpu(s
->cpu_slab
);
3668 free_kmem_cache_nodes(s
);
3671 static int init_kmem_cache_nodes(struct kmem_cache
*s
)
3675 for_each_node_mask(node
, slab_nodes
) {
3676 struct kmem_cache_node
*n
;
3678 if (slab_state
== DOWN
) {
3679 early_kmem_cache_node_alloc(node
);
3682 n
= kmem_cache_alloc_node(kmem_cache_node
,
3686 free_kmem_cache_nodes(s
);
3690 init_kmem_cache_node(n
);
3696 static void set_min_partial(struct kmem_cache
*s
, unsigned long min
)
3698 if (min
< MIN_PARTIAL
)
3700 else if (min
> MAX_PARTIAL
)
3702 s
->min_partial
= min
;
3705 static void set_cpu_partial(struct kmem_cache
*s
)
3707 #ifdef CONFIG_SLUB_CPU_PARTIAL
3709 * cpu_partial determined the maximum number of objects kept in the
3710 * per cpu partial lists of a processor.
3712 * Per cpu partial lists mainly contain slabs that just have one
3713 * object freed. If they are used for allocation then they can be
3714 * filled up again with minimal effort. The slab will never hit the
3715 * per node partial lists and therefore no locking will be required.
3717 * This setting also determines
3719 * A) The number of objects from per cpu partial slabs dumped to the
3720 * per node list when we reach the limit.
3721 * B) The number of objects in cpu partial slabs to extract from the
3722 * per node list when we run out of per cpu objects. We only fetch
3723 * 50% to keep some capacity around for frees.
3725 if (!kmem_cache_has_cpu_partial(s
))
3726 slub_set_cpu_partial(s
, 0);
3727 else if (s
->size
>= PAGE_SIZE
)
3728 slub_set_cpu_partial(s
, 2);
3729 else if (s
->size
>= 1024)
3730 slub_set_cpu_partial(s
, 6);
3731 else if (s
->size
>= 256)
3732 slub_set_cpu_partial(s
, 13);
3734 slub_set_cpu_partial(s
, 30);
3739 * calculate_sizes() determines the order and the distribution of data within
3742 static int calculate_sizes(struct kmem_cache
*s
, int forced_order
)
3744 slab_flags_t flags
= s
->flags
;
3745 unsigned int size
= s
->object_size
;
3749 * Round up object size to the next word boundary. We can only
3750 * place the free pointer at word boundaries and this determines
3751 * the possible location of the free pointer.
3753 size
= ALIGN(size
, sizeof(void *));
3755 #ifdef CONFIG_SLUB_DEBUG
3757 * Determine if we can poison the object itself. If the user of
3758 * the slab may touch the object after free or before allocation
3759 * then we should never poison the object itself.
3761 if ((flags
& SLAB_POISON
) && !(flags
& SLAB_TYPESAFE_BY_RCU
) &&
3763 s
->flags
|= __OBJECT_POISON
;
3765 s
->flags
&= ~__OBJECT_POISON
;
3769 * If we are Redzoning then check if there is some space between the
3770 * end of the object and the free pointer. If not then add an
3771 * additional word to have some bytes to store Redzone information.
3773 if ((flags
& SLAB_RED_ZONE
) && size
== s
->object_size
)
3774 size
+= sizeof(void *);
3778 * With that we have determined the number of bytes in actual use
3779 * by the object and redzoning.
3783 if ((flags
& (SLAB_TYPESAFE_BY_RCU
| SLAB_POISON
)) ||
3784 ((flags
& SLAB_RED_ZONE
) && s
->object_size
< sizeof(void *)) ||
3787 * Relocate free pointer after the object if it is not
3788 * permitted to overwrite the first word of the object on
3791 * This is the case if we do RCU, have a constructor or
3792 * destructor, are poisoning the objects, or are
3793 * redzoning an object smaller than sizeof(void *).
3795 * The assumption that s->offset >= s->inuse means free
3796 * pointer is outside of the object is used in the
3797 * freeptr_outside_object() function. If that is no
3798 * longer true, the function needs to be modified.
3801 size
+= sizeof(void *);
3804 * Store freelist pointer near middle of object to keep
3805 * it away from the edges of the object to avoid small
3806 * sized over/underflows from neighboring allocations.
3808 s
->offset
= ALIGN_DOWN(s
->object_size
/ 2, sizeof(void *));
3811 #ifdef CONFIG_SLUB_DEBUG
3812 if (flags
& SLAB_STORE_USER
)
3814 * Need to store information about allocs and frees after
3817 size
+= 2 * sizeof(struct track
);
3820 kasan_cache_create(s
, &size
, &s
->flags
);
3821 #ifdef CONFIG_SLUB_DEBUG
3822 if (flags
& SLAB_RED_ZONE
) {
3824 * Add some empty padding so that we can catch
3825 * overwrites from earlier objects rather than let
3826 * tracking information or the free pointer be
3827 * corrupted if a user writes before the start
3830 size
+= sizeof(void *);
3832 s
->red_left_pad
= sizeof(void *);
3833 s
->red_left_pad
= ALIGN(s
->red_left_pad
, s
->align
);
3834 size
+= s
->red_left_pad
;
3839 * SLUB stores one object immediately after another beginning from
3840 * offset 0. In order to align the objects we have to simply size
3841 * each object to conform to the alignment.
3843 size
= ALIGN(size
, s
->align
);
3845 s
->reciprocal_size
= reciprocal_value(size
);
3846 if (forced_order
>= 0)
3847 order
= forced_order
;
3849 order
= calculate_order(size
);
3856 s
->allocflags
|= __GFP_COMP
;
3858 if (s
->flags
& SLAB_CACHE_DMA
)
3859 s
->allocflags
|= GFP_DMA
;
3861 if (s
->flags
& SLAB_CACHE_DMA32
)
3862 s
->allocflags
|= GFP_DMA32
;
3864 if (s
->flags
& SLAB_RECLAIM_ACCOUNT
)
3865 s
->allocflags
|= __GFP_RECLAIMABLE
;
3868 * Determine the number of objects per slab
3870 s
->oo
= oo_make(order
, size
);
3871 s
->min
= oo_make(get_order(size
), size
);
3872 if (oo_objects(s
->oo
) > oo_objects(s
->max
))
3875 return !!oo_objects(s
->oo
);
3878 static int kmem_cache_open(struct kmem_cache
*s
, slab_flags_t flags
)
3880 s
->flags
= kmem_cache_flags(s
->size
, flags
, s
->name
);
3881 #ifdef CONFIG_SLAB_FREELIST_HARDENED
3882 s
->random
= get_random_long();
3885 if (!calculate_sizes(s
, -1))
3887 if (disable_higher_order_debug
) {
3889 * Disable debugging flags that store metadata if the min slab
3892 if (get_order(s
->size
) > get_order(s
->object_size
)) {
3893 s
->flags
&= ~DEBUG_METADATA_FLAGS
;
3895 if (!calculate_sizes(s
, -1))
3900 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3901 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3902 if (system_has_cmpxchg_double() && (s
->flags
& SLAB_NO_CMPXCHG
) == 0)
3903 /* Enable fast mode */
3904 s
->flags
|= __CMPXCHG_DOUBLE
;
3908 * The larger the object size is, the more pages we want on the partial
3909 * list to avoid pounding the page allocator excessively.
3911 set_min_partial(s
, ilog2(s
->size
) / 2);
3916 s
->remote_node_defrag_ratio
= 1000;
3919 /* Initialize the pre-computed randomized freelist if slab is up */
3920 if (slab_state
>= UP
) {
3921 if (init_cache_random_seq(s
))
3925 if (!init_kmem_cache_nodes(s
))
3928 if (alloc_kmem_cache_cpus(s
))
3931 free_kmem_cache_nodes(s
);
3936 static void list_slab_objects(struct kmem_cache
*s
, struct page
*page
,
3939 #ifdef CONFIG_SLUB_DEBUG
3940 void *addr
= page_address(page
);
3944 slab_err(s
, page
, text
, s
->name
);
3947 map
= get_map(s
, page
);
3948 for_each_object(p
, s
, addr
, page
->objects
) {
3950 if (!test_bit(__obj_to_index(s
, addr
, p
), map
)) {
3951 pr_err("Object 0x%p @offset=%tu\n", p
, p
- addr
);
3952 print_tracking(s
, p
);
3961 * Attempt to free all partial slabs on a node.
3962 * This is called from __kmem_cache_shutdown(). We must take list_lock
3963 * because sysfs file might still access partial list after the shutdowning.
3965 static void free_partial(struct kmem_cache
*s
, struct kmem_cache_node
*n
)
3968 struct page
*page
, *h
;
3970 BUG_ON(irqs_disabled());
3971 spin_lock_irq(&n
->list_lock
);
3972 list_for_each_entry_safe(page
, h
, &n
->partial
, slab_list
) {
3974 remove_partial(n
, page
);
3975 list_add(&page
->slab_list
, &discard
);
3977 list_slab_objects(s
, page
,
3978 "Objects remaining in %s on __kmem_cache_shutdown()");
3981 spin_unlock_irq(&n
->list_lock
);
3983 list_for_each_entry_safe(page
, h
, &discard
, slab_list
)
3984 discard_slab(s
, page
);
3987 bool __kmem_cache_empty(struct kmem_cache
*s
)
3990 struct kmem_cache_node
*n
;
3992 for_each_kmem_cache_node(s
, node
, n
)
3993 if (n
->nr_partial
|| slabs_node(s
, node
))
3999 * Release all resources used by a slab cache.
4001 int __kmem_cache_shutdown(struct kmem_cache
*s
)
4004 struct kmem_cache_node
*n
;
4007 /* Attempt to free all objects */
4008 for_each_kmem_cache_node(s
, node
, n
) {
4010 if (n
->nr_partial
|| slabs_node(s
, node
))
4016 #ifdef CONFIG_PRINTK
4017 void kmem_obj_info(struct kmem_obj_info
*kpp
, void *object
, struct page
*page
)
4020 int __maybe_unused i
;
4024 struct kmem_cache
*s
= page
->slab_cache
;
4025 struct track __maybe_unused
*trackp
;
4027 kpp
->kp_ptr
= object
;
4028 kpp
->kp_page
= page
;
4029 kpp
->kp_slab_cache
= s
;
4030 base
= page_address(page
);
4031 objp0
= kasan_reset_tag(object
);
4032 #ifdef CONFIG_SLUB_DEBUG
4033 objp
= restore_red_left(s
, objp0
);
4037 objnr
= obj_to_index(s
, page
, objp
);
4038 kpp
->kp_data_offset
= (unsigned long)((char *)objp0
- (char *)objp
);
4039 objp
= base
+ s
->size
* objnr
;
4040 kpp
->kp_objp
= objp
;
4041 if (WARN_ON_ONCE(objp
< base
|| objp
>= base
+ page
->objects
* s
->size
|| (objp
- base
) % s
->size
) ||
4042 !(s
->flags
& SLAB_STORE_USER
))
4044 #ifdef CONFIG_SLUB_DEBUG
4045 objp
= fixup_red_left(s
, objp
);
4046 trackp
= get_track(s
, objp
, TRACK_ALLOC
);
4047 kpp
->kp_ret
= (void *)trackp
->addr
;
4048 #ifdef CONFIG_STACKTRACE
4049 for (i
= 0; i
< KS_ADDRS_COUNT
&& i
< TRACK_ADDRS_COUNT
; i
++) {
4050 kpp
->kp_stack
[i
] = (void *)trackp
->addrs
[i
];
4051 if (!kpp
->kp_stack
[i
])
4055 trackp
= get_track(s
, objp
, TRACK_FREE
);
4056 for (i
= 0; i
< KS_ADDRS_COUNT
&& i
< TRACK_ADDRS_COUNT
; i
++) {
4057 kpp
->kp_free_stack
[i
] = (void *)trackp
->addrs
[i
];
4058 if (!kpp
->kp_free_stack
[i
])
4066 /********************************************************************
4068 *******************************************************************/
4070 static int __init
setup_slub_min_order(char *str
)
4072 get_option(&str
, (int *)&slub_min_order
);
4077 __setup("slub_min_order=", setup_slub_min_order
);
4079 static int __init
setup_slub_max_order(char *str
)
4081 get_option(&str
, (int *)&slub_max_order
);
4082 slub_max_order
= min(slub_max_order
, (unsigned int)MAX_ORDER
- 1);
4087 __setup("slub_max_order=", setup_slub_max_order
);
4089 static int __init
setup_slub_min_objects(char *str
)
4091 get_option(&str
, (int *)&slub_min_objects
);
4096 __setup("slub_min_objects=", setup_slub_min_objects
);
4098 void *__kmalloc(size_t size
, gfp_t flags
)
4100 struct kmem_cache
*s
;
4103 if (unlikely(size
> KMALLOC_MAX_CACHE_SIZE
))
4104 return kmalloc_large(size
, flags
);
4106 s
= kmalloc_slab(size
, flags
);
4108 if (unlikely(ZERO_OR_NULL_PTR(s
)))
4111 ret
= slab_alloc(s
, flags
, _RET_IP_
, size
);
4113 trace_kmalloc(_RET_IP_
, ret
, size
, s
->size
, flags
);
4115 ret
= kasan_kmalloc(s
, ret
, size
, flags
);
4119 EXPORT_SYMBOL(__kmalloc
);
4122 static void *kmalloc_large_node(size_t size
, gfp_t flags
, int node
)
4126 unsigned int order
= get_order(size
);
4128 flags
|= __GFP_COMP
;
4129 page
= alloc_pages_node(node
, flags
, order
);
4131 ptr
= page_address(page
);
4132 mod_lruvec_page_state(page
, NR_SLAB_UNRECLAIMABLE_B
,
4133 PAGE_SIZE
<< order
);
4136 return kmalloc_large_node_hook(ptr
, size
, flags
);
4139 void *__kmalloc_node(size_t size
, gfp_t flags
, int node
)
4141 struct kmem_cache
*s
;
4144 if (unlikely(size
> KMALLOC_MAX_CACHE_SIZE
)) {
4145 ret
= kmalloc_large_node(size
, flags
, node
);
4147 trace_kmalloc_node(_RET_IP_
, ret
,
4148 size
, PAGE_SIZE
<< get_order(size
),
4154 s
= kmalloc_slab(size
, flags
);
4156 if (unlikely(ZERO_OR_NULL_PTR(s
)))
4159 ret
= slab_alloc_node(s
, flags
, node
, _RET_IP_
, size
);
4161 trace_kmalloc_node(_RET_IP_
, ret
, size
, s
->size
, flags
, node
);
4163 ret
= kasan_kmalloc(s
, ret
, size
, flags
);
4167 EXPORT_SYMBOL(__kmalloc_node
);
4168 #endif /* CONFIG_NUMA */
4170 #ifdef CONFIG_HARDENED_USERCOPY
4172 * Rejects incorrectly sized objects and objects that are to be copied
4173 * to/from userspace but do not fall entirely within the containing slab
4174 * cache's usercopy region.
4176 * Returns NULL if check passes, otherwise const char * to name of cache
4177 * to indicate an error.
4179 void __check_heap_object(const void *ptr
, unsigned long n
, struct page
*page
,
4182 struct kmem_cache
*s
;
4183 unsigned int offset
;
4185 bool is_kfence
= is_kfence_address(ptr
);
4187 ptr
= kasan_reset_tag(ptr
);
4189 /* Find object and usable object size. */
4190 s
= page
->slab_cache
;
4192 /* Reject impossible pointers. */
4193 if (ptr
< page_address(page
))
4194 usercopy_abort("SLUB object not in SLUB page?!", NULL
,
4197 /* Find offset within object. */
4199 offset
= ptr
- kfence_object_start(ptr
);
4201 offset
= (ptr
- page_address(page
)) % s
->size
;
4203 /* Adjust for redzone and reject if within the redzone. */
4204 if (!is_kfence
&& kmem_cache_debug_flags(s
, SLAB_RED_ZONE
)) {
4205 if (offset
< s
->red_left_pad
)
4206 usercopy_abort("SLUB object in left red zone",
4207 s
->name
, to_user
, offset
, n
);
4208 offset
-= s
->red_left_pad
;
4211 /* Allow address range falling entirely within usercopy region. */
4212 if (offset
>= s
->useroffset
&&
4213 offset
- s
->useroffset
<= s
->usersize
&&
4214 n
<= s
->useroffset
- offset
+ s
->usersize
)
4218 * If the copy is still within the allocated object, produce
4219 * a warning instead of rejecting the copy. This is intended
4220 * to be a temporary method to find any missing usercopy
4223 object_size
= slab_ksize(s
);
4224 if (usercopy_fallback
&&
4225 offset
<= object_size
&& n
<= object_size
- offset
) {
4226 usercopy_warn("SLUB object", s
->name
, to_user
, offset
, n
);
4230 usercopy_abort("SLUB object", s
->name
, to_user
, offset
, n
);
4232 #endif /* CONFIG_HARDENED_USERCOPY */
4234 size_t __ksize(const void *object
)
4238 if (unlikely(object
== ZERO_SIZE_PTR
))
4241 page
= virt_to_head_page(object
);
4243 if (unlikely(!PageSlab(page
))) {
4244 WARN_ON(!PageCompound(page
));
4245 return page_size(page
);
4248 return slab_ksize(page
->slab_cache
);
4250 EXPORT_SYMBOL(__ksize
);
4252 void kfree(const void *x
)
4255 void *object
= (void *)x
;
4257 trace_kfree(_RET_IP_
, x
);
4259 if (unlikely(ZERO_OR_NULL_PTR(x
)))
4262 page
= virt_to_head_page(x
);
4263 if (unlikely(!PageSlab(page
))) {
4264 free_nonslab_page(page
, object
);
4267 slab_free(page
->slab_cache
, page
, object
, NULL
, 1, _RET_IP_
);
4269 EXPORT_SYMBOL(kfree
);
4271 #define SHRINK_PROMOTE_MAX 32
4274 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
4275 * up most to the head of the partial lists. New allocations will then
4276 * fill those up and thus they can be removed from the partial lists.
4278 * The slabs with the least items are placed last. This results in them
4279 * being allocated from last increasing the chance that the last objects
4280 * are freed in them.
4282 int __kmem_cache_shrink(struct kmem_cache
*s
)
4286 struct kmem_cache_node
*n
;
4289 struct list_head discard
;
4290 struct list_head promote
[SHRINK_PROMOTE_MAX
];
4291 unsigned long flags
;
4295 for_each_kmem_cache_node(s
, node
, n
) {
4296 INIT_LIST_HEAD(&discard
);
4297 for (i
= 0; i
< SHRINK_PROMOTE_MAX
; i
++)
4298 INIT_LIST_HEAD(promote
+ i
);
4300 spin_lock_irqsave(&n
->list_lock
, flags
);
4303 * Build lists of slabs to discard or promote.
4305 * Note that concurrent frees may occur while we hold the
4306 * list_lock. page->inuse here is the upper limit.
4308 list_for_each_entry_safe(page
, t
, &n
->partial
, slab_list
) {
4309 int free
= page
->objects
- page
->inuse
;
4311 /* Do not reread page->inuse */
4314 /* We do not keep full slabs on the list */
4317 if (free
== page
->objects
) {
4318 list_move(&page
->slab_list
, &discard
);
4320 } else if (free
<= SHRINK_PROMOTE_MAX
)
4321 list_move(&page
->slab_list
, promote
+ free
- 1);
4325 * Promote the slabs filled up most to the head of the
4328 for (i
= SHRINK_PROMOTE_MAX
- 1; i
>= 0; i
--)
4329 list_splice(promote
+ i
, &n
->partial
);
4331 spin_unlock_irqrestore(&n
->list_lock
, flags
);
4333 /* Release empty slabs */
4334 list_for_each_entry_safe(page
, t
, &discard
, slab_list
)
4335 discard_slab(s
, page
);
4337 if (slabs_node(s
, node
))
4344 static int slab_mem_going_offline_callback(void *arg
)
4346 struct kmem_cache
*s
;
4348 mutex_lock(&slab_mutex
);
4349 list_for_each_entry(s
, &slab_caches
, list
)
4350 __kmem_cache_shrink(s
);
4351 mutex_unlock(&slab_mutex
);
4356 static void slab_mem_offline_callback(void *arg
)
4358 struct memory_notify
*marg
= arg
;
4361 offline_node
= marg
->status_change_nid_normal
;
4364 * If the node still has available memory. we need kmem_cache_node
4367 if (offline_node
< 0)
4370 mutex_lock(&slab_mutex
);
4371 node_clear(offline_node
, slab_nodes
);
4373 * We no longer free kmem_cache_node structures here, as it would be
4374 * racy with all get_node() users, and infeasible to protect them with
4377 mutex_unlock(&slab_mutex
);
4380 static int slab_mem_going_online_callback(void *arg
)
4382 struct kmem_cache_node
*n
;
4383 struct kmem_cache
*s
;
4384 struct memory_notify
*marg
= arg
;
4385 int nid
= marg
->status_change_nid_normal
;
4389 * If the node's memory is already available, then kmem_cache_node is
4390 * already created. Nothing to do.
4396 * We are bringing a node online. No memory is available yet. We must
4397 * allocate a kmem_cache_node structure in order to bring the node
4400 mutex_lock(&slab_mutex
);
4401 list_for_each_entry(s
, &slab_caches
, list
) {
4403 * The structure may already exist if the node was previously
4404 * onlined and offlined.
4406 if (get_node(s
, nid
))
4409 * XXX: kmem_cache_alloc_node will fallback to other nodes
4410 * since memory is not yet available from the node that
4413 n
= kmem_cache_alloc(kmem_cache_node
, GFP_KERNEL
);
4418 init_kmem_cache_node(n
);
4422 * Any cache created after this point will also have kmem_cache_node
4423 * initialized for the new node.
4425 node_set(nid
, slab_nodes
);
4427 mutex_unlock(&slab_mutex
);
4431 static int slab_memory_callback(struct notifier_block
*self
,
4432 unsigned long action
, void *arg
)
4437 case MEM_GOING_ONLINE
:
4438 ret
= slab_mem_going_online_callback(arg
);
4440 case MEM_GOING_OFFLINE
:
4441 ret
= slab_mem_going_offline_callback(arg
);
4444 case MEM_CANCEL_ONLINE
:
4445 slab_mem_offline_callback(arg
);
4448 case MEM_CANCEL_OFFLINE
:
4452 ret
= notifier_from_errno(ret
);
4458 static struct notifier_block slab_memory_callback_nb
= {
4459 .notifier_call
= slab_memory_callback
,
4460 .priority
= SLAB_CALLBACK_PRI
,
4463 /********************************************************************
4464 * Basic setup of slabs
4465 *******************************************************************/
4468 * Used for early kmem_cache structures that were allocated using
4469 * the page allocator. Allocate them properly then fix up the pointers
4470 * that may be pointing to the wrong kmem_cache structure.
4473 static struct kmem_cache
* __init
bootstrap(struct kmem_cache
*static_cache
)
4476 struct kmem_cache
*s
= kmem_cache_zalloc(kmem_cache
, GFP_NOWAIT
);
4477 struct kmem_cache_node
*n
;
4479 memcpy(s
, static_cache
, kmem_cache
->object_size
);
4482 * This runs very early, and only the boot processor is supposed to be
4483 * up. Even if it weren't true, IRQs are not up so we couldn't fire
4486 __flush_cpu_slab(s
, smp_processor_id());
4487 for_each_kmem_cache_node(s
, node
, n
) {
4490 list_for_each_entry(p
, &n
->partial
, slab_list
)
4493 #ifdef CONFIG_SLUB_DEBUG
4494 list_for_each_entry(p
, &n
->full
, slab_list
)
4498 list_add(&s
->list
, &slab_caches
);
4502 void __init
kmem_cache_init(void)
4504 static __initdata
struct kmem_cache boot_kmem_cache
,
4505 boot_kmem_cache_node
;
4508 if (debug_guardpage_minorder())
4511 /* Print slub debugging pointers without hashing */
4512 if (__slub_debug_enabled())
4513 no_hash_pointers_enable(NULL
);
4515 kmem_cache_node
= &boot_kmem_cache_node
;
4516 kmem_cache
= &boot_kmem_cache
;
4519 * Initialize the nodemask for which we will allocate per node
4520 * structures. Here we don't need taking slab_mutex yet.
4522 for_each_node_state(node
, N_NORMAL_MEMORY
)
4523 node_set(node
, slab_nodes
);
4525 create_boot_cache(kmem_cache_node
, "kmem_cache_node",
4526 sizeof(struct kmem_cache_node
), SLAB_HWCACHE_ALIGN
, 0, 0);
4528 register_hotmemory_notifier(&slab_memory_callback_nb
);
4530 /* Able to allocate the per node structures */
4531 slab_state
= PARTIAL
;
4533 create_boot_cache(kmem_cache
, "kmem_cache",
4534 offsetof(struct kmem_cache
, node
) +
4535 nr_node_ids
* sizeof(struct kmem_cache_node
*),
4536 SLAB_HWCACHE_ALIGN
, 0, 0);
4538 kmem_cache
= bootstrap(&boot_kmem_cache
);
4539 kmem_cache_node
= bootstrap(&boot_kmem_cache_node
);
4541 /* Now we can use the kmem_cache to allocate kmalloc slabs */
4542 setup_kmalloc_cache_index_table();
4543 create_kmalloc_caches(0);
4545 /* Setup random freelists for each cache */
4546 init_freelist_randomization();
4548 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD
, "slub:dead", NULL
,
4551 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
4553 slub_min_order
, slub_max_order
, slub_min_objects
,
4554 nr_cpu_ids
, nr_node_ids
);
4557 void __init
kmem_cache_init_late(void)
4562 __kmem_cache_alias(const char *name
, unsigned int size
, unsigned int align
,
4563 slab_flags_t flags
, void (*ctor
)(void *))
4565 struct kmem_cache
*s
;
4567 s
= find_mergeable(size
, align
, flags
, name
, ctor
);
4572 * Adjust the object sizes so that we clear
4573 * the complete object on kzalloc.
4575 s
->object_size
= max(s
->object_size
, size
);
4576 s
->inuse
= max(s
->inuse
, ALIGN(size
, sizeof(void *)));
4578 if (sysfs_slab_alias(s
, name
)) {
4587 int __kmem_cache_create(struct kmem_cache
*s
, slab_flags_t flags
)
4591 err
= kmem_cache_open(s
, flags
);
4595 /* Mutex is not taken during early boot */
4596 if (slab_state
<= UP
)
4599 err
= sysfs_slab_add(s
);
4601 __kmem_cache_release(s
);
4603 if (s
->flags
& SLAB_STORE_USER
)
4604 debugfs_slab_add(s
);
4609 void *__kmalloc_track_caller(size_t size
, gfp_t gfpflags
, unsigned long caller
)
4611 struct kmem_cache
*s
;
4614 if (unlikely(size
> KMALLOC_MAX_CACHE_SIZE
))
4615 return kmalloc_large(size
, gfpflags
);
4617 s
= kmalloc_slab(size
, gfpflags
);
4619 if (unlikely(ZERO_OR_NULL_PTR(s
)))
4622 ret
= slab_alloc(s
, gfpflags
, caller
, size
);
4624 /* Honor the call site pointer we received. */
4625 trace_kmalloc(caller
, ret
, size
, s
->size
, gfpflags
);
4629 EXPORT_SYMBOL(__kmalloc_track_caller
);
void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
					int node, unsigned long caller)
{
	struct kmem_cache *s;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
		ret = kmalloc_large_node(size, gfpflags, node);

		trace_kmalloc_node(caller, ret,
			size, PAGE_SIZE << get_order(size),
			gfpflags, node);

		return ret;
	}

	s = kmalloc_slab(size, gfpflags);

	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;

	ret = slab_alloc_node(s, gfpflags, node, caller, size);

	/* Honor the call site pointer we received. */
	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);

	return ret;
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#ifdef CONFIG_SYSFS
static int count_inuse(struct page *page)
{
	return page->inuse;
}

static int count_total(struct page *page)
{
	return page->objects;
}
#endif
#ifdef CONFIG_SLUB_DEBUG
static void validate_slab(struct kmem_cache *s, struct page *page)
{
	void *p;
	void *addr = page_address(page);
	unsigned long *map;

	slab_lock(page);

	if (!check_slab(s, page) || !on_freelist(s, page, NULL))
		goto unlock;

	/* Now we know that a valid freelist exists */
	map = get_map(s, page);
	for_each_object(p, s, addr, page->objects) {
		u8 val = test_bit(__obj_to_index(s, addr, p), map) ?
			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;

		if (!check_object(s, page, p, val))
			break;
	}
	put_map(map);
unlock:
	slab_unlock(page);
}
static int validate_slab_node(struct kmem_cache *s,
		struct kmem_cache_node *n)
{
	unsigned long count = 0;
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);

	list_for_each_entry(page, &n->partial, slab_list) {
		validate_slab(s, page);
		count++;
	}
	if (count != n->nr_partial) {
		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
		       s->name, count, n->nr_partial);
		slab_add_kunit_errors();
	}

	if (!(s->flags & SLAB_STORE_USER))
		goto out;

	list_for_each_entry(page, &n->full, slab_list) {
		validate_slab(s, page);
		count++;
	}
	if (count != atomic_long_read(&n->nr_slabs)) {
		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
		       s->name, count, atomic_long_read(&n->nr_slabs));
		slab_add_kunit_errors();
	}

out:
	spin_unlock_irqrestore(&n->list_lock, flags);
	return count;
}
long validate_slab_cache(struct kmem_cache *s)
{
	int node;
	unsigned long count = 0;
	struct kmem_cache_node *n;

	flush_all(s);
	for_each_kmem_cache_node(s, node, n)
		count += validate_slab_node(s, n);

	return count;
}
EXPORT_SYMBOL(validate_slab_cache);
#ifdef CONFIG_DEBUG_FS
/*
 * Generate lists of code addresses where slabcache objects are allocated
 * and freed.
 */

struct location {
	unsigned long count;
	unsigned long addr;
	long long sum_time;
	long min_time;
	long max_time;
	long min_pid;
	long max_pid;
	DECLARE_BITMAP(cpus, NR_CPUS);
	nodemask_t nodes;
};

struct loc_track {
	unsigned long max;
	unsigned long count;
	struct location *loc;
};

static struct dentry *slab_debugfs_root;

static void free_loc_track(struct loc_track *t)
{
	if (t->max)
		free_pages((unsigned long)t->loc,
			get_order(sizeof(struct location) * t->max));
}

static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
{
	struct location *l;
	int order;

	order = get_order(sizeof(struct location) * max);

	l = (void *)__get_free_pages(flags, order);
	if (!l)
		return 0;

	if (t->count) {
		memcpy(l, t->loc, sizeof(struct location) * t->count);
		free_loc_track(t);
	}
	t->max = max;
	t->loc = l;
	return 1;
}
static int add_location(struct loc_track *t, struct kmem_cache *s,
				const struct track *track)
{
	long start, end, pos;
	struct location *l;
	unsigned long caddr;
	unsigned long age = jiffies - track->when;

	start = -1;
	end = t->count;

	for ( ; ; ) {
		pos = start + (end - start + 1) / 2;

		/*
		 * There is nothing at "end". If we end up there
		 * we need to add something to before end.
		 */
		if (pos == end)
			break;

		caddr = t->loc[pos].addr;
		if (track->addr == caddr) {

			l = &t->loc[pos];
			l->count++;
			if (track->when) {
				l->sum_time += age;
				if (age < l->min_time)
					l->min_time = age;
				if (age > l->max_time)
					l->max_time = age;

				if (track->pid < l->min_pid)
					l->min_pid = track->pid;
				if (track->pid > l->max_pid)
					l->max_pid = track->pid;

				cpumask_set_cpu(track->cpu,
						to_cpumask(l->cpus));
			}
			node_set(page_to_nid(virt_to_page(track)), l->nodes);
			return 1;
		}

		if (track->addr < caddr)
			end = pos;
		else
			start = pos;
	}

	/*
	 * Not found. Insert new tracking element.
	 */
	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
		return 0;

	l = t->loc + pos;
	if (pos < t->count)
		memmove(l + 1, l,
			(t->count - pos) * sizeof(struct location));
	t->count++;
	l->count = 1;
	l->addr = track->addr;
	l->sum_time = age;
	l->min_time = age;
	l->max_time = age;
	l->min_pid = track->pid;
	l->max_pid = track->pid;
	cpumask_clear(to_cpumask(l->cpus));
	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
	nodes_clear(l->nodes);
	node_set(page_to_nid(virt_to_page(track)), l->nodes);
	return 1;
}
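
/*
 * add_location() keeps t->loc sorted by call-site address: the binary search
 * above either bumps the statistics of an existing entry in place, or leaves
 * "pos" at the insertion point so the memmove() can shift the tail up and a
 * fresh entry can be written there.
 */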
static void process_slab(struct loc_track *t, struct kmem_cache *s,
		struct page *page, enum track_item alloc)
{
	void *addr = page_address(page);
	void *p;
	unsigned long *map;

	map = get_map(s, page);
	for_each_object(p, s, addr, page->objects)
		if (!test_bit(__obj_to_index(s, addr, p), map))
			add_location(t, s, get_track(s, p, alloc));
	put_map(map);
}
#endif  /* CONFIG_DEBUG_FS   */
#endif	/* CONFIG_SLUB_DEBUG */
#ifdef CONFIG_SYSFS
enum slab_stat_type {
	SL_ALL,			/* All slabs */
	SL_PARTIAL,		/* Only partially allocated slabs */
	SL_CPU,			/* Only slabs used for cpu caches */
	SL_OBJECTS,		/* Determine allocated objects not slabs */
	SL_TOTAL		/* Determine object capacity not slabs */
};

#define SO_ALL		(1 << SL_ALL)
#define SO_PARTIAL	(1 << SL_PARTIAL)
#define SO_CPU		(1 << SL_CPU)
#define SO_OBJECTS	(1 << SL_OBJECTS)
#define SO_TOTAL	(1 << SL_TOTAL)
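
/*
 * These selectors are OR-ed together by the attribute handlers below,
 * e.g. objects_show() passes SO_ALL|SO_OBJECTS and total_objects_show()
 * passes SO_ALL|SO_TOTAL to show_slab_objects().
 */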
static ssize_t show_slab_objects(struct kmem_cache *s,
				 char *buf, unsigned long flags)
{
	unsigned long total = 0;
	int node;
	int x;
	unsigned long *nodes;
	int len = 0;

	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	if (flags & SO_CPU) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
							       cpu);
			int node;
			struct page *page;

			page = READ_ONCE(c->page);
			if (!page)
				continue;

			node = page_to_nid(page);
			if (flags & SO_TOTAL)
				x = page->objects;
			else if (flags & SO_OBJECTS)
				x = page->inuse;
			else
				x = 1;

			total += x;
			nodes[node] += x;

			page = slub_percpu_partial_read_once(c);
			if (page) {
				node = page_to_nid(page);
				if (flags & SO_TOTAL)
					WARN_ON_ONCE(1);
				else if (flags & SO_OBJECTS)
					WARN_ON_ONCE(1);
				else
					x = page->pages;
				total += x;
				nodes[node] += x;
			}
		}
	}

	/*
	 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
	 * already held which will conflict with an existing lock order:
	 *
	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
	 *
	 * We don't really need mem_hotplug_lock (to hold off
	 * slab_mem_going_offline_callback) here because slab's memory hot
	 * unplug code doesn't destroy the kmem_cache->node[] data.
	 */

#ifdef CONFIG_SLUB_DEBUG
	if (flags & SO_ALL) {
		struct kmem_cache_node *n;

		for_each_kmem_cache_node(s, node, n) {

			if (flags & SO_TOTAL)
				x = atomic_long_read(&n->total_objects);
			else if (flags & SO_OBJECTS)
				x = atomic_long_read(&n->total_objects) -
					count_partial(n, count_free);
			else
				x = atomic_long_read(&n->nr_slabs);

			total += x;
			nodes[node] += x;
		}

	} else
#endif
	if (flags & SO_PARTIAL) {
		struct kmem_cache_node *n;

		for_each_kmem_cache_node(s, node, n) {
			if (flags & SO_TOTAL)
				x = count_partial(n, count_total);
			else if (flags & SO_OBJECTS)
				x = count_partial(n, count_inuse);
			else
				x = n->nr_partial;
			total += x;
			nodes[node] += x;
		}
	}

	len += sysfs_emit_at(buf, len, "%lu", total);
#ifdef CONFIG_NUMA
	for (node = 0; node < nr_node_ids; node++) {
		if (nodes[node])
			len += sysfs_emit_at(buf, len, " N%d=%lu",
					     node, nodes[node]);
	}
#endif
	len += sysfs_emit_at(buf, len, "\n");
	kfree(nodes);

	return len;
}
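
/*
 * Example (illustrative): on a two-node machine, reading one of the object
 * counting attributes built on show_slab_objects() might return
 * "4096 N0=2048 N1=2048\n", per the sysfs_emit_at() calls above.
 */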
#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
#define to_slab(n) container_of(n, struct kmem_cache, kobj)

struct slab_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kmem_cache *s, char *buf);
	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
};

#define SLAB_ATTR_RO(_name) \
	static struct slab_attribute _name##_attr = \
			__ATTR(_name, 0400, _name##_show, NULL)

#define SLAB_ATTR(_name) \
	static struct slab_attribute _name##_attr =  \
	__ATTR(_name, 0600, _name##_show, _name##_store)
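
/*
 * Illustrative expansion: SLAB_ATTR_RO(slab_size) declares a read-only (0400)
 * attribute named "slab_size" backed by slab_size_show() with no store
 * method, while SLAB_ATTR(min_partial) additionally wires up
 * min_partial_store() and uses mode 0600.
 */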
static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%u\n", s->size);
}
SLAB_ATTR_RO(slab_size);

static ssize_t align_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%u\n", s->align);
}
SLAB_ATTR_RO(align);

static ssize_t object_size_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%u\n", s->object_size);
}
SLAB_ATTR_RO(object_size);

static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
}
SLAB_ATTR_RO(objs_per_slab);

static ssize_t order_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%u\n", oo_order(s->oo));
}
SLAB_ATTR_RO(order);
static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%lu\n", s->min_partial);
}

static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
				 size_t length)
{
	unsigned long min;
	int err;

	err = kstrtoul(buf, 10, &min);
	if (err)
		return err;

	set_min_partial(s, min);
	return length;
}
SLAB_ATTR(min_partial);

static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%u\n", slub_cpu_partial(s));
}

static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
				 size_t length)
{
	unsigned int objects;
	int err;

	err = kstrtouint(buf, 10, &objects);
	if (err)
		return err;
	if (objects && !kmem_cache_has_cpu_partial(s))
		return -EINVAL;

	slub_set_cpu_partial(s, objects);
	flush_all(s);
	return length;
}
SLAB_ATTR(cpu_partial);
static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
	if (!s->ctor)
		return 0;
	return sysfs_emit(buf, "%pS\n", s->ctor);
}
SLAB_ATTR_RO(ctor);

static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
}
SLAB_ATTR_RO(aliases);

static ssize_t partial_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_PARTIAL);
}
SLAB_ATTR_RO(partial);

static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_CPU);
}
SLAB_ATTR_RO(cpu_slabs);

static ssize_t objects_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
}
SLAB_ATTR_RO(objects);

static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
}
SLAB_ATTR_RO(objects_partial);
static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
{
	int objects = 0;
	int pages = 0;
	int cpu;
	int len = 0;

	for_each_online_cpu(cpu) {
		struct page *page;

		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));

		if (page) {
			pages += page->pages;
			objects += page->pobjects;
		}
	}

	len += sysfs_emit_at(buf, len, "%d(%d)", objects, pages);

#ifdef CONFIG_SMP
	for_each_online_cpu(cpu) {
		struct page *page;

		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
		if (page)
			len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
					     cpu, page->pobjects, page->pages);
	}
#endif
	len += sysfs_emit_at(buf, len, "\n");

	return len;
}
SLAB_ATTR_RO(slabs_cpu_partial);
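
/*
 * Example (illustrative): "120(6) C0=40(2) C1=80(4)\n" - the totals first,
 * then one "C<cpu>=objects(pages)" entry per online CPU that has percpu
 * partial pages, matching the sysfs_emit_at() format strings above.
 */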
static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
}
SLAB_ATTR_RO(reclaim_account);

static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
}
SLAB_ATTR_RO(hwcache_align);

#ifdef CONFIG_ZONE_DMA
static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
}
SLAB_ATTR_RO(cache_dma);
#endif

static ssize_t usersize_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%u\n", s->usersize);
}
SLAB_ATTR_RO(usersize);

static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
}
SLAB_ATTR_RO(destroy_by_rcu);
#ifdef CONFIG_SLUB_DEBUG
static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL);
}
SLAB_ATTR_RO(slabs);

static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
}
SLAB_ATTR_RO(total_objects);

static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
}
SLAB_ATTR_RO(sanity_checks);

static ssize_t trace_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
}
SLAB_ATTR_RO(trace);

static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
}
SLAB_ATTR_RO(red_zone);

static ssize_t poison_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
}
SLAB_ATTR_RO(poison);

static ssize_t store_user_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
}
SLAB_ATTR_RO(store_user);

static ssize_t validate_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t validate_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	int ret = -EINVAL;

	if (buf[0] == '1') {
		ret = validate_slab_cache(s);
		if (ret >= 0)
			ret = length;
	}
	return ret;
}
SLAB_ATTR(validate);

#endif /* CONFIG_SLUB_DEBUG */
#ifdef CONFIG_FAILSLAB
static ssize_t failslab_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
}
SLAB_ATTR_RO(failslab);
#endif

static ssize_t shrink_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t shrink_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	if (buf[0] == '1')
		kmem_cache_shrink(s);
	else
		return -EINVAL;
	return length;
}
SLAB_ATTR(shrink);

#ifdef CONFIG_NUMA
static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
}

static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	unsigned int ratio;
	int err;

	err = kstrtouint(buf, 10, &ratio);
	if (err)
		return err;
	if (ratio > 100)
		return -ERANGE;

	s->remote_node_defrag_ratio = ratio * 10;

	return length;
}
SLAB_ATTR(remote_node_defrag_ratio);
#endif
#ifdef CONFIG_SLUB_STATS
static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
{
	unsigned long sum  = 0;
	int cpu;
	int len = 0;
	int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];

		data[cpu] = x;
		sum += x;
	}

	len += sysfs_emit_at(buf, len, "%lu", sum);

#ifdef CONFIG_SMP
	for_each_online_cpu(cpu) {
		if (data[cpu])
			len += sysfs_emit_at(buf, len, " C%d=%u",
					     cpu, data[cpu]);
	}
#endif
	kfree(data);
	len += sysfs_emit_at(buf, len, "\n");

	return len;
}

static void clear_stat(struct kmem_cache *s, enum stat_item si)
{
	int cpu;

	for_each_online_cpu(cpu)
		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
}

#define STAT_ATTR(si, text) 					\
static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
{								\
	return show_stat(s, buf, si);				\
}								\
static ssize_t text##_store(struct kmem_cache *s,		\
				const char *buf, size_t length)	\
{								\
	if (buf[0] != '0')					\
		return -EINVAL;					\
	clear_stat(s, si);					\
	return length;						\
}								\
SLAB_ATTR(text);
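
/*
 * Illustrative expansion: STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) generates
 * alloc_fastpath_show()/alloc_fastpath_store() wrappers around show_stat()
 * and clear_stat(), plus the matching alloc_fastpath_attr used below.
 */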
STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
STAT_ATTR(FREE_FASTPATH, free_fastpath);
STAT_ATTR(FREE_SLOWPATH, free_slowpath);
STAT_ATTR(FREE_FROZEN, free_frozen);
STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
STAT_ATTR(ALLOC_SLAB, alloc_slab);
STAT_ATTR(ALLOC_REFILL, alloc_refill);
STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
STAT_ATTR(FREE_SLAB, free_slab);
STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
STAT_ATTR(ORDER_FALLBACK, order_fallback);
STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
#endif	/* CONFIG_SLUB_STATS */
static struct attribute *slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&min_partial_attr.attr,
	&cpu_partial_attr.attr,
	&objects_attr.attr,
	&objects_partial_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&shrink_attr.attr,
	&slabs_cpu_partial_attr.attr,
#ifdef CONFIG_SLUB_DEBUG
	&total_objects_attr.attr,
	&slabs_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
	&alloc_node_mismatch_attr.attr,
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
	&deactivate_bypass_attr.attr,
	&order_fallback_attr.attr,
	&cmpxchg_double_fail_attr.attr,
	&cmpxchg_double_cpu_fail_attr.attr,
	&cpu_partial_alloc_attr.attr,
	&cpu_partial_free_attr.attr,
	&cpu_partial_node_attr.attr,
	&cpu_partial_drain_attr.attr,
#endif
#ifdef CONFIG_FAILSLAB
	&failslab_attr.attr,
#endif
	&usersize_attr.attr,

	NULL
};

static const struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};
static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);
	return err;
}

static void kmem_cache_release(struct kobject *k)
{
	slab_kmem_cache_release(to_slab(k));
}

static const struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release,
};

static struct kset *slab_kset;

static inline struct kset *cache_kset(struct kmem_cache *s)
{
	return slab_kset;
}
#define ID_STR_LENGTH 64

/* Create a unique string id for a slab cache:
 *
 * Format	:[flags-]size
 */
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First flags affecting slabcache operations. We will only
	 * get here for aliasable slabs so we do not need to support
	 * too many flags. The flags here must cover all flags that
	 * are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_CACHE_DMA32)
		*p++ = 'D';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_CONSISTENCY_CHECKS)
		*p++ = 'F';
	if (s->flags & SLAB_ACCOUNT)
		*p++ = 'A';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07u", s->size);

	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}
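
/*
 * Example (illustrative): a mergeable 192 byte cache with SLAB_ACCOUNT set
 * gets the id ":A-0000192"; with none of the flags above set the id is
 * simply ":0000192".
 */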
static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	struct kset *kset = cache_kset(s);
	int unmergeable = slab_unmergeable(s);

	if (!kset) {
		kobject_init(&s->kobj, &slab_ktype);
		return 0;
	}

	if (!unmergeable && disable_higher_order_debug &&
			(slub_debug & DEBUG_METADATA_FLAGS))
		unmergeable = 1;

	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
	if (err)
		goto out;

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		goto out_del_kobj;

	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
	}
out:
	if (!unmergeable)
		kfree(name);
	return err;
out_del_kobj:
	kobject_del(&s->kobj);
	goto out;
}
void sysfs_slab_unlink(struct kmem_cache *s)
{
	if (slab_state >= FULL)
		kobject_del(&s->kobj);
}

void sysfs_slab_release(struct kmem_cache *s)
{
	if (slab_state >= FULL)
		kobject_put(&s->kobj);
}
/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == FULL) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}
static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	mutex_lock(&slab_mutex);

	slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
	if (!slab_kset) {
		mutex_unlock(&slab_mutex);
		pr_err("Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = FULL;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
			       s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
			       al->name);
		kfree(al);
	}

	mutex_unlock(&slab_mutex);
	return 0;
}

__initcall(slab_sysfs_init);
#endif /* CONFIG_SYSFS */
#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
static int slab_debugfs_show(struct seq_file *seq, void *v)
{

	struct location *l;
	unsigned int idx = *(unsigned int *)v;
	struct loc_track *t = seq->private;

	if (idx < t->count) {
		l = &t->loc[idx];

		seq_printf(seq, "%7ld ", l->count);

		if (l->addr)
			seq_printf(seq, "%pS", (void *)l->addr);
		else
			seq_puts(seq, "<not-available>");

		if (l->sum_time != l->min_time) {
			seq_printf(seq, " age=%ld/%llu/%ld",
				l->min_time, div_u64(l->sum_time, l->count),
				l->max_time);
		} else
			seq_printf(seq, " age=%ld", l->min_time);

		if (l->min_pid != l->max_pid)
			seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
		else
			seq_printf(seq, " pid=%ld",
				l->min_pid);

		if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
			seq_printf(seq, " cpus=%*pbl",
				 cpumask_pr_args(to_cpumask(l->cpus)));

		if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
			seq_printf(seq, " nodes=%*pbl",
				 nodemask_pr_args(&l->nodes));

		seq_puts(seq, "\n");
	}

	if (!idx && !t->count)
		seq_puts(seq, "No data\n");

	return 0;
}
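
/*
 * Example (illustrative) alloc_traces line produced by the seq_printf()
 * calls above:
 *   "    123 kmem_cache_alloc+0x9a/0x1b0 age=5/70/320 pid=1-512 cpus=0-3"
 */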
static void slab_debugfs_stop(struct seq_file *seq, void *v)
{
}

static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	struct loc_track *t = seq->private;

	v = ppos;
	++*ppos;
	if (*ppos <= t->count)
		return v;

	return NULL;
}

static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
{
	return ppos;
}

static const struct seq_operations slab_debugfs_sops = {
	.start  = slab_debugfs_start,
	.next   = slab_debugfs_next,
	.stop   = slab_debugfs_stop,
	.show   = slab_debugfs_show,
};
static int slab_debug_trace_open(struct inode *inode, struct file *filep)
{

	struct kmem_cache_node *n;
	enum track_item alloc;
	int node;
	struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
						sizeof(struct loc_track));
	struct kmem_cache *s = file_inode(filep)->i_private;

	if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
		alloc = TRACK_ALLOC;
	else
		alloc = TRACK_FREE;

	if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL))
		return -ENOMEM;

	/* Push back cpu slabs */
	flush_all(s);

	for_each_kmem_cache_node(s, node, n) {
		unsigned long flags;
		struct page *page;

		if (!atomic_long_read(&n->nr_slabs))
			continue;

		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(page, &n->partial, slab_list)
			process_slab(t, s, page, alloc);
		list_for_each_entry(page, &n->full, slab_list)
			process_slab(t, s, page, alloc);
		spin_unlock_irqrestore(&n->list_lock, flags);
	}

	return 0;
}
static int slab_debug_trace_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct loc_track *t = seq->private;

	free_loc_track(t);
	return seq_release_private(inode, file);
}

static const struct file_operations slab_debugfs_fops = {
	.open    = slab_debug_trace_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = slab_debug_trace_release,
};
static void debugfs_slab_add(struct kmem_cache *s)
{
	struct dentry *slab_cache_dir;

	if (unlikely(!slab_debugfs_root))
		return;

	slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);

	debugfs_create_file("alloc_traces", 0400,
		slab_cache_dir, s, &slab_debugfs_fops);

	debugfs_create_file("free_traces", 0400,
		slab_cache_dir, s, &slab_debugfs_fops);
}

void debugfs_slab_release(struct kmem_cache *s)
{
	debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root));
}
static int __init slab_debugfs_init(void)
{
	struct kmem_cache *s;

	slab_debugfs_root = debugfs_create_dir("slab", NULL);

	list_for_each_entry(s, &slab_caches, list)
		if (s->flags & SLAB_STORE_USER)
			debugfs_slab_add(s);

	return 0;

}
__initcall(slab_debugfs_init);
#endif
/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLUB_DEBUG
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
{
	unsigned long nr_slabs = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(s, node, n) {
		nr_slabs += node_nr_slabs(n);
		nr_objs += node_nr_objs(n);
		nr_free += count_partial(n, count_free);
	}

	sinfo->active_objs = nr_objs - nr_free;
	sinfo->num_objs = nr_objs;
	sinfo->active_slabs = nr_slabs;
	sinfo->num_slabs = nr_slabs;
	sinfo->objects_per_slab = oo_objects(s->oo);
	sinfo->cache_order = oo_order(s->oo);
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
{
}

ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	return -EIO;
}
#endif /* CONFIG_SLUB_DEBUG */
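
/*
 * Note (illustrative): the counters filled in by get_slabinfo() above feed
 * the common /proc/slabinfo code in mm/slab_common.c, which emits one line
 * per cache roughly of the form
 * "<name> <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> ...".
 */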