/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

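/*
 * Editor's note (illustrative, not part of the upstream header): object_size
 * is the payload size the caller asked for, while size is what one object
 * slot actually occupies once alignment, padding and per-object metadata are
 * added. A hedged sketch, assuming a cache created roughly like this:
 *
 *	s = kmem_cache_create("example", 13, 8, 0, NULL);
 *
 * Here s->object_size would be 13, s->align would be 8, and s->size would be
 * at least 16 (13 rounded up to the 8-byte alignment, plus whatever metadata
 * the configured allocator appends).
 */
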
#include <linux/slab_def.h>
#include <linux/slub_def.h>

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

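/*
 * Editor's note (illustrative, not part of the upstream header): early boot
 * code that can run before the allocator is fully initialized may consult
 * slab_state before relying on kmalloc(). A hedged sketch, assuming the usual
 * memblock fallback is still usable at that point:
 *
 *	void *p;
 *
 *	if (slab_state >= UP)
 *		p = kmalloc(sz, GFP_KERNEL);
 *	else
 *		p = memblock_alloc(sz, SMP_CACHE_BYTES);
 *
 * The common helper slab_is_available() wraps this kind of comparison.
 */
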
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);

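/*
 * Editor's note (illustrative, not part of the upstream header): kmalloc_slab()
 * maps a requested size (and GFP flags, which select e.g. the DMA array) to
 * the fixed-size kmalloc cache that will back the allocation. A hedged sketch:
 *
 *	struct kmem_cache *s = kmalloc_slab(100, GFP_KERNEL);
 *
 * With the usual kmalloc size classes (..., 64, 96, 128, 192, ...), a
 * 100-byte request is served from the 128-byte cache, so s->object_size
 * would be 128 here.
 */
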
gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));

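/*
 * Editor's note (illustrative, not part of the upstream header): cache
 * creation first tries to reuse ("merge") an existing compatible cache
 * rather than create a new one; caches with constructors or debug-style
 * flags report themselves unmergeable. A hedged sketch of the lookup step:
 *
 *	s = find_mergeable(size, align, flags, name, ctor);
 *	if (s) {
 *		s->refcount++;
 *		return s;	// reuse the existing, compatible cache
 *	}
 *	// otherwise fall through and create a fresh kmem_cache
 */
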
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

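/*
 * Editor's note (illustrative, not part of the upstream header):
 * CACHE_CREATE_MASK is used by the common cache-creation path to drop any
 * requested flag that the current configuration cannot honour, e.g.:
 *
 *	flags &= CACHE_CREATE_MASK;
 *
 * With CONFIG_SLUB_DEBUG disabled, SLAB_DEBUG_FLAGS is 0, so a caller passing
 * SLAB_STORE_USER would simply have that bit masked away here.
 */
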
/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			      SLAB_TRACE | SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int batchcount;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

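/*
 * Editor's note (illustrative, not part of the upstream header):
 * get_slabinfo() fills a caller-provided struct slabinfo with the current
 * counters for one cache; /proc/slabinfo is built from this data. A hedged
 * sketch:
 *
 *	struct slabinfo sinfo;
 *
 *	memset(&sinfo, 0, sizeof(sinfo));
 *	get_slabinfo(s, &sinfo);
 *	pr_info("%s: %lu/%lu objects active\n",
 *		s->name, sinfo.active_objs, sinfo.num_objs);
 */
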
/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case the listed objects may be
 * allocated or freed in segments using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

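/*
 * Editor's note (illustrative, not part of the upstream header): the bulk API
 * allocates or frees an array of objects in one call; these generic fallbacks
 * simply loop over the per-object paths. A hedged sketch of the public
 * wrappers built on top of them:
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *		// ... use the objects ...
 *		kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
 *	}
 */
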
static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}

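/*
 * Editor's note (illustrative, not part of the upstream header): callers use
 * this helper to keep debug-only work off the fast path unless slub_debug is
 * actually enabled for the cache. A hedged sketch:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		print_tracking(s, object);	// object: some allocated slot
 */
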
#ifdef CONFIG_MEMCG_KMEM
static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
{
	/*
	 * page->mem_cgroup and page->obj_cgroups are sharing the same
	 * space. To distinguish between them in case we don't know for sure
	 * that the page is a slab page (e.g. page_cgroup_ino()), let's
	 * always set the lowest bit of obj_cgroups.
	 */
	return (struct obj_cgroup **)
		((unsigned long)page->obj_cgroups & ~0x1UL);
}

static inline bool page_has_obj_cgroups(struct page *page)
{
	return ((unsigned long)page->obj_cgroups & 0x1UL);
}

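/*
 * Editor's note (illustrative, not part of the upstream header): because the
 * obj_cgroups vector shares its word with page->mem_cgroup, the vector
 * pointer is stored with bit 0 set and masked off again on every read. A
 * hedged sketch of the store side (the real path is
 * memcg_alloc_page_obj_cgroups()):
 *
 *	vec = kcalloc(objects, sizeof(struct obj_cgroup *), gfp);
 *	page->obj_cgroups = (struct obj_cgroup **)((unsigned long)vec | 0x1UL);
 *
 * page_has_obj_cgroups() tests that low bit and page_obj_cgroups() clears it
 * to recover the real vector address.
 */
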
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
				 gfp_t gfp);

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
	kfree(page_obj_cgroups(page));
	page->obj_cgroups = NULL;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

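/*
 * Editor's note (illustrative arithmetic, not part of the upstream header):
 * on a 64-bit kernel sizeof(struct obj_cgroup *) is 8, so for a cache with
 * s->size == 192 each accounted object is charged as 192 + 8 = 200 bytes,
 * covering both the object slot and its obj_cgroup back-pointer.
 */
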
static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
							   size_t objects,
							   gfp_t flags)
{
	struct obj_cgroup *objcg;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return NULL;

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(objcg);
		return NULL;
	}

	return objcg;
}

static inline void mod_objcg_state(struct obj_cgroup *objcg,
				   struct pglist_data *pgdat,
				   int idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct page *page;
	unsigned long off;
	size_t i;

	if (!objcg)
		return;

	flags &= ~__GFP_ACCOUNT;
	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			page = virt_to_head_page(p[i]);

			if (!page_has_obj_cgroups(page) &&
			    memcg_alloc_page_obj_cgroups(page, s, flags)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, page, p[i]);
			obj_cgroup_get(objcg);
			page_obj_cgroups(page)[off] = objcg;
			mod_objcg_state(objcg, page_pgdat(page),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
					void **p, int objects)
{
	struct kmem_cache *s;
	struct obj_cgroup *objcg;
	struct page *page;
	unsigned int off;
	int i;

	if (!memcg_kmem_enabled())
		return;

	for (i = 0; i < objects; i++) {
		if (unlikely(!p[i]))
			continue;

		page = virt_to_head_page(p[i]);
		if (!page_has_obj_cgroups(page))
			continue;

		if (!s_orig)
			s = page->slab_cache;
		else
			s = s_orig;

		off = obj_to_index(s, page, p[i]);
		objcg = page_obj_cgroups(page)[off];
		if (!objcg)
			continue;

		page_obj_cgroups(page)[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

#else /* CONFIG_MEMCG_KMEM */
static inline bool page_has_obj_cgroups(struct page *page)
{
	return false;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp)
{
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
							   size_t objects,
							   gfp_t flags)
{
	return NULL;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline void account_slab_page(struct page *page, int order,
					      struct kmem_cache *s)
{
	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab_page(struct page *page, int order,
						struct kmem_cache *s)
{
	if (memcg_kmem_enabled())
		memcg_free_page_obj_cgroups(page);

	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		*objcgp = memcg_slab_pre_alloc_hook(s, size, flags);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg,
					gfp_t flags, size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled())
		memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

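/*
 * Editor's note (illustrative, not part of the upstream header): a hedged
 * sketch of how the iterator is typically used, e.g. summing per-node
 * partial slab counts for a SLUB cache:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */
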
void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void) { }
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

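/*
 * Editor's note (illustrative, not part of the upstream header): with the
 * init_on_alloc static key enabled, most caches have their objects zeroed on
 * allocation, but caches with a constructor (and RCU/poisoned caches) are
 * skipped because wiping them would destroy state the cache depends on. A
 * hedged sketch of the caller's side in the allocation path:
 *
 *	if (slab_want_init_on_alloc(flags, s))
 *		memset(object, 0, s->object_size);
 */
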
static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#endif /* MM_SLAB_H */