/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;	/* The original size of the object */
	unsigned int size;		/* The aligned/padded/added on size */
	unsigned int align;		/* Alignment as calculated */
	slab_flags_t flags;		/* Active flags on the slab */
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
	const char *name;		/* Slab name for sysfs */
	int refcount;			/* Use counter */
	void (*ctor)(void *);		/* Called on object slot creation */
	struct list_head list;		/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */
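/*
 * A minimal sketch (illustrative only, not part of this header) of what the
 * anonymous-struct idea above would look like: each allocator could embed
 * the common fields via a C11 anonymous struct instead of duplicating them:
 *
 *	struct kmem_cache {
 *		struct {
 *			unsigned int object_size;
 *			unsigned int size;
 *			...
 *		};
 *		...
 *	};
 */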
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
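/*
 * Example (sketch): early-boot code normally checks this state instead of
 * assuming the allocator is ready; slab_is_available() in slab_common.c is
 * essentially:
 *
 *	bool slab_is_available(void)
 *	{
 *		return slab_state >= UP;
 *	}
 */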
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);

gfp_t kmalloc_fix_flags(gfp_t flags);
/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);
int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));

#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);

#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#endif
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif
#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif
/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			      SLAB_TRACE | SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);
struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case, segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
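/*
 * Example (sketch): callers typically reach these through the public
 * kmem_cache_alloc_bulk()/kmem_cache_free_bulk() wrappers, e.g.
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *		... use the objects ...
 *		kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
 *	}
 */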
static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif
/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}
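/*
 * Example (sketch): a debug-only path can be gated on one of the
 * SLAB_DEBUG_FLAGS bits and costs only a static-key check when slub_debug
 * is disabled, e.g.
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		print_tracking(s, object);
 */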
#ifdef CONFIG_MEMCG_KMEM
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
				 gfp_t gfp, bool new_page);

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
	kfree(page_objcgs(page));
	page->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}
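/*
 * For example, on a 64-bit kernel a cache with s->size == 256 is charged
 * 256 + sizeof(struct obj_cgroup *) == 264 bytes per accounted object.
 */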
/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_enabled())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(objcg);
		return false;
	}

	*objcgp = objcg;
	return true;
}
static inline void mod_objcg_state(struct obj_cgroup *objcg,
				   struct pglist_data *pgdat,
				   enum node_stat_item idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}
static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct page *page;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_enabled() || !objcg)
		return;

	flags &= ~__GFP_ACCOUNT;
	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			page = virt_to_head_page(p[i]);

			if (!page_objcgs(page) &&
			    memcg_alloc_page_obj_cgroups(page, s, flags,
							 false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, page, p[i]);
			obj_cgroup_get(objcg);
			page_objcgs(page)[off] = objcg;
			mod_objcg_state(objcg, page_pgdat(page),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}
static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
					void **p, int objects)
{
	struct kmem_cache *s;
	struct obj_cgroup **objcgs;
	struct obj_cgroup *objcg;
	struct page *page;
	unsigned int off;
	int i;

	if (!memcg_kmem_enabled())
		return;

	for (i = 0; i < objects; i++) {
		if (unlikely(!p[i]))
			continue;

		page = virt_to_head_page(p[i]);
		objcgs = page_objcgs(page);
		if (!objcgs)
			continue;

		if (!s_orig)
			s = page->slab_cache;
		else
			s = s_orig;

		off = obj_to_index(s, page, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}
#else /* CONFIG_MEMCG_KMEM */
static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp,
					       bool new_page)
{
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}
static __always_inline void account_slab_page(struct page *page, int order,
					      struct kmem_cache *s,
					      gfp_t gfp)
{
	if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_page_obj_cgroups(page, s, gfp, true);

	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab_page(struct page *page, int order,
						struct kmem_cache *s)
{
	if (memcg_kmem_enabled())
		memcg_free_page_obj_cgroups(page);

	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
		return NULL;

	return s;
}
static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg,
					gfp_t flags, size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif
};
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
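/*
 * Example (sketch): summing a per-node counter over every node of a cache
 * (SLUB's nr_partial is used here purely for illustration):
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr += n->nr_partial;
 */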
void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct page *kp_page;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
};
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);

#endif /* MM_SLAB_H */