/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>
enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
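
/*
 * With CONFIG_SLUB_STATS enabled, each counter above is kept per cpu in
 * kmem_cache_cpu::stat[] and exposed as its own sysfs file, e.g.
 * /sys/kernel/slab/<cache>/alloc_fastpath.
 */
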
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
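
/*
 * On the allocation fastpath, freelist and tid are read and then updated
 * as a pair with this_cpu_cmpxchg_double(); a tid mismatch means the task
 * was preempted or migrated to another cpu in between, and the operation
 * is retried (see slab_alloc_node() in mm/slub.c).
 */
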
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL
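
/*
 * The per-cpu partial list is threaded through page->next, so popping its
 * head slab looks roughly like this (simplified from the slowpath in
 * ___slab_alloc() in mm/slub.c):
 *
 *	page = c->page = slub_percpu_partial(c);
 *	slub_set_percpu_partial(c, page);	// c->partial = page->next
 *	stat(s, CPU_PARTIAL_ALLOC);
 */
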
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
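
/*
 * mm/slub.c packs the page allocation order into the high bits and the
 * object count into the low bits, roughly:
 *
 *	order   = x.x >> OO_SHIFT;	// oo_order()
 *	objects = x.x & OO_MASK;	// oo_objects()
 */
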
/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	int size;		/* The size of an object including meta data */
	int object_size;	/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
	struct work_struct kobj_remove_work;
#endif
#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
	int max_attr_size; /* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;	/* XOR key used to obscure freelist pointers */
#endif
#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};
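
/*
 * Caches are created and used through the generic slab API; a minimal
 * usage sketch (the cache name and struct my_obj are illustrative):
 *
 *	struct kmem_cache *c = kmem_cache_create("my_cache",
 *			sizeof(struct my_obj), 0, SLAB_HWCACHE_ALIGN, NULL);
 *	struct my_obj *p = kmem_cache_alloc(c, GFP_KERNEL);
 *	kmem_cache_free(c, p);
 *	kmem_cache_destroy(c);
 */
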
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)		\
({						\
	slub_cpu_partial(s) = (n);		\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif // CONFIG_SLUB_CPU_PARTIAL
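
/*
 * cpu_partial roughly bounds how many objects may sit on a cpu's partial
 * slab list before put_cpu_partial() drains it back to the node partial
 * list; it is tunable at runtime via /sys/kernel/slab/<cache>/cpu_partial.
 */
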
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);
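
/*
 * With SLAB_RED_ZONE debugging active, the usable object sits behind a
 * left red zone, so fixup_red_left() moves a pointer computed from the
 * raw slab layout forward by s->red_left_pad (see mm/slub.c).
 */
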
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x) {
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}
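
/*
 * nearest_obj() rounds a pointer into a slab page down to the start of
 * the object containing it, clamped to the last object in the page;
 * KASAN, for example, uses it to describe the object nearest to a bad
 * access in its reports.
 */
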
#endif /* _LINUX_SLUB_DEF_H */