/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

#include <linux/kfence.h>
#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
	struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	slab_flags_t flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *freelist_cache;
	unsigned int freelist_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. 'size' contains the total
	 * object size including these internal fields, while 'obj_offset'
	 * and 'object_size' contain the offset to the user object and its
	 * size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
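
	/*
	 * Worked example of the debug layout described above (hypothetical
	 * numbers, ignoring alignment padding): with an 8-byte redzone on
	 * each side of a 64-byte object, obj_offset would be 8, size would
	 * be 64 + 2 * 8 = 80, and object_size would stay 64; the
	 * user-visible part of slot i then begins at
	 * slab_base + i * size + obj_offset.
	 */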

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};
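
/*
 * Illustrative sketch (not part of this header): the fields above are
 * filled in at cache creation. A hypothetical cache of 128-byte objects
 * with a constructor (run once per newly initialized object) and a
 * usercopy window over the first 64 bytes, via the real
 * kmem_cache_create_usercopy() API:
 *
 *	static void demo_ctor(void *obj)
 *	{
 *		memset(obj, 0, 128);
 *	}
 *
 *	struct kmem_cache *demo = kmem_cache_create_usercopy("demo", 128,
 *			0, SLAB_HWCACHE_ALIGN, 0, 64, demo_ctor);
 *
 * The allocator also derives reciprocal_buffer_size from ->size via
 * reciprocal_value(), which is what lets obj_to_index() below avoid a
 * hardware divide.
 */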

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x)
{
	void *object = x - (x - page->s_mem) % cache->size;
	void *last_object = page->s_mem + (cache->num - 1) * cache->size;

	if (unlikely(object > last_object))
		return last_object;
	else
		return object;
}
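
/*
 * Worked example (hypothetical numbers): with ->size == 256 and
 * x == page->s_mem + 700, (x - s_mem) % size == 188, so nearest_obj()
 * rounds x down to s_mem + 512, the start of the third object; the
 * last_object comparison only clamps pointers past the final slot.
 */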

/*
 * We want to avoid an expensive divide: (offset / cache->size).
 * Using the fact that size is a constant for a particular cache,
 * we can replace (offset / cache->size) by
 * reciprocal_divide(offset, cache->reciprocal_buffer_size).
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);

	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
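
/*
 * Worked example (hypothetical numbers): for a cache with ->size == 256,
 * an object at page->s_mem + 1024 has offset 1024, and
 * reciprocal_divide(1024, reciprocal_value(256)) == 1024 / 256 == 4,
 * computed with a multiply and a shift instead of a hardware divide.
 */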

static inline int objs_per_slab_page(const struct kmem_cache *cache,
				     const struct page *page)
{
	if (is_kfence_address(page_address(page)))
		return 1;	/* a KFENCE-backed page holds a single object */
	return cache->num;
}

#endif	/* _LINUX_SLAB_DEF_H */