/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };

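/*
 * With CONFIG_SLUB_STATS enabled, each stat_item above is exported as a
 * per-cpu counter under /sys/kernel/slab/<cache>/, e.g.
 * /sys/kernel/slab/kmalloc-64/alloc_fastpath.
 */
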
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

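/*
 * How freelist and tid work together: the allocation fastpath updates both
 * with a single this_cpu_cmpxchg_double(), so any intervening allocation or
 * free on this cpu (which bumps the tid) makes the transaction fail and the
 * fastpath retry or fall back to the slowpath. A simplified sketch of that
 * step, adapted from mm/slub.c (not part of this header):
 */
#if 0	/* illustrative sketch only */
	object = c->freelist;
	if (unlikely(!this_cpu_cmpxchg_double(
			s->cpu_slab->freelist, s->cpu_slab->tid,
			object, tid,
			get_freepointer_safe(s, object), next_tid(tid))))
		goto redo;	/* lost the race: reload the cpu slab state */
#endif
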
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL

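/*
 * The _read_once() variant is for lockless readers (e.g. the sysfs object
 * counting code), which peek at another cpu's partial list while it may be
 * modified concurrently; READ_ONCE() forces a single load of the pointer.
 */
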
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};

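/*
 * The page order and the object count are packed into the single word 'x'.
 * The decoding helpers live in mm/slub.c; a sketch of their shape (OO_SHIFT
 * is 16 there):
 */
#if 0	/* illustrative sketch only */
#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;		/* high bits: page allocation order */
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;		/* low bits: objects per slab */
}
#endif
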
/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including metadata */
	unsigned int object_size;/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;	/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};

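/*
 * Instances of this struct are not built by hand; they come from the
 * generic slab API. A minimal usage sketch (struct and cache names here
 * are illustrative, not part of this header):
 */
#if 0	/* illustrative sketch only */
struct foo {
	int value;
	struct list_head list;
};

static struct kmem_cache *foo_cache;

foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
			      SLAB_HWCACHE_ALIGN, NULL);
obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
kmem_cache_free(foo_cache, obj);
kmem_cache_destroy(foo_cache);
#endif
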
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)		\
({						\
	slub_cpu_partial(s) = (n);		\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x) {
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

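/*
 * Worked example for nearest_obj(): with cache->size == 64 and
 * page_address(page) == P, a pointer x == P + 150 yields
 * object = x - (150 % 64) = P + 128, i.e. the start of the third object.
 * fixup_red_left() then skips the left red zone when SLAB_RED_ZONE
 * debugging is active, so the returned address is the usable object.
 */
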
/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, page_address(page), obj);
}

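/*
 * reciprocal_divide() above computes (obj - addr) / cache->size without a
 * hardware division, using the precomputed cache->reciprocal_size.
 * Continuing the example above (size == 64): an object at offset 128 from
 * page_address(page) maps to index 2. KFENCE objects short-circuit to
 * index 0 because a KFENCE-backed allocation is the only object on its page.
 */
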
static inline int objs_per_slab_page(const struct kmem_cache *cache,
				     const struct page *page)
{
	return page->objects;
}
#endif /* _LINUX_SLUB_DEF_H */