/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>

enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
        ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
        FREE_FASTPATH,          /* Free to cpu slab */
        FREE_SLOWPATH,          /* Freeing not to cpu slab */
        FREE_FROZEN,            /* Freeing to frozen slab */
        FREE_ADD_PARTIAL,       /* Freeing moves slab to partial list */
        FREE_REMOVE_PARTIAL,    /* Freeing removes last object */
        ALLOC_FROM_PARTIAL,     /* Cpu slab acquired from node partial list */
        ALLOC_SLAB,             /* Cpu slab acquired from page allocator */
        ALLOC_REFILL,           /* Refill cpu slab from slab freelist */
        ALLOC_NODE_MISMATCH,    /* Switching cpu slab */
        FREE_SLAB,              /* Slab freed to the page allocator */
        CPUSLAB_FLUSH,          /* Abandoning of the cpu slab */
        DEACTIVATE_FULL,        /* Cpu slab was full when deactivated */
        DEACTIVATE_EMPTY,       /* Cpu slab was empty when deactivated */
        DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
        DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
        DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
        DEACTIVATE_BYPASS,      /* Implicit deactivation */
        ORDER_FALLBACK,         /* Number of times fallback was necessary */
        CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
        CMPXCHG_DOUBLE_FAIL,    /* Number of times that cmpxchg double did not match */
        CPU_PARTIAL_ALLOC,      /* Used cpu partial on alloc */
        CPU_PARTIAL_FREE,       /* Refill cpu partial on free */
        CPU_PARTIAL_NODE,       /* Refill cpu partial from node partial */
        CPU_PARTIAL_DRAIN,      /* Drain cpu partial to node partial */
        NR_SLUB_STAT_ITEMS };
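
/*
 * Illustrative note (not part of the upstream header): when CONFIG_SLUB_STATS
 * is enabled, each counter above is exported as a per-cache sysfs file whose
 * name is the lower-case enum name, e.g.
 *
 *      /sys/kernel/slab/kmalloc-64/alloc_fastpath
 *
 * which makes it easy to see how often a given cache hits the fast paths.
 */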

struct kmem_cache_cpu {
        void **freelist;        /* Pointer to next available object */
        unsigned long tid;      /* Globally unique transaction id */
        struct page *page;      /* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
        struct page *partial;   /* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
        unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
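
/*
 * Illustrative sketch (paraphrased from mm/slub.c, not part of this header):
 * the allocation fastpath swaps the freelist head and the transaction id in
 * one this_cpu_cmpxchg_double(), so a successful allocation needs neither a
 * lock nor disabled interrupts:
 *
 *      object = c->freelist;
 *      tid = c->tid;
 *      if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *                                   object, tid,
 *                                   get_freepointer(s, object), next_tid(tid)))
 *              goto redo;
 *
 * where a failed cmpxchg means another cpu or an interrupt touched the
 * freelist first and the fastpath simply retries.
 */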

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)          ((c)->partial)

#define slub_set_percpu_partial(c, p)           \
({                                              \
        slub_percpu_partial(c) = (p)->next;     \
})

#define slub_percpu_partial_read_once(c)        READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)                  NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)        NULL
#endif // CONFIG_SLUB_CPU_PARTIAL

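/*
 * Illustrative sketch (paraphrased from mm/slub.c, not part of this header):
 * the allocation slow path pops the next frozen slab off the per-cpu partial
 * list roughly like
 *
 *      if (slub_percpu_partial(c)) {
 *              page = c->page = slub_percpu_partial(c);
 *              slub_set_percpu_partial(c, page);
 *              stat(s, CPU_PARTIAL_ALLOC);
 *      }
 *
 * while slub_percpu_partial_read_once() is used where the list is merely
 * peeked at without locking, e.g. when deciding whether a cpu needs its
 * slabs flushed.
 */
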
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
        unsigned int x;
};

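/*
 * Illustrative note (assumption about mm/slub.c internals, not part of this
 * header): the order is kept in the high bits of @x and the object count in
 * the low bits (OO_SHIFT/OO_MASK there), so both values are obtained from a
 * single word read:
 *
 *      order   = oo.x >> OO_SHIFT;
 *      objects = oo.x & OO_MASK;
 */
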
/*
 * Slab cache management.
 */
struct kmem_cache {
        struct kmem_cache_cpu __percpu *cpu_slab;
        /* Used for retrieving partial slabs, etc. */
        slab_flags_t flags;
        unsigned long min_partial;
        unsigned int size;      /* The size of an object including metadata */
        unsigned int object_size; /* The size of an object without metadata */
        struct reciprocal_value reciprocal_size;
        unsigned int offset;    /* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
        /* Number of per cpu partial objects to keep around */
        unsigned int cpu_partial;
#endif
        struct kmem_cache_order_objects oo;

        /* Allocation and freeing of slabs */
        struct kmem_cache_order_objects max;
        struct kmem_cache_order_objects min;
        gfp_t allocflags;       /* gfp flags to use on each alloc */
        int refcount;           /* Refcount for slab cache destroy */
        void (*ctor)(void *);
        unsigned int inuse;     /* Offset to metadata */
        unsigned int align;     /* Alignment */
        unsigned int red_left_pad;      /* Left redzone padding size */
        const char *name;       /* Name (only for display!) */
        struct list_head list;  /* List of slab caches */
#ifdef CONFIG_SYSFS
        struct kobject kobj;    /* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
        unsigned long random;
#endif

#ifdef CONFIG_NUMA
        /*
         * Defragmentation by allocating from a remote node.
         */
        unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
        unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
        struct kasan_cache kasan_info;
#endif

        unsigned int useroffset;        /* Usercopy region offset */
        unsigned int usersize;          /* Usercopy region size */

        struct kmem_cache_node *node[MAX_NUMNODES];
};

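/*
 * Usage sketch (illustrative, not part of this header): a cache for a
 * hypothetical struct foo would normally be created and used through the
 * generic slab API rather than by touching these fields directly:
 *
 *      foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *                                     SLAB_HWCACHE_ALIGN, NULL);
 *      p = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *      ...
 *      kmem_cache_free(foo_cachep, p);
 *      kmem_cache_destroy(foo_cachep);
 */
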
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)             ((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)              \
({                                              \
        slub_cpu_partial(s) = (n);              \
})
#else
#define slub_cpu_partial(s)             (0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
                u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
                                void *x) {
        void *object = x - (x - page_address(page)) % cache->size;
        void *last_object = page_address(page) +
                (page->objects - 1) * cache->size;
        void *result = (unlikely(object > last_object)) ? last_object : object;

        result = fixup_red_left(cache, result);
        return result;
}

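/*
 * Worked example for nearest_obj() (illustrative, not from the upstream
 * header): (x - page_address(page)) % cache->size is the offset of x within
 * its object, so subtracting it rounds x down to the object's start. With
 * cache->size == 256 and x at offset 700 into the page, the result points at
 * offset 512. The value is clamped to the last object on the page, and
 * fixup_red_left() then skips the left red zone when SLAB_RED_ZONE debugging
 * is active.
 */
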
/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
                                          void *addr, void *obj)
{
        return reciprocal_divide(kasan_reset_tag(obj) - addr,
                                 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
                                        const struct page *page, void *obj)
{
        return __obj_to_index(cache, page_address(page), obj);
}

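/*
 * Illustrative note (not part of the upstream header): obj_to_index() is
 * equivalent to (obj - page_address(page)) / cache->size. The divisor is
 * precomputed into cache->reciprocal_size with reciprocal_value(), so
 * reciprocal_divide() turns the division into a multiply and shift, and
 * kasan_reset_tag() strips any KASAN pointer tag so the arithmetic sees the
 * untagged address. For example, with cache->size == 192 an object starting
 * 576 bytes into the page has index 3.
 */
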
static inline int objs_per_slab_page(const struct kmem_cache *cache,
                                     const struct page *page)
{
        return page->objects;
}
#endif /* _LINUX_SLUB_DEF_H */