#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };

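/*
 * Hedged sketch of how these counters are consumed: mm/slub.c bumps
 * them with a raw per-cpu increment, roughly as below (the helper name
 * "stat" matches mm/slub.c; treat the body as an approximation rather
 * than the canonical implementation).
 *
 *	static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		raw_cpu_inc(s->cpu_slab->stat[si]);
 *	#endif
 *	}
 *
 * The read side is exposed per cache through sysfs, one file per item.
 */
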
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

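/*
 * Hedged sketch: freelist and tid are designed to be updated together.
 * The allocation fastpath in mm/slub.c pops an object with a single
 * this_cpu_cmpxchg_double() over both fields, along these lines
 * (next_tid() and the redo label are mm/slub.c internals, shown only
 * for illustration):
 *
 *	if (unlikely(!this_cpu_cmpxchg_double(
 *			s->cpu_slab->freelist, s->cpu_slab->tid,
 *			object, tid,
 *			next_object, next_tid(tid)))) {
 *		stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
 *		goto redo;
 *	}
 *
 * A tid that changed underneath the cmpxchg means the cpu slab was
 * switched or an interrupt allocated from it, so the path retries.
 */
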
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */

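/*
 * Hedged usage sketch: a flush path can drain a cpu's partial list by
 * repeatedly popping the head; slub_set_percpu_partial(c, page) is the
 * pop, since it stores page->next as the new head. "process_slab" is a
 * hypothetical callback, not a kernel function.
 *
 *	struct page *page;
 *
 *	while ((page = slub_percpu_partial(c))) {
 *		slub_set_percpu_partial(c, page);
 *		process_slab(page);
 *	}
 *
 * slub_percpu_partial_read_once() is the form to use when peeking at
 * another cpu's list without synchronization.
 */
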
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};

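/*
 * Hedged sketch of the encoding: mm/slub.c keeps the order in the high
 * bits of x and the object count in the low bits. The accessors there
 * look roughly like this (OO_SHIFT is an mm/slub.c constant, 16 at the
 * time of writing; the exact definitions live there, not here):
 *
 *	#define OO_SHIFT	16
 *	#define OO_MASK		((1 << OO_SHIFT) - 1)
 *
 *	static inline int oo_order(struct kmem_cache_order_objects x)
 *	{
 *		return x.x >> OO_SHIFT;
 *	}
 *
 *	static inline int oo_objects(struct kmem_cache_order_objects x)
 *	{
 *		return x.x & OO_MASK;
 *	}
 *
 * Packing both into one word is what makes the atomic update cheap: a
 * single store replaces order and object count together.
 */
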
/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	unsigned long min_partial;
	int size;		/* The size of an object including metadata */
	int object_size;	/* The size of an object without metadata */
	int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	int cpu_partial;	/* Number of per-cpu partial objects to keep around */
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
	struct work_struct kobj_remove_work;
#endif
#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
	int max_attr_size;	/* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};

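/*
 * Hedged sketch: per-node state is reached through the node[] array.
 * mm/slab.h provides accessors approximately like the following; the
 * exact definitions live there, not in this header.
 *
 *	static inline struct kmem_cache_node *get_node(struct kmem_cache *s,
 *						       int node)
 *	{
 *		return s->node[node];
 *	}
 *
 *	#define for_each_kmem_cache_node(__s, __node, __n)		\
 *		for (__node = 0; __node < nr_node_ids; __node++)	\
 *			if ((__n = get_node(__s, __node)))
 */
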
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)	\
({					\
	slub_cpu_partial(s) = (n);	\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */

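/*
 * Hedged sketch: the free path uses this limit to decide when a cpu's
 * partial pool has grown too large and must be moved back to the node
 * lists. The check in mm/slub.c's put_cpu_partial() is approximately:
 *
 *	if (drain && pobjects > slub_cpu_partial(s))
 *		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
 *
 * With CONFIG_SLUB_CPU_PARTIAL off, the macro folds to 0 and the
 * per-cpu partial handling compiles away.
 */
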
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x)
{
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

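/*
 * Worked example for nearest_obj(): assume cache->size == 64 and
 * page_address(page) == base (both values are illustrative). For
 * x == base + 100:
 *
 *	(x - base) % cache->size == 100 % 64 == 36
 *	object == x - 36 == base + 64	(start of the second object)
 *
 * If x pointed past the final object, the result is clamped to
 * last_object. fixup_red_left() then shifts the pointer past the left
 * red zone when the cache is built with SLAB_RED_ZONE debugging.
 */
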
#endif /* _LINUX_SLUB_DEF_H */