#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

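/*
 * Example (sketch): code that may run during early boot can gate on
 * slab_state before relying on optional allocator features; SLUB, for
 * instance, defers sysfs registration of caches created before the
 * state reaches FULL, roughly:
 *
 *	if (slab_state >= FULL)
 *		err = sysfs_slab_add(s);
 */
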
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

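/*
 * Example (sketch): the common cache-creation path can use the mask above
 * to strip caller-supplied flags that are not valid for the current
 * configuration before the cache is set up, roughly:
 *
 *	flags &= CACHE_CREATE_MASK;
 */
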
int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

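/*
 * Example (sketch): an allocator's get_slabinfo() fills this structure
 * for one cache and the common /proc/slabinfo code then formats it,
 * roughly:
 *
 *	struct slabinfo sinfo;
 *
 *	memset(&sinfo, 0, sizeof(sinfo));
 *	get_slabinfo(s, &sinfo);
 *	seq_printf(m, "%-17s %6lu %6lu %6u", cache_name(s),
 *		   sinfo.active_objs, sinfo.num_objs, sinfo.objects_per_slab);
 */
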
/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

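/*
 * Example (sketch): an allocator without a specialized bulk fast path can
 * simply forward its public bulk API to these generic helpers:
 *
 *	int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 *				  size_t nr, void **p)
 *	{
 *		return __kmem_cache_alloc_bulk(s, flags, nr, p);
 *	}
 */
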
#ifdef CONFIG_MEMCG_KMEM
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)

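/*
 * Example (sketch, with a hypothetical update_cache() helper): apply a
 * change to every per-memcg child of a root cache under slab_mutex:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		update_cache(c);
 *	mutex_unlock(&slab_mutex);
 */
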
static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We add suffixes to the cache name in memcg because we can't have
 * caches created in the system with the same name. But when we print
 * them locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said the caller must assure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return __memcg_kmem_charge_memcg(page, gfp, order,
					 s->memcg_params.memcg);
}

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* !CONFIG_MEMCG_KMEM */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

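/*
 * Example (sketch): free paths can route the caller-supplied cache through
 * cache_from_obj() so that an object allocated from a per-memcg child
 * cache is returned to that same cache rather than to the root cache:
 *
 *	cachep = cache_from_obj(s, x);
 *	... then free x back to cachep as usual ...
 */
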
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

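/*
 * Example (sketch, with a hypothetical count_partial() helper): sum a
 * per-node statistic across every node that has a kmem_cache_node:
 *
 *	int node;
 *	struct kmem_cache_node *n;
 *	unsigned long nr = 0;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr += count_partial(n);
 */
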
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */