#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structures of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

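/*
 * Rough bootstrap sketch (illustrative, SLUB-flavoured; SLAB differs in
 * detail):
 *
 *	DOWN          nothing works yet
 *	PARTIAL       kmem_cache_init() has set up the kmem_cache_node cache
 *	PARTIAL_NODE  SLAB only: node-sized kmalloc caches exist
 *	UP            create_kmalloc_caches() has populated the kmalloc array
 *	FULL          late init (e.g. sysfs hookup) has completed
 */
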
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


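/*
 * Lookup sketch (illustrative): the common __kmalloc() path resolves a
 * request size to one of the fixed-size caches roughly like this:
 *
 *	s = kmalloc_slab(size, flags);
 *	if (unlikely(ZERO_OR_NULL_PTR(s)))
 *		return s;	and/or bail out: zero-sized or oversized request
 *	...allocate an object from s...
 */
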
/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

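/*
 * Bootstrap sketch (illustrative; the variable name below is hypothetical):
 * the cache of caches is created from a statically allocated structure
 * before any slab cache exists, roughly:
 *
 *	static struct kmem_cache boot_kmem_cache;
 *
 *	kmem_cache = &boot_kmem_cache;
 *	create_boot_cache(kmem_cache, "kmem_cache",
 *			  sizeof(struct kmem_cache), SLAB_HWCACHE_ALIGN);
 */
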
int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


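/*
 * Merging sketch (illustrative): kmem_cache_create() may reuse a compatible
 * existing cache instead of creating a new one, roughly:
 *
 *	s = __kmem_cache_alias(name, size, align, flags, ctor);
 *	if (s)
 *		return s;	existing cache reused, refcount bumped
 *	...otherwise fall through and create a fresh cache...
 *
 * Caches with constructors or certain debug/poison flags are rejected by
 * slab_unmergeable()/find_mergeable() and always get their own cache.
 */
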
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

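/*
 * Usage sketch (illustrative): kmem_cache_create() sanitizes caller-supplied
 * flags against this mask, so bits the configured allocator does not
 * understand are dropped rather than propagated:
 *
 *	flags &= CACHE_CREATE_MASK;
 */
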
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

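/*
 * For orientation (illustrative): get_slabinfo() fills this struct with the
 * per-cache numbers that back a /proc/slabinfo line, roughly:
 *
 *	# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>
 *	  : tunables <limit> <batchcount> <sharedfactor>
 *	  : slabdata <active_slabs> <num_slabs> <sharedavail>
 */
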
/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case, segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

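/*
 * Fallback sketch (illustrative): the generic bulk alloc simply loops over
 * the single-object path and unwinds on failure, roughly:
 *
 *	for (i = 0; i < nr; i++) {
 *		void *x = kmem_cache_alloc(s, flags);
 *		if (!x) {
 *			__kmem_cache_free_bulk(s, i, p);
 *			return 0;
 *		}
 *		p[i] = x;
 *	}
 *	return i;
 */
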
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)

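/*
 * Usage sketch (illustrative; do_something_with() is a hypothetical helper),
 * with slab_mutex held:
 *
 *	struct kmem_cache *c;
 *
 *	for_each_memcg_cache(c, root_cache)
 *		do_something_with(c);
 */
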
static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return __memcg_kmem_charge_memcg(page, gfp, order,
					 s->memcg_params.memcg);
}

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

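/*
 * Free-path sketch (illustrative): kmem_cache_free() runs the object through
 * cache_from_obj() so that a memcg clone or a wrong-cache free is detected
 * and redirected, roughly:
 *
 *	s = cache_from_obj(s, objp);
 *	...free objp into s...
 */
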
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

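/*
 * Relation to ksize() (rough sketch): slab_ksize() is the usable per-object
 * size, which may exceed the size that was requested, e.g.
 *
 *	p = kmalloc(40, GFP_KERNEL);
 *	ksize(p);	may report 64, not 40
 */
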
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	return memcg_kmem_get_cache(s, flags);
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object);
	}
	memcg_kmem_put_cache(s);
}

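/*
 * Hook pairing sketch (illustrative): an allocator's allocation path brackets
 * its fast path with these hooks, roughly:
 *
 *	s = slab_pre_alloc_hook(s, flags);
 *	if (!s)
 *		return NULL;	fault injection or memcg refusal
 *	object = ...allocator fast/slow path...
 *	slab_post_alloc_hook(s, flags, 1, &object);
 */
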
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
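
/*
 * Usage sketch (illustrative):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		count += n->nr_partial;	nr_partial is the SLUB field
 */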

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */