/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_ARRAYCACHE,     /* SLAB: kmalloc size for arraycache available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};
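
/*
 * Illustrative sketch (not part of the original header): code that may run
 * during early boot gates slab usage on this state rather than reading the
 * enum values directly. slab_is_available() in slab_common.c is the usual
 * check, equivalent in effect to:
 *
 *	bool slab_is_available(void)
 *	{
 *		return slab_state >= UP;
 *	}
 *
 * Anything below UP means kmalloc()/kmem_cache_alloc() cannot yet be used
 * for general allocations.
 */
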
extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);
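
/*
 * Worked example (illustrative, based on the slab_common.c implementation):
 * for a 24-byte object created with SLAB_HWCACHE_ALIGN on a CPU with 64-byte
 * cache lines, the line size is halved while the object still fits in half of
 * it (64 -> 32), so calculate_alignment() returns 32, unless the caller passed
 * a larger explicit align, in which case that value wins.
 */
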
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);
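
/*
 * Illustrative sketch (an assumption about the callers, not part of this
 * header): during kmem_cache_init() the allocator first runs
 * create_boot_cache() on a statically allocated kmem_cache to break the
 * chicken-and-egg problem, then creates the kmalloc array, e.g.:
 *
 *	create_boot_cache(kmem_cache, "kmem_cache",
 *			  sizeof(struct kmem_cache), SLAB_HWCACHE_ALIGN);
 *	create_kmalloc_caches(0);
 *
 * The exact size argument differs between SLAB and SLUB; the value above is
 * simplified for illustration.
 */
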
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
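
/*
 * Illustrative sketch (an assumption about the caller, not part of this
 * header): kmem_cache_create() is expected to use CACHE_CREATE_MASK to
 * sanitize caller-supplied flags for the selected allocator, roughly:
 *
 *	flags &= CACHE_CREATE_MASK;
 *
 * so that flag bits only understood by the other allocator (or only by debug
 * builds) are dropped rather than misinterpreted.
 */
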
int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int batchcount;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
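
/*
 * Illustrative sketch (an assumption about the /proc/slabinfo show path, not
 * part of this header): a seq_file show callback is expected to combine these
 * helpers roughly as follows:
 *
 *	struct slabinfo sinfo;
 *
 *	memset(&sinfo, 0, sizeof(sinfo));
 *	get_slabinfo(s, &sinfo);
 *	seq_printf(m, "%-17s %6lu %6lu %6u",
 *		   cache_name(s), sinfo.active_objs, sinfo.num_objs,
 *		   sinfo.objects_per_slab);
 *	slabinfo_show_stats(m, s);
 *
 * The format string is only indicative of the /proc/slabinfo layout.
 */
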
#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
	if (!is_root_cache(s))
		atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
	if (is_root_cache(s))
		return;

	if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
		mem_cgroup_destroy_cache(s);
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}
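
/*
 * Example of the naming convention described above (illustrative; the exact
 * suffix format is generated in memcontrol.c and is an assumption here): the
 * per-memcg copy of a root cache named "dentry" gets a suffixed global name
 * such as "dentry(2:mygroup)", while cache_name() keeps reporting plain
 * "dentry" for it.
 */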

/*
 * Note that we protect with RCU only the memcg_caches array, not the
 * per-memcg caches themselves. The caller must therefore ensure the memcg's
 * cache won't go away. Since a memcg's cache is destroyed only along with its
 * root cache, this is guaranteed if we are going to allocate from the cache
 * or hold a reference to the root cache by other means. Otherwise, the caller
 * should hold either the slab_mutex or the memcg's slab_caches_mutex while
 * calling this function and accessing the returned value.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *params;

	if (!s->memcg_params)
		return NULL;

	rcu_read_lock();
	params = rcu_dereference(s->memcg_params);
	cachep = params->memcg_caches[idx];
	rcu_read_unlock();

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_register_cache()).
	 */
	smp_read_barrier_depends();
	return cachep;
}
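
/*
 * Illustrative sketch (an assumption about callers, not part of this header):
 * when no reference to the root cache is otherwise pinned, walking the
 * per-memcg children is expected to be done under slab_mutex, roughly:
 *
 *	mutex_lock(&slab_mutex);
 *	for (i = 0; i < nr_memcg_cache_ids; i++) {
 *		c = cache_from_memcg_idx(root, i);
 *		if (c)
 *			inspect(c);
 *	}
 *	mutex_unlock(&slab_mutex);
 *
 * The index bound and inspect() are placeholders for whatever iteration the
 * caller actually performs.
 */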

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_charge_kmem(s->memcg_params->memcg, gfp,
				 PAGE_SIZE << order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
	memcg_uncharge_kmem(s->memcg_params->memcg, PAGE_SIZE << order);
}
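
/*
 * Illustrative sketch (an assumption about the allocators, not part of this
 * header): the slab-page allocation path is expected to pair these helpers
 * around the page allocator, e.g.:
 *
 *	if (memcg_charge_slab(s, flags, order))
 *		return NULL;
 *	page = alloc_pages(flags, order);
 *	if (!page)
 *		memcg_uncharge_slab(s, order);
 *
 * and to call memcg_uncharge_slab() again when the slab page is eventually
 * freed, so charges stay balanced.
 */
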
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}
#endif

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value. But we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __FUNCTION__, cachep->name, s->name);
	return s;
}
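
/*
 * Illustrative sketch (an assumption about the allocators' free path, not
 * part of this header): kmem_cache_free() is expected to run the cache
 * argument through cache_from_obj() so that a memcg child cache (or a
 * debug-detected mismatch) is resolved before the actual free, roughly:
 *
 *	void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 *	{
 *		cachep = cache_from_obj(cachep, objp);
 *		do_free(cachep, objp);
 *	}
 *
 * do_free() stands in for the allocator-specific free routine.
 */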

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif
};

void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
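
/*
 * Illustrative sketch (names other than slab_next/slab_stop are assumptions,
 * not part of this header): these iterators are meant to slot into the
 * seq_operations used for /proc/slabinfo, roughly:
 *
 *	static const struct seq_operations slabinfo_op = {
 *		.start = slab_start_cb,
 *		.next  = slab_next,
 *		.stop  = slab_stop,
 *		.show  = slab_show_cb,
 *	};
 *
 * slab_start_cb() and slab_show_cb() are hypothetical stand-ins for the
 * start/show callbacks implemented in slab_common.c.
 */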