1 | /* | |
2 | * linux/mm/slab.c | |
3 | * Written by Mark Hemment, 1996/97. | |
4 | * (markhe@nextd.demon.co.uk) | |
5 | * | |
6 | * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli | |
7 | * | |
8 | * Major cleanup, different bufctl logic, per-cpu arrays | |
9 | * (c) 2000 Manfred Spraul | |
10 | * | |
11 | * Cleanup, make the head arrays unconditional, preparation for NUMA | |
12 | * (c) 2002 Manfred Spraul | |
13 | * | |
14 | * An implementation of the Slab Allocator as described in outline in; | |
15 | * UNIX Internals: The New Frontiers by Uresh Vahalia | |
16 | * Pub: Prentice Hall ISBN 0-13-101908-2 | |
17 | * or with a little more detail in; | |
18 | * The Slab Allocator: An Object-Caching Kernel Memory Allocator | |
19 | * Jeff Bonwick (Sun Microsystems). | |
20 | * Presented at: USENIX Summer 1994 Technical Conference | |
21 | * | |
22 | * The memory is organized in caches, one cache for each object type. | |
23 | * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct) | |
 24 | * Each cache consists of many slabs (they are small (usually one | |
25 | * page long) and always contiguous), and each slab contains multiple | |
26 | * initialized objects. | |
27 | * | |
 28 | * This means that your constructor is used only for newly allocated | |
29 | * slabs and you must pass objects with the same initializations to | |
30 | * kmem_cache_free. | |
31 | * | |
32 | * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM, | |
 33 | * normal). If you need a special memory type, then you must create a new | |
34 | * cache for that memory type. | |
35 | * | |
 36 | * In order to reduce fragmentation, the slabs are sorted into 3 groups: | |
37 | * full slabs with 0 free objects | |
38 | * partial slabs | |
39 | * empty slabs with no allocated objects | |
40 | * | |
 41 | * If partial slabs exist, then new allocations come from these slabs; | |
 42 | * otherwise they come from empty slabs, or new slabs are allocated. | |
43 | * | |
44 | * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache | |
45 | * during kmem_cache_destroy(). The caller must prevent concurrent allocs. | |
46 | * | |
47 | * Each cache has a short per-cpu head array, most allocs | |
48 | * and frees go into that array, and if that array overflows, then 1/2 | |
49 | * of the entries in the array are given back into the global cache. | |
50 | * The head array is strictly LIFO and should improve the cache hit rates. | |
51 | * On SMP, it additionally reduces the spinlock operations. | |
52 | * | |
 53 | * The c_cpuarray may not be read with local interrupts enabled - | |
54 | * it's changed with a smp_call_function(). | |
55 | * | |
56 | * SMP synchronization: | |
57 | * constructors and destructors are called without any locking. | |
58 | * Several members in struct kmem_cache and struct slab never change, they | |
59 | * are accessed without any locking. | |
 60 | * The per-cpu arrays are never accessed from the wrong cpu, so they need | |
 61 | * no locking, and local interrupts are disabled so slab code is preempt-safe. | |
62 | * The non-constant members are protected with a per-cache irq spinlock. | |
63 | * | |
64 | * Many thanks to Mark Hemment, who wrote another per-cpu slab patch | |
65 | * in 2000 - many ideas in the current implementation are derived from | |
66 | * his patch. | |
67 | * | |
68 | * Further notes from the original documentation: | |
69 | * | |
70 | * 11 April '97. Started multi-threading - markhe | |
71 | * The global cache-chain is protected by the mutex 'slab_mutex'. | |
 72 | * The mutex is only needed when accessing/extending the cache-chain, which | |
73 | * can never happen inside an interrupt (kmem_cache_create(), | |
74 | * kmem_cache_shrink() and kmem_cache_reap()). | |
75 | * | |
76 | * At present, each engine can be growing a cache. This should be blocked. | |
77 | * | |
78 | * 15 March 2005. NUMA slab allocator. | |
79 | * Shai Fultheim <shai@scalex86.org>. | |
80 | * Shobhit Dayal <shobhit@calsoftinc.com> | |
81 | * Alok N Kataria <alokk@calsoftinc.com> | |
82 | * Christoph Lameter <christoph@lameter.com> | |
83 | * | |
84 | * Modified the slab allocator to be node aware on NUMA systems. | |
85 | * Each node has its own list of partial, free and full slabs. | |
86 | * All object allocations for a node occur from node specific slab lists. | |
87 | */ | |
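/*
 * Illustrative only (not part of the allocator): a minimal, hypothetical
 * sketch of the client API described above. The struct, constructor and
 * function names below are invented for the example.
 */
#if 0	/* example only, not compiled */
struct foo {
	spinlock_t lock;
	int refcount;
};

static struct kmem_cache *foo_cachep;

/* Runs once per object when a new slab is grown, not on every allocation. */
static void foo_ctor(void *obj)
{
	struct foo *f = obj;

	spin_lock_init(&f->lock);
	f->refcount = 0;
}

static int __init foo_init(void)
{
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, foo_ctor);
	return foo_cachep ? 0 : -ENOMEM;
}

static void foo_use(void)
{
	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (!f)
		return;
	/*
	 * The object must be back in its constructed state (lock released,
	 * refcount 0) before it is handed back to the cache.
	 */
	kmem_cache_free(foo_cachep, f);
}
#endif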
88 | ||
89 | #include <linux/slab.h> | |
90 | #include <linux/mm.h> | |
91 | #include <linux/poison.h> | |
92 | #include <linux/swap.h> | |
93 | #include <linux/cache.h> | |
94 | #include <linux/interrupt.h> | |
95 | #include <linux/init.h> | |
96 | #include <linux/compiler.h> | |
97 | #include <linux/cpuset.h> | |
98 | #include <linux/proc_fs.h> | |
99 | #include <linux/seq_file.h> | |
100 | #include <linux/notifier.h> | |
101 | #include <linux/kallsyms.h> | |
102 | #include <linux/cpu.h> | |
103 | #include <linux/sysctl.h> | |
104 | #include <linux/module.h> | |
105 | #include <linux/rcupdate.h> | |
106 | #include <linux/string.h> | |
107 | #include <linux/uaccess.h> | |
108 | #include <linux/nodemask.h> | |
109 | #include <linux/kmemleak.h> | |
110 | #include <linux/mempolicy.h> | |
111 | #include <linux/mutex.h> | |
112 | #include <linux/fault-inject.h> | |
113 | #include <linux/rtmutex.h> | |
114 | #include <linux/reciprocal_div.h> | |
115 | #include <linux/debugobjects.h> | |
116 | #include <linux/kmemcheck.h> | |
117 | #include <linux/memory.h> | |
118 | #include <linux/prefetch.h> | |
119 | #include <linux/sched/task_stack.h> | |
120 | ||
121 | #include <net/sock.h> | |
122 | ||
123 | #include <asm/cacheflush.h> | |
124 | #include <asm/tlbflush.h> | |
125 | #include <asm/page.h> | |
126 | ||
127 | #include <trace/events/kmem.h> | |
128 | ||
129 | #include "internal.h" | |
130 | ||
131 | #include "slab.h" | |
132 | ||
133 | /* | |
 134 | * DEBUG - 1 for kmem_cache_create() to honour SLAB_RED_ZONE & SLAB_POISON. | |
135 | * 0 for faster, smaller code (especially in the critical paths). | |
136 | * | |
137 | * STATS - 1 to collect stats for /proc/slabinfo. | |
138 | * 0 for faster, smaller code (especially in the critical paths). | |
139 | * | |
140 | * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible) | |
141 | */ | |
142 | ||
143 | #ifdef CONFIG_DEBUG_SLAB | |
144 | #define DEBUG 1 | |
145 | #define STATS 1 | |
146 | #define FORCED_DEBUG 1 | |
147 | #else | |
148 | #define DEBUG 0 | |
149 | #define STATS 0 | |
150 | #define FORCED_DEBUG 0 | |
151 | #endif | |
152 | ||
153 | /* Shouldn't this be in a header file somewhere? */ | |
154 | #define BYTES_PER_WORD sizeof(void *) | |
155 | #define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long)) | |
156 | ||
157 | #ifndef ARCH_KMALLOC_FLAGS | |
158 | #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN | |
159 | #endif | |
160 | ||
161 | #define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \ | |
162 | <= SLAB_OBJ_MIN_SIZE) ? 1 : 0) | |
163 | ||
164 | #if FREELIST_BYTE_INDEX | |
165 | typedef unsigned char freelist_idx_t; | |
166 | #else | |
167 | typedef unsigned short freelist_idx_t; | |
168 | #endif | |
169 | ||
170 | #define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1) | |
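/*
 * Illustrative values: with a one-byte freelist_idx_t this evaluates to 255,
 * with a two-byte index to 65535, the largest number of objects a single
 * slab can address with that index width.
 */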
171 | ||
172 | /* | |
173 | * struct array_cache | |
174 | * | |
175 | * Purpose: | |
176 | * - LIFO ordering, to hand out cache-warm objects from _alloc | |
177 | * - reduce the number of linked list operations | |
178 | * - reduce spinlock operations | |
179 | * | |
180 | * The limit is stored in the per-cpu structure to reduce the data cache | |
181 | * footprint. | |
182 | * | |
183 | */ | |
184 | struct array_cache { | |
185 | unsigned int avail; | |
186 | unsigned int limit; | |
187 | unsigned int batchcount; | |
188 | unsigned int touched; | |
189 | void *entry[]; /* | |
190 | * Must have this definition in here for the proper | |
191 | * alignment of array_cache. Also simplifies accessing | |
192 | * the entries. | |
193 | */ | |
194 | }; | |
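/*
 * Illustrative only: the entry[] array is used as a plain LIFO stack,
 * roughly
 *
 *	free:  ac->entry[ac->avail++] = objp;	(room left: ac->avail < ac->limit)
 *	alloc: objp = ac->entry[--ac->avail];	(non-empty: ac->avail > 0)
 *
 * so the most recently freed, cache-warm object is handed out first.
 */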
195 | ||
196 | struct alien_cache { | |
197 | spinlock_t lock; | |
198 | struct array_cache ac; | |
199 | }; | |
200 | ||
201 | /* | |
202 | * Need this for bootstrapping a per node allocator. | |
203 | */ | |
204 | #define NUM_INIT_LISTS (2 * MAX_NUMNODES) | |
205 | static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS]; | |
206 | #define CACHE_CACHE 0 | |
207 | #define SIZE_NODE (MAX_NUMNODES) | |
208 | ||
209 | static int drain_freelist(struct kmem_cache *cache, | |
210 | struct kmem_cache_node *n, int tofree); | |
211 | static void free_block(struct kmem_cache *cachep, void **objpp, int len, | |
212 | int node, struct list_head *list); | |
213 | static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list); | |
214 | static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp); | |
215 | static void cache_reap(struct work_struct *unused); | |
216 | ||
217 | static inline void fixup_objfreelist_debug(struct kmem_cache *cachep, | |
218 | void **list); | |
219 | static inline void fixup_slab_list(struct kmem_cache *cachep, | |
220 | struct kmem_cache_node *n, struct page *page, | |
221 | void **list); | |
222 | static int slab_early_init = 1; | |
223 | ||
224 | #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node)) | |
225 | ||
226 | static void kmem_cache_node_init(struct kmem_cache_node *parent) | |
227 | { | |
228 | INIT_LIST_HEAD(&parent->slabs_full); | |
229 | INIT_LIST_HEAD(&parent->slabs_partial); | |
230 | INIT_LIST_HEAD(&parent->slabs_free); | |
231 | parent->total_slabs = 0; | |
232 | parent->free_slabs = 0; | |
233 | parent->shared = NULL; | |
234 | parent->alien = NULL; | |
235 | parent->colour_next = 0; | |
236 | spin_lock_init(&parent->list_lock); | |
237 | parent->free_objects = 0; | |
238 | parent->free_touched = 0; | |
239 | } | |
240 | ||
241 | #define MAKE_LIST(cachep, listp, slab, nodeid) \ | |
242 | do { \ | |
243 | INIT_LIST_HEAD(listp); \ | |
244 | list_splice(&get_node(cachep, nodeid)->slab, listp); \ | |
245 | } while (0) | |
246 | ||
247 | #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ | |
248 | do { \ | |
249 | MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \ | |
250 | MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \ | |
251 | MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ | |
252 | } while (0) | |
253 | ||
254 | #define CFLGS_OBJFREELIST_SLAB (0x40000000UL) | |
255 | #define CFLGS_OFF_SLAB (0x80000000UL) | |
256 | #define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB) | |
257 | #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) | |
258 | ||
259 | #define BATCHREFILL_LIMIT 16 | |
260 | /* | |
 261 | * Optimization question: fewer reaps mean a lower probability of unnecessary | |
262 | * cpucache drain/refill cycles. | |
263 | * | |
264 | * OTOH the cpuarrays can contain lots of objects, | |
265 | * which could lock up otherwise freeable slabs. | |
266 | */ | |
267 | #define REAPTIMEOUT_AC (2*HZ) | |
268 | #define REAPTIMEOUT_NODE (4*HZ) | |
269 | ||
270 | #if STATS | |
271 | #define STATS_INC_ACTIVE(x) ((x)->num_active++) | |
272 | #define STATS_DEC_ACTIVE(x) ((x)->num_active--) | |
273 | #define STATS_INC_ALLOCED(x) ((x)->num_allocations++) | |
274 | #define STATS_INC_GROWN(x) ((x)->grown++) | |
275 | #define STATS_ADD_REAPED(x,y) ((x)->reaped += (y)) | |
276 | #define STATS_SET_HIGH(x) \ | |
277 | do { \ | |
278 | if ((x)->num_active > (x)->high_mark) \ | |
279 | (x)->high_mark = (x)->num_active; \ | |
280 | } while (0) | |
281 | #define STATS_INC_ERR(x) ((x)->errors++) | |
282 | #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) | |
283 | #define STATS_INC_NODEFREES(x) ((x)->node_frees++) | |
284 | #define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++) | |
285 | #define STATS_SET_FREEABLE(x, i) \ | |
286 | do { \ | |
287 | if ((x)->max_freeable < i) \ | |
288 | (x)->max_freeable = i; \ | |
289 | } while (0) | |
290 | #define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) | |
291 | #define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) | |
292 | #define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) | |
293 | #define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) | |
294 | #else | |
295 | #define STATS_INC_ACTIVE(x) do { } while (0) | |
296 | #define STATS_DEC_ACTIVE(x) do { } while (0) | |
297 | #define STATS_INC_ALLOCED(x) do { } while (0) | |
298 | #define STATS_INC_GROWN(x) do { } while (0) | |
299 | #define STATS_ADD_REAPED(x,y) do { (void)(y); } while (0) | |
300 | #define STATS_SET_HIGH(x) do { } while (0) | |
301 | #define STATS_INC_ERR(x) do { } while (0) | |
302 | #define STATS_INC_NODEALLOCS(x) do { } while (0) | |
303 | #define STATS_INC_NODEFREES(x) do { } while (0) | |
304 | #define STATS_INC_ACOVERFLOW(x) do { } while (0) | |
305 | #define STATS_SET_FREEABLE(x, i) do { } while (0) | |
306 | #define STATS_INC_ALLOCHIT(x) do { } while (0) | |
307 | #define STATS_INC_ALLOCMISS(x) do { } while (0) | |
308 | #define STATS_INC_FREEHIT(x) do { } while (0) | |
309 | #define STATS_INC_FREEMISS(x) do { } while (0) | |
310 | #endif | |
311 | ||
312 | #if DEBUG | |
313 | ||
314 | /* | |
315 | * memory layout of objects: | |
316 | * 0 : objp | |
317 | * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that | |
318 | * the end of an object is aligned with the end of the real | |
319 | * allocation. Catches writes behind the end of the allocation. | |
320 | * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: | |
321 | * redzone word. | |
322 | * cachep->obj_offset: The real object. | |
323 | * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] | |
324 | * cachep->size - 1* BYTES_PER_WORD: last caller address | |
325 | * [BYTES_PER_WORD long] | |
326 | */ | |
327 | static int obj_offset(struct kmem_cache *cachep) | |
328 | { | |
329 | return cachep->obj_offset; | |
330 | } | |
331 | ||
332 | static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) | |
333 | { | |
334 | BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); | |
335 | return (unsigned long long*) (objp + obj_offset(cachep) - | |
336 | sizeof(unsigned long long)); | |
337 | } | |
338 | ||
339 | static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) | |
340 | { | |
341 | BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); | |
342 | if (cachep->flags & SLAB_STORE_USER) | |
343 | return (unsigned long long *)(objp + cachep->size - | |
344 | sizeof(unsigned long long) - | |
345 | REDZONE_ALIGN); | |
346 | return (unsigned long long *) (objp + cachep->size - | |
347 | sizeof(unsigned long long)); | |
348 | } | |
349 | ||
350 | static void **dbg_userword(struct kmem_cache *cachep, void *objp) | |
351 | { | |
352 | BUG_ON(!(cachep->flags & SLAB_STORE_USER)); | |
353 | return (void **)(objp + cachep->size - BYTES_PER_WORD); | |
354 | } | |
355 | ||
356 | #else | |
357 | ||
358 | #define obj_offset(x) 0 | |
359 | #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) | |
360 | #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) | |
361 | #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) | |
362 | ||
363 | #endif | |
364 | ||
365 | #ifdef CONFIG_DEBUG_SLAB_LEAK | |
366 | ||
367 | static inline bool is_store_user_clean(struct kmem_cache *cachep) | |
368 | { | |
369 | return atomic_read(&cachep->store_user_clean) == 1; | |
370 | } | |
371 | ||
372 | static inline void set_store_user_clean(struct kmem_cache *cachep) | |
373 | { | |
374 | atomic_set(&cachep->store_user_clean, 1); | |
375 | } | |
376 | ||
377 | static inline void set_store_user_dirty(struct kmem_cache *cachep) | |
378 | { | |
379 | if (is_store_user_clean(cachep)) | |
380 | atomic_set(&cachep->store_user_clean, 0); | |
381 | } | |
382 | ||
383 | #else | |
384 | static inline void set_store_user_dirty(struct kmem_cache *cachep) {} | |
385 | ||
386 | #endif | |
387 | ||
388 | /* | |
389 | * Do not go above this order unless 0 objects fit into the slab or | |
390 | * overridden on the command line. | |
391 | */ | |
392 | #define SLAB_MAX_ORDER_HI 1 | |
393 | #define SLAB_MAX_ORDER_LO 0 | |
394 | static int slab_max_order = SLAB_MAX_ORDER_LO; | |
395 | static bool slab_max_order_set __initdata; | |
396 | ||
397 | static inline struct kmem_cache *virt_to_cache(const void *obj) | |
398 | { | |
399 | struct page *page = virt_to_head_page(obj); | |
400 | return page->slab_cache; | |
401 | } | |
402 | ||
403 | static inline void *index_to_obj(struct kmem_cache *cache, struct page *page, | |
404 | unsigned int idx) | |
405 | { | |
406 | return page->s_mem + cache->size * idx; | |
407 | } | |
408 | ||
409 | /* | |
410 | * We want to avoid an expensive divide : (offset / cache->size) | |
411 | * Using the fact that size is a constant for a particular cache, | |
412 | * we can replace (offset / cache->size) by | |
413 | * reciprocal_divide(offset, cache->reciprocal_buffer_size) | |
414 | */ | |
415 | static inline unsigned int obj_to_index(const struct kmem_cache *cache, | |
416 | const struct page *page, void *obj) | |
417 | { | |
418 | u32 offset = (obj - page->s_mem); | |
419 | return reciprocal_divide(offset, cache->reciprocal_buffer_size); | |
420 | } | |
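/*
 * Worked example with illustrative numbers: for a cache with size == 256,
 * an object at offset 1536 from page->s_mem has index 1536 / 256 == 6;
 * reciprocal_divide() yields the same result using a multiply and a shift
 * instead of a hardware divide.
 */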
421 | ||
422 | #define BOOT_CPUCACHE_ENTRIES 1 | |
423 | /* internal cache of cache description objs */ | |
424 | static struct kmem_cache kmem_cache_boot = { | |
425 | .batchcount = 1, | |
426 | .limit = BOOT_CPUCACHE_ENTRIES, | |
427 | .shared = 1, | |
428 | .size = sizeof(struct kmem_cache), | |
429 | .name = "kmem_cache", | |
430 | }; | |
431 | ||
432 | static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); | |
433 | ||
434 | static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) | |
435 | { | |
436 | return this_cpu_ptr(cachep->cpu_cache); | |
437 | } | |
438 | ||
439 | /* | |
440 | * Calculate the number of objects and left-over bytes for a given buffer size. | |
441 | */ | |
442 | static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size, | |
443 | unsigned long flags, size_t *left_over) | |
444 | { | |
445 | unsigned int num; | |
446 | size_t slab_size = PAGE_SIZE << gfporder; | |
447 | ||
448 | /* | |
449 | * The slab management structure can be either off the slab or | |
450 | * on it. For the latter case, the memory allocated for a | |
451 | * slab is used for: | |
452 | * | |
453 | * - @buffer_size bytes for each object | |
454 | * - One freelist_idx_t for each object | |
455 | * | |
456 | * We don't need to consider alignment of freelist because | |
457 | * freelist will be at the end of slab page. The objects will be | |
458 | * at the correct alignment. | |
459 | * | |
460 | * If the slab management structure is off the slab, then the | |
461 | * alignment will already be calculated into the size. Because | |
462 | * the slabs are all pages aligned, the objects will be at the | |
463 | * correct alignment when allocated. | |
464 | */ | |
465 | if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) { | |
466 | num = slab_size / buffer_size; | |
467 | *left_over = slab_size % buffer_size; | |
468 | } else { | |
469 | num = slab_size / (buffer_size + sizeof(freelist_idx_t)); | |
470 | *left_over = slab_size % | |
471 | (buffer_size + sizeof(freelist_idx_t)); | |
472 | } | |
473 | ||
474 | return num; | |
475 | } | |
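/*
 * Worked example with illustrative numbers: for an order-0 slab (4096 bytes),
 * buffer_size == 256 and an on-slab freelist with one-byte indices, each
 * object costs 256 + 1 == 257 bytes, so num == 4096 / 257 == 15 and
 * *left_over == 4096 - 15 * 257 == 241 bytes (available for colouring).
 */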
476 | ||
477 | #if DEBUG | |
478 | #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg) | |
479 | ||
480 | static void __slab_error(const char *function, struct kmem_cache *cachep, | |
481 | char *msg) | |
482 | { | |
483 | pr_err("slab error in %s(): cache `%s': %s\n", | |
484 | function, cachep->name, msg); | |
485 | dump_stack(); | |
486 | add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); | |
487 | } | |
488 | #endif | |
489 | ||
490 | /* | |
491 | * By default on NUMA we use alien caches to stage the freeing of | |
492 | * objects allocated from other nodes. This causes massive memory | |
493 | * inefficiencies when using fake NUMA setup to split memory into a | |
494 | * large number of small nodes, so it can be disabled on the command | |
 495 | * line. | |
496 | */ | |
497 | ||
498 | static int use_alien_caches __read_mostly = 1; | |
499 | static int __init noaliencache_setup(char *s) | |
500 | { | |
501 | use_alien_caches = 0; | |
502 | return 1; | |
503 | } | |
504 | __setup("noaliencache", noaliencache_setup); | |
505 | ||
506 | static int __init slab_max_order_setup(char *str) | |
507 | { | |
508 | get_option(&str, &slab_max_order); | |
509 | slab_max_order = slab_max_order < 0 ? 0 : | |
510 | min(slab_max_order, MAX_ORDER - 1); | |
511 | slab_max_order_set = true; | |
512 | ||
513 | return 1; | |
514 | } | |
515 | __setup("slab_max_order=", slab_max_order_setup); | |
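/*
 * For example, booting with "slab_max_order=2" allows slabs of up to four
 * contiguous pages; negative values are clamped to 0 and anything above
 * MAX_ORDER - 1 is clamped down by the handler above.
 */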
516 | ||
517 | #ifdef CONFIG_NUMA | |
518 | /* | |
519 | * Special reaping functions for NUMA systems called from cache_reap(). | |
520 | * These take care of doing round robin flushing of alien caches (containing | |
521 | * objects freed on different nodes from which they were allocated) and the | |
522 | * flushing of remote pcps by calling drain_node_pages. | |
523 | */ | |
524 | static DEFINE_PER_CPU(unsigned long, slab_reap_node); | |
525 | ||
526 | static void init_reap_node(int cpu) | |
527 | { | |
528 | per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu), | |
529 | node_online_map); | |
530 | } | |
531 | ||
532 | static void next_reap_node(void) | |
533 | { | |
534 | int node = __this_cpu_read(slab_reap_node); | |
535 | ||
536 | node = next_node_in(node, node_online_map); | |
537 | __this_cpu_write(slab_reap_node, node); | |
538 | } | |
539 | ||
540 | #else | |
541 | #define init_reap_node(cpu) do { } while (0) | |
542 | #define next_reap_node(void) do { } while (0) | |
543 | #endif | |
544 | ||
545 | /* | |
546 | * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz | |
547 | * via the workqueue/eventd. | |
548 | * Add the CPU number into the expiration time to minimize the possibility of | |
549 | * the CPUs getting into lockstep and contending for the global cache chain | |
550 | * lock. | |
551 | */ | |
552 | static void start_cpu_timer(int cpu) | |
553 | { | |
554 | struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); | |
555 | ||
556 | if (reap_work->work.func == NULL) { | |
557 | init_reap_node(cpu); | |
558 | INIT_DEFERRABLE_WORK(reap_work, cache_reap); | |
559 | schedule_delayed_work_on(cpu, reap_work, | |
560 | __round_jiffies_relative(HZ, cpu)); | |
561 | } | |
562 | } | |
563 | ||
564 | static void init_arraycache(struct array_cache *ac, int limit, int batch) | |
565 | { | |
566 | /* | |
 567 | * The array_cache structures contain pointers to free objects. | |
568 | * However, when such objects are allocated or transferred to another | |
569 | * cache the pointers are not cleared and they could be counted as | |
570 | * valid references during a kmemleak scan. Therefore, kmemleak must | |
571 | * not scan such objects. | |
572 | */ | |
573 | kmemleak_no_scan(ac); | |
574 | if (ac) { | |
575 | ac->avail = 0; | |
576 | ac->limit = limit; | |
577 | ac->batchcount = batch; | |
578 | ac->touched = 0; | |
579 | } | |
580 | } | |
581 | ||
582 | static struct array_cache *alloc_arraycache(int node, int entries, | |
583 | int batchcount, gfp_t gfp) | |
584 | { | |
585 | size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache); | |
586 | struct array_cache *ac = NULL; | |
587 | ||
588 | ac = kmalloc_node(memsize, gfp, node); | |
589 | init_arraycache(ac, entries, batchcount); | |
590 | return ac; | |
591 | } | |
592 | ||
593 | static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep, | |
594 | struct page *page, void *objp) | |
595 | { | |
596 | struct kmem_cache_node *n; | |
597 | int page_node; | |
598 | LIST_HEAD(list); | |
599 | ||
600 | page_node = page_to_nid(page); | |
601 | n = get_node(cachep, page_node); | |
602 | ||
603 | spin_lock(&n->list_lock); | |
604 | free_block(cachep, &objp, 1, page_node, &list); | |
605 | spin_unlock(&n->list_lock); | |
606 | ||
607 | slabs_destroy(cachep, &list); | |
608 | } | |
609 | ||
610 | /* | |
611 | * Transfer objects in one arraycache to another. | |
612 | * Locking must be handled by the caller. | |
613 | * | |
614 | * Return the number of entries transferred. | |
615 | */ | |
616 | static int transfer_objects(struct array_cache *to, | |
617 | struct array_cache *from, unsigned int max) | |
618 | { | |
619 | /* Figure out how many entries to transfer */ | |
620 | int nr = min3(from->avail, max, to->limit - to->avail); | |
621 | ||
622 | if (!nr) | |
623 | return 0; | |
624 | ||
 625 | memcpy(to->entry + to->avail, from->entry + from->avail - nr, | |
 626 | sizeof(void *) * nr); | |
627 | ||
628 | from->avail -= nr; | |
629 | to->avail += nr; | |
630 | return nr; | |
631 | } | |
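/*
 * Illustrative numbers: with from->avail == 20, max == 16 and room for 10
 * more entries in the target (to->limit - to->avail == 10), nr == 10
 * objects move and the counters end up as from->avail == 10 and
 * to->avail == to->limit.
 */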
632 | ||
633 | #ifndef CONFIG_NUMA | |
634 | ||
635 | #define drain_alien_cache(cachep, alien) do { } while (0) | |
636 | #define reap_alien(cachep, n) do { } while (0) | |
637 | ||
638 | static inline struct alien_cache **alloc_alien_cache(int node, | |
639 | int limit, gfp_t gfp) | |
640 | { | |
641 | return NULL; | |
642 | } | |
643 | ||
644 | static inline void free_alien_cache(struct alien_cache **ac_ptr) | |
645 | { | |
646 | } | |
647 | ||
648 | static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) | |
649 | { | |
650 | return 0; | |
651 | } | |
652 | ||
653 | static inline void *alternate_node_alloc(struct kmem_cache *cachep, | |
654 | gfp_t flags) | |
655 | { | |
656 | return NULL; | |
657 | } | |
658 | ||
659 | static inline void *____cache_alloc_node(struct kmem_cache *cachep, | |
660 | gfp_t flags, int nodeid) | |
661 | { | |
662 | return NULL; | |
663 | } | |
664 | ||
665 | static inline gfp_t gfp_exact_node(gfp_t flags) | |
666 | { | |
667 | return flags & ~__GFP_NOFAIL; | |
668 | } | |
669 | ||
670 | #else /* CONFIG_NUMA */ | |
671 | ||
672 | static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); | |
673 | static void *alternate_node_alloc(struct kmem_cache *, gfp_t); | |
674 | ||
675 | static struct alien_cache *__alloc_alien_cache(int node, int entries, | |
676 | int batch, gfp_t gfp) | |
677 | { | |
678 | size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache); | |
679 | struct alien_cache *alc = NULL; | |
680 | ||
 681 | alc = kmalloc_node(memsize, gfp, node); | |
 | if (alc) { | |
 682 | init_arraycache(&alc->ac, entries, batch); | |
 683 | spin_lock_init(&alc->lock); | |
 | } | |
 684 | return alc; | |
685 | } | |
686 | ||
687 | static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) | |
688 | { | |
689 | struct alien_cache **alc_ptr; | |
690 | size_t memsize = sizeof(void *) * nr_node_ids; | |
691 | int i; | |
692 | ||
693 | if (limit > 1) | |
694 | limit = 12; | |
695 | alc_ptr = kzalloc_node(memsize, gfp, node); | |
696 | if (!alc_ptr) | |
697 | return NULL; | |
698 | ||
699 | for_each_node(i) { | |
700 | if (i == node || !node_online(i)) | |
701 | continue; | |
702 | alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp); | |
703 | if (!alc_ptr[i]) { | |
704 | for (i--; i >= 0; i--) | |
705 | kfree(alc_ptr[i]); | |
706 | kfree(alc_ptr); | |
707 | return NULL; | |
708 | } | |
709 | } | |
710 | return alc_ptr; | |
711 | } | |
712 | ||
713 | static void free_alien_cache(struct alien_cache **alc_ptr) | |
714 | { | |
715 | int i; | |
716 | ||
717 | if (!alc_ptr) | |
718 | return; | |
719 | for_each_node(i) | |
720 | kfree(alc_ptr[i]); | |
721 | kfree(alc_ptr); | |
722 | } | |
723 | ||
724 | static void __drain_alien_cache(struct kmem_cache *cachep, | |
725 | struct array_cache *ac, int node, | |
726 | struct list_head *list) | |
727 | { | |
728 | struct kmem_cache_node *n = get_node(cachep, node); | |
729 | ||
730 | if (ac->avail) { | |
731 | spin_lock(&n->list_lock); | |
732 | /* | |
 733 | * Stuff objects into the remote node's shared array first. | |
734 | * That way we could avoid the overhead of putting the objects | |
735 | * into the free lists and getting them back later. | |
736 | */ | |
737 | if (n->shared) | |
738 | transfer_objects(n->shared, ac, ac->limit); | |
739 | ||
740 | free_block(cachep, ac->entry, ac->avail, node, list); | |
741 | ac->avail = 0; | |
742 | spin_unlock(&n->list_lock); | |
743 | } | |
744 | } | |
745 | ||
746 | /* | |
747 | * Called from cache_reap() to regularly drain alien caches round robin. | |
748 | */ | |
749 | static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) | |
750 | { | |
751 | int node = __this_cpu_read(slab_reap_node); | |
752 | ||
753 | if (n->alien) { | |
754 | struct alien_cache *alc = n->alien[node]; | |
755 | struct array_cache *ac; | |
756 | ||
757 | if (alc) { | |
758 | ac = &alc->ac; | |
759 | if (ac->avail && spin_trylock_irq(&alc->lock)) { | |
760 | LIST_HEAD(list); | |
761 | ||
762 | __drain_alien_cache(cachep, ac, node, &list); | |
763 | spin_unlock_irq(&alc->lock); | |
764 | slabs_destroy(cachep, &list); | |
765 | } | |
766 | } | |
767 | } | |
768 | } | |
769 | ||
770 | static void drain_alien_cache(struct kmem_cache *cachep, | |
771 | struct alien_cache **alien) | |
772 | { | |
773 | int i = 0; | |
774 | struct alien_cache *alc; | |
775 | struct array_cache *ac; | |
776 | unsigned long flags; | |
777 | ||
778 | for_each_online_node(i) { | |
779 | alc = alien[i]; | |
780 | if (alc) { | |
781 | LIST_HEAD(list); | |
782 | ||
783 | ac = &alc->ac; | |
784 | spin_lock_irqsave(&alc->lock, flags); | |
785 | __drain_alien_cache(cachep, ac, i, &list); | |
786 | spin_unlock_irqrestore(&alc->lock, flags); | |
787 | slabs_destroy(cachep, &list); | |
788 | } | |
789 | } | |
790 | } | |
791 | ||
792 | static int __cache_free_alien(struct kmem_cache *cachep, void *objp, | |
793 | int node, int page_node) | |
794 | { | |
795 | struct kmem_cache_node *n; | |
796 | struct alien_cache *alien = NULL; | |
797 | struct array_cache *ac; | |
798 | LIST_HEAD(list); | |
799 | ||
800 | n = get_node(cachep, node); | |
801 | STATS_INC_NODEFREES(cachep); | |
802 | if (n->alien && n->alien[page_node]) { | |
803 | alien = n->alien[page_node]; | |
804 | ac = &alien->ac; | |
805 | spin_lock(&alien->lock); | |
806 | if (unlikely(ac->avail == ac->limit)) { | |
807 | STATS_INC_ACOVERFLOW(cachep); | |
808 | __drain_alien_cache(cachep, ac, page_node, &list); | |
809 | } | |
810 | ac->entry[ac->avail++] = objp; | |
811 | spin_unlock(&alien->lock); | |
812 | slabs_destroy(cachep, &list); | |
813 | } else { | |
814 | n = get_node(cachep, page_node); | |
815 | spin_lock(&n->list_lock); | |
816 | free_block(cachep, &objp, 1, page_node, &list); | |
817 | spin_unlock(&n->list_lock); | |
818 | slabs_destroy(cachep, &list); | |
819 | } | |
820 | return 1; | |
821 | } | |
822 | ||
823 | static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) | |
824 | { | |
825 | int page_node = page_to_nid(virt_to_page(objp)); | |
826 | int node = numa_mem_id(); | |
827 | /* | |
 828 | * Make sure we are not freeing an object from another node to the array | |
829 | * cache on this cpu. | |
830 | */ | |
831 | if (likely(node == page_node)) | |
832 | return 0; | |
833 | ||
834 | return __cache_free_alien(cachep, objp, node, page_node); | |
835 | } | |
836 | ||
837 | /* | |
838 | * Construct gfp mask to allocate from a specific node but do not reclaim or | |
839 | * warn about failures. | |
840 | */ | |
841 | static inline gfp_t gfp_exact_node(gfp_t flags) | |
842 | { | |
843 | return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL); | |
844 | } | |
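/*
 * Illustrative expansion: gfp_exact_node(GFP_KERNEL) keeps __GFP_IO and
 * __GFP_FS, adds __GFP_THISNODE and __GFP_NOWARN, and strips the
 * __GFP_RECLAIM bits and __GFP_NOFAIL, so the page allocator neither
 * reclaims nor falls back to other nodes.
 */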
845 | #endif | |
846 | ||
847 | static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp) | |
848 | { | |
849 | struct kmem_cache_node *n; | |
850 | ||
851 | /* | |
852 | * Set up the kmem_cache_node for cpu before we can | |
853 | * begin anything. Make sure some other cpu on this | |
854 | * node has not already allocated this | |
855 | */ | |
856 | n = get_node(cachep, node); | |
857 | if (n) { | |
858 | spin_lock_irq(&n->list_lock); | |
859 | n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + | |
860 | cachep->num; | |
861 | spin_unlock_irq(&n->list_lock); | |
862 | ||
863 | return 0; | |
864 | } | |
865 | ||
866 | n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); | |
867 | if (!n) | |
868 | return -ENOMEM; | |
869 | ||
870 | kmem_cache_node_init(n); | |
871 | n->next_reap = jiffies + REAPTIMEOUT_NODE + | |
872 | ((unsigned long)cachep) % REAPTIMEOUT_NODE; | |
873 | ||
874 | n->free_limit = | |
875 | (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; | |
876 | ||
877 | /* | |
878 | * The kmem_cache_nodes don't come and go as CPUs | |
879 | * come and go. slab_mutex is sufficient | |
880 | * protection here. | |
881 | */ | |
882 | cachep->node[node] = n; | |
883 | ||
884 | return 0; | |
885 | } | |
886 | ||
887 | #if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP) | |
888 | /* | |
 889 | * Allocates and initializes a kmem_cache_node for the given node on each slab | |
 890 | * cache, used for either memory or cpu hotplug. If memory is being hot-added, | |
 891 | * the kmem_cache_node will be allocated off-node since memory is not yet | |
 892 | * online for the new node. When hotplugging memory or a cpu, existing nodes | |
 893 | * are not replaced if already in use. | |
894 | * | |
895 | * Must hold slab_mutex. | |
896 | */ | |
897 | static int init_cache_node_node(int node) | |
898 | { | |
899 | int ret; | |
900 | struct kmem_cache *cachep; | |
901 | ||
902 | list_for_each_entry(cachep, &slab_caches, list) { | |
903 | ret = init_cache_node(cachep, node, GFP_KERNEL); | |
904 | if (ret) | |
905 | return ret; | |
906 | } | |
907 | ||
908 | return 0; | |
909 | } | |
910 | #endif | |
911 | ||
912 | static int setup_kmem_cache_node(struct kmem_cache *cachep, | |
913 | int node, gfp_t gfp, bool force_change) | |
914 | { | |
915 | int ret = -ENOMEM; | |
916 | struct kmem_cache_node *n; | |
917 | struct array_cache *old_shared = NULL; | |
918 | struct array_cache *new_shared = NULL; | |
919 | struct alien_cache **new_alien = NULL; | |
920 | LIST_HEAD(list); | |
921 | ||
922 | if (use_alien_caches) { | |
923 | new_alien = alloc_alien_cache(node, cachep->limit, gfp); | |
924 | if (!new_alien) | |
925 | goto fail; | |
926 | } | |
927 | ||
928 | if (cachep->shared) { | |
929 | new_shared = alloc_arraycache(node, | |
930 | cachep->shared * cachep->batchcount, 0xbaadf00d, gfp); | |
931 | if (!new_shared) | |
932 | goto fail; | |
933 | } | |
934 | ||
935 | ret = init_cache_node(cachep, node, gfp); | |
936 | if (ret) | |
937 | goto fail; | |
938 | ||
939 | n = get_node(cachep, node); | |
940 | spin_lock_irq(&n->list_lock); | |
941 | if (n->shared && force_change) { | |
942 | free_block(cachep, n->shared->entry, | |
943 | n->shared->avail, node, &list); | |
944 | n->shared->avail = 0; | |
945 | } | |
946 | ||
947 | if (!n->shared || force_change) { | |
948 | old_shared = n->shared; | |
949 | n->shared = new_shared; | |
950 | new_shared = NULL; | |
951 | } | |
952 | ||
953 | if (!n->alien) { | |
954 | n->alien = new_alien; | |
955 | new_alien = NULL; | |
956 | } | |
957 | ||
958 | spin_unlock_irq(&n->list_lock); | |
959 | slabs_destroy(cachep, &list); | |
960 | ||
961 | /* | |
 962 | * To protect lockless access to n->shared during irq-disabled context: | |
 963 | * if n->shared isn't NULL in an irq-disabled context, accessing it is | |
 964 | * guaranteed to stay valid until irqs are re-enabled, because it will | |
 965 | * only be freed after synchronize_sched(). | |
966 | */ | |
967 | if (old_shared && force_change) | |
968 | synchronize_sched(); | |
969 | ||
970 | fail: | |
971 | kfree(old_shared); | |
972 | kfree(new_shared); | |
973 | free_alien_cache(new_alien); | |
974 | ||
975 | return ret; | |
976 | } | |
977 | ||
978 | #ifdef CONFIG_SMP | |
979 | ||
980 | static void cpuup_canceled(long cpu) | |
981 | { | |
982 | struct kmem_cache *cachep; | |
983 | struct kmem_cache_node *n = NULL; | |
984 | int node = cpu_to_mem(cpu); | |
985 | const struct cpumask *mask = cpumask_of_node(node); | |
986 | ||
987 | list_for_each_entry(cachep, &slab_caches, list) { | |
988 | struct array_cache *nc; | |
989 | struct array_cache *shared; | |
990 | struct alien_cache **alien; | |
991 | LIST_HEAD(list); | |
992 | ||
993 | n = get_node(cachep, node); | |
994 | if (!n) | |
995 | continue; | |
996 | ||
997 | spin_lock_irq(&n->list_lock); | |
998 | ||
999 | /* Free limit for this kmem_cache_node */ | |
1000 | n->free_limit -= cachep->batchcount; | |
1001 | ||
1002 | /* cpu is dead; no one can alloc from it. */ | |
1003 | nc = per_cpu_ptr(cachep->cpu_cache, cpu); | |
1004 | if (nc) { | |
1005 | free_block(cachep, nc->entry, nc->avail, node, &list); | |
1006 | nc->avail = 0; | |
1007 | } | |
1008 | ||
1009 | if (!cpumask_empty(mask)) { | |
1010 | spin_unlock_irq(&n->list_lock); | |
1011 | goto free_slab; | |
1012 | } | |
1013 | ||
1014 | shared = n->shared; | |
1015 | if (shared) { | |
1016 | free_block(cachep, shared->entry, | |
1017 | shared->avail, node, &list); | |
1018 | n->shared = NULL; | |
1019 | } | |
1020 | ||
1021 | alien = n->alien; | |
1022 | n->alien = NULL; | |
1023 | ||
1024 | spin_unlock_irq(&n->list_lock); | |
1025 | ||
1026 | kfree(shared); | |
1027 | if (alien) { | |
1028 | drain_alien_cache(cachep, alien); | |
1029 | free_alien_cache(alien); | |
1030 | } | |
1031 | ||
1032 | free_slab: | |
1033 | slabs_destroy(cachep, &list); | |
1034 | } | |
1035 | /* | |
1036 | * In the previous loop, all the objects were freed to | |
 1037 | * the respective cache's slabs; now we can go ahead and | |
1038 | * shrink each nodelist to its limit. | |
1039 | */ | |
1040 | list_for_each_entry(cachep, &slab_caches, list) { | |
1041 | n = get_node(cachep, node); | |
1042 | if (!n) | |
1043 | continue; | |
1044 | drain_freelist(cachep, n, INT_MAX); | |
1045 | } | |
1046 | } | |
1047 | ||
1048 | static int cpuup_prepare(long cpu) | |
1049 | { | |
1050 | struct kmem_cache *cachep; | |
1051 | int node = cpu_to_mem(cpu); | |
1052 | int err; | |
1053 | ||
1054 | /* | |
1055 | * We need to do this right in the beginning since | |
 1056 | * the alloc_arraycache() calls are going to use this list. | |
 1057 | * kmalloc_node allows us to add the slab to the right | |
 1058 | * kmem_cache_node and not this cpu's kmem_cache_node. | |
1059 | */ | |
1060 | err = init_cache_node_node(node); | |
1061 | if (err < 0) | |
1062 | goto bad; | |
1063 | ||
1064 | /* | |
1065 | * Now we can go ahead with allocating the shared arrays and | |
1066 | * array caches | |
1067 | */ | |
1068 | list_for_each_entry(cachep, &slab_caches, list) { | |
1069 | err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false); | |
1070 | if (err) | |
1071 | goto bad; | |
1072 | } | |
1073 | ||
1074 | return 0; | |
1075 | bad: | |
1076 | cpuup_canceled(cpu); | |
1077 | return -ENOMEM; | |
1078 | } | |
1079 | ||
1080 | int slab_prepare_cpu(unsigned int cpu) | |
1081 | { | |
1082 | int err; | |
1083 | ||
1084 | mutex_lock(&slab_mutex); | |
1085 | err = cpuup_prepare(cpu); | |
1086 | mutex_unlock(&slab_mutex); | |
1087 | return err; | |
1088 | } | |
1089 | ||
1090 | /* | |
1091 | * This is called for a failed online attempt and for a successful | |
1092 | * offline. | |
1093 | * | |
1094 | * Even if all the cpus of a node are down, we don't free the | |
 1095 | * kmem_cache_node of any cache. This is to avoid a race between cpu_down, and | |
 1096 | * a kmalloc allocation from another cpu for memory from the node of | |
 1097 | * the cpu going down. The kmem_cache_node structure is usually allocated from | |
1098 | * kmem_cache_create() and gets destroyed at kmem_cache_destroy(). | |
1099 | */ | |
1100 | int slab_dead_cpu(unsigned int cpu) | |
1101 | { | |
1102 | mutex_lock(&slab_mutex); | |
1103 | cpuup_canceled(cpu); | |
1104 | mutex_unlock(&slab_mutex); | |
1105 | return 0; | |
1106 | } | |
1107 | #endif | |
1108 | ||
1109 | static int slab_online_cpu(unsigned int cpu) | |
1110 | { | |
1111 | start_cpu_timer(cpu); | |
1112 | return 0; | |
1113 | } | |
1114 | ||
1115 | static int slab_offline_cpu(unsigned int cpu) | |
1116 | { | |
1117 | /* | |
 1118 | * Shut down the cache reaper. Note that the slab_mutex is held so | |
1119 | * that if cache_reap() is invoked it cannot do anything | |
1120 | * expensive but will only modify reap_work and reschedule the | |
1121 | * timer. | |
1122 | */ | |
1123 | cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu)); | |
 1124 | /* Now the cache_reaper is guaranteed not to be running. */ | |
1125 | per_cpu(slab_reap_work, cpu).work.func = NULL; | |
1126 | return 0; | |
1127 | } | |
1128 | ||
1129 | #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) | |
1130 | /* | |
1131 | * Drains freelist for a node on each slab cache, used for memory hot-remove. | |
 1132 | * Returns -EBUSY if not all objects can be drained, so that the node is not | |
1133 | * removed. | |
1134 | * | |
1135 | * Must hold slab_mutex. | |
1136 | */ | |
1137 | static int __meminit drain_cache_node_node(int node) | |
1138 | { | |
1139 | struct kmem_cache *cachep; | |
1140 | int ret = 0; | |
1141 | ||
1142 | list_for_each_entry(cachep, &slab_caches, list) { | |
1143 | struct kmem_cache_node *n; | |
1144 | ||
1145 | n = get_node(cachep, node); | |
1146 | if (!n) | |
1147 | continue; | |
1148 | ||
1149 | drain_freelist(cachep, n, INT_MAX); | |
1150 | ||
1151 | if (!list_empty(&n->slabs_full) || | |
1152 | !list_empty(&n->slabs_partial)) { | |
1153 | ret = -EBUSY; | |
1154 | break; | |
1155 | } | |
1156 | } | |
1157 | return ret; | |
1158 | } | |
1159 | ||
1160 | static int __meminit slab_memory_callback(struct notifier_block *self, | |
1161 | unsigned long action, void *arg) | |
1162 | { | |
1163 | struct memory_notify *mnb = arg; | |
1164 | int ret = 0; | |
1165 | int nid; | |
1166 | ||
1167 | nid = mnb->status_change_nid; | |
1168 | if (nid < 0) | |
1169 | goto out; | |
1170 | ||
1171 | switch (action) { | |
1172 | case MEM_GOING_ONLINE: | |
1173 | mutex_lock(&slab_mutex); | |
1174 | ret = init_cache_node_node(nid); | |
1175 | mutex_unlock(&slab_mutex); | |
1176 | break; | |
1177 | case MEM_GOING_OFFLINE: | |
1178 | mutex_lock(&slab_mutex); | |
1179 | ret = drain_cache_node_node(nid); | |
1180 | mutex_unlock(&slab_mutex); | |
1181 | break; | |
1182 | case MEM_ONLINE: | |
1183 | case MEM_OFFLINE: | |
1184 | case MEM_CANCEL_ONLINE: | |
1185 | case MEM_CANCEL_OFFLINE: | |
1186 | break; | |
1187 | } | |
1188 | out: | |
1189 | return notifier_from_errno(ret); | |
1190 | } | |
1191 | #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */ | |
1192 | ||
1193 | /* | |
 1194 | * Swap the static kmem_cache_node with kmalloc'ed memory. | |
1195 | */ | |
1196 | static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list, | |
1197 | int nodeid) | |
1198 | { | |
1199 | struct kmem_cache_node *ptr; | |
1200 | ||
1201 | ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid); | |
1202 | BUG_ON(!ptr); | |
1203 | ||
1204 | memcpy(ptr, list, sizeof(struct kmem_cache_node)); | |
1205 | /* | |
1206 | * Do not assume that spinlocks can be initialized via memcpy: | |
1207 | */ | |
1208 | spin_lock_init(&ptr->list_lock); | |
1209 | ||
1210 | MAKE_ALL_LISTS(cachep, ptr, nodeid); | |
1211 | cachep->node[nodeid] = ptr; | |
1212 | } | |
1213 | ||
1214 | /* | |
 1215 | * For setting up all the kmem_cache_node structures for a cache whose | |
 1216 | * buffer_size is the same as the size of kmem_cache_node. | |
1217 | */ | |
1218 | static void __init set_up_node(struct kmem_cache *cachep, int index) | |
1219 | { | |
1220 | int node; | |
1221 | ||
1222 | for_each_online_node(node) { | |
1223 | cachep->node[node] = &init_kmem_cache_node[index + node]; | |
1224 | cachep->node[node]->next_reap = jiffies + | |
1225 | REAPTIMEOUT_NODE + | |
1226 | ((unsigned long)cachep) % REAPTIMEOUT_NODE; | |
1227 | } | |
1228 | } | |
1229 | ||
1230 | /* | |
 1231 | * Initialisation. Called after the page allocator has been initialised and | |
1232 | * before smp_init(). | |
1233 | */ | |
1234 | void __init kmem_cache_init(void) | |
1235 | { | |
1236 | int i; | |
1237 | ||
1238 | BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) < | |
1239 | sizeof(struct rcu_head)); | |
1240 | kmem_cache = &kmem_cache_boot; | |
1241 | ||
1242 | if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1) | |
1243 | use_alien_caches = 0; | |
1244 | ||
1245 | for (i = 0; i < NUM_INIT_LISTS; i++) | |
1246 | kmem_cache_node_init(&init_kmem_cache_node[i]); | |
1247 | ||
1248 | /* | |
1249 | * Fragmentation resistance on low memory - only use bigger | |
1250 | * page orders on machines with more than 32MB of memory if | |
1251 | * not overridden on the command line. | |
1252 | */ | |
1253 | if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT) | |
1254 | slab_max_order = SLAB_MAX_ORDER_HI; | |
1255 | ||
1256 | /* Bootstrap is tricky, because several objects are allocated | |
1257 | * from caches that do not exist yet: | |
1258 | * 1) initialize the kmem_cache cache: it contains the struct | |
1259 | * kmem_cache structures of all caches, except kmem_cache itself: | |
1260 | * kmem_cache is statically allocated. | |
1261 | * Initially an __init data area is used for the head array and the | |
 1262 | * kmem_cache_node structures; it's replaced with a kmalloc allocated | |
1263 | * array at the end of the bootstrap. | |
1264 | * 2) Create the first kmalloc cache. | |
1265 | * The struct kmem_cache for the new cache is allocated normally. | |
1266 | * An __init data area is used for the head array. | |
1267 | * 3) Create the remaining kmalloc caches, with minimally sized | |
1268 | * head arrays. | |
1269 | * 4) Replace the __init data head arrays for kmem_cache and the first | |
1270 | * kmalloc cache with kmalloc allocated arrays. | |
1271 | * 5) Replace the __init data for kmem_cache_node for kmem_cache and | |
 1272 | * the other caches with kmalloc allocated memory. | |
1273 | * 6) Resize the head arrays of the kmalloc caches to their final sizes. | |
1274 | */ | |
1275 | ||
1276 | /* 1) create the kmem_cache */ | |
1277 | ||
1278 | /* | |
1279 | * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids | |
1280 | */ | |
1281 | create_boot_cache(kmem_cache, "kmem_cache", | |
1282 | offsetof(struct kmem_cache, node) + | |
1283 | nr_node_ids * sizeof(struct kmem_cache_node *), | |
1284 | SLAB_HWCACHE_ALIGN); | |
1285 | list_add(&kmem_cache->list, &slab_caches); | |
1286 | slab_state = PARTIAL; | |
1287 | ||
1288 | /* | |
1289 | * Initialize the caches that provide memory for the kmem_cache_node | |
1290 | * structures first. Without this, further allocations will bug. | |
1291 | */ | |
1292 | kmalloc_caches[INDEX_NODE] = create_kmalloc_cache( | |
1293 | kmalloc_info[INDEX_NODE].name, | |
1294 | kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS); | |
1295 | slab_state = PARTIAL_NODE; | |
1296 | setup_kmalloc_cache_index_table(); | |
1297 | ||
1298 | slab_early_init = 0; | |
1299 | ||
1300 | /* 5) Replace the bootstrap kmem_cache_node */ | |
1301 | { | |
1302 | int nid; | |
1303 | ||
1304 | for_each_online_node(nid) { | |
1305 | init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid); | |
1306 | ||
1307 | init_list(kmalloc_caches[INDEX_NODE], | |
1308 | &init_kmem_cache_node[SIZE_NODE + nid], nid); | |
1309 | } | |
1310 | } | |
1311 | ||
1312 | create_kmalloc_caches(ARCH_KMALLOC_FLAGS); | |
1313 | } | |
1314 | ||
1315 | void __init kmem_cache_init_late(void) | |
1316 | { | |
1317 | struct kmem_cache *cachep; | |
1318 | ||
1319 | slab_state = UP; | |
1320 | ||
1321 | /* 6) resize the head arrays to their final sizes */ | |
1322 | mutex_lock(&slab_mutex); | |
1323 | list_for_each_entry(cachep, &slab_caches, list) | |
1324 | if (enable_cpucache(cachep, GFP_NOWAIT)) | |
1325 | BUG(); | |
1326 | mutex_unlock(&slab_mutex); | |
1327 | ||
1328 | /* Done! */ | |
1329 | slab_state = FULL; | |
1330 | ||
1331 | #ifdef CONFIG_NUMA | |
1332 | /* | |
1333 | * Register a memory hotplug callback that initializes and frees | |
 1334 | * kmem_cache_node structures. | |
1335 | */ | |
1336 | hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); | |
1337 | #endif | |
1338 | ||
1339 | /* | |
 1340 | * The reap timers are started later, with a module init call; that part | |
1341 | * of the kernel is not yet operational. | |
1342 | */ | |
1343 | } | |
1344 | ||
1345 | static int __init cpucache_init(void) | |
1346 | { | |
1347 | int ret; | |
1348 | ||
1349 | /* | |
1350 | * Register the timers that return unneeded pages to the page allocator | |
1351 | */ | |
1352 | ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online", | |
1353 | slab_online_cpu, slab_offline_cpu); | |
1354 | WARN_ON(ret < 0); | |
1355 | ||
1356 | /* Done! */ | |
1357 | slab_state = FULL; | |
1358 | return 0; | |
1359 | } | |
1360 | __initcall(cpucache_init); | |
1361 | ||
1362 | static noinline void | |
1363 | slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) | |
1364 | { | |
1365 | #if DEBUG | |
1366 | struct kmem_cache_node *n; | |
1367 | unsigned long flags; | |
1368 | int node; | |
1369 | static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL, | |
1370 | DEFAULT_RATELIMIT_BURST); | |
1371 | ||
1372 | if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs)) | |
1373 | return; | |
1374 | ||
1375 | pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", | |
1376 | nodeid, gfpflags, &gfpflags); | |
1377 | pr_warn(" cache: %s, object size: %d, order: %d\n", | |
1378 | cachep->name, cachep->size, cachep->gfporder); | |
1379 | ||
1380 | for_each_kmem_cache_node(cachep, node, n) { | |
1381 | unsigned long total_slabs, free_slabs, free_objs; | |
1382 | ||
1383 | spin_lock_irqsave(&n->list_lock, flags); | |
1384 | total_slabs = n->total_slabs; | |
1385 | free_slabs = n->free_slabs; | |
1386 | free_objs = n->free_objects; | |
1387 | spin_unlock_irqrestore(&n->list_lock, flags); | |
1388 | ||
1389 | pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n", | |
1390 | node, total_slabs - free_slabs, total_slabs, | |
1391 | (total_slabs * cachep->num) - free_objs, | |
1392 | total_slabs * cachep->num); | |
1393 | } | |
1394 | #endif | |
1395 | } | |
1396 | ||
1397 | /* | |
1398 | * Interface to system's page allocator. No need to hold the | |
1399 | * kmem_cache_node ->list_lock. | |
1400 | * | |
1401 | * If we requested dmaable memory, we will get it. Even if we | |
1402 | * did not request dmaable memory, we might get it, but that | |
1403 | * would be relatively rare and ignorable. | |
1404 | */ | |
1405 | static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, | |
1406 | int nodeid) | |
1407 | { | |
1408 | struct page *page; | |
1409 | int nr_pages; | |
1410 | ||
1411 | flags |= cachep->allocflags; | |
1412 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) | |
1413 | flags |= __GFP_RECLAIMABLE; | |
1414 | ||
1415 | page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); | |
1416 | if (!page) { | |
1417 | slab_out_of_memory(cachep, flags, nodeid); | |
1418 | return NULL; | |
1419 | } | |
1420 | ||
1421 | if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) { | |
1422 | __free_pages(page, cachep->gfporder); | |
1423 | return NULL; | |
1424 | } | |
1425 | ||
1426 | nr_pages = (1 << cachep->gfporder); | |
1427 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) | |
1428 | mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages); | |
1429 | else | |
1430 | mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages); | |
1431 | ||
1432 | __SetPageSlab(page); | |
1433 | /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ | |
1434 | if (sk_memalloc_socks() && page_is_pfmemalloc(page)) | |
1435 | SetPageSlabPfmemalloc(page); | |
1436 | ||
1437 | if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { | |
1438 | kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid); | |
1439 | ||
1440 | if (cachep->ctor) | |
1441 | kmemcheck_mark_uninitialized_pages(page, nr_pages); | |
1442 | else | |
1443 | kmemcheck_mark_unallocated_pages(page, nr_pages); | |
1444 | } | |
1445 | ||
1446 | return page; | |
1447 | } | |
1448 | ||
1449 | /* | |
1450 | * Interface to system's page release. | |
1451 | */ | |
1452 | static void kmem_freepages(struct kmem_cache *cachep, struct page *page) | |
1453 | { | |
1454 | int order = cachep->gfporder; | |
1455 | unsigned long nr_freed = (1 << order); | |
1456 | ||
1457 | kmemcheck_free_shadow(page, order); | |
1458 | ||
1459 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) | |
1460 | mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed); | |
1461 | else | |
1462 | mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed); | |
1463 | ||
1464 | BUG_ON(!PageSlab(page)); | |
1465 | __ClearPageSlabPfmemalloc(page); | |
1466 | __ClearPageSlab(page); | |
1467 | page_mapcount_reset(page); | |
1468 | page->mapping = NULL; | |
1469 | ||
1470 | if (current->reclaim_state) | |
1471 | current->reclaim_state->reclaimed_slab += nr_freed; | |
1472 | memcg_uncharge_slab(page, order, cachep); | |
1473 | __free_pages(page, order); | |
1474 | } | |
1475 | ||
1476 | static void kmem_rcu_free(struct rcu_head *head) | |
1477 | { | |
1478 | struct kmem_cache *cachep; | |
1479 | struct page *page; | |
1480 | ||
1481 | page = container_of(head, struct page, rcu_head); | |
1482 | cachep = page->slab_cache; | |
1483 | ||
1484 | kmem_freepages(cachep, page); | |
1485 | } | |
1486 | ||
1487 | #if DEBUG | |
1488 | static bool is_debug_pagealloc_cache(struct kmem_cache *cachep) | |
1489 | { | |
1490 | if (debug_pagealloc_enabled() && OFF_SLAB(cachep) && | |
1491 | (cachep->size % PAGE_SIZE) == 0) | |
1492 | return true; | |
1493 | ||
1494 | return false; | |
1495 | } | |
1496 | ||
1497 | #ifdef CONFIG_DEBUG_PAGEALLOC | |
1498 | static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, | |
1499 | unsigned long caller) | |
1500 | { | |
1501 | int size = cachep->object_size; | |
1502 | ||
1503 | addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; | |
1504 | ||
1505 | if (size < 5 * sizeof(unsigned long)) | |
1506 | return; | |
1507 | ||
1508 | *addr++ = 0x12345678; | |
1509 | *addr++ = caller; | |
1510 | *addr++ = smp_processor_id(); | |
1511 | size -= 3 * sizeof(unsigned long); | |
1512 | { | |
1513 | unsigned long *sptr = &caller; | |
1514 | unsigned long svalue; | |
1515 | ||
1516 | while (!kstack_end(sptr)) { | |
1517 | svalue = *sptr++; | |
1518 | if (kernel_text_address(svalue)) { | |
1519 | *addr++ = svalue; | |
1520 | size -= sizeof(unsigned long); | |
1521 | if (size <= sizeof(unsigned long)) | |
1522 | break; | |
1523 | } | |
1524 | } | |
1525 | ||
1526 | } | |
1527 | *addr++ = 0x87654321; | |
1528 | } | |
1529 | ||
1530 | static void slab_kernel_map(struct kmem_cache *cachep, void *objp, | |
1531 | int map, unsigned long caller) | |
1532 | { | |
1533 | if (!is_debug_pagealloc_cache(cachep)) | |
1534 | return; | |
1535 | ||
1536 | if (caller) | |
1537 | store_stackinfo(cachep, objp, caller); | |
1538 | ||
1539 | kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map); | |
1540 | } | |
1541 | ||
1542 | #else | |
1543 | static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp, | |
1544 | int map, unsigned long caller) {} | |
1545 | ||
1546 | #endif | |
1547 | ||
1548 | static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) | |
1549 | { | |
1550 | int size = cachep->object_size; | |
1551 | addr = &((char *)addr)[obj_offset(cachep)]; | |
1552 | ||
1553 | memset(addr, val, size); | |
1554 | *(unsigned char *)(addr + size - 1) = POISON_END; | |
1555 | } | |
1556 | ||
1557 | static void dump_line(char *data, int offset, int limit) | |
1558 | { | |
1559 | int i; | |
1560 | unsigned char error = 0; | |
1561 | int bad_count = 0; | |
1562 | ||
1563 | pr_err("%03x: ", offset); | |
1564 | for (i = 0; i < limit; i++) { | |
1565 | if (data[offset + i] != POISON_FREE) { | |
1566 | error = data[offset + i]; | |
1567 | bad_count++; | |
1568 | } | |
1569 | } | |
1570 | print_hex_dump(KERN_CONT, "", 0, 16, 1, | |
1571 | &data[offset], limit, 1); | |
1572 | ||
1573 | if (bad_count == 1) { | |
1574 | error ^= POISON_FREE; | |
1575 | if (!(error & (error - 1))) { | |
1576 | pr_err("Single bit error detected. Probably bad RAM.\n"); | |
1577 | #ifdef CONFIG_X86 | |
1578 | pr_err("Run memtest86+ or a similar memory test tool.\n"); | |
1579 | #else | |
1580 | pr_err("Run a memory test tool.\n"); | |
1581 | #endif | |
1582 | } | |
1583 | } | |
1584 | } | |
1585 | #endif | |
1586 | ||
1587 | #if DEBUG | |
1588 | ||
1589 | static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) | |
1590 | { | |
1591 | int i, size; | |
1592 | char *realobj; | |
1593 | ||
1594 | if (cachep->flags & SLAB_RED_ZONE) { | |
1595 | pr_err("Redzone: 0x%llx/0x%llx\n", | |
1596 | *dbg_redzone1(cachep, objp), | |
1597 | *dbg_redzone2(cachep, objp)); | |
1598 | } | |
1599 | ||
1600 | if (cachep->flags & SLAB_STORE_USER) { | |
1601 | pr_err("Last user: [<%p>](%pSR)\n", | |
1602 | *dbg_userword(cachep, objp), | |
1603 | *dbg_userword(cachep, objp)); | |
1604 | } | |
1605 | realobj = (char *)objp + obj_offset(cachep); | |
1606 | size = cachep->object_size; | |
1607 | for (i = 0; i < size && lines; i += 16, lines--) { | |
1608 | int limit; | |
1609 | limit = 16; | |
1610 | if (i + limit > size) | |
1611 | limit = size - i; | |
1612 | dump_line(realobj, i, limit); | |
1613 | } | |
1614 | } | |
1615 | ||
1616 | static void check_poison_obj(struct kmem_cache *cachep, void *objp) | |
1617 | { | |
1618 | char *realobj; | |
1619 | int size, i; | |
1620 | int lines = 0; | |
1621 | ||
1622 | if (is_debug_pagealloc_cache(cachep)) | |
1623 | return; | |
1624 | ||
1625 | realobj = (char *)objp + obj_offset(cachep); | |
1626 | size = cachep->object_size; | |
1627 | ||
1628 | for (i = 0; i < size; i++) { | |
1629 | char exp = POISON_FREE; | |
1630 | if (i == size - 1) | |
1631 | exp = POISON_END; | |
1632 | if (realobj[i] != exp) { | |
1633 | int limit; | |
1634 | /* Mismatch ! */ | |
1635 | /* Print header */ | |
1636 | if (lines == 0) { | |
1637 | pr_err("Slab corruption (%s): %s start=%p, len=%d\n", | |
1638 | print_tainted(), cachep->name, | |
1639 | realobj, size); | |
1640 | print_objinfo(cachep, objp, 0); | |
1641 | } | |
1642 | /* Hexdump the affected line */ | |
1643 | i = (i / 16) * 16; | |
1644 | limit = 16; | |
1645 | if (i + limit > size) | |
1646 | limit = size - i; | |
1647 | dump_line(realobj, i, limit); | |
1648 | i += 16; | |
1649 | lines++; | |
1650 | /* Limit to 5 lines */ | |
1651 | if (lines > 5) | |
1652 | break; | |
1653 | } | |
1654 | } | |
1655 | if (lines != 0) { | |
1656 | /* Print some data about the neighboring objects, if they | |
1657 | * exist: | |
1658 | */ | |
1659 | struct page *page = virt_to_head_page(objp); | |
1660 | unsigned int objnr; | |
1661 | ||
1662 | objnr = obj_to_index(cachep, page, objp); | |
1663 | if (objnr) { | |
1664 | objp = index_to_obj(cachep, page, objnr - 1); | |
1665 | realobj = (char *)objp + obj_offset(cachep); | |
1666 | pr_err("Prev obj: start=%p, len=%d\n", realobj, size); | |
1667 | print_objinfo(cachep, objp, 2); | |
1668 | } | |
1669 | if (objnr + 1 < cachep->num) { | |
1670 | objp = index_to_obj(cachep, page, objnr + 1); | |
1671 | realobj = (char *)objp + obj_offset(cachep); | |
1672 | pr_err("Next obj: start=%p, len=%d\n", realobj, size); | |
1673 | print_objinfo(cachep, objp, 2); | |
1674 | } | |
1675 | } | |
1676 | } | |
1677 | #endif | |
1678 | ||
1679 | #if DEBUG | |
1680 | static void slab_destroy_debugcheck(struct kmem_cache *cachep, | |
1681 | struct page *page) | |
1682 | { | |
1683 | int i; | |
1684 | ||
1685 | if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) { | |
1686 | poison_obj(cachep, page->freelist - obj_offset(cachep), | |
1687 | POISON_FREE); | |
1688 | } | |
1689 | ||
1690 | for (i = 0; i < cachep->num; i++) { | |
1691 | void *objp = index_to_obj(cachep, page, i); | |
1692 | ||
1693 | if (cachep->flags & SLAB_POISON) { | |
1694 | check_poison_obj(cachep, objp); | |
1695 | slab_kernel_map(cachep, objp, 1, 0); | |
1696 | } | |
1697 | if (cachep->flags & SLAB_RED_ZONE) { | |
1698 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) | |
1699 | slab_error(cachep, "start of a freed object was overwritten"); | |
1700 | if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) | |
1701 | slab_error(cachep, "end of a freed object was overwritten"); | |
1702 | } | |
1703 | } | |
1704 | } | |
1705 | #else | |
1706 | static void slab_destroy_debugcheck(struct kmem_cache *cachep, | |
1707 | struct page *page) | |
1708 | { | |
1709 | } | |
1710 | #endif | |
1711 | ||
1712 | /** | |
1713 | * slab_destroy - destroy and release all objects in a slab | |
1714 | * @cachep: cache pointer being destroyed | |
1715 | * @page: page pointer being destroyed | |
1716 | * | |
1717 | * Destroy all the objs in a slab page, and release the mem back to the system. | |
1718 | * Before calling, the slab page must have been unlinked from the cache. The | |
1719 | * kmem_cache_node ->list_lock is not held/needed. | |
1720 | */ | |
1721 | static void slab_destroy(struct kmem_cache *cachep, struct page *page) | |
1722 | { | |
1723 | void *freelist; | |
1724 | ||
1725 | freelist = page->freelist; | |
1726 | slab_destroy_debugcheck(cachep, page); | |
1727 | if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU)) | |
1728 | call_rcu(&page->rcu_head, kmem_rcu_free); | |
1729 | else | |
1730 | kmem_freepages(cachep, page); | |
1731 | ||
1732 | /* | |
1733 | * From now on, we don't use the freelist pointer any more, | |
1734 | * although the actual page can still be freed in RCU context. | |
1735 | */ | |
1736 | if (OFF_SLAB(cachep)) | |
1737 | kmem_cache_free(cachep->freelist_cache, freelist); | |
1738 | } | |
1739 | ||
1740 | static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list) | |
1741 | { | |
1742 | struct page *page, *n; | |
1743 | ||
1744 | list_for_each_entry_safe(page, n, list, lru) { | |
1745 | list_del(&page->lru); | |
1746 | slab_destroy(cachep, page); | |
1747 | } | |
1748 | } | |
1749 | ||
1750 | /** | |
1751 | * calculate_slab_order - calculate size (page order) of slabs | |
1752 | * @cachep: pointer to the cache that is being created | |
1753 | * @size: size of objects to be created in this cache. | |
1754 | * @flags: slab allocation flags | |
1755 | * | |
1756 | * Also calculates the number of objects per slab. | |
1757 | * | |
1758 | * This could be made much more intelligent. For now, try to avoid using | |
1759 | * high order pages for slabs. When the gfp() functions are more friendly | |
1760 | * towards high-order requests, this should be changed. | |
1761 | */ | |
1762 | static size_t calculate_slab_order(struct kmem_cache *cachep, | |
1763 | size_t size, unsigned long flags) | |
1764 | { | |
1765 | size_t left_over = 0; | |
1766 | int gfporder; | |
1767 | ||
1768 | for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { | |
1769 | unsigned int num; | |
1770 | size_t remainder; | |
1771 | ||
1772 | num = cache_estimate(gfporder, size, flags, &remainder); | |
1773 | if (!num) | |
1774 | continue; | |
1775 | ||
1776 | /* Can't handle more objects than SLAB_OBJ_MAX_NUM */ | |
1777 | if (num > SLAB_OBJ_MAX_NUM) | |
1778 | break; | |
1779 | ||
1780 | if (flags & CFLGS_OFF_SLAB) { | |
1781 | struct kmem_cache *freelist_cache; | |
1782 | size_t freelist_size; | |
1783 | ||
1784 | freelist_size = num * sizeof(freelist_idx_t); | |
1785 | freelist_cache = kmalloc_slab(freelist_size, 0u); | |
1786 | if (!freelist_cache) | |
1787 | continue; | |
1788 | ||
1789 | /* | |
1790 | * Needed to avoid possible looping condition | |
1791 | * in cache_grow_begin() | |
1792 | */ | |
1793 | if (OFF_SLAB(freelist_cache)) | |
1794 | continue; | |
1795 | ||
1796 | /* check if off slab has enough benefit */ | |
1797 | if (freelist_cache->size > cachep->size / 2) | |
1798 | continue; | |
1799 | } | |
1800 | ||
1801 | /* Found something acceptable - save it away */ | |
1802 | cachep->num = num; | |
1803 | cachep->gfporder = gfporder; | |
1804 | left_over = remainder; | |
1805 | ||
1806 | /* | |
1807 | * A VFS-reclaimable slab tends to have most allocations | |
1808 | * as GFP_NOFS and we really don't want to have to be allocating | |
1809 | * higher-order pages when we are unable to shrink dcache. | |
1810 | */ | |
1811 | if (flags & SLAB_RECLAIM_ACCOUNT) | |
1812 | break; | |
1813 | ||
1814 | /* | |
1815 | * A large number of objects is good, but very large slabs are | |
1816 | * currently bad for the gfp()s. | |
1817 | */ | |
1818 | if (gfporder >= slab_max_order) | |
1819 | break; | |
1820 | ||
1821 | /* | |
1822 | * Acceptable internal fragmentation? | |
1823 | */ | |
1824 | if (left_over * 8 <= (PAGE_SIZE << gfporder)) | |
1825 | break; | |
1826 | } | |
1827 | return left_over; | |
1828 | } | |
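/*
 * Userspace sketch of the sizing rule above, assuming 4K pages and ignoring
 * the freelist/colour overhead that cache_estimate() accounts for: pick the
 * smallest order whose leftover is at most 1/8 of the slab.  All names here
 * are hypothetical, not kernel API.
 */
#include <stddef.h>

#define SKETCH_PAGE_SIZE 4096u
#define SKETCH_MAX_ORDER 3

static int sketch_slab_order(size_t obj_size, unsigned int *objs_out)
{
	int order;

	for (order = 0; order <= SKETCH_MAX_ORDER; order++) {
		size_t slab_bytes = (size_t)SKETCH_PAGE_SIZE << order;
		unsigned int num = slab_bytes / obj_size;
		size_t left_over = slab_bytes - (size_t)num * obj_size;

		if (!num)
			continue;
		/* acceptable internal fragmentation: waste at most 1/8 */
		if (left_over * 8 <= slab_bytes) {
			*objs_out = num;
			return order;
		}
	}
	return -1;
}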
1829 | ||
1830 | static struct array_cache __percpu *alloc_kmem_cache_cpus( | |
1831 | struct kmem_cache *cachep, int entries, int batchcount) | |
1832 | { | |
1833 | int cpu; | |
1834 | size_t size; | |
1835 | struct array_cache __percpu *cpu_cache; | |
1836 | ||
1837 | size = sizeof(void *) * entries + sizeof(struct array_cache); | |
1838 | cpu_cache = __alloc_percpu(size, sizeof(void *)); | |
1839 | ||
1840 | if (!cpu_cache) | |
1841 | return NULL; | |
1842 | ||
1843 | for_each_possible_cpu(cpu) { | |
1844 | init_arraycache(per_cpu_ptr(cpu_cache, cpu), | |
1845 | entries, batchcount); | |
1846 | } | |
1847 | ||
1848 | return cpu_cache; | |
1849 | } | |
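/*
 * Standalone sketch of the sizing above: the per-cpu cache is one allocation
 * holding an array_cache-style header followed by 'entries' object pointers.
 * The struct below is a stand-in, not the kernel's struct array_cache.
 */
#include <stddef.h>

struct sketch_ac_header {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
};

static size_t sketch_cpu_cache_bytes(int entries)
{
	/* header first, then the pointer stack used for alloc/free */
	return sizeof(struct sketch_ac_header) + sizeof(void *) * entries;
}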
1850 | ||
1851 | static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |
1852 | { | |
1853 | if (slab_state >= FULL) | |
1854 | return enable_cpucache(cachep, gfp); | |
1855 | ||
1856 | cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1); | |
1857 | if (!cachep->cpu_cache) | |
1858 | return 1; | |
1859 | ||
1860 | if (slab_state == DOWN) { | |
1861 | /* Creation of first cache (kmem_cache). */ | |
1862 | set_up_node(kmem_cache, CACHE_CACHE); | |
1863 | } else if (slab_state == PARTIAL) { | |
1864 | /* For kmem_cache_node */ | |
1865 | set_up_node(cachep, SIZE_NODE); | |
1866 | } else { | |
1867 | int node; | |
1868 | ||
1869 | for_each_online_node(node) { | |
1870 | cachep->node[node] = kmalloc_node( | |
1871 | sizeof(struct kmem_cache_node), gfp, node); | |
1872 | BUG_ON(!cachep->node[node]); | |
1873 | kmem_cache_node_init(cachep->node[node]); | |
1874 | } | |
1875 | } | |
1876 | ||
1877 | cachep->node[numa_mem_id()]->next_reap = | |
1878 | jiffies + REAPTIMEOUT_NODE + | |
1879 | ((unsigned long)cachep) % REAPTIMEOUT_NODE; | |
1880 | ||
1881 | cpu_cache_get(cachep)->avail = 0; | |
1882 | cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; | |
1883 | cpu_cache_get(cachep)->batchcount = 1; | |
1884 | cpu_cache_get(cachep)->touched = 0; | |
1885 | cachep->batchcount = 1; | |
1886 | cachep->limit = BOOT_CPUCACHE_ENTRIES; | |
1887 | return 0; | |
1888 | } | |
1889 | ||
1890 | unsigned long kmem_cache_flags(unsigned long object_size, | |
1891 | unsigned long flags, const char *name, | |
1892 | void (*ctor)(void *)) | |
1893 | { | |
1894 | return flags; | |
1895 | } | |
1896 | ||
1897 | struct kmem_cache * | |
1898 | __kmem_cache_alias(const char *name, size_t size, size_t align, | |
1899 | unsigned long flags, void (*ctor)(void *)) | |
1900 | { | |
1901 | struct kmem_cache *cachep; | |
1902 | ||
1903 | cachep = find_mergeable(size, align, flags, name, ctor); | |
1904 | if (cachep) { | |
1905 | cachep->refcount++; | |
1906 | ||
1907 | /* | |
1908 | * Adjust the object sizes so that we clear | |
1909 | * the complete object on kzalloc. | |
1910 | */ | |
1911 | cachep->object_size = max_t(int, cachep->object_size, size); | |
1912 | } | |
1913 | return cachep; | |
1914 | } | |
1915 | ||
1916 | static bool set_objfreelist_slab_cache(struct kmem_cache *cachep, | |
1917 | size_t size, unsigned long flags) | |
1918 | { | |
1919 | size_t left; | |
1920 | ||
1921 | cachep->num = 0; | |
1922 | ||
1923 | if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU) | |
1924 | return false; | |
1925 | ||
1926 | left = calculate_slab_order(cachep, size, | |
1927 | flags | CFLGS_OBJFREELIST_SLAB); | |
1928 | if (!cachep->num) | |
1929 | return false; | |
1930 | ||
1931 | if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size) | |
1932 | return false; | |
1933 | ||
1934 | cachep->colour = left / cachep->colour_off; | |
1935 | ||
1936 | return true; | |
1937 | } | |
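/*
 * Standalone sketch of the capacity test above: embedding the freelist in a
 * free object only works if a single object can hold an index for every
 * object in the slab.  The one-byte index type here is an assumption,
 * matching the small-slab case of freelist_idx_t.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool sketch_fits_obj_freelist(unsigned int num, size_t object_size)
{
	return (size_t)num * sizeof(uint8_t) <= object_size;
}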
1938 | ||
1939 | static bool set_off_slab_cache(struct kmem_cache *cachep, | |
1940 | size_t size, unsigned long flags) | |
1941 | { | |
1942 | size_t left; | |
1943 | ||
1944 | cachep->num = 0; | |
1945 | ||
1946 | /* | |
1947 | * Always use on-slab management when SLAB_NOLEAKTRACE is set | |
1948 | * to avoid recursive calls into kmemleak. | |
1949 | */ | |
1950 | if (flags & SLAB_NOLEAKTRACE) | |
1951 | return false; | |
1952 | ||
1953 | /* | |
1954 | * Size is large, assume best to place the slab management obj | |
1955 | * off-slab (should allow better packing of objs). | |
1956 | */ | |
1957 | left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB); | |
1958 | if (!cachep->num) | |
1959 | return false; | |
1960 | ||
1961 | /* | |
1962 | * If the slab has been placed off-slab, and we have enough space then | |
1963 | * move it on-slab. This is at the expense of any extra colouring. | |
1964 | */ | |
1965 | if (left >= cachep->num * sizeof(freelist_idx_t)) | |
1966 | return false; | |
1967 | ||
1968 | cachep->colour = left / cachep->colour_off; | |
1969 | ||
1970 | return true; | |
1971 | } | |
1972 | ||
1973 | static bool set_on_slab_cache(struct kmem_cache *cachep, | |
1974 | size_t size, unsigned long flags) | |
1975 | { | |
1976 | size_t left; | |
1977 | ||
1978 | cachep->num = 0; | |
1979 | ||
1980 | left = calculate_slab_order(cachep, size, flags); | |
1981 | if (!cachep->num) | |
1982 | return false; | |
1983 | ||
1984 | cachep->colour = left / cachep->colour_off; | |
1985 | ||
1986 | return true; | |
1987 | } | |
1988 | ||
1989 | /** | |
1990 | * __kmem_cache_create - Create a cache. | |
1991 | * @cachep: cache management descriptor | |
1992 | * @flags: SLAB flags | |
1993 | * | |
1994 | * Returns zero on success, nonzero on failure. | |
1995 | * Cannot be called within an interrupt, but can be interrupted. | |
1996 | * The @ctor is run when new pages are allocated by the cache. | |
1997 | * | |
1998 | * The flags are | |
1999 | * | |
2000 | * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) | |
2001 | * to catch references to uninitialised memory. | |
2002 | * | |
2003 | * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check | |
2004 | * for buffer overruns. | |
2005 | * | |
2006 | * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware | |
2007 | * cacheline. This can be beneficial if you're counting cycles as closely | |
2008 | * as davem. | |
2009 | */ | |
2010 | int | |
2011 | __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | |
2012 | { | |
2013 | size_t ralign = BYTES_PER_WORD; | |
2014 | gfp_t gfp; | |
2015 | int err; | |
2016 | size_t size = cachep->size; | |
2017 | ||
2018 | #if DEBUG | |
2019 | #if FORCED_DEBUG | |
2020 | /* | |
2021 | * Enable redzoning and last user accounting, except for caches with | |
2022 | * large objects, if the increased size would increase the object size | |
2023 | * above the next power of two: caches with object sizes just above a | |
2024 | * power of two have a significant amount of internal fragmentation. | |
2025 | */ | |
2026 | if (size < 4096 || fls(size - 1) == fls(size - 1 + REDZONE_ALIGN + | |
2027 | 2 * sizeof(unsigned long long))) | |
2028 | flags |= SLAB_RED_ZONE | SLAB_STORE_USER; | |
2029 | if (!(flags & SLAB_TYPESAFE_BY_RCU)) | |
2030 | flags |= SLAB_POISON; | |
2031 | #endif | |
2032 | #endif | |
2033 | ||
2034 | /* | |
2035 | * Check that size is in terms of words. This is needed to avoid | |
2036 | * unaligned accesses for some archs when redzoning is used, and makes | |
2037 | * sure any on-slab bufctl's are also correctly aligned. | |
2038 | */ | |
2039 | size = ALIGN(size, BYTES_PER_WORD); | |
2040 | ||
2041 | if (flags & SLAB_RED_ZONE) { | |
2042 | ralign = REDZONE_ALIGN; | |
2043 | /* If redzoning, ensure that the second redzone is suitably | |
2044 | * aligned, by adjusting the object size accordingly. */ | |
2045 | size = ALIGN(size, REDZONE_ALIGN); | |
2046 | } | |
2047 | ||
2048 | /* 3) caller mandated alignment */ | |
2049 | if (ralign < cachep->align) { | |
2050 | ralign = cachep->align; | |
2051 | } | |
2052 | /* disable debug if necessary */ | |
2053 | if (ralign > __alignof__(unsigned long long)) | |
2054 | flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); | |
2055 | /* | |
2056 | * 4) Store it. | |
2057 | */ | |
2058 | cachep->align = ralign; | |
2059 | cachep->colour_off = cache_line_size(); | |
2060 | /* Offset must be a multiple of the alignment. */ | |
2061 | if (cachep->colour_off < cachep->align) | |
2062 | cachep->colour_off = cachep->align; | |
2063 | ||
2064 | if (slab_is_available()) | |
2065 | gfp = GFP_KERNEL; | |
2066 | else | |
2067 | gfp = GFP_NOWAIT; | |
2068 | ||
2069 | #if DEBUG | |
2070 | ||
2071 | /* | |
2072 | * Both debugging options require word-alignment which is calculated | |
2073 | * into align above. | |
2074 | */ | |
2075 | if (flags & SLAB_RED_ZONE) { | |
2076 | /* add space for red zone words */ | |
2077 | cachep->obj_offset += sizeof(unsigned long long); | |
2078 | size += 2 * sizeof(unsigned long long); | |
2079 | } | |
2080 | if (flags & SLAB_STORE_USER) { | |
2081 | /* user store requires one word storage behind the end of | |
2082 | * the real object. But if the second red zone needs to be | |
2083 | * aligned to 64 bits, we must allow that much space. | |
2084 | */ | |
2085 | if (flags & SLAB_RED_ZONE) | |
2086 | size += REDZONE_ALIGN; | |
2087 | else | |
2088 | size += BYTES_PER_WORD; | |
2089 | } | |
2090 | #endif | |
2091 | ||
2092 | kasan_cache_create(cachep, &size, &flags); | |
2093 | ||
2094 | size = ALIGN(size, cachep->align); | |
2095 | /* | |
2096 | * We should restrict the number of objects in a slab so that a | |
2097 | * byte-sized index can be used. See the comment on the SLAB_OBJ_MIN_SIZE definition. | |
2098 | */ | |
2099 | if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE) | |
2100 | size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align); | |
2101 | ||
2102 | #if DEBUG | |
2103 | /* | |
2104 | * To activate debug pagealloc, off-slab management is a necessary | |
2105 | * requirement. In the early phase of initialization, small-sized slab | |
2106 | * caches are not yet set up, so off-slab management is not possible. | |
2107 | * Hence we check size >= 256, which guarantees that all the necessary | |
2108 | * small-sized slab caches are initialized in the current slab initialization sequence. | |
2109 | */ | |
2110 | if (debug_pagealloc_enabled() && (flags & SLAB_POISON) && | |
2111 | size >= 256 && cachep->object_size > cache_line_size()) { | |
2112 | if (size < PAGE_SIZE || size % PAGE_SIZE == 0) { | |
2113 | size_t tmp_size = ALIGN(size, PAGE_SIZE); | |
2114 | ||
2115 | if (set_off_slab_cache(cachep, tmp_size, flags)) { | |
2116 | flags |= CFLGS_OFF_SLAB; | |
2117 | cachep->obj_offset += tmp_size - size; | |
2118 | size = tmp_size; | |
2119 | goto done; | |
2120 | } | |
2121 | } | |
2122 | } | |
2123 | #endif | |
2124 | ||
2125 | if (set_objfreelist_slab_cache(cachep, size, flags)) { | |
2126 | flags |= CFLGS_OBJFREELIST_SLAB; | |
2127 | goto done; | |
2128 | } | |
2129 | ||
2130 | if (set_off_slab_cache(cachep, size, flags)) { | |
2131 | flags |= CFLGS_OFF_SLAB; | |
2132 | goto done; | |
2133 | } | |
2134 | ||
2135 | if (set_on_slab_cache(cachep, size, flags)) | |
2136 | goto done; | |
2137 | ||
2138 | return -E2BIG; | |
2139 | ||
2140 | done: | |
2141 | cachep->freelist_size = cachep->num * sizeof(freelist_idx_t); | |
2142 | cachep->flags = flags; | |
2143 | cachep->allocflags = __GFP_COMP; | |
2144 | if (flags & SLAB_CACHE_DMA) | |
2145 | cachep->allocflags |= GFP_DMA; | |
2146 | cachep->size = size; | |
2147 | cachep->reciprocal_buffer_size = reciprocal_value(size); | |
2148 | ||
2149 | #if DEBUG | |
2150 | /* | |
2151 | * If we're going to use the generic kernel_map_pages() | |
2152 | * poisoning, then it's going to smash the contents of | |
2153 | * the redzone and userword anyhow, so switch them off. | |
2154 | */ | |
2155 | if (IS_ENABLED(CONFIG_PAGE_POISONING) && | |
2156 | (cachep->flags & SLAB_POISON) && | |
2157 | is_debug_pagealloc_cache(cachep)) | |
2158 | cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); | |
2159 | #endif | |
2160 | ||
2161 | if (OFF_SLAB(cachep)) { | |
2162 | cachep->freelist_cache = | |
2163 | kmalloc_slab(cachep->freelist_size, 0u); | |
2164 | } | |
2165 | ||
2166 | err = setup_cpu_cache(cachep, gfp); | |
2167 | if (err) { | |
2168 | __kmem_cache_release(cachep); | |
2169 | return err; | |
2170 | } | |
2171 | ||
2172 | return 0; | |
2173 | } | |
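/*
 * Usage sketch (hypothetical caller, not part of this file): clients reach
 * __kmem_cache_create() through kmem_cache_create(), passing the flags
 * documented above.  "struct foo" and "foo_cache_init" are made-up names.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct foo {
	int id;
	char payload[56];
};

static struct kmem_cache *foo_cache;

static int __init foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}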
2174 | ||
2175 | #if DEBUG | |
2176 | static void check_irq_off(void) | |
2177 | { | |
2178 | BUG_ON(!irqs_disabled()); | |
2179 | } | |
2180 | ||
2181 | static void check_irq_on(void) | |
2182 | { | |
2183 | BUG_ON(irqs_disabled()); | |
2184 | } | |
2185 | ||
2186 | static void check_mutex_acquired(void) | |
2187 | { | |
2188 | BUG_ON(!mutex_is_locked(&slab_mutex)); | |
2189 | } | |
2190 | ||
2191 | static void check_spinlock_acquired(struct kmem_cache *cachep) | |
2192 | { | |
2193 | #ifdef CONFIG_SMP | |
2194 | check_irq_off(); | |
2195 | assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); | |
2196 | #endif | |
2197 | } | |
2198 | ||
2199 | static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) | |
2200 | { | |
2201 | #ifdef CONFIG_SMP | |
2202 | check_irq_off(); | |
2203 | assert_spin_locked(&get_node(cachep, node)->list_lock); | |
2204 | #endif | |
2205 | } | |
2206 | ||
2207 | #else | |
2208 | #define check_irq_off() do { } while(0) | |
2209 | #define check_irq_on() do { } while(0) | |
2210 | #define check_mutex_acquired() do { } while(0) | |
2211 | #define check_spinlock_acquired(x) do { } while(0) | |
2212 | #define check_spinlock_acquired_node(x, y) do { } while(0) | |
2213 | #endif | |
2214 | ||
2215 | static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, | |
2216 | int node, bool free_all, struct list_head *list) | |
2217 | { | |
2218 | int tofree; | |
2219 | ||
2220 | if (!ac || !ac->avail) | |
2221 | return; | |
2222 | ||
2223 | tofree = free_all ? ac->avail : (ac->limit + 4) / 5; | |
2224 | if (tofree > ac->avail) | |
2225 | tofree = (ac->avail + 1) / 2; | |
2226 | ||
2227 | free_block(cachep, ac->entry, tofree, node, list); | |
2228 | ac->avail -= tofree; | |
2229 | memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail); | |
2230 | } | |
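/*
 * Standalone sketch of the sizing rule above: drain everything when asked,
 * otherwise roughly a fifth of the limit, capped at half of what is
 * currently available.  Hypothetical helper, not kernel API.
 */
#include <stdbool.h>

static int sketch_tofree(int avail, int limit, bool free_all)
{
	int tofree = free_all ? avail : (limit + 4) / 5;

	if (tofree > avail)
		tofree = (avail + 1) / 2;
	return tofree;
}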
2231 | ||
2232 | static void do_drain(void *arg) | |
2233 | { | |
2234 | struct kmem_cache *cachep = arg; | |
2235 | struct array_cache *ac; | |
2236 | int node = numa_mem_id(); | |
2237 | struct kmem_cache_node *n; | |
2238 | LIST_HEAD(list); | |
2239 | ||
2240 | check_irq_off(); | |
2241 | ac = cpu_cache_get(cachep); | |
2242 | n = get_node(cachep, node); | |
2243 | spin_lock(&n->list_lock); | |
2244 | free_block(cachep, ac->entry, ac->avail, node, &list); | |
2245 | spin_unlock(&n->list_lock); | |
2246 | slabs_destroy(cachep, &list); | |
2247 | ac->avail = 0; | |
2248 | } | |
2249 | ||
2250 | static void drain_cpu_caches(struct kmem_cache *cachep) | |
2251 | { | |
2252 | struct kmem_cache_node *n; | |
2253 | int node; | |
2254 | LIST_HEAD(list); | |
2255 | ||
2256 | on_each_cpu(do_drain, cachep, 1); | |
2257 | check_irq_on(); | |
2258 | for_each_kmem_cache_node(cachep, node, n) | |
2259 | if (n->alien) | |
2260 | drain_alien_cache(cachep, n->alien); | |
2261 | ||
2262 | for_each_kmem_cache_node(cachep, node, n) { | |
2263 | spin_lock_irq(&n->list_lock); | |
2264 | drain_array_locked(cachep, n->shared, node, true, &list); | |
2265 | spin_unlock_irq(&n->list_lock); | |
2266 | ||
2267 | slabs_destroy(cachep, &list); | |
2268 | } | |
2269 | } | |
2270 | ||
2271 | /* | |
2272 | * Remove slabs from the list of free slabs. | |
2273 | * Specify the number of slabs to drain in tofree. | |
2274 | * | |
2275 | * Returns the actual number of slabs released. | |
2276 | */ | |
2277 | static int drain_freelist(struct kmem_cache *cache, | |
2278 | struct kmem_cache_node *n, int tofree) | |
2279 | { | |
2280 | struct list_head *p; | |
2281 | int nr_freed; | |
2282 | struct page *page; | |
2283 | ||
2284 | nr_freed = 0; | |
2285 | while (nr_freed < tofree && !list_empty(&n->slabs_free)) { | |
2286 | ||
2287 | spin_lock_irq(&n->list_lock); | |
2288 | p = n->slabs_free.prev; | |
2289 | if (p == &n->slabs_free) { | |
2290 | spin_unlock_irq(&n->list_lock); | |
2291 | goto out; | |
2292 | } | |
2293 | ||
2294 | page = list_entry(p, struct page, lru); | |
2295 | list_del(&page->lru); | |
2296 | n->free_slabs--; | |
2297 | n->total_slabs--; | |
2298 | /* | |
2299 | * Safe to drop the lock. The slab is no longer linked | |
2300 | * to the cache. | |
2301 | */ | |
2302 | n->free_objects -= cache->num; | |
2303 | spin_unlock_irq(&n->list_lock); | |
2304 | slab_destroy(cache, page); | |
2305 | nr_freed++; | |
2306 | } | |
2307 | out: | |
2308 | return nr_freed; | |
2309 | } | |
2310 | ||
2311 | int __kmem_cache_shrink(struct kmem_cache *cachep) | |
2312 | { | |
2313 | int ret = 0; | |
2314 | int node; | |
2315 | struct kmem_cache_node *n; | |
2316 | ||
2317 | drain_cpu_caches(cachep); | |
2318 | ||
2319 | check_irq_on(); | |
2320 | for_each_kmem_cache_node(cachep, node, n) { | |
2321 | drain_freelist(cachep, n, INT_MAX); | |
2322 | ||
2323 | ret += !list_empty(&n->slabs_full) || | |
2324 | !list_empty(&n->slabs_partial); | |
2325 | } | |
2326 | return (ret ? 1 : 0); | |
2327 | } | |
2328 | ||
2329 | #ifdef CONFIG_MEMCG | |
2330 | void __kmemcg_cache_deactivate(struct kmem_cache *cachep) | |
2331 | { | |
2332 | __kmem_cache_shrink(cachep); | |
2333 | } | |
2334 | #endif | |
2335 | ||
2336 | int __kmem_cache_shutdown(struct kmem_cache *cachep) | |
2337 | { | |
2338 | return __kmem_cache_shrink(cachep); | |
2339 | } | |
2340 | ||
2341 | void __kmem_cache_release(struct kmem_cache *cachep) | |
2342 | { | |
2343 | int i; | |
2344 | struct kmem_cache_node *n; | |
2345 | ||
2346 | cache_random_seq_destroy(cachep); | |
2347 | ||
2348 | free_percpu(cachep->cpu_cache); | |
2349 | ||
2350 | /* NUMA: free the node structures */ | |
2351 | for_each_kmem_cache_node(cachep, i, n) { | |
2352 | kfree(n->shared); | |
2353 | free_alien_cache(n->alien); | |
2354 | kfree(n); | |
2355 | cachep->node[i] = NULL; | |
2356 | } | |
2357 | } | |
2358 | ||
2359 | /* | |
2360 | * Get the memory for a slab management obj. | |
2361 | * | |
2362 | * For a slab cache, when the slab descriptor is off-slab, the | |
2363 | * slab descriptor can't come from the same cache which is being created, | |
2364 | * because if it did, that would mean we defer the creation of | |
2365 | * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point. | |
2366 | * We would then eventually call down to __kmem_cache_create(), which | |
2367 | * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one. | |
2368 | * This is a "chicken-and-egg" problem. | |
2369 | * | |
2370 | * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches, | |
2371 | * which are all initialized during kmem_cache_init(). | |
2372 | */ | |
2373 | static void *alloc_slabmgmt(struct kmem_cache *cachep, | |
2374 | struct page *page, int colour_off, | |
2375 | gfp_t local_flags, int nodeid) | |
2376 | { | |
2377 | void *freelist; | |
2378 | void *addr = page_address(page); | |
2379 | ||
2380 | page->s_mem = addr + colour_off; | |
2381 | page->active = 0; | |
2382 | ||
2383 | if (OBJFREELIST_SLAB(cachep)) | |
2384 | freelist = NULL; | |
2385 | else if (OFF_SLAB(cachep)) { | |
2386 | /* Slab management obj is off-slab. */ | |
2387 | freelist = kmem_cache_alloc_node(cachep->freelist_cache, | |
2388 | local_flags, nodeid); | |
2389 | if (!freelist) | |
2390 | return NULL; | |
2391 | } else { | |
2392 | /* We will use the last bytes of the slab for the freelist */ | |
2393 | freelist = addr + (PAGE_SIZE << cachep->gfporder) - | |
2394 | cachep->freelist_size; | |
2395 | } | |
2396 | ||
2397 | return freelist; | |
2398 | } | |
2399 | ||
2400 | static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx) | |
2401 | { | |
2402 | return ((freelist_idx_t *)page->freelist)[idx]; | |
2403 | } | |
2404 | ||
2405 | static inline void set_free_obj(struct page *page, | |
2406 | unsigned int idx, freelist_idx_t val) | |
2407 | { | |
2408 | ((freelist_idx_t *)(page->freelist))[idx] = val; | |
2409 | } | |
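/*
 * Standalone sketch (hypothetical types): the freelist is simply an array of
 * small integers, one per object in the slab; entry i names the object that
 * will be handed out i-th.  get_free_obj()/set_free_obj() above only index
 * into that array.
 */
#include <stdint.h>

typedef uint8_t sketch_idx_t;		/* like freelist_idx_t on small slabs */

struct sketch_slab {
	sketch_idx_t freelist[16];	/* one entry per object */
	unsigned int active;		/* objects handed out so far */
};

static unsigned int sketch_take_next(struct sketch_slab *s)
{
	/* same idea as slab_get_obj(): consume the entry at ->active */
	return s->freelist[s->active++];
}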
2410 | ||
2411 | static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page) | |
2412 | { | |
2413 | #if DEBUG | |
2414 | int i; | |
2415 | ||
2416 | for (i = 0; i < cachep->num; i++) { | |
2417 | void *objp = index_to_obj(cachep, page, i); | |
2418 | ||
2419 | if (cachep->flags & SLAB_STORE_USER) | |
2420 | *dbg_userword(cachep, objp) = NULL; | |
2421 | ||
2422 | if (cachep->flags & SLAB_RED_ZONE) { | |
2423 | *dbg_redzone1(cachep, objp) = RED_INACTIVE; | |
2424 | *dbg_redzone2(cachep, objp) = RED_INACTIVE; | |
2425 | } | |
2426 | /* | |
2427 | * Constructors are not allowed to allocate memory from the same | |
2428 | * cache which they are a constructor for. Otherwise, deadlock. | |
2429 | * They must also be threaded. | |
2430 | */ | |
2431 | if (cachep->ctor && !(cachep->flags & SLAB_POISON)) { | |
2432 | kasan_unpoison_object_data(cachep, | |
2433 | objp + obj_offset(cachep)); | |
2434 | cachep->ctor(objp + obj_offset(cachep)); | |
2435 | kasan_poison_object_data( | |
2436 | cachep, objp + obj_offset(cachep)); | |
2437 | } | |
2438 | ||
2439 | if (cachep->flags & SLAB_RED_ZONE) { | |
2440 | if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) | |
2441 | slab_error(cachep, "constructor overwrote the end of an object"); | |
2442 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) | |
2443 | slab_error(cachep, "constructor overwrote the start of an object"); | |
2444 | } | |
2445 | /* need to poison the objs? */ | |
2446 | if (cachep->flags & SLAB_POISON) { | |
2447 | poison_obj(cachep, objp, POISON_FREE); | |
2448 | slab_kernel_map(cachep, objp, 0, 0); | |
2449 | } | |
2450 | } | |
2451 | #endif | |
2452 | } | |
2453 | ||
2454 | #ifdef CONFIG_SLAB_FREELIST_RANDOM | |
2455 | /* Hold information during a freelist initialization */ | |
2456 | union freelist_init_state { | |
2457 | struct { | |
2458 | unsigned int pos; | |
2459 | unsigned int *list; | |
2460 | unsigned int count; | |
2461 | }; | |
2462 | struct rnd_state rnd_state; | |
2463 | }; | |
2464 | ||
2465 | /* | |
2466 | * Initialize the state based on the randomization method available. | |
2467 | * Return true if the pre-computed list is available, false otherwise. | |
2468 | */ | |
2469 | static bool freelist_state_initialize(union freelist_init_state *state, | |
2470 | struct kmem_cache *cachep, | |
2471 | unsigned int count) | |
2472 | { | |
2473 | bool ret; | |
2474 | unsigned int rand; | |
2475 | ||
2476 | /* Use best entropy available to define a random shift */ | |
2477 | rand = get_random_int(); | |
2478 | ||
2479 | /* Use a random state if the pre-computed list is not available */ | |
2480 | if (!cachep->random_seq) { | |
2481 | prandom_seed_state(&state->rnd_state, rand); | |
2482 | ret = false; | |
2483 | } else { | |
2484 | state->list = cachep->random_seq; | |
2485 | state->count = count; | |
2486 | state->pos = rand % count; | |
2487 | ret = true; | |
2488 | } | |
2489 | return ret; | |
2490 | } | |
2491 | ||
2492 | /* Get the next entry on the list and randomize it using a random shift */ | |
2493 | static freelist_idx_t next_random_slot(union freelist_init_state *state) | |
2494 | { | |
2495 | if (state->pos >= state->count) | |
2496 | state->pos = 0; | |
2497 | return state->list[state->pos++]; | |
2498 | } | |
2499 | ||
2500 | /* Swap two freelist entries */ | |
2501 | static void swap_free_obj(struct page *page, unsigned int a, unsigned int b) | |
2502 | { | |
2503 | swap(((freelist_idx_t *)page->freelist)[a], | |
2504 | ((freelist_idx_t *)page->freelist)[b]); | |
2505 | } | |
2506 | ||
2507 | /* | |
2508 | * Shuffle the freelist initialization state based on pre-computed lists. | |
2509 | * Return true if the list was successfully shuffled, false otherwise. | |
2510 | */ | |
2511 | static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page) | |
2512 | { | |
2513 | unsigned int objfreelist = 0, i, rand, count = cachep->num; | |
2514 | union freelist_init_state state; | |
2515 | bool precomputed; | |
2516 | ||
2517 | if (count < 2) | |
2518 | return false; | |
2519 | ||
2520 | precomputed = freelist_state_initialize(&state, cachep, count); | |
2521 | ||
2522 | /* Take a random entry as the objfreelist */ | |
2523 | if (OBJFREELIST_SLAB(cachep)) { | |
2524 | if (!precomputed) | |
2525 | objfreelist = count - 1; | |
2526 | else | |
2527 | objfreelist = next_random_slot(&state); | |
2528 | page->freelist = index_to_obj(cachep, page, objfreelist) + | |
2529 | obj_offset(cachep); | |
2530 | count--; | |
2531 | } | |
2532 | ||
2533 | /* | |
2534 | * On early boot, generate the list dynamically. | |
2535 | * Later use a pre-computed list for speed. | |
2536 | */ | |
2537 | if (!precomputed) { | |
2538 | for (i = 0; i < count; i++) | |
2539 | set_free_obj(page, i, i); | |
2540 | ||
2541 | /* Fisher-Yates shuffle */ | |
2542 | for (i = count - 1; i > 0; i--) { | |
2543 | rand = prandom_u32_state(&state.rnd_state); | |
2544 | rand %= (i + 1); | |
2545 | swap_free_obj(page, i, rand); | |
2546 | } | |
2547 | } else { | |
2548 | for (i = 0; i < count; i++) | |
2549 | set_free_obj(page, i, next_random_slot(&state)); | |
2550 | } | |
2551 | ||
2552 | if (OBJFREELIST_SLAB(cachep)) | |
2553 | set_free_obj(page, cachep->num - 1, objfreelist); | |
2554 | ||
2555 | return true; | |
2556 | } | |
2557 | #else | |
2558 | static inline bool shuffle_freelist(struct kmem_cache *cachep, | |
2559 | struct page *page) | |
2560 | { | |
2561 | return false; | |
2562 | } | |
2563 | #endif /* CONFIG_SLAB_FREELIST_RANDOM */ | |
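/*
 * Standalone sketch of the Fisher-Yates pass used on early boot above;
 * userspace rand() stands in for prandom_u32_state(), otherwise it is the
 * same loop over the index list.
 */
#include <stdlib.h>

static void sketch_shuffle(unsigned int *list, unsigned int count)
{
	unsigned int i, j, tmp;

	if (count < 2)
		return;

	for (i = count - 1; i > 0; i--) {
		j = (unsigned int)rand() % (i + 1);	/* pick from 0..i */
		tmp = list[i];
		list[i] = list[j];
		list[j] = tmp;
	}
}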
2564 | ||
2565 | static void cache_init_objs(struct kmem_cache *cachep, | |
2566 | struct page *page) | |
2567 | { | |
2568 | int i; | |
2569 | void *objp; | |
2570 | bool shuffled; | |
2571 | ||
2572 | cache_init_objs_debug(cachep, page); | |
2573 | ||
2574 | /* Try to randomize the freelist if enabled */ | |
2575 | shuffled = shuffle_freelist(cachep, page); | |
2576 | ||
2577 | if (!shuffled && OBJFREELIST_SLAB(cachep)) { | |
2578 | page->freelist = index_to_obj(cachep, page, cachep->num - 1) + | |
2579 | obj_offset(cachep); | |
2580 | } | |
2581 | ||
2582 | for (i = 0; i < cachep->num; i++) { | |
2583 | objp = index_to_obj(cachep, page, i); | |
2584 | kasan_init_slab_obj(cachep, objp); | |
2585 | ||
2586 | /* constructor could break poison info */ | |
2587 | if (DEBUG == 0 && cachep->ctor) { | |
2588 | kasan_unpoison_object_data(cachep, objp); | |
2589 | cachep->ctor(objp); | |
2590 | kasan_poison_object_data(cachep, objp); | |
2591 | } | |
2592 | ||
2593 | if (!shuffled) | |
2594 | set_free_obj(page, i, i); | |
2595 | } | |
2596 | } | |
2597 | ||
2598 | static void *slab_get_obj(struct kmem_cache *cachep, struct page *page) | |
2599 | { | |
2600 | void *objp; | |
2601 | ||
2602 | objp = index_to_obj(cachep, page, get_free_obj(page, page->active)); | |
2603 | page->active++; | |
2604 | ||
2605 | #if DEBUG | |
2606 | if (cachep->flags & SLAB_STORE_USER) | |
2607 | set_store_user_dirty(cachep); | |
2608 | #endif | |
2609 | ||
2610 | return objp; | |
2611 | } | |
2612 | ||
2613 | static void slab_put_obj(struct kmem_cache *cachep, | |
2614 | struct page *page, void *objp) | |
2615 | { | |
2616 | unsigned int objnr = obj_to_index(cachep, page, objp); | |
2617 | #if DEBUG | |
2618 | unsigned int i; | |
2619 | ||
2620 | /* Verify double free bug */ | |
2621 | for (i = page->active; i < cachep->num; i++) { | |
2622 | if (get_free_obj(page, i) == objnr) { | |
2623 | pr_err("slab: double free detected in cache '%s', objp %p\n", | |
2624 | cachep->name, objp); | |
2625 | BUG(); | |
2626 | } | |
2627 | } | |
2628 | #endif | |
2629 | page->active--; | |
2630 | if (!page->freelist) | |
2631 | page->freelist = objp + obj_offset(cachep); | |
2632 | ||
2633 | set_free_obj(page, page->active, objnr); | |
2634 | } | |
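/*
 * Standalone sketch of the double-free scan above: an object being freed
 * must not already appear among the freelist entries beyond ->active, i.e.
 * in the not-yet-allocated part.  Hypothetical helper, not kernel API.
 */
#include <stdbool.h>

static bool sketch_is_double_free(const unsigned int *freelist,
				  unsigned int active, unsigned int num,
				  unsigned int objnr)
{
	unsigned int i;

	for (i = active; i < num; i++)
		if (freelist[i] == objnr)
			return true;
	return false;
}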
2635 | ||
2636 | /* | |
2637 | * Map pages beginning at addr to the given cache and slab. This is required | |
2638 | * for the slab allocator to be able to lookup the cache and slab of a | |
2639 | * virtual address for kfree, ksize, and slab debugging. | |
2640 | */ | |
2641 | static void slab_map_pages(struct kmem_cache *cache, struct page *page, | |
2642 | void *freelist) | |
2643 | { | |
2644 | page->slab_cache = cache; | |
2645 | page->freelist = freelist; | |
2646 | } | |
2647 | ||
2648 | /* | |
2649 | * Grow (by 1) the number of slabs within a cache. This is called by | |
2650 | * kmem_cache_alloc() when there are no active objs left in a cache. | |
2651 | */ | |
2652 | static struct page *cache_grow_begin(struct kmem_cache *cachep, | |
2653 | gfp_t flags, int nodeid) | |
2654 | { | |
2655 | void *freelist; | |
2656 | size_t offset; | |
2657 | gfp_t local_flags; | |
2658 | int page_node; | |
2659 | struct kmem_cache_node *n; | |
2660 | struct page *page; | |
2661 | ||
2662 | /* | |
2663 | * Be lazy and only check for valid flags here, keeping it out of the | |
2664 | * critical path in kmem_cache_alloc(). | |
2665 | */ | |
2666 | if (unlikely(flags & GFP_SLAB_BUG_MASK)) { | |
2667 | gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; | |
2668 | flags &= ~GFP_SLAB_BUG_MASK; | |
2669 | pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n", | |
2670 | invalid_mask, &invalid_mask, flags, &flags); | |
2671 | dump_stack(); | |
2672 | } | |
2673 | local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); | |
2674 | ||
2675 | check_irq_off(); | |
2676 | if (gfpflags_allow_blocking(local_flags)) | |
2677 | local_irq_enable(); | |
2678 | ||
2679 | /* | |
2680 | * Get mem for the objs. Attempt to allocate a physical page from | |
2681 | * 'nodeid'. | |
2682 | */ | |
2683 | page = kmem_getpages(cachep, local_flags, nodeid); | |
2684 | if (!page) | |
2685 | goto failed; | |
2686 | ||
2687 | page_node = page_to_nid(page); | |
2688 | n = get_node(cachep, page_node); | |
2689 | ||
2690 | /* Get colour for the slab, and calculate the next value. */ | |
2691 | n->colour_next++; | |
2692 | if (n->colour_next >= cachep->colour) | |
2693 | n->colour_next = 0; | |
2694 | ||
2695 | offset = n->colour_next; | |
2696 | if (offset >= cachep->colour) | |
2697 | offset = 0; | |
2698 | ||
2699 | offset *= cachep->colour_off; | |
2700 | ||
2701 | /* Get slab management. */ | |
2702 | freelist = alloc_slabmgmt(cachep, page, offset, | |
2703 | local_flags & ~GFP_CONSTRAINT_MASK, page_node); | |
2704 | if (OFF_SLAB(cachep) && !freelist) | |
2705 | goto opps1; | |
2706 | ||
2707 | slab_map_pages(cachep, page, freelist); | |
2708 | ||
2709 | kasan_poison_slab(page); | |
2710 | cache_init_objs(cachep, page); | |
2711 | ||
2712 | if (gfpflags_allow_blocking(local_flags)) | |
2713 | local_irq_disable(); | |
2714 | ||
2715 | return page; | |
2716 | ||
2717 | opps1: | |
2718 | kmem_freepages(cachep, page); | |
2719 | failed: | |
2720 | if (gfpflags_allow_blocking(local_flags)) | |
2721 | local_irq_disable(); | |
2722 | return NULL; | |
2723 | } | |
2724 | ||
2725 | static void cache_grow_end(struct kmem_cache *cachep, struct page *page) | |
2726 | { | |
2727 | struct kmem_cache_node *n; | |
2728 | void *list = NULL; | |
2729 | ||
2730 | check_irq_off(); | |
2731 | ||
2732 | if (!page) | |
2733 | return; | |
2734 | ||
2735 | INIT_LIST_HEAD(&page->lru); | |
2736 | n = get_node(cachep, page_to_nid(page)); | |
2737 | ||
2738 | spin_lock(&n->list_lock); | |
2739 | n->total_slabs++; | |
2740 | if (!page->active) { | |
2741 | list_add_tail(&page->lru, &(n->slabs_free)); | |
2742 | n->free_slabs++; | |
2743 | } else | |
2744 | fixup_slab_list(cachep, n, page, &list); | |
2745 | ||
2746 | STATS_INC_GROWN(cachep); | |
2747 | n->free_objects += cachep->num - page->active; | |
2748 | spin_unlock(&n->list_lock); | |
2749 | ||
2750 | fixup_objfreelist_debug(cachep, &list); | |
2751 | } | |
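/*
 * Calling-pattern sketch (mirrors the must_grow paths later in this file,
 * e.g. ____cache_alloc_node()): the grow operation is split so the page
 * allocation can run with interrupts enabled, while cache_grow_end() links
 * the slab under the node list_lock once interrupts are off again.
 */
static void *sketch_grow_and_take(struct kmem_cache *cachep, gfp_t flags,
				  int node)
{
	struct page *page;
	void *obj = NULL;

	page = cache_grow_begin(cachep, flags, node);	/* may enable irqs */
	if (page)
		obj = slab_get_obj(cachep, page);	/* slab not linked yet */
	cache_grow_end(cachep, page);			/* link under list_lock */

	return obj;
}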
2752 | ||
2753 | #if DEBUG | |
2754 | ||
2755 | /* | |
2756 | * Perform extra freeing checks: | |
2757 | * - detect bad pointers. | |
2758 | * - POISON/RED_ZONE checking | |
2759 | */ | |
2760 | static void kfree_debugcheck(const void *objp) | |
2761 | { | |
2762 | if (!virt_addr_valid(objp)) { | |
2763 | pr_err("kfree_debugcheck: out of range ptr %lxh\n", | |
2764 | (unsigned long)objp); | |
2765 | BUG(); | |
2766 | } | |
2767 | } | |
2768 | ||
2769 | static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) | |
2770 | { | |
2771 | unsigned long long redzone1, redzone2; | |
2772 | ||
2773 | redzone1 = *dbg_redzone1(cache, obj); | |
2774 | redzone2 = *dbg_redzone2(cache, obj); | |
2775 | ||
2776 | /* | |
2777 | * Redzone is ok. | |
2778 | */ | |
2779 | if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) | |
2780 | return; | |
2781 | ||
2782 | if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) | |
2783 | slab_error(cache, "double free detected"); | |
2784 | else | |
2785 | slab_error(cache, "memory outside object was overwritten"); | |
2786 | ||
2787 | pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n", | |
2788 | obj, redzone1, redzone2); | |
2789 | } | |
2790 | ||
2791 | static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, | |
2792 | unsigned long caller) | |
2793 | { | |
2794 | unsigned int objnr; | |
2795 | struct page *page; | |
2796 | ||
2797 | BUG_ON(virt_to_cache(objp) != cachep); | |
2798 | ||
2799 | objp -= obj_offset(cachep); | |
2800 | kfree_debugcheck(objp); | |
2801 | page = virt_to_head_page(objp); | |
2802 | ||
2803 | if (cachep->flags & SLAB_RED_ZONE) { | |
2804 | verify_redzone_free(cachep, objp); | |
2805 | *dbg_redzone1(cachep, objp) = RED_INACTIVE; | |
2806 | *dbg_redzone2(cachep, objp) = RED_INACTIVE; | |
2807 | } | |
2808 | if (cachep->flags & SLAB_STORE_USER) { | |
2809 | set_store_user_dirty(cachep); | |
2810 | *dbg_userword(cachep, objp) = (void *)caller; | |
2811 | } | |
2812 | ||
2813 | objnr = obj_to_index(cachep, page, objp); | |
2814 | ||
2815 | BUG_ON(objnr >= cachep->num); | |
2816 | BUG_ON(objp != index_to_obj(cachep, page, objnr)); | |
2817 | ||
2818 | if (cachep->flags & SLAB_POISON) { | |
2819 | poison_obj(cachep, objp, POISON_FREE); | |
2820 | slab_kernel_map(cachep, objp, 0, caller); | |
2821 | } | |
2822 | return objp; | |
2823 | } | |
2824 | ||
2825 | #else | |
2826 | #define kfree_debugcheck(x) do { } while(0) | |
2827 | #define cache_free_debugcheck(x,objp,z) (objp) | |
2828 | #endif | |
2829 | ||
2830 | static inline void fixup_objfreelist_debug(struct kmem_cache *cachep, | |
2831 | void **list) | |
2832 | { | |
2833 | #if DEBUG | |
2834 | void *next = *list; | |
2835 | void *objp; | |
2836 | ||
2837 | while (next) { | |
2838 | objp = next - obj_offset(cachep); | |
2839 | next = *(void **)next; | |
2840 | poison_obj(cachep, objp, POISON_FREE); | |
2841 | } | |
2842 | #endif | |
2843 | } | |
2844 | ||
2845 | static inline void fixup_slab_list(struct kmem_cache *cachep, | |
2846 | struct kmem_cache_node *n, struct page *page, | |
2847 | void **list) | |
2848 | { | |
2849 | /* Move the slab to the correct list: */ | |
2850 | list_del(&page->lru); | |
2851 | if (page->active == cachep->num) { | |
2852 | list_add(&page->lru, &n->slabs_full); | |
2853 | if (OBJFREELIST_SLAB(cachep)) { | |
2854 | #if DEBUG | |
2855 | /* Poisoning will be done without holding the lock */ | |
2856 | if (cachep->flags & SLAB_POISON) { | |
2857 | void **objp = page->freelist; | |
2858 | ||
2859 | *objp = *list; | |
2860 | *list = objp; | |
2861 | } | |
2862 | #endif | |
2863 | page->freelist = NULL; | |
2864 | } | |
2865 | } else | |
2866 | list_add(&page->lru, &n->slabs_partial); | |
2867 | } | |
2868 | ||
2869 | /* Try to find non-pfmemalloc slab if needed */ | |
2870 | static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n, | |
2871 | struct page *page, bool pfmemalloc) | |
2872 | { | |
2873 | if (!page) | |
2874 | return NULL; | |
2875 | ||
2876 | if (pfmemalloc) | |
2877 | return page; | |
2878 | ||
2879 | if (!PageSlabPfmemalloc(page)) | |
2880 | return page; | |
2881 | ||
2882 | /* No need to keep pfmemalloc slab if we have enough free objects */ | |
2883 | if (n->free_objects > n->free_limit) { | |
2884 | ClearPageSlabPfmemalloc(page); | |
2885 | return page; | |
2886 | } | |
2887 | ||
2888 | /* Move pfmemalloc slab to the end of list to speed up next search */ | |
2889 | list_del(&page->lru); | |
2890 | if (!page->active) { | |
2891 | list_add_tail(&page->lru, &n->slabs_free); | |
2892 | n->free_slabs++; | |
2893 | } else | |
2894 | list_add_tail(&page->lru, &n->slabs_partial); | |
2895 | ||
2896 | list_for_each_entry(page, &n->slabs_partial, lru) { | |
2897 | if (!PageSlabPfmemalloc(page)) | |
2898 | return page; | |
2899 | } | |
2900 | ||
2901 | n->free_touched = 1; | |
2902 | list_for_each_entry(page, &n->slabs_free, lru) { | |
2903 | if (!PageSlabPfmemalloc(page)) { | |
2904 | n->free_slabs--; | |
2905 | return page; | |
2906 | } | |
2907 | } | |
2908 | ||
2909 | return NULL; | |
2910 | } | |
2911 | ||
2912 | static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc) | |
2913 | { | |
2914 | struct page *page; | |
2915 | ||
2916 | assert_spin_locked(&n->list_lock); | |
2917 | page = list_first_entry_or_null(&n->slabs_partial, struct page, lru); | |
2918 | if (!page) { | |
2919 | n->free_touched = 1; | |
2920 | page = list_first_entry_or_null(&n->slabs_free, struct page, | |
2921 | lru); | |
2922 | if (page) | |
2923 | n->free_slabs--; | |
2924 | } | |
2925 | ||
2926 | if (sk_memalloc_socks()) | |
2927 | page = get_valid_first_slab(n, page, pfmemalloc); | |
2928 | ||
2929 | return page; | |
2930 | } | |
2931 | ||
2932 | static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, | |
2933 | struct kmem_cache_node *n, gfp_t flags) | |
2934 | { | |
2935 | struct page *page; | |
2936 | void *obj; | |
2937 | void *list = NULL; | |
2938 | ||
2939 | if (!gfp_pfmemalloc_allowed(flags)) | |
2940 | return NULL; | |
2941 | ||
2942 | spin_lock(&n->list_lock); | |
2943 | page = get_first_slab(n, true); | |
2944 | if (!page) { | |
2945 | spin_unlock(&n->list_lock); | |
2946 | return NULL; | |
2947 | } | |
2948 | ||
2949 | obj = slab_get_obj(cachep, page); | |
2950 | n->free_objects--; | |
2951 | ||
2952 | fixup_slab_list(cachep, n, page, &list); | |
2953 | ||
2954 | spin_unlock(&n->list_lock); | |
2955 | fixup_objfreelist_debug(cachep, &list); | |
2956 | ||
2957 | return obj; | |
2958 | } | |
2959 | ||
2960 | /* | |
2961 | * The slab list should be fixed up by fixup_slab_list() for an existing | |
2962 | * slab, or by cache_grow_end() for a new slab. | |
2963 | */ | |
2964 | static __always_inline int alloc_block(struct kmem_cache *cachep, | |
2965 | struct array_cache *ac, struct page *page, int batchcount) | |
2966 | { | |
2967 | /* | |
2968 | * There must be at least one object available for | |
2969 | * allocation. | |
2970 | */ | |
2971 | BUG_ON(page->active >= cachep->num); | |
2972 | ||
2973 | while (page->active < cachep->num && batchcount--) { | |
2974 | STATS_INC_ALLOCED(cachep); | |
2975 | STATS_INC_ACTIVE(cachep); | |
2976 | STATS_SET_HIGH(cachep); | |
2977 | ||
2978 | ac->entry[ac->avail++] = slab_get_obj(cachep, page); | |
2979 | } | |
2980 | ||
2981 | return batchcount; | |
2982 | } | |
2983 | ||
2984 | static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) | |
2985 | { | |
2986 | int batchcount; | |
2987 | struct kmem_cache_node *n; | |
2988 | struct array_cache *ac, *shared; | |
2989 | int node; | |
2990 | void *list = NULL; | |
2991 | struct page *page; | |
2992 | ||
2993 | check_irq_off(); | |
2994 | node = numa_mem_id(); | |
2995 | ||
2996 | ac = cpu_cache_get(cachep); | |
2997 | batchcount = ac->batchcount; | |
2998 | if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { | |
2999 | /* | |
3000 | * If there was little recent activity on this cache, then | |
3001 | * perform only a partial refill. Otherwise we could generate | |
3002 | * refill bouncing. | |
3003 | */ | |
3004 | batchcount = BATCHREFILL_LIMIT; | |
3005 | } | |
3006 | n = get_node(cachep, node); | |
3007 | ||
3008 | BUG_ON(ac->avail > 0 || !n); | |
3009 | shared = READ_ONCE(n->shared); | |
3010 | if (!n->free_objects && (!shared || !shared->avail)) | |
3011 | goto direct_grow; | |
3012 | ||
3013 | spin_lock(&n->list_lock); | |
3014 | shared = READ_ONCE(n->shared); | |
3015 | ||
3016 | /* See if we can refill from the shared array */ | |
3017 | if (shared && transfer_objects(ac, shared, batchcount)) { | |
3018 | shared->touched = 1; | |
3019 | goto alloc_done; | |
3020 | } | |
3021 | ||
3022 | while (batchcount > 0) { | |
3023 | /* Get the slab that the allocation is to come from. */ | |
3024 | page = get_first_slab(n, false); | |
3025 | if (!page) | |
3026 | goto must_grow; | |
3027 | ||
3028 | check_spinlock_acquired(cachep); | |
3029 | ||
3030 | batchcount = alloc_block(cachep, ac, page, batchcount); | |
3031 | fixup_slab_list(cachep, n, page, &list); | |
3032 | } | |
3033 | ||
3034 | must_grow: | |
3035 | n->free_objects -= ac->avail; | |
3036 | alloc_done: | |
3037 | spin_unlock(&n->list_lock); | |
3038 | fixup_objfreelist_debug(cachep, &list); | |
3039 | ||
3040 | direct_grow: | |
3041 | if (unlikely(!ac->avail)) { | |
3042 | /* Check if we can use obj in pfmemalloc slab */ | |
3043 | if (sk_memalloc_socks()) { | |
3044 | void *obj = cache_alloc_pfmemalloc(cachep, n, flags); | |
3045 | ||
3046 | if (obj) | |
3047 | return obj; | |
3048 | } | |
3049 | ||
3050 | page = cache_grow_begin(cachep, gfp_exact_node(flags), node); | |
3051 | ||
3052 | /* | |
3053 | * cache_grow_begin() can reenable interrupts, | |
3054 | * then ac could change. | |
3055 | */ | |
3056 | ac = cpu_cache_get(cachep); | |
3057 | if (!ac->avail && page) | |
3058 | alloc_block(cachep, ac, page, batchcount); | |
3059 | cache_grow_end(cachep, page); | |
3060 | ||
3061 | if (!ac->avail) | |
3062 | return NULL; | |
3063 | } | |
3064 | ac->touched = 1; | |
3065 | ||
3066 | return ac->entry[--ac->avail]; | |
3067 | } | |
3068 | ||
3069 | static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, | |
3070 | gfp_t flags) | |
3071 | { | |
3072 | might_sleep_if(gfpflags_allow_blocking(flags)); | |
3073 | } | |
3074 | ||
3075 | #if DEBUG | |
3076 | static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, | |
3077 | gfp_t flags, void *objp, unsigned long caller) | |
3078 | { | |
3079 | if (!objp) | |
3080 | return objp; | |
3081 | if (cachep->flags & SLAB_POISON) { | |
3082 | check_poison_obj(cachep, objp); | |
3083 | slab_kernel_map(cachep, objp, 1, 0); | |
3084 | poison_obj(cachep, objp, POISON_INUSE); | |
3085 | } | |
3086 | if (cachep->flags & SLAB_STORE_USER) | |
3087 | *dbg_userword(cachep, objp) = (void *)caller; | |
3088 | ||
3089 | if (cachep->flags & SLAB_RED_ZONE) { | |
3090 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || | |
3091 | *dbg_redzone2(cachep, objp) != RED_INACTIVE) { | |
3092 | slab_error(cachep, "double free, or memory outside object was overwritten"); | |
3093 | pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n", | |
3094 | objp, *dbg_redzone1(cachep, objp), | |
3095 | *dbg_redzone2(cachep, objp)); | |
3096 | } | |
3097 | *dbg_redzone1(cachep, objp) = RED_ACTIVE; | |
3098 | *dbg_redzone2(cachep, objp) = RED_ACTIVE; | |
3099 | } | |
3100 | ||
3101 | objp += obj_offset(cachep); | |
3102 | if (cachep->ctor && cachep->flags & SLAB_POISON) | |
3103 | cachep->ctor(objp); | |
3104 | if (ARCH_SLAB_MINALIGN && | |
3105 | ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { | |
3106 | pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", | |
3107 | objp, (int)ARCH_SLAB_MINALIGN); | |
3108 | } | |
3109 | return objp; | |
3110 | } | |
3111 | #else | |
3112 | #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) | |
3113 | #endif | |
3114 | ||
3115 | static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) | |
3116 | { | |
3117 | void *objp; | |
3118 | struct array_cache *ac; | |
3119 | ||
3120 | check_irq_off(); | |
3121 | ||
3122 | ac = cpu_cache_get(cachep); | |
3123 | if (likely(ac->avail)) { | |
3124 | ac->touched = 1; | |
3125 | objp = ac->entry[--ac->avail]; | |
3126 | ||
3127 | STATS_INC_ALLOCHIT(cachep); | |
3128 | goto out; | |
3129 | } | |
3130 | ||
3131 | STATS_INC_ALLOCMISS(cachep); | |
3132 | objp = cache_alloc_refill(cachep, flags); | |
3133 | /* | |
3134 | * the 'ac' may be updated by cache_alloc_refill(), | |
3135 | * and kmemleak_erase() requires its correct value. | |
3136 | */ | |
3137 | ac = cpu_cache_get(cachep); | |
3138 | ||
3139 | out: | |
3140 | /* | |
3141 | * To avoid a false negative, if an object that is in one of the | |
3142 | * per-CPU caches is leaked, we need to make sure kmemleak doesn't | |
3143 | * treat the array pointers as a reference to the object. | |
3144 | */ | |
3145 | if (objp) | |
3146 | kmemleak_erase(&ac->entry[ac->avail]); | |
3147 | return objp; | |
3148 | } | |
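/*
 * Standalone sketch of the fast path above: the per-CPU array is a strict
 * LIFO stack of object pointers, so a hit pops the most recently freed
 * (and therefore cache-warm) object.  Fields mirror struct array_cache but
 * the names here are hypothetical.
 */
#include <stddef.h>

struct sketch_cpu_cache {
	unsigned int avail;
	unsigned int limit;
	void *entry[32];
};

static void *sketch_cpu_alloc(struct sketch_cpu_cache *ac)
{
	return ac->avail ? ac->entry[--ac->avail] : NULL;	/* pop */
}

static int sketch_cpu_free(struct sketch_cpu_cache *ac, void *objp)
{
	if (ac->avail >= ac->limit)
		return -1;		/* caller must flush a batch first */
	ac->entry[ac->avail++] = objp;	/* push */
	return 0;
}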
3149 | ||
3150 | #ifdef CONFIG_NUMA | |
3151 | /* | |
3152 | * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set. | |
3153 | * | |
3154 | * If we are in_interrupt, then process context, including cpusets and | |
3155 | * mempolicy, may not apply and should not be used for allocation policy. | |
3156 | */ | |
3157 | static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) | |
3158 | { | |
3159 | int nid_alloc, nid_here; | |
3160 | ||
3161 | if (in_interrupt() || (flags & __GFP_THISNODE)) | |
3162 | return NULL; | |
3163 | nid_alloc = nid_here = numa_mem_id(); | |
3164 | if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) | |
3165 | nid_alloc = cpuset_slab_spread_node(); | |
3166 | else if (current->mempolicy) | |
3167 | nid_alloc = mempolicy_slab_node(); | |
3168 | if (nid_alloc != nid_here) | |
3169 | return ____cache_alloc_node(cachep, flags, nid_alloc); | |
3170 | return NULL; | |
3171 | } | |
3172 | ||
3173 | /* | |
3174 | * Fallback function if there was no memory available and no objects on a | |
3175 | * certain node and fallback is permitted. First we scan all the | |
3176 | * available nodes for available objects. If that fails then we | |
3177 | * perform an allocation without specifying a node. This allows the page | |
3178 | * allocator to do its reclaim / fallback magic. We then insert the | |
3179 | * slab into the proper nodelist and then allocate from it. | |
3180 | */ | |
3181 | static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) | |
3182 | { | |
3183 | struct zonelist *zonelist; | |
3184 | struct zoneref *z; | |
3185 | struct zone *zone; | |
3186 | enum zone_type high_zoneidx = gfp_zone(flags); | |
3187 | void *obj = NULL; | |
3188 | struct page *page; | |
3189 | int nid; | |
3190 | unsigned int cpuset_mems_cookie; | |
3191 | ||
3192 | if (flags & __GFP_THISNODE) | |
3193 | return NULL; | |
3194 | ||
3195 | retry_cpuset: | |
3196 | cpuset_mems_cookie = read_mems_allowed_begin(); | |
3197 | zonelist = node_zonelist(mempolicy_slab_node(), flags); | |
3198 | ||
3199 | retry: | |
3200 | /* | |
3201 | * Look through allowed nodes for objects available | |
3202 | * from existing per node queues. | |
3203 | */ | |
3204 | for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { | |
3205 | nid = zone_to_nid(zone); | |
3206 | ||
3207 | if (cpuset_zone_allowed(zone, flags) && | |
3208 | get_node(cache, nid) && | |
3209 | get_node(cache, nid)->free_objects) { | |
3210 | obj = ____cache_alloc_node(cache, | |
3211 | gfp_exact_node(flags), nid); | |
3212 | if (obj) | |
3213 | break; | |
3214 | } | |
3215 | } | |
3216 | ||
3217 | if (!obj) { | |
3218 | /* | |
3219 | * This allocation will be performed within the constraints | |
3220 | * of the current cpuset / memory policy requirements. | |
3221 | * We may trigger various forms of reclaim on the allowed | |
3222 | * set and go into memory reserves if necessary. | |
3223 | */ | |
3224 | page = cache_grow_begin(cache, flags, numa_mem_id()); | |
3225 | cache_grow_end(cache, page); | |
3226 | if (page) { | |
3227 | nid = page_to_nid(page); | |
3228 | obj = ____cache_alloc_node(cache, | |
3229 | gfp_exact_node(flags), nid); | |
3230 | ||
3231 | /* | |
3232 | * Another processor may allocate the objects in | |
3233 | * the slab since we are not holding any locks. | |
3234 | */ | |
3235 | if (!obj) | |
3236 | goto retry; | |
3237 | } | |
3238 | } | |
3239 | ||
3240 | if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie))) | |
3241 | goto retry_cpuset; | |
3242 | return obj; | |
3243 | } | |
3244 | ||
3245 | /* | |
3246 | * An interface to enable slab creation on nodeid | |
3247 | */ | |
3248 | static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, | |
3249 | int nodeid) | |
3250 | { | |
3251 | struct page *page; | |
3252 | struct kmem_cache_node *n; | |
3253 | void *obj = NULL; | |
3254 | void *list = NULL; | |
3255 | ||
3256 | VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES); | |
3257 | n = get_node(cachep, nodeid); | |
3258 | BUG_ON(!n); | |
3259 | ||
3260 | check_irq_off(); | |
3261 | spin_lock(&n->list_lock); | |
3262 | page = get_first_slab(n, false); | |
3263 | if (!page) | |
3264 | goto must_grow; | |
3265 | ||
3266 | check_spinlock_acquired_node(cachep, nodeid); | |
3267 | ||
3268 | STATS_INC_NODEALLOCS(cachep); | |
3269 | STATS_INC_ACTIVE(cachep); | |
3270 | STATS_SET_HIGH(cachep); | |
3271 | ||
3272 | BUG_ON(page->active == cachep->num); | |
3273 | ||
3274 | obj = slab_get_obj(cachep, page); | |
3275 | n->free_objects--; | |
3276 | ||
3277 | fixup_slab_list(cachep, n, page, &list); | |
3278 | ||
3279 | spin_unlock(&n->list_lock); | |
3280 | fixup_objfreelist_debug(cachep, &list); | |
3281 | return obj; | |
3282 | ||
3283 | must_grow: | |
3284 | spin_unlock(&n->list_lock); | |
3285 | page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid); | |
3286 | if (page) { | |
3287 | /* This slab isn't counted yet so don't update free_objects */ | |
3288 | obj = slab_get_obj(cachep, page); | |
3289 | } | |
3290 | cache_grow_end(cachep, page); | |
3291 | ||
3292 | return obj ? obj : fallback_alloc(cachep, flags); | |
3293 | } | |
3294 | ||
3295 | static __always_inline void * | |
3296 | slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, | |
3297 | unsigned long caller) | |
3298 | { | |
3299 | unsigned long save_flags; | |
3300 | void *ptr; | |
3301 | int slab_node = numa_mem_id(); | |
3302 | ||
3303 | flags &= gfp_allowed_mask; | |
3304 | cachep = slab_pre_alloc_hook(cachep, flags); | |
3305 | if (unlikely(!cachep)) | |
3306 | return NULL; | |
3307 | ||
3308 | cache_alloc_debugcheck_before(cachep, flags); | |
3309 | local_irq_save(save_flags); | |
3310 | ||
3311 | if (nodeid == NUMA_NO_NODE) | |
3312 | nodeid = slab_node; | |
3313 | ||
3314 | if (unlikely(!get_node(cachep, nodeid))) { | |
3315 | /* Node not bootstrapped yet */ | |
3316 | ptr = fallback_alloc(cachep, flags); | |
3317 | goto out; | |
3318 | } | |
3319 | ||
3320 | if (nodeid == slab_node) { | |
3321 | /* | |
3322 | * Use the locally cached objects if possible. | |
3323 | * However ____cache_alloc does not allow fallback | |
3324 | * to other nodes. It may fail while we still have | |
3325 | * objects on other nodes available. | |
3326 | */ | |
3327 | ptr = ____cache_alloc(cachep, flags); | |
3328 | if (ptr) | |
3329 | goto out; | |
3330 | } | |
3331 | /* ____cache_alloc_node can fall back to other nodes */ | |
3332 | ptr = ____cache_alloc_node(cachep, flags, nodeid); | |
3333 | out: | |
3334 | local_irq_restore(save_flags); | |
3335 | ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); | |
3336 | ||
3337 | if (unlikely(flags & __GFP_ZERO) && ptr) | |
3338 | memset(ptr, 0, cachep->object_size); | |
3339 | ||
3340 | slab_post_alloc_hook(cachep, flags, 1, &ptr); | |
3341 | return ptr; | |
3342 | } | |
3343 | ||
3344 | static __always_inline void * | |
3345 | __do_cache_alloc(struct kmem_cache *cache, gfp_t flags) | |
3346 | { | |
3347 | void *objp; | |
3348 | ||
3349 | if (current->mempolicy || cpuset_do_slab_mem_spread()) { | |
3350 | objp = alternate_node_alloc(cache, flags); | |
3351 | if (objp) | |
3352 | goto out; | |
3353 | } | |
3354 | objp = ____cache_alloc(cache, flags); | |
3355 | ||
3356 | /* | |
3357 | * We may just have run out of memory on the local node. | |
3358 | * ____cache_alloc_node() knows how to locate memory on other nodes. | |
3359 | */ | |
3360 | if (!objp) | |
3361 | objp = ____cache_alloc_node(cache, flags, numa_mem_id()); | |
3362 | ||
3363 | out: | |
3364 | return objp; | |
3365 | } | |
3366 | #else | |
3367 | ||
3368 | static __always_inline void * | |
3369 | __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | |
3370 | { | |
3371 | return ____cache_alloc(cachep, flags); | |
3372 | } | |
3373 | ||
3374 | #endif /* CONFIG_NUMA */ | |
3375 | ||
3376 | static __always_inline void * | |
3377 | slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) | |
3378 | { | |
3379 | unsigned long save_flags; | |
3380 | void *objp; | |
3381 | ||
3382 | flags &= gfp_allowed_mask; | |
3383 | cachep = slab_pre_alloc_hook(cachep, flags); | |
3384 | if (unlikely(!cachep)) | |
3385 | return NULL; | |
3386 | ||
3387 | cache_alloc_debugcheck_before(cachep, flags); | |
3388 | local_irq_save(save_flags); | |
3389 | objp = __do_cache_alloc(cachep, flags); | |
3390 | local_irq_restore(save_flags); | |
3391 | objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); | |
3392 | prefetchw(objp); | |
3393 | ||
3394 | if (unlikely(flags & __GFP_ZERO) && objp) | |
3395 | memset(objp, 0, cachep->object_size); | |
3396 | ||
3397 | slab_post_alloc_hook(cachep, flags, 1, &objp); | |
3398 | return objp; | |
3399 | } | |
3400 | ||
3401 | /* | |
3402 | * Caller must hold the correct kmem_cache_node's list_lock. | |
3403 | * @list: list of detached free slabs, to be freed by the caller | |
3404 | */ | |
3405 | static void free_block(struct kmem_cache *cachep, void **objpp, | |
3406 | int nr_objects, int node, struct list_head *list) | |
3407 | { | |
3408 | int i; | |
3409 | struct kmem_cache_node *n = get_node(cachep, node); | |
3410 | struct page *page; | |
3411 | ||
3412 | n->free_objects += nr_objects; | |
3413 | ||
3414 | for (i = 0; i < nr_objects; i++) { | |
3415 | void *objp; | |
3416 | struct page *page; | |
3417 | ||
3418 | objp = objpp[i]; | |
3419 | ||
3420 | page = virt_to_head_page(objp); | |
3421 | list_del(&page->lru); | |
3422 | check_spinlock_acquired_node(cachep, node); | |
3423 | slab_put_obj(cachep, page, objp); | |
3424 | STATS_DEC_ACTIVE(cachep); | |
3425 | ||
3426 | /* fixup slab chains */ | |
3427 | if (page->active == 0) { | |
3428 | list_add(&page->lru, &n->slabs_free); | |
3429 | n->free_slabs++; | |
3430 | } else { | |
3431 | /* Unconditionally move a slab to the end of the | |
3432 | * partial list on free - this gives the remaining | |
3433 | * objects the maximum time to be freed, too. | |
3434 | */ | |
3435 | list_add_tail(&page->lru, &n->slabs_partial); | |
3436 | } | |
3437 | } | |
3438 | ||
3439 | while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) { | |
3440 | n->free_objects -= cachep->num; | |
3441 | ||
3442 | page = list_last_entry(&n->slabs_free, struct page, lru); | |
3443 | list_move(&page->lru, list); | |
3444 | n->free_slabs--; | |
3445 | n->total_slabs--; | |
3446 | } | |
3447 | } | |
3448 | ||
3449 | static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) | |
3450 | { | |
3451 | int batchcount; | |
3452 | struct kmem_cache_node *n; | |
3453 | int node = numa_mem_id(); | |
3454 | LIST_HEAD(list); | |
3455 | ||
3456 | batchcount = ac->batchcount; | |
3457 | ||
3458 | check_irq_off(); | |
3459 | n = get_node(cachep, node); | |
3460 | spin_lock(&n->list_lock); | |
3461 | if (n->shared) { | |
3462 | struct array_cache *shared_array = n->shared; | |
3463 | int max = shared_array->limit - shared_array->avail; | |
3464 | if (max) { | |
3465 | if (batchcount > max) | |
3466 | batchcount = max; | |
3467 | memcpy(&(shared_array->entry[shared_array->avail]), | |
3468 | ac->entry, sizeof(void *) * batchcount); | |
3469 | shared_array->avail += batchcount; | |
3470 | goto free_done; | |
3471 | } | |
3472 | } | |
3473 | ||
3474 | free_block(cachep, ac->entry, batchcount, node, &list); | |
3475 | free_done: | |
3476 | #if STATS | |
3477 | { | |
3478 | int i = 0; | |
3479 | struct page *page; | |
3480 | ||
3481 | list_for_each_entry(page, &n->slabs_free, lru) { | |
3482 | BUG_ON(page->active); | |
3483 | ||
3484 | i++; | |
3485 | } | |
3486 | STATS_SET_FREEABLE(cachep, i); | |
3487 | } | |
3488 | #endif | |
3489 | spin_unlock(&n->list_lock); | |
3490 | slabs_destroy(cachep, &list); | |
3491 | ac->avail -= batchcount; | |
3492 | memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); | |
3493 | } | |
3494 | ||
3495 | /* | |
3496 | * Release an obj back to its cache. If the obj has a constructed state, it must | |
3497 | * be in this state _before_ it is released. Called with interrupts disabled. | |
3498 | */ | |
3499 | static inline void __cache_free(struct kmem_cache *cachep, void *objp, | |
3500 | unsigned long caller) | |
3501 | { | |
3502 | /* Put the object into the quarantine, don't touch it for now. */ | |
3503 | if (kasan_slab_free(cachep, objp)) | |
3504 | return; | |
3505 | ||
3506 | ___cache_free(cachep, objp, caller); | |
3507 | } | |
3508 | ||
3509 | void ___cache_free(struct kmem_cache *cachep, void *objp, | |
3510 | unsigned long caller) | |
3511 | { | |
3512 | struct array_cache *ac = cpu_cache_get(cachep); | |
3513 | ||
3514 | check_irq_off(); | |
3515 | kmemleak_free_recursive(objp, cachep->flags); | |
3516 | objp = cache_free_debugcheck(cachep, objp, caller); | |
3517 | ||
3518 | kmemcheck_slab_free(cachep, objp, cachep->object_size); | |
3519 | ||
3520 | /* | |
3521 | * Skip calling cache_free_alien() when the platform is not NUMA. | |
3522 | * This avoids the cache misses incurred by accessing the slab page (a | |
3523 | * per-page memory reference) to get the nodeid. Instead, a global | |
3524 | * variable is checked to skip the call; it is most likely already | |
3525 | * present in the cache. | |
3526 | */ | |
3527 | if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) | |
3528 | return; | |
3529 | ||
3530 | if (ac->avail < ac->limit) { | |
3531 | STATS_INC_FREEHIT(cachep); | |
3532 | } else { | |
3533 | STATS_INC_FREEMISS(cachep); | |
3534 | cache_flusharray(cachep, ac); | |
3535 | } | |
3536 | ||
3537 | if (sk_memalloc_socks()) { | |
3538 | struct page *page = virt_to_head_page(objp); | |
3539 | ||
3540 | if (unlikely(PageSlabPfmemalloc(page))) { | |
3541 | cache_free_pfmemalloc(cachep, page, objp); | |
3542 | return; | |
3543 | } | |
3544 | } | |
3545 | ||
3546 | ac->entry[ac->avail++] = objp; | |
3547 | } | |
3548 | ||
3549 | /** | |
3550 | * kmem_cache_alloc - Allocate an object | |
3551 | * @cachep: The cache to allocate from. | |
3552 | * @flags: See kmalloc(). | |
3553 | * | |
3554 | * Allocate an object from this cache. The flags are only relevant | |
3555 | * if the cache has no available objects. | |
3556 | */ | |
3557 | void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | |
3558 | { | |
3559 | void *ret = slab_alloc(cachep, flags, _RET_IP_); | |
3560 | ||
3561 | kasan_slab_alloc(cachep, ret, flags); | |
3562 | trace_kmem_cache_alloc(_RET_IP_, ret, | |
3563 | cachep->object_size, cachep->size, flags); | |
3564 | ||
3565 | return ret; | |
3566 | } | |
3567 | EXPORT_SYMBOL(kmem_cache_alloc); | |
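| ||
| /* | |
|  * Illustrative sketch (editorial addition, not part of the original | |
|  * source): typical use of kmem_cache_alloc()/kmem_cache_free() with a | |
|  * private cache. The cache name and struct foo are hypothetical. | |
|  * | |
|  *	struct foo { int a; struct list_head list; }; | |
|  *	static struct kmem_cache *foo_cachep; | |
|  * | |
|  *	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), | |
|  *				       0, SLAB_HWCACHE_ALIGN, NULL); | |
|  *	if (foo_cachep) { | |
|  *		struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL); | |
|  * | |
|  *		if (f) | |
|  *			kmem_cache_free(foo_cachep, f); | |
|  *		kmem_cache_destroy(foo_cachep); | |
|  *	} | |
|  */ | |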
3568 | ||
3569 | static __always_inline void | |
3570 | cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags, | |
3571 | size_t size, void **p, unsigned long caller) | |
3572 | { | |
3573 | size_t i; | |
3574 | ||
3575 | for (i = 0; i < size; i++) | |
3576 | p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller); | |
3577 | } | |
3578 | ||
3579 | int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, | |
3580 | void **p) | |
3581 | { | |
3582 | size_t i; | |
3583 | ||
3584 | s = slab_pre_alloc_hook(s, flags); | |
3585 | if (!s) | |
3586 | return 0; | |
3587 | ||
3588 | cache_alloc_debugcheck_before(s, flags); | |
3589 | ||
3590 | local_irq_disable(); | |
3591 | for (i = 0; i < size; i++) { | |
3592 | void *objp = __do_cache_alloc(s, flags); | |
3593 | ||
3594 | if (unlikely(!objp)) | |
3595 | goto error; | |
3596 | p[i] = objp; | |
3597 | } | |
3598 | local_irq_enable(); | |
3599 | ||
3600 | cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_); | |
3601 | ||
3602 | /* Clear memory outside IRQ disabled section */ | |
3603 | if (unlikely(flags & __GFP_ZERO)) | |
3604 | for (i = 0; i < size; i++) | |
3605 | memset(p[i], 0, s->object_size); | |
3606 | ||
3607 | slab_post_alloc_hook(s, flags, size, p); | |
3608 | /* FIXME: Trace call missing. Christoph would like a bulk variant */ | |
3609 | return size; | |
3610 | error: | |
3611 | local_irq_enable(); | |
3612 | cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_); | |
3613 | slab_post_alloc_hook(s, flags, i, p); | |
3614 | __kmem_cache_free_bulk(s, i, p); | |
3615 | return 0; | |
3616 | } | |
3617 | EXPORT_SYMBOL(kmem_cache_alloc_bulk); | |
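| ||
| /* | |
|  * Illustrative sketch (editorial addition, not part of the original | |
|  * source): allocating and releasing several objects in one call, which | |
|  * amortizes the irq-disable and debug-hook overhead across the whole | |
|  * batch. foo_cachep is the hypothetical cache from the example above. | |
|  * On failure the bulk allocator frees any partial batch itself and | |
|  * returns 0, so the return value is either 0 or the full request. | |
|  * | |
|  *	void *objs[16]; | |
|  *	int nr = kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL, | |
|  *				       ARRAY_SIZE(objs), objs); | |
|  * | |
|  *	if (nr) | |
|  *		kmem_cache_free_bulk(foo_cachep, nr, objs); | |
|  */ | |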
3618 | ||
3619 | #ifdef CONFIG_TRACING | |
3620 | void * | |
3621 | kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) | |
3622 | { | |
3623 | void *ret; | |
3624 | ||
3625 | ret = slab_alloc(cachep, flags, _RET_IP_); | |
3626 | ||
3627 | kasan_kmalloc(cachep, ret, size, flags); | |
3628 | trace_kmalloc(_RET_IP_, ret, | |
3629 | size, cachep->size, flags); | |
3630 | return ret; | |
3631 | } | |
3632 | EXPORT_SYMBOL(kmem_cache_alloc_trace); | |
3633 | #endif | |
3634 | ||
3635 | #ifdef CONFIG_NUMA | |
3636 | /** | |
3637 | * kmem_cache_alloc_node - Allocate an object on the specified node | |
3638 | * @cachep: The cache to allocate from. | |
3639 | * @flags: See kmalloc(). | |
3640 | * @nodeid: node number of the target node. | |
3641 | * | |
3642 | * Identical to kmem_cache_alloc but it will allocate memory on the given | |
3643 | * node, which can improve the performance for cpu bound structures. | |
3644 | * | |
3645 | * Fallback to other node is possible if __GFP_THISNODE is not set. | |
3646 | */ | |
3647 | void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) | |
3648 | { | |
3649 | void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); | |
3650 | ||
3651 | kasan_slab_alloc(cachep, ret, flags); | |
3652 | trace_kmem_cache_alloc_node(_RET_IP_, ret, | |
3653 | cachep->object_size, cachep->size, | |
3654 | flags, nodeid); | |
3655 | ||
3656 | return ret; | |
3657 | } | |
3658 | EXPORT_SYMBOL(kmem_cache_alloc_node); | |
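| ||
| /* | |
|  * Illustrative sketch (editorial addition, not part of the original | |
|  * source): keeping a per-device structure on the node the device is | |
|  * attached to. foo_cachep and dev are hypothetical; a NUMA_NO_NODE | |
|  * result from dev_to_node() simply falls back to the local node, as | |
|  * slab_alloc_node() above does. | |
|  * | |
|  *	struct foo *f = kmem_cache_alloc_node(foo_cachep, GFP_KERNEL, | |
|  *					      dev_to_node(dev)); | |
|  */ | |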
3659 | ||
3660 | #ifdef CONFIG_TRACING | |
3661 | void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, | |
3662 | gfp_t flags, | |
3663 | int nodeid, | |
3664 | size_t size) | |
3665 | { | |
3666 | void *ret; | |
3667 | ||
3668 | ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); | |
3669 | ||
3670 | kasan_kmalloc(cachep, ret, size, flags); | |
3671 | trace_kmalloc_node(_RET_IP_, ret, | |
3672 | size, cachep->size, | |
3673 | flags, nodeid); | |
3674 | return ret; | |
3675 | } | |
3676 | EXPORT_SYMBOL(kmem_cache_alloc_node_trace); | |
3677 | #endif | |
3678 | ||
3679 | static __always_inline void * | |
3680 | __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) | |
3681 | { | |
3682 | struct kmem_cache *cachep; | |
3683 | void *ret; | |
3684 | ||
3685 | cachep = kmalloc_slab(size, flags); | |
3686 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) | |
3687 | return cachep; | |
3688 | ret = kmem_cache_alloc_node_trace(cachep, flags, node, size); | |
3689 | kasan_kmalloc(cachep, ret, size, flags); | |
3690 | ||
3691 | return ret; | |
3692 | } | |
3693 | ||
3694 | void *__kmalloc_node(size_t size, gfp_t flags, int node) | |
3695 | { | |
3696 | return __do_kmalloc_node(size, flags, node, _RET_IP_); | |
3697 | } | |
3698 | EXPORT_SYMBOL(__kmalloc_node); | |
3699 | ||
3700 | void *__kmalloc_node_track_caller(size_t size, gfp_t flags, | |
3701 | int node, unsigned long caller) | |
3702 | { | |
3703 | return __do_kmalloc_node(size, flags, node, caller); | |
3704 | } | |
3705 | EXPORT_SYMBOL(__kmalloc_node_track_caller); | |
3706 | #endif /* CONFIG_NUMA */ | |
3707 | ||
3708 | /** | |
3709 | * __do_kmalloc - allocate memory | |
3710 | * @size: how many bytes of memory are required. | |
3711 | * @flags: the type of memory to allocate (see kmalloc). | |
3712 | * @caller: function caller for debug tracking of the caller | |
3713 | */ | |
3714 | static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, | |
3715 | unsigned long caller) | |
3716 | { | |
3717 | struct kmem_cache *cachep; | |
3718 | void *ret; | |
3719 | ||
3720 | cachep = kmalloc_slab(size, flags); | |
3721 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) | |
3722 | return cachep; | |
3723 | ret = slab_alloc(cachep, flags, caller); | |
3724 | ||
3725 | kasan_kmalloc(cachep, ret, size, flags); | |
3726 | trace_kmalloc(caller, ret, | |
3727 | size, cachep->size, flags); | |
3728 | ||
3729 | return ret; | |
3730 | } | |
3731 | ||
3732 | void *__kmalloc(size_t size, gfp_t flags) | |
3733 | { | |
3734 | return __do_kmalloc(size, flags, _RET_IP_); | |
3735 | } | |
3736 | EXPORT_SYMBOL(__kmalloc); | |
3737 | ||
3738 | void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) | |
3739 | { | |
3740 | return __do_kmalloc(size, flags, caller); | |
3741 | } | |
3742 | EXPORT_SYMBOL(__kmalloc_track_caller); | |
3743 | ||
3744 | /** | |
3745 | * kmem_cache_free - Deallocate an object | |
3746 | * @cachep: The cache the allocation was from. | |
3747 | * @objp: The previously allocated object. | |
3748 | * | |
3749 | * Free an object which was previously allocated from this | |
3750 | * cache. | |
3751 | */ | |
3752 | void kmem_cache_free(struct kmem_cache *cachep, void *objp) | |
3753 | { | |
3754 | unsigned long flags; | |
3755 | cachep = cache_from_obj(cachep, objp); | |
3756 | if (!cachep) | |
3757 | return; | |
3758 | ||
3759 | local_irq_save(flags); | |
3760 | debug_check_no_locks_freed(objp, cachep->object_size); | |
3761 | if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) | |
3762 | debug_check_no_obj_freed(objp, cachep->object_size); | |
3763 | __cache_free(cachep, objp, _RET_IP_); | |
3764 | local_irq_restore(flags); | |
3765 | ||
3766 | trace_kmem_cache_free(_RET_IP_, objp); | |
3767 | } | |
3768 | EXPORT_SYMBOL(kmem_cache_free); | |
3769 | ||
3770 | void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p) | |
3771 | { | |
3772 | struct kmem_cache *s; | |
3773 | size_t i; | |
3774 | ||
3775 | local_irq_disable(); | |
3776 | for (i = 0; i < size; i++) { | |
3777 | void *objp = p[i]; | |
3778 | ||
3779 | if (!orig_s) /* called via kfree_bulk */ | |
3780 | s = virt_to_cache(objp); | |
3781 | else | |
3782 | s = cache_from_obj(orig_s, objp); | |
3783 | ||
3784 | debug_check_no_locks_freed(objp, s->object_size); | |
3785 | if (!(s->flags & SLAB_DEBUG_OBJECTS)) | |
3786 | debug_check_no_obj_freed(objp, s->object_size); | |
3787 | ||
3788 | __cache_free(s, objp, _RET_IP_); | |
3789 | } | |
3790 | local_irq_enable(); | |
3791 | ||
3792 | /* FIXME: add tracing */ | |
3793 | } | |
3794 | EXPORT_SYMBOL(kmem_cache_free_bulk); | |
3795 | ||
3796 | /** | |
3797 | * kfree - free previously allocated memory | |
3798 | * @objp: pointer returned by kmalloc. | |
3799 | * | |
3800 | * If @objp is NULL, no operation is performed. | |
3801 | * | |
3802 | * Don't free memory not originally allocated by kmalloc() | |
3803 | * or you will run into trouble. | |
3804 | */ | |
3805 | void kfree(const void *objp) | |
3806 | { | |
3807 | struct kmem_cache *c; | |
3808 | unsigned long flags; | |
3809 | ||
3810 | trace_kfree(_RET_IP_, objp); | |
3811 | ||
3812 | if (unlikely(ZERO_OR_NULL_PTR(objp))) | |
3813 | return; | |
3814 | local_irq_save(flags); | |
3815 | kfree_debugcheck(objp); | |
3816 | c = virt_to_cache(objp); | |
3817 | debug_check_no_locks_freed(objp, c->object_size); | |
3818 | ||
3819 | debug_check_no_obj_freed(objp, c->object_size); | |
3820 | __cache_free(c, (void *)objp, _RET_IP_); | |
3821 | local_irq_restore(flags); | |
3822 | } | |
3823 | EXPORT_SYMBOL(kfree); | |
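| ||
| /* | |
|  * Illustrative sketch (editorial addition, not part of the original | |
|  * source): the usual kmalloc()/kfree() pairing. kfree() ignores NULL | |
|  * and ZERO_SIZE_PTR, so no check is needed before freeing. | |
|  * | |
|  *	char *buf = kmalloc(128, GFP_KERNEL); | |
|  * | |
|  *	if (buf) | |
|  *		strscpy(buf, "hello", 128); | |
|  *	kfree(buf); | |
|  */ | |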
3824 | ||
3825 | /* | |
3826 | * This initializes kmem_cache_node or resizes various caches for all nodes. | |
3827 | */ | |
3828 | static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp) | |
3829 | { | |
3830 | int ret; | |
3831 | int node; | |
3832 | struct kmem_cache_node *n; | |
3833 | ||
3834 | for_each_online_node(node) { | |
3835 | ret = setup_kmem_cache_node(cachep, node, gfp, true); | |
3836 | if (ret) | |
3837 | goto fail; | |
3838 | ||
3839 | } | |
3840 | ||
3841 | return 0; | |
3842 | ||
3843 | fail: | |
3844 | if (!cachep->list.next) { | |
3845 | /* Cache is not active yet. Roll back what we did */ | |
3846 | node--; | |
3847 | while (node >= 0) { | |
3848 | n = get_node(cachep, node); | |
3849 | if (n) { | |
3850 | kfree(n->shared); | |
3851 | free_alien_cache(n->alien); | |
3852 | kfree(n); | |
3853 | cachep->node[node] = NULL; | |
3854 | } | |
3855 | node--; | |
3856 | } | |
3857 | } | |
3858 | return -ENOMEM; | |
3859 | } | |
3860 | ||
3861 | /* Always called with the slab_mutex held */ | |
3862 | static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, | |
3863 | int batchcount, int shared, gfp_t gfp) | |
3864 | { | |
3865 | struct array_cache __percpu *cpu_cache, *prev; | |
3866 | int cpu; | |
3867 | ||
3868 | cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount); | |
3869 | if (!cpu_cache) | |
3870 | return -ENOMEM; | |
3871 | ||
3872 | prev = cachep->cpu_cache; | |
3873 | cachep->cpu_cache = cpu_cache; | |
3874 | /* | |
3875 | * Without a previous cpu_cache there's no need to synchronize remote | |
3876 | * cpus, so skip the IPIs. | |
3877 | */ | |
3878 | if (prev) | |
3879 | kick_all_cpus_sync(); | |
3880 | ||
3881 | check_irq_on(); | |
3882 | cachep->batchcount = batchcount; | |
3883 | cachep->limit = limit; | |
3884 | cachep->shared = shared; | |
3885 | ||
3886 | if (!prev) | |
3887 | goto setup_node; | |
3888 | ||
3889 | for_each_online_cpu(cpu) { | |
3890 | LIST_HEAD(list); | |
3891 | int node; | |
3892 | struct kmem_cache_node *n; | |
3893 | struct array_cache *ac = per_cpu_ptr(prev, cpu); | |
3894 | ||
3895 | node = cpu_to_mem(cpu); | |
3896 | n = get_node(cachep, node); | |
3897 | spin_lock_irq(&n->list_lock); | |
3898 | free_block(cachep, ac->entry, ac->avail, node, &list); | |
3899 | spin_unlock_irq(&n->list_lock); | |
3900 | slabs_destroy(cachep, &list); | |
3901 | } | |
3902 | free_percpu(prev); | |
3903 | ||
3904 | setup_node: | |
3905 | return setup_kmem_cache_nodes(cachep, gfp); | |
3906 | } | |
3907 | ||
3908 | static int do_tune_cpucache(struct kmem_cache *cachep, int limit, | |
3909 | int batchcount, int shared, gfp_t gfp) | |
3910 | { | |
3911 | int ret; | |
3912 | struct kmem_cache *c; | |
3913 | ||
3914 | ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); | |
3915 | ||
3916 | if (slab_state < FULL) | |
3917 | return ret; | |
3918 | ||
3919 | if ((ret < 0) || !is_root_cache(cachep)) | |
3920 | return ret; | |
3921 | ||
3922 | lockdep_assert_held(&slab_mutex); | |
3923 | for_each_memcg_cache(c, cachep) { | |
3924 | /* return value determined by the root cache only */ | |
3925 | __do_tune_cpucache(c, limit, batchcount, shared, gfp); | |
3926 | } | |
3927 | ||
3928 | return ret; | |
3929 | } | |
3930 | ||
3931 | /* Always called with the slab_mutex held */ | |
3932 | static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) | |
3933 | { | |
3934 | int err; | |
3935 | int limit = 0; | |
3936 | int shared = 0; | |
3937 | int batchcount = 0; | |
3938 | ||
3939 | err = cache_random_seq_create(cachep, cachep->num, gfp); | |
3940 | if (err) | |
3941 | goto end; | |
3942 | ||
3943 | if (!is_root_cache(cachep)) { | |
3944 | struct kmem_cache *root = memcg_root_cache(cachep); | |
3945 | limit = root->limit; | |
3946 | shared = root->shared; | |
3947 | batchcount = root->batchcount; | |
3948 | } | |
3949 | ||
3950 | if (limit && shared && batchcount) | |
3951 | goto skip_setup; | |
3952 | /* | |
3953 | * The head array serves three purposes: | |
3954 | * - create a LIFO ordering, i.e. return objects that are cache-warm | |
3955 | * - reduce the number of spinlock operations. | |
3956 | * - reduce the number of linked list operations on the slab and | |
3957 | * bufctl chains: array operations are cheaper. | |
3958 | * The numbers are guessed, we should auto-tune as described by | |
3959 | * Bonwick. | |
3960 | */ | |
3961 | if (cachep->size > 131072) | |
3962 | limit = 1; | |
3963 | else if (cachep->size > PAGE_SIZE) | |
3964 | limit = 8; | |
3965 | else if (cachep->size > 1024) | |
3966 | limit = 24; | |
3967 | else if (cachep->size > 256) | |
3968 | limit = 54; | |
3969 | else | |
3970 | limit = 120; | |
3971 | ||
3972 | /* | |
3973 | * CPU bound tasks (e.g. network routing) can exhibit skewed | |
3974 | * allocation behaviour: most allocs on one cpu, most free operations | |
3975 | * on another cpu. For these cases, an efficient object passing between | |
3976 | * cpus is necessary. This is provided by a shared array. The array | |
3977 | * replaces Bonwick's magazine layer. | |
3978 | * On uniprocessor, it's functionally equivalent (but less efficient) | |
3979 | * to a larger limit. Thus disabled by default. | |
3980 | */ | |
3981 | shared = 0; | |
3982 | if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) | |
3983 | shared = 8; | |
3984 | ||
3985 | #if DEBUG | |
3986 | /* | |
3987 | * With debugging enabled, a large batchcount leads to excessively long | |
3988 | * periods with local interrupts disabled. Limit the batchcount. | |
3989 | */ | |
3990 | if (limit > 32) | |
3991 | limit = 32; | |
3992 | #endif | |
3993 | batchcount = (limit + 1) / 2; | |
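| /* | |
|  * Worked example (editorial note): with DEBUG off, a 512-byte object | |
|  * cache on an SMP machine gets limit = 54 (256 < size <= 1024), | |
|  * shared = 8 (size <= PAGE_SIZE), and batchcount = (54 + 1) / 2 = 27. | |
|  */ | |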
3994 | skip_setup: | |
3995 | err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); | |
3996 | end: | |
3997 | if (err) | |
3998 | pr_err("enable_cpucache failed for %s, error %d\n", | |
3999 | cachep->name, -err); | |
4000 | return err; | |
4001 | } | |
4002 | ||
4003 | /* | |
4004 | * Drain an array if it contains any elements, taking the node lock only if | |
4005 | * necessary. Note that the node listlock also protects the array_cache | |
4006 | * if drain_array() is used on the shared array. | |
4007 | */ | |
4008 | static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, | |
4009 | struct array_cache *ac, int node) | |
4010 | { | |
4011 | LIST_HEAD(list); | |
4012 | ||
4013 | /* ac from n->shared can be freed if we don't hold the slab_mutex. */ | |
4014 | check_mutex_acquired(); | |
4015 | ||
4016 | if (!ac || !ac->avail) | |
4017 | return; | |
4018 | ||
4019 | if (ac->touched) { | |
4020 | ac->touched = 0; | |
4021 | return; | |
4022 | } | |
4023 | ||
4024 | spin_lock_irq(&n->list_lock); | |
4025 | drain_array_locked(cachep, ac, node, false, &list); | |
4026 | spin_unlock_irq(&n->list_lock); | |
4027 | ||
4028 | slabs_destroy(cachep, &list); | |
4029 | } | |
4030 | ||
4031 | /** | |
4032 | * cache_reap - Reclaim memory from caches. | |
4033 | * @w: work descriptor | |
4034 | * | |
4035 | * Called from workqueue/eventd every few seconds. | |
4036 | * Purpose: | |
4037 | * - clear the per-cpu caches for this CPU. | |
4038 | * - return freeable pages to the main free memory pool. | |
4039 | * | |
4040 | * If we cannot acquire the cache chain mutex then just give up - we'll try | |
4041 | * again on the next iteration. | |
4042 | */ | |
4043 | static void cache_reap(struct work_struct *w) | |
4044 | { | |
4045 | struct kmem_cache *searchp; | |
4046 | struct kmem_cache_node *n; | |
4047 | int node = numa_mem_id(); | |
4048 | struct delayed_work *work = to_delayed_work(w); | |
4049 | ||
4050 | if (!mutex_trylock(&slab_mutex)) | |
4051 | /* Give up. Setup the next iteration. */ | |
4052 | goto out; | |
4053 | ||
4054 | list_for_each_entry(searchp, &slab_caches, list) { | |
4055 | check_irq_on(); | |
4056 | ||
4057 | /* | |
4058 | * We only take the node lock if absolutely necessary and we | |
4059 | * have established with reasonable certainty that | |
4060 | * we can do some work if the lock was obtained. | |
4061 | */ | |
4062 | n = get_node(searchp, node); | |
4063 | ||
4064 | reap_alien(searchp, n); | |
4065 | ||
4066 | drain_array(searchp, n, cpu_cache_get(searchp), node); | |
4067 | ||
4068 | /* | |
4069 | * These are racy checks but it does not matter | |
4070 | * if we skip one check or scan twice. | |
4071 | */ | |
4072 | if (time_after(n->next_reap, jiffies)) | |
4073 | goto next; | |
4074 | ||
4075 | n->next_reap = jiffies + REAPTIMEOUT_NODE; | |
4076 | ||
4077 | drain_array(searchp, n, n->shared, node); | |
4078 | ||
4079 | if (n->free_touched) | |
4080 | n->free_touched = 0; | |
4081 | else { | |
4082 | int freed; | |
4083 | ||
4084 | freed = drain_freelist(searchp, n, (n->free_limit + | |
4085 | 5 * searchp->num - 1) / (5 * searchp->num)); | |
4086 | STATS_ADD_REAPED(searchp, freed); | |
4087 | } | |
4088 | next: | |
4089 | cond_resched(); | |
4090 | } | |
4091 | check_irq_on(); | |
4092 | mutex_unlock(&slab_mutex); | |
4093 | next_reap_node(); | |
4094 | out: | |
4095 | /* Set up the next iteration */ | |
4096 | schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC)); | |
4097 | } | |
4098 | ||
4099 | #ifdef CONFIG_SLABINFO | |
4100 | void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | |
4101 | { | |
4102 | unsigned long active_objs, num_objs, active_slabs; | |
4103 | unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0; | |
4104 | unsigned long free_slabs = 0; | |
4105 | int node; | |
4106 | struct kmem_cache_node *n; | |
4107 | ||
4108 | for_each_kmem_cache_node(cachep, node, n) { | |
4109 | check_irq_on(); | |
4110 | spin_lock_irq(&n->list_lock); | |
4111 | ||
4112 | total_slabs += n->total_slabs; | |
4113 | free_slabs += n->free_slabs; | |
4114 | free_objs += n->free_objects; | |
4115 | ||
4116 | if (n->shared) | |
4117 | shared_avail += n->shared->avail; | |
4118 | ||
4119 | spin_unlock_irq(&n->list_lock); | |
4120 | } | |
4121 | num_objs = total_slabs * cachep->num; | |
4122 | active_slabs = total_slabs - free_slabs; | |
4123 | active_objs = num_objs - free_objs; | |
4124 | ||
4125 | sinfo->active_objs = active_objs; | |
4126 | sinfo->num_objs = num_objs; | |
4127 | sinfo->active_slabs = active_slabs; | |
4128 | sinfo->num_slabs = total_slabs; | |
4129 | sinfo->shared_avail = shared_avail; | |
4130 | sinfo->limit = cachep->limit; | |
4131 | sinfo->batchcount = cachep->batchcount; | |
4132 | sinfo->shared = cachep->shared; | |
4133 | sinfo->objects_per_slab = cachep->num; | |
4134 | sinfo->cache_order = cachep->gfporder; | |
4135 | } | |
4136 | ||
4137 | void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) | |
4138 | { | |
4139 | #if STATS | |
4140 | { /* node stats */ | |
4141 | unsigned long high = cachep->high_mark; | |
4142 | unsigned long allocs = cachep->num_allocations; | |
4143 | unsigned long grown = cachep->grown; | |
4144 | unsigned long reaped = cachep->reaped; | |
4145 | unsigned long errors = cachep->errors; | |
4146 | unsigned long max_freeable = cachep->max_freeable; | |
4147 | unsigned long node_allocs = cachep->node_allocs; | |
4148 | unsigned long node_frees = cachep->node_frees; | |
4149 | unsigned long overflows = cachep->node_overflow; | |
4150 | ||
4151 | seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu", | |
4152 | allocs, high, grown, | |
4153 | reaped, errors, max_freeable, node_allocs, | |
4154 | node_frees, overflows); | |
4155 | } | |
4156 | /* cpu stats */ | |
4157 | { | |
4158 | unsigned long allochit = atomic_read(&cachep->allochit); | |
4159 | unsigned long allocmiss = atomic_read(&cachep->allocmiss); | |
4160 | unsigned long freehit = atomic_read(&cachep->freehit); | |
4161 | unsigned long freemiss = atomic_read(&cachep->freemiss); | |
4162 | ||
4163 | seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", | |
4164 | allochit, allocmiss, freehit, freemiss); | |
4165 | } | |
4166 | #endif | |
4167 | } | |
4168 | ||
4169 | #define MAX_SLABINFO_WRITE 128 | |
4170 | /** | |
4171 | * slabinfo_write - Tuning for the slab allocator | |
4172 | * @file: unused | |
4173 | * @buffer: user buffer | |
4174 | * @count: data length | |
4175 | * @ppos: unused | |
4176 | */ | |
4177 | ssize_t slabinfo_write(struct file *file, const char __user *buffer, | |
4178 | size_t count, loff_t *ppos) | |
4179 | { | |
4180 | char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; | |
4181 | int limit, batchcount, shared, res; | |
4182 | struct kmem_cache *cachep; | |
4183 | ||
4184 | if (count > MAX_SLABINFO_WRITE) | |
4185 | return -EINVAL; | |
4186 | if (copy_from_user(&kbuf, buffer, count)) | |
4187 | return -EFAULT; | |
4188 | kbuf[MAX_SLABINFO_WRITE] = '\0'; | |
4189 | ||
4190 | tmp = strchr(kbuf, ' '); | |
4191 | if (!tmp) | |
4192 | return -EINVAL; | |
4193 | *tmp = '\0'; | |
4194 | tmp++; | |
4195 | if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) | |
4196 | return -EINVAL; | |
4197 | ||
4198 | /* Find the cache in the chain of caches. */ | |
4199 | mutex_lock(&slab_mutex); | |
4200 | res = -EINVAL; | |
4201 | list_for_each_entry(cachep, &slab_caches, list) { | |
4202 | if (!strcmp(cachep->name, kbuf)) { | |
4203 | if (limit < 1 || batchcount < 1 || | |
4204 | batchcount > limit || shared < 0) { | |
4205 | res = 0; | |
4206 | } else { | |
4207 | res = do_tune_cpucache(cachep, limit, | |
4208 | batchcount, shared, | |
4209 | GFP_KERNEL); | |
4210 | } | |
4211 | break; | |
4212 | } | |
4213 | } | |
4214 | mutex_unlock(&slab_mutex); | |
4215 | if (res >= 0) | |
4216 | res = count; | |
4217 | return res; | |
4218 | } | |
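| ||
| /* | |
|  * Illustrative sketch (editorial addition, not part of the original | |
|  * source): tuning a cache from user space by writing | |
|  * "<name> <limit> <batchcount> <shared>" to /proc/slabinfo, here from a | |
|  * small helper program (cache name and values are only examples): | |
|  * | |
|  *	int fd = open("/proc/slabinfo", O_WRONLY); | |
|  * | |
|  *	if (fd >= 0) { | |
|  *		static const char cmd[] = "dentry 120 60 8"; | |
|  * | |
|  *		write(fd, cmd, sizeof(cmd) - 1); | |
|  *		close(fd); | |
|  *	} | |
|  */ | |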
4219 | ||
4220 | #ifdef CONFIG_DEBUG_SLAB_LEAK | |
4221 | ||
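| /* | |
|  * Editorial note on the table used by add_caller() and handle_slab() | |
|  * below: n[0] holds the capacity of the table, n[1] the number of | |
|  * entries in use, and n[2..] holds (caller address, hit count) pairs | |
|  * kept sorted by address so a caller can be found by binary search. | |
|  * add_caller() returns 0 once the table is full, which makes | |
|  * leaks_show() retry with a larger buffer. | |
|  */ | |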
4222 | static inline int add_caller(unsigned long *n, unsigned long v) | |
4223 | { | |
4224 | unsigned long *p; | |
4225 | int l; | |
4226 | if (!v) | |
4227 | return 1; | |
4228 | l = n[1]; | |
4229 | p = n + 2; | |
4230 | while (l) { | |
4231 | int i = l/2; | |
4232 | unsigned long *q = p + 2 * i; | |
4233 | if (*q == v) { | |
4234 | q[1]++; | |
4235 | return 1; | |
4236 | } | |
4237 | if (*q > v) { | |
4238 | l = i; | |
4239 | } else { | |
4240 | p = q + 2; | |
4241 | l -= i + 1; | |
4242 | } | |
4243 | } | |
4244 | if (++n[1] == n[0]) | |
4245 | return 0; | |
4246 | memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n)); | |
4247 | p[0] = v; | |
4248 | p[1] = 1; | |
4249 | return 1; | |
4250 | } | |
4251 | ||
4252 | static void handle_slab(unsigned long *n, struct kmem_cache *c, | |
4253 | struct page *page) | |
4254 | { | |
4255 | void *p; | |
4256 | int i, j; | |
4257 | unsigned long v; | |
4258 | ||
4259 | if (n[0] == n[1]) | |
4260 | return; | |
4261 | for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) { | |
4262 | bool active = true; | |
4263 | ||
4264 | for (j = page->active; j < c->num; j++) { | |
4265 | if (get_free_obj(page, j) == i) { | |
4266 | active = false; | |
4267 | break; | |
4268 | } | |
4269 | } | |
4270 | ||
4271 | if (!active) | |
4272 | continue; | |
4273 | ||
4274 | /* | |
4275 | * probe_kernel_read() is used for DEBUG_PAGEALLOC: the page table | |
4276 | * mapping is only established when the object is actually allocated, | |
4277 | * so we could otherwise mistakenly access an unmapped object still | |
4278 | * sitting in the cpu cache. | |
4279 | */ | |
4280 | if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v))) | |
4281 | continue; | |
4282 | ||
4283 | if (!add_caller(n, v)) | |
4284 | return; | |
4285 | } | |
4286 | } | |
4287 | ||
4288 | static void show_symbol(struct seq_file *m, unsigned long address) | |
4289 | { | |
4290 | #ifdef CONFIG_KALLSYMS | |
4291 | unsigned long offset, size; | |
4292 | char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN]; | |
4293 | ||
4294 | if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) { | |
4295 | seq_printf(m, "%s+%#lx/%#lx", name, offset, size); | |
4296 | if (modname[0]) | |
4297 | seq_printf(m, " [%s]", modname); | |
4298 | return; | |
4299 | } | |
4300 | #endif | |
4301 | seq_printf(m, "%p", (void *)address); | |
4302 | } | |
4303 | ||
4304 | static int leaks_show(struct seq_file *m, void *p) | |
4305 | { | |
4306 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); | |
4307 | struct page *page; | |
4308 | struct kmem_cache_node *n; | |
4309 | const char *name; | |
4310 | unsigned long *x = m->private; | |
4311 | int node; | |
4312 | int i; | |
4313 | ||
4314 | if (!(cachep->flags & SLAB_STORE_USER)) | |
4315 | return 0; | |
4316 | if (!(cachep->flags & SLAB_RED_ZONE)) | |
4317 | return 0; | |
4318 | ||
4319 | /* | |
4320 | * Set store_user_clean and start to grab stored user information | |
4321 | * for all objects on this cache. If alloc/free requests come in | |
4322 | * during the processing, the information would be wrong, so restart | |
4323 | * the whole processing. | |
4324 | */ | |
4325 | do { | |
4326 | set_store_user_clean(cachep); | |
4327 | drain_cpu_caches(cachep); | |
4328 | ||
4329 | x[1] = 0; | |
4330 | ||
4331 | for_each_kmem_cache_node(cachep, node, n) { | |
4332 | ||
4333 | check_irq_on(); | |
4334 | spin_lock_irq(&n->list_lock); | |
4335 | ||
4336 | list_for_each_entry(page, &n->slabs_full, lru) | |
4337 | handle_slab(x, cachep, page); | |
4338 | list_for_each_entry(page, &n->slabs_partial, lru) | |
4339 | handle_slab(x, cachep, page); | |
4340 | spin_unlock_irq(&n->list_lock); | |
4341 | } | |
4342 | } while (!is_store_user_clean(cachep)); | |
4343 | ||
4344 | name = cachep->name; | |
4345 | if (x[0] == x[1]) { | |
4346 | /* Increase the buffer size */ | |
4347 | mutex_unlock(&slab_mutex); | |
4348 | m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL); | |
4349 | if (!m->private) { | |
4350 | /* Too bad, we are really out */ | |
4351 | m->private = x; | |
4352 | mutex_lock(&slab_mutex); | |
4353 | return -ENOMEM; | |
4354 | } | |
4355 | *(unsigned long *)m->private = x[0] * 2; | |
4356 | kfree(x); | |
4357 | mutex_lock(&slab_mutex); | |
4358 | /* Now make sure this entry will be retried */ | |
4359 | m->count = m->size; | |
4360 | return 0; | |
4361 | } | |
4362 | for (i = 0; i < x[1]; i++) { | |
4363 | seq_printf(m, "%s: %lu ", name, x[2*i+3]); | |
4364 | show_symbol(m, x[2*i+2]); | |
4365 | seq_putc(m, '\n'); | |
4366 | } | |
4367 | ||
4368 | return 0; | |
4369 | } | |
4370 | ||
4371 | static const struct seq_operations slabstats_op = { | |
4372 | .start = slab_start, | |
4373 | .next = slab_next, | |
4374 | .stop = slab_stop, | |
4375 | .show = leaks_show, | |
4376 | }; | |
4377 | ||
4378 | static int slabstats_open(struct inode *inode, struct file *file) | |
4379 | { | |
4380 | unsigned long *n; | |
4381 | ||
4382 | n = __seq_open_private(file, &slabstats_op, PAGE_SIZE); | |
4383 | if (!n) | |
4384 | return -ENOMEM; | |
4385 | ||
4386 | *n = PAGE_SIZE / (2 * sizeof(unsigned long)); | |
4387 | ||
4388 | return 0; | |
4389 | } | |
4390 | ||
4391 | static const struct file_operations proc_slabstats_operations = { | |
4392 | .open = slabstats_open, | |
4393 | .read = seq_read, | |
4394 | .llseek = seq_lseek, | |
4395 | .release = seq_release_private, | |
4396 | }; | |
4397 | #endif | |
4398 | ||
4399 | static int __init slab_proc_init(void) | |
4400 | { | |
4401 | #ifdef CONFIG_DEBUG_SLAB_LEAK | |
4402 | proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations); | |
4403 | #endif | |
4404 | return 0; | |
4405 | } | |
4406 | module_init(slab_proc_init); | |
4407 | #endif | |
4408 | ||
4409 | #ifdef CONFIG_HARDENED_USERCOPY | |
4410 | /* | |
4411 | * Rejects objects that are incorrectly sized. | |
4412 | * | |
4413 | * Returns NULL if the check passes, otherwise a const char * pointing to | |
4414 | * the name of the cache to indicate an error. | |
4415 | */ | |
4416 | const char *__check_heap_object(const void *ptr, unsigned long n, | |
4417 | struct page *page) | |
4418 | { | |
4419 | struct kmem_cache *cachep; | |
4420 | unsigned int objnr; | |
4421 | unsigned long offset; | |
4422 | ||
4423 | /* Find and validate object. */ | |
4424 | cachep = page->slab_cache; | |
4425 | objnr = obj_to_index(cachep, page, (void *)ptr); | |
4426 | BUG_ON(objnr >= cachep->num); | |
4427 | ||
4428 | /* Find offset within object. */ | |
4429 | offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep); | |
4430 | ||
4431 | /* Allow address range falling entirely within object size. */ | |
4432 | if (offset <= cachep->object_size && n <= cachep->object_size - offset) | |
4433 | return NULL; | |
4434 | ||
4435 | return cachep->name; | |
4436 | } | |
4437 | #endif /* CONFIG_HARDENED_USERCOPY */ | |
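| ||
| /* | |
|  * Worked example (editorial note) for the check above: with an | |
|  * object_size of 192 bytes and a copy starting 100 bytes into the | |
|  * object, copies of up to 92 bytes pass (offset + n <= object_size), | |
|  * while anything longer returns the cache name and is rejected by the | |
|  * hardened usercopy code. | |
|  */ | |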
4438 | ||
4439 | /** | |
4440 | * ksize - get the actual amount of memory allocated for a given object | |
4441 | * @objp: Pointer to the object | |
4442 | * | |
4443 | * kmalloc may internally round up allocations and return more memory | |
4444 | * than requested. ksize() can be used to determine the actual amount of | |
4445 | * memory allocated. The caller may use this additional memory, even though | |
4446 | * a smaller amount of memory was initially specified with the kmalloc call. | |
4447 | * The caller must guarantee that objp points to a valid object previously | |
4448 | * allocated with either kmalloc() or kmem_cache_alloc(). The object | |
4449 | * must not be freed during the duration of the call. | |
4450 | */ | |
4451 | size_t ksize(const void *objp) | |
4452 | { | |
4453 | size_t size; | |
4454 | ||
4455 | BUG_ON(!objp); | |
4456 | if (unlikely(objp == ZERO_SIZE_PTR)) | |
4457 | return 0; | |
4458 | ||
4459 | size = virt_to_cache(objp)->object_size; | |
4460 | /* We assume that ksize callers could use the whole allocated area, | |
4461 | * so we need to unpoison this area. | |
4462 | */ | |
4463 | kasan_unpoison_shadow(objp, size); | |
4464 | ||
4465 | return size; | |
4466 | } | |
4467 | EXPORT_SYMBOL(ksize); |
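| ||
| /* | |
|  * Illustrative sketch (editorial addition, not part of the original | |
|  * source): kmalloc() rounds a request up to the nearest kmalloc cache, | |
|  * and ksize() reports the usable size (typically 32 in this case), | |
|  * which the caller may use in full. | |
|  * | |
|  *	char *p = kmalloc(30, GFP_KERNEL); | |
|  * | |
|  *	if (p) { | |
|  *		size_t usable = ksize(p); | |
|  * | |
|  *		memset(p, 0, usable); | |
|  *		kfree(p); | |
|  *	} | |
|  */ | |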