mm/slab.c (blame view at commit "slab: fix the type of the index on freelist index accessor")
1da177e4
LT
1/*
2 * linux/mm/slab.c
3 * Written by Mark Hemment, 1996/97.
4 * (markhe@nextd.demon.co.uk)
5 *
6 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7 *
8 * Major cleanup, different bufctl logic, per-cpu arrays
9 * (c) 2000 Manfred Spraul
10 *
11 * Cleanup, make the head arrays unconditional, preparation for NUMA
12 * (c) 2002 Manfred Spraul
13 *
14 * An implementation of the Slab Allocator as described in outline in;
15 * UNIX Internals: The New Frontiers by Uresh Vahalia
16 * Pub: Prentice Hall ISBN 0-13-101908-2
17 * or with a little more detail in;
18 * The Slab Allocator: An Object-Caching Kernel Memory Allocator
19 * Jeff Bonwick (Sun Microsystems).
20 * Presented at: USENIX Summer 1994 Technical Conference
21 *
22 * The memory is organized in caches, one cache for each object type.
23 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24 * Each cache consists of many slabs (they are small (usually one
25 * page long) and always contiguous), and each slab contains multiple
26 * initialized objects.
27 *
28 * This means that your constructor is used only for newly allocated
183ff22b 29 * slabs and you must pass objects with the same initializations to
1da177e4
LT
30 * kmem_cache_free.
31 *
32 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33 * normal). If you need a special memory type, then you must create a new
34 * cache for that memory type.
35 *
36 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37 * full slabs with 0 free objects
38 * partial slabs
39 * empty slabs with no allocated objects
40 *
41 * If partial slabs exist, then new allocations come from these slabs,
42 * otherwise they come from empty slabs, or new slabs are allocated.
43 *
44 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46 *
47 * Each cache has a short per-cpu head array, most allocs
48 * and frees go into that array, and if that array overflows, then 1/2
49 * of the entries in the array are given back into the global cache.
50 * The head array is strictly LIFO and should improve the cache hit rates.
51 * On SMP, it additionally reduces the spinlock operations.
52 *
a737b3e2 53 * The c_cpuarray may not be read with enabled local interrupts -
1da177e4
LT
54 * it's changed with a smp_call_function().
55 *
56 * SMP synchronization:
57 * constructors and destructors are called without any locking.
343e0d7a 58 * Several members in struct kmem_cache and struct slab never change, they
1da177e4
LT
59 * are accessed without any locking.
60 * The per-cpu arrays are never accessed from the wrong cpu, no locking,
61 * and local interrupts are disabled so slab code is preempt-safe.
62 * The non-constant members are protected with a per-cache irq spinlock.
63 *
64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65 * in 2000 - many ideas in the current implementation are derived from
66 * his patch.
67 *
68 * Further notes from the original documentation:
69 *
70 * 11 April '97. Started multi-threading - markhe
18004c5d 71 * The global cache-chain is protected by the mutex 'slab_mutex'.
1da177e4
LT
72 * The mutex is only needed when accessing/extending the cache-chain, which
73 * can never happen inside an interrupt (kmem_cache_create(),
74 * kmem_cache_shrink() and kmem_cache_reap()).
75 *
76 * At present, each engine can be growing a cache. This should be blocked.
77 *
e498be7d
CL
78 * 15 March 2005. NUMA slab allocator.
79 * Shai Fultheim <shai@scalex86.org>.
80 * Shobhit Dayal <shobhit@calsoftinc.com>
81 * Alok N Kataria <alokk@calsoftinc.com>
82 * Christoph Lameter <christoph@lameter.com>
83 *
84 * Modified the slab allocator to be node aware on NUMA systems.
85 * Each node has its own list of partial, free and full slabs.
86 * All object allocations for a node occur from node specific slab lists.
1da177e4
LT
87 */
88
1da177e4
LT
89#include <linux/slab.h>
90#include <linux/mm.h>
c9cf5528 91#include <linux/poison.h>
1da177e4
LT
92#include <linux/swap.h>
93#include <linux/cache.h>
94#include <linux/interrupt.h>
95#include <linux/init.h>
96#include <linux/compiler.h>
101a5001 97#include <linux/cpuset.h>
a0ec95a8 98#include <linux/proc_fs.h>
1da177e4
LT
99#include <linux/seq_file.h>
100#include <linux/notifier.h>
101#include <linux/kallsyms.h>
102#include <linux/cpu.h>
103#include <linux/sysctl.h>
104#include <linux/module.h>
105#include <linux/rcupdate.h>
543537bd 106#include <linux/string.h>
138ae663 107#include <linux/uaccess.h>
e498be7d 108#include <linux/nodemask.h>
d5cff635 109#include <linux/kmemleak.h>
dc85da15 110#include <linux/mempolicy.h>
fc0abb14 111#include <linux/mutex.h>
8a8b6502 112#include <linux/fault-inject.h>
e7eebaf6 113#include <linux/rtmutex.h>
6a2d7a95 114#include <linux/reciprocal_div.h>
3ac7fe5a 115#include <linux/debugobjects.h>
c175eea4 116#include <linux/kmemcheck.h>
8f9f8d9e 117#include <linux/memory.h>
268bb0ce 118#include <linux/prefetch.h>
1da177e4 119
381760ea
MG
120#include <net/sock.h>
121
1da177e4
LT
122#include <asm/cacheflush.h>
123#include <asm/tlbflush.h>
124#include <asm/page.h>
125
4dee6b64
SR
126#include <trace/events/kmem.h>
127
072bb0aa
MG
128#include "internal.h"
129
b9ce5ef4
GC
130#include "slab.h"
131
1da177e4 132/*
50953fe9 133 * DEBUG - 1 for kmem_cache_create() to honour SLAB_RED_ZONE & SLAB_POISON.
1da177e4
LT
134 * 0 for faster, smaller code (especially in the critical paths).
135 *
136 * STATS - 1 to collect stats for /proc/slabinfo.
137 * 0 for faster, smaller code (especially in the critical paths).
138 *
139 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
140 */
141
142#ifdef CONFIG_DEBUG_SLAB
143#define DEBUG 1
144#define STATS 1
145#define FORCED_DEBUG 1
146#else
147#define DEBUG 0
148#define STATS 0
149#define FORCED_DEBUG 0
150#endif
151
1da177e4
LT
152/* Shouldn't this be in a header file somewhere? */
153#define BYTES_PER_WORD sizeof(void *)
87a927c7 154#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
1da177e4 155
1da177e4
LT
156#ifndef ARCH_KMALLOC_FLAGS
157#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
158#endif
159
f315e3fa
JK
160#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
161 <= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
162
163#if FREELIST_BYTE_INDEX
164typedef unsigned char freelist_idx_t;
165#else
166typedef unsigned short freelist_idx_t;
167#endif
168
169#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)
170
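/*
 * Editor's note: the standalone userspace sketch below (not part of the
 * kernel build) walks the index-sizing decision above.  PAGE_SIZE,
 * BITS_PER_BYTE and SLAB_OBJ_MIN_SIZE are assumed example values; the
 * real ones come from the architecture and from mm/slab.h.
 */
#include <stdio.h>

#define SKETCH_PAGE_SIZE	4096UL	/* assumed */
#define SKETCH_BITS_PER_BYTE	8
#define SKETCH_OBJ_MIN_SIZE	16UL	/* assumed */

int main(void)
{
	/*
	 * PAGE_SIZE >> BITS_PER_BYTE is PAGE_SIZE / 256: if even the
	 * smallest object cannot appear more than 256 times in a single
	 * page, a one-byte index is enough to address every object.
	 */
	int byte_index = (SKETCH_PAGE_SIZE >> SKETCH_BITS_PER_BYTE)
				<= SKETCH_OBJ_MIN_SIZE;
	unsigned long max_num = 1UL <<
		((byte_index ? 1 : 2) * SKETCH_BITS_PER_BYTE);

	printf("freelist_idx_t = %s, SLAB_OBJ_MAX_NUM = %lu\n",
	       byte_index ? "unsigned char" : "unsigned short", max_num);
	return 0;
}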
072bb0aa
MG
171/*
172 * true if a page was allocated from pfmemalloc reserves for network-based
173 * swap
174 */
175static bool pfmemalloc_active __read_mostly;
176
1da177e4
LT
177/*
178 * struct array_cache
179 *
1da177e4
LT
180 * Purpose:
181 * - LIFO ordering, to hand out cache-warm objects from _alloc
182 * - reduce the number of linked list operations
183 * - reduce spinlock operations
184 *
185 * The limit is stored in the per-cpu structure to reduce the data cache
186 * footprint.
187 *
188 */
189struct array_cache {
190 unsigned int avail;
191 unsigned int limit;
192 unsigned int batchcount;
193 unsigned int touched;
e498be7d 194 spinlock_t lock;
bda5b655 195 void *entry[]; /*
a737b3e2
AM
196 * Must have this definition in here for the proper
197 * alignment of array_cache. Also simplifies accessing
198 * the entries.
072bb0aa
MG
199 *
200 * Entries should not be directly dereferenced as
201 * entries belonging to slabs marked pfmemalloc will
202 * have the lower bits set SLAB_OBJ_PFMEMALLOC
a737b3e2 203 */
1da177e4
LT
204};
205
072bb0aa
MG
206#define SLAB_OBJ_PFMEMALLOC 1
207static inline bool is_obj_pfmemalloc(void *objp)
208{
209 return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
210}
211
212static inline void set_obj_pfmemalloc(void **objp)
213{
214 *objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
215 return;
216}
217
218static inline void clear_obj_pfmemalloc(void **objp)
219{
220 *objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
221}
222
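/*
 * Editor's note: standalone sketch (not part of the kernel build) of the
 * pointer tagging used by the helpers above.  It works because slab
 * objects are at least word-aligned, so bit 0 of a valid object address
 * is always clear and can carry the PFMEMALLOC flag.
 */
#include <assert.h>
#include <stdlib.h>

#define SKETCH_PFMEMALLOC 1UL

int main(void)
{
	void *obj = malloc(64);			/* stand-in for a slab object */
	void *tagged;

	tagged = (void *)((unsigned long)obj | SKETCH_PFMEMALLOC);	/* set_obj_pfmemalloc() */
	assert((unsigned long)tagged & SKETCH_PFMEMALLOC);		/* is_obj_pfmemalloc() */
	assert((void *)((unsigned long)tagged & ~SKETCH_PFMEMALLOC) == obj); /* clear_obj_pfmemalloc() */

	free(obj);
	return 0;
}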
a737b3e2
AM
223/*
224 * bootstrap: The caches do not work without cpuarrays anymore, but the
225 * cpuarrays are allocated from the generic caches...
1da177e4
LT
226 */
227#define BOOT_CPUCACHE_ENTRIES 1
228struct arraycache_init {
229 struct array_cache cache;
b28a02de 230 void *entries[BOOT_CPUCACHE_ENTRIES];
1da177e4
LT
231};
232
e498be7d
CL
233/*
234 * Need this for bootstrapping a per node allocator.
235 */
556a169d 236#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
ce8eb6c4 237static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
e498be7d 238#define CACHE_CACHE 0
556a169d 239#define SIZE_AC MAX_NUMNODES
ce8eb6c4 240#define SIZE_NODE (2 * MAX_NUMNODES)
e498be7d 241
ed11d9eb 242static int drain_freelist(struct kmem_cache *cache,
ce8eb6c4 243 struct kmem_cache_node *n, int tofree);
ed11d9eb
CL
244static void free_block(struct kmem_cache *cachep, void **objpp, int len,
245 int node);
83b519e8 246static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
65f27f38 247static void cache_reap(struct work_struct *unused);
ed11d9eb 248
e0a42726
IM
249static int slab_early_init = 1;
250
e3366016 251#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
ce8eb6c4 252#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
1da177e4 253
ce8eb6c4 254static void kmem_cache_node_init(struct kmem_cache_node *parent)
e498be7d
CL
255{
256 INIT_LIST_HEAD(&parent->slabs_full);
257 INIT_LIST_HEAD(&parent->slabs_partial);
258 INIT_LIST_HEAD(&parent->slabs_free);
259 parent->shared = NULL;
260 parent->alien = NULL;
2e1217cf 261 parent->colour_next = 0;
e498be7d
CL
262 spin_lock_init(&parent->list_lock);
263 parent->free_objects = 0;
264 parent->free_touched = 0;
265}
266
a737b3e2
AM
267#define MAKE_LIST(cachep, listp, slab, nodeid) \
268 do { \
269 INIT_LIST_HEAD(listp); \
6a67368c 270 list_splice(&(cachep->node[nodeid]->slab), listp); \
e498be7d
CL
271 } while (0)
272
a737b3e2
AM
273#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
274 do { \
e498be7d
CL
275 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
276 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
277 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
278 } while (0)
1da177e4 279
1da177e4
LT
280#define CFLGS_OFF_SLAB (0x80000000UL)
281#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
282
283#define BATCHREFILL_LIMIT 16
a737b3e2
AM
284/*
285 * Optimization question: fewer reaps mean a lower probability of unnecessary
286 * cpucache drain/refill cycles.
1da177e4 287 *
dc6f3f27 288 * OTOH the cpuarrays can contain lots of objects,
1da177e4
LT
289 * which could lock up otherwise freeable slabs.
290 */
5f0985bb
JZ
291#define REAPTIMEOUT_AC (2*HZ)
292#define REAPTIMEOUT_NODE (4*HZ)
1da177e4
LT
293
294#if STATS
295#define STATS_INC_ACTIVE(x) ((x)->num_active++)
296#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
297#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
298#define STATS_INC_GROWN(x) ((x)->grown++)
ed11d9eb 299#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
a737b3e2
AM
300#define STATS_SET_HIGH(x) \
301 do { \
302 if ((x)->num_active > (x)->high_mark) \
303 (x)->high_mark = (x)->num_active; \
304 } while (0)
1da177e4
LT
305#define STATS_INC_ERR(x) ((x)->errors++)
306#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
e498be7d 307#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
fb7faf33 308#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
a737b3e2
AM
309#define STATS_SET_FREEABLE(x, i) \
310 do { \
311 if ((x)->max_freeable < i) \
312 (x)->max_freeable = i; \
313 } while (0)
1da177e4
LT
314#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
315#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
316#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
317#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
318#else
319#define STATS_INC_ACTIVE(x) do { } while (0)
320#define STATS_DEC_ACTIVE(x) do { } while (0)
321#define STATS_INC_ALLOCED(x) do { } while (0)
322#define STATS_INC_GROWN(x) do { } while (0)
4e60c86b 323#define STATS_ADD_REAPED(x,y) do { (void)(y); } while (0)
1da177e4
LT
324#define STATS_SET_HIGH(x) do { } while (0)
325#define STATS_INC_ERR(x) do { } while (0)
326#define STATS_INC_NODEALLOCS(x) do { } while (0)
e498be7d 327#define STATS_INC_NODEFREES(x) do { } while (0)
fb7faf33 328#define STATS_INC_ACOVERFLOW(x) do { } while (0)
a737b3e2 329#define STATS_SET_FREEABLE(x, i) do { } while (0)
1da177e4
LT
330#define STATS_INC_ALLOCHIT(x) do { } while (0)
331#define STATS_INC_ALLOCMISS(x) do { } while (0)
332#define STATS_INC_FREEHIT(x) do { } while (0)
333#define STATS_INC_FREEMISS(x) do { } while (0)
334#endif
335
336#if DEBUG
1da177e4 337
a737b3e2
AM
338/*
339 * memory layout of objects:
1da177e4 340 * 0 : objp
3dafccf2 341 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
1da177e4
LT
342 * the end of an object is aligned with the end of the real
343 * allocation. Catches writes behind the end of the allocation.
3dafccf2 344 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
1da177e4 345 * redzone word.
3dafccf2 346 * cachep->obj_offset: The real object.
3b0efdfa
CL
347 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
348 * cachep->size - 1* BYTES_PER_WORD: last caller address
a737b3e2 349 * [BYTES_PER_WORD long]
1da177e4 350 */
343e0d7a 351static int obj_offset(struct kmem_cache *cachep)
1da177e4 352{
3dafccf2 353 return cachep->obj_offset;
1da177e4
LT
354}
355
b46b8f19 356static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
1da177e4
LT
357{
358 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
b46b8f19
DW
359 return (unsigned long long*) (objp + obj_offset(cachep) -
360 sizeof(unsigned long long));
1da177e4
LT
361}
362
b46b8f19 363static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
1da177e4
LT
364{
365 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
366 if (cachep->flags & SLAB_STORE_USER)
3b0efdfa 367 return (unsigned long long *)(objp + cachep->size -
b46b8f19 368 sizeof(unsigned long long) -
87a927c7 369 REDZONE_ALIGN);
3b0efdfa 370 return (unsigned long long *) (objp + cachep->size -
b46b8f19 371 sizeof(unsigned long long));
1da177e4
LT
372}
373
343e0d7a 374static void **dbg_userword(struct kmem_cache *cachep, void *objp)
1da177e4
LT
375{
376 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
3b0efdfa 377 return (void **)(objp + cachep->size - BYTES_PER_WORD);
1da177e4
LT
378}
379
380#else
381
3dafccf2 382#define obj_offset(x) 0
b46b8f19
DW
383#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
384#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
1da177e4
LT
385#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
386
387#endif
388
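/*
 * Editor's note: standalone sketch (not part of the kernel build) that
 * prints where the debug accessors above land for an assumed geometry;
 * obj_offset and size here are made-up example values, as they might be
 * set up with SLAB_RED_ZONE | SLAB_STORE_USER on a 64-bit box.
 */
#include <stdio.h>

int main(void)
{
	size_t obj_offset = 8;			/* assumed start of the real object */
	size_t size = 128;			/* assumed per-object footprint */
	size_t word = sizeof(void *);
	size_t ull = sizeof(unsigned long long);
	size_t redzone_align = ull > word ? ull : word;

	printf("redzone1 at offset %zu\n", obj_offset - ull);		/* 0   */
	printf("redzone2 at offset %zu\n", size - ull - redzone_align);/* 112 */
	printf("caller   at offset %zu\n", size - word);		/* 120 */
	return 0;
}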
1da177e4 389/*
3df1cccd
DR
390 * Do not go above this order unless 0 objects fit into the slab or
391 * it is overridden on the command line.
1da177e4 392 */
543585cc
DR
393#define SLAB_MAX_ORDER_HI 1
394#define SLAB_MAX_ORDER_LO 0
395static int slab_max_order = SLAB_MAX_ORDER_LO;
3df1cccd 396static bool slab_max_order_set __initdata;
1da177e4 397
6ed5eb22
PE
398static inline struct kmem_cache *virt_to_cache(const void *obj)
399{
b49af68f 400 struct page *page = virt_to_head_page(obj);
35026088 401 return page->slab_cache;
6ed5eb22
PE
402}
403
8456a648 404static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
8fea4e96
PE
405 unsigned int idx)
406{
8456a648 407 return page->s_mem + cache->size * idx;
8fea4e96
PE
408}
409
6a2d7a95 410/*
3b0efdfa
CL
411 * We want to avoid an expensive divide : (offset / cache->size)
412 * Using the fact that size is a constant for a particular cache,
413 * we can replace (offset / cache->size) by
6a2d7a95
ED
414 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
415 */
416static inline unsigned int obj_to_index(const struct kmem_cache *cache,
8456a648 417 const struct page *page, void *obj)
8fea4e96 418{
8456a648 419 u32 offset = (obj - page->s_mem);
6a2d7a95 420 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
8fea4e96
PE
421}
422
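/*
 * Editor's note: standalone sketch (not part of the kernel build) of the
 * multiply-shift trick behind obj_to_index().  recip_value()/recip_divide()
 * are simplified stand-ins for the helpers in <linux/reciprocal_div.h>.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t recip_value(uint32_t d)		/* ~reciprocal_value() */
{
	return (uint32_t)((((uint64_t)1 << 32) + d - 1) / d);
}

static uint32_t recip_divide(uint32_t a, uint32_t r)	/* ~reciprocal_divide() */
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	uint32_t size = 192;			/* assumed cache->size */
	uint32_t r = recip_value(size);		/* cache->reciprocal_buffer_size */
	uint32_t idx;

	/* every on-slab offset is an exact multiple of size, as in slab */
	for (idx = 0; idx < 64; idx++)
		assert(recip_divide(idx * size, r) == idx);
	return 0;
}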
1da177e4 423static struct arraycache_init initarray_generic =
b28a02de 424 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
1da177e4
LT
425
426/* internal cache of cache description objs */
9b030cb8 427static struct kmem_cache kmem_cache_boot = {
b28a02de
PE
428 .batchcount = 1,
429 .limit = BOOT_CPUCACHE_ENTRIES,
430 .shared = 1,
3b0efdfa 431 .size = sizeof(struct kmem_cache),
b28a02de 432 .name = "kmem_cache",
1da177e4
LT
433};
434
056c6241
RT
435#define BAD_ALIEN_MAGIC 0x01020304ul
436
f1aaee53
AV
437#ifdef CONFIG_LOCKDEP
438
439/*
440 * Slab sometimes uses the kmalloc slabs to store the slab headers
441 * for other slabs "off slab".
442 * The locking for this is tricky in that it nests within the locks
443 * of all other slabs in a few places; to deal with this special
444 * locking we put on-slab caches into a separate lock-class.
056c6241
RT
445 *
446 * We set lock class for alien array caches which are up during init.
447 * The lock annotation will be lost if all cpus of a node goes down and
448 * then comes back up during hotplug
f1aaee53 449 */
056c6241
RT
450static struct lock_class_key on_slab_l3_key;
451static struct lock_class_key on_slab_alc_key;
452
83835b3d
PZ
453static struct lock_class_key debugobj_l3_key;
454static struct lock_class_key debugobj_alc_key;
455
456static void slab_set_lock_classes(struct kmem_cache *cachep,
457 struct lock_class_key *l3_key, struct lock_class_key *alc_key,
458 int q)
459{
460 struct array_cache **alc;
ce8eb6c4 461 struct kmem_cache_node *n;
83835b3d
PZ
462 int r;
463
ce8eb6c4
CL
464 n = cachep->node[q];
465 if (!n)
83835b3d
PZ
466 return;
467
ce8eb6c4
CL
468 lockdep_set_class(&n->list_lock, l3_key);
469 alc = n->alien;
83835b3d
PZ
470 /*
471 * FIXME: This check for BAD_ALIEN_MAGIC
472 * should go away when common slab code is taught to
473 * work even without alien caches.
474 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
475 * for alloc_alien_cache,
476 */
477 if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
478 return;
479 for_each_node(r) {
480 if (alc[r])
481 lockdep_set_class(&alc[r]->lock, alc_key);
482 }
483}
484
485static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
486{
487 slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
488}
489
490static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
491{
492 int node;
493
494 for_each_online_node(node)
495 slab_set_debugobj_lock_classes_node(cachep, node);
496}
497
ce79ddc8 498static void init_node_lock_keys(int q)
f1aaee53 499{
e3366016 500 int i;
056c6241 501
97d06609 502 if (slab_state < UP)
ce79ddc8
PE
503 return;
504
0f8f8094 505 for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
ce8eb6c4 506 struct kmem_cache_node *n;
e3366016
CL
507 struct kmem_cache *cache = kmalloc_caches[i];
508
509 if (!cache)
510 continue;
ce79ddc8 511
ce8eb6c4
CL
512 n = cache->node[q];
513 if (!n || OFF_SLAB(cache))
00afa758 514 continue;
83835b3d 515
e3366016 516 slab_set_lock_classes(cache, &on_slab_l3_key,
83835b3d 517 &on_slab_alc_key, q);
f1aaee53
AV
518 }
519}
ce79ddc8 520
6ccfb5bc
GC
521static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
522{
6a67368c 523 if (!cachep->node[q])
6ccfb5bc
GC
524 return;
525
526 slab_set_lock_classes(cachep, &on_slab_l3_key,
527 &on_slab_alc_key, q);
528}
529
530static inline void on_slab_lock_classes(struct kmem_cache *cachep)
531{
532 int node;
533
534 VM_BUG_ON(OFF_SLAB(cachep));
535 for_each_node(node)
536 on_slab_lock_classes_node(cachep, node);
537}
538
ce79ddc8
PE
539static inline void init_lock_keys(void)
540{
541 int node;
542
543 for_each_node(node)
544 init_node_lock_keys(node);
545}
f1aaee53 546#else
ce79ddc8
PE
547static void init_node_lock_keys(int q)
548{
549}
550
056c6241 551static inline void init_lock_keys(void)
f1aaee53
AV
552{
553}
83835b3d 554
6ccfb5bc
GC
555static inline void on_slab_lock_classes(struct kmem_cache *cachep)
556{
557}
558
559static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
560{
561}
562
83835b3d
PZ
563static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
564{
565}
566
567static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
568{
569}
f1aaee53
AV
570#endif
571
1871e52c 572static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
1da177e4 573
343e0d7a 574static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
1da177e4
LT
575{
576 return cachep->array[smp_processor_id()];
577}
578
9cef2e2b
JK
579static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
580 size_t idx_size, size_t align)
1da177e4 581{
9cef2e2b
JK
582 int nr_objs;
583 size_t freelist_size;
584
585 /*
586 * Ignore padding for the initial guess. The padding
587 * is at most @align-1 bytes, and @buffer_size is at
588 * least @align. In the worst case, this result will
589 * be one greater than the number of objects that fit
590 * into the memory allocation when taking the padding
591 * into account.
592 */
593 nr_objs = slab_size / (buffer_size + idx_size);
594
595 /*
596 * This calculated number will be either the right
597 * amount, or one greater than what we want.
598 */
599 freelist_size = slab_size - nr_objs * buffer_size;
600 if (freelist_size < ALIGN(nr_objs * idx_size, align))
601 nr_objs--;
602
603 return nr_objs;
fbaccacf 604}
1da177e4 605
a737b3e2
AM
606/*
607 * Calculate the number of objects and left-over bytes for a given buffer size.
608 */
fbaccacf
SR
609static void cache_estimate(unsigned long gfporder, size_t buffer_size,
610 size_t align, int flags, size_t *left_over,
611 unsigned int *num)
612{
613 int nr_objs;
614 size_t mgmt_size;
615 size_t slab_size = PAGE_SIZE << gfporder;
1da177e4 616
fbaccacf
SR
617 /*
618 * The slab management structure can be either off the slab or
619 * on it. For the latter case, the memory allocated for a
620 * slab is used for:
621 *
16025177 622 * - One freelist_idx_t for each object
fbaccacf
SR
623 * - Padding to respect alignment of @align
624 * - @buffer_size bytes for each object
625 *
626 * If the slab management structure is off the slab, then the
627 * alignment will already be calculated into the size. Because
628 * the slabs are all page aligned, the objects will be at the
629 * correct alignment when allocated.
630 */
631 if (flags & CFLGS_OFF_SLAB) {
632 mgmt_size = 0;
633 nr_objs = slab_size / buffer_size;
634
fbaccacf 635 } else {
9cef2e2b 636 nr_objs = calculate_nr_objs(slab_size, buffer_size,
a41adfaa
JK
637 sizeof(freelist_idx_t), align);
638 mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
fbaccacf
SR
639 }
640 *num = nr_objs;
641 *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
1da177e4
LT
642}
643
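/*
 * Editor's note: standalone sketch (not part of the kernel build) that
 * runs the same arithmetic as calculate_nr_objs()/cache_estimate() for an
 * on-slab cache, with assumed example numbers: a 4096-byte slab, 120-byte
 * objects, a 2-byte freelist index and 8-byte alignment.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	size_t slab_size = 4096, buffer_size = 120;	/* assumed */
	size_t idx_size = 2, align = 8;			/* assumed */
	size_t nr_objs, mgmt;

	/* initial guess, then correct the possible off-by-one */
	nr_objs = slab_size / (buffer_size + idx_size);
	if (slab_size - nr_objs * buffer_size < ALIGN_UP(nr_objs * idx_size, align))
		nr_objs--;

	mgmt = ALIGN_UP(nr_objs * idx_size, align);
	printf("objs=%zu mgmt=%zu left_over=%zu\n", nr_objs, mgmt,
	       slab_size - nr_objs * buffer_size - mgmt);	/* 33, 72, 64 */
	return 0;
}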
f28510d3 644#if DEBUG
d40cee24 645#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
1da177e4 646
a737b3e2
AM
647static void __slab_error(const char *function, struct kmem_cache *cachep,
648 char *msg)
1da177e4
LT
649{
650 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
b28a02de 651 function, cachep->name, msg);
1da177e4 652 dump_stack();
373d4d09 653 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1da177e4 654}
f28510d3 655#endif
1da177e4 656
3395ee05
PM
657/*
658 * By default on NUMA we use alien caches to stage the freeing of
659 * objects allocated from other nodes. This causes massive memory
660 * inefficiencies when using fake NUMA setup to split memory into a
661 * large number of small nodes, so it can be disabled on the command
662 * line
663 */
664
665static int use_alien_caches __read_mostly = 1;
666static int __init noaliencache_setup(char *s)
667{
668 use_alien_caches = 0;
669 return 1;
670}
671__setup("noaliencache", noaliencache_setup);
672
3df1cccd
DR
673static int __init slab_max_order_setup(char *str)
674{
675 get_option(&str, &slab_max_order);
676 slab_max_order = slab_max_order < 0 ? 0 :
677 min(slab_max_order, MAX_ORDER - 1);
678 slab_max_order_set = true;
679
680 return 1;
681}
682__setup("slab_max_order=", slab_max_order_setup);
683
8fce4d8e
CL
684#ifdef CONFIG_NUMA
685/*
686 * Special reaping functions for NUMA systems called from cache_reap().
687 * These take care of doing round robin flushing of alien caches (containing
688 * objects freed on different nodes from which they were allocated) and the
689 * flushing of remote pcps by calling drain_node_pages.
690 */
1871e52c 691static DEFINE_PER_CPU(unsigned long, slab_reap_node);
8fce4d8e
CL
692
693static void init_reap_node(int cpu)
694{
695 int node;
696
7d6e6d09 697 node = next_node(cpu_to_mem(cpu), node_online_map);
8fce4d8e 698 if (node == MAX_NUMNODES)
442295c9 699 node = first_node(node_online_map);
8fce4d8e 700
1871e52c 701 per_cpu(slab_reap_node, cpu) = node;
8fce4d8e
CL
702}
703
704static void next_reap_node(void)
705{
909ea964 706 int node = __this_cpu_read(slab_reap_node);
8fce4d8e 707
8fce4d8e
CL
708 node = next_node(node, node_online_map);
709 if (unlikely(node >= MAX_NUMNODES))
710 node = first_node(node_online_map);
909ea964 711 __this_cpu_write(slab_reap_node, node);
8fce4d8e
CL
712}
713
714#else
715#define init_reap_node(cpu) do { } while (0)
716#define next_reap_node(void) do { } while (0)
717#endif
718
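/*
 * Editor's note: standalone sketch (not part of the kernel build) of the
 * round-robin node walk done by init_reap_node()/next_reap_node(): advance
 * to the next online node and wrap to the first one when the walk runs
 * past the end.  The online mask below is an assumed example.
 */
#include <stdio.h>

#define SKETCH_MAX_NODES 8
static const int online[SKETCH_MAX_NODES] = { 1, 0, 1, 0, 0, 1, 0, 0 };

static int next_online(int node)
{
	do {
		if (++node >= SKETCH_MAX_NODES)
			node = 0;		/* wrap, like first_node() */
	} while (!online[node]);
	return node;
}

int main(void)
{
	int node = 0, i;

	for (i = 0; i < 6; i++) {
		node = next_online(node);
		printf("reap node -> %d\n", node);	/* 2 5 0 2 5 0 */
	}
	return 0;
}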
1da177e4
LT
719/*
720 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
721 * via the workqueue/eventd.
722 * Add the CPU number into the expiration time to minimize the possibility of
723 * the CPUs getting into lockstep and contending for the global cache chain
724 * lock.
725 */
0db0628d 726static void start_cpu_timer(int cpu)
1da177e4 727{
1871e52c 728 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
1da177e4
LT
729
730 /*
731 * When this gets called from do_initcalls via cpucache_init(),
732 * init_workqueues() has already run, so keventd will be set up
733 * at that time.
734 */
52bad64d 735 if (keventd_up() && reap_work->work.func == NULL) {
8fce4d8e 736 init_reap_node(cpu);
203b42f7 737 INIT_DEFERRABLE_WORK(reap_work, cache_reap);
2b284214
AV
738 schedule_delayed_work_on(cpu, reap_work,
739 __round_jiffies_relative(HZ, cpu));
1da177e4
LT
740 }
741}
742
e498be7d 743static struct array_cache *alloc_arraycache(int node, int entries,
83b519e8 744 int batchcount, gfp_t gfp)
1da177e4 745{
b28a02de 746 int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
1da177e4
LT
747 struct array_cache *nc = NULL;
748
83b519e8 749 nc = kmalloc_node(memsize, gfp, node);
d5cff635
CM
750 /*
751 * The array_cache structures contain pointers to free objects.
25985edc 752 * However, when such objects are allocated or transferred to another
d5cff635
CM
753 * cache the pointers are not cleared and they could be counted as
754 * valid references during a kmemleak scan. Therefore, kmemleak must
755 * not scan such objects.
756 */
757 kmemleak_no_scan(nc);
1da177e4
LT
758 if (nc) {
759 nc->avail = 0;
760 nc->limit = entries;
761 nc->batchcount = batchcount;
762 nc->touched = 0;
e498be7d 763 spin_lock_init(&nc->lock);
1da177e4
LT
764 }
765 return nc;
766}
767
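/*
 * Editor's note: standalone sketch (not part of the kernel build) of how
 * alloc_arraycache() sizes one allocation: the array_cache header and its
 * entry[] slots live in a single buffer, which is why entry[] is declared
 * as a flexible array member above.
 */
#include <stdio.h>
#include <stdlib.h>

struct sketch_cache {
	unsigned int avail, limit, batchcount, touched;
	void *entry[];				/* flexible array member */
};

static struct sketch_cache *sketch_alloc_arraycache(unsigned int entries)
{
	struct sketch_cache *ac = malloc(sizeof(*ac) + sizeof(void *) * entries);

	if (ac) {
		ac->avail = 0;
		ac->limit = entries;
	}
	return ac;
}

int main(void)
{
	struct sketch_cache *ac = sketch_alloc_arraycache(120);

	if (!ac)
		return 1;
	printf("%zu header bytes + %u pointer slots\n", sizeof(*ac), ac->limit);
	free(ac);
	return 0;
}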
8456a648 768static inline bool is_slab_pfmemalloc(struct page *page)
072bb0aa 769{
072bb0aa
MG
770 return PageSlabPfmemalloc(page);
771}
772
773/* Clears pfmemalloc_active if no slabs have pfmalloc set */
774static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
775 struct array_cache *ac)
776{
ce8eb6c4 777 struct kmem_cache_node *n = cachep->node[numa_mem_id()];
8456a648 778 struct page *page;
072bb0aa
MG
779 unsigned long flags;
780
781 if (!pfmemalloc_active)
782 return;
783
ce8eb6c4 784 spin_lock_irqsave(&n->list_lock, flags);
8456a648
JK
785 list_for_each_entry(page, &n->slabs_full, lru)
786 if (is_slab_pfmemalloc(page))
072bb0aa
MG
787 goto out;
788
8456a648
JK
789 list_for_each_entry(page, &n->slabs_partial, lru)
790 if (is_slab_pfmemalloc(page))
072bb0aa
MG
791 goto out;
792
8456a648
JK
793 list_for_each_entry(page, &n->slabs_free, lru)
794 if (is_slab_pfmemalloc(page))
072bb0aa
MG
795 goto out;
796
797 pfmemalloc_active = false;
798out:
ce8eb6c4 799 spin_unlock_irqrestore(&n->list_lock, flags);
072bb0aa
MG
800}
801
381760ea 802static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
072bb0aa
MG
803 gfp_t flags, bool force_refill)
804{
805 int i;
806 void *objp = ac->entry[--ac->avail];
807
808 /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
809 if (unlikely(is_obj_pfmemalloc(objp))) {
ce8eb6c4 810 struct kmem_cache_node *n;
072bb0aa
MG
811
812 if (gfp_pfmemalloc_allowed(flags)) {
813 clear_obj_pfmemalloc(&objp);
814 return objp;
815 }
816
817 /* The caller cannot use PFMEMALLOC objects, find another one */
d014dc2e 818 for (i = 0; i < ac->avail; i++) {
072bb0aa
MG
819 /* If a !PFMEMALLOC object is found, swap them */
820 if (!is_obj_pfmemalloc(ac->entry[i])) {
821 objp = ac->entry[i];
822 ac->entry[i] = ac->entry[ac->avail];
823 ac->entry[ac->avail] = objp;
824 return objp;
825 }
826 }
827
828 /*
829 * If there are empty slabs on the slabs_free list and we are
830 * being forced to refill the cache, mark this one !pfmemalloc.
831 */
ce8eb6c4
CL
832 n = cachep->node[numa_mem_id()];
833 if (!list_empty(&n->slabs_free) && force_refill) {
8456a648 834 struct page *page = virt_to_head_page(objp);
7ecccf9d 835 ClearPageSlabPfmemalloc(page);
072bb0aa
MG
836 clear_obj_pfmemalloc(&objp);
837 recheck_pfmemalloc_active(cachep, ac);
838 return objp;
839 }
840
841 /* No !PFMEMALLOC objects available */
842 ac->avail++;
843 objp = NULL;
844 }
845
846 return objp;
847}
848
381760ea
MG
849static inline void *ac_get_obj(struct kmem_cache *cachep,
850 struct array_cache *ac, gfp_t flags, bool force_refill)
851{
852 void *objp;
853
854 if (unlikely(sk_memalloc_socks()))
855 objp = __ac_get_obj(cachep, ac, flags, force_refill);
856 else
857 objp = ac->entry[--ac->avail];
858
859 return objp;
860}
861
862static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
072bb0aa
MG
863 void *objp)
864{
865 if (unlikely(pfmemalloc_active)) {
866 /* Some pfmemalloc slabs exist, check if this is one */
30c29bea 867 struct page *page = virt_to_head_page(objp);
072bb0aa
MG
868 if (PageSlabPfmemalloc(page))
869 set_obj_pfmemalloc(&objp);
870 }
871
381760ea
MG
872 return objp;
873}
874
875static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
876 void *objp)
877{
878 if (unlikely(sk_memalloc_socks()))
879 objp = __ac_put_obj(cachep, ac, objp);
880
072bb0aa
MG
881 ac->entry[ac->avail++] = objp;
882}
883
3ded175a
CL
884/*
885 * Transfer objects in one arraycache to another.
886 * Locking must be handled by the caller.
887 *
888 * Return the number of entries transferred.
889 */
890static int transfer_objects(struct array_cache *to,
891 struct array_cache *from, unsigned int max)
892{
893 /* Figure out how many entries to transfer */
732eacc0 894 int nr = min3(from->avail, max, to->limit - to->avail);
3ded175a
CL
895
896 if (!nr)
897 return 0;
898
899 memcpy(to->entry + to->avail, from->entry + from->avail -nr,
900 sizeof(void *) *nr);
901
902 from->avail -= nr;
903 to->avail += nr;
3ded175a
CL
904 return nr;
905}
906
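/*
 * Editor's note: standalone sketch (not part of the kernel build) of
 * transfer_objects(): the top `nr` entries of the source LIFO stack are
 * copied onto the top of the destination with one memcpy.
 */
#include <assert.h>
#include <string.h>

#define MIN3(a, b, c)	((a) < (b) ? ((a) < (c) ? (a) : (c)) \
				   : ((b) < (c) ? (b) : (c)))

int main(void)
{
	void *from[8] = { (void *)1, (void *)2, (void *)3, (void *)4 };
	void *to[8] = { (void *)9 };
	unsigned int from_avail = 4, to_avail = 1, to_limit = 8, max = 16;
	unsigned int nr = MIN3(from_avail, max, to_limit - to_avail);

	memcpy(to + to_avail, from + from_avail - nr, sizeof(void *) * nr);
	from_avail -= nr;
	to_avail += nr;

	assert(nr == 4 && from_avail == 0 && to_avail == 5);
	assert(to[1] == (void *)1 && to[4] == (void *)4);
	return 0;
}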
765c4507
CL
907#ifndef CONFIG_NUMA
908
909#define drain_alien_cache(cachep, alien) do { } while (0)
ce8eb6c4 910#define reap_alien(cachep, n) do { } while (0)
765c4507 911
83b519e8 912static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
765c4507
CL
913{
914 return (struct array_cache **)BAD_ALIEN_MAGIC;
915}
916
917static inline void free_alien_cache(struct array_cache **ac_ptr)
918{
919}
920
921static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
922{
923 return 0;
924}
925
926static inline void *alternate_node_alloc(struct kmem_cache *cachep,
927 gfp_t flags)
928{
929 return NULL;
930}
931
8b98c169 932static inline void *____cache_alloc_node(struct kmem_cache *cachep,
765c4507
CL
933 gfp_t flags, int nodeid)
934{
935 return NULL;
936}
937
938#else /* CONFIG_NUMA */
939
8b98c169 940static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
c61afb18 941static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
dc85da15 942
83b519e8 943static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
e498be7d
CL
944{
945 struct array_cache **ac_ptr;
8ef82866 946 int memsize = sizeof(void *) * nr_node_ids;
e498be7d
CL
947 int i;
948
949 if (limit > 1)
950 limit = 12;
f3186a9c 951 ac_ptr = kzalloc_node(memsize, gfp, node);
e498be7d
CL
952 if (ac_ptr) {
953 for_each_node(i) {
f3186a9c 954 if (i == node || !node_online(i))
e498be7d 955 continue;
83b519e8 956 ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
e498be7d 957 if (!ac_ptr[i]) {
cc550def 958 for (i--; i >= 0; i--)
e498be7d
CL
959 kfree(ac_ptr[i]);
960 kfree(ac_ptr);
961 return NULL;
962 }
963 }
964 }
965 return ac_ptr;
966}
967
5295a74c 968static void free_alien_cache(struct array_cache **ac_ptr)
e498be7d
CL
969{
970 int i;
971
972 if (!ac_ptr)
973 return;
e498be7d 974 for_each_node(i)
b28a02de 975 kfree(ac_ptr[i]);
e498be7d
CL
976 kfree(ac_ptr);
977}
978
343e0d7a 979static void __drain_alien_cache(struct kmem_cache *cachep,
5295a74c 980 struct array_cache *ac, int node)
e498be7d 981{
ce8eb6c4 982 struct kmem_cache_node *n = cachep->node[node];
e498be7d
CL
983
984 if (ac->avail) {
ce8eb6c4 985 spin_lock(&n->list_lock);
e00946fe
CL
986 /*
987 * Stuff objects into the remote nodes shared array first.
988 * That way we could avoid the overhead of putting the objects
989 * into the free lists and getting them back later.
990 */
ce8eb6c4
CL
991 if (n->shared)
992 transfer_objects(n->shared, ac, ac->limit);
e00946fe 993
ff69416e 994 free_block(cachep, ac->entry, ac->avail, node);
e498be7d 995 ac->avail = 0;
ce8eb6c4 996 spin_unlock(&n->list_lock);
e498be7d
CL
997 }
998}
999
8fce4d8e
CL
1000/*
1001 * Called from cache_reap() to regularly drain alien caches round robin.
1002 */
ce8eb6c4 1003static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
8fce4d8e 1004{
909ea964 1005 int node = __this_cpu_read(slab_reap_node);
8fce4d8e 1006
ce8eb6c4
CL
1007 if (n->alien) {
1008 struct array_cache *ac = n->alien[node];
e00946fe
CL
1009
1010 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
8fce4d8e
CL
1011 __drain_alien_cache(cachep, ac, node);
1012 spin_unlock_irq(&ac->lock);
1013 }
1014 }
1015}
1016
a737b3e2
AM
1017static void drain_alien_cache(struct kmem_cache *cachep,
1018 struct array_cache **alien)
e498be7d 1019{
b28a02de 1020 int i = 0;
e498be7d
CL
1021 struct array_cache *ac;
1022 unsigned long flags;
1023
1024 for_each_online_node(i) {
4484ebf1 1025 ac = alien[i];
e498be7d
CL
1026 if (ac) {
1027 spin_lock_irqsave(&ac->lock, flags);
1028 __drain_alien_cache(cachep, ac, i);
1029 spin_unlock_irqrestore(&ac->lock, flags);
1030 }
1031 }
1032}
729bd0b7 1033
873623df 1034static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
729bd0b7 1035{
1ea991b0 1036 int nodeid = page_to_nid(virt_to_page(objp));
ce8eb6c4 1037 struct kmem_cache_node *n;
729bd0b7 1038 struct array_cache *alien = NULL;
1ca4cb24
PE
1039 int node;
1040
7d6e6d09 1041 node = numa_mem_id();
729bd0b7
PE
1042
1043 /*
1044 * Make sure we are not freeing an object from another node to the array
1045 * cache on this cpu.
1046 */
1ea991b0 1047 if (likely(nodeid == node))
729bd0b7
PE
1048 return 0;
1049
ce8eb6c4 1050 n = cachep->node[node];
729bd0b7 1051 STATS_INC_NODEFREES(cachep);
ce8eb6c4
CL
1052 if (n->alien && n->alien[nodeid]) {
1053 alien = n->alien[nodeid];
873623df 1054 spin_lock(&alien->lock);
729bd0b7
PE
1055 if (unlikely(alien->avail == alien->limit)) {
1056 STATS_INC_ACOVERFLOW(cachep);
1057 __drain_alien_cache(cachep, alien, nodeid);
1058 }
072bb0aa 1059 ac_put_obj(cachep, alien, objp);
729bd0b7
PE
1060 spin_unlock(&alien->lock);
1061 } else {
6a67368c 1062 spin_lock(&(cachep->node[nodeid])->list_lock);
729bd0b7 1063 free_block(cachep, &objp, 1, nodeid);
6a67368c 1064 spin_unlock(&(cachep->node[nodeid])->list_lock);
729bd0b7
PE
1065 }
1066 return 1;
1067}
e498be7d
CL
1068#endif
1069
8f9f8d9e 1070/*
6a67368c 1071 * Allocates and initializes the kmem_cache_node for a node on each slab cache, used for
ce8eb6c4 1072 * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
8f9f8d9e 1073 * will be allocated off-node since memory is not yet online for the new node.
6a67368c 1074 * When hotplugging memory or a cpu, existing nodes are not replaced if
8f9f8d9e
DR
1075 * already in use.
1076 *
18004c5d 1077 * Must hold slab_mutex.
8f9f8d9e 1078 */
6a67368c 1079static int init_cache_node_node(int node)
8f9f8d9e
DR
1080{
1081 struct kmem_cache *cachep;
ce8eb6c4 1082 struct kmem_cache_node *n;
6744f087 1083 const int memsize = sizeof(struct kmem_cache_node);
8f9f8d9e 1084
18004c5d 1085 list_for_each_entry(cachep, &slab_caches, list) {
8f9f8d9e 1086 /*
5f0985bb 1087 * Set up the kmem_cache_node for cpu before we can
8f9f8d9e
DR
1088 * begin anything. Make sure some other cpu on this
1089 * node has not already allocated this
1090 */
6a67368c 1091 if (!cachep->node[node]) {
ce8eb6c4
CL
1092 n = kmalloc_node(memsize, GFP_KERNEL, node);
1093 if (!n)
8f9f8d9e 1094 return -ENOMEM;
ce8eb6c4 1095 kmem_cache_node_init(n);
5f0985bb
JZ
1096 n->next_reap = jiffies + REAPTIMEOUT_NODE +
1097 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
8f9f8d9e
DR
1098
1099 /*
5f0985bb
JZ
1100 * The kmem_cache_nodes don't come and go as CPUs
1101 * come and go. slab_mutex is sufficient
8f9f8d9e
DR
1102 * protection here.
1103 */
ce8eb6c4 1104 cachep->node[node] = n;
8f9f8d9e
DR
1105 }
1106
6a67368c
CL
1107 spin_lock_irq(&cachep->node[node]->list_lock);
1108 cachep->node[node]->free_limit =
8f9f8d9e
DR
1109 (1 + nr_cpus_node(node)) *
1110 cachep->batchcount + cachep->num;
6a67368c 1111 spin_unlock_irq(&cachep->node[node]->list_lock);
8f9f8d9e
DR
1112 }
1113 return 0;
1114}
1115
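/*
 * Editor's note: standalone sketch (not part of the kernel build) of the
 * free_limit formula above and the slabs_tofree() helper just below: the
 * node's pool of free objects is capped at (1 + cpus on node) batches plus
 * one slab's worth, and slabs_tofree() is the ceiling of free objects over
 * objects per slab.  All numbers here are assumed examples.
 */
#include <stdio.h>

int main(void)
{
	int cpus_on_node = 4, batchcount = 60, objs_per_slab = 32;	/* assumed */
	int free_objects = 500;						/* assumed n->free_objects */
	int free_limit = (1 + cpus_on_node) * batchcount + objs_per_slab;
	int tofree = (free_objects + objs_per_slab - 1) / objs_per_slab;

	printf("free_limit=%d, slabs_tofree(%d free objs)=%d\n",
	       free_limit, free_objects, tofree);	/* 332 and 16 */
	return 0;
}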
0fa8103b
WL
1116static inline int slabs_tofree(struct kmem_cache *cachep,
1117 struct kmem_cache_node *n)
1118{
1119 return (n->free_objects + cachep->num - 1) / cachep->num;
1120}
1121
0db0628d 1122static void cpuup_canceled(long cpu)
fbf1e473
AM
1123{
1124 struct kmem_cache *cachep;
ce8eb6c4 1125 struct kmem_cache_node *n = NULL;
7d6e6d09 1126 int node = cpu_to_mem(cpu);
a70f7302 1127 const struct cpumask *mask = cpumask_of_node(node);
fbf1e473 1128
18004c5d 1129 list_for_each_entry(cachep, &slab_caches, list) {
fbf1e473
AM
1130 struct array_cache *nc;
1131 struct array_cache *shared;
1132 struct array_cache **alien;
fbf1e473 1133
fbf1e473
AM
1134 /* cpu is dead; no one can alloc from it. */
1135 nc = cachep->array[cpu];
1136 cachep->array[cpu] = NULL;
ce8eb6c4 1137 n = cachep->node[node];
fbf1e473 1138
ce8eb6c4 1139 if (!n)
fbf1e473
AM
1140 goto free_array_cache;
1141
ce8eb6c4 1142 spin_lock_irq(&n->list_lock);
fbf1e473 1143
ce8eb6c4
CL
1144 /* Free limit for this kmem_cache_node */
1145 n->free_limit -= cachep->batchcount;
fbf1e473
AM
1146 if (nc)
1147 free_block(cachep, nc->entry, nc->avail, node);
1148
58463c1f 1149 if (!cpumask_empty(mask)) {
ce8eb6c4 1150 spin_unlock_irq(&n->list_lock);
fbf1e473
AM
1151 goto free_array_cache;
1152 }
1153
ce8eb6c4 1154 shared = n->shared;
fbf1e473
AM
1155 if (shared) {
1156 free_block(cachep, shared->entry,
1157 shared->avail, node);
ce8eb6c4 1158 n->shared = NULL;
fbf1e473
AM
1159 }
1160
ce8eb6c4
CL
1161 alien = n->alien;
1162 n->alien = NULL;
fbf1e473 1163
ce8eb6c4 1164 spin_unlock_irq(&n->list_lock);
fbf1e473
AM
1165
1166 kfree(shared);
1167 if (alien) {
1168 drain_alien_cache(cachep, alien);
1169 free_alien_cache(alien);
1170 }
1171free_array_cache:
1172 kfree(nc);
1173 }
1174 /*
1175 * In the previous loop, all the objects were freed to
1176 * the respective cache's slabs, now we can go ahead and
1177 * shrink each nodelist to its limit.
1178 */
18004c5d 1179 list_for_each_entry(cachep, &slab_caches, list) {
ce8eb6c4
CL
1180 n = cachep->node[node];
1181 if (!n)
fbf1e473 1182 continue;
0fa8103b 1183 drain_freelist(cachep, n, slabs_tofree(cachep, n));
fbf1e473
AM
1184 }
1185}
1186
0db0628d 1187static int cpuup_prepare(long cpu)
1da177e4 1188{
343e0d7a 1189 struct kmem_cache *cachep;
ce8eb6c4 1190 struct kmem_cache_node *n = NULL;
7d6e6d09 1191 int node = cpu_to_mem(cpu);
8f9f8d9e 1192 int err;
1da177e4 1193
fbf1e473
AM
1194 /*
1195 * We need to do this right in the beginning since
1196 * alloc_arraycache's are going to use this list.
1197 * kmalloc_node allows us to add the slab to the right
ce8eb6c4 1198 * kmem_cache_node and not this cpu's kmem_cache_node
fbf1e473 1199 */
6a67368c 1200 err = init_cache_node_node(node);
8f9f8d9e
DR
1201 if (err < 0)
1202 goto bad;
fbf1e473
AM
1203
1204 /*
1205 * Now we can go ahead with allocating the shared arrays and
1206 * array caches
1207 */
18004c5d 1208 list_for_each_entry(cachep, &slab_caches, list) {
fbf1e473
AM
1209 struct array_cache *nc;
1210 struct array_cache *shared = NULL;
1211 struct array_cache **alien = NULL;
1212
1213 nc = alloc_arraycache(node, cachep->limit,
83b519e8 1214 cachep->batchcount, GFP_KERNEL);
fbf1e473
AM
1215 if (!nc)
1216 goto bad;
1217 if (cachep->shared) {
1218 shared = alloc_arraycache(node,
1219 cachep->shared * cachep->batchcount,
83b519e8 1220 0xbaadf00d, GFP_KERNEL);
12d00f6a
AM
1221 if (!shared) {
1222 kfree(nc);
1da177e4 1223 goto bad;
12d00f6a 1224 }
fbf1e473
AM
1225 }
1226 if (use_alien_caches) {
83b519e8 1227 alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
12d00f6a
AM
1228 if (!alien) {
1229 kfree(shared);
1230 kfree(nc);
fbf1e473 1231 goto bad;
12d00f6a 1232 }
fbf1e473
AM
1233 }
1234 cachep->array[cpu] = nc;
ce8eb6c4
CL
1235 n = cachep->node[node];
1236 BUG_ON(!n);
fbf1e473 1237
ce8eb6c4
CL
1238 spin_lock_irq(&n->list_lock);
1239 if (!n->shared) {
fbf1e473
AM
1240 /*
1241 * We are serialised from CPU_DEAD or
1242 * CPU_UP_CANCELLED by the cpucontrol lock
1243 */
ce8eb6c4 1244 n->shared = shared;
fbf1e473
AM
1245 shared = NULL;
1246 }
4484ebf1 1247#ifdef CONFIG_NUMA
ce8eb6c4
CL
1248 if (!n->alien) {
1249 n->alien = alien;
fbf1e473 1250 alien = NULL;
1da177e4 1251 }
fbf1e473 1252#endif
ce8eb6c4 1253 spin_unlock_irq(&n->list_lock);
fbf1e473
AM
1254 kfree(shared);
1255 free_alien_cache(alien);
83835b3d
PZ
1256 if (cachep->flags & SLAB_DEBUG_OBJECTS)
1257 slab_set_debugobj_lock_classes_node(cachep, node);
6ccfb5bc
GC
1258 else if (!OFF_SLAB(cachep) &&
1259 !(cachep->flags & SLAB_DESTROY_BY_RCU))
1260 on_slab_lock_classes_node(cachep, node);
fbf1e473 1261 }
ce79ddc8
PE
1262 init_node_lock_keys(node);
1263
fbf1e473
AM
1264 return 0;
1265bad:
12d00f6a 1266 cpuup_canceled(cpu);
fbf1e473
AM
1267 return -ENOMEM;
1268}
1269
0db0628d 1270static int cpuup_callback(struct notifier_block *nfb,
fbf1e473
AM
1271 unsigned long action, void *hcpu)
1272{
1273 long cpu = (long)hcpu;
1274 int err = 0;
1275
1276 switch (action) {
fbf1e473
AM
1277 case CPU_UP_PREPARE:
1278 case CPU_UP_PREPARE_FROZEN:
18004c5d 1279 mutex_lock(&slab_mutex);
fbf1e473 1280 err = cpuup_prepare(cpu);
18004c5d 1281 mutex_unlock(&slab_mutex);
1da177e4
LT
1282 break;
1283 case CPU_ONLINE:
8bb78442 1284 case CPU_ONLINE_FROZEN:
1da177e4
LT
1285 start_cpu_timer(cpu);
1286 break;
1287#ifdef CONFIG_HOTPLUG_CPU
5830c590 1288 case CPU_DOWN_PREPARE:
8bb78442 1289 case CPU_DOWN_PREPARE_FROZEN:
5830c590 1290 /*
18004c5d 1291 * Shutdown cache reaper. Note that the slab_mutex is
5830c590
CL
1292 * held so that if cache_reap() is invoked it cannot do
1293 * anything expensive but will only modify reap_work
1294 * and reschedule the timer.
1295 */
afe2c511 1296 cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
5830c590 1297 /* Now the cache_reaper is guaranteed to be not running. */
1871e52c 1298 per_cpu(slab_reap_work, cpu).work.func = NULL;
5830c590
CL
1299 break;
1300 case CPU_DOWN_FAILED:
8bb78442 1301 case CPU_DOWN_FAILED_FROZEN:
5830c590
CL
1302 start_cpu_timer(cpu);
1303 break;
1da177e4 1304 case CPU_DEAD:
8bb78442 1305 case CPU_DEAD_FROZEN:
4484ebf1
RT
1306 /*
1307 * Even if all the cpus of a node are down, we don't free the
ce8eb6c4 1308 * kmem_cache_node of any cache. This to avoid a race between
4484ebf1 1309 * cpu_down, and a kmalloc allocation from another cpu for
ce8eb6c4 1310 * memory from the node of the cpu going down. The node
4484ebf1
RT
1311 * structure is usually allocated from kmem_cache_create() and
1312 * gets destroyed at kmem_cache_destroy().
1313 */
183ff22b 1314 /* fall through */
8f5be20b 1315#endif
1da177e4 1316 case CPU_UP_CANCELED:
8bb78442 1317 case CPU_UP_CANCELED_FROZEN:
18004c5d 1318 mutex_lock(&slab_mutex);
fbf1e473 1319 cpuup_canceled(cpu);
18004c5d 1320 mutex_unlock(&slab_mutex);
1da177e4 1321 break;
1da177e4 1322 }
eac40680 1323 return notifier_from_errno(err);
1da177e4
LT
1324}
1325
0db0628d 1326static struct notifier_block cpucache_notifier = {
74b85f37
CS
1327 &cpuup_callback, NULL, 0
1328};
1da177e4 1329
8f9f8d9e
DR
1330#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1331/*
1332 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1333 * Returns -EBUSY if all objects cannot be drained so that the node is not
1334 * removed.
1335 *
18004c5d 1336 * Must hold slab_mutex.
8f9f8d9e 1337 */
6a67368c 1338static int __meminit drain_cache_node_node(int node)
8f9f8d9e
DR
1339{
1340 struct kmem_cache *cachep;
1341 int ret = 0;
1342
18004c5d 1343 list_for_each_entry(cachep, &slab_caches, list) {
ce8eb6c4 1344 struct kmem_cache_node *n;
8f9f8d9e 1345
ce8eb6c4
CL
1346 n = cachep->node[node];
1347 if (!n)
8f9f8d9e
DR
1348 continue;
1349
0fa8103b 1350 drain_freelist(cachep, n, slabs_tofree(cachep, n));
8f9f8d9e 1351
ce8eb6c4
CL
1352 if (!list_empty(&n->slabs_full) ||
1353 !list_empty(&n->slabs_partial)) {
8f9f8d9e
DR
1354 ret = -EBUSY;
1355 break;
1356 }
1357 }
1358 return ret;
1359}
1360
1361static int __meminit slab_memory_callback(struct notifier_block *self,
1362 unsigned long action, void *arg)
1363{
1364 struct memory_notify *mnb = arg;
1365 int ret = 0;
1366 int nid;
1367
1368 nid = mnb->status_change_nid;
1369 if (nid < 0)
1370 goto out;
1371
1372 switch (action) {
1373 case MEM_GOING_ONLINE:
18004c5d 1374 mutex_lock(&slab_mutex);
6a67368c 1375 ret = init_cache_node_node(nid);
18004c5d 1376 mutex_unlock(&slab_mutex);
8f9f8d9e
DR
1377 break;
1378 case MEM_GOING_OFFLINE:
18004c5d 1379 mutex_lock(&slab_mutex);
6a67368c 1380 ret = drain_cache_node_node(nid);
18004c5d 1381 mutex_unlock(&slab_mutex);
8f9f8d9e
DR
1382 break;
1383 case MEM_ONLINE:
1384 case MEM_OFFLINE:
1385 case MEM_CANCEL_ONLINE:
1386 case MEM_CANCEL_OFFLINE:
1387 break;
1388 }
1389out:
5fda1bd5 1390 return notifier_from_errno(ret);
8f9f8d9e
DR
1391}
1392#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1393
e498be7d 1394/*
ce8eb6c4 1395 * swap the static kmem_cache_node with kmalloced memory
e498be7d 1396 */
6744f087 1397static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
8f9f8d9e 1398 int nodeid)
e498be7d 1399{
6744f087 1400 struct kmem_cache_node *ptr;
e498be7d 1401
6744f087 1402 ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
e498be7d
CL
1403 BUG_ON(!ptr);
1404
6744f087 1405 memcpy(ptr, list, sizeof(struct kmem_cache_node));
2b2d5493
IM
1406 /*
1407 * Do not assume that spinlocks can be initialized via memcpy:
1408 */
1409 spin_lock_init(&ptr->list_lock);
1410
e498be7d 1411 MAKE_ALL_LISTS(cachep, ptr, nodeid);
6a67368c 1412 cachep->node[nodeid] = ptr;
e498be7d
CL
1413}
1414
556a169d 1415/*
ce8eb6c4
CL
1416 * For setting up all the kmem_cache_node for caches whose buffer_size is the same
1417 * as the size of kmem_cache_node.
556a169d 1418 */
ce8eb6c4 1419static void __init set_up_node(struct kmem_cache *cachep, int index)
556a169d
PE
1420{
1421 int node;
1422
1423 for_each_online_node(node) {
ce8eb6c4 1424 cachep->node[node] = &init_kmem_cache_node[index + node];
6a67368c 1425 cachep->node[node]->next_reap = jiffies +
5f0985bb
JZ
1426 REAPTIMEOUT_NODE +
1427 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
556a169d
PE
1428 }
1429}
1430
3c583465
CL
1431/*
1432 * The memory after the last cpu cache pointer is used for
6a67368c 1433 * the node pointer.
3c583465 1434 */
6a67368c 1435static void setup_node_pointer(struct kmem_cache *cachep)
3c583465 1436{
6a67368c 1437 cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
3c583465
CL
1438}
1439
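/*
 * Editor's note: standalone sketch (not part of the kernel build) of the
 * trailing-array layout that setup_node_pointer() relies on: the struct is
 * allocated with nr_cpu_ids per-cpu array_cache pointers followed by
 * nr_node_ids kmem_cache_node pointers (see the create_boot_cache() sizing
 * further below).  The struct and counts here are assumed examples.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>

struct sketch_kmem_cache {
	unsigned int batchcount, limit, shared, size;
	void *array[];			/* nr_cpu_ids slots, then node pointers */
};

int main(void)
{
	int nr_cpu_ids = 4, nr_node_ids = 2;	/* assumed */
	size_t sz = offsetof(struct sketch_kmem_cache, array[nr_cpu_ids])
			+ nr_node_ids * sizeof(void *);
	struct sketch_kmem_cache *c = calloc(1, sz);
	void **node;

	if (!c)
		return 1;
	node = &c->array[nr_cpu_ids];	/* what setup_node_pointer() computes */
	printf("object size %zu, node[] at offset %zu\n", sz,
	       (size_t)((char *)node - (char *)c));
	free(c);
	return 0;
}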
a737b3e2
AM
1440/*
1441 * Initialisation. Called after the page allocator has been initialised and
1442 * before smp_init().
1da177e4
LT
1443 */
1444void __init kmem_cache_init(void)
1445{
e498be7d
CL
1446 int i;
1447
68126702
JK
1448 BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
1449 sizeof(struct rcu_head));
9b030cb8 1450 kmem_cache = &kmem_cache_boot;
6a67368c 1451 setup_node_pointer(kmem_cache);
9b030cb8 1452
b6e68bc1 1453 if (num_possible_nodes() == 1)
62918a03
SS
1454 use_alien_caches = 0;
1455
3c583465 1456 for (i = 0; i < NUM_INIT_LISTS; i++)
ce8eb6c4 1457 kmem_cache_node_init(&init_kmem_cache_node[i]);
3c583465 1458
ce8eb6c4 1459 set_up_node(kmem_cache, CACHE_CACHE);
1da177e4
LT
1460
1461 /*
1462 * Fragmentation resistance on low memory - only use bigger
3df1cccd
DR
1463 * page orders on machines with more than 32MB of memory if
1464 * not overridden on the command line.
1da177e4 1465 */
3df1cccd 1466 if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
543585cc 1467 slab_max_order = SLAB_MAX_ORDER_HI;
1da177e4 1468
1da177e4
LT
1469 /* Bootstrap is tricky, because several objects are allocated
1470 * from caches that do not exist yet:
9b030cb8
CL
1471 * 1) initialize the kmem_cache cache: it contains the struct
1472 * kmem_cache structures of all caches, except kmem_cache itself:
1473 * kmem_cache is statically allocated.
e498be7d 1474 * Initially an __init data area is used for the head array and the
ce8eb6c4 1475 * kmem_cache_node structures, it's replaced with a kmalloc allocated
e498be7d 1476 * array at the end of the bootstrap.
1da177e4 1477 * 2) Create the first kmalloc cache.
343e0d7a 1478 * The struct kmem_cache for the new cache is allocated normally.
e498be7d
CL
1479 * An __init data area is used for the head array.
1480 * 3) Create the remaining kmalloc caches, with minimally sized
1481 * head arrays.
9b030cb8 1482 * 4) Replace the __init data head arrays for kmem_cache and the first
1da177e4 1483 * kmalloc cache with kmalloc allocated arrays.
ce8eb6c4 1484 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
e498be7d
CL
1485 * the other cache's with kmalloc allocated memory.
1486 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1da177e4
LT
1487 */
1488
9b030cb8 1489 /* 1) create the kmem_cache */
1da177e4 1490
8da3430d 1491 /*
b56efcf0 1492 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
8da3430d 1493 */
2f9baa9f
CL
1494 create_boot_cache(kmem_cache, "kmem_cache",
1495 offsetof(struct kmem_cache, array[nr_cpu_ids]) +
6744f087 1496 nr_node_ids * sizeof(struct kmem_cache_node *),
2f9baa9f
CL
1497 SLAB_HWCACHE_ALIGN);
1498 list_add(&kmem_cache->list, &slab_caches);
1da177e4
LT
1499
1500 /* 2+3) create the kmalloc caches */
1da177e4 1501
a737b3e2
AM
1502 /*
1503 * Initialize the caches that provide memory for the array cache and the
ce8eb6c4 1504 * kmem_cache_node structures first. Without this, further allocations will
a737b3e2 1505 * bug.
e498be7d
CL
1506 */
1507
e3366016
CL
1508 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
1509 kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
45530c44 1510
ce8eb6c4
CL
1511 if (INDEX_AC != INDEX_NODE)
1512 kmalloc_caches[INDEX_NODE] =
1513 create_kmalloc_cache("kmalloc-node",
1514 kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
e498be7d 1515
e0a42726
IM
1516 slab_early_init = 0;
1517
1da177e4
LT
1518 /* 4) Replace the bootstrap head arrays */
1519 {
2b2d5493 1520 struct array_cache *ptr;
e498be7d 1521
83b519e8 1522 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
e498be7d 1523
9b030cb8 1524 memcpy(ptr, cpu_cache_get(kmem_cache),
b28a02de 1525 sizeof(struct arraycache_init));
2b2d5493
IM
1526 /*
1527 * Do not assume that spinlocks can be initialized via memcpy:
1528 */
1529 spin_lock_init(&ptr->lock);
1530
9b030cb8 1531 kmem_cache->array[smp_processor_id()] = ptr;
e498be7d 1532
83b519e8 1533 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
e498be7d 1534
e3366016 1535 BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
b28a02de 1536 != &initarray_generic.cache);
e3366016 1537 memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
b28a02de 1538 sizeof(struct arraycache_init));
2b2d5493
IM
1539 /*
1540 * Do not assume that spinlocks can be initialized via memcpy:
1541 */
1542 spin_lock_init(&ptr->lock);
1543
e3366016 1544 kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
1da177e4 1545 }
ce8eb6c4 1546 /* 5) Replace the bootstrap kmem_cache_node */
e498be7d 1547 {
1ca4cb24
PE
1548 int nid;
1549
9c09a95c 1550 for_each_online_node(nid) {
ce8eb6c4 1551 init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
556a169d 1552
e3366016 1553 init_list(kmalloc_caches[INDEX_AC],
ce8eb6c4 1554 &init_kmem_cache_node[SIZE_AC + nid], nid);
e498be7d 1555
ce8eb6c4
CL
1556 if (INDEX_AC != INDEX_NODE) {
1557 init_list(kmalloc_caches[INDEX_NODE],
1558 &init_kmem_cache_node[SIZE_NODE + nid], nid);
e498be7d
CL
1559 }
1560 }
1561 }
1da177e4 1562
f97d5f63 1563 create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
8429db5c
PE
1564}
1565
1566void __init kmem_cache_init_late(void)
1567{
1568 struct kmem_cache *cachep;
1569
97d06609 1570 slab_state = UP;
52cef189 1571
8429db5c 1572 /* 6) resize the head arrays to their final sizes */
18004c5d
CL
1573 mutex_lock(&slab_mutex);
1574 list_for_each_entry(cachep, &slab_caches, list)
8429db5c
PE
1575 if (enable_cpucache(cachep, GFP_NOWAIT))
1576 BUG();
18004c5d 1577 mutex_unlock(&slab_mutex);
056c6241 1578
947ca185
MW
1579 /* Annotate slab for lockdep -- annotate the malloc caches */
1580 init_lock_keys();
1581
97d06609
CL
1582 /* Done! */
1583 slab_state = FULL;
1584
a737b3e2
AM
1585 /*
1586 * Register a cpu startup notifier callback that initializes
1587 * cpu_cache_get for all new cpus
1da177e4
LT
1588 */
1589 register_cpu_notifier(&cpucache_notifier);
1da177e4 1590
8f9f8d9e
DR
1591#ifdef CONFIG_NUMA
1592 /*
1593 * Register a memory hotplug callback that initializes and frees
6a67368c 1594 * node.
8f9f8d9e
DR
1595 */
1596 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1597#endif
1598
a737b3e2
AM
1599 /*
1600 * The reap timers are started later, with a module init call: That part
1601 * of the kernel is not yet operational.
1da177e4
LT
1602 */
1603}
1604
1605static int __init cpucache_init(void)
1606{
1607 int cpu;
1608
a737b3e2
AM
1609 /*
1610 * Register the timers that return unneeded pages to the page allocator
1da177e4 1611 */
e498be7d 1612 for_each_online_cpu(cpu)
a737b3e2 1613 start_cpu_timer(cpu);
a164f896
GC
1614
1615 /* Done! */
97d06609 1616 slab_state = FULL;
1da177e4
LT
1617 return 0;
1618}
1da177e4
LT
1619__initcall(cpucache_init);
1620
8bdec192
RA
1621static noinline void
1622slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1623{
ce8eb6c4 1624 struct kmem_cache_node *n;
8456a648 1625 struct page *page;
8bdec192
RA
1626 unsigned long flags;
1627 int node;
1628
1629 printk(KERN_WARNING
1630 "SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1631 nodeid, gfpflags);
1632 printk(KERN_WARNING " cache: %s, object size: %d, order: %d\n",
3b0efdfa 1633 cachep->name, cachep->size, cachep->gfporder);
8bdec192
RA
1634
1635 for_each_online_node(node) {
1636 unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
1637 unsigned long active_slabs = 0, num_slabs = 0;
1638
ce8eb6c4
CL
1639 n = cachep->node[node];
1640 if (!n)
8bdec192
RA
1641 continue;
1642
ce8eb6c4 1643 spin_lock_irqsave(&n->list_lock, flags);
8456a648 1644 list_for_each_entry(page, &n->slabs_full, lru) {
8bdec192
RA
1645 active_objs += cachep->num;
1646 active_slabs++;
1647 }
8456a648
JK
1648 list_for_each_entry(page, &n->slabs_partial, lru) {
1649 active_objs += page->active;
8bdec192
RA
1650 active_slabs++;
1651 }
8456a648 1652 list_for_each_entry(page, &n->slabs_free, lru)
8bdec192
RA
1653 num_slabs++;
1654
ce8eb6c4
CL
1655 free_objects += n->free_objects;
1656 spin_unlock_irqrestore(&n->list_lock, flags);
8bdec192
RA
1657
1658 num_slabs += active_slabs;
1659 num_objs = num_slabs * cachep->num;
1660 printk(KERN_WARNING
1661 " node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
1662 node, active_slabs, num_slabs, active_objs, num_objs,
1663 free_objects);
1664 }
1665}
1666
1da177e4
LT
1667/*
1668 * Interface to system's page allocator. No need to hold the cache-lock.
1669 *
1670 * If we requested dmaable memory, we will get it. Even if we
1671 * did not request dmaable memory, we might get it, but that
1672 * would be relatively rare and ignorable.
1673 */
0c3aa83e
JK
1674static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1675 int nodeid)
1da177e4
LT
1676{
1677 struct page *page;
e1b6aa6f 1678 int nr_pages;
765c4507 1679
a618e89f 1680 flags |= cachep->allocflags;
e12ba74d
MG
1681 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1682 flags |= __GFP_RECLAIMABLE;
e1b6aa6f 1683
517d0869 1684 page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
8bdec192
RA
1685 if (!page) {
1686 if (!(flags & __GFP_NOWARN) && printk_ratelimit())
1687 slab_out_of_memory(cachep, flags, nodeid);
1da177e4 1688 return NULL;
8bdec192 1689 }
1da177e4 1690
b37f1dd0 1691 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
072bb0aa
MG
1692 if (unlikely(page->pfmemalloc))
1693 pfmemalloc_active = true;
1694
e1b6aa6f 1695 nr_pages = (1 << cachep->gfporder);
1da177e4 1696 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
972d1a7b
CL
1697 add_zone_page_state(page_zone(page),
1698 NR_SLAB_RECLAIMABLE, nr_pages);
1699 else
1700 add_zone_page_state(page_zone(page),
1701 NR_SLAB_UNRECLAIMABLE, nr_pages);
a57a4988
JK
1702 __SetPageSlab(page);
1703 if (page->pfmemalloc)
1704 SetPageSlabPfmemalloc(page);
1f458cbf 1705 memcg_bind_pages(cachep, cachep->gfporder);
072bb0aa 1706
b1eeab67
VN
1707 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1708 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1709
1710 if (cachep->ctor)
1711 kmemcheck_mark_uninitialized_pages(page, nr_pages);
1712 else
1713 kmemcheck_mark_unallocated_pages(page, nr_pages);
1714 }
c175eea4 1715
0c3aa83e 1716 return page;
1da177e4
LT
1717}
1718
1719/*
1720 * Interface to system's page release.
1721 */
0c3aa83e 1722static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1da177e4 1723{
a57a4988 1724 const unsigned long nr_freed = (1 << cachep->gfporder);
1da177e4 1725
b1eeab67 1726 kmemcheck_free_shadow(page, cachep->gfporder);
c175eea4 1727
972d1a7b
CL
1728 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1729 sub_zone_page_state(page_zone(page),
1730 NR_SLAB_RECLAIMABLE, nr_freed);
1731 else
1732 sub_zone_page_state(page_zone(page),
1733 NR_SLAB_UNRECLAIMABLE, nr_freed);
73293c2f 1734
a57a4988 1735 BUG_ON(!PageSlab(page));
73293c2f 1736 __ClearPageSlabPfmemalloc(page);
a57a4988 1737 __ClearPageSlab(page);
8456a648
JK
1738 page_mapcount_reset(page);
1739 page->mapping = NULL;
1f458cbf
GC
1740
1741 memcg_release_pages(cachep, cachep->gfporder);
1da177e4
LT
1742 if (current->reclaim_state)
1743 current->reclaim_state->reclaimed_slab += nr_freed;
0c3aa83e 1744 __free_memcg_kmem_pages(page, cachep->gfporder);
1da177e4
LT
1745}
1746
1747static void kmem_rcu_free(struct rcu_head *head)
1748{
68126702
JK
1749 struct kmem_cache *cachep;
1750 struct page *page;
1da177e4 1751
68126702
JK
1752 page = container_of(head, struct page, rcu_head);
1753 cachep = page->slab_cache;
1754
1755 kmem_freepages(cachep, page);
1da177e4
LT
1756}
1757
1758#if DEBUG
1759
1760#ifdef CONFIG_DEBUG_PAGEALLOC
343e0d7a 1761static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
b28a02de 1762 unsigned long caller)
1da177e4 1763{
8c138bc0 1764 int size = cachep->object_size;
1da177e4 1765
3dafccf2 1766 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1da177e4 1767
b28a02de 1768 if (size < 5 * sizeof(unsigned long))
1da177e4
LT
1769 return;
1770
b28a02de
PE
1771 *addr++ = 0x12345678;
1772 *addr++ = caller;
1773 *addr++ = smp_processor_id();
1774 size -= 3 * sizeof(unsigned long);
1da177e4
LT
1775 {
1776 unsigned long *sptr = &caller;
1777 unsigned long svalue;
1778
1779 while (!kstack_end(sptr)) {
1780 svalue = *sptr++;
1781 if (kernel_text_address(svalue)) {
b28a02de 1782 *addr++ = svalue;
1da177e4
LT
1783 size -= sizeof(unsigned long);
1784 if (size <= sizeof(unsigned long))
1785 break;
1786 }
1787 }
1788
1789 }
b28a02de 1790 *addr++ = 0x87654321;
1da177e4
LT
1791}
1792#endif
1793
343e0d7a 1794static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1da177e4 1795{
8c138bc0 1796 int size = cachep->object_size;
3dafccf2 1797 addr = &((char *)addr)[obj_offset(cachep)];
1da177e4
LT
1798
1799 memset(addr, val, size);
b28a02de 1800 *(unsigned char *)(addr + size - 1) = POISON_END;
1da177e4
LT
1801}
1802
1803static void dump_line(char *data, int offset, int limit)
1804{
1805 int i;
aa83aa40
DJ
1806 unsigned char error = 0;
1807 int bad_count = 0;
1808
fdde6abb 1809 printk(KERN_ERR "%03x: ", offset);
aa83aa40
DJ
1810 for (i = 0; i < limit; i++) {
1811 if (data[offset + i] != POISON_FREE) {
1812 error = data[offset + i];
1813 bad_count++;
1814 }
aa83aa40 1815 }
fdde6abb
SAS
1816 print_hex_dump(KERN_CONT, "", 0, 16, 1,
1817 &data[offset], limit, 1);
aa83aa40
DJ
1818
1819 if (bad_count == 1) {
1820 error ^= POISON_FREE;
1821 if (!(error & (error - 1))) {
1822 printk(KERN_ERR "Single bit error detected. Probably "
1823 "bad RAM.\n");
1824#ifdef CONFIG_X86
1825 printk(KERN_ERR "Run memtest86+ or a similar memory "
1826 "test tool.\n");
1827#else
1828 printk(KERN_ERR "Run a memory test tool.\n");
1829#endif
1830 }
1831 }
1da177e4
LT
1832}
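
The bad_count == 1 branch above relies on a power-of-two test: a byte with a single flipped bit XORed against the expected poison value leaves exactly one bit set. A minimal standalone sketch of that check (userspace C; the helper name and sample bytes are made up for illustration):

#include <stdio.h>

#define POISON_FREE	0x6b	/* expected value in freed, poisoned memory */

static int is_single_bit_error(unsigned char observed)
{
	unsigned char diff = observed ^ POISON_FREE;

	/* diff is a power of two iff exactly one bit differs */
	return diff && !(diff & (diff - 1));
}

int main(void)
{
	printf("%d\n", is_single_bit_error(POISON_FREE ^ 0x08));	/* 1: one flipped bit */
	printf("%d\n", is_single_bit_error(0x00));			/* 0: many bits differ */
	return 0;
}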
1833#endif
1834
1835#if DEBUG
1836
343e0d7a 1837static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1da177e4
LT
1838{
1839 int i, size;
1840 char *realobj;
1841
1842 if (cachep->flags & SLAB_RED_ZONE) {
b46b8f19 1843 printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
a737b3e2
AM
1844 *dbg_redzone1(cachep, objp),
1845 *dbg_redzone2(cachep, objp));
1da177e4
LT
1846 }
1847
1848 if (cachep->flags & SLAB_STORE_USER) {
071361d3
JP
1849 printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
1850 *dbg_userword(cachep, objp),
1851 *dbg_userword(cachep, objp));
1da177e4 1852 }
3dafccf2 1853 realobj = (char *)objp + obj_offset(cachep);
8c138bc0 1854 size = cachep->object_size;
b28a02de 1855 for (i = 0; i < size && lines; i += 16, lines--) {
1da177e4
LT
1856 int limit;
1857 limit = 16;
b28a02de
PE
1858 if (i + limit > size)
1859 limit = size - i;
1da177e4
LT
1860 dump_line(realobj, i, limit);
1861 }
1862}
1863
343e0d7a 1864static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1da177e4
LT
1865{
1866 char *realobj;
1867 int size, i;
1868 int lines = 0;
1869
3dafccf2 1870 realobj = (char *)objp + obj_offset(cachep);
8c138bc0 1871 size = cachep->object_size;
1da177e4 1872
b28a02de 1873 for (i = 0; i < size; i++) {
1da177e4 1874 char exp = POISON_FREE;
b28a02de 1875 if (i == size - 1)
1da177e4
LT
1876 exp = POISON_END;
1877 if (realobj[i] != exp) {
1878 int limit;
1879 /* Mismatch ! */
1880 /* Print header */
1881 if (lines == 0) {
b28a02de 1882 printk(KERN_ERR
face37f5
DJ
1883 "Slab corruption (%s): %s start=%p, len=%d\n",
1884 print_tainted(), cachep->name, realobj, size);
1da177e4
LT
1885 print_objinfo(cachep, objp, 0);
1886 }
1887 /* Hexdump the affected line */
b28a02de 1888 i = (i / 16) * 16;
1da177e4 1889 limit = 16;
b28a02de
PE
1890 if (i + limit > size)
1891 limit = size - i;
1da177e4
LT
1892 dump_line(realobj, i, limit);
1893 i += 16;
1894 lines++;
1895 /* Limit to 5 lines */
1896 if (lines > 5)
1897 break;
1898 }
1899 }
1900 if (lines != 0) {
1901 /* Print some data about the neighboring objects, if they
1902 * exist:
1903 */
8456a648 1904 struct page *page = virt_to_head_page(objp);
8fea4e96 1905 unsigned int objnr;
1da177e4 1906
8456a648 1907 objnr = obj_to_index(cachep, page, objp);
1da177e4 1908 if (objnr) {
8456a648 1909 objp = index_to_obj(cachep, page, objnr - 1);
3dafccf2 1910 realobj = (char *)objp + obj_offset(cachep);
1da177e4 1911 printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
b28a02de 1912 realobj, size);
1da177e4
LT
1913 print_objinfo(cachep, objp, 2);
1914 }
b28a02de 1915 if (objnr + 1 < cachep->num) {
8456a648 1916 objp = index_to_obj(cachep, page, objnr + 1);
3dafccf2 1917 realobj = (char *)objp + obj_offset(cachep);
1da177e4 1918 printk(KERN_ERR "Next obj: start=%p, len=%d\n",
b28a02de 1919 realobj, size);
1da177e4
LT
1920 print_objinfo(cachep, objp, 2);
1921 }
1922 }
1923}
1924#endif
1925
12dd36fa 1926#if DEBUG
8456a648
JK
1927static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1928 struct page *page)
1da177e4 1929{
1da177e4
LT
1930 int i;
1931 for (i = 0; i < cachep->num; i++) {
8456a648 1932 void *objp = index_to_obj(cachep, page, i);
1da177e4
LT
1933
1934 if (cachep->flags & SLAB_POISON) {
1935#ifdef CONFIG_DEBUG_PAGEALLOC
3b0efdfa 1936 if (cachep->size % PAGE_SIZE == 0 &&
a737b3e2 1937 OFF_SLAB(cachep))
b28a02de 1938 kernel_map_pages(virt_to_page(objp),
3b0efdfa 1939 cachep->size / PAGE_SIZE, 1);
1da177e4
LT
1940 else
1941 check_poison_obj(cachep, objp);
1942#else
1943 check_poison_obj(cachep, objp);
1944#endif
1945 }
1946 if (cachep->flags & SLAB_RED_ZONE) {
1947 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1948 slab_error(cachep, "start of a freed object "
b28a02de 1949 "was overwritten");
1da177e4
LT
1950 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1951 slab_error(cachep, "end of a freed object "
b28a02de 1952 "was overwritten");
1da177e4 1953 }
1da177e4 1954 }
12dd36fa 1955}
1da177e4 1956#else
8456a648
JK
1957static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1958 struct page *page)
12dd36fa 1959{
12dd36fa 1960}
1da177e4
LT
1961#endif
1962
911851e6
RD
1963/**
1964 * slab_destroy - destroy and release all objects in a slab
1965 * @cachep: cache pointer being destroyed
cb8ee1a3 1966 * @page: page pointer being destroyed
911851e6 1967 *
12dd36fa 1968 * Destroy all the objs in a slab, and release the mem back to the system.
a737b3e2
AM
1969 * Before calling the slab must have been unlinked from the cache. The
1970 * cache-lock is not held/needed.
12dd36fa 1971 */
8456a648 1972static void slab_destroy(struct kmem_cache *cachep, struct page *page)
12dd36fa 1973{
7e007355 1974 void *freelist;
12dd36fa 1975
8456a648
JK
1976 freelist = page->freelist;
1977 slab_destroy_debugcheck(cachep, page);
1da177e4 1978 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
68126702
JK
1979 struct rcu_head *head;
1980
1981 /*
1982 * RCU free overloads the RCU head over the LRU.
1983 * slab_page has been overloaded over the LRU,
1984 * however it is not used from this point on, so
1985 * we can use it safely.
1986 */
1987 head = (void *)&page->rcu_head;
1988 call_rcu(head, kmem_rcu_free);
1da177e4 1989
1da177e4 1990 } else {
0c3aa83e 1991 kmem_freepages(cachep, page);
1da177e4 1992 }
68126702
JK
1993
1994 /*
8456a648 1995 * From now on, we don't use freelist
68126702
JK
1996 * although actual page can be freed in rcu context
1997 */
1998 if (OFF_SLAB(cachep))
8456a648 1999 kmem_cache_free(cachep->freelist_cache, freelist);
1da177e4
LT
2000}
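
slab_destroy() above defers the page release through an rcu_head embedded in struct page; the callback only receives that head and recovers the page with container_of(). A minimal userspace model of the pattern, with made-up types (cb_head, fake_page) standing in for rcu_head and struct page:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct cb_head {
	void (*func)(struct cb_head *head);
};

struct fake_page {
	int order;
	struct cb_head rcu_head;	/* embedded, like page->rcu_head */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void fake_kmem_rcu_free(struct cb_head *head)
{
	struct fake_page *page = container_of(head, struct fake_page, rcu_head);

	printf("freeing order-%d slab after the grace period\n", page->order);
	free(page);
}

int main(void)
{
	struct fake_page *page = malloc(sizeof(*page));

	page->order = 1;
	page->rcu_head.func = fake_kmem_rcu_free;
	/* a real call_rcu() would run this only after a grace period */
	page->rcu_head.func(&page->rcu_head);
	return 0;
}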
2001
4d268eba 2002/**
a70773dd
RD
2003 * calculate_slab_order - calculate size (page order) of slabs
2004 * @cachep: pointer to the cache that is being created
2005 * @size: size of objects to be created in this cache.
2006 * @align: required alignment for the objects.
2007 * @flags: slab allocation flags
2008 *
2009 * Also calculates the number of objects per slab.
4d268eba
PE
2010 *
2011 * This could be made much more intelligent. For now, try to avoid using
2012 * high order pages for slabs. When the gfp() functions are more friendly
2013 * towards high-order requests, this should be changed.
2014 */
a737b3e2 2015static size_t calculate_slab_order(struct kmem_cache *cachep,
ee13d785 2016 size_t size, size_t align, unsigned long flags)
4d268eba 2017{
b1ab41c4 2018 unsigned long offslab_limit;
4d268eba 2019 size_t left_over = 0;
9888e6fa 2020 int gfporder;
4d268eba 2021
0aa817f0 2022 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
4d268eba
PE
2023 unsigned int num;
2024 size_t remainder;
2025
9888e6fa 2026 cache_estimate(gfporder, size, align, flags, &remainder, &num);
4d268eba
PE
2027 if (!num)
2028 continue;
9888e6fa 2029
f315e3fa
JK
2030 /* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
2031 if (num > SLAB_OBJ_MAX_NUM)
2032 break;
2033
b1ab41c4
IM
2034 if (flags & CFLGS_OFF_SLAB) {
2035 /*
2036 * Max number of objs-per-slab for caches which
2037 * use off-slab slabs. Needed to avoid a possible
2038 * looping condition in cache_grow().
2039 */
8456a648 2040 offslab_limit = size;
a41adfaa 2041 offslab_limit /= sizeof(freelist_idx_t);
b1ab41c4
IM
2042
2043 if (num > offslab_limit)
2044 break;
2045 }
4d268eba 2046
9888e6fa 2047 /* Found something acceptable - save it away */
4d268eba 2048 cachep->num = num;
9888e6fa 2049 cachep->gfporder = gfporder;
4d268eba
PE
2050 left_over = remainder;
2051
f78bb8ad
LT
2052 /*
2053 * A VFS-reclaimable slab tends to have most allocations
2054 * as GFP_NOFS and we really don't want to have to be allocating
2055 * higher-order pages when we are unable to shrink dcache.
2056 */
2057 if (flags & SLAB_RECLAIM_ACCOUNT)
2058 break;
2059
4d268eba
PE
2060 /*
2061 * Large number of objects is good, but very large slabs are
2062 * currently bad for the gfp()s.
2063 */
543585cc 2064 if (gfporder >= slab_max_order)
4d268eba
PE
2065 break;
2066
9888e6fa
LT
2067 /*
2068 * Acceptable internal fragmentation?
2069 */
a737b3e2 2070 if (left_over * 8 <= (PAGE_SIZE << gfporder))
4d268eba
PE
2071 break;
2072 }
2073 return left_over;
2074}
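
calculate_slab_order() walks increasing page orders until the leftover space is at most 1/8 of the slab. A rough standalone model of that loop, ignoring management overhead, the off-slab limit and the reclaim/slab_max_order early exits (PAGE_SIZE, MAX_ORDER and pick_order are illustrative):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define MAX_ORDER	3	/* stand-in for slab_max_order */

static int pick_order(unsigned long size, unsigned long *objs)
{
	int order;

	for (order = 0; order <= MAX_ORDER; order++) {
		unsigned long slab = PAGE_SIZE << order;
		unsigned long num = slab / size;
		unsigned long left_over = slab - num * size;

		if (!num)
			continue;
		*objs = num;
		/* acceptable internal fragmentation? (same 1/8 rule as above) */
		if (left_over * 8 <= slab)
			return order;
	}
	return MAX_ORDER;
}

int main(void)
{
	unsigned long objs = 0;
	int order = pick_order(700, &objs);

	printf("order %d, %lu objects per slab\n", order, objs);
	return 0;
}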
2075
83b519e8 2076static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
f30cf7d1 2077{
97d06609 2078 if (slab_state >= FULL)
83b519e8 2079 return enable_cpucache(cachep, gfp);
2ed3a4ef 2080
97d06609 2081 if (slab_state == DOWN) {
f30cf7d1 2082 /*
2f9baa9f 2083 * Note: Creation of first cache (kmem_cache).
ce8eb6c4 2084 * The setup_node is taken care
2f9baa9f
CL
2085 * of by the caller of __kmem_cache_create
2086 */
2087 cachep->array[smp_processor_id()] = &initarray_generic.cache;
2088 slab_state = PARTIAL;
2089 } else if (slab_state == PARTIAL) {
2090 /*
2091 * Note: the second kmem_cache_create must create the cache
f30cf7d1
PE
2092 * that's used by kmalloc(24), otherwise the creation of
2093 * further caches will BUG().
2094 */
2095 cachep->array[smp_processor_id()] = &initarray_generic.cache;
2096
2097 /*
ce8eb6c4
CL
2098 * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
2099 * the second cache, then we need to set up all its per-node structures,
f30cf7d1
PE
2100 * otherwise the creation of further caches will BUG().
2101 */
ce8eb6c4
CL
2102 set_up_node(cachep, SIZE_AC);
2103 if (INDEX_AC == INDEX_NODE)
2104 slab_state = PARTIAL_NODE;
f30cf7d1 2105 else
97d06609 2106 slab_state = PARTIAL_ARRAYCACHE;
f30cf7d1 2107 } else {
2f9baa9f 2108 /* Remaining boot caches */
f30cf7d1 2109 cachep->array[smp_processor_id()] =
83b519e8 2110 kmalloc(sizeof(struct arraycache_init), gfp);
f30cf7d1 2111
97d06609 2112 if (slab_state == PARTIAL_ARRAYCACHE) {
ce8eb6c4
CL
2113 set_up_node(cachep, SIZE_NODE);
2114 slab_state = PARTIAL_NODE;
f30cf7d1
PE
2115 } else {
2116 int node;
556a169d 2117 for_each_online_node(node) {
6a67368c 2118 cachep->node[node] =
6744f087 2119 kmalloc_node(sizeof(struct kmem_cache_node),
eb91f1d0 2120 gfp, node);
6a67368c 2121 BUG_ON(!cachep->node[node]);
ce8eb6c4 2122 kmem_cache_node_init(cachep->node[node]);
f30cf7d1
PE
2123 }
2124 }
2125 }
6a67368c 2126 cachep->node[numa_mem_id()]->next_reap =
5f0985bb
JZ
2127 jiffies + REAPTIMEOUT_NODE +
2128 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
f30cf7d1
PE
2129
2130 cpu_cache_get(cachep)->avail = 0;
2131 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2132 cpu_cache_get(cachep)->batchcount = 1;
2133 cpu_cache_get(cachep)->touched = 0;
2134 cachep->batchcount = 1;
2135 cachep->limit = BOOT_CPUCACHE_ENTRIES;
2ed3a4ef 2136 return 0;
f30cf7d1
PE
2137}
2138
1da177e4 2139/**
039363f3 2140 * __kmem_cache_create - Create a cache.
a755b76a 2141 * @cachep: cache management descriptor
1da177e4 2142 * @flags: SLAB flags
1da177e4
LT
2143 *
2144 * Returns 0 on success, a negative error code on failure.
2145 * Cannot be called within an interrupt, but can be interrupted.
20c2df83 2146 * The @ctor is run when new pages are allocated by the cache.
1da177e4 2147 *
1da177e4
LT
2148 * The flags are
2149 *
2150 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2151 * to catch references to uninitialised memory.
2152 *
2153 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2154 * for buffer overruns.
2155 *
1da177e4
LT
2156 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2157 * cacheline. This can be beneficial if you're counting cycles as closely
2158 * as davem.
2159 */
278b1bb1 2160int
8a13a4cc 2161__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
1da177e4 2162{
8456a648 2163 size_t left_over, freelist_size, ralign;
83b519e8 2164 gfp_t gfp;
278b1bb1 2165 int err;
8a13a4cc 2166 size_t size = cachep->size;
1da177e4 2167
1da177e4 2168#if DEBUG
1da177e4
LT
2169#if FORCED_DEBUG
2170 /*
2171 * Enable redzoning and last user accounting, except for caches with
2172 * large objects, if the increased size would increase the object size
2173 * above the next power of two: caches with object sizes just above a
2174 * power of two have a significant amount of internal fragmentation.
2175 */
87a927c7
DW
2176 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2177 2 * sizeof(unsigned long long)))
b28a02de 2178 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1da177e4
LT
2179 if (!(flags & SLAB_DESTROY_BY_RCU))
2180 flags |= SLAB_POISON;
2181#endif
2182 if (flags & SLAB_DESTROY_BY_RCU)
2183 BUG_ON(flags & SLAB_POISON);
2184#endif
1da177e4 2185
a737b3e2
AM
2186 /*
2187 * Check that size is in terms of words. This is needed to avoid
1da177e4
LT
2188 * unaligned accesses for some archs when redzoning is used, and makes
2189 * sure any on-slab bufctl's are also correctly aligned.
2190 */
b28a02de
PE
2191 if (size & (BYTES_PER_WORD - 1)) {
2192 size += (BYTES_PER_WORD - 1);
2193 size &= ~(BYTES_PER_WORD - 1);
1da177e4
LT
2194 }
2195
ca5f9703 2196 /*
87a927c7
DW
2197 * Redzoning and user store require word alignment or possibly larger.
2198 * Note this will be overridden by architecture or caller mandated
2199 * alignment if either is greater than BYTES_PER_WORD.
ca5f9703 2200 */
87a927c7
DW
2201 if (flags & SLAB_STORE_USER)
2202 ralign = BYTES_PER_WORD;
2203
2204 if (flags & SLAB_RED_ZONE) {
2205 ralign = REDZONE_ALIGN;
2206 /* If redzoning, ensure that the second redzone is suitably
2207 * aligned, by adjusting the object size accordingly. */
2208 size += REDZONE_ALIGN - 1;
2209 size &= ~(REDZONE_ALIGN - 1);
2210 }
ca5f9703 2211
a44b56d3 2212 /* 3) caller mandated alignment */
8a13a4cc
CL
2213 if (ralign < cachep->align) {
2214 ralign = cachep->align;
1da177e4 2215 }
3ff84a7f
PE
2216 /* disable debug if necessary */
2217 if (ralign > __alignof__(unsigned long long))
a44b56d3 2218 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
a737b3e2 2219 /*
ca5f9703 2220 * 4) Store it.
1da177e4 2221 */
8a13a4cc 2222 cachep->align = ralign;
1da177e4 2223
83b519e8
PE
2224 if (slab_is_available())
2225 gfp = GFP_KERNEL;
2226 else
2227 gfp = GFP_NOWAIT;
2228
6a67368c 2229 setup_node_pointer(cachep);
1da177e4 2230#if DEBUG
1da177e4 2231
ca5f9703
PE
2232 /*
2233 * Both debugging options require word-alignment which is calculated
2234 * into align above.
2235 */
1da177e4 2236 if (flags & SLAB_RED_ZONE) {
1da177e4 2237 /* add space for red zone words */
3ff84a7f
PE
2238 cachep->obj_offset += sizeof(unsigned long long);
2239 size += 2 * sizeof(unsigned long long);
1da177e4
LT
2240 }
2241 if (flags & SLAB_STORE_USER) {
ca5f9703 2242 /* user store requires one word storage behind the end of
87a927c7
DW
2243 * the real object. But if the second red zone needs to be
2244 * aligned to 64 bits, we must allow that much space.
1da177e4 2245 */
87a927c7
DW
2246 if (flags & SLAB_RED_ZONE)
2247 size += REDZONE_ALIGN;
2248 else
2249 size += BYTES_PER_WORD;
1da177e4
LT
2250 }
2251#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
ce8eb6c4 2252 if (size >= kmalloc_size(INDEX_NODE + 1)
608da7e3
TH
2253 && cachep->object_size > cache_line_size()
2254 && ALIGN(size, cachep->align) < PAGE_SIZE) {
2255 cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
1da177e4
LT
2256 size = PAGE_SIZE;
2257 }
2258#endif
2259#endif
2260
e0a42726
IM
2261 /*
2262 * Determine if the slab management is 'on' or 'off' slab.
2263 * (bootstrapping cannot cope with offslab caches so don't do
e7cb55b9
CM
2264 * it too early on. Always use on-slab management when
2265 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
e0a42726 2266 */
8fc9cf42 2267 if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init &&
e7cb55b9 2268 !(flags & SLAB_NOLEAKTRACE))
1da177e4
LT
2269 /*
2270 * Size is large, assume best to place the slab management obj
2271 * off-slab (should allow better packing of objs).
2272 */
2273 flags |= CFLGS_OFF_SLAB;
2274
8a13a4cc 2275 size = ALIGN(size, cachep->align);
f315e3fa
JK
2276 /*
2277 * We should restrict the number of objects in a slab to implement
2278 * a byte-sized index. Refer to the comment on the SLAB_OBJ_MIN_SIZE definition.
2279 */
2280 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2281 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
1da177e4 2282
8a13a4cc 2283 left_over = calculate_slab_order(cachep, size, cachep->align, flags);
1da177e4 2284
8a13a4cc 2285 if (!cachep->num)
278b1bb1 2286 return -E2BIG;
1da177e4 2287
8456a648 2288 freelist_size =
a41adfaa 2289 ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
1da177e4
LT
2290
2291 /*
2292 * If the slab has been placed off-slab, and we have enough space then
2293 * move it on-slab. This is at the expense of any extra colouring.
2294 */
8456a648 2295 if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
1da177e4 2296 flags &= ~CFLGS_OFF_SLAB;
8456a648 2297 left_over -= freelist_size;
1da177e4
LT
2298 }
2299
2300 if (flags & CFLGS_OFF_SLAB) {
2301 /* really off slab. No need for manual alignment */
a41adfaa 2302 freelist_size = cachep->num * sizeof(freelist_idx_t);
67461365
RL
2303
2304#ifdef CONFIG_PAGE_POISONING
2305 /* If we're going to use the generic kernel_map_pages()
2306 * poisoning, then it's going to smash the contents of
2307 * the redzone and userword anyhow, so switch them off.
2308 */
2309 if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2310 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2311#endif
1da177e4
LT
2312 }
2313
2314 cachep->colour_off = cache_line_size();
2315 /* Offset must be a multiple of the alignment. */
8a13a4cc
CL
2316 if (cachep->colour_off < cachep->align)
2317 cachep->colour_off = cachep->align;
b28a02de 2318 cachep->colour = left_over / cachep->colour_off;
8456a648 2319 cachep->freelist_size = freelist_size;
1da177e4 2320 cachep->flags = flags;
a57a4988 2321 cachep->allocflags = __GFP_COMP;
4b51d669 2322 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
a618e89f 2323 cachep->allocflags |= GFP_DMA;
3b0efdfa 2324 cachep->size = size;
6a2d7a95 2325 cachep->reciprocal_buffer_size = reciprocal_value(size);
1da177e4 2326
e5ac9c5a 2327 if (flags & CFLGS_OFF_SLAB) {
8456a648 2328 cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
e5ac9c5a 2329 /*
5f0985bb 2330 * This is a possibility for one of the kmalloc_{dma,}_caches.
e5ac9c5a 2331 * But since we go off slab only for object size greater than
5f0985bb
JZ
2332 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
2333 * in ascending order, this should not happen at all.
e5ac9c5a
RT
2334 * But leave a BUG_ON for some lucky dude.
2335 */
8456a648 2336 BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
e5ac9c5a 2337 }
1da177e4 2338
278b1bb1
CL
2339 err = setup_cpu_cache(cachep, gfp);
2340 if (err) {
12c3667f 2341 __kmem_cache_shutdown(cachep);
278b1bb1 2342 return err;
2ed3a4ef 2343 }
1da177e4 2344
83835b3d
PZ
2345 if (flags & SLAB_DEBUG_OBJECTS) {
2346 /*
2347 * Would deadlock through slab_destroy()->call_rcu()->
2348 * debug_object_activate()->kmem_cache_alloc().
2349 */
2350 WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
2351
2352 slab_set_debugobj_lock_classes(cachep);
6ccfb5bc
GC
2353 } else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
2354 on_slab_lock_classes(cachep);
83835b3d 2355
278b1bb1 2356 return 0;
1da177e4 2357}
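
Part of the setup above decides where the freelist lives: sufficiently large objects start out off-slab (the size >= PAGE_SIZE >> 5 test), the freelist is pulled back on-slab if the leftover space can hold it, and whatever is still left feeds the colour count. A simplified standalone sketch of that arithmetic, assuming one index byte per object and ignoring debug padding:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

typedef unsigned char freelist_idx_t;	/* one index byte per object here */

int main(void)
{
	unsigned long size = 600, align = 8, num = 6;	/* illustrative cache */
	unsigned long left_over = PAGE_SIZE - num * size;
	int off_slab = size >= (PAGE_SIZE >> 5);	/* initial guess */
	unsigned long freelist_size = ALIGN_UP(num * sizeof(freelist_idx_t), align);

	if (off_slab && left_over >= freelist_size) {
		off_slab = 0;			/* keep it on-slab after all */
		left_over -= freelist_size;
	}
	printf("freelist %lu bytes, %s-slab, %lu bytes left for colouring\n",
	       freelist_size, off_slab ? "off" : "on", left_over);
	return 0;
}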
1da177e4
LT
2358
2359#if DEBUG
2360static void check_irq_off(void)
2361{
2362 BUG_ON(!irqs_disabled());
2363}
2364
2365static void check_irq_on(void)
2366{
2367 BUG_ON(irqs_disabled());
2368}
2369
343e0d7a 2370static void check_spinlock_acquired(struct kmem_cache *cachep)
1da177e4
LT
2371{
2372#ifdef CONFIG_SMP
2373 check_irq_off();
6a67368c 2374 assert_spin_locked(&cachep->node[numa_mem_id()]->list_lock);
1da177e4
LT
2375#endif
2376}
e498be7d 2377
343e0d7a 2378static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
e498be7d
CL
2379{
2380#ifdef CONFIG_SMP
2381 check_irq_off();
6a67368c 2382 assert_spin_locked(&cachep->node[node]->list_lock);
e498be7d
CL
2383#endif
2384}
2385
1da177e4
LT
2386#else
2387#define check_irq_off() do { } while(0)
2388#define check_irq_on() do { } while(0)
2389#define check_spinlock_acquired(x) do { } while(0)
e498be7d 2390#define check_spinlock_acquired_node(x, y) do { } while(0)
1da177e4
LT
2391#endif
2392
ce8eb6c4 2393static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
aab2207c
CL
2394 struct array_cache *ac,
2395 int force, int node);
2396
1da177e4
LT
2397static void do_drain(void *arg)
2398{
a737b3e2 2399 struct kmem_cache *cachep = arg;
1da177e4 2400 struct array_cache *ac;
7d6e6d09 2401 int node = numa_mem_id();
1da177e4
LT
2402
2403 check_irq_off();
9a2dba4b 2404 ac = cpu_cache_get(cachep);
6a67368c 2405 spin_lock(&cachep->node[node]->list_lock);
ff69416e 2406 free_block(cachep, ac->entry, ac->avail, node);
6a67368c 2407 spin_unlock(&cachep->node[node]->list_lock);
1da177e4
LT
2408 ac->avail = 0;
2409}
2410
343e0d7a 2411static void drain_cpu_caches(struct kmem_cache *cachep)
1da177e4 2412{
ce8eb6c4 2413 struct kmem_cache_node *n;
e498be7d
CL
2414 int node;
2415
15c8b6c1 2416 on_each_cpu(do_drain, cachep, 1);
1da177e4 2417 check_irq_on();
b28a02de 2418 for_each_online_node(node) {
ce8eb6c4
CL
2419 n = cachep->node[node];
2420 if (n && n->alien)
2421 drain_alien_cache(cachep, n->alien);
a4523a8b
RD
2422 }
2423
2424 for_each_online_node(node) {
ce8eb6c4
CL
2425 n = cachep->node[node];
2426 if (n)
2427 drain_array(cachep, n, n->shared, 1, node);
e498be7d 2428 }
1da177e4
LT
2429}
2430
ed11d9eb
CL
2431/*
2432 * Remove slabs from the list of free slabs.
2433 * Specify the number of slabs to drain in tofree.
2434 *
2435 * Returns the actual number of slabs released.
2436 */
2437static int drain_freelist(struct kmem_cache *cache,
ce8eb6c4 2438 struct kmem_cache_node *n, int tofree)
1da177e4 2439{
ed11d9eb
CL
2440 struct list_head *p;
2441 int nr_freed;
8456a648 2442 struct page *page;
1da177e4 2443
ed11d9eb 2444 nr_freed = 0;
ce8eb6c4 2445 while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
1da177e4 2446
ce8eb6c4
CL
2447 spin_lock_irq(&n->list_lock);
2448 p = n->slabs_free.prev;
2449 if (p == &n->slabs_free) {
2450 spin_unlock_irq(&n->list_lock);
ed11d9eb
CL
2451 goto out;
2452 }
1da177e4 2453
8456a648 2454 page = list_entry(p, struct page, lru);
1da177e4 2455#if DEBUG
8456a648 2456 BUG_ON(page->active);
1da177e4 2457#endif
8456a648 2458 list_del(&page->lru);
ed11d9eb
CL
2459 /*
2460 * Safe to drop the lock. The slab is no longer linked
2461 * to the cache.
2462 */
ce8eb6c4
CL
2463 n->free_objects -= cache->num;
2464 spin_unlock_irq(&n->list_lock);
8456a648 2465 slab_destroy(cache, page);
ed11d9eb 2466 nr_freed++;
1da177e4 2467 }
ed11d9eb
CL
2468out:
2469 return nr_freed;
1da177e4
LT
2470}
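
drain_freelist() above repeatedly unlinks one completely free slab, drops the list lock, and destroys it, until tofree slabs are gone or the free list is empty. A counter-based model of that loop (no real lists or locking; fake_node and the constants are invented):

#include <stdio.h>

#define OBJS_PER_SLAB	16

struct fake_node {
	unsigned int nr_free_slabs;	/* length of the slabs_free list */
	unsigned long free_objects;
};

static int drain_free_slabs(struct fake_node *n, int tofree)
{
	int nr_freed = 0;

	while (nr_freed < tofree && n->nr_free_slabs > 0) {
		n->nr_free_slabs--;		/* unlink the last free slab ... */
		n->free_objects -= OBJS_PER_SLAB;
		nr_freed++;			/* ... then slab_destroy() it */
	}
	return nr_freed;
}

int main(void)
{
	struct fake_node n = { .nr_free_slabs = 5, .free_objects = 5 * OBJS_PER_SLAB };
	int freed = drain_free_slabs(&n, 3);

	printf("released %d slabs, %lu free objects left\n", freed, n.free_objects);
	return 0;
}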
2471
18004c5d 2472/* Called with slab_mutex held to protect against cpu hotplug */
343e0d7a 2473static int __cache_shrink(struct kmem_cache *cachep)
e498be7d
CL
2474{
2475 int ret = 0, i = 0;
ce8eb6c4 2476 struct kmem_cache_node *n;
e498be7d
CL
2477
2478 drain_cpu_caches(cachep);
2479
2480 check_irq_on();
2481 for_each_online_node(i) {
ce8eb6c4
CL
2482 n = cachep->node[i];
2483 if (!n)
ed11d9eb
CL
2484 continue;
2485
0fa8103b 2486 drain_freelist(cachep, n, slabs_tofree(cachep, n));
ed11d9eb 2487
ce8eb6c4
CL
2488 ret += !list_empty(&n->slabs_full) ||
2489 !list_empty(&n->slabs_partial);
e498be7d
CL
2490 }
2491 return (ret ? 1 : 0);
2492}
2493
1da177e4
LT
2494/**
2495 * kmem_cache_shrink - Shrink a cache.
2496 * @cachep: The cache to shrink.
2497 *
2498 * Releases as many slabs as possible for a cache.
2499 * To help debugging, a zero exit status indicates all slabs were released.
2500 */
343e0d7a 2501int kmem_cache_shrink(struct kmem_cache *cachep)
1da177e4 2502{
8f5be20b 2503 int ret;
40094fa6 2504 BUG_ON(!cachep || in_interrupt());
1da177e4 2505
95402b38 2506 get_online_cpus();
18004c5d 2507 mutex_lock(&slab_mutex);
8f5be20b 2508 ret = __cache_shrink(cachep);
18004c5d 2509 mutex_unlock(&slab_mutex);
95402b38 2510 put_online_cpus();
8f5be20b 2511 return ret;
1da177e4
LT
2512}
2513EXPORT_SYMBOL(kmem_cache_shrink);
2514
945cf2b6 2515int __kmem_cache_shutdown(struct kmem_cache *cachep)
1da177e4 2516{
12c3667f 2517 int i;
ce8eb6c4 2518 struct kmem_cache_node *n;
12c3667f 2519 int rc = __cache_shrink(cachep);
1da177e4 2520
12c3667f
CL
2521 if (rc)
2522 return rc;
1da177e4 2523
12c3667f
CL
2524 for_each_online_cpu(i)
2525 kfree(cachep->array[i]);
1da177e4 2526
ce8eb6c4 2527 /* NUMA: free the node structures */
12c3667f 2528 for_each_online_node(i) {
ce8eb6c4
CL
2529 n = cachep->node[i];
2530 if (n) {
2531 kfree(n->shared);
2532 free_alien_cache(n->alien);
2533 kfree(n);
12c3667f
CL
2534 }
2535 }
2536 return 0;
1da177e4 2537}
1da177e4 2538
e5ac9c5a
RT
2539/*
2540 * Get the memory for a slab management obj.
5f0985bb
JZ
2541 *
2542 * For a slab cache when the slab descriptor is off-slab, the
2543 * slab descriptor can't come from the same cache which is being created,
2544 * because if that were the case, we would defer the creation of
2545 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
2546 * We would then eventually call down to __kmem_cache_create(), which
2547 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
2548 * This is a "chicken-and-egg" problem.
2549 *
2550 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2551 * which are all initialized during kmem_cache_init().
e5ac9c5a 2552 */
7e007355 2553static void *alloc_slabmgmt(struct kmem_cache *cachep,
0c3aa83e
JK
2554 struct page *page, int colour_off,
2555 gfp_t local_flags, int nodeid)
1da177e4 2556{
7e007355 2557 void *freelist;
0c3aa83e 2558 void *addr = page_address(page);
b28a02de 2559
1da177e4
LT
2560 if (OFF_SLAB(cachep)) {
2561 /* Slab management obj is off-slab. */
8456a648 2562 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
8759ec50 2563 local_flags, nodeid);
8456a648 2564 if (!freelist)
1da177e4
LT
2565 return NULL;
2566 } else {
8456a648
JK
2567 freelist = addr + colour_off;
2568 colour_off += cachep->freelist_size;
1da177e4 2569 }
8456a648
JK
2570 page->active = 0;
2571 page->s_mem = addr + colour_off;
2572 return freelist;
1da177e4
LT
2573}
2574
7cc68973 2575static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
1da177e4 2576{
a41adfaa 2577 return ((freelist_idx_t *)page->freelist)[idx];
e5c58dfd
JK
2578}
2579
2580static inline void set_free_obj(struct page *page,
7cc68973 2581 unsigned int idx, freelist_idx_t val)
e5c58dfd 2582{
a41adfaa 2583 ((freelist_idx_t *)(page->freelist))[idx] = val;
1da177e4
LT
2584}
2585
343e0d7a 2586static void cache_init_objs(struct kmem_cache *cachep,
8456a648 2587 struct page *page)
1da177e4
LT
2588{
2589 int i;
2590
2591 for (i = 0; i < cachep->num; i++) {
8456a648 2592 void *objp = index_to_obj(cachep, page, i);
1da177e4
LT
2593#if DEBUG
2594 /* need to poison the objs? */
2595 if (cachep->flags & SLAB_POISON)
2596 poison_obj(cachep, objp, POISON_FREE);
2597 if (cachep->flags & SLAB_STORE_USER)
2598 *dbg_userword(cachep, objp) = NULL;
2599
2600 if (cachep->flags & SLAB_RED_ZONE) {
2601 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2602 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2603 }
2604 /*
a737b3e2
AM
2605 * Constructors are not allowed to allocate memory from the same
2606 * cache which they are a constructor for. Otherwise, deadlock.
2607 * They must also be threaded.
1da177e4
LT
2608 */
2609 if (cachep->ctor && !(cachep->flags & SLAB_POISON))
51cc5068 2610 cachep->ctor(objp + obj_offset(cachep));
1da177e4
LT
2611
2612 if (cachep->flags & SLAB_RED_ZONE) {
2613 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2614 slab_error(cachep, "constructor overwrote the"
b28a02de 2615 " end of an object");
1da177e4
LT
2616 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2617 slab_error(cachep, "constructor overwrote the"
b28a02de 2618 " start of an object");
1da177e4 2619 }
3b0efdfa 2620 if ((cachep->size % PAGE_SIZE) == 0 &&
a737b3e2 2621 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
b28a02de 2622 kernel_map_pages(virt_to_page(objp),
3b0efdfa 2623 cachep->size / PAGE_SIZE, 0);
1da177e4
LT
2624#else
2625 if (cachep->ctor)
51cc5068 2626 cachep->ctor(objp);
1da177e4 2627#endif
e5c58dfd 2628 set_free_obj(page, i, i);
1da177e4 2629 }
1da177e4
LT
2630}
2631
343e0d7a 2632static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
1da177e4 2633{
4b51d669
CL
2634 if (CONFIG_ZONE_DMA_FLAG) {
2635 if (flags & GFP_DMA)
a618e89f 2636 BUG_ON(!(cachep->allocflags & GFP_DMA));
4b51d669 2637 else
a618e89f 2638 BUG_ON(cachep->allocflags & GFP_DMA);
4b51d669 2639 }
1da177e4
LT
2640}
2641
8456a648 2642static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
a737b3e2 2643 int nodeid)
78d382d7 2644{
b1cb0982 2645 void *objp;
78d382d7 2646
e5c58dfd 2647 objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
8456a648 2648 page->active++;
78d382d7 2649#if DEBUG
1ea991b0 2650 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
78d382d7 2651#endif
78d382d7
MD
2652
2653 return objp;
2654}
2655
8456a648 2656static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
a737b3e2 2657 void *objp, int nodeid)
78d382d7 2658{
8456a648 2659 unsigned int objnr = obj_to_index(cachep, page, objp);
78d382d7 2660#if DEBUG
16025177 2661 unsigned int i;
b1cb0982 2662
78d382d7 2663 /* Verify that the slab belongs to the intended node */
1ea991b0 2664 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
78d382d7 2665
b1cb0982 2666 /* Verify double free bug */
8456a648 2667 for (i = page->active; i < cachep->num; i++) {
e5c58dfd 2668 if (get_free_obj(page, i) == objnr) {
b1cb0982
JK
2669 printk(KERN_ERR "slab: double free detected in cache "
2670 "'%s', objp %p\n", cachep->name, objp);
2671 BUG();
2672 }
78d382d7
MD
2673 }
2674#endif
8456a648 2675 page->active--;
e5c58dfd 2676 set_free_obj(page, page->active, objnr);
78d382d7
MD
2677}
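
The per-slab freelist here is just an array of object indices: get_free_obj()/set_free_obj() index into it, and page->active marks the boundary between handed-out and still-free entries. A standalone sketch of that alloc/free discipline, with made-up names (fake_slab, slab_get_index, slab_put_index):

#include <stdio.h>

#define NUM_OBJS 4

typedef unsigned char freelist_idx_t;

struct fake_slab {
	freelist_idx_t freelist[NUM_OBJS];	/* indices of free objects */
	unsigned int active;			/* objects currently allocated */
};

static unsigned int slab_get_index(struct fake_slab *s)
{
	return s->freelist[s->active++];	/* take the next free index */
}

static void slab_put_index(struct fake_slab *s, unsigned int objnr)
{
	s->freelist[--s->active] = objnr;	/* push the index back */
}

int main(void)
{
	struct fake_slab s = { .active = 0 };
	unsigned int i, a, b;

	for (i = 0; i < NUM_OBJS; i++)		/* cache_init_objs() equivalent */
		s.freelist[i] = i;

	a = slab_get_index(&s);
	b = slab_get_index(&s);
	slab_put_index(&s, a);			/* index a is free again */
	printf("allocated %u and %u, %u object(s) still active\n", a, b, s.active);
	return 0;
}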
2678
4776874f
PE
2679/*
2680 * Map pages beginning at addr to the given cache and slab. This is required
2681 * for the slab allocator to be able to look up the cache and slab of a
ccd35fb9 2682 * virtual address for kfree, ksize, and slab debugging.
4776874f 2683 */
8456a648 2684static void slab_map_pages(struct kmem_cache *cache, struct page *page,
7e007355 2685 void *freelist)
1da177e4 2686{
a57a4988 2687 page->slab_cache = cache;
8456a648 2688 page->freelist = freelist;
1da177e4
LT
2689}
2690
2691/*
2692 * Grow (by 1) the number of slabs within a cache. This is called by
2693 * kmem_cache_alloc() when there are no active objs left in a cache.
2694 */
3c517a61 2695static int cache_grow(struct kmem_cache *cachep,
0c3aa83e 2696 gfp_t flags, int nodeid, struct page *page)
1da177e4 2697{
7e007355 2698 void *freelist;
b28a02de
PE
2699 size_t offset;
2700 gfp_t local_flags;
ce8eb6c4 2701 struct kmem_cache_node *n;
1da177e4 2702
a737b3e2
AM
2703 /*
2704 * Be lazy and only check for valid flags here, keeping it out of the
2705 * critical path in kmem_cache_alloc().
1da177e4 2706 */
6cb06229
CL
2707 BUG_ON(flags & GFP_SLAB_BUG_MASK);
2708 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
1da177e4 2709
ce8eb6c4 2710 /* Take the node list lock to change the colour_next on this node */
1da177e4 2711 check_irq_off();
ce8eb6c4
CL
2712 n = cachep->node[nodeid];
2713 spin_lock(&n->list_lock);
1da177e4
LT
2714
2715 /* Get colour for the slab, and calculate the next value. */
ce8eb6c4
CL
2716 offset = n->colour_next;
2717 n->colour_next++;
2718 if (n->colour_next >= cachep->colour)
2719 n->colour_next = 0;
2720 spin_unlock(&n->list_lock);
1da177e4 2721
2e1217cf 2722 offset *= cachep->colour_off;
1da177e4
LT
2723
2724 if (local_flags & __GFP_WAIT)
2725 local_irq_enable();
2726
2727 /*
2728 * The test for missing atomic flag is performed here, rather than
2729 * the more obvious place, simply to reduce the critical path length
2730 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2731 * will eventually be caught here (where it matters).
2732 */
2733 kmem_flagcheck(cachep, flags);
2734
a737b3e2
AM
2735 /*
2736 * Get mem for the objs. Attempt to allocate a physical page from
2737 * 'nodeid'.
e498be7d 2738 */
0c3aa83e
JK
2739 if (!page)
2740 page = kmem_getpages(cachep, local_flags, nodeid);
2741 if (!page)
1da177e4
LT
2742 goto failed;
2743
2744 /* Get slab management. */
8456a648 2745 freelist = alloc_slabmgmt(cachep, page, offset,
6cb06229 2746 local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
8456a648 2747 if (!freelist)
1da177e4
LT
2748 goto opps1;
2749
8456a648 2750 slab_map_pages(cachep, page, freelist);
1da177e4 2751
8456a648 2752 cache_init_objs(cachep, page);
1da177e4
LT
2753
2754 if (local_flags & __GFP_WAIT)
2755 local_irq_disable();
2756 check_irq_off();
ce8eb6c4 2757 spin_lock(&n->list_lock);
1da177e4
LT
2758
2759 /* Make slab active. */
8456a648 2760 list_add_tail(&page->lru, &(n->slabs_free));
1da177e4 2761 STATS_INC_GROWN(cachep);
ce8eb6c4
CL
2762 n->free_objects += cachep->num;
2763 spin_unlock(&n->list_lock);
1da177e4 2764 return 1;
a737b3e2 2765opps1:
0c3aa83e 2766 kmem_freepages(cachep, page);
a737b3e2 2767failed:
1da177e4
LT
2768 if (local_flags & __GFP_WAIT)
2769 local_irq_disable();
2770 return 0;
2771}
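
cache_grow() gives each new slab the next colour offset so that object start addresses are staggered across cache lines. A tiny standalone demo of the colour_next cycling, with illustrative values for colour and colour_off:

#include <stdio.h>

int main(void)
{
	unsigned int colour = 8;	/* left_over / colour_off for this cache */
	unsigned int colour_off = 64;	/* typically the L1 cache line size */
	unsigned int colour_next = 0;
	int i;

	for (i = 0; i < 10; i++) {
		unsigned int offset = colour_next * colour_off;

		if (++colour_next >= colour)
			colour_next = 0;
		printf("slab %d places its first object at byte offset %u\n",
		       i, offset);
	}
	return 0;
}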
2772
2773#if DEBUG
2774
2775/*
2776 * Perform extra freeing checks:
2777 * - detect bad pointers.
2778 * - POISON/RED_ZONE checking
1da177e4
LT
2779 */
2780static void kfree_debugcheck(const void *objp)
2781{
1da177e4
LT
2782 if (!virt_addr_valid(objp)) {
2783 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
b28a02de
PE
2784 (unsigned long)objp);
2785 BUG();
1da177e4 2786 }
1da177e4
LT
2787}
2788
58ce1fd5
PE
2789static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2790{
b46b8f19 2791 unsigned long long redzone1, redzone2;
58ce1fd5
PE
2792
2793 redzone1 = *dbg_redzone1(cache, obj);
2794 redzone2 = *dbg_redzone2(cache, obj);
2795
2796 /*
2797 * Redzone is ok.
2798 */
2799 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2800 return;
2801
2802 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2803 slab_error(cache, "double free detected");
2804 else
2805 slab_error(cache, "memory outside object was overwritten");
2806
b46b8f19 2807 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
58ce1fd5
PE
2808 obj, redzone1, redzone2);
2809}
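
verify_redzone_free() distinguishes three cases from the two markers around the object. A minimal model of that decision, using stand-in constants rather than the real RED_ACTIVE/RED_INACTIVE values from poison.h:

#include <stdio.h>

#define FAKE_RED_INACTIVE	0x1111111111111111ULL	/* stand-in poison values */
#define FAKE_RED_ACTIVE		0x2222222222222222ULL

static const char *diagnose(unsigned long long rz1, unsigned long long rz2)
{
	if (rz1 == FAKE_RED_ACTIVE && rz2 == FAKE_RED_ACTIVE)
		return "redzones ok";
	if (rz1 == FAKE_RED_INACTIVE && rz2 == FAKE_RED_INACTIVE)
		return "double free detected";
	return "memory outside object was overwritten";
}

int main(void)
{
	printf("%s\n", diagnose(FAKE_RED_ACTIVE, FAKE_RED_ACTIVE));
	printf("%s\n", diagnose(FAKE_RED_INACTIVE, FAKE_RED_INACTIVE));
	printf("%s\n", diagnose(FAKE_RED_ACTIVE, 0xdeadbeefULL));
	return 0;
}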
2810
343e0d7a 2811static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
7c0cb9c6 2812 unsigned long caller)
1da177e4 2813{
1da177e4 2814 unsigned int objnr;
8456a648 2815 struct page *page;
1da177e4 2816
80cbd911
MW
2817 BUG_ON(virt_to_cache(objp) != cachep);
2818
3dafccf2 2819 objp -= obj_offset(cachep);
1da177e4 2820 kfree_debugcheck(objp);
b49af68f 2821 page = virt_to_head_page(objp);
1da177e4 2822
1da177e4 2823 if (cachep->flags & SLAB_RED_ZONE) {
58ce1fd5 2824 verify_redzone_free(cachep, objp);
1da177e4
LT
2825 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2826 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2827 }
2828 if (cachep->flags & SLAB_STORE_USER)
7c0cb9c6 2829 *dbg_userword(cachep, objp) = (void *)caller;
1da177e4 2830
8456a648 2831 objnr = obj_to_index(cachep, page, objp);
1da177e4
LT
2832
2833 BUG_ON(objnr >= cachep->num);
8456a648 2834 BUG_ON(objp != index_to_obj(cachep, page, objnr));
1da177e4 2835
1da177e4
LT
2836 if (cachep->flags & SLAB_POISON) {
2837#ifdef CONFIG_DEBUG_PAGEALLOC
3b0efdfa 2838 if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
7c0cb9c6 2839 store_stackinfo(cachep, objp, caller);
b28a02de 2840 kernel_map_pages(virt_to_page(objp),
3b0efdfa 2841 cachep->size / PAGE_SIZE, 0);
1da177e4
LT
2842 } else {
2843 poison_obj(cachep, objp, POISON_FREE);
2844 }
2845#else
2846 poison_obj(cachep, objp, POISON_FREE);
2847#endif
2848 }
2849 return objp;
2850}
2851
1da177e4
LT
2852#else
2853#define kfree_debugcheck(x) do { } while(0)
2854#define cache_free_debugcheck(x,objp,z) (objp)
1da177e4
LT
2855#endif
2856
072bb0aa
MG
2857static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
2858 bool force_refill)
1da177e4
LT
2859{
2860 int batchcount;
ce8eb6c4 2861 struct kmem_cache_node *n;
1da177e4 2862 struct array_cache *ac;
1ca4cb24
PE
2863 int node;
2864
1da177e4 2865 check_irq_off();
7d6e6d09 2866 node = numa_mem_id();
072bb0aa
MG
2867 if (unlikely(force_refill))
2868 goto force_grow;
2869retry:
9a2dba4b 2870 ac = cpu_cache_get(cachep);
1da177e4
LT
2871 batchcount = ac->batchcount;
2872 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
a737b3e2
AM
2873 /*
2874 * If there was little recent activity on this cache, then
2875 * perform only a partial refill. Otherwise we could generate
2876 * refill bouncing.
1da177e4
LT
2877 */
2878 batchcount = BATCHREFILL_LIMIT;
2879 }
ce8eb6c4 2880 n = cachep->node[node];
e498be7d 2881
ce8eb6c4
CL
2882 BUG_ON(ac->avail > 0 || !n);
2883 spin_lock(&n->list_lock);
1da177e4 2884
3ded175a 2885 /* See if we can refill from the shared array */
ce8eb6c4
CL
2886 if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
2887 n->shared->touched = 1;
3ded175a 2888 goto alloc_done;
44b57f1c 2889 }
3ded175a 2890
1da177e4
LT
2891 while (batchcount > 0) {
2892 struct list_head *entry;
8456a648 2893 struct page *page;
1da177e4 2894 /* Get slab alloc is to come from. */
ce8eb6c4
CL
2895 entry = n->slabs_partial.next;
2896 if (entry == &n->slabs_partial) {
2897 n->free_touched = 1;
2898 entry = n->slabs_free.next;
2899 if (entry == &n->slabs_free)
1da177e4
LT
2900 goto must_grow;
2901 }
2902
8456a648 2903 page = list_entry(entry, struct page, lru);
1da177e4 2904 check_spinlock_acquired(cachep);
714b8171
PE
2905
2906 /*
2907 * The slab was either on partial or free list so
2908 * there must be at least one object available for
2909 * allocation.
2910 */
8456a648 2911 BUG_ON(page->active >= cachep->num);
714b8171 2912
8456a648 2913 while (page->active < cachep->num && batchcount--) {
1da177e4
LT
2914 STATS_INC_ALLOCED(cachep);
2915 STATS_INC_ACTIVE(cachep);
2916 STATS_SET_HIGH(cachep);
2917
8456a648 2918 ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
072bb0aa 2919 node));
1da177e4 2920 }
1da177e4
LT
2921
2922 /* move slabp to correct slabp list: */
8456a648
JK
2923 list_del(&page->lru);
2924 if (page->active == cachep->num)
34bf6ef9 2925 list_add(&page->lru, &n->slabs_full);
1da177e4 2926 else
34bf6ef9 2927 list_add(&page->lru, &n->slabs_partial);
1da177e4
LT
2928 }
2929
a737b3e2 2930must_grow:
ce8eb6c4 2931 n->free_objects -= ac->avail;
a737b3e2 2932alloc_done:
ce8eb6c4 2933 spin_unlock(&n->list_lock);
1da177e4
LT
2934
2935 if (unlikely(!ac->avail)) {
2936 int x;
072bb0aa 2937force_grow:
3c517a61 2938 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
e498be7d 2939
a737b3e2 2940 /* cache_grow can reenable interrupts, then ac could change. */
9a2dba4b 2941 ac = cpu_cache_get(cachep);
51cd8e6f 2942 node = numa_mem_id();
072bb0aa
MG
2943
2944 /* no objects in sight? abort */
2945 if (!x && (ac->avail == 0 || force_refill))
1da177e4
LT
2946 return NULL;
2947
a737b3e2 2948 if (!ac->avail) /* objects refilled by interrupt? */
1da177e4
LT
2949 goto retry;
2950 }
2951 ac->touched = 1;
072bb0aa
MG
2952
2953 return ac_get_obj(cachep, ac, flags, force_refill);
1da177e4
LT
2954}
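
cache_alloc_refill() is the slow path behind the per-CPU array: on a miss it pulls up to batchcount objects from the node's shared array or partial/free slabs into the LIFO array, and the fast path then pops from the top. A much-simplified userspace model (fake_ac, node_objects and the helpers are invented for illustration):

#include <stdio.h>

#define AC_LIMIT	8

struct fake_ac {
	unsigned int avail;
	unsigned int batchcount;
	int entry[AC_LIMIT];
};

static int node_objects = 100;	/* stand-in for objects sitting on slab lists */

static void refill(struct fake_ac *ac)
{
	unsigned int n = ac->batchcount;

	while (n-- && node_objects > 0 && ac->avail < AC_LIMIT)
		ac->entry[ac->avail++] = --node_objects;	/* grab from a slab */
}

static int alloc_obj(struct fake_ac *ac)
{
	if (!ac->avail)
		refill(ac);			/* the slow path modelled here */
	if (!ac->avail)
		return -1;			/* would have to grow a new slab */
	return ac->entry[--ac->avail];		/* LIFO fast path */
}

int main(void)
{
	struct fake_ac ac = { .avail = 0, .batchcount = 4 };
	int i;

	for (i = 0; i < 6; i++) {
		int obj = alloc_obj(&ac);

		printf("got object %d (avail now %u)\n", obj, ac.avail);
	}
	return 0;
}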
2955
a737b3e2
AM
2956static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2957 gfp_t flags)
1da177e4
LT
2958{
2959 might_sleep_if(flags & __GFP_WAIT);
2960#if DEBUG
2961 kmem_flagcheck(cachep, flags);
2962#endif
2963}
2964
2965#if DEBUG
a737b3e2 2966static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
7c0cb9c6 2967 gfp_t flags, void *objp, unsigned long caller)
1da177e4 2968{
b28a02de 2969 if (!objp)
1da177e4 2970 return objp;
b28a02de 2971 if (cachep->flags & SLAB_POISON) {
1da177e4 2972#ifdef CONFIG_DEBUG_PAGEALLOC
3b0efdfa 2973 if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
b28a02de 2974 kernel_map_pages(virt_to_page(objp),
3b0efdfa 2975 cachep->size / PAGE_SIZE, 1);
1da177e4
LT
2976 else
2977 check_poison_obj(cachep, objp);
2978#else
2979 check_poison_obj(cachep, objp);
2980#endif
2981 poison_obj(cachep, objp, POISON_INUSE);
2982 }
2983 if (cachep->flags & SLAB_STORE_USER)
7c0cb9c6 2984 *dbg_userword(cachep, objp) = (void *)caller;
1da177e4
LT
2985
2986 if (cachep->flags & SLAB_RED_ZONE) {
a737b3e2
AM
2987 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
2988 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
2989 slab_error(cachep, "double free, or memory outside"
2990 " object was overwritten");
b28a02de 2991 printk(KERN_ERR
b46b8f19 2992 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
a737b3e2
AM
2993 objp, *dbg_redzone1(cachep, objp),
2994 *dbg_redzone2(cachep, objp));
1da177e4
LT
2995 }
2996 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
2997 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
2998 }
3dafccf2 2999 objp += obj_offset(cachep);
4f104934 3000 if (cachep->ctor && cachep->flags & SLAB_POISON)
51cc5068 3001 cachep->ctor(objp);
7ea466f2
TH
3002 if (ARCH_SLAB_MINALIGN &&
3003 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
a44b56d3 3004 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
c225150b 3005 objp, (int)ARCH_SLAB_MINALIGN);
a44b56d3 3006 }
1da177e4
LT
3007 return objp;
3008}
3009#else
3010#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3011#endif
3012
773ff60e 3013static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
8a8b6502 3014{
9b030cb8 3015 if (cachep == kmem_cache)
773ff60e 3016 return false;
8a8b6502 3017
8c138bc0 3018 return should_failslab(cachep->object_size, flags, cachep->flags);
8a8b6502
AM
3019}
3020
343e0d7a 3021static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
1da177e4 3022{
b28a02de 3023 void *objp;
1da177e4 3024 struct array_cache *ac;
072bb0aa 3025 bool force_refill = false;
1da177e4 3026
5c382300 3027 check_irq_off();
8a8b6502 3028
9a2dba4b 3029 ac = cpu_cache_get(cachep);
1da177e4 3030 if (likely(ac->avail)) {
1da177e4 3031 ac->touched = 1;
072bb0aa
MG
3032 objp = ac_get_obj(cachep, ac, flags, false);
3033
ddbf2e83 3034 /*
072bb0aa
MG
3035 * Allow for the possibility all avail objects are not allowed
3036 * by the current flags
ddbf2e83 3037 */
072bb0aa
MG
3038 if (objp) {
3039 STATS_INC_ALLOCHIT(cachep);
3040 goto out;
3041 }
3042 force_refill = true;
1da177e4 3043 }
072bb0aa
MG
3044
3045 STATS_INC_ALLOCMISS(cachep);
3046 objp = cache_alloc_refill(cachep, flags, force_refill);
3047 /*
3048 * the 'ac' may be updated by cache_alloc_refill(),
3049 * and kmemleak_erase() requires its correct value.
3050 */
3051 ac = cpu_cache_get(cachep);
3052
3053out:
d5cff635
CM
3054 /*
3055 * To avoid a false negative, if an object that is in one of the
3056 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3057 * treat the array pointers as a reference to the object.
3058 */
f3d8b53a
O
3059 if (objp)
3060 kmemleak_erase(&ac->entry[ac->avail]);
5c382300
AK
3061 return objp;
3062}
3063
e498be7d 3064#ifdef CONFIG_NUMA
c61afb18 3065/*
f0432d15 3066 * Try allocating on another node if PF_SPREAD_SLAB or a mempolicy is set.
c61afb18
PJ
3067 *
3068 * If we are in_interrupt, then process context, including cpusets and
3069 * mempolicy, may not apply and should not be used for allocation policy.
3070 */
3071static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3072{
3073 int nid_alloc, nid_here;
3074
765c4507 3075 if (in_interrupt() || (flags & __GFP_THISNODE))
c61afb18 3076 return NULL;
7d6e6d09 3077 nid_alloc = nid_here = numa_mem_id();
c61afb18 3078 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
6adef3eb 3079 nid_alloc = cpuset_slab_spread_node();
c61afb18 3080 else if (current->mempolicy)
2a389610 3081 nid_alloc = mempolicy_slab_node();
c61afb18 3082 if (nid_alloc != nid_here)
8b98c169 3083 return ____cache_alloc_node(cachep, flags, nid_alloc);
c61afb18
PJ
3084 return NULL;
3085}
3086
765c4507
CL
3087/*
3088 * Fallback function if there was no memory available and no objects on a
3c517a61 3089 * certain node and fallback is permitted. First we scan all the
6a67368c 3090 * available nodes for available objects. If that fails then we
3c517a61
CL
3091 * perform an allocation without specifying a node. This allows the page
3092 * allocator to do its reclaim / fallback magic. We then insert the
3093 * slab into the proper nodelist and then allocate from it.
765c4507 3094 */
8c8cc2c1 3095static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
765c4507 3096{
8c8cc2c1
PE
3097 struct zonelist *zonelist;
3098 gfp_t local_flags;
dd1a239f 3099 struct zoneref *z;
54a6eb5c
MG
3100 struct zone *zone;
3101 enum zone_type high_zoneidx = gfp_zone(flags);
765c4507 3102 void *obj = NULL;
3c517a61 3103 int nid;
cc9a6c87 3104 unsigned int cpuset_mems_cookie;
8c8cc2c1
PE
3105
3106 if (flags & __GFP_THISNODE)
3107 return NULL;
3108
6cb06229 3109 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
765c4507 3110
cc9a6c87 3111retry_cpuset:
d26914d1 3112 cpuset_mems_cookie = read_mems_allowed_begin();
2a389610 3113 zonelist = node_zonelist(mempolicy_slab_node(), flags);
cc9a6c87 3114
3c517a61
CL
3115retry:
3116 /*
3117 * Look through allowed nodes for objects available
3118 * from existing per node queues.
3119 */
54a6eb5c
MG
3120 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3121 nid = zone_to_nid(zone);
aedb0eb1 3122
54a6eb5c 3123 if (cpuset_zone_allowed_hardwall(zone, flags) &&
6a67368c
CL
3124 cache->node[nid] &&
3125 cache->node[nid]->free_objects) {
3c517a61
CL
3126 obj = ____cache_alloc_node(cache,
3127 flags | GFP_THISNODE, nid);
481c5346
CL
3128 if (obj)
3129 break;
3130 }
3c517a61
CL
3131 }
3132
cfce6604 3133 if (!obj) {
3c517a61
CL
3134 /*
3135 * This allocation will be performed within the constraints
3136 * of the current cpuset / memory policy requirements.
3137 * We may trigger various forms of reclaim on the allowed
3138 * set and go into memory reserves if necessary.
3139 */
0c3aa83e
JK
3140 struct page *page;
3141
dd47ea75
CL
3142 if (local_flags & __GFP_WAIT)
3143 local_irq_enable();
3144 kmem_flagcheck(cache, flags);
0c3aa83e 3145 page = kmem_getpages(cache, local_flags, numa_mem_id());
dd47ea75
CL
3146 if (local_flags & __GFP_WAIT)
3147 local_irq_disable();
0c3aa83e 3148 if (page) {
3c517a61
CL
3149 /*
3150 * Insert into the appropriate per node queues
3151 */
0c3aa83e
JK
3152 nid = page_to_nid(page);
3153 if (cache_grow(cache, flags, nid, page)) {
3c517a61
CL
3154 obj = ____cache_alloc_node(cache,
3155 flags | GFP_THISNODE, nid);
3156 if (!obj)
3157 /*
3158 * Another processor may allocate the
3159 * objects in the slab since we are
3160 * not holding any locks.
3161 */
3162 goto retry;
3163 } else {
b6a60451 3164 /* cache_grow already freed obj */
3c517a61
CL
3165 obj = NULL;
3166 }
3167 }
aedb0eb1 3168 }
cc9a6c87 3169
d26914d1 3170 if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
cc9a6c87 3171 goto retry_cpuset;
765c4507
CL
3172 return obj;
3173}
3174
e498be7d
CL
3175/*
3176 * An interface to enable slab creation on nodeid
1da177e4 3177 */
8b98c169 3178static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
a737b3e2 3179 int nodeid)
e498be7d
CL
3180{
3181 struct list_head *entry;
8456a648 3182 struct page *page;
ce8eb6c4 3183 struct kmem_cache_node *n;
b28a02de 3184 void *obj;
b28a02de
PE
3185 int x;
3186
14e50c6a 3187 VM_BUG_ON(nodeid > num_online_nodes());
ce8eb6c4
CL
3188 n = cachep->node[nodeid];
3189 BUG_ON(!n);
b28a02de 3190
a737b3e2 3191retry:
ca3b9b91 3192 check_irq_off();
ce8eb6c4
CL
3193 spin_lock(&n->list_lock);
3194 entry = n->slabs_partial.next;
3195 if (entry == &n->slabs_partial) {
3196 n->free_touched = 1;
3197 entry = n->slabs_free.next;
3198 if (entry == &n->slabs_free)
b28a02de
PE
3199 goto must_grow;
3200 }
3201
8456a648 3202 page = list_entry(entry, struct page, lru);
b28a02de 3203 check_spinlock_acquired_node(cachep, nodeid);
b28a02de
PE
3204
3205 STATS_INC_NODEALLOCS(cachep);
3206 STATS_INC_ACTIVE(cachep);
3207 STATS_SET_HIGH(cachep);
3208
8456a648 3209 BUG_ON(page->active == cachep->num);
b28a02de 3210
8456a648 3211 obj = slab_get_obj(cachep, page, nodeid);
ce8eb6c4 3212 n->free_objects--;
b28a02de 3213 /* move slabp to correct slabp list: */
8456a648 3214 list_del(&page->lru);
b28a02de 3215
8456a648
JK
3216 if (page->active == cachep->num)
3217 list_add(&page->lru, &n->slabs_full);
a737b3e2 3218 else
8456a648 3219 list_add(&page->lru, &n->slabs_partial);
e498be7d 3220
ce8eb6c4 3221 spin_unlock(&n->list_lock);
b28a02de 3222 goto done;
e498be7d 3223
a737b3e2 3224must_grow:
ce8eb6c4 3225 spin_unlock(&n->list_lock);
3c517a61 3226 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
765c4507
CL
3227 if (x)
3228 goto retry;
1da177e4 3229
8c8cc2c1 3230 return fallback_alloc(cachep, flags);
e498be7d 3231
a737b3e2 3232done:
b28a02de 3233 return obj;
e498be7d 3234}
8c8cc2c1 3235
8c8cc2c1 3236static __always_inline void *
48356303 3237slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
7c0cb9c6 3238 unsigned long caller)
8c8cc2c1
PE
3239{
3240 unsigned long save_flags;
3241 void *ptr;
7d6e6d09 3242 int slab_node = numa_mem_id();
8c8cc2c1 3243
dcce284a 3244 flags &= gfp_allowed_mask;
7e85ee0c 3245
cf40bd16
NP
3246 lockdep_trace_alloc(flags);
3247
773ff60e 3248 if (slab_should_failslab(cachep, flags))
824ebef1
AM
3249 return NULL;
3250
d79923fa
GC
3251 cachep = memcg_kmem_get_cache(cachep, flags);
3252
8c8cc2c1
PE
3253 cache_alloc_debugcheck_before(cachep, flags);
3254 local_irq_save(save_flags);
3255
eacbbae3 3256 if (nodeid == NUMA_NO_NODE)
7d6e6d09 3257 nodeid = slab_node;
8c8cc2c1 3258
6a67368c 3259 if (unlikely(!cachep->node[nodeid])) {
8c8cc2c1
PE
3260 /* Node not bootstrapped yet */
3261 ptr = fallback_alloc(cachep, flags);
3262 goto out;
3263 }
3264
7d6e6d09 3265 if (nodeid == slab_node) {
8c8cc2c1
PE
3266 /*
3267 * Use the locally cached objects if possible.
3268 * However ____cache_alloc does not allow fallback
3269 * to other nodes. It may fail while we still have
3270 * objects on other nodes available.
3271 */
3272 ptr = ____cache_alloc(cachep, flags);
3273 if (ptr)
3274 goto out;
3275 }
3276 /* ____cache_alloc_node can fall back to other nodes */
3277 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3278 out:
3279 local_irq_restore(save_flags);
3280 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
8c138bc0 3281 kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
d5cff635 3282 flags);
8c8cc2c1 3283
5087c822 3284 if (likely(ptr)) {
8c138bc0 3285 kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
5087c822
JP
3286 if (unlikely(flags & __GFP_ZERO))
3287 memset(ptr, 0, cachep->object_size);
3288 }
d07dbea4 3289
8c8cc2c1
PE
3290 return ptr;
3291}
3292
3293static __always_inline void *
3294__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3295{
3296 void *objp;
3297
f0432d15 3298 if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) {
8c8cc2c1
PE
3299 objp = alternate_node_alloc(cache, flags);
3300 if (objp)
3301 goto out;
3302 }
3303 objp = ____cache_alloc(cache, flags);
3304
3305 /*
3306 * We may just have run out of memory on the local node.
3307 * ____cache_alloc_node() knows how to locate memory on other nodes
3308 */
7d6e6d09
LS
3309 if (!objp)
3310 objp = ____cache_alloc_node(cache, flags, numa_mem_id());
8c8cc2c1
PE
3311
3312 out:
3313 return objp;
3314}
3315#else
3316
3317static __always_inline void *
3318__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3319{
3320 return ____cache_alloc(cachep, flags);
3321}
3322
3323#endif /* CONFIG_NUMA */
3324
3325static __always_inline void *
48356303 3326slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
8c8cc2c1
PE
3327{
3328 unsigned long save_flags;
3329 void *objp;
3330
dcce284a 3331 flags &= gfp_allowed_mask;
7e85ee0c 3332
cf40bd16
NP
3333 lockdep_trace_alloc(flags);
3334
773ff60e 3335 if (slab_should_failslab(cachep, flags))
824ebef1
AM
3336 return NULL;
3337
d79923fa
GC
3338 cachep = memcg_kmem_get_cache(cachep, flags);
3339
8c8cc2c1
PE
3340 cache_alloc_debugcheck_before(cachep, flags);
3341 local_irq_save(save_flags);
3342 objp = __do_cache_alloc(cachep, flags);
3343 local_irq_restore(save_flags);
3344 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
8c138bc0 3345 kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
d5cff635 3346 flags);
8c8cc2c1
PE
3347 prefetchw(objp);
3348
5087c822 3349 if (likely(objp)) {
8c138bc0 3350 kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
5087c822
JP
3351 if (unlikely(flags & __GFP_ZERO))
3352 memset(objp, 0, cachep->object_size);
3353 }
d07dbea4 3354
8c8cc2c1
PE
3355 return objp;
3356}
e498be7d
CL
3357
3358/*
5f0985bb 3359 * Caller needs to acquire correct kmem_cache_node's list_lock
e498be7d 3360 */
343e0d7a 3361static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
b28a02de 3362 int node)
1da177e4
LT
3363{
3364 int i;
ce8eb6c4 3365 struct kmem_cache_node *n;
1da177e4
LT
3366
3367 for (i = 0; i < nr_objects; i++) {
072bb0aa 3368 void *objp;
8456a648 3369 struct page *page;
1da177e4 3370
072bb0aa
MG
3371 clear_obj_pfmemalloc(&objpp[i]);
3372 objp = objpp[i];
3373
8456a648 3374 page = virt_to_head_page(objp);
ce8eb6c4 3375 n = cachep->node[node];
8456a648 3376 list_del(&page->lru);
ff69416e 3377 check_spinlock_acquired_node(cachep, node);
8456a648 3378 slab_put_obj(cachep, page, objp, node);
1da177e4 3379 STATS_DEC_ACTIVE(cachep);
ce8eb6c4 3380 n->free_objects++;
1da177e4
LT
3381
3382 /* fixup slab chains */
8456a648 3383 if (page->active == 0) {
ce8eb6c4
CL
3384 if (n->free_objects > n->free_limit) {
3385 n->free_objects -= cachep->num;
e5ac9c5a
RT
3386 /* No need to drop any previously held
3387 * lock here; even if we have an off-slab slab
3388 * descriptor, it is guaranteed to come from
3389 * a different cache - refer to the comments before
3390 * alloc_slabmgmt.
3391 */
8456a648 3392 slab_destroy(cachep, page);
1da177e4 3393 } else {
8456a648 3394 list_add(&page->lru, &n->slabs_free);
1da177e4
LT
3395 }
3396 } else {
3397 /* Unconditionally move a slab to the end of the
3398 * partial list on free - this gives the other
3399 * objects in the slab the maximum time to be freed, too.
3400 */
8456a648 3401 list_add_tail(&page->lru, &n->slabs_partial);
1da177e4
LT
3402 }
3403 }
3404}
3405
343e0d7a 3406static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
1da177e4
LT
3407{
3408 int batchcount;
ce8eb6c4 3409 struct kmem_cache_node *n;
7d6e6d09 3410 int node = numa_mem_id();
1da177e4
LT
3411
3412 batchcount = ac->batchcount;
3413#if DEBUG
3414 BUG_ON(!batchcount || batchcount > ac->avail);
3415#endif
3416 check_irq_off();
ce8eb6c4
CL
3417 n = cachep->node[node];
3418 spin_lock(&n->list_lock);
3419 if (n->shared) {
3420 struct array_cache *shared_array = n->shared;
b28a02de 3421 int max = shared_array->limit - shared_array->avail;
1da177e4
LT
3422 if (max) {
3423 if (batchcount > max)
3424 batchcount = max;
e498be7d 3425 memcpy(&(shared_array->entry[shared_array->avail]),
b28a02de 3426 ac->entry, sizeof(void *) * batchcount);
1da177e4
LT
3427 shared_array->avail += batchcount;
3428 goto free_done;
3429 }
3430 }
3431
ff69416e 3432 free_block(cachep, ac->entry, batchcount, node);
a737b3e2 3433free_done:
1da177e4
LT
3434#if STATS
3435 {
3436 int i = 0;
3437 struct list_head *p;
3438
ce8eb6c4
CL
3439 p = n->slabs_free.next;
3440 while (p != &(n->slabs_free)) {
8456a648 3441 struct page *page;
1da177e4 3442
8456a648
JK
3443 page = list_entry(p, struct page, lru);
3444 BUG_ON(page->active);
1da177e4
LT
3445
3446 i++;
3447 p = p->next;
3448 }
3449 STATS_SET_FREEABLE(cachep, i);
3450 }
3451#endif
ce8eb6c4 3452 spin_unlock(&n->list_lock);
1da177e4 3453 ac->avail -= batchcount;
a737b3e2 3454 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
1da177e4
LT
3455}
3456
3457/*
a737b3e2
AM
3458 * Release an obj back to its cache. If the obj has a constructed state, it must
3459 * be in this state _before_ it is released. Called with disabled ints.
1da177e4 3460 */
a947eb95 3461static inline void __cache_free(struct kmem_cache *cachep, void *objp,
7c0cb9c6 3462 unsigned long caller)
1da177e4 3463{
9a2dba4b 3464 struct array_cache *ac = cpu_cache_get(cachep);
1da177e4
LT
3465
3466 check_irq_off();
d5cff635 3467 kmemleak_free_recursive(objp, cachep->flags);
a947eb95 3468 objp = cache_free_debugcheck(cachep, objp, caller);
1da177e4 3469
8c138bc0 3470 kmemcheck_slab_free(cachep, objp, cachep->object_size);
c175eea4 3471
1807a1aa
SS
3472 /*
3473 * Skip calling cache_free_alien() when the platform is not NUMA.
3474 * This avoids the cache misses incurred while accessing slabp (a
3475 * per-page memory reference) to get the nodeid. Instead, use a global
3476 * variable to skip the call, which is most likely to be present in
3477 * the cache.
3478 */
b6e68bc1 3479 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
729bd0b7
PE
3480 return;
3481
1da177e4
LT
3482 if (likely(ac->avail < ac->limit)) {
3483 STATS_INC_FREEHIT(cachep);
1da177e4
LT
3484 } else {
3485 STATS_INC_FREEMISS(cachep);
3486 cache_flusharray(cachep, ac);
1da177e4 3487 }
42c8c99c 3488
072bb0aa 3489 ac_put_obj(cachep, ac, objp);
1da177e4
LT
3490}
3491
3492/**
3493 * kmem_cache_alloc - Allocate an object
3494 * @cachep: The cache to allocate from.
3495 * @flags: See kmalloc().
3496 *
3497 * Allocate an object from this cache. The flags are only relevant
3498 * if the cache has no available objects.
3499 */
343e0d7a 3500void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
1da177e4 3501{
48356303 3502 void *ret = slab_alloc(cachep, flags, _RET_IP_);
36555751 3503
ca2b84cb 3504 trace_kmem_cache_alloc(_RET_IP_, ret,
8c138bc0 3505 cachep->object_size, cachep->size, flags);
36555751
EGM
3506
3507 return ret;
1da177e4
LT
3508}
3509EXPORT_SYMBOL(kmem_cache_alloc);
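/*
 * Illustrative sketch only (not part of slab.c): how a typical user of the
 * API above creates a cache for one object type and allocates from it.
 * "struct foo", foo_cache, foo_cache_init() and foo_alloc() are hypothetical
 * names used purely for illustration.
 */
struct foo {
	int id;
};

static struct kmem_cache *foo_cache;

static int foo_cache_init(void)
{
	/* One cache per object type, as described at the top of this file. */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static struct foo *foo_alloc(void)
{
	/* GFP_KERNEL only matters when no cached object is available. */
	return kmem_cache_alloc(foo_cache, GFP_KERNEL);
}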
3510
0f24f128 3511#ifdef CONFIG_TRACING
85beb586 3512void *
4052147c 3513kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
36555751 3514{
85beb586
SR
3515 void *ret;
3516
48356303 3517 ret = slab_alloc(cachep, flags, _RET_IP_);
85beb586
SR
3518
3519 trace_kmalloc(_RET_IP_, ret,
ff4fcd01 3520 size, cachep->size, flags);
85beb586 3521 return ret;
36555751 3522}
85beb586 3523EXPORT_SYMBOL(kmem_cache_alloc_trace);
36555751
EGM
3524#endif
3525
1da177e4 3526#ifdef CONFIG_NUMA
d0d04b78
ZL
3527/**
3528 * kmem_cache_alloc_node - Allocate an object on the specified node
3529 * @cachep: The cache to allocate from.
3530 * @flags: See kmalloc().
3531 * @nodeid: node number of the target node.
3532 *
3533 * Identical to kmem_cache_alloc but it will allocate memory on the given
3534 * node, which can improve the performance for cpu bound structures.
3535 *
3536 * Fallback to other node is possible if __GFP_THISNODE is not set.
3537 */
8b98c169
CH
3538void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3539{
48356303 3540 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
36555751 3541
ca2b84cb 3542 trace_kmem_cache_alloc_node(_RET_IP_, ret,
8c138bc0 3543 cachep->object_size, cachep->size,
ca2b84cb 3544 flags, nodeid);
36555751
EGM
3545
3546 return ret;
8b98c169 3547}
1da177e4
LT
3548EXPORT_SYMBOL(kmem_cache_alloc_node);
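/*
 * Illustrative sketch only (not part of slab.c): allocating one object per
 * online node, e.g. for per-node management data. bar_alloc_per_node(),
 * bar_cache and objs[] are hypothetical; a real caller would size objs[]
 * by MAX_NUMNODES and free any partial result on failure.
 */
static int bar_alloc_per_node(struct kmem_cache *bar_cache, void **objs)
{
	int node;

	for_each_online_node(node) {
		/* May fall back to another node: __GFP_THISNODE is not set. */
		objs[node] = kmem_cache_alloc_node(bar_cache, GFP_KERNEL, node);
		if (!objs[node])
			return -ENOMEM;
	}
	return 0;
}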
3549
0f24f128 3550#ifdef CONFIG_TRACING
4052147c 3551void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
85beb586 3552 gfp_t flags,
4052147c
EG
3553 int nodeid,
3554 size_t size)
36555751 3555{
85beb586
SR
3556 void *ret;
3557
592f4145 3558 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
7c0cb9c6 3559
85beb586 3560 trace_kmalloc_node(_RET_IP_, ret,
ff4fcd01 3561 size, cachep->size,
85beb586
SR
3562 flags, nodeid);
3563 return ret;
36555751 3564}
85beb586 3565EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
36555751
EGM
3566#endif
3567
8b98c169 3568static __always_inline void *
7c0cb9c6 3569__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
97e2bde4 3570{
343e0d7a 3571 struct kmem_cache *cachep;
97e2bde4 3572
2c59dd65 3573 cachep = kmalloc_slab(size, flags);
6cb8f913
CL
3574 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3575 return cachep;
4052147c 3576 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
97e2bde4 3577}
8b98c169 3578
0bb38a5c 3579#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
8b98c169
CH
3580void *__kmalloc_node(size_t size, gfp_t flags, int node)
3581{
7c0cb9c6 3582 return __do_kmalloc_node(size, flags, node, _RET_IP_);
8b98c169 3583}
dbe5e69d 3584EXPORT_SYMBOL(__kmalloc_node);
8b98c169
CH
3585
3586void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
ce71e27c 3587 int node, unsigned long caller)
8b98c169 3588{
7c0cb9c6 3589 return __do_kmalloc_node(size, flags, node, caller);
8b98c169
CH
3590}
3591EXPORT_SYMBOL(__kmalloc_node_track_caller);
3592#else
3593void *__kmalloc_node(size_t size, gfp_t flags, int node)
3594{
7c0cb9c6 3595 return __do_kmalloc_node(size, flags, node, 0);
8b98c169
CH
3596}
3597EXPORT_SYMBOL(__kmalloc_node);
0bb38a5c 3598#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
8b98c169 3599#endif /* CONFIG_NUMA */
1da177e4
LT
3600
3601/**
800590f5 3602 * __do_kmalloc - allocate memory
1da177e4 3603 * @size: how many bytes of memory are required.
800590f5 3604 * @flags: the type of memory to allocate (see kmalloc).
911851e6 3605 * @caller: return address of the caller, used for debug tracking
1da177e4 3606 */
7fd6b141 3607static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
7c0cb9c6 3608 unsigned long caller)
1da177e4 3609{
343e0d7a 3610 struct kmem_cache *cachep;
36555751 3611 void *ret;
1da177e4 3612
2c59dd65 3613 cachep = kmalloc_slab(size, flags);
a5c96d8a
LT
3614 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3615 return cachep;
48356303 3616 ret = slab_alloc(cachep, flags, caller);
36555751 3617
7c0cb9c6 3618 trace_kmalloc(caller, ret,
3b0efdfa 3619 size, cachep->size, flags);
36555751
EGM
3620
3621 return ret;
7fd6b141
PE
3622}
3623
7fd6b141 3624
0bb38a5c 3625#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
7fd6b141
PE
3626void *__kmalloc(size_t size, gfp_t flags)
3627{
7c0cb9c6 3628 return __do_kmalloc(size, flags, _RET_IP_);
1da177e4
LT
3629}
3630EXPORT_SYMBOL(__kmalloc);
3631
ce71e27c 3632void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
7fd6b141 3633{
7c0cb9c6 3634 return __do_kmalloc(size, flags, caller);
7fd6b141
PE
3635}
3636EXPORT_SYMBOL(__kmalloc_track_caller);
1d2c8eea
CH
3637
3638#else
3639void *__kmalloc(size_t size, gfp_t flags)
3640{
7c0cb9c6 3641 return __do_kmalloc(size, flags, 0);
1d2c8eea
CH
3642}
3643EXPORT_SYMBOL(__kmalloc);
7fd6b141
PE
3644#endif
3645
1da177e4
LT
3646/**
3647 * kmem_cache_free - Deallocate an object
3648 * @cachep: The cache the allocation was from.
3649 * @objp: The previously allocated object.
3650 *
3651 * Free an object which was previously allocated from this
3652 * cache.
3653 */
343e0d7a 3654void kmem_cache_free(struct kmem_cache *cachep, void *objp)
1da177e4
LT
3655{
3656 unsigned long flags;
b9ce5ef4
GC
3657 cachep = cache_from_obj(cachep, objp);
3658 if (!cachep)
3659 return;
1da177e4
LT
3660
3661 local_irq_save(flags);
d97d476b 3662 debug_check_no_locks_freed(objp, cachep->object_size);
3ac7fe5a 3663 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
8c138bc0 3664 debug_check_no_obj_freed(objp, cachep->object_size);
7c0cb9c6 3665 __cache_free(cachep, objp, _RET_IP_);
1da177e4 3666 local_irq_restore(flags);
36555751 3667
ca2b84cb 3668 trace_kmem_cache_free(_RET_IP_, objp);
1da177e4
LT
3669}
3670EXPORT_SYMBOL(kmem_cache_free);
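/*
 * Illustrative sketch only (not part of slab.c): returning an object to the
 * cache it was allocated from; pairs with the hypothetical foo_alloc()
 * sketch further above. Unlike kfree(), a NULL pointer is not accepted
 * here, hence the explicit check.
 */
static void foo_free(struct foo *f)
{
	if (f)
		kmem_cache_free(foo_cache, f);
}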
3671
1da177e4
LT
3672/**
3673 * kfree - free previously allocated memory
3674 * @objp: pointer returned by kmalloc.
3675 *
80e93eff
PE
3676 * If @objp is NULL, no operation is performed.
3677 *
1da177e4
LT
3678 * Don't free memory not originally allocated by kmalloc()
3679 * or you will run into trouble.
3680 */
3681void kfree(const void *objp)
3682{
343e0d7a 3683 struct kmem_cache *c;
1da177e4
LT
3684 unsigned long flags;
3685
2121db74
PE
3686 trace_kfree(_RET_IP_, objp);
3687
6cb8f913 3688 if (unlikely(ZERO_OR_NULL_PTR(objp)))
1da177e4
LT
3689 return;
3690 local_irq_save(flags);
3691 kfree_debugcheck(objp);
6ed5eb22 3692 c = virt_to_cache(objp);
8c138bc0
CL
3693 debug_check_no_locks_freed(objp, c->object_size);
3694
3695 debug_check_no_obj_freed(objp, c->object_size);
7c0cb9c6 3696 __cache_free(c, (void *)objp, _RET_IP_);
1da177e4
LT
3697 local_irq_restore(flags);
3698}
3699EXPORT_SYMBOL(kfree);
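/*
 * Illustrative sketch only (not part of slab.c): the usual kmalloc()/kfree()
 * pairing. dup_bytes() is a hypothetical helper; the caller releases the
 * copy with kfree(), and kfree(NULL) is simply a no-op.
 */
static void *dup_bytes(const void *src, size_t len)
{
	void *copy = kmalloc(len, GFP_KERNEL);

	if (copy)
		memcpy(copy, src, len);
	return copy;	/* caller frees with kfree(copy) */
}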
3700
e498be7d 3701/*
ce8eb6c4 3702 * This initializes kmem_cache_node or resizes various caches for all nodes.
e498be7d 3703 */
5f0985bb 3704static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
e498be7d
CL
3705{
3706 int node;
ce8eb6c4 3707 struct kmem_cache_node *n;
cafeb02e 3708 struct array_cache *new_shared;
3395ee05 3709 struct array_cache **new_alien = NULL;
e498be7d 3710
9c09a95c 3711 for_each_online_node(node) {
cafeb02e 3712
3395ee05 3713 if (use_alien_caches) {
83b519e8 3714 new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3395ee05
PM
3715 if (!new_alien)
3716 goto fail;
3717 }
cafeb02e 3718
63109846
ED
3719 new_shared = NULL;
3720 if (cachep->shared) {
3721 new_shared = alloc_arraycache(node,
0718dc2a 3722 cachep->shared*cachep->batchcount,
83b519e8 3723 0xbaadf00d, gfp);
63109846
ED
3724 if (!new_shared) {
3725 free_alien_cache(new_alien);
3726 goto fail;
3727 }
0718dc2a 3728 }
cafeb02e 3729
ce8eb6c4
CL
3730 n = cachep->node[node];
3731 if (n) {
3732 struct array_cache *shared = n->shared;
cafeb02e 3733
ce8eb6c4 3734 spin_lock_irq(&n->list_lock);
e498be7d 3735
cafeb02e 3736 if (shared)
0718dc2a
CL
3737 free_block(cachep, shared->entry,
3738 shared->avail, node);
e498be7d 3739
ce8eb6c4
CL
3740 n->shared = new_shared;
3741 if (!n->alien) {
3742 n->alien = new_alien;
e498be7d
CL
3743 new_alien = NULL;
3744 }
ce8eb6c4 3745 n->free_limit = (1 + nr_cpus_node(node)) *
a737b3e2 3746 cachep->batchcount + cachep->num;
ce8eb6c4 3747 spin_unlock_irq(&n->list_lock);
cafeb02e 3748 kfree(shared);
e498be7d
CL
3749 free_alien_cache(new_alien);
3750 continue;
3751 }
ce8eb6c4
CL
3752 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
3753 if (!n) {
0718dc2a
CL
3754 free_alien_cache(new_alien);
3755 kfree(new_shared);
e498be7d 3756 goto fail;
0718dc2a 3757 }
e498be7d 3758
ce8eb6c4 3759 kmem_cache_node_init(n);
5f0985bb
JZ
3760 n->next_reap = jiffies + REAPTIMEOUT_NODE +
3761 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
ce8eb6c4
CL
3762 n->shared = new_shared;
3763 n->alien = new_alien;
3764 n->free_limit = (1 + nr_cpus_node(node)) *
a737b3e2 3765 cachep->batchcount + cachep->num;
ce8eb6c4 3766 cachep->node[node] = n;
e498be7d 3767 }
cafeb02e 3768 return 0;
0718dc2a 3769
a737b3e2 3770fail:
3b0efdfa 3771 if (!cachep->list.next) {
0718dc2a
CL
3772 /* Cache is not active yet. Roll back what we did */
3773 node--;
3774 while (node >= 0) {
6a67368c 3775 if (cachep->node[node]) {
ce8eb6c4 3776 n = cachep->node[node];
0718dc2a 3777
ce8eb6c4
CL
3778 kfree(n->shared);
3779 free_alien_cache(n->alien);
3780 kfree(n);
6a67368c 3781 cachep->node[node] = NULL;
0718dc2a
CL
3782 }
3783 node--;
3784 }
3785 }
cafeb02e 3786 return -ENOMEM;
e498be7d
CL
3787}
3788
1da177e4 3789struct ccupdate_struct {
343e0d7a 3790 struct kmem_cache *cachep;
acfe7d74 3791 struct array_cache *new[0];
1da177e4
LT
3792};
3793
3794static void do_ccupdate_local(void *info)
3795{
a737b3e2 3796 struct ccupdate_struct *new = info;
1da177e4
LT
3797 struct array_cache *old;
3798
3799 check_irq_off();
9a2dba4b 3800 old = cpu_cache_get(new->cachep);
e498be7d 3801
1da177e4
LT
3802 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3803 new->new[smp_processor_id()] = old;
3804}
3805
18004c5d 3806/* Always called with the slab_mutex held */
943a451a 3807static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
83b519e8 3808 int batchcount, int shared, gfp_t gfp)
1da177e4 3809{
d2e7b7d0 3810 struct ccupdate_struct *new;
2ed3a4ef 3811 int i;
1da177e4 3812
acfe7d74
ED
3813 new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
3814 gfp);
d2e7b7d0
SS
3815 if (!new)
3816 return -ENOMEM;
3817
e498be7d 3818 for_each_online_cpu(i) {
7d6e6d09 3819 new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
83b519e8 3820 batchcount, gfp);
d2e7b7d0 3821 if (!new->new[i]) {
b28a02de 3822 for (i--; i >= 0; i--)
d2e7b7d0
SS
3823 kfree(new->new[i]);
3824 kfree(new);
e498be7d 3825 return -ENOMEM;
1da177e4
LT
3826 }
3827 }
d2e7b7d0 3828 new->cachep = cachep;
1da177e4 3829
15c8b6c1 3830 on_each_cpu(do_ccupdate_local, (void *)new, 1);
e498be7d 3831
1da177e4 3832 check_irq_on();
1da177e4
LT
3833 cachep->batchcount = batchcount;
3834 cachep->limit = limit;
e498be7d 3835 cachep->shared = shared;
1da177e4 3836
e498be7d 3837 for_each_online_cpu(i) {
d2e7b7d0 3838 struct array_cache *ccold = new->new[i];
1da177e4
LT
3839 if (!ccold)
3840 continue;
6a67368c 3841 spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
7d6e6d09 3842 free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
6a67368c 3843 spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
1da177e4
LT
3844 kfree(ccold);
3845 }
d2e7b7d0 3846 kfree(new);
5f0985bb 3847 return alloc_kmem_cache_node(cachep, gfp);
1da177e4
LT
3848}
3849
943a451a
GC
3850static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3851 int batchcount, int shared, gfp_t gfp)
3852{
3853 int ret;
3854 struct kmem_cache *c = NULL;
3855 int i = 0;
3856
3857 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3858
3859 if (slab_state < FULL)
3860 return ret;
3861
3862 if ((ret < 0) || !is_root_cache(cachep))
3863 return ret;
3864
ebe945c2 3865 VM_BUG_ON(!mutex_is_locked(&slab_mutex));
943a451a 3866 for_each_memcg_cache_index(i) {
2ade4de8 3867 c = cache_from_memcg_idx(cachep, i);
943a451a
GC
3868 if (c)
3869 /* return value determined by the parent cache only */
3870 __do_tune_cpucache(c, limit, batchcount, shared, gfp);
3871 }
3872
3873 return ret;
3874}
3875
18004c5d 3876/* Called with slab_mutex held always */
83b519e8 3877static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
1da177e4
LT
3878{
3879 int err;
943a451a
GC
3880 int limit = 0;
3881 int shared = 0;
3882 int batchcount = 0;
3883
3884 if (!is_root_cache(cachep)) {
3885 struct kmem_cache *root = memcg_root_cache(cachep);
3886 limit = root->limit;
3887 shared = root->shared;
3888 batchcount = root->batchcount;
3889 }
1da177e4 3890
943a451a
GC
3891 if (limit && shared && batchcount)
3892 goto skip_setup;
a737b3e2
AM
3893 /*
3894 * The head array serves three purposes:
1da177e4
LT
3895 * - create a LIFO ordering, i.e. return objects that are cache-warm
3896 * - reduce the number of spinlock operations.
a737b3e2 3897 * - reduce the number of linked list operations on the slab and
1da177e4
LT
3898 * bufctl chains: array operations are cheaper.
3899 * The numbers are guessed; we should auto-tune as described by
3900 * Bonwick.
3901 */
3b0efdfa 3902 if (cachep->size > 131072)
1da177e4 3903 limit = 1;
3b0efdfa 3904 else if (cachep->size > PAGE_SIZE)
1da177e4 3905 limit = 8;
3b0efdfa 3906 else if (cachep->size > 1024)
1da177e4 3907 limit = 24;
3b0efdfa 3908 else if (cachep->size > 256)
1da177e4
LT
3909 limit = 54;
3910 else
3911 limit = 120;
3912
a737b3e2
AM
3913 /*
3914 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
1da177e4
LT
3915 * allocation behaviour: Most allocs on one cpu, most free operations
3916 * on another cpu. For these cases, efficient object passing between
3917 * cpus is necessary. This is provided by a shared array. The array
3918 * replaces Bonwick's magazine layer.
3919 * On uniprocessor, it's functionally equivalent (but less efficient)
3920 * to a larger limit. Thus disabled by default.
3921 */
3922 shared = 0;
3b0efdfa 3923 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
1da177e4 3924 shared = 8;
1da177e4
LT
3925
3926#if DEBUG
a737b3e2
AM
3927 /*
3928 * With debugging enabled, a large batchcount leads to excessively long
3929 * periods with local interrupts disabled. Limit the batchcount.
1da177e4
LT
3930 */
3931 if (limit > 32)
3932 limit = 32;
3933#endif
943a451a
GC
3934 batchcount = (limit + 1) / 2;
3935skip_setup:
3936 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
1da177e4
LT
3937 if (err)
3938 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
b28a02de 3939 cachep->name, -err);
2ed3a4ef 3940 return err;
1da177e4
LT
3941}
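/*
 * Worked example of the sizing heuristic in enable_cpucache() above
 * (illustration only): for a cache of 512-byte objects on an SMP machine,
 * 256 < size <= 1024 gives limit = 54, batchcount = (54 + 1) / 2 = 27, and
 * since size <= PAGE_SIZE the shared array is enabled with shared = 8.
 */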
3942
1b55253a 3943/*
ce8eb6c4
CL
3944 * Drain an array if it contains any elements, taking the node lock only if
3945 * necessary. Note that the node list_lock also protects the array_cache
b18e7e65 3946 * if drain_array() is used on the shared array.
1b55253a 3947 */
ce8eb6c4 3948static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
1b55253a 3949 struct array_cache *ac, int force, int node)
1da177e4
LT
3950{
3951 int tofree;
3952
1b55253a
CL
3953 if (!ac || !ac->avail)
3954 return;
1da177e4
LT
3955 if (ac->touched && !force) {
3956 ac->touched = 0;
b18e7e65 3957 } else {
ce8eb6c4 3958 spin_lock_irq(&n->list_lock);
b18e7e65
CL
3959 if (ac->avail) {
3960 tofree = force ? ac->avail : (ac->limit + 4) / 5;
3961 if (tofree > ac->avail)
3962 tofree = (ac->avail + 1) / 2;
3963 free_block(cachep, ac->entry, tofree, node);
3964 ac->avail -= tofree;
3965 memmove(ac->entry, &(ac->entry[tofree]),
3966 sizeof(void *) * ac->avail);
3967 }
ce8eb6c4 3968 spin_unlock_irq(&n->list_lock);
1da177e4
LT
3969 }
3970}
3971
3972/**
3973 * cache_reap - Reclaim memory from caches.
05fb6bf0 3974 * @w: work descriptor
1da177e4
LT
3975 *
3976 * Called from workqueue/eventd every few seconds.
3977 * Purpose:
3978 * - clear the per-cpu caches for this CPU.
3979 * - return freeable pages to the main free memory pool.
3980 *
a737b3e2
AM
3981 * If we cannot acquire the cache chain mutex then just give up - we'll try
3982 * again on the next iteration.
1da177e4 3983 */
7c5cae36 3984static void cache_reap(struct work_struct *w)
1da177e4 3985{
7a7c381d 3986 struct kmem_cache *searchp;
ce8eb6c4 3987 struct kmem_cache_node *n;
7d6e6d09 3988 int node = numa_mem_id();
bf6aede7 3989 struct delayed_work *work = to_delayed_work(w);
1da177e4 3990
18004c5d 3991 if (!mutex_trylock(&slab_mutex))
1da177e4 3992 /* Give up. Setup the next iteration. */
7c5cae36 3993 goto out;
1da177e4 3994
18004c5d 3995 list_for_each_entry(searchp, &slab_caches, list) {
1da177e4
LT
3996 check_irq_on();
3997
35386e3b 3998 /*
ce8eb6c4 3999 * We only take the node lock if absolutely necessary and we
35386e3b
CL
4000 * have established with reasonable certainty that
4001 * we can do some work if the lock was obtained.
4002 */
ce8eb6c4 4003 n = searchp->node[node];
35386e3b 4004
ce8eb6c4 4005 reap_alien(searchp, n);
1da177e4 4006
ce8eb6c4 4007 drain_array(searchp, n, cpu_cache_get(searchp), 0, node);
1da177e4 4008
35386e3b
CL
4009 /*
4010 * These are racy checks but it does not matter
4011 * if we skip one check or scan twice.
4012 */
ce8eb6c4 4013 if (time_after(n->next_reap, jiffies))
35386e3b 4014 goto next;
1da177e4 4015
5f0985bb 4016 n->next_reap = jiffies + REAPTIMEOUT_NODE;
1da177e4 4017
ce8eb6c4 4018 drain_array(searchp, n, n->shared, 0, node);
1da177e4 4019
ce8eb6c4
CL
4020 if (n->free_touched)
4021 n->free_touched = 0;
ed11d9eb
CL
4022 else {
4023 int freed;
1da177e4 4024
ce8eb6c4 4025 freed = drain_freelist(searchp, n, (n->free_limit +
ed11d9eb
CL
4026 5 * searchp->num - 1) / (5 * searchp->num));
4027 STATS_ADD_REAPED(searchp, freed);
4028 }
35386e3b 4029next:
1da177e4
LT
4030 cond_resched();
4031 }
4032 check_irq_on();
18004c5d 4033 mutex_unlock(&slab_mutex);
8fce4d8e 4034 next_reap_node();
7c5cae36 4035out:
a737b3e2 4036 /* Set up the next iteration */
5f0985bb 4037 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
1da177e4
LT
4038}
4039
158a9624 4040#ifdef CONFIG_SLABINFO
0d7561c6 4041void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
1da177e4 4042{
8456a648 4043 struct page *page;
b28a02de
PE
4044 unsigned long active_objs;
4045 unsigned long num_objs;
4046 unsigned long active_slabs = 0;
4047 unsigned long num_slabs, free_objects = 0, shared_avail = 0;
e498be7d 4048 const char *name;
1da177e4 4049 char *error = NULL;
e498be7d 4050 int node;
ce8eb6c4 4051 struct kmem_cache_node *n;
1da177e4 4052
1da177e4
LT
4053 active_objs = 0;
4054 num_slabs = 0;
e498be7d 4055 for_each_online_node(node) {
ce8eb6c4
CL
4056 n = cachep->node[node];
4057 if (!n)
e498be7d
CL
4058 continue;
4059
ca3b9b91 4060 check_irq_on();
ce8eb6c4 4061 spin_lock_irq(&n->list_lock);
e498be7d 4062
8456a648
JK
4063 list_for_each_entry(page, &n->slabs_full, lru) {
4064 if (page->active != cachep->num && !error)
e498be7d
CL
4065 error = "slabs_full accounting error";
4066 active_objs += cachep->num;
4067 active_slabs++;
4068 }
8456a648
JK
4069 list_for_each_entry(page, &n->slabs_partial, lru) {
4070 if (page->active == cachep->num && !error)
106a74e1 4071 error = "slabs_partial accounting error";
8456a648 4072 if (!page->active && !error)
106a74e1 4073 error = "slabs_partial accounting error";
8456a648 4074 active_objs += page->active;
e498be7d
CL
4075 active_slabs++;
4076 }
8456a648
JK
4077 list_for_each_entry(page, &n->slabs_free, lru) {
4078 if (page->active && !error)
106a74e1 4079 error = "slabs_free accounting error";
e498be7d
CL
4080 num_slabs++;
4081 }
ce8eb6c4
CL
4082 free_objects += n->free_objects;
4083 if (n->shared)
4084 shared_avail += n->shared->avail;
e498be7d 4085
ce8eb6c4 4086 spin_unlock_irq(&n->list_lock);
1da177e4 4087 }
b28a02de
PE
4088 num_slabs += active_slabs;
4089 num_objs = num_slabs * cachep->num;
e498be7d 4090 if (num_objs - active_objs != free_objects && !error)
1da177e4
LT
4091 error = "free_objects accounting error";
4092
b28a02de 4093 name = cachep->name;
1da177e4
LT
4094 if (error)
4095 printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4096
0d7561c6
GC
4097 sinfo->active_objs = active_objs;
4098 sinfo->num_objs = num_objs;
4099 sinfo->active_slabs = active_slabs;
4100 sinfo->num_slabs = num_slabs;
4101 sinfo->shared_avail = shared_avail;
4102 sinfo->limit = cachep->limit;
4103 sinfo->batchcount = cachep->batchcount;
4104 sinfo->shared = cachep->shared;
4105 sinfo->objects_per_slab = cachep->num;
4106 sinfo->cache_order = cachep->gfporder;
4107}
4108
4109void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4110{
1da177e4 4111#if STATS
ce8eb6c4 4112 { /* node stats */
1da177e4
LT
4113 unsigned long high = cachep->high_mark;
4114 unsigned long allocs = cachep->num_allocations;
4115 unsigned long grown = cachep->grown;
4116 unsigned long reaped = cachep->reaped;
4117 unsigned long errors = cachep->errors;
4118 unsigned long max_freeable = cachep->max_freeable;
1da177e4 4119 unsigned long node_allocs = cachep->node_allocs;
e498be7d 4120 unsigned long node_frees = cachep->node_frees;
fb7faf33 4121 unsigned long overflows = cachep->node_overflow;
1da177e4 4122
e92dd4fd
JP
4123 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4124 "%4lu %4lu %4lu %4lu %4lu",
4125 allocs, high, grown,
4126 reaped, errors, max_freeable, node_allocs,
4127 node_frees, overflows);
1da177e4
LT
4128 }
4129 /* cpu stats */
4130 {
4131 unsigned long allochit = atomic_read(&cachep->allochit);
4132 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4133 unsigned long freehit = atomic_read(&cachep->freehit);
4134 unsigned long freemiss = atomic_read(&cachep->freemiss);
4135
4136 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
b28a02de 4137 allochit, allocmiss, freehit, freemiss);
1da177e4
LT
4138 }
4139#endif
1da177e4
LT
4140}
4141
1da177e4
LT
4142#define MAX_SLABINFO_WRITE 128
4143/**
4144 * slabinfo_write - Tuning for the slab allocator
4145 * @file: unused
4146 * @buffer: user buffer
4147 * @count: data length
4148 * @ppos: unused
4149 */
b7454ad3 4150ssize_t slabinfo_write(struct file *file, const char __user *buffer,
b28a02de 4151 size_t count, loff_t *ppos)
1da177e4 4152{
b28a02de 4153 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
1da177e4 4154 int limit, batchcount, shared, res;
7a7c381d 4155 struct kmem_cache *cachep;
b28a02de 4156
1da177e4
LT
4157 if (count > MAX_SLABINFO_WRITE)
4158 return -EINVAL;
4159 if (copy_from_user(&kbuf, buffer, count))
4160 return -EFAULT;
b28a02de 4161 kbuf[MAX_SLABINFO_WRITE] = '\0';
1da177e4
LT
4162
4163 tmp = strchr(kbuf, ' ');
4164 if (!tmp)
4165 return -EINVAL;
4166 *tmp = '\0';
4167 tmp++;
4168 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4169 return -EINVAL;
4170
4171 /* Find the cache in the chain of caches. */
18004c5d 4172 mutex_lock(&slab_mutex);
1da177e4 4173 res = -EINVAL;
18004c5d 4174 list_for_each_entry(cachep, &slab_caches, list) {
1da177e4 4175 if (!strcmp(cachep->name, kbuf)) {
a737b3e2
AM
4176 if (limit < 1 || batchcount < 1 ||
4177 batchcount > limit || shared < 0) {
e498be7d 4178 res = 0;
1da177e4 4179 } else {
e498be7d 4180 res = do_tune_cpucache(cachep, limit,
83b519e8
PE
4181 batchcount, shared,
4182 GFP_KERNEL);
1da177e4
LT
4183 }
4184 break;
4185 }
4186 }
18004c5d 4187 mutex_unlock(&slab_mutex);
1da177e4
LT
4188 if (res >= 0)
4189 res = count;
4190 return res;
4191}
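/*
 * Illustrative userspace sketch only (not part of slab.c): tuning a cache
 * through the interface parsed by slabinfo_write() above. Equivalent to
 * "echo 'dentry 120 60 8' > /proc/slabinfo". The cache name and values are
 * examples only; slabinfo_write() requires 1 <= batchcount <= limit and
 * shared >= 0.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/slabinfo", "w");

	if (!f)
		return 1;
	/* Format: "<cache name> <limit> <batchcount> <shared>" */
	fprintf(f, "dentry 120 60 8\n");
	return fclose(f) ? 1 : 0;
}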
871751e2
AV
4192
4193#ifdef CONFIG_DEBUG_SLAB_LEAK
4194
4195static void *leaks_start(struct seq_file *m, loff_t *pos)
4196{
18004c5d
CL
4197 mutex_lock(&slab_mutex);
4198 return seq_list_start(&slab_caches, *pos);
871751e2
AV
4199}
4200
4201static inline int add_caller(unsigned long *n, unsigned long v)
4202{
4203 unsigned long *p;
4204 int l;
4205 if (!v)
4206 return 1;
4207 l = n[1];
4208 p = n + 2;
4209 while (l) {
4210 int i = l/2;
4211 unsigned long *q = p + 2 * i;
4212 if (*q == v) {
4213 q[1]++;
4214 return 1;
4215 }
4216 if (*q > v) {
4217 l = i;
4218 } else {
4219 p = q + 2;
4220 l -= i + 1;
4221 }
4222 }
4223 if (++n[1] == n[0])
4224 return 0;
4225 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4226 p[0] = v;
4227 p[1] = 1;
4228 return 1;
4229}
4230
8456a648
JK
4231static void handle_slab(unsigned long *n, struct kmem_cache *c,
4232 struct page *page)
871751e2
AV
4233{
4234 void *p;
b1cb0982
JK
4235 int i, j;
4236
871751e2
AV
4237 if (n[0] == n[1])
4238 return;
8456a648 4239 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
b1cb0982
JK
4240 bool active = true;
4241
8456a648 4242 for (j = page->active; j < c->num; j++) {
b1cb0982 4243 /* Skip freed item */
e5c58dfd 4244 if (get_free_obj(page, j) == i) {
b1cb0982
JK
4245 active = false;
4246 break;
4247 }
4248 }
4249 if (!active)
871751e2 4250 continue;
b1cb0982 4251
871751e2
AV
4252 if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4253 return;
4254 }
4255}
4256
4257static void show_symbol(struct seq_file *m, unsigned long address)
4258{
4259#ifdef CONFIG_KALLSYMS
871751e2 4260 unsigned long offset, size;
9281acea 4261 char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
871751e2 4262
a5c43dae 4263 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
871751e2 4264 seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
a5c43dae 4265 if (modname[0])
871751e2
AV
4266 seq_printf(m, " [%s]", modname);
4267 return;
4268 }
4269#endif
4270 seq_printf(m, "%p", (void *)address);
4271}
4272
4273static int leaks_show(struct seq_file *m, void *p)
4274{
0672aa7c 4275 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
8456a648 4276 struct page *page;
ce8eb6c4 4277 struct kmem_cache_node *n;
871751e2 4278 const char *name;
db845067 4279 unsigned long *x = m->private;
871751e2
AV
4280 int node;
4281 int i;
4282
4283 if (!(cachep->flags & SLAB_STORE_USER))
4284 return 0;
4285 if (!(cachep->flags & SLAB_RED_ZONE))
4286 return 0;
4287
4288 /* OK, we can do it */
4289
db845067 4290 x[1] = 0;
871751e2
AV
4291
4292 for_each_online_node(node) {
ce8eb6c4
CL
4293 n = cachep->node[node];
4294 if (!n)
871751e2
AV
4295 continue;
4296
4297 check_irq_on();
ce8eb6c4 4298 spin_lock_irq(&n->list_lock);
871751e2 4299
8456a648
JK
4300 list_for_each_entry(page, &n->slabs_full, lru)
4301 handle_slab(x, cachep, page);
4302 list_for_each_entry(page, &n->slabs_partial, lru)
4303 handle_slab(x, cachep, page);
ce8eb6c4 4304 spin_unlock_irq(&n->list_lock);
871751e2
AV
4305 }
4306 name = cachep->name;
db845067 4307 if (x[0] == x[1]) {
871751e2 4308 /* Increase the buffer size */
18004c5d 4309 mutex_unlock(&slab_mutex);
db845067 4310 m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
871751e2
AV
4311 if (!m->private) {
4312 /* Too bad, we are really out */
db845067 4313 m->private = x;
18004c5d 4314 mutex_lock(&slab_mutex);
871751e2
AV
4315 return -ENOMEM;
4316 }
db845067
CL
4317 *(unsigned long *)m->private = x[0] * 2;
4318 kfree(x);
18004c5d 4319 mutex_lock(&slab_mutex);
871751e2
AV
4320 /* Now make sure this entry will be retried */
4321 m->count = m->size;
4322 return 0;
4323 }
db845067
CL
4324 for (i = 0; i < x[1]; i++) {
4325 seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4326 show_symbol(m, x[2*i+2]);
871751e2
AV
4327 seq_putc(m, '\n');
4328 }
d2e7b7d0 4329
871751e2
AV
4330 return 0;
4331}
4332
a0ec95a8 4333static const struct seq_operations slabstats_op = {
871751e2 4334 .start = leaks_start,
276a2439
WL
4335 .next = slab_next,
4336 .stop = slab_stop,
871751e2
AV
4337 .show = leaks_show,
4338};
a0ec95a8
AD
4339
4340static int slabstats_open(struct inode *inode, struct file *file)
4341{
4342 unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
4343 int ret = -ENOMEM;
4344 if (n) {
4345 ret = seq_open(file, &slabstats_op);
4346 if (!ret) {
4347 struct seq_file *m = file->private_data;
4348 *n = PAGE_SIZE / (2 * sizeof(unsigned long));
4349 m->private = n;
4350 n = NULL;
4351 }
4352 kfree(n);
4353 }
4354 return ret;
4355}
4356
4357static const struct file_operations proc_slabstats_operations = {
4358 .open = slabstats_open,
4359 .read = seq_read,
4360 .llseek = seq_lseek,
4361 .release = seq_release_private,
4362};
4363#endif
4364
4365static int __init slab_proc_init(void)
4366{
4367#ifdef CONFIG_DEBUG_SLAB_LEAK
4368 proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
871751e2 4369#endif
a0ec95a8
AD
4370 return 0;
4371}
4372module_init(slab_proc_init);
1da177e4
LT
4373#endif
4374
00e145b6
MS
4375/**
4376 * ksize - get the actual amount of memory allocated for a given object
4377 * @objp: Pointer to the object
4378 *
4379 * kmalloc may internally round up allocations and return more memory
4380 * than requested. ksize() can be used to determine the actual amount of
4381 * memory allocated. The caller may use this additional memory, even though
4382 * a smaller amount of memory was initially specified with the kmalloc call.
4383 * The caller must guarantee that objp points to a valid object previously
4384 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4385 * must not be freed during the duration of the call.
4386 */
fd76bab2 4387size_t ksize(const void *objp)
1da177e4 4388{
ef8b4520
CL
4389 BUG_ON(!objp);
4390 if (unlikely(objp == ZERO_SIZE_PTR))
00e145b6 4391 return 0;
1da177e4 4392
8c138bc0 4393 return virt_to_cache(objp)->object_size;
1da177e4 4394}
b1aabecd 4395EXPORT_SYMBOL(ksize);
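/*
 * Illustrative sketch only (not part of slab.c): ksize() reports the usable
 * size of an allocation, which may exceed what was requested because
 * kmalloc() rounds up to the nearest kmalloc cache. ksize_demo() is a
 * hypothetical helper; the 128-byte figure is typical, not guaranteed.
 */
static size_t ksize_demo(void)
{
	size_t usable = 0;
	char *buf = kmalloc(100, GFP_KERNEL);

	if (buf) {
		usable = ksize(buf);	/* typically 128 for a 100-byte request */
		kfree(buf);
	}
	return usable;
}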