mm/slub.c
81819f0f
CL
1/*
2 * SLUB: A slab allocator that limits cache line use instead of queuing
3 * objects in per cpu and per node lists.
4 *
881db7fb
CL
5 * The allocator synchronizes using per slab locks or atomic operations
6 * and only uses a centralized lock to manage a pool of partial slabs.
81819f0f 7 *
cde53535 8 * (C) 2007 SGI, Christoph Lameter
881db7fb 9 * (C) 2011 Linux Foundation, Christoph Lameter
81819f0f
CL
10 */
11
12#include <linux/mm.h>
1eb5ac64 13#include <linux/swap.h> /* struct reclaim_state */
81819f0f
CL
14#include <linux/module.h>
15#include <linux/bit_spinlock.h>
16#include <linux/interrupt.h>
17#include <linux/bitops.h>
18#include <linux/slab.h>
97d06609 19#include "slab.h"
7b3c3a50 20#include <linux/proc_fs.h>
81819f0f 21#include <linux/seq_file.h>
5a896d9e 22#include <linux/kmemcheck.h>
81819f0f
CL
23#include <linux/cpu.h>
24#include <linux/cpuset.h>
25#include <linux/mempolicy.h>
26#include <linux/ctype.h>
3ac7fe5a 27#include <linux/debugobjects.h>
81819f0f 28#include <linux/kallsyms.h>
b9049e23 29#include <linux/memory.h>
f8bd2258 30#include <linux/math64.h>
773ff60e 31#include <linux/fault-inject.h>
bfa71457 32#include <linux/stacktrace.h>
4de900b4 33#include <linux/prefetch.h>
81819f0f 34
4a92379b
RK
35#include <trace/events/kmem.h>
36
072bb0aa
MG
37#include "internal.h"
38
81819f0f
CL
39/*
40 * Lock order:
18004c5d 41 * 1. slab_mutex (Global Mutex)
881db7fb
CL
42 * 2. node->list_lock
43 * 3. slab_lock(page) (Only on some arches and for debugging)
81819f0f 44 *
18004c5d 45 * slab_mutex
881db7fb 46 *
18004c5d 47 * The role of the slab_mutex is to protect the list of all the slabs
881db7fb
CL
48 * and to synchronize major metadata changes to slab cache structures.
49 *
50 * The slab_lock is only used for debugging and on arches that do not
51 * have the ability to do a cmpxchg_double. It only protects the second
52 * double word in the page struct. Meaning
 53 * A. page->freelist -> List of free objects in a page
54 * B. page->counters -> Counters of objects
55 * C. page->frozen -> frozen state
56 *
57 * If a slab is frozen then it is exempt from list management. It is not
58 * on any list. The processor that froze the slab is the one who can
59 * perform list operations on the page. Other processors may put objects
60 * onto the freelist but the processor that froze the slab is the only
61 * one that can retrieve the objects from the page's freelist.
81819f0f
CL
62 *
63 * The list_lock protects the partial and full list on each node and
64 * the partial slab counter. If taken then no new slabs may be added or
 65 * removed from the lists, nor may the number of partial slabs be modified.
66 * (Note that the total number of slabs is an atomic value that may be
67 * modified without taking the list lock).
68 *
69 * The list_lock is a centralized lock and thus we avoid taking it as
70 * much as possible. As long as SLUB does not have to handle partial
71 * slabs, operations can continue without any centralized lock. F.e.
72 * allocating a long series of objects that fill up slabs does not require
73 * the list lock.
81819f0f
CL
74 * Interrupts are disabled during allocation and deallocation in order to
75 * make the slab allocator safe to use in the context of an irq. In addition
76 * interrupts are disabled to ensure that the processor does not change
77 * while handling per_cpu slabs, due to kernel preemption.
78 *
79 * SLUB assigns one slab for allocation to each processor.
80 * Allocations only occur from these slabs called cpu slabs.
81 *
672bba3a
CL
82 * Slabs with free elements are kept on a partial list and during regular
83 * operations no list for full slabs is used. If an object in a full slab is
81819f0f 84 * freed then the slab will show up again on the partial lists.
672bba3a
CL
85 * We track full slabs for debugging purposes though because otherwise we
86 * cannot scan all objects.
81819f0f
CL
87 *
88 * Slabs are freed when they become empty. Teardown and setup is
89 * minimal so we rely on the page allocators per cpu caches for
90 * fast frees and allocs.
91 *
92 * Overloading of page flags that are otherwise used for LRU management.
93 *
4b6f0750
CL
94 * PageActive The slab is frozen and exempt from list processing.
95 * This means that the slab is dedicated to a purpose
96 * such as satisfying allocations for a specific
97 * processor. Objects may be freed in the slab while
98 * it is frozen but slab_free will then skip the usual
99 * list operations. It is up to the processor holding
100 * the slab to integrate the slab into the slab lists
101 * when the slab is no longer needed.
102 *
103 * One use of this flag is to mark slabs that are
104 * used for allocations. Then such a slab becomes a cpu
105 * slab. The cpu slab may be equipped with an additional
dfb4f096 106 * freelist that allows lockless access to
894b8788
CL
107 * free objects in addition to the regular freelist
108 * that requires the slab lock.
81819f0f
CL
109 *
110 * PageError Slab requires special handling due to debug
111 * options set. This moves slab handling out of
894b8788 112 * the fast path and disables lockless freelists.
81819f0f
CL
113 */
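/*
 * Editor's note (illustrative scenario, not in the original source): CPU 0
 * freezes a slab and uses it as its cpu slab. CPU 1 may free objects of
 * that slab, which puts them on the page's freelist, but only CPU 0 may
 * take objects off that freelist or move the page between the node lists;
 * when CPU 0 is done with the page it unfreezes it and links it back onto
 * the appropriate list.
 */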
114
af537b0a
CL
115static inline int kmem_cache_debug(struct kmem_cache *s)
116{
5577bd8a 117#ifdef CONFIG_SLUB_DEBUG
af537b0a 118 return unlikely(s->flags & SLAB_DEBUG_FLAGS);
5577bd8a 119#else
af537b0a 120 return 0;
5577bd8a 121#endif
af537b0a 122}
5577bd8a 123
81819f0f
CL
124/*
125 * Issues still to be resolved:
126 *
81819f0f
CL
127 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
128 *
81819f0f
CL
129 * - Variable sizing of the per node arrays
130 */
131
132/* Enable to test recovery from slab corruption on boot */
133#undef SLUB_RESILIENCY_TEST
134
b789ef51
CL
135/* Enable to log cmpxchg failures */
136#undef SLUB_DEBUG_CMPXCHG
137
2086d26a
CL
138/*
 139 * Minimum number of partial slabs. These will be left on the partial
140 * lists even if they are empty. kmem_cache_shrink may reclaim them.
141 */
76be8950 142#define MIN_PARTIAL 5
e95eed57 143
2086d26a
CL
144/*
145 * Maximum number of desirable partial slabs.
146 * The existence of more partial slabs makes kmem_cache_shrink
 147 * sort the partial list by the number of objects in use.
148 */
149#define MAX_PARTIAL 10
150
81819f0f
CL
151#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
152 SLAB_POISON | SLAB_STORE_USER)
672bba3a 153
fa5ec8a1 154/*
3de47213
DR
155 * Debugging flags that require metadata to be stored in the slab. These get
156 * disabled when slub_debug=O is used and a cache's min order increases with
157 * metadata.
fa5ec8a1 158 */
3de47213 159#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
fa5ec8a1 160
81819f0f
CL
161/*
162 * Set of flags that will prevent slab merging
163 */
164#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
4c13dd3b
DM
165 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
166 SLAB_FAILSLAB)
81819f0f
CL
167
168#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
5a896d9e 169 SLAB_CACHE_DMA | SLAB_NOTRACK)
81819f0f 170
210b5c06
CG
171#define OO_SHIFT 16
172#define OO_MASK ((1 << OO_SHIFT) - 1)
50d5c41c 173#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
210b5c06 174
81819f0f 175/* Internal SLUB flags */
f90ec390 176#define __OBJECT_POISON 0x80000000UL /* Poison object */
b789ef51 177#define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */
81819f0f 178
81819f0f
CL
179#ifdef CONFIG_SMP
180static struct notifier_block slab_notifier;
181#endif
182
02cbc874
CL
183/*
184 * Tracking user of a slab.
185 */
d6543e39 186#define TRACK_ADDRS_COUNT 16
02cbc874 187struct track {
ce71e27c 188 unsigned long addr; /* Called from address */
d6543e39
BG
189#ifdef CONFIG_STACKTRACE
190 unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
191#endif
02cbc874
CL
192 int cpu; /* Was running on cpu */
193 int pid; /* Pid context */
194 unsigned long when; /* When did the operation occur */
195};
196
197enum track_item { TRACK_ALLOC, TRACK_FREE };
198
ab4d5ed5 199#ifdef CONFIG_SYSFS
81819f0f
CL
200static int sysfs_slab_add(struct kmem_cache *);
201static int sysfs_slab_alias(struct kmem_cache *, const char *);
202static void sysfs_slab_remove(struct kmem_cache *);
8ff12cfc 203
81819f0f 204#else
0c710013
CL
205static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
206static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
207 { return 0; }
db265eca 208static inline void sysfs_slab_remove(struct kmem_cache *s) { }
8ff12cfc 209
81819f0f
CL
210#endif
211
4fdccdfb 212static inline void stat(const struct kmem_cache *s, enum stat_item si)
8ff12cfc
CL
213{
214#ifdef CONFIG_SLUB_STATS
84e554e6 215 __this_cpu_inc(s->cpu_slab->stat[si]);
8ff12cfc
CL
216#endif
217}
218
81819f0f
CL
219/********************************************************************
220 * Core slab cache functions
221 *******************************************************************/
222
81819f0f
CL
223static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
224{
81819f0f 225 return s->node[node];
81819f0f
CL
226}
227
6446faa2 228/* Verify that a pointer has an address that is valid within a slab page */
02cbc874
CL
229static inline int check_valid_pointer(struct kmem_cache *s,
230 struct page *page, const void *object)
231{
232 void *base;
233
a973e9dd 234 if (!object)
02cbc874
CL
235 return 1;
236
a973e9dd 237 base = page_address(page);
39b26464 238 if (object < base || object >= base + page->objects * s->size ||
02cbc874
CL
239 (object - base) % s->size) {
240 return 0;
241 }
242
243 return 1;
244}
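/*
 * Editor's note (worked example, not in the original source): for a slab
 * page at "base" with page->objects = 32 and s->size = 64, a pointer is
 * valid only if it lies in [base, base + 32 * 64) and its offset from base
 * is a multiple of 64, i.e. it points at the start of one of the 32 slots.
 */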
245
7656c72b
CL
246static inline void *get_freepointer(struct kmem_cache *s, void *object)
247{
248 return *(void **)(object + s->offset);
249}
250
0ad9500e
ED
251static void prefetch_freepointer(const struct kmem_cache *s, void *object)
252{
253 prefetch(object + s->offset);
254}
255
1393d9a1
CL
256static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
257{
258 void *p;
259
260#ifdef CONFIG_DEBUG_PAGEALLOC
261 probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
262#else
263 p = get_freepointer(s, object);
264#endif
265 return p;
266}
267
7656c72b
CL
268static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
269{
270 *(void **)(object + s->offset) = fp;
271}
272
273/* Loop over all objects in a slab */
224a88be
CL
274#define for_each_object(__p, __s, __addr, __objects) \
275 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
7656c72b
CL
276 __p += (__s)->size)
277
7656c72b
CL
278/* Determine object index from a given position */
279static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
280{
281 return (p - addr) / s->size;
282}
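/*
 * Editor's note (worked example, not in the original source): for a
 * hypothetical cache with s->size = 64 and s->offset = 0 the free pointer
 * occupies the first word of each free object, for_each_object() visits
 * addr, addr + 64, addr + 128, ... and slab_index(addr + 128, s, addr)
 * returns 2.
 */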
283
d71f606f
MK
284static inline size_t slab_ksize(const struct kmem_cache *s)
285{
286#ifdef CONFIG_SLUB_DEBUG
287 /*
288 * Debugging requires use of the padding between object
289 * and whatever may come after it.
290 */
291 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
3b0efdfa 292 return s->object_size;
d71f606f
MK
293
294#endif
295 /*
296 * If we have the need to store the freelist pointer
297 * back there or track user information then we can
298 * only use the space before that information.
299 */
300 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
301 return s->inuse;
302 /*
303 * Else we can use all the padding etc for the allocation
304 */
305 return s->size;
306}
307
ab9a0f19
LJ
308static inline int order_objects(int order, unsigned long size, int reserved)
309{
310 return ((PAGE_SIZE << order) - reserved) / size;
311}
312
834f3d11 313static inline struct kmem_cache_order_objects oo_make(int order,
ab9a0f19 314 unsigned long size, int reserved)
834f3d11
CL
315{
316 struct kmem_cache_order_objects x = {
ab9a0f19 317 (order << OO_SHIFT) + order_objects(order, size, reserved)
834f3d11
CL
318 };
319
320 return x;
321}
322
323static inline int oo_order(struct kmem_cache_order_objects x)
324{
210b5c06 325 return x.x >> OO_SHIFT;
834f3d11
CL
326}
327
328static inline int oo_objects(struct kmem_cache_order_objects x)
329{
210b5c06 330 return x.x & OO_MASK;
834f3d11
CL
331}
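/*
 * Editor's note (worked example, not in the original source): assuming
 * PAGE_SIZE is 4096, oo_make(3, 256, 0) yields order_objects(3, 256, 0) =
 * (4096 << 3) / 256 = 128 objects and packs both values into one word:
 * x.x = (3 << OO_SHIFT) + 128 = 0x30080. oo_order() recovers 3 and
 * oo_objects() recovers 128.
 */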
332
881db7fb
CL
333/*
334 * Per slab locking using the pagelock
335 */
336static __always_inline void slab_lock(struct page *page)
337{
338 bit_spin_lock(PG_locked, &page->flags);
339}
340
341static __always_inline void slab_unlock(struct page *page)
342{
343 __bit_spin_unlock(PG_locked, &page->flags);
344}
345
1d07171c
CL
346/* Interrupts must be disabled (for the fallback code to work right) */
347static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
348 void *freelist_old, unsigned long counters_old,
349 void *freelist_new, unsigned long counters_new,
350 const char *n)
351{
352 VM_BUG_ON(!irqs_disabled());
2565409f
HC
353#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
354 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
1d07171c 355 if (s->flags & __CMPXCHG_DOUBLE) {
cdcd6298 356 if (cmpxchg_double(&page->freelist, &page->counters,
1d07171c
CL
357 freelist_old, counters_old,
358 freelist_new, counters_new))
359 return 1;
360 } else
361#endif
362 {
363 slab_lock(page);
364 if (page->freelist == freelist_old && page->counters == counters_old) {
365 page->freelist = freelist_new;
366 page->counters = counters_new;
367 slab_unlock(page);
368 return 1;
369 }
370 slab_unlock(page);
371 }
372
373 cpu_relax();
374 stat(s, CMPXCHG_DOUBLE_FAIL);
375
376#ifdef SLUB_DEBUG_CMPXCHG
377 printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
378#endif
379
380 return 0;
381}
382
b789ef51
CL
383static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
384 void *freelist_old, unsigned long counters_old,
385 void *freelist_new, unsigned long counters_new,
386 const char *n)
387{
2565409f
HC
388#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
389 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
b789ef51 390 if (s->flags & __CMPXCHG_DOUBLE) {
cdcd6298 391 if (cmpxchg_double(&page->freelist, &page->counters,
b789ef51
CL
392 freelist_old, counters_old,
393 freelist_new, counters_new))
394 return 1;
395 } else
396#endif
397 {
1d07171c
CL
398 unsigned long flags;
399
400 local_irq_save(flags);
881db7fb 401 slab_lock(page);
b789ef51
CL
402 if (page->freelist == freelist_old && page->counters == counters_old) {
403 page->freelist = freelist_new;
404 page->counters = counters_new;
881db7fb 405 slab_unlock(page);
1d07171c 406 local_irq_restore(flags);
b789ef51
CL
407 return 1;
408 }
881db7fb 409 slab_unlock(page);
1d07171c 410 local_irq_restore(flags);
b789ef51
CL
411 }
412
413 cpu_relax();
414 stat(s, CMPXCHG_DOUBLE_FAIL);
415
416#ifdef SLUB_DEBUG_CMPXCHG
417 printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
418#endif
419
420 return 0;
421}
422
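/*
 * Editor's note (not in the original source): the two helpers above differ
 * only in how they handle interrupts. __cmpxchg_double_slab() is called
 * with interrupts already disabled (see the VM_BUG_ON), so its slab_lock()
 * fallback needs no irq handling, while cmpxchg_double_slab() may be called
 * with interrupts enabled and therefore wraps the fallback in
 * local_irq_save()/local_irq_restore().
 */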
41ecc55b 423#ifdef CONFIG_SLUB_DEBUG
5f80b13a
CL
424/*
425 * Determine a map of object in use on a page.
426 *
881db7fb 427 * Node listlock must be held to guarantee that the page does
5f80b13a
CL
428 * not vanish from under us.
429 */
430static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
431{
432 void *p;
433 void *addr = page_address(page);
434
435 for (p = page->freelist; p; p = get_freepointer(s, p))
436 set_bit(slab_index(p, s, addr), map);
437}
438
41ecc55b
CL
439/*
440 * Debug settings:
441 */
f0630fff
CL
442#ifdef CONFIG_SLUB_DEBUG_ON
443static int slub_debug = DEBUG_DEFAULT_FLAGS;
444#else
41ecc55b 445static int slub_debug;
f0630fff 446#endif
41ecc55b
CL
447
448static char *slub_debug_slabs;
fa5ec8a1 449static int disable_higher_order_debug;
41ecc55b 450
81819f0f
CL
451/*
452 * Object debugging
453 */
454static void print_section(char *text, u8 *addr, unsigned int length)
455{
ffc79d28
SAS
456 print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
457 length, 1);
81819f0f
CL
458}
459
81819f0f
CL
460static struct track *get_track(struct kmem_cache *s, void *object,
461 enum track_item alloc)
462{
463 struct track *p;
464
465 if (s->offset)
466 p = object + s->offset + sizeof(void *);
467 else
468 p = object + s->inuse;
469
470 return p + alloc;
471}
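/*
 * Editor's note (not in the original source): two struct track records are
 * stored back to back, so "p + alloc" indexes them with the track_item enum
 * (TRACK_ALLOC = 0 selects the allocation record, TRACK_FREE = 1 the free
 * record); they live after the free pointer when s->offset is set, else
 * right after the object at s->inuse.
 */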
472
473static void set_track(struct kmem_cache *s, void *object,
ce71e27c 474 enum track_item alloc, unsigned long addr)
81819f0f 475{
1a00df4a 476 struct track *p = get_track(s, object, alloc);
81819f0f 477
81819f0f 478 if (addr) {
d6543e39
BG
479#ifdef CONFIG_STACKTRACE
480 struct stack_trace trace;
481 int i;
482
483 trace.nr_entries = 0;
484 trace.max_entries = TRACK_ADDRS_COUNT;
485 trace.entries = p->addrs;
486 trace.skip = 3;
487 save_stack_trace(&trace);
488
489 /* See rant in lockdep.c */
490 if (trace.nr_entries != 0 &&
491 trace.entries[trace.nr_entries - 1] == ULONG_MAX)
492 trace.nr_entries--;
493
494 for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
495 p->addrs[i] = 0;
496#endif
81819f0f
CL
497 p->addr = addr;
498 p->cpu = smp_processor_id();
88e4ccf2 499 p->pid = current->pid;
81819f0f
CL
500 p->when = jiffies;
501 } else
502 memset(p, 0, sizeof(struct track));
503}
504
81819f0f
CL
505static void init_tracking(struct kmem_cache *s, void *object)
506{
24922684
CL
507 if (!(s->flags & SLAB_STORE_USER))
508 return;
509
ce71e27c
EGM
510 set_track(s, object, TRACK_FREE, 0UL);
511 set_track(s, object, TRACK_ALLOC, 0UL);
81819f0f
CL
512}
513
514static void print_track(const char *s, struct track *t)
515{
516 if (!t->addr)
517 return;
518
7daf705f 519 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
ce71e27c 520 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
d6543e39
BG
521#ifdef CONFIG_STACKTRACE
522 {
523 int i;
524 for (i = 0; i < TRACK_ADDRS_COUNT; i++)
525 if (t->addrs[i])
526 printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
527 else
528 break;
529 }
530#endif
24922684
CL
531}
532
533static void print_tracking(struct kmem_cache *s, void *object)
534{
535 if (!(s->flags & SLAB_STORE_USER))
536 return;
537
538 print_track("Allocated", get_track(s, object, TRACK_ALLOC));
539 print_track("Freed", get_track(s, object, TRACK_FREE));
540}
541
542static void print_page_info(struct page *page)
543{
39b26464
CL
544 printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
545 page, page->objects, page->inuse, page->freelist, page->flags);
24922684
CL
546
547}
548
549static void slab_bug(struct kmem_cache *s, char *fmt, ...)
550{
551 va_list args;
552 char buf[100];
553
554 va_start(args, fmt);
555 vsnprintf(buf, sizeof(buf), fmt, args);
556 va_end(args);
557 printk(KERN_ERR "========================================"
558 "=====================================\n");
265d47e7 559 printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
24922684
CL
560 printk(KERN_ERR "----------------------------------------"
561 "-------------------------------------\n\n");
645df230
DJ
562
563 add_taint(TAINT_BAD_PAGE);
81819f0f
CL
564}
565
24922684
CL
566static void slab_fix(struct kmem_cache *s, char *fmt, ...)
567{
568 va_list args;
569 char buf[100];
570
571 va_start(args, fmt);
572 vsnprintf(buf, sizeof(buf), fmt, args);
573 va_end(args);
574 printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
575}
576
577static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
81819f0f
CL
578{
579 unsigned int off; /* Offset of last byte */
a973e9dd 580 u8 *addr = page_address(page);
24922684
CL
581
582 print_tracking(s, p);
583
584 print_page_info(page);
585
586 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
587 p, p - addr, get_freepointer(s, p));
588
589 if (p > addr + 16)
ffc79d28 590 print_section("Bytes b4 ", p - 16, 16);
81819f0f 591
3b0efdfa 592 print_section("Object ", p, min_t(unsigned long, s->object_size,
ffc79d28 593 PAGE_SIZE));
81819f0f 594 if (s->flags & SLAB_RED_ZONE)
3b0efdfa
CL
595 print_section("Redzone ", p + s->object_size,
596 s->inuse - s->object_size);
81819f0f 597
81819f0f
CL
598 if (s->offset)
599 off = s->offset + sizeof(void *);
600 else
601 off = s->inuse;
602
24922684 603 if (s->flags & SLAB_STORE_USER)
81819f0f 604 off += 2 * sizeof(struct track);
81819f0f
CL
605
606 if (off != s->size)
607 /* Beginning of the filler is the free pointer */
ffc79d28 608 print_section("Padding ", p + off, s->size - off);
24922684
CL
609
610 dump_stack();
81819f0f
CL
611}
612
613static void object_err(struct kmem_cache *s, struct page *page,
614 u8 *object, char *reason)
615{
3dc50637 616 slab_bug(s, "%s", reason);
24922684 617 print_trailer(s, page, object);
81819f0f
CL
618}
619
945cf2b6 620static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
81819f0f
CL
621{
622 va_list args;
623 char buf[100];
624
24922684
CL
625 va_start(args, fmt);
626 vsnprintf(buf, sizeof(buf), fmt, args);
81819f0f 627 va_end(args);
3dc50637 628 slab_bug(s, "%s", buf);
24922684 629 print_page_info(page);
81819f0f
CL
630 dump_stack();
631}
632
f7cb1933 633static void init_object(struct kmem_cache *s, void *object, u8 val)
81819f0f
CL
634{
635 u8 *p = object;
636
637 if (s->flags & __OBJECT_POISON) {
3b0efdfa
CL
638 memset(p, POISON_FREE, s->object_size - 1);
639 p[s->object_size - 1] = POISON_END;
81819f0f
CL
640 }
641
642 if (s->flags & SLAB_RED_ZONE)
3b0efdfa 643 memset(p + s->object_size, val, s->inuse - s->object_size);
81819f0f
CL
644}
645
24922684
CL
646static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
647 void *from, void *to)
648{
649 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
650 memset(from, data, to - from);
651}
652
653static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
654 u8 *object, char *what,
06428780 655 u8 *start, unsigned int value, unsigned int bytes)
24922684
CL
656{
657 u8 *fault;
658 u8 *end;
659
79824820 660 fault = memchr_inv(start, value, bytes);
24922684
CL
661 if (!fault)
662 return 1;
663
664 end = start + bytes;
665 while (end > fault && end[-1] == value)
666 end--;
667
668 slab_bug(s, "%s overwritten", what);
669 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
670 fault, end - 1, fault[0], value);
671 print_trailer(s, page, object);
672
673 restore_bytes(s, what, value, fault, end);
674 return 0;
81819f0f
CL
675}
676
81819f0f
CL
677/*
678 * Object layout:
679 *
680 * object address
681 * Bytes of the object to be managed.
682 * If the freepointer may overlay the object then the free
683 * pointer is the first word of the object.
672bba3a 684 *
81819f0f
CL
685 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
686 * 0xa5 (POISON_END)
687 *
3b0efdfa 688 * object + s->object_size
81819f0f 689 * Padding to reach word boundary. This is also used for Redzoning.
672bba3a 690 * Padding is extended by another word if Redzoning is enabled and
3b0efdfa 691 * object_size == inuse.
672bba3a 692 *
81819f0f
CL
693 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
694 * 0xcc (RED_ACTIVE) for objects in use.
695 *
696 * object + s->inuse
672bba3a
CL
697 * Meta data starts here.
698 *
81819f0f
CL
699 * A. Free pointer (if we cannot overwrite object on free)
700 * B. Tracking data for SLAB_STORE_USER
672bba3a 701 * C. Padding to reach required alignment boundary or at minimum
6446faa2 702 * one word if debugging is on to be able to detect writes
672bba3a
CL
703 * before the word boundary.
704 *
705 * Padding is done using 0x5a (POISON_INUSE)
81819f0f
CL
706 *
707 * object + s->size
672bba3a 708 * Nothing is used beyond s->size.
81819f0f 709 *
3b0efdfa 710 * If slabcaches are merged then the object_size and inuse boundaries are mostly
672bba3a 711 * ignored. And therefore no slab options that rely on these boundaries
81819f0f
CL
712 * may be used with merged slabcaches.
713 */
714
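/*
 * Editor's note (illustrative map of the layout described above, not in
 * the original source):
 *
 *   object ... object + object_size    payload (poisoned while free)
 *   ... object + inuse                 red zone / padding to word boundary
 *   + sizeof(void *)                   free pointer, if it cannot overlay
 *                                      the object
 *   + 2 * sizeof(struct track)         alloc/free tracking (SLAB_STORE_USER)
 *   ... object + size                  padding filled with 0x5a (POISON_INUSE)
 */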
81819f0f
CL
715static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
716{
717 unsigned long off = s->inuse; /* The end of info */
718
719 if (s->offset)
720 /* Freepointer is placed after the object. */
721 off += sizeof(void *);
722
723 if (s->flags & SLAB_STORE_USER)
724 /* We also have user information there */
725 off += 2 * sizeof(struct track);
726
727 if (s->size == off)
728 return 1;
729
24922684
CL
730 return check_bytes_and_report(s, page, p, "Object padding",
731 p + off, POISON_INUSE, s->size - off);
81819f0f
CL
732}
733
39b26464 734/* Check the pad bytes at the end of a slab page */
81819f0f
CL
735static int slab_pad_check(struct kmem_cache *s, struct page *page)
736{
24922684
CL
737 u8 *start;
738 u8 *fault;
739 u8 *end;
740 int length;
741 int remainder;
81819f0f
CL
742
743 if (!(s->flags & SLAB_POISON))
744 return 1;
745
a973e9dd 746 start = page_address(page);
ab9a0f19 747 length = (PAGE_SIZE << compound_order(page)) - s->reserved;
39b26464
CL
748 end = start + length;
749 remainder = length % s->size;
81819f0f
CL
750 if (!remainder)
751 return 1;
752
79824820 753 fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
24922684
CL
754 if (!fault)
755 return 1;
756 while (end > fault && end[-1] == POISON_INUSE)
757 end--;
758
759 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
ffc79d28 760 print_section("Padding ", end - remainder, remainder);
24922684 761
8a3d271d 762 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
24922684 763 return 0;
81819f0f
CL
764}
765
766static int check_object(struct kmem_cache *s, struct page *page,
f7cb1933 767 void *object, u8 val)
81819f0f
CL
768{
769 u8 *p = object;
3b0efdfa 770 u8 *endobject = object + s->object_size;
81819f0f
CL
771
772 if (s->flags & SLAB_RED_ZONE) {
24922684 773 if (!check_bytes_and_report(s, page, object, "Redzone",
3b0efdfa 774 endobject, val, s->inuse - s->object_size))
81819f0f 775 return 0;
81819f0f 776 } else {
3b0efdfa 777 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
3adbefee 778 check_bytes_and_report(s, page, p, "Alignment padding",
3b0efdfa 779 endobject, POISON_INUSE, s->inuse - s->object_size);
3adbefee 780 }
81819f0f
CL
781 }
782
783 if (s->flags & SLAB_POISON) {
f7cb1933 784 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
24922684 785 (!check_bytes_and_report(s, page, p, "Poison", p,
3b0efdfa 786 POISON_FREE, s->object_size - 1) ||
24922684 787 !check_bytes_and_report(s, page, p, "Poison",
3b0efdfa 788 p + s->object_size - 1, POISON_END, 1)))
81819f0f 789 return 0;
81819f0f
CL
790 /*
791 * check_pad_bytes cleans up on its own.
792 */
793 check_pad_bytes(s, page, p);
794 }
795
f7cb1933 796 if (!s->offset && val == SLUB_RED_ACTIVE)
81819f0f
CL
797 /*
798 * Object and freepointer overlap. Cannot check
799 * freepointer while object is allocated.
800 */
801 return 1;
802
803 /* Check free pointer validity */
804 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
805 object_err(s, page, p, "Freepointer corrupt");
806 /*
9f6c708e 807 * No choice but to zap it and thus lose the remainder
81819f0f 808 * of the free objects in this slab. May cause
672bba3a 809 * another error because the object count is now wrong.
81819f0f 810 */
a973e9dd 811 set_freepointer(s, p, NULL);
81819f0f
CL
812 return 0;
813 }
814 return 1;
815}
816
817static int check_slab(struct kmem_cache *s, struct page *page)
818{
39b26464
CL
819 int maxobj;
820
81819f0f
CL
821 VM_BUG_ON(!irqs_disabled());
822
823 if (!PageSlab(page)) {
24922684 824 slab_err(s, page, "Not a valid slab page");
81819f0f
CL
825 return 0;
826 }
39b26464 827
ab9a0f19 828 maxobj = order_objects(compound_order(page), s->size, s->reserved);
39b26464
CL
829 if (page->objects > maxobj) {
830 slab_err(s, page, "objects %u > max %u",
 831 page->objects, maxobj);
832 return 0;
833 }
834 if (page->inuse > page->objects) {
24922684 835 slab_err(s, page, "inuse %u > max %u",
39b26464 836 page->inuse, page->objects);
81819f0f
CL
837 return 0;
838 }
839 /* Slab_pad_check fixes things up after itself */
840 slab_pad_check(s, page);
841 return 1;
842}
843
844/*
672bba3a
CL
845 * Determine if a certain object on a page is on the freelist. Must hold the
846 * slab lock to guarantee that the chains are in a consistent state.
81819f0f
CL
847 */
848static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
849{
850 int nr = 0;
881db7fb 851 void *fp;
81819f0f 852 void *object = NULL;
224a88be 853 unsigned long max_objects;
81819f0f 854
881db7fb 855 fp = page->freelist;
39b26464 856 while (fp && nr <= page->objects) {
81819f0f
CL
857 if (fp == search)
858 return 1;
859 if (!check_valid_pointer(s, page, fp)) {
860 if (object) {
861 object_err(s, page, object,
862 "Freechain corrupt");
a973e9dd 863 set_freepointer(s, object, NULL);
81819f0f
CL
864 break;
865 } else {
24922684 866 slab_err(s, page, "Freepointer corrupt");
a973e9dd 867 page->freelist = NULL;
39b26464 868 page->inuse = page->objects;
24922684 869 slab_fix(s, "Freelist cleared");
81819f0f
CL
870 return 0;
871 }
872 break;
873 }
874 object = fp;
875 fp = get_freepointer(s, object);
876 nr++;
877 }
878
ab9a0f19 879 max_objects = order_objects(compound_order(page), s->size, s->reserved);
210b5c06
CG
880 if (max_objects > MAX_OBJS_PER_PAGE)
881 max_objects = MAX_OBJS_PER_PAGE;
224a88be
CL
882
883 if (page->objects != max_objects) {
884 slab_err(s, page, "Wrong number of objects. Found %d but "
885 "should be %d", page->objects, max_objects);
886 page->objects = max_objects;
887 slab_fix(s, "Number of objects adjusted.");
888 }
39b26464 889 if (page->inuse != page->objects - nr) {
70d71228 890 slab_err(s, page, "Wrong object count. Counter is %d but "
39b26464
CL
891 "counted were %d", page->inuse, page->objects - nr);
892 page->inuse = page->objects - nr;
24922684 893 slab_fix(s, "Object count adjusted.");
81819f0f
CL
894 }
895 return search == NULL;
896}
897
0121c619
CL
898static void trace(struct kmem_cache *s, struct page *page, void *object,
899 int alloc)
3ec09742
CL
900{
901 if (s->flags & SLAB_TRACE) {
902 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
903 s->name,
904 alloc ? "alloc" : "free",
905 object, page->inuse,
906 page->freelist);
907
908 if (!alloc)
3b0efdfa 909 print_section("Object ", (void *)object, s->object_size);
3ec09742
CL
910
911 dump_stack();
912 }
913}
914
c016b0bd
CL
915/*
916 * Hooks for other subsystems that check memory allocations. In a typical
917 * production configuration these hooks all should produce no code at all.
918 */
919static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
920{
c1d50836 921 flags &= gfp_allowed_mask;
c016b0bd
CL
922 lockdep_trace_alloc(flags);
923 might_sleep_if(flags & __GFP_WAIT);
924
3b0efdfa 925 return should_failslab(s->object_size, flags, s->flags);
c016b0bd
CL
926}
927
928static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
929{
c1d50836 930 flags &= gfp_allowed_mask;
b3d41885 931 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
3b0efdfa 932 kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
c016b0bd
CL
933}
934
935static inline void slab_free_hook(struct kmem_cache *s, void *x)
936{
937 kmemleak_free_recursive(x, s->flags);
c016b0bd 938
d3f661d6
CL
939 /*
 940 * Trouble is that we may no longer disable interrupts in the fast path.
941 * So in order to make the debug calls that expect irqs to be
942 * disabled we need to disable interrupts temporarily.
943 */
944#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
945 {
946 unsigned long flags;
947
948 local_irq_save(flags);
3b0efdfa
CL
949 kmemcheck_slab_free(s, x, s->object_size);
950 debug_check_no_locks_freed(x, s->object_size);
d3f661d6
CL
951 local_irq_restore(flags);
952 }
953#endif
f9b615de 954 if (!(s->flags & SLAB_DEBUG_OBJECTS))
3b0efdfa 955 debug_check_no_obj_freed(x, s->object_size);
c016b0bd
CL
956}
957
643b1138 958/*
672bba3a 959 * Tracking of fully allocated slabs for debugging purposes.
5cc6eee8
CL
960 *
961 * list_lock must be held.
643b1138 962 */
5cc6eee8
CL
963static void add_full(struct kmem_cache *s,
964 struct kmem_cache_node *n, struct page *page)
643b1138 965{
5cc6eee8
CL
966 if (!(s->flags & SLAB_STORE_USER))
967 return;
968
643b1138 969 list_add(&page->lru, &n->full);
643b1138
CL
970}
971
5cc6eee8
CL
972/*
973 * list_lock must be held.
974 */
643b1138
CL
975static void remove_full(struct kmem_cache *s, struct page *page)
976{
643b1138
CL
977 if (!(s->flags & SLAB_STORE_USER))
978 return;
979
643b1138 980 list_del(&page->lru);
643b1138
CL
981}
982
0f389ec6
CL
983/* Tracking of the number of slabs for debugging purposes */
984static inline unsigned long slabs_node(struct kmem_cache *s, int node)
985{
986 struct kmem_cache_node *n = get_node(s, node);
987
988 return atomic_long_read(&n->nr_slabs);
989}
990
26c02cf0
AB
991static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
992{
993 return atomic_long_read(&n->nr_slabs);
994}
995
205ab99d 996static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
0f389ec6
CL
997{
998 struct kmem_cache_node *n = get_node(s, node);
999
1000 /*
1001 * May be called early in order to allocate a slab for the
1002 * kmem_cache_node structure. Solve the chicken-egg
1003 * dilemma by deferring the increment of the count during
1004 * bootstrap (see early_kmem_cache_node_alloc).
1005 */
7340cc84 1006 if (n) {
0f389ec6 1007 atomic_long_inc(&n->nr_slabs);
205ab99d
CL
1008 atomic_long_add(objects, &n->total_objects);
1009 }
0f389ec6 1010}
205ab99d 1011static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
0f389ec6
CL
1012{
1013 struct kmem_cache_node *n = get_node(s, node);
1014
1015 atomic_long_dec(&n->nr_slabs);
205ab99d 1016 atomic_long_sub(objects, &n->total_objects);
0f389ec6
CL
1017}
1018
1019/* Object debug checks for alloc/free paths */
3ec09742
CL
1020static void setup_object_debug(struct kmem_cache *s, struct page *page,
1021 void *object)
1022{
1023 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1024 return;
1025
f7cb1933 1026 init_object(s, object, SLUB_RED_INACTIVE);
3ec09742
CL
1027 init_tracking(s, object);
1028}
1029
1537066c 1030static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
ce71e27c 1031 void *object, unsigned long addr)
81819f0f
CL
1032{
1033 if (!check_slab(s, page))
1034 goto bad;
1035
81819f0f
CL
1036 if (!check_valid_pointer(s, page, object)) {
1037 object_err(s, page, object, "Freelist Pointer check fails");
70d71228 1038 goto bad;
81819f0f
CL
1039 }
1040
f7cb1933 1041 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
81819f0f 1042 goto bad;
81819f0f 1043
3ec09742
CL
 1044 /* Success. Perform special debug activities for allocs. */
1045 if (s->flags & SLAB_STORE_USER)
1046 set_track(s, object, TRACK_ALLOC, addr);
1047 trace(s, page, object, 1);
f7cb1933 1048 init_object(s, object, SLUB_RED_ACTIVE);
81819f0f 1049 return 1;
3ec09742 1050
81819f0f
CL
1051bad:
1052 if (PageSlab(page)) {
1053 /*
 1054 * If this is a slab page then let's do the best we can
1055 * to avoid issues in the future. Marking all objects
672bba3a 1056 * as used avoids touching the remaining objects.
81819f0f 1057 */
24922684 1058 slab_fix(s, "Marking all objects used");
39b26464 1059 page->inuse = page->objects;
a973e9dd 1060 page->freelist = NULL;
81819f0f
CL
1061 }
1062 return 0;
1063}
1064
19c7ff9e
CL
1065static noinline struct kmem_cache_node *free_debug_processing(
1066 struct kmem_cache *s, struct page *page, void *object,
1067 unsigned long addr, unsigned long *flags)
81819f0f 1068{
19c7ff9e 1069 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
5c2e4bbb 1070
19c7ff9e 1071 spin_lock_irqsave(&n->list_lock, *flags);
881db7fb
CL
1072 slab_lock(page);
1073
81819f0f
CL
1074 if (!check_slab(s, page))
1075 goto fail;
1076
1077 if (!check_valid_pointer(s, page, object)) {
70d71228 1078 slab_err(s, page, "Invalid object pointer 0x%p", object);
81819f0f
CL
1079 goto fail;
1080 }
1081
1082 if (on_freelist(s, page, object)) {
24922684 1083 object_err(s, page, object, "Object already free");
81819f0f
CL
1084 goto fail;
1085 }
1086
f7cb1933 1087 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
5c2e4bbb 1088 goto out;
81819f0f 1089
1b4f59e3 1090 if (unlikely(s != page->slab_cache)) {
3adbefee 1091 if (!PageSlab(page)) {
70d71228
CL
1092 slab_err(s, page, "Attempt to free object(0x%p) "
1093 "outside of slab", object);
1b4f59e3 1094 } else if (!page->slab_cache) {
81819f0f 1095 printk(KERN_ERR
70d71228 1096 "SLUB <none>: no slab for object 0x%p.\n",
81819f0f 1097 object);
70d71228 1098 dump_stack();
06428780 1099 } else
24922684
CL
1100 object_err(s, page, object,
1101 "page slab pointer corrupt.");
81819f0f
CL
1102 goto fail;
1103 }
3ec09742 1104
3ec09742
CL
1105 if (s->flags & SLAB_STORE_USER)
1106 set_track(s, object, TRACK_FREE, addr);
1107 trace(s, page, object, 0);
f7cb1933 1108 init_object(s, object, SLUB_RED_INACTIVE);
5c2e4bbb 1109out:
881db7fb 1110 slab_unlock(page);
19c7ff9e
CL
1111 /*
1112 * Keep node_lock to preserve integrity
1113 * until the object is actually freed
1114 */
1115 return n;
3ec09742 1116
81819f0f 1117fail:
19c7ff9e
CL
1118 slab_unlock(page);
1119 spin_unlock_irqrestore(&n->list_lock, *flags);
24922684 1120 slab_fix(s, "Object at 0x%p not freed", object);
19c7ff9e 1121 return NULL;
81819f0f
CL
1122}
1123
41ecc55b
CL
1124static int __init setup_slub_debug(char *str)
1125{
f0630fff
CL
1126 slub_debug = DEBUG_DEFAULT_FLAGS;
1127 if (*str++ != '=' || !*str)
1128 /*
1129 * No options specified. Switch on full debugging.
1130 */
1131 goto out;
1132
1133 if (*str == ',')
1134 /*
1135 * No options but restriction on slabs. This means full
1136 * debugging for slabs matching a pattern.
1137 */
1138 goto check_slabs;
1139
fa5ec8a1
DR
1140 if (tolower(*str) == 'o') {
1141 /*
 1142 * Avoid enabling debugging on caches if their minimum order
1143 * would increase as a result.
1144 */
1145 disable_higher_order_debug = 1;
1146 goto out;
1147 }
1148
f0630fff
CL
1149 slub_debug = 0;
1150 if (*str == '-')
1151 /*
1152 * Switch off all debugging measures.
1153 */
1154 goto out;
1155
1156 /*
1157 * Determine which debug features should be switched on
1158 */
06428780 1159 for (; *str && *str != ','; str++) {
f0630fff
CL
1160 switch (tolower(*str)) {
1161 case 'f':
1162 slub_debug |= SLAB_DEBUG_FREE;
1163 break;
1164 case 'z':
1165 slub_debug |= SLAB_RED_ZONE;
1166 break;
1167 case 'p':
1168 slub_debug |= SLAB_POISON;
1169 break;
1170 case 'u':
1171 slub_debug |= SLAB_STORE_USER;
1172 break;
1173 case 't':
1174 slub_debug |= SLAB_TRACE;
1175 break;
4c13dd3b
DM
1176 case 'a':
1177 slub_debug |= SLAB_FAILSLAB;
1178 break;
f0630fff
CL
1179 default:
1180 printk(KERN_ERR "slub_debug option '%c' "
06428780 1181 "unknown. skipped\n", *str);
f0630fff 1182 }
41ecc55b
CL
1183 }
1184
f0630fff 1185check_slabs:
41ecc55b
CL
1186 if (*str == ',')
1187 slub_debug_slabs = str + 1;
f0630fff 1188out:
41ecc55b
CL
1189 return 1;
1190}
1191
1192__setup("slub_debug", setup_slub_debug);
1193
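/*
 * Editor's note (usage sketch, not in the original source): booting with
 * "slub_debug" alone turns on DEBUG_DEFAULT_FLAGS for all caches,
 * "slub_debug=FZ" enables only sanity checks (F) and red zoning (Z), and
 * "slub_debug=P,kmalloc-64" (a hypothetical cache name) poisons only
 * caches whose name starts with "kmalloc-64"; "slub_debug=-" switches all
 * debugging off.
 */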
3b0efdfa 1194static unsigned long kmem_cache_flags(unsigned long object_size,
ba0268a8 1195 unsigned long flags, const char *name,
51cc5068 1196 void (*ctor)(void *))
41ecc55b
CL
1197{
1198 /*
e153362a 1199 * Enable debugging if selected on the kernel commandline.
41ecc55b 1200 */
e153362a 1201 if (slub_debug && (!slub_debug_slabs ||
3de47213
DR
1202 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
1203 flags |= slub_debug;
ba0268a8
CL
1204
1205 return flags;
41ecc55b
CL
1206}
1207#else
3ec09742
CL
1208static inline void setup_object_debug(struct kmem_cache *s,
1209 struct page *page, void *object) {}
41ecc55b 1210
3ec09742 1211static inline int alloc_debug_processing(struct kmem_cache *s,
ce71e27c 1212 struct page *page, void *object, unsigned long addr) { return 0; }
41ecc55b 1213
19c7ff9e
CL
1214static inline struct kmem_cache_node *free_debug_processing(
1215 struct kmem_cache *s, struct page *page, void *object,
1216 unsigned long addr, unsigned long *flags) { return NULL; }
41ecc55b 1217
41ecc55b
CL
1218static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1219 { return 1; }
1220static inline int check_object(struct kmem_cache *s, struct page *page,
f7cb1933 1221 void *object, u8 val) { return 1; }
5cc6eee8
CL
1222static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1223 struct page *page) {}
2cfb7455 1224static inline void remove_full(struct kmem_cache *s, struct page *page) {}
3b0efdfa 1225static inline unsigned long kmem_cache_flags(unsigned long object_size,
ba0268a8 1226 unsigned long flags, const char *name,
51cc5068 1227 void (*ctor)(void *))
ba0268a8
CL
1228{
1229 return flags;
1230}
41ecc55b 1231#define slub_debug 0
0f389ec6 1232
fdaa45e9
IM
1233#define disable_higher_order_debug 0
1234
0f389ec6
CL
1235static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1236 { return 0; }
26c02cf0
AB
1237static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1238 { return 0; }
205ab99d
CL
1239static inline void inc_slabs_node(struct kmem_cache *s, int node,
1240 int objects) {}
1241static inline void dec_slabs_node(struct kmem_cache *s, int node,
1242 int objects) {}
7d550c56
CL
1243
1244static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
1245 { return 0; }
1246
1247static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1248 void *object) {}
1249
1250static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
1251
ab4d5ed5 1252#endif /* CONFIG_SLUB_DEBUG */
205ab99d 1253
81819f0f
CL
1254/*
1255 * Slab allocation and freeing
1256 */
65c3376a
CL
1257static inline struct page *alloc_slab_page(gfp_t flags, int node,
1258 struct kmem_cache_order_objects oo)
1259{
1260 int order = oo_order(oo);
1261
b1eeab67
VN
1262 flags |= __GFP_NOTRACK;
1263
2154a336 1264 if (node == NUMA_NO_NODE)
65c3376a
CL
1265 return alloc_pages(flags, order);
1266 else
6b65aaf3 1267 return alloc_pages_exact_node(node, flags, order);
65c3376a
CL
1268}
1269
81819f0f
CL
1270static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1271{
06428780 1272 struct page *page;
834f3d11 1273 struct kmem_cache_order_objects oo = s->oo;
ba52270d 1274 gfp_t alloc_gfp;
81819f0f 1275
7e0528da
CL
1276 flags &= gfp_allowed_mask;
1277
1278 if (flags & __GFP_WAIT)
1279 local_irq_enable();
1280
b7a49f0d 1281 flags |= s->allocflags;
e12ba74d 1282
ba52270d
PE
1283 /*
1284 * Let the initial higher-order allocation fail under memory pressure
1285 * so we fall-back to the minimum order allocation.
1286 */
1287 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1288
1289 page = alloc_slab_page(alloc_gfp, node, oo);
65c3376a
CL
1290 if (unlikely(!page)) {
1291 oo = s->min;
1292 /*
1293 * Allocation may have failed due to fragmentation.
1294 * Try a lower order alloc if possible
1295 */
1296 page = alloc_slab_page(flags, node, oo);
81819f0f 1297
7e0528da
CL
1298 if (page)
1299 stat(s, ORDER_FALLBACK);
65c3376a 1300 }
5a896d9e 1301
737b719e 1302 if (kmemcheck_enabled && page
5086c389 1303 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
b1eeab67
VN
1304 int pages = 1 << oo_order(oo);
1305
1306 kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
1307
1308 /*
1309 * Objects from caches that have a constructor don't get
1310 * cleared when they're allocated, so we need to do it here.
1311 */
1312 if (s->ctor)
1313 kmemcheck_mark_uninitialized_pages(page, pages);
1314 else
1315 kmemcheck_mark_unallocated_pages(page, pages);
5a896d9e
VN
1316 }
1317
737b719e
DR
1318 if (flags & __GFP_WAIT)
1319 local_irq_disable();
1320 if (!page)
1321 return NULL;
1322
834f3d11 1323 page->objects = oo_objects(oo);
81819f0f
CL
1324 mod_zone_page_state(page_zone(page),
1325 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1326 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
65c3376a 1327 1 << oo_order(oo));
81819f0f
CL
1328
1329 return page;
1330}
1331
1332static void setup_object(struct kmem_cache *s, struct page *page,
1333 void *object)
1334{
3ec09742 1335 setup_object_debug(s, page, object);
4f104934 1336 if (unlikely(s->ctor))
51cc5068 1337 s->ctor(object);
81819f0f
CL
1338}
1339
1340static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1341{
1342 struct page *page;
81819f0f 1343 void *start;
81819f0f
CL
1344 void *last;
1345 void *p;
1346
6cb06229 1347 BUG_ON(flags & GFP_SLAB_BUG_MASK);
81819f0f 1348
6cb06229
CL
1349 page = allocate_slab(s,
1350 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
81819f0f
CL
1351 if (!page)
1352 goto out;
1353
205ab99d 1354 inc_slabs_node(s, page_to_nid(page), page->objects);
1b4f59e3 1355 page->slab_cache = s;
c03f94cc 1356 __SetPageSlab(page);
072bb0aa
MG
1357 if (page->pfmemalloc)
1358 SetPageSlabPfmemalloc(page);
81819f0f
CL
1359
1360 start = page_address(page);
81819f0f
CL
1361
1362 if (unlikely(s->flags & SLAB_POISON))
834f3d11 1363 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
81819f0f
CL
1364
1365 last = start;
224a88be 1366 for_each_object(p, s, start, page->objects) {
81819f0f
CL
1367 setup_object(s, page, last);
1368 set_freepointer(s, last, p);
1369 last = p;
1370 }
1371 setup_object(s, page, last);
a973e9dd 1372 set_freepointer(s, last, NULL);
81819f0f
CL
1373
1374 page->freelist = start;
e6e82ea1 1375 page->inuse = page->objects;
8cb0a506 1376 page->frozen = 1;
81819f0f 1377out:
81819f0f
CL
1378 return page;
1379}
1380
1381static void __free_slab(struct kmem_cache *s, struct page *page)
1382{
834f3d11
CL
1383 int order = compound_order(page);
1384 int pages = 1 << order;
81819f0f 1385
af537b0a 1386 if (kmem_cache_debug(s)) {
81819f0f
CL
1387 void *p;
1388
1389 slab_pad_check(s, page);
224a88be
CL
1390 for_each_object(p, s, page_address(page),
1391 page->objects)
f7cb1933 1392 check_object(s, page, p, SLUB_RED_INACTIVE);
81819f0f
CL
1393 }
1394
b1eeab67 1395 kmemcheck_free_shadow(page, compound_order(page));
5a896d9e 1396
81819f0f
CL
1397 mod_zone_page_state(page_zone(page),
1398 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1399 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
06428780 1400 -pages);
81819f0f 1401
072bb0aa 1402 __ClearPageSlabPfmemalloc(page);
49bd5221
CL
1403 __ClearPageSlab(page);
1404 reset_page_mapcount(page);
1eb5ac64
NP
1405 if (current->reclaim_state)
1406 current->reclaim_state->reclaimed_slab += pages;
834f3d11 1407 __free_pages(page, order);
81819f0f
CL
1408}
1409
da9a638c
LJ
1410#define need_reserve_slab_rcu \
1411 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1412
81819f0f
CL
1413static void rcu_free_slab(struct rcu_head *h)
1414{
1415 struct page *page;
1416
da9a638c
LJ
1417 if (need_reserve_slab_rcu)
1418 page = virt_to_head_page(h);
1419 else
1420 page = container_of((struct list_head *)h, struct page, lru);
1421
1b4f59e3 1422 __free_slab(page->slab_cache, page);
81819f0f
CL
1423}
1424
1425static void free_slab(struct kmem_cache *s, struct page *page)
1426{
1427 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
da9a638c
LJ
1428 struct rcu_head *head;
1429
1430 if (need_reserve_slab_rcu) {
1431 int order = compound_order(page);
1432 int offset = (PAGE_SIZE << order) - s->reserved;
1433
1434 VM_BUG_ON(s->reserved != sizeof(*head));
1435 head = page_address(page) + offset;
1436 } else {
1437 /*
1438 * RCU free overloads the RCU head over the LRU
1439 */
1440 head = (void *)&page->lru;
1441 }
81819f0f
CL
1442
1443 call_rcu(head, rcu_free_slab);
1444 } else
1445 __free_slab(s, page);
1446}
1447
1448static void discard_slab(struct kmem_cache *s, struct page *page)
1449{
205ab99d 1450 dec_slabs_node(s, page_to_nid(page), page->objects);
81819f0f
CL
1451 free_slab(s, page);
1452}
1453
1454/*
5cc6eee8
CL
1455 * Management of partially allocated slabs.
1456 *
1457 * list_lock must be held.
81819f0f 1458 */
5cc6eee8 1459static inline void add_partial(struct kmem_cache_node *n,
7c2e132c 1460 struct page *page, int tail)
81819f0f 1461{
e95eed57 1462 n->nr_partial++;
136333d1 1463 if (tail == DEACTIVATE_TO_TAIL)
7c2e132c
CL
1464 list_add_tail(&page->lru, &n->partial);
1465 else
1466 list_add(&page->lru, &n->partial);
81819f0f
CL
1467}
1468
5cc6eee8
CL
1469/*
1470 * list_lock must be held.
1471 */
1472static inline void remove_partial(struct kmem_cache_node *n,
62e346a8
CL
1473 struct page *page)
1474{
1475 list_del(&page->lru);
1476 n->nr_partial--;
1477}
1478
81819f0f 1479/*
7ced3719
CL
1480 * Remove slab from the partial list, freeze it and
1481 * return the pointer to the freelist.
81819f0f 1482 *
497b66f2
CL
1483 * Returns a list of objects or NULL if it fails.
1484 *
7ced3719 1485 * Must hold list_lock since we modify the partial list.
81819f0f 1486 */
497b66f2 1487static inline void *acquire_slab(struct kmem_cache *s,
acd19fd1 1488 struct kmem_cache_node *n, struct page *page,
49e22585 1489 int mode)
81819f0f 1490{
2cfb7455
CL
1491 void *freelist;
1492 unsigned long counters;
1493 struct page new;
1494
2cfb7455
CL
1495 /*
1496 * Zap the freelist and set the frozen bit.
1497 * The old freelist is the list of objects for the
1498 * per cpu allocation list.
1499 */
7ced3719
CL
1500 freelist = page->freelist;
1501 counters = page->counters;
1502 new.counters = counters;
23910c50 1503 if (mode) {
7ced3719 1504 new.inuse = page->objects;
23910c50
PE
1505 new.freelist = NULL;
1506 } else {
1507 new.freelist = freelist;
1508 }
2cfb7455 1509
7ced3719
CL
1510 VM_BUG_ON(new.frozen);
1511 new.frozen = 1;
2cfb7455 1512
7ced3719 1513 if (!__cmpxchg_double_slab(s, page,
2cfb7455 1514 freelist, counters,
02d7633f 1515 new.freelist, new.counters,
7ced3719 1516 "acquire_slab"))
7ced3719 1517 return NULL;
2cfb7455
CL
1518
1519 remove_partial(n, page);
7ced3719 1520 WARN_ON(!freelist);
49e22585 1521 return freelist;
81819f0f
CL
1522}
1523
49e22585 1524static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
8ba00bb6 1525static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
49e22585 1526
81819f0f 1527/*
672bba3a 1528 * Try to allocate a partial slab from a specific node.
81819f0f 1529 */
8ba00bb6
JK
1530static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1531 struct kmem_cache_cpu *c, gfp_t flags)
81819f0f 1532{
49e22585
CL
1533 struct page *page, *page2;
1534 void *object = NULL;
81819f0f
CL
1535
1536 /*
1537 * Racy check. If we mistakenly see no partial slabs then we
1538 * just allocate an empty slab. If we mistakenly try to get a
672bba3a
CL
1539 * partial slab and there is none available then get_partials()
1540 * will return NULL.
81819f0f
CL
1541 */
1542 if (!n || !n->nr_partial)
1543 return NULL;
1544
1545 spin_lock(&n->list_lock);
49e22585 1546 list_for_each_entry_safe(page, page2, &n->partial, lru) {
8ba00bb6 1547 void *t;
49e22585
CL
1548 int available;
1549
8ba00bb6
JK
1550 if (!pfmemalloc_match(page, flags))
1551 continue;
1552
1553 t = acquire_slab(s, n, page, object == NULL);
49e22585
CL
1554 if (!t)
1555 break;
1556
12d79634 1557 if (!object) {
49e22585 1558 c->page = page;
49e22585 1559 stat(s, ALLOC_FROM_PARTIAL);
49e22585
CL
1560 object = t;
1561 available = page->objects - page->inuse;
1562 } else {
49e22585 1563 available = put_cpu_partial(s, page, 0);
8028dcea 1564 stat(s, CPU_PARTIAL_NODE);
49e22585
CL
1565 }
1566 if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
1567 break;
1568
497b66f2 1569 }
81819f0f 1570 spin_unlock(&n->list_lock);
497b66f2 1571 return object;
81819f0f
CL
1572}
1573
1574/*
672bba3a 1575 * Get a page from somewhere. Search in increasing NUMA distances.
81819f0f 1576 */
de3ec035 1577static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
acd19fd1 1578 struct kmem_cache_cpu *c)
81819f0f
CL
1579{
1580#ifdef CONFIG_NUMA
1581 struct zonelist *zonelist;
dd1a239f 1582 struct zoneref *z;
54a6eb5c
MG
1583 struct zone *zone;
1584 enum zone_type high_zoneidx = gfp_zone(flags);
497b66f2 1585 void *object;
cc9a6c87 1586 unsigned int cpuset_mems_cookie;
81819f0f
CL
1587
1588 /*
672bba3a
CL
1589 * The defrag ratio allows a configuration of the tradeoffs between
1590 * inter node defragmentation and node local allocations. A lower
1591 * defrag_ratio increases the tendency to do local allocations
1592 * instead of attempting to obtain partial slabs from other nodes.
81819f0f 1593 *
672bba3a
CL
1594 * If the defrag_ratio is set to 0 then kmalloc() always
1595 * returns node local objects. If the ratio is higher then kmalloc()
1596 * may return off node objects because partial slabs are obtained
1597 * from other nodes and filled up.
81819f0f 1598 *
6446faa2 1599 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
672bba3a
CL
1600 * defrag_ratio = 1000) then every (well almost) allocation will
1601 * first attempt to defrag slab caches on other nodes. This means
1602 * scanning over all nodes to look for partial slabs which may be
1603 * expensive if we do it every time we are trying to find a slab
1604 * with available objects.
81819f0f 1605 */
9824601e
CL
1606 if (!s->remote_node_defrag_ratio ||
1607 get_cycles() % 1024 > s->remote_node_defrag_ratio)
81819f0f
CL
1608 return NULL;
1609
cc9a6c87
MG
1610 do {
1611 cpuset_mems_cookie = get_mems_allowed();
e7b691b0 1612 zonelist = node_zonelist(slab_node(), flags);
cc9a6c87
MG
1613 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1614 struct kmem_cache_node *n;
1615
1616 n = get_node(s, zone_to_nid(zone));
1617
1618 if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1619 n->nr_partial > s->min_partial) {
8ba00bb6 1620 object = get_partial_node(s, n, c, flags);
cc9a6c87
MG
1621 if (object) {
1622 /*
1623 * Return the object even if
1624 * put_mems_allowed indicated that
1625 * the cpuset mems_allowed was
1626 * updated in parallel. It's a
1627 * harmless race between the alloc
1628 * and the cpuset update.
1629 */
1630 put_mems_allowed(cpuset_mems_cookie);
1631 return object;
1632 }
c0ff7453 1633 }
81819f0f 1634 }
cc9a6c87 1635 } while (!put_mems_allowed(cpuset_mems_cookie));
81819f0f
CL
1636#endif
1637 return NULL;
1638}
1639
1640/*
1641 * Get a partial page, lock it and return it.
1642 */
497b66f2 1643static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
acd19fd1 1644 struct kmem_cache_cpu *c)
81819f0f 1645{
497b66f2 1646 void *object;
2154a336 1647 int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
81819f0f 1648
8ba00bb6 1649 object = get_partial_node(s, get_node(s, searchnode), c, flags);
497b66f2
CL
1650 if (object || node != NUMA_NO_NODE)
1651 return object;
81819f0f 1652
acd19fd1 1653 return get_any_partial(s, flags, c);
81819f0f
CL
1654}
1655
8a5ec0ba
CL
1656#ifdef CONFIG_PREEMPT
1657/*
 1658 * Calculate the next globally unique transaction for disambiguation
1659 * during cmpxchg. The transactions start with the cpu number and are then
1660 * incremented by CONFIG_NR_CPUS.
1661 */
1662#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
1663#else
1664/*
1665 * No preemption supported therefore also no need to check for
1666 * different cpus.
1667 */
1668#define TID_STEP 1
1669#endif
1670
1671static inline unsigned long next_tid(unsigned long tid)
1672{
1673 return tid + TID_STEP;
1674}
1675
1676static inline unsigned int tid_to_cpu(unsigned long tid)
1677{
1678 return tid % TID_STEP;
1679}
1680
1681static inline unsigned long tid_to_event(unsigned long tid)
1682{
1683 return tid / TID_STEP;
1684}
1685
1686static inline unsigned int init_tid(int cpu)
1687{
1688 return cpu;
1689}
1690
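/*
 * Editor's note (worked example, not in the original source): with
 * CONFIG_PREEMPT and an assumed CONFIG_NR_CPUS of 4, TID_STEP is 4, so
 * cpu 3 uses tids 3, 7, 11, ...; tid_to_cpu(11) = 11 % 4 = 3 and
 * tid_to_event(11) = 11 / 4 = 2, which is how note_cmpxchg_failure()
 * tells a cpu migration apart from a plain retry on the same cpu.
 */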
1691static inline void note_cmpxchg_failure(const char *n,
1692 const struct kmem_cache *s, unsigned long tid)
1693{
1694#ifdef SLUB_DEBUG_CMPXCHG
1695 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1696
1697 printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
1698
1699#ifdef CONFIG_PREEMPT
1700 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1701 printk("due to cpu change %d -> %d\n",
1702 tid_to_cpu(tid), tid_to_cpu(actual_tid));
1703 else
1704#endif
1705 if (tid_to_event(tid) != tid_to_event(actual_tid))
1706 printk("due to cpu running other code. Event %ld->%ld\n",
1707 tid_to_event(tid), tid_to_event(actual_tid));
1708 else
1709 printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
1710 actual_tid, tid, next_tid(tid));
1711#endif
4fdccdfb 1712 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
8a5ec0ba
CL
1713}
1714
788e1aad 1715static void init_kmem_cache_cpus(struct kmem_cache *s)
8a5ec0ba 1716{
8a5ec0ba
CL
1717 int cpu;
1718
1719 for_each_possible_cpu(cpu)
1720 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
8a5ec0ba 1721}
2cfb7455 1722
81819f0f
CL
1723/*
1724 * Remove the cpu slab
1725 */
c17dda40 1726static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
81819f0f 1727{
2cfb7455 1728 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2cfb7455
CL
1729 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1730 int lock = 0;
1731 enum slab_modes l = M_NONE, m = M_NONE;
2cfb7455 1732 void *nextfree;
136333d1 1733 int tail = DEACTIVATE_TO_HEAD;
2cfb7455
CL
1734 struct page new;
1735 struct page old;
1736
1737 if (page->freelist) {
84e554e6 1738 stat(s, DEACTIVATE_REMOTE_FREES);
136333d1 1739 tail = DEACTIVATE_TO_TAIL;
2cfb7455
CL
1740 }
1741
894b8788 1742 /*
2cfb7455
CL
1743 * Stage one: Free all available per cpu objects back
1744 * to the page freelist while it is still frozen. Leave the
1745 * last one.
1746 *
1747 * There is no need to take the list->lock because the page
1748 * is still frozen.
1749 */
1750 while (freelist && (nextfree = get_freepointer(s, freelist))) {
1751 void *prior;
1752 unsigned long counters;
1753
1754 do {
1755 prior = page->freelist;
1756 counters = page->counters;
1757 set_freepointer(s, freelist, prior);
1758 new.counters = counters;
1759 new.inuse--;
1760 VM_BUG_ON(!new.frozen);
1761
1d07171c 1762 } while (!__cmpxchg_double_slab(s, page,
2cfb7455
CL
1763 prior, counters,
1764 freelist, new.counters,
1765 "drain percpu freelist"));
1766
1767 freelist = nextfree;
1768 }
1769
894b8788 1770 /*
2cfb7455
CL
1771 * Stage two: Ensure that the page is unfrozen while the
1772 * list presence reflects the actual number of objects
1773 * during unfreeze.
1774 *
1775 * We setup the list membership and then perform a cmpxchg
1776 * with the count. If there is a mismatch then the page
1777 * is not unfrozen but the page is on the wrong list.
1778 *
1779 * Then we restart the process which may have to remove
1780 * the page from the list that we just put it on again
1781 * because the number of objects in the slab may have
1782 * changed.
894b8788 1783 */
2cfb7455 1784redo:
894b8788 1785
2cfb7455
CL
1786 old.freelist = page->freelist;
1787 old.counters = page->counters;
1788 VM_BUG_ON(!old.frozen);
7c2e132c 1789
2cfb7455
CL
1790 /* Determine target state of the slab */
1791 new.counters = old.counters;
1792 if (freelist) {
1793 new.inuse--;
1794 set_freepointer(s, freelist, old.freelist);
1795 new.freelist = freelist;
1796 } else
1797 new.freelist = old.freelist;
1798
1799 new.frozen = 0;
1800
81107188 1801 if (!new.inuse && n->nr_partial > s->min_partial)
2cfb7455
CL
1802 m = M_FREE;
1803 else if (new.freelist) {
1804 m = M_PARTIAL;
1805 if (!lock) {
1806 lock = 1;
1807 /*
1808 * Taking the spinlock removes the possibility
1809 * that acquire_slab() will see a slab page that
1810 * is frozen
1811 */
1812 spin_lock(&n->list_lock);
1813 }
1814 } else {
1815 m = M_FULL;
1816 if (kmem_cache_debug(s) && !lock) {
1817 lock = 1;
1818 /*
1819 * This also ensures that the scanning of full
1820 * slabs from diagnostic functions will not see
1821 * any frozen slabs.
1822 */
1823 spin_lock(&n->list_lock);
1824 }
1825 }
1826
1827 if (l != m) {
1828
1829 if (l == M_PARTIAL)
1830
1831 remove_partial(n, page);
1832
1833 else if (l == M_FULL)
894b8788 1834
2cfb7455
CL
1835 remove_full(s, page);
1836
1837 if (m == M_PARTIAL) {
1838
1839 add_partial(n, page, tail);
136333d1 1840 stat(s, tail);
2cfb7455
CL
1841
1842 } else if (m == M_FULL) {
894b8788 1843
2cfb7455
CL
1844 stat(s, DEACTIVATE_FULL);
1845 add_full(s, n, page);
1846
1847 }
1848 }
1849
1850 l = m;
1d07171c 1851 if (!__cmpxchg_double_slab(s, page,
2cfb7455
CL
1852 old.freelist, old.counters,
1853 new.freelist, new.counters,
1854 "unfreezing slab"))
1855 goto redo;
1856
2cfb7455
CL
1857 if (lock)
1858 spin_unlock(&n->list_lock);
1859
1860 if (m == M_FREE) {
1861 stat(s, DEACTIVATE_EMPTY);
1862 discard_slab(s, page);
1863 stat(s, FREE_SLAB);
894b8788 1864 }
81819f0f
CL
1865}
1866
d24ac77f
JK
1867/*
1868 * Unfreeze all the cpu partial slabs.
1869 *
59a09917
CL
1870 * This function must be called with interrupts disabled
1871 * for the cpu using c (or some other mechanism must be in place
1872 * to prevent concurrent accesses).
d24ac77f 1873 */
59a09917
CL
1874static void unfreeze_partials(struct kmem_cache *s,
1875 struct kmem_cache_cpu *c)
49e22585 1876{
43d77867 1877 struct kmem_cache_node *n = NULL, *n2 = NULL;
9ada1934 1878 struct page *page, *discard_page = NULL;
49e22585
CL
1879
1880 while ((page = c->partial)) {
49e22585
CL
1881 struct page new;
1882 struct page old;
1883
1884 c->partial = page->next;
43d77867
JK
1885
1886 n2 = get_node(s, page_to_nid(page));
1887 if (n != n2) {
1888 if (n)
1889 spin_unlock(&n->list_lock);
1890
1891 n = n2;
1892 spin_lock(&n->list_lock);
1893 }
49e22585
CL
1894
1895 do {
1896
1897 old.freelist = page->freelist;
1898 old.counters = page->counters;
1899 VM_BUG_ON(!old.frozen);
1900
1901 new.counters = old.counters;
1902 new.freelist = old.freelist;
1903
1904 new.frozen = 0;
1905
d24ac77f 1906 } while (!__cmpxchg_double_slab(s, page,
49e22585
CL
1907 old.freelist, old.counters,
1908 new.freelist, new.counters,
1909 "unfreezing slab"));
1910
43d77867 1911 if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
9ada1934
SL
1912 page->next = discard_page;
1913 discard_page = page;
43d77867
JK
1914 } else {
1915 add_partial(n, page, DEACTIVATE_TO_TAIL);
1916 stat(s, FREE_ADD_PARTIAL);
49e22585
CL
1917 }
1918 }
1919
1920 if (n)
1921 spin_unlock(&n->list_lock);
9ada1934
SL
1922
1923 while (discard_page) {
1924 page = discard_page;
1925 discard_page = discard_page->next;
1926
1927 stat(s, DEACTIVATE_EMPTY);
1928 discard_slab(s, page);
1929 stat(s, FREE_SLAB);
1930 }
49e22585
CL
1931}
1932
1933/*
1934 * Put a page that was just frozen (in __slab_free) into a partial page
1935 * slot if available. This is done without disabling interrupts or
1936 * preemption. The cmpxchg is racy and may put the partial page
1937 * onto a random cpu's partial slot.
1938 *
1939 * If we did not find a slot then simply move all the partials to the
1940 * per node partial list.
1941 */
788e1aad 1942static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
49e22585
CL
1943{
1944 struct page *oldpage;
1945 int pages;
1946 int pobjects;
1947
1948 do {
1949 pages = 0;
1950 pobjects = 0;
1951 oldpage = this_cpu_read(s->cpu_slab->partial);
1952
1953 if (oldpage) {
1954 pobjects = oldpage->pobjects;
1955 pages = oldpage->pages;
1956 if (drain && pobjects > s->cpu_partial) {
1957 unsigned long flags;
1958 /*
1959 * partial array is full. Move the existing
1960 * set to the per node partial list.
1961 */
1962 local_irq_save(flags);
59a09917 1963 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
49e22585 1964 local_irq_restore(flags);
e24fc410 1965 oldpage = NULL;
49e22585
CL
1966 pobjects = 0;
1967 pages = 0;
8028dcea 1968 stat(s, CPU_PARTIAL_DRAIN);
49e22585
CL
1969 }
1970 }
1971
1972 pages++;
1973 pobjects += page->objects - page->inuse;
1974
1975 page->pages = pages;
1976 page->pobjects = pobjects;
1977 page->next = oldpage;
1978
933393f5 1979 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
49e22585
CL
1980 return pobjects;
1981}
1982
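/*
 * Worked example with hypothetical numbers: assume s->cpu_partial == 30
 * and the current per cpu list already accounts for 28 free objects.
 * A call for a page with 5 free objects does not drain (28 <= 30) and
 * raises pobjects to 33; the next call that arrives with drain set sees
 * 33 > 30 and hands the whole set to unfreeze_partials() before
 * starting a fresh list with its own page.
 */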
dfb4f096 1983static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0f 1984{
84e554e6 1985 stat(s, CPUSLAB_FLUSH);
c17dda40
CL
1986 deactivate_slab(s, c->page, c->freelist);
1987
1988 c->tid = next_tid(c->tid);
1989 c->page = NULL;
1990 c->freelist = NULL;
81819f0f
CL
1991}
1992
1993/*
1994 * Flush cpu slab.
6446faa2 1995 *
81819f0f
CL
1996 * Called from IPI handler with interrupts disabled.
1997 */
0c710013 1998static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
81819f0f 1999{
9dfc6e68 2000 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
81819f0f 2001
49e22585
CL
2002 if (likely(c)) {
2003 if (c->page)
2004 flush_slab(s, c);
2005
59a09917 2006 unfreeze_partials(s, c);
49e22585 2007 }
81819f0f
CL
2008}
2009
2010static void flush_cpu_slab(void *d)
2011{
2012 struct kmem_cache *s = d;
81819f0f 2013
dfb4f096 2014 __flush_cpu_slab(s, smp_processor_id());
81819f0f
CL
2015}
2016
a8364d55
GBY
2017static bool has_cpu_slab(int cpu, void *info)
2018{
2019 struct kmem_cache *s = info;
2020 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2021
02e1a9cd 2022 return c->page || c->partial;
a8364d55
GBY
2023}
2024
81819f0f
CL
2025static void flush_all(struct kmem_cache *s)
2026{
a8364d55 2027 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
81819f0f
CL
2028}
2029
dfb4f096
CL
2030/*
2031 * Check if the objects in a per cpu structure fit numa
2032 * locality expectations.
2033 */
57d437d2 2034static inline int node_match(struct page *page, int node)
dfb4f096
CL
2035{
2036#ifdef CONFIG_NUMA
57d437d2 2037 if (node != NUMA_NO_NODE && page_to_nid(page) != node)
dfb4f096
CL
2038 return 0;
2039#endif
2040 return 1;
2041}
2042
781b2ba6
PE
2043static int count_free(struct page *page)
2044{
2045 return page->objects - page->inuse;
2046}
2047
2048static unsigned long count_partial(struct kmem_cache_node *n,
2049 int (*get_count)(struct page *))
2050{
2051 unsigned long flags;
2052 unsigned long x = 0;
2053 struct page *page;
2054
2055 spin_lock_irqsave(&n->list_lock, flags);
2056 list_for_each_entry(page, &n->partial, lru)
2057 x += get_count(page);
2058 spin_unlock_irqrestore(&n->list_lock, flags);
2059 return x;
2060}
2061
26c02cf0
AB
2062static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2063{
2064#ifdef CONFIG_SLUB_DEBUG
2065 return atomic_long_read(&n->total_objects);
2066#else
2067 return 0;
2068#endif
2069}
2070
781b2ba6
PE
2071static noinline void
2072slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2073{
2074 int node;
2075
2076 printk(KERN_WARNING
2077 "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
2078 nid, gfpflags);
2079 printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
3b0efdfa 2080 "default order: %d, min order: %d\n", s->name, s->object_size,
781b2ba6
PE
2081 s->size, oo_order(s->oo), oo_order(s->min));
2082
3b0efdfa 2083 if (oo_order(s->min) > get_order(s->object_size))
fa5ec8a1
DR
2084 printk(KERN_WARNING " %s debugging increased min order, use "
2085 "slub_debug=O to disable.\n", s->name);
2086
781b2ba6
PE
2087 for_each_online_node(node) {
2088 struct kmem_cache_node *n = get_node(s, node);
2089 unsigned long nr_slabs;
2090 unsigned long nr_objs;
2091 unsigned long nr_free;
2092
2093 if (!n)
2094 continue;
2095
26c02cf0
AB
2096 nr_free = count_partial(n, count_free);
2097 nr_slabs = node_nr_slabs(n);
2098 nr_objs = node_nr_objs(n);
781b2ba6
PE
2099
2100 printk(KERN_WARNING
2101 " node %d: slabs: %ld, objs: %ld, free: %ld\n",
2102 node, nr_slabs, nr_objs, nr_free);
2103 }
2104}
2105
497b66f2
CL
2106static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2107 int node, struct kmem_cache_cpu **pc)
2108{
6faa6833 2109 void *freelist;
188fd063
CL
2110 struct kmem_cache_cpu *c = *pc;
2111 struct page *page;
497b66f2 2112
188fd063 2113 freelist = get_partial(s, flags, node, c);
497b66f2 2114
188fd063
CL
2115 if (freelist)
2116 return freelist;
2117
2118 page = new_slab(s, flags, node);
497b66f2
CL
2119 if (page) {
2120 c = __this_cpu_ptr(s->cpu_slab);
2121 if (c->page)
2122 flush_slab(s, c);
2123
2124 /*
2125 * No other reference to the page yet so we can
2126 * muck around with it freely without cmpxchg
2127 */
6faa6833 2128 freelist = page->freelist;
497b66f2
CL
2129 page->freelist = NULL;
2130
2131 stat(s, ALLOC_SLAB);
497b66f2
CL
2132 c->page = page;
2133 *pc = c;
2134 } else
6faa6833 2135 freelist = NULL;
497b66f2 2136
6faa6833 2137 return freelist;
497b66f2
CL
2138}
2139
072bb0aa
MG
2140static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2141{
2142 if (unlikely(PageSlabPfmemalloc(page)))
2143 return gfp_pfmemalloc_allowed(gfpflags);
2144
2145 return true;
2146}
2147
213eeb9f
CL
2148/*
2149 * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
2150 * or deactivate the page.
2151 *
2152 * The page is still frozen if the return value is not NULL.
2153 *
2154 * If this function returns NULL then the page has been unfrozen.
d24ac77f
JK
2155 *
2156 * This function must be called with interrupts disabled.
213eeb9f
CL
2157 */
2158static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2159{
2160 struct page new;
2161 unsigned long counters;
2162 void *freelist;
2163
2164 do {
2165 freelist = page->freelist;
2166 counters = page->counters;
6faa6833 2167
213eeb9f
CL
2168 new.counters = counters;
2169 VM_BUG_ON(!new.frozen);
2170
2171 new.inuse = page->objects;
2172 new.frozen = freelist != NULL;
2173
d24ac77f 2174 } while (!__cmpxchg_double_slab(s, page,
213eeb9f
CL
2175 freelist, counters,
2176 NULL, new.counters,
2177 "get_freelist"));
2178
2179 return freelist;
2180}
2181
81819f0f 2182/*
894b8788
CL
2183 * Slow path. The lockless freelist is empty or we need to perform
2184 * debugging duties.
2185 *
894b8788
CL
2186 * Processing is still very fast if new objects have been freed to the
2187 * regular freelist. In that case we simply take over the regular freelist
2188 * as the lockless freelist and zap the regular freelist.
81819f0f 2189 *
894b8788
CL
2190 * If that is not working then we fall back to the partial lists. We take the
2191 * first element of the freelist as the object to allocate now and move the
2192 * rest of the freelist to the lockless freelist.
81819f0f 2193 *
894b8788 2194 * And if we were unable to get a new slab from the partial slab lists then
6446faa2
CL
2195 * we need to allocate a new slab. This is the slowest path since it involves
2196 * a call to the page allocator and the setup of a new slab.
81819f0f 2197 */
ce71e27c
EGM
2198static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2199 unsigned long addr, struct kmem_cache_cpu *c)
81819f0f 2200{
6faa6833 2201 void *freelist;
f6e7def7 2202 struct page *page;
8a5ec0ba
CL
2203 unsigned long flags;
2204
2205 local_irq_save(flags);
2206#ifdef CONFIG_PREEMPT
2207 /*
2208 * We may have been preempted and rescheduled on a different
2209 * cpu before disabling interrupts. Need to reload cpu area
2210 * pointer.
2211 */
2212 c = this_cpu_ptr(s->cpu_slab);
8a5ec0ba 2213#endif
81819f0f 2214
f6e7def7
CL
2215 page = c->page;
2216 if (!page)
81819f0f 2217 goto new_slab;
49e22585 2218redo:
6faa6833 2219
57d437d2 2220 if (unlikely(!node_match(page, node))) {
e36a2652 2221 stat(s, ALLOC_NODE_MISMATCH);
f6e7def7 2222 deactivate_slab(s, page, c->freelist);
c17dda40
CL
2223 c->page = NULL;
2224 c->freelist = NULL;
fc59c053
CL
2225 goto new_slab;
2226 }
6446faa2 2227
072bb0aa
MG
2228 /*
2229 * By rights, we should be searching for a slab page that was
2230 * PFMEMALLOC but right now, we are losing the pfmemalloc
2231 * information when the page leaves the per-cpu allocator
2232 */
2233 if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2234 deactivate_slab(s, page, c->freelist);
2235 c->page = NULL;
2236 c->freelist = NULL;
2237 goto new_slab;
2238 }
2239
73736e03 2240 /* must check again c->freelist in case of cpu migration or IRQ */
6faa6833
CL
2241 freelist = c->freelist;
2242 if (freelist)
73736e03 2243 goto load_freelist;
03e404af 2244
2cfb7455 2245 stat(s, ALLOC_SLOWPATH);
03e404af 2246
f6e7def7 2247 freelist = get_freelist(s, page);
6446faa2 2248
6faa6833 2249 if (!freelist) {
03e404af
CL
2250 c->page = NULL;
2251 stat(s, DEACTIVATE_BYPASS);
fc59c053 2252 goto new_slab;
03e404af 2253 }
6446faa2 2254
84e554e6 2255 stat(s, ALLOC_REFILL);
6446faa2 2256
894b8788 2257load_freelist:
507effea
CL
2258 /*
2259 * freelist is pointing to the list of objects to be used.
2260 * page is pointing to the page from which the objects are obtained.
2261 * That page must be frozen for per cpu allocations to work.
2262 */
2263 VM_BUG_ON(!c->page->frozen);
6faa6833 2264 c->freelist = get_freepointer(s, freelist);
8a5ec0ba
CL
2265 c->tid = next_tid(c->tid);
2266 local_irq_restore(flags);
6faa6833 2267 return freelist;
81819f0f 2268
81819f0f 2269new_slab:
2cfb7455 2270
49e22585 2271 if (c->partial) {
f6e7def7
CL
2272 page = c->page = c->partial;
2273 c->partial = page->next;
49e22585
CL
2274 stat(s, CPU_PARTIAL_ALLOC);
2275 c->freelist = NULL;
2276 goto redo;
81819f0f
CL
2277 }
2278
188fd063 2279 freelist = new_slab_objects(s, gfpflags, node, &c);
01ad8a7b 2280
f4697436
CL
2281 if (unlikely(!freelist)) {
2282 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
2283 slab_out_of_memory(s, gfpflags, node);
2cfb7455 2284
f4697436
CL
2285 local_irq_restore(flags);
2286 return NULL;
81819f0f 2287 }
2cfb7455 2288
f6e7def7 2289 page = c->page;
5091b74a 2290 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
4b6f0750 2291 goto load_freelist;
2cfb7455 2292
497b66f2 2293 /* Only entered in the debug case */
5091b74a 2294 if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
497b66f2 2295 goto new_slab; /* Slab failed checks. Next slab needed */
894b8788 2296
f6e7def7 2297 deactivate_slab(s, page, get_freepointer(s, freelist));
c17dda40
CL
2298 c->page = NULL;
2299 c->freelist = NULL;
a71ae47a 2300 local_irq_restore(flags);
6faa6833 2301 return freelist;
894b8788
CL
2302}
2303
2304/*
2305 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2306 * have the fastpath folded into their functions. So no function call
2307 * overhead for requests that can be satisfied on the fastpath.
2308 *
2309 * The fastpath works by first checking if the lockless freelist can be used.
2310 * If not then __slab_alloc is called for slow processing.
2311 *
2312 * Otherwise we can simply pick the next object from the lockless free list.
2313 */
2b847c3c 2314static __always_inline void *slab_alloc_node(struct kmem_cache *s,
ce71e27c 2315 gfp_t gfpflags, int node, unsigned long addr)
894b8788 2316{
894b8788 2317 void **object;
dfb4f096 2318 struct kmem_cache_cpu *c;
57d437d2 2319 struct page *page;
8a5ec0ba 2320 unsigned long tid;
1f84260c 2321
c016b0bd 2322 if (slab_pre_alloc_hook(s, gfpflags))
773ff60e 2323 return NULL;
1f84260c 2324
8a5ec0ba 2325redo:
8a5ec0ba
CL
2326
2327 /*
2328 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2329 * enabled. We may switch back and forth between cpus while
2330 * reading from one cpu area. That does not matter as long
2331 * as we end up on the original cpu again when doing the cmpxchg.
2332 */
9dfc6e68 2333 c = __this_cpu_ptr(s->cpu_slab);
8a5ec0ba 2334
8a5ec0ba
CL
2335 /*
2336 * The transaction ids are globally unique per cpu and per operation on
2337 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
2338 * occurs on the right processor and that there was no operation on the
2339 * linked list in between.
2340 */
2341 tid = c->tid;
2342 barrier();
8a5ec0ba 2343
9dfc6e68 2344 object = c->freelist;
57d437d2 2345 page = c->page;
5091b74a 2346 if (unlikely(!object || !node_match(page, node)))
dfb4f096 2347 object = __slab_alloc(s, gfpflags, node, addr, c);
894b8788
CL
2348
2349 else {
0ad9500e
ED
2350 void *next_object = get_freepointer_safe(s, object);
2351
8a5ec0ba 2352 /*
25985edc 2353 * The cmpxchg will only match if there was no additional
8a5ec0ba
CL
2354 * operation and if we are on the right processor.
2355 *
2356 * The cmpxchg does the following atomically (without lock semantics!)
2357 * 1. Relocate first pointer to the current per cpu area.
2358 * 2. Verify that tid and freelist have not been changed
2359 * 3. If they were not changed replace tid and freelist
2360 *
2361 * Since this is without lock semantics the protection is only against
2362 * code executing on this cpu *not* from access by other cpus.
2363 */
933393f5 2364 if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba
CL
2365 s->cpu_slab->freelist, s->cpu_slab->tid,
2366 object, tid,
0ad9500e 2367 next_object, next_tid(tid)))) {
8a5ec0ba
CL
2368
2369 note_cmpxchg_failure("slab_alloc", s, tid);
2370 goto redo;
2371 }
0ad9500e 2372 prefetch_freepointer(s, next_object);
84e554e6 2373 stat(s, ALLOC_FASTPATH);
894b8788 2374 }
8a5ec0ba 2375
74e2134f 2376 if (unlikely(gfpflags & __GFP_ZERO) && object)
3b0efdfa 2377 memset(object, 0, s->object_size);
d07dbea4 2378
c016b0bd 2379 slab_post_alloc_hook(s, gfpflags, object);
5a896d9e 2380
894b8788 2381 return object;
81819f0f
CL
2382}
2383
2b847c3c
EG
2384static __always_inline void *slab_alloc(struct kmem_cache *s,
2385 gfp_t gfpflags, unsigned long addr)
2386{
2387 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2388}
2389
81819f0f
CL
2390void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2391{
2b847c3c 2392 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
5b882be4 2393
3b0efdfa 2394 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
5b882be4
EGM
2395
2396 return ret;
81819f0f
CL
2397}
2398EXPORT_SYMBOL(kmem_cache_alloc);
2399
0f24f128 2400#ifdef CONFIG_TRACING
4a92379b
RK
2401void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2402{
2b847c3c 2403 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
4a92379b
RK
2404 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2405 return ret;
2406}
2407EXPORT_SYMBOL(kmem_cache_alloc_trace);
2408
2409void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
5b882be4 2410{
4a92379b
RK
2411 void *ret = kmalloc_order(size, flags, order);
2412 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
2413 return ret;
5b882be4 2414}
4a92379b 2415EXPORT_SYMBOL(kmalloc_order_trace);
5b882be4
EGM
2416#endif
2417
81819f0f
CL
2418#ifdef CONFIG_NUMA
2419void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2420{
2b847c3c 2421 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
5b882be4 2422
ca2b84cb 2423 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3b0efdfa 2424 s->object_size, s->size, gfpflags, node);
5b882be4
EGM
2425
2426 return ret;
81819f0f
CL
2427}
2428EXPORT_SYMBOL(kmem_cache_alloc_node);
81819f0f 2429
0f24f128 2430#ifdef CONFIG_TRACING
4a92379b 2431void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
5b882be4 2432 gfp_t gfpflags,
4a92379b 2433 int node, size_t size)
5b882be4 2434{
2b847c3c 2435 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
4a92379b
RK
2436
2437 trace_kmalloc_node(_RET_IP_, ret,
2438 size, s->size, gfpflags, node);
2439 return ret;
5b882be4 2440}
4a92379b 2441EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
5b882be4 2442#endif
5d1f57e4 2443#endif
5b882be4 2444
81819f0f 2445/*
894b8788
CL
2446 * Slow path handling. This may still be called frequently since objects
2447 * have a longer lifetime than the cpu slabs in most processing loads.
81819f0f 2448 *
894b8788
CL
2449 * So we still attempt to reduce cache line usage. Just take the slab
2450 * lock and free the item. If there is no additional partial page
2451 * handling required then we can return immediately.
81819f0f 2452 */
894b8788 2453static void __slab_free(struct kmem_cache *s, struct page *page,
ff12059e 2454 void *x, unsigned long addr)
81819f0f
CL
2455{
2456 void *prior;
2457 void **object = (void *)x;
2cfb7455 2458 int was_frozen;
2cfb7455
CL
2459 struct page new;
2460 unsigned long counters;
2461 struct kmem_cache_node *n = NULL;
61728d1e 2462 unsigned long uninitialized_var(flags);
81819f0f 2463
8a5ec0ba 2464 stat(s, FREE_SLOWPATH);
81819f0f 2465
19c7ff9e
CL
2466 if (kmem_cache_debug(s) &&
2467 !(n = free_debug_processing(s, page, x, addr, &flags)))
80f08c19 2468 return;
6446faa2 2469
2cfb7455 2470 do {
837d678d
JK
2471 if (unlikely(n)) {
2472 spin_unlock_irqrestore(&n->list_lock, flags);
2473 n = NULL;
2474 }
2cfb7455
CL
2475 prior = page->freelist;
2476 counters = page->counters;
2477 set_freepointer(s, object, prior);
2478 new.counters = counters;
2479 was_frozen = new.frozen;
2480 new.inuse--;
837d678d 2481 if ((!new.inuse || !prior) && !was_frozen) {
49e22585
CL
2482
2483 if (!kmem_cache_debug(s) && !prior)
2484
2485 /*
2486 * Slab was on no list before and will be partially empty
2487 * We can defer the list move and instead freeze it.
2488 */
2489 new.frozen = 1;
2490
2491 else { /* Needs to be taken off a list */
2492
2493 n = get_node(s, page_to_nid(page));
2494 /*
2495 * Speculatively acquire the list_lock.
2496 * If the cmpxchg does not succeed then we may
2497 * drop the list_lock without any processing.
2498 *
2499 * Otherwise the list_lock will synchronize with
2500 * other processors updating the list of slabs.
2501 */
2502 spin_lock_irqsave(&n->list_lock, flags);
2503
2504 }
2cfb7455 2505 }
81819f0f 2506
2cfb7455
CL
2507 } while (!cmpxchg_double_slab(s, page,
2508 prior, counters,
2509 object, new.counters,
2510 "__slab_free"));
81819f0f 2511
2cfb7455 2512 if (likely(!n)) {
49e22585
CL
2513
2514 /*
2515 * If we just froze the page then put it onto the
2516 * per cpu partial list.
2517 */
8028dcea 2518 if (new.frozen && !was_frozen) {
49e22585 2519 put_cpu_partial(s, page, 1);
8028dcea
AS
2520 stat(s, CPU_PARTIAL_FREE);
2521 }
49e22585 2522 /*
2cfb7455
CL
2523 * The list lock was not taken therefore no list
2524 * activity can be necessary.
2525 */
2526 if (was_frozen)
2527 stat(s, FREE_FROZEN);
80f08c19 2528 return;
2cfb7455 2529 }
81819f0f 2530
837d678d
JK
2531 if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
2532 goto slab_empty;
2533
81819f0f 2534 /*
837d678d
JK
2535 * Objects left in the slab. If it was not on the partial list before
2536 * then add it.
81819f0f 2537 */
837d678d
JK
2538 if (kmem_cache_debug(s) && unlikely(!prior)) {
2539 remove_full(s, page);
2540 add_partial(n, page, DEACTIVATE_TO_TAIL);
2541 stat(s, FREE_ADD_PARTIAL);
8ff12cfc 2542 }
80f08c19 2543 spin_unlock_irqrestore(&n->list_lock, flags);
81819f0f
CL
2544 return;
2545
2546slab_empty:
a973e9dd 2547 if (prior) {
81819f0f 2548 /*
6fbabb20 2549 * Slab on the partial list.
81819f0f 2550 */
5cc6eee8 2551 remove_partial(n, page);
84e554e6 2552 stat(s, FREE_REMOVE_PARTIAL);
6fbabb20
CL
2553 } else
2554 /* Slab must be on the full list */
2555 remove_full(s, page);
2cfb7455 2556
80f08c19 2557 spin_unlock_irqrestore(&n->list_lock, flags);
84e554e6 2558 stat(s, FREE_SLAB);
81819f0f 2559 discard_slab(s, page);
81819f0f
CL
2560}
2561
894b8788
CL
2562/*
2563 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2564 * can perform fastpath freeing without additional function calls.
2565 *
2566 * The fastpath is only possible if we are freeing to the current cpu slab
2567 * of this processor. This is typically the case if we have just allocated
2568 * the item before.
2569 *
2570 * If fastpath is not possible then fall back to __slab_free where we deal
2571 * with all sorts of special processing.
2572 */
06428780 2573static __always_inline void slab_free(struct kmem_cache *s,
ce71e27c 2574 struct page *page, void *x, unsigned long addr)
894b8788
CL
2575{
2576 void **object = (void *)x;
dfb4f096 2577 struct kmem_cache_cpu *c;
8a5ec0ba 2578 unsigned long tid;
1f84260c 2579
c016b0bd
CL
2580 slab_free_hook(s, x);
2581
8a5ec0ba
CL
2582redo:
2583 /*
2584 * Determine the current cpu's per cpu slab.
2585 * The cpu may change afterward. However that does not matter since
2586 * data is retrieved via this pointer. If we are on the same cpu
2587 * during the cmpxchg then the free will succeed.
2588 */
9dfc6e68 2589 c = __this_cpu_ptr(s->cpu_slab);
c016b0bd 2590
8a5ec0ba
CL
2591 tid = c->tid;
2592 barrier();
c016b0bd 2593
442b06bc 2594 if (likely(page == c->page)) {
ff12059e 2595 set_freepointer(s, object, c->freelist);
8a5ec0ba 2596
933393f5 2597 if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba
CL
2598 s->cpu_slab->freelist, s->cpu_slab->tid,
2599 c->freelist, tid,
2600 object, next_tid(tid)))) {
2601
2602 note_cmpxchg_failure("slab_free", s, tid);
2603 goto redo;
2604 }
84e554e6 2605 stat(s, FREE_FASTPATH);
894b8788 2606 } else
ff12059e 2607 __slab_free(s, page, x, addr);
894b8788 2608
894b8788
CL
2609}
2610
81819f0f
CL
2611void kmem_cache_free(struct kmem_cache *s, void *x)
2612{
77c5e2d0 2613 struct page *page;
81819f0f 2614
b49af68f 2615 page = virt_to_head_page(x);
81819f0f 2616
1b4f59e3 2617 if (kmem_cache_debug(s) && page->slab_cache != s) {
79576102 2618 pr_err("kmem_cache_free: Wrong slab cache. %s but object"
1b4f59e3 2619 " is from %s\n", page->slab_cache->name, s->name);
79576102
CL
2620 WARN_ON_ONCE(1);
2621 return;
2622 }
2623
ce71e27c 2624 slab_free(s, page, x, _RET_IP_);
5b882be4 2625
ca2b84cb 2626 trace_kmem_cache_free(_RET_IP_, x);
81819f0f
CL
2627}
2628EXPORT_SYMBOL(kmem_cache_free);
2629
81819f0f 2630/*
672bba3a
CL
2631 * Object placement in a slab is made very easy because we always start at
2632 * offset 0. If we tune the size of the object to the alignment then we can
2633 * get the required alignment by putting one properly sized object after
2634 * another.
81819f0f
CL
2635 *
2636 * Notice that the allocation order determines the sizes of the per cpu
2637 * caches. Each processor has always one slab available for allocations.
2638 * Increasing the allocation order reduces the number of times that slabs
672bba3a 2639 * must be moved on and off the partial lists and is therefore a factor in
81819f0f 2640 * locking overhead.
81819f0f
CL
2641 */
2642
2643/*
2644 * Minimum / Maximum order of slab pages. This influences locking overhead
2645 * and slab fragmentation. A higher order reduces the number of partial slabs
2646 * and increases the number of allocations possible without having to
2647 * take the list_lock.
2648 */
2649static int slub_min_order;
114e9e89 2650static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
9b2cd506 2651static int slub_min_objects;
81819f0f
CL
2652
2653/*
2654 * Merge control. If this is set then no merging of slab caches will occur.
672bba3a 2655 * (Could be removed. This was introduced to pacify the merge skeptics.)
81819f0f
CL
2656 */
2657static int slub_nomerge;
2658
81819f0f
CL
2659/*
2660 * Calculate the order of allocation given a slab object size.
2661 *
672bba3a
CL
2662 * The order of allocation has significant impact on performance and other
2663 * system components. Generally order 0 allocations should be preferred since
2664 * order 0 does not cause fragmentation in the page allocator. Larger objects
2665 * can be problematic to put into order 0 slabs because there may be too much
c124f5b5 2666 * unused space left. We go to a higher order if more than 1/16th of the slab
672bba3a
CL
2667 * would be wasted.
2668 *
2669 * In order to reach satisfactory performance we must ensure that a minimum
2670 * number of objects is in one slab. Otherwise we may generate too much
2671 * activity on the partial lists which requires taking the list_lock. This is
2672 * less a concern for large slabs though which are rarely used.
81819f0f 2673 *
672bba3a
CL
2674 * slub_max_order specifies the order where we begin to stop considering the
2675 * number of objects in a slab as critical. If we reach slub_max_order then
2676 * we try to keep the page order as low as possible. So we accept more waste
2677 * of space in favor of a small page order.
81819f0f 2678 *
672bba3a
CL
2679 * Higher order allocations also allow the placement of more objects in a
2680 * slab and thereby reduce object handling overhead. If the user has
2681 * requested a higher minimum order then we start with that one instead of
2682 * the smallest order which will fit the object.
81819f0f 2683 */
5e6d444e 2684static inline int slab_order(int size, int min_objects,
ab9a0f19 2685 int max_order, int fract_leftover, int reserved)
81819f0f
CL
2686{
2687 int order;
2688 int rem;
6300ea75 2689 int min_order = slub_min_order;
81819f0f 2690
ab9a0f19 2691 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
210b5c06 2692 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
39b26464 2693
6300ea75 2694 for (order = max(min_order,
5e6d444e
CL
2695 fls(min_objects * size - 1) - PAGE_SHIFT);
2696 order <= max_order; order++) {
81819f0f 2697
5e6d444e 2698 unsigned long slab_size = PAGE_SIZE << order;
81819f0f 2699
ab9a0f19 2700 if (slab_size < min_objects * size + reserved)
81819f0f
CL
2701 continue;
2702
ab9a0f19 2703 rem = (slab_size - reserved) % size;
81819f0f 2704
5e6d444e 2705 if (rem <= slab_size / fract_leftover)
81819f0f
CL
2706 break;
2707
2708 }
672bba3a 2709
81819f0f
CL
2710 return order;
2711}
2712
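/*
 * Worked example with a hypothetical size, ignoring the min_objects
 * driven lower bound on the starting order: for size == 700 on a 4K
 * page with reserved == 0 and fract_leftover == 16, order 0 holds 5
 * objects and leaves 596 bytes over, more than 4096 / 16 == 256, so the
 * loop continues; order 1 holds 11 objects and leaves 492 bytes over,
 * below 8192 / 16 == 512, so order 1 is returned.
 */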
ab9a0f19 2713static inline int calculate_order(int size, int reserved)
5e6d444e
CL
2714{
2715 int order;
2716 int min_objects;
2717 int fraction;
e8120ff1 2718 int max_objects;
5e6d444e
CL
2719
2720 /*
2721 * Attempt to find best configuration for a slab. This
2722 * works by first attempting to generate a layout with
2723 * the best configuration and backing off gradually.
2724 *
2725 * First we reduce the acceptable waste in a slab. Then
2726 * we reduce the minimum objects required in a slab.
2727 */
2728 min_objects = slub_min_objects;
9b2cd506
CL
2729 if (!min_objects)
2730 min_objects = 4 * (fls(nr_cpu_ids) + 1);
ab9a0f19 2731 max_objects = order_objects(slub_max_order, size, reserved);
e8120ff1
ZY
2732 min_objects = min(min_objects, max_objects);
2733
5e6d444e 2734 while (min_objects > 1) {
c124f5b5 2735 fraction = 16;
5e6d444e
CL
2736 while (fraction >= 4) {
2737 order = slab_order(size, min_objects,
ab9a0f19 2738 slub_max_order, fraction, reserved);
5e6d444e
CL
2739 if (order <= slub_max_order)
2740 return order;
2741 fraction /= 2;
2742 }
5086c389 2743 min_objects--;
5e6d444e
CL
2744 }
2745
2746 /*
2747 * We were unable to place multiple objects in a slab. Now
2748 * lets see if we can place a single object there.
2749 */
ab9a0f19 2750 order = slab_order(size, 1, slub_max_order, 1, reserved);
5e6d444e
CL
2751 if (order <= slub_max_order)
2752 return order;
2753
2754 /*
2755 * Doh this slab cannot be placed using slub_max_order.
2756 */
ab9a0f19 2757 order = slab_order(size, 1, MAX_ORDER, 1, reserved);
818cf590 2758 if (order < MAX_ORDER)
5e6d444e
CL
2759 return order;
2760 return -ENOSYS;
2761}
2762
5595cffc 2763static void
4053497d 2764init_kmem_cache_node(struct kmem_cache_node *n)
81819f0f
CL
2765{
2766 n->nr_partial = 0;
81819f0f
CL
2767 spin_lock_init(&n->list_lock);
2768 INIT_LIST_HEAD(&n->partial);
8ab1372f 2769#ifdef CONFIG_SLUB_DEBUG
0f389ec6 2770 atomic_long_set(&n->nr_slabs, 0);
02b71b70 2771 atomic_long_set(&n->total_objects, 0);
643b1138 2772 INIT_LIST_HEAD(&n->full);
8ab1372f 2773#endif
81819f0f
CL
2774}
2775
55136592 2776static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4c93c355 2777{
6c182dc0
CL
2778 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2779 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
4c93c355 2780
8a5ec0ba 2781 /*
d4d84fef
CM
2782 * Must align to double word boundary for the double cmpxchg
2783 * instructions to work; see __pcpu_double_call_return_bool().
8a5ec0ba 2784 */
d4d84fef
CM
2785 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2786 2 * sizeof(void *));
8a5ec0ba
CL
2787
2788 if (!s->cpu_slab)
2789 return 0;
2790
2791 init_kmem_cache_cpus(s);
4c93c355 2792
8a5ec0ba 2793 return 1;
4c93c355 2794}
4c93c355 2795
51df1142
CL
2796static struct kmem_cache *kmem_cache_node;
2797
81819f0f
CL
2798/*
2799 * No kmalloc_node yet so do it by hand. We know that this is the first
2800 * slab on the node for this slabcache. There are no concurrent accesses
2801 * possible.
2802 *
2803 * Note that this function only works on the kmalloc_node_cache
4c93c355
CL
2804 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2805 * memory on a fresh node that has no slab structures yet.
81819f0f 2806 */
55136592 2807static void early_kmem_cache_node_alloc(int node)
81819f0f
CL
2808{
2809 struct page *page;
2810 struct kmem_cache_node *n;
2811
51df1142 2812 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
81819f0f 2813
51df1142 2814 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
81819f0f
CL
2815
2816 BUG_ON(!page);
a2f92ee7
CL
2817 if (page_to_nid(page) != node) {
2818 printk(KERN_ERR "SLUB: Unable to allocate memory from "
2819 "node %d\n", node);
2820 printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2821 "in order to be able to continue\n");
2822 }
2823
81819f0f
CL
2824 n = page->freelist;
2825 BUG_ON(!n);
51df1142 2826 page->freelist = get_freepointer(kmem_cache_node, n);
e6e82ea1 2827 page->inuse = 1;
8cb0a506 2828 page->frozen = 0;
51df1142 2829 kmem_cache_node->node[node] = n;
8ab1372f 2830#ifdef CONFIG_SLUB_DEBUG
f7cb1933 2831 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
51df1142 2832 init_tracking(kmem_cache_node, n);
8ab1372f 2833#endif
4053497d 2834 init_kmem_cache_node(n);
51df1142 2835 inc_slabs_node(kmem_cache_node, node, page->objects);
6446faa2 2836
136333d1 2837 add_partial(n, page, DEACTIVATE_TO_HEAD);
81819f0f
CL
2838}
2839
2840static void free_kmem_cache_nodes(struct kmem_cache *s)
2841{
2842 int node;
2843
f64dc58c 2844 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f 2845 struct kmem_cache_node *n = s->node[node];
51df1142 2846
73367bd8 2847 if (n)
51df1142
CL
2848 kmem_cache_free(kmem_cache_node, n);
2849
81819f0f
CL
2850 s->node[node] = NULL;
2851 }
2852}
2853
55136592 2854static int init_kmem_cache_nodes(struct kmem_cache *s)
81819f0f
CL
2855{
2856 int node;
81819f0f 2857
f64dc58c 2858 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
2859 struct kmem_cache_node *n;
2860
73367bd8 2861 if (slab_state == DOWN) {
55136592 2862 early_kmem_cache_node_alloc(node);
73367bd8
AD
2863 continue;
2864 }
51df1142 2865 n = kmem_cache_alloc_node(kmem_cache_node,
55136592 2866 GFP_KERNEL, node);
81819f0f 2867
73367bd8
AD
2868 if (!n) {
2869 free_kmem_cache_nodes(s);
2870 return 0;
81819f0f 2871 }
73367bd8 2872
81819f0f 2873 s->node[node] = n;
4053497d 2874 init_kmem_cache_node(n);
81819f0f
CL
2875 }
2876 return 1;
2877}
81819f0f 2878
c0bdb232 2879static void set_min_partial(struct kmem_cache *s, unsigned long min)
3b89d7d8
DR
2880{
2881 if (min < MIN_PARTIAL)
2882 min = MIN_PARTIAL;
2883 else if (min > MAX_PARTIAL)
2884 min = MAX_PARTIAL;
2885 s->min_partial = min;
2886}
2887
81819f0f
CL
2888/*
2889 * calculate_sizes() determines the order and the distribution of data within
2890 * a slab object.
2891 */
06b285dc 2892static int calculate_sizes(struct kmem_cache *s, int forced_order)
81819f0f
CL
2893{
2894 unsigned long flags = s->flags;
3b0efdfa 2895 unsigned long size = s->object_size;
834f3d11 2896 int order;
81819f0f 2897
d8b42bf5
CL
2898 /*
2899 * Round up object size to the next word boundary. We can only
2900 * place the free pointer at word boundaries and this determines
2901 * the possible location of the free pointer.
2902 */
2903 size = ALIGN(size, sizeof(void *));
2904
2905#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
2906 /*
2907 * Determine if we can poison the object itself. If the user of
2908 * the slab may touch the object after free or before allocation
2909 * then we should never poison the object itself.
2910 */
2911 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
c59def9f 2912 !s->ctor)
81819f0f
CL
2913 s->flags |= __OBJECT_POISON;
2914 else
2915 s->flags &= ~__OBJECT_POISON;
2916
81819f0f
CL
2917
2918 /*
672bba3a 2919 * If we are Redzoning then check if there is some space between the
81819f0f 2920 * end of the object and the free pointer. If not then add an
672bba3a 2921 * additional word to have some bytes to store Redzone information.
81819f0f 2922 */
3b0efdfa 2923 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
81819f0f 2924 size += sizeof(void *);
41ecc55b 2925#endif
81819f0f
CL
2926
2927 /*
672bba3a
CL
2928 * With that we have determined the number of bytes in actual use
2929 * by the object. This is the potential offset to the free pointer.
81819f0f
CL
2930 */
2931 s->inuse = size;
2932
2933 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
c59def9f 2934 s->ctor)) {
81819f0f
CL
2935 /*
2936 * Relocate free pointer after the object if it is not
2937 * permitted to overwrite the first word of the object on
2938 * kmem_cache_free.
2939 *
2940 * This is the case if we do RCU, have a constructor or
2941 * destructor or are poisoning the objects.
2942 */
2943 s->offset = size;
2944 size += sizeof(void *);
2945 }
2946
c12b3c62 2947#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
2948 if (flags & SLAB_STORE_USER)
2949 /*
2950 * Need to store information about allocs and frees after
2951 * the object.
2952 */
2953 size += 2 * sizeof(struct track);
2954
be7b3fbc 2955 if (flags & SLAB_RED_ZONE)
81819f0f
CL
2956 /*
2957 * Add some empty padding so that we can catch
2958 * overwrites from earlier objects rather than let
2959 * tracking information or the free pointer be
0211a9c8 2960 * corrupted if a user writes before the start
81819f0f
CL
2961 * of the object.
2962 */
2963 size += sizeof(void *);
41ecc55b 2964#endif
672bba3a 2965
81819f0f
CL
2966 /*
2967 * SLUB stores one object immediately after another beginning from
2968 * offset 0. In order to align the objects we have to simply size
2969 * each object to conform to the alignment.
2970 */
45906855 2971 size = ALIGN(size, s->align);
81819f0f 2972 s->size = size;
06b285dc
CL
2973 if (forced_order >= 0)
2974 order = forced_order;
2975 else
ab9a0f19 2976 order = calculate_order(size, s->reserved);
81819f0f 2977
834f3d11 2978 if (order < 0)
81819f0f
CL
2979 return 0;
2980
b7a49f0d 2981 s->allocflags = 0;
834f3d11 2982 if (order)
b7a49f0d
CL
2983 s->allocflags |= __GFP_COMP;
2984
2985 if (s->flags & SLAB_CACHE_DMA)
2986 s->allocflags |= SLUB_DMA;
2987
2988 if (s->flags & SLAB_RECLAIM_ACCOUNT)
2989 s->allocflags |= __GFP_RECLAIMABLE;
2990
81819f0f
CL
2991 /*
2992 * Determine the number of objects per slab
2993 */
ab9a0f19
LJ
2994 s->oo = oo_make(order, size, s->reserved);
2995 s->min = oo_make(get_order(size), size, s->reserved);
205ab99d
CL
2996 if (oo_objects(s->oo) > oo_objects(s->max))
2997 s->max = s->oo;
81819f0f 2998
834f3d11 2999 return !!oo_objects(s->oo);
81819f0f
CL
3000}
3001
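/*
 * Layout sketch for a hypothetical cache on a 64-bit CONFIG_SLUB_DEBUG
 * build with default alignment: a 24 byte object with only
 * SLAB_RED_ZONE set and no constructor stays word aligned at 24, the
 * first red zone word raises the size to 32 (s->inuse == 32), the free
 * pointer can still overlay offset 0, and the trailing red zone padding
 * brings s->size to 40.
 */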
8a13a4cc 3002static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
81819f0f 3003{
8a13a4cc 3004 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
ab9a0f19 3005 s->reserved = 0;
81819f0f 3006
da9a638c
LJ
3007 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
3008 s->reserved = sizeof(struct rcu_head);
81819f0f 3009
06b285dc 3010 if (!calculate_sizes(s, -1))
81819f0f 3011 goto error;
3de47213
DR
3012 if (disable_higher_order_debug) {
3013 /*
3014 * Disable debugging flags that store metadata if the min slab
3015 * order increased.
3016 */
3b0efdfa 3017 if (get_order(s->size) > get_order(s->object_size)) {
3de47213
DR
3018 s->flags &= ~DEBUG_METADATA_FLAGS;
3019 s->offset = 0;
3020 if (!calculate_sizes(s, -1))
3021 goto error;
3022 }
3023 }
81819f0f 3024
2565409f
HC
3025#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3026 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
b789ef51
CL
3027 if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
3028 /* Enable fast mode */
3029 s->flags |= __CMPXCHG_DOUBLE;
3030#endif
3031
3b89d7d8
DR
3032 /*
3033 * The larger the object size is, the more pages we want on the partial
3034 * list to avoid pounding the page allocator excessively.
3035 */
49e22585
CL
3036 set_min_partial(s, ilog2(s->size) / 2);
3037
3038 /*
3039 * cpu_partial determines the maximum number of objects kept in the
3040 * per cpu partial lists of a processor.
3041 *
3042 * Per cpu partial lists mainly contain slabs that just have one
3043 * object freed. If they are used for allocation then they can be
3044 * filled up again with minimal effort. The slab will never hit the
3045 * per node partial lists and therefore no locking will be required.
3046 *
3047 * This setting also determines
3048 *
3049 * A) The number of objects from per cpu partial slabs dumped to the
3050 * per node list when we reach the limit.
9f264904 3051 * B) The number of objects in cpu partial slabs to extract from the
49e22585
CL
3052 * per node list when we run out of per cpu objects. We only fetch 50%
3053 * to keep some capacity around for frees.
3054 */
8f1e33da
CL
3055 if (kmem_cache_debug(s))
3056 s->cpu_partial = 0;
3057 else if (s->size >= PAGE_SIZE)
49e22585
CL
3058 s->cpu_partial = 2;
3059 else if (s->size >= 1024)
3060 s->cpu_partial = 6;
3061 else if (s->size >= 256)
3062 s->cpu_partial = 13;
3063 else
3064 s->cpu_partial = 30;
3065
81819f0f 3066#ifdef CONFIG_NUMA
e2cb96b7 3067 s->remote_node_defrag_ratio = 1000;
81819f0f 3068#endif
55136592 3069 if (!init_kmem_cache_nodes(s))
dfb4f096 3070 goto error;
81819f0f 3071
55136592 3072 if (alloc_kmem_cache_cpus(s))
278b1bb1 3073 return 0;
ff12059e 3074
4c93c355 3075 free_kmem_cache_nodes(s);
81819f0f
CL
3076error:
3077 if (flags & SLAB_PANIC)
3078 panic("Cannot create slab %s size=%lu realsize=%u "
3079 "order=%u offset=%u flags=%lx\n",
8a13a4cc 3080 s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
81819f0f 3081 s->offset, flags);
278b1bb1 3082 return -EINVAL;
81819f0f 3083}
81819f0f 3084
33b12c38
CL
3085static void list_slab_objects(struct kmem_cache *s, struct page *page,
3086 const char *text)
3087{
3088#ifdef CONFIG_SLUB_DEBUG
3089 void *addr = page_address(page);
3090 void *p;
a5dd5c11
NK
3091 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3092 sizeof(long), GFP_ATOMIC);
bbd7d57b
ED
3093 if (!map)
3094 return;
945cf2b6 3095 slab_err(s, page, text, s->name);
33b12c38 3096 slab_lock(page);
33b12c38 3097
5f80b13a 3098 get_map(s, page, map);
33b12c38
CL
3099 for_each_object(p, s, addr, page->objects) {
3100
3101 if (!test_bit(slab_index(p, s, addr), map)) {
3102 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
3103 p, p - addr);
3104 print_tracking(s, p);
3105 }
3106 }
3107 slab_unlock(page);
bbd7d57b 3108 kfree(map);
33b12c38
CL
3109#endif
3110}
3111
81819f0f 3112/*
599870b1 3113 * Attempt to free all partial slabs on a node.
69cb8e6b
CL
3114 * This is called from kmem_cache_close(). We must be the last thread
3115 * using the cache and therefore we do not need to lock anymore.
81819f0f 3116 */
599870b1 3117static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
81819f0f 3118{
81819f0f
CL
3119 struct page *page, *h;
3120
33b12c38 3121 list_for_each_entry_safe(page, h, &n->partial, lru) {
81819f0f 3122 if (!page->inuse) {
5cc6eee8 3123 remove_partial(n, page);
81819f0f 3124 discard_slab(s, page);
33b12c38
CL
3125 } else {
3126 list_slab_objects(s, page,
945cf2b6 3127 "Objects remaining in %s on kmem_cache_close()");
599870b1 3128 }
33b12c38 3129 }
81819f0f
CL
3130}
3131
3132/*
672bba3a 3133 * Release all resources used by a slab cache.
81819f0f 3134 */
0c710013 3135static inline int kmem_cache_close(struct kmem_cache *s)
81819f0f
CL
3136{
3137 int node;
3138
3139 flush_all(s);
81819f0f 3140 /* Attempt to free all objects */
f64dc58c 3141 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
3142 struct kmem_cache_node *n = get_node(s, node);
3143
599870b1
CL
3144 free_partial(s, n);
3145 if (n->nr_partial || slabs_node(s, node))
81819f0f
CL
3146 return 1;
3147 }
945cf2b6 3148 free_percpu(s->cpu_slab);
81819f0f
CL
3149 free_kmem_cache_nodes(s);
3150 return 0;
3151}
3152
945cf2b6 3153int __kmem_cache_shutdown(struct kmem_cache *s)
81819f0f 3154{
12c3667f 3155 int rc = kmem_cache_close(s);
945cf2b6 3156
12c3667f 3157 if (!rc)
81819f0f 3158 sysfs_slab_remove(s);
12c3667f
CL
3159
3160 return rc;
81819f0f 3161}
81819f0f
CL
3162
3163/********************************************************************
3164 * Kmalloc subsystem
3165 *******************************************************************/
3166
51df1142 3167struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
81819f0f
CL
3168EXPORT_SYMBOL(kmalloc_caches);
3169
55136592 3170#ifdef CONFIG_ZONE_DMA
51df1142 3171static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
55136592
CL
3172#endif
3173
81819f0f
CL
3174static int __init setup_slub_min_order(char *str)
3175{
06428780 3176 get_option(&str, &slub_min_order);
81819f0f
CL
3177
3178 return 1;
3179}
3180
3181__setup("slub_min_order=", setup_slub_min_order);
3182
3183static int __init setup_slub_max_order(char *str)
3184{
06428780 3185 get_option(&str, &slub_max_order);
818cf590 3186 slub_max_order = min(slub_max_order, MAX_ORDER - 1);
81819f0f
CL
3187
3188 return 1;
3189}
3190
3191__setup("slub_max_order=", setup_slub_max_order);
3192
3193static int __init setup_slub_min_objects(char *str)
3194{
06428780 3195 get_option(&str, &slub_min_objects);
81819f0f
CL
3196
3197 return 1;
3198}
3199
3200__setup("slub_min_objects=", setup_slub_min_objects);
3201
3202static int __init setup_slub_nomerge(char *str)
3203{
3204 slub_nomerge = 1;
3205 return 1;
3206}
3207
3208__setup("slub_nomerge", setup_slub_nomerge);
3209
f1b26339
CL
3210/*
3211 * Conversion table for small slab sizes / 8 to the index in the
3212 * kmalloc array. This is necessary for slabs < 192 since we have non power
3213 * of two cache sizes there. The size of larger slabs can be determined using
3214 * fls.
3215 */
3216static s8 size_index[24] = {
3217 3, /* 8 */
3218 4, /* 16 */
3219 5, /* 24 */
3220 5, /* 32 */
3221 6, /* 40 */
3222 6, /* 48 */
3223 6, /* 56 */
3224 6, /* 64 */
3225 1, /* 72 */
3226 1, /* 80 */
3227 1, /* 88 */
3228 1, /* 96 */
3229 7, /* 104 */
3230 7, /* 112 */
3231 7, /* 120 */
3232 7, /* 128 */
3233 2, /* 136 */
3234 2, /* 144 */
3235 2, /* 152 */
3236 2, /* 160 */
3237 2, /* 168 */
3238 2, /* 176 */
3239 2, /* 184 */
3240 2 /* 192 */
3241};
3242
acdfcd04
AK
3243static inline int size_index_elem(size_t bytes)
3244{
3245 return (bytes - 1) / 8;
3246}
3247
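/*
 * Example lookup based on the table above (cache sizes assume the
 * default power of two kmalloc setup): a 100 byte request gives
 * size_index_elem(100) == (100 - 1) / 8 == 12 and size_index[12] == 7,
 * so it is served from the 128 byte kmalloc cache. Requests above 192
 * bytes fall through to fls(size - 1) instead.
 */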
81819f0f
CL
3248static struct kmem_cache *get_slab(size_t size, gfp_t flags)
3249{
f1b26339 3250 int index;
81819f0f 3251
f1b26339
CL
3252 if (size <= 192) {
3253 if (!size)
3254 return ZERO_SIZE_PTR;
81819f0f 3255
acdfcd04 3256 index = size_index[size_index_elem(size)];
aadb4bc4 3257 } else
f1b26339 3258 index = fls(size - 1);
81819f0f
CL
3259
3260#ifdef CONFIG_ZONE_DMA
f1b26339 3261 if (unlikely((flags & SLUB_DMA)))
51df1142 3262 return kmalloc_dma_caches[index];
f1b26339 3263
81819f0f 3264#endif
51df1142 3265 return kmalloc_caches[index];
81819f0f
CL
3266}
3267
3268void *__kmalloc(size_t size, gfp_t flags)
3269{
aadb4bc4 3270 struct kmem_cache *s;
5b882be4 3271 void *ret;
81819f0f 3272
ffadd4d0 3273 if (unlikely(size > SLUB_MAX_SIZE))
eada35ef 3274 return kmalloc_large(size, flags);
aadb4bc4
CL
3275
3276 s = get_slab(size, flags);
3277
3278 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
3279 return s;
3280
2b847c3c 3281 ret = slab_alloc(s, flags, _RET_IP_);
5b882be4 3282
ca2b84cb 3283 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
5b882be4
EGM
3284
3285 return ret;
81819f0f
CL
3286}
3287EXPORT_SYMBOL(__kmalloc);
3288
5d1f57e4 3289#ifdef CONFIG_NUMA
f619cfe1
CL
3290static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3291{
b1eeab67 3292 struct page *page;
e4f7c0b4 3293 void *ptr = NULL;
f619cfe1 3294
b1eeab67
VN
3295 flags |= __GFP_COMP | __GFP_NOTRACK;
3296 page = alloc_pages_node(node, flags, get_order(size));
f619cfe1 3297 if (page)
e4f7c0b4
CM
3298 ptr = page_address(page);
3299
3300 kmemleak_alloc(ptr, size, 1, flags);
3301 return ptr;
f619cfe1
CL
3302}
3303
81819f0f
CL
3304void *__kmalloc_node(size_t size, gfp_t flags, int node)
3305{
aadb4bc4 3306 struct kmem_cache *s;
5b882be4 3307 void *ret;
81819f0f 3308
057685cf 3309 if (unlikely(size > SLUB_MAX_SIZE)) {
5b882be4
EGM
3310 ret = kmalloc_large_node(size, flags, node);
3311
ca2b84cb
EGM
3312 trace_kmalloc_node(_RET_IP_, ret,
3313 size, PAGE_SIZE << get_order(size),
3314 flags, node);
5b882be4
EGM
3315
3316 return ret;
3317 }
aadb4bc4
CL
3318
3319 s = get_slab(size, flags);
3320
3321 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
3322 return s;
3323
2b847c3c 3324 ret = slab_alloc_node(s, flags, node, _RET_IP_);
5b882be4 3325
ca2b84cb 3326 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
5b882be4
EGM
3327
3328 return ret;
81819f0f
CL
3329}
3330EXPORT_SYMBOL(__kmalloc_node);
3331#endif
3332
3333size_t ksize(const void *object)
3334{
272c1d21 3335 struct page *page;
81819f0f 3336
ef8b4520 3337 if (unlikely(object == ZERO_SIZE_PTR))
272c1d21
CL
3338 return 0;
3339
294a80a8 3340 page = virt_to_head_page(object);
294a80a8 3341
76994412
PE
3342 if (unlikely(!PageSlab(page))) {
3343 WARN_ON(!PageCompound(page));
294a80a8 3344 return PAGE_SIZE << compound_order(page);
76994412 3345 }
81819f0f 3346
1b4f59e3 3347 return slab_ksize(page->slab_cache);
81819f0f 3348}
b1aabecd 3349EXPORT_SYMBOL(ksize);
81819f0f 3350
d18a90dd
BG
3351#ifdef CONFIG_SLUB_DEBUG
3352bool verify_mem_not_deleted(const void *x)
3353{
3354 struct page *page;
3355 void *object = (void *)x;
3356 unsigned long flags;
3357 bool rv;
3358
3359 if (unlikely(ZERO_OR_NULL_PTR(x)))
3360 return false;
3361
3362 local_irq_save(flags);
3363
3364 page = virt_to_head_page(x);
3365 if (unlikely(!PageSlab(page))) {
3366 /* maybe it was from stack? */
3367 rv = true;
3368 goto out_unlock;
3369 }
3370
3371 slab_lock(page);
1b4f59e3
GC
3372 if (on_freelist(page->slab_cache, page, object)) {
3373 object_err(page->slab_cache, page, object, "Object is on free-list");
d18a90dd
BG
3374 rv = false;
3375 } else {
3376 rv = true;
3377 }
3378 slab_unlock(page);
3379
3380out_unlock:
3381 local_irq_restore(flags);
3382 return rv;
3383}
3384EXPORT_SYMBOL(verify_mem_not_deleted);
3385#endif
3386
81819f0f
CL
3387void kfree(const void *x)
3388{
81819f0f 3389 struct page *page;
5bb983b0 3390 void *object = (void *)x;
81819f0f 3391
2121db74
PE
3392 trace_kfree(_RET_IP_, x);
3393
2408c550 3394 if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0f
CL
3395 return;
3396
b49af68f 3397 page = virt_to_head_page(x);
aadb4bc4 3398 if (unlikely(!PageSlab(page))) {
0937502a 3399 BUG_ON(!PageCompound(page));
e4f7c0b4 3400 kmemleak_free(x);
d9b7f226 3401 __free_pages(page, compound_order(page));
aadb4bc4
CL
3402 return;
3403 }
1b4f59e3 3404 slab_free(page->slab_cache, page, object, _RET_IP_);
81819f0f
CL
3405}
3406EXPORT_SYMBOL(kfree);
3407
2086d26a 3408/*
672bba3a
CL
3409 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
3410 * the remaining slabs by the number of items in use. The slabs with the
3411 * most items in use come first. New allocations will then fill those up
3412 * and thus they can be removed from the partial lists.
3413 *
3414 * The slabs with the least items are placed last. This results in them
3415 * being allocated from last increasing the chance that the last objects
3416 * are freed in them.
2086d26a
CL
3417 */
3418int kmem_cache_shrink(struct kmem_cache *s)
3419{
3420 int node;
3421 int i;
3422 struct kmem_cache_node *n;
3423 struct page *page;
3424 struct page *t;
205ab99d 3425 int objects = oo_objects(s->max);
2086d26a 3426 struct list_head *slabs_by_inuse =
834f3d11 3427 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2086d26a
CL
3428 unsigned long flags;
3429
3430 if (!slabs_by_inuse)
3431 return -ENOMEM;
3432
3433 flush_all(s);
f64dc58c 3434 for_each_node_state(node, N_NORMAL_MEMORY) {
2086d26a
CL
3435 n = get_node(s, node);
3436
3437 if (!n->nr_partial)
3438 continue;
3439
834f3d11 3440 for (i = 0; i < objects; i++)
2086d26a
CL
3441 INIT_LIST_HEAD(slabs_by_inuse + i);
3442
3443 spin_lock_irqsave(&n->list_lock, flags);
3444
3445 /*
672bba3a 3446 * Build lists indexed by the items in use in each slab.
2086d26a 3447 *
672bba3a
CL
3448 * Note that concurrent frees may occur while we hold the
3449 * list_lock. page->inuse here is the upper limit.
2086d26a
CL
3450 */
3451 list_for_each_entry_safe(page, t, &n->partial, lru) {
69cb8e6b
CL
3452 list_move(&page->lru, slabs_by_inuse + page->inuse);
3453 if (!page->inuse)
3454 n->nr_partial--;
2086d26a
CL
3455 }
3456
2086d26a 3457 /*
672bba3a
CL
3458 * Rebuild the partial list with the slabs filled up most
3459 * first and the least used slabs at the end.
2086d26a 3460 */
69cb8e6b 3461 for (i = objects - 1; i > 0; i--)
2086d26a
CL
3462 list_splice(slabs_by_inuse + i, n->partial.prev);
3463
2086d26a 3464 spin_unlock_irqrestore(&n->list_lock, flags);
69cb8e6b
CL
3465
3466 /* Release empty slabs */
3467 list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
3468 discard_slab(s, page);
2086d26a
CL
3469 }
3470
3471 kfree(slabs_by_inuse);
3472 return 0;
3473}
3474EXPORT_SYMBOL(kmem_cache_shrink);
3475
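/*
 * Illustrative note: slabs_by_inuse[i] gathers the partial slabs that
 * currently have i objects in use, so a slab with 3 live objects lands
 * in bucket 3. Bucket 0 collects the completely empty slabs; it is not
 * spliced back onto the partial list and its pages are discarded at the
 * end of the per-node pass.
 */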
92a5bbc1 3476#if defined(CONFIG_MEMORY_HOTPLUG)
b9049e23
YG
3477static int slab_mem_going_offline_callback(void *arg)
3478{
3479 struct kmem_cache *s;
3480
18004c5d 3481 mutex_lock(&slab_mutex);
b9049e23
YG
3482 list_for_each_entry(s, &slab_caches, list)
3483 kmem_cache_shrink(s);
18004c5d 3484 mutex_unlock(&slab_mutex);
b9049e23
YG
3485
3486 return 0;
3487}
3488
3489static void slab_mem_offline_callback(void *arg)
3490{
3491 struct kmem_cache_node *n;
3492 struct kmem_cache *s;
3493 struct memory_notify *marg = arg;
3494 int offline_node;
3495
b9d5ab25 3496 offline_node = marg->status_change_nid_normal;
b9049e23
YG
3497
3498 /*
3499 * If the node still has available memory, we still need the
3500 * kmem_cache_node for it, so leave it in place.
3501 */
3502 if (offline_node < 0)
3503 return;
3504
18004c5d 3505 mutex_lock(&slab_mutex);
b9049e23
YG
3506 list_for_each_entry(s, &slab_caches, list) {
3507 n = get_node(s, offline_node);
3508 if (n) {
3509 /*
3510 * if n->nr_slabs > 0, slabs still exist on the node
3511 * that is going down. We were unable to free them,
c9404c9c 3512 * and offline_pages() function shouldn't call this
b9049e23
YG
3513 * callback. So, we must fail.
3514 */
0f389ec6 3515 BUG_ON(slabs_node(s, offline_node));
b9049e23
YG
3516
3517 s->node[offline_node] = NULL;
8de66a0c 3518 kmem_cache_free(kmem_cache_node, n);
b9049e23
YG
3519 }
3520 }
18004c5d 3521 mutex_unlock(&slab_mutex);
b9049e23
YG
3522}
3523
3524static int slab_mem_going_online_callback(void *arg)
3525{
3526 struct kmem_cache_node *n;
3527 struct kmem_cache *s;
3528 struct memory_notify *marg = arg;
b9d5ab25 3529 int nid = marg->status_change_nid_normal;
b9049e23
YG
3530 int ret = 0;
3531
3532 /*
3533 * If the node's memory is already available, then kmem_cache_node is
3534 * already created. Nothing to do.
3535 */
3536 if (nid < 0)
3537 return 0;
3538
3539 /*
0121c619 3540 * We are bringing a node online. No memory is available yet. We must
b9049e23
YG
3541 * allocate a kmem_cache_node structure in order to bring the node
3542 * online.
3543 */
18004c5d 3544 mutex_lock(&slab_mutex);
b9049e23
YG
3545 list_for_each_entry(s, &slab_caches, list) {
3546 /*
3547 * XXX: kmem_cache_alloc_node will fall back to other nodes
3548 * since memory is not yet available from the node that
3549 * is brought up.
3550 */
8de66a0c 3551 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
b9049e23
YG
3552 if (!n) {
3553 ret = -ENOMEM;
3554 goto out;
3555 }
4053497d 3556 init_kmem_cache_node(n);
b9049e23
YG
3557 s->node[nid] = n;
3558 }
3559out:
18004c5d 3560 mutex_unlock(&slab_mutex);
b9049e23
YG
3561 return ret;
3562}
3563
3564static int slab_memory_callback(struct notifier_block *self,
3565 unsigned long action, void *arg)
3566{
3567 int ret = 0;
3568
3569 switch (action) {
3570 case MEM_GOING_ONLINE:
3571 ret = slab_mem_going_online_callback(arg);
3572 break;
3573 case MEM_GOING_OFFLINE:
3574 ret = slab_mem_going_offline_callback(arg);
3575 break;
3576 case MEM_OFFLINE:
3577 case MEM_CANCEL_ONLINE:
3578 slab_mem_offline_callback(arg);
3579 break;
3580 case MEM_ONLINE:
3581 case MEM_CANCEL_OFFLINE:
3582 break;
3583 }
dc19f9db
KH
3584 if (ret)
3585 ret = notifier_from_errno(ret);
3586 else
3587 ret = NOTIFY_OK;
b9049e23
YG
3588 return ret;
3589}
3590
3591#endif /* CONFIG_MEMORY_HOTPLUG */
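/*
 * The callback above is wired up early in boot: kmem_cache_init() below
 * registers it via hotplug_memory_notifier(slab_memory_callback,
 * SLAB_CALLBACK_PRI) right after the kmem_cache_node boot cache is set up.
 */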
3592
81819f0f
CL
3593/********************************************************************
3594 * Basic setup of slabs
3595 *******************************************************************/
3596
51df1142
CL
3597/*
3598 * Used for early kmem_cache structures that were allocated using
dffb4d60
CL
3599 * the page allocator. Allocate them properly, then fix up the pointers
3600 * that may be pointing to the wrong kmem_cache structure.
51df1142
CL
3601 */
3602
dffb4d60 3603static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
51df1142
CL
3604{
3605 int node;
dffb4d60 3606 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
51df1142 3607
dffb4d60 3608 memcpy(s, static_cache, kmem_cache->object_size);
51df1142
CL
3609
3610 for_each_node_state(node, N_NORMAL_MEMORY) {
3611 struct kmem_cache_node *n = get_node(s, node);
3612 struct page *p;
3613
3614 if (n) {
3615 list_for_each_entry(p, &n->partial, lru)
1b4f59e3 3616 p->slab_cache = s;
51df1142 3617
607bf324 3618#ifdef CONFIG_SLUB_DEBUG
51df1142 3619 list_for_each_entry(p, &n->full, lru)
1b4f59e3 3620 p->slab_cache = s;
51df1142
CL
3621#endif
3622 }
3623 }
dffb4d60
CL
3624 list_add(&s->list, &slab_caches);
3625 return s;
51df1142
CL
3626}
3627
81819f0f
CL
3628void __init kmem_cache_init(void)
3629{
dffb4d60
CL
3630 static __initdata struct kmem_cache boot_kmem_cache,
3631 boot_kmem_cache_node;
81819f0f 3632 int i;
dffb4d60 3633 int caches = 2;
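	/*
	 * The count starts at 2 for the two boot caches created below
	 * (kmem_cache and kmem_cache_node); every kmalloc cache created
	 * later in this function does caches++.
	 */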
51df1142 3634
fc8d8620
SG
3635 if (debug_guardpage_minorder())
3636 slub_max_order = 0;
3637
dffb4d60
CL
3638 kmem_cache_node = &boot_kmem_cache_node;
3639 kmem_cache = &boot_kmem_cache;
51df1142 3640
dffb4d60
CL
3641 create_boot_cache(kmem_cache_node, "kmem_cache_node",
3642 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
b9049e23 3643
0c40ba4f 3644 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
81819f0f
CL
3645
3646 /* Able to allocate the per node structures */
3647 slab_state = PARTIAL;
3648
dffb4d60
CL
3649 create_boot_cache(kmem_cache, "kmem_cache",
3650 offsetof(struct kmem_cache, node) +
3651 nr_node_ids * sizeof(struct kmem_cache_node *),
3652 SLAB_HWCACHE_ALIGN);
8a13a4cc 3653
dffb4d60 3654 kmem_cache = bootstrap(&boot_kmem_cache);
81819f0f 3655
51df1142
CL
3656 /*
3657 * Allocate kmem_cache_node properly from the kmem_cache slab.
3658 * kmem_cache_node is separately allocated so no need to
3659 * update any list pointers.
3660 */
dffb4d60 3661 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
51df1142
CL
3662
3663 /* Now we can use the kmem_cache to allocate kmalloc slabs */
f1b26339
CL
3664
3665 /*
3666 * Patch up the size_index table if we have strange large alignment
3667 * requirements for the kmalloc array. This is only the case for
6446faa2 3668 * MIPS, it seems. The standard arches will not generate any code here.
f1b26339
CL
3669 *
3670 * Largest permitted alignment is 256 bytes due to the way we
3671 * handle the index determination for the smaller caches.
3672 *
3673 * Make sure that nothing crazy happens if someone starts tinkering
3674 * around with ARCH_KMALLOC_MINALIGN.
3675 */
3676 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3677 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3678
acdfcd04
AK
3679 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
3680 int elem = size_index_elem(i);
3681 if (elem >= ARRAY_SIZE(size_index))
3682 break;
3683 size_index[elem] = KMALLOC_SHIFT_LOW;
3684 }
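	/*
	 * Illustration (not from the original source): with
	 * KMALLOC_MIN_SIZE == 64 the loop above redirects requests of
	 * 8..56 bytes to the smallest cache actually created (64 bytes,
	 * index KMALLOC_SHIFT_LOW), since the smaller power-of-two
	 * caches do not exist in that configuration.
	 */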
f1b26339 3685
acdfcd04
AK
3686 if (KMALLOC_MIN_SIZE == 64) {
3687 /*
3688 * The 96 byte size cache is not used if the alignment
3690 * is 64 bytes.
3690 */
3691 for (i = 64 + 8; i <= 96; i += 8)
3692 size_index[size_index_elem(i)] = 7;
3693 } else if (KMALLOC_MIN_SIZE == 128) {
41d54d3b
CL
3694 /*
3695 * The 192 byte sized cache is not used if the alignment
3696 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
3697 * instead.
3698 */
3699 for (i = 128 + 8; i <= 192; i += 8)
acdfcd04 3700 size_index[size_index_elem(i)] = 8;
41d54d3b
CL
3701 }
3702
51df1142
CL
3703 /* Caches that are not of the two-to-the-power-of size */
3704 if (KMALLOC_MIN_SIZE <= 32) {
3705 kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
3706 caches++;
3707 }
3708
3709 if (KMALLOC_MIN_SIZE <= 64) {
3710 kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
3711 caches++;
3712 }
3713
3714 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3715 kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
3716 caches++;
3717 }
3718
81819f0f
CL
3719 slab_state = UP;
3720
3721 /* Provide the correct kmalloc names now that the caches are up */
84c1cf62
PE
3722 if (KMALLOC_MIN_SIZE <= 32) {
3723 kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
3724 BUG_ON(!kmalloc_caches[1]->name);
3725 }
3726
3727 if (KMALLOC_MIN_SIZE <= 64) {
3728 kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
3729 BUG_ON(!kmalloc_caches[2]->name);
3730 }
3731
d7278bd7
CL
3732 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3733 char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
3734
3735 BUG_ON(!s);
51df1142 3736 kmalloc_caches[i]->name = s;
d7278bd7 3737 }
81819f0f
CL
3738
3739#ifdef CONFIG_SMP
3740 register_cpu_notifier(&slab_notifier);
9dfc6e68 3741#endif
81819f0f 3742
55136592 3743#ifdef CONFIG_ZONE_DMA
51df1142
CL
3744 for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
3745 struct kmem_cache *s = kmalloc_caches[i];
55136592 3746
51df1142 3747 if (s && s->size) {
55136592 3748 char *name = kasprintf(GFP_NOWAIT,
3b0efdfa 3749 "dma-kmalloc-%d", s->object_size);
55136592
CL
3750
3751 BUG_ON(!name);
51df1142 3752 kmalloc_dma_caches[i] = create_kmalloc_cache(name,
3b0efdfa 3753 s->object_size, SLAB_CACHE_DMA);
55136592
CL
3754 }
3755 }
3756#endif
3adbefee
IM
3757 printk(KERN_INFO
3758 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
4b356be0
CL
3759 " CPUs=%d, Nodes=%d\n",
3760 caches, cache_line_size(),
81819f0f
CL
3761 slub_min_order, slub_max_order, slub_min_objects,
3762 nr_cpu_ids, nr_node_ids);
3763}
3764
7e85ee0c
PE
3765void __init kmem_cache_init_late(void)
3766{
7e85ee0c
PE
3767}
3768
81819f0f
CL
3769/*
3770 * Find a mergeable slab cache
3771 */
3772static int slab_unmergeable(struct kmem_cache *s)
3773{
3774 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3775 return 1;
3776
c59def9f 3777 if (s->ctor)
81819f0f
CL
3778 return 1;
3779
8ffa6875
CL
3780 /*
3781 * We may have set a slab to be unmergeable during bootstrap.
3782 */
3783 if (s->refcount < 0)
3784 return 1;
3785
81819f0f
CL
3786 return 0;
3787}
3788
3789static struct kmem_cache *find_mergeable(size_t size,
ba0268a8 3790 size_t align, unsigned long flags, const char *name,
51cc5068 3791 void (*ctor)(void *))
81819f0f 3792{
5b95a4ac 3793 struct kmem_cache *s;
81819f0f
CL
3794
3795 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3796 return NULL;
3797
c59def9f 3798 if (ctor)
81819f0f
CL
3799 return NULL;
3800
3801 size = ALIGN(size, sizeof(void *));
3802 align = calculate_alignment(flags, align, size);
3803 size = ALIGN(size, align);
ba0268a8 3804 flags = kmem_cache_flags(size, flags, name, NULL);
81819f0f 3805
5b95a4ac 3806 list_for_each_entry(s, &slab_caches, list) {
81819f0f
CL
3807 if (slab_unmergeable(s))
3808 continue;
3809
3810 if (size > s->size)
3811 continue;
3812
ba0268a8 3813 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
81819f0f
CL
3814 continue;
3815 /*
3816 * Check if alignment is compatible.
3817 * Courtesy of Adrian Drzewiecki
3818 */
06428780 3819 if ((s->size & ~(align - 1)) != s->size)
81819f0f
CL
3820 continue;
3821
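		/*
		 * Only merge when the candidate wastes less than one word
		 * per object; a noticeably larger existing cache is not a
		 * suitable home for this size.
		 */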
3822 if (s->size - size >= sizeof(void *))
3823 continue;
3824
3825 return s;
3826 }
3827 return NULL;
3828}
3829
cbb79694 3830struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
51cc5068 3831 size_t align, unsigned long flags, void (*ctor)(void *))
81819f0f
CL
3832{
3833 struct kmem_cache *s;
3834
ba0268a8 3835 s = find_mergeable(size, align, flags, name, ctor);
81819f0f
CL
3836 if (s) {
3837 s->refcount++;
3838 /*
3839 * Adjust the object sizes so that we clear
3840 * the complete object on kzalloc.
3841 */
3b0efdfa 3842 s->object_size = max(s->object_size, (int)size);
81819f0f 3843 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
6446faa2 3844
7b8f3b66 3845 if (sysfs_slab_alias(s, name)) {
7b8f3b66 3846 s->refcount--;
cbb79694 3847 s = NULL;
7b8f3b66 3848 }
a0e1d1be 3849 }
6446faa2 3850
cbb79694
CL
3851 return s;
3852}
84c1cf62 3853
8a13a4cc 3854int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
cbb79694 3855{
aac3a166
PE
3856 int err;
3857
3858 err = kmem_cache_open(s, flags);
3859 if (err)
3860 return err;
20cea968 3861
45530c44
CL
3862 /* Mutex is not taken during early boot */
3863 if (slab_state <= UP)
3864 return 0;
3865
aac3a166
PE
3866 mutex_unlock(&slab_mutex);
3867 err = sysfs_slab_add(s);
3868 mutex_lock(&slab_mutex);
20cea968 3869
aac3a166
PE
3870 if (err)
3871 kmem_cache_close(s);
20cea968 3872
aac3a166 3873 return err;
81819f0f 3874}
81819f0f 3875
81819f0f 3876#ifdef CONFIG_SMP
81819f0f 3877/*
672bba3a
CL
3878 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3879 * necessary.
81819f0f
CL
3880 */
3881static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3882 unsigned long action, void *hcpu)
3883{
3884 long cpu = (long)hcpu;
5b95a4ac
CL
3885 struct kmem_cache *s;
3886 unsigned long flags;
81819f0f
CL
3887
3888 switch (action) {
3889 case CPU_UP_CANCELED:
8bb78442 3890 case CPU_UP_CANCELED_FROZEN:
81819f0f 3891 case CPU_DEAD:
8bb78442 3892 case CPU_DEAD_FROZEN:
18004c5d 3893 mutex_lock(&slab_mutex);
5b95a4ac
CL
3894 list_for_each_entry(s, &slab_caches, list) {
3895 local_irq_save(flags);
3896 __flush_cpu_slab(s, cpu);
3897 local_irq_restore(flags);
3898 }
18004c5d 3899 mutex_unlock(&slab_mutex);
81819f0f
CL
3900 break;
3901 default:
3902 break;
3903 }
3904 return NOTIFY_OK;
3905}
3906
06428780 3907static struct notifier_block __cpuinitdata slab_notifier = {
3adbefee 3908 .notifier_call = slab_cpuup_callback
06428780 3909};
81819f0f
CL
3910
3911#endif
3912
ce71e27c 3913void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
81819f0f 3914{
aadb4bc4 3915 struct kmem_cache *s;
94b528d0 3916 void *ret;
aadb4bc4 3917
ffadd4d0 3918 if (unlikely(size > SLUB_MAX_SIZE))
eada35ef
PE
3919 return kmalloc_large(size, gfpflags);
3920
aadb4bc4 3921 s = get_slab(size, gfpflags);
81819f0f 3922
2408c550 3923 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 3924 return s;
81819f0f 3925
2b847c3c 3926 ret = slab_alloc(s, gfpflags, caller);
94b528d0 3927
25985edc 3928 /* Honor the call site pointer we received. */
ca2b84cb 3929 trace_kmalloc(caller, ret, size, s->size, gfpflags);
94b528d0
EGM
3930
3931 return ret;
81819f0f
CL
3932}
3933
5d1f57e4 3934#ifdef CONFIG_NUMA
81819f0f 3935void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
ce71e27c 3936 int node, unsigned long caller)
81819f0f 3937{
aadb4bc4 3938 struct kmem_cache *s;
94b528d0 3939 void *ret;
aadb4bc4 3940
d3e14aa3
XF
3941 if (unlikely(size > SLUB_MAX_SIZE)) {
3942 ret = kmalloc_large_node(size, gfpflags, node);
3943
3944 trace_kmalloc_node(caller, ret,
3945 size, PAGE_SIZE << get_order(size),
3946 gfpflags, node);
3947
3948 return ret;
3949 }
eada35ef 3950
aadb4bc4 3951 s = get_slab(size, gfpflags);
81819f0f 3952
2408c550 3953 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 3954 return s;
81819f0f 3955
2b847c3c 3956 ret = slab_alloc_node(s, gfpflags, node, caller);
94b528d0 3957
25985edc 3958 /* Honor the call site pointer we received. */
ca2b84cb 3959 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
94b528d0
EGM
3960
3961 return ret;
81819f0f 3962}
5d1f57e4 3963#endif
81819f0f 3964
ab4d5ed5 3965#ifdef CONFIG_SYSFS
205ab99d
CL
3966static int count_inuse(struct page *page)
3967{
3968 return page->inuse;
3969}
3970
3971static int count_total(struct page *page)
3972{
3973 return page->objects;
3974}
ab4d5ed5 3975#endif
205ab99d 3976
ab4d5ed5 3977#ifdef CONFIG_SLUB_DEBUG
434e245d
CL
3978static int validate_slab(struct kmem_cache *s, struct page *page,
3979 unsigned long *map)
53e15af0
CL
3980{
3981 void *p;
a973e9dd 3982 void *addr = page_address(page);
53e15af0
CL
3983
3984 if (!check_slab(s, page) ||
3985 !on_freelist(s, page, NULL))
3986 return 0;
3987
3988 /* Now we know that a valid freelist exists */
39b26464 3989 bitmap_zero(map, page->objects);
53e15af0 3990
5f80b13a
CL
3991 get_map(s, page, map);
3992 for_each_object(p, s, addr, page->objects) {
3993 if (test_bit(slab_index(p, s, addr), map))
3994 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
3995 return 0;
53e15af0
CL
3996 }
3997
224a88be 3998 for_each_object(p, s, addr, page->objects)
7656c72b 3999 if (!test_bit(slab_index(p, s, addr), map))
37d57443 4000 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
53e15af0
CL
4001 return 0;
4002 return 1;
4003}
4004
434e245d
CL
4005static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4006 unsigned long *map)
53e15af0 4007{
881db7fb
CL
4008 slab_lock(page);
4009 validate_slab(s, page, map);
4010 slab_unlock(page);
53e15af0
CL
4011}
4012
434e245d
CL
4013static int validate_slab_node(struct kmem_cache *s,
4014 struct kmem_cache_node *n, unsigned long *map)
53e15af0
CL
4015{
4016 unsigned long count = 0;
4017 struct page *page;
4018 unsigned long flags;
4019
4020 spin_lock_irqsave(&n->list_lock, flags);
4021
4022 list_for_each_entry(page, &n->partial, lru) {
434e245d 4023 validate_slab_slab(s, page, map);
53e15af0
CL
4024 count++;
4025 }
4026 if (count != n->nr_partial)
4027 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
4028 "counter=%ld\n", s->name, count, n->nr_partial);
4029
4030 if (!(s->flags & SLAB_STORE_USER))
4031 goto out;
4032
4033 list_for_each_entry(page, &n->full, lru) {
434e245d 4034 validate_slab_slab(s, page, map);
53e15af0
CL
4035 count++;
4036 }
4037 if (count != atomic_long_read(&n->nr_slabs))
4038 printk(KERN_ERR "SLUB: %s %ld slabs counted but "
4039 "counter=%ld\n", s->name, count,
4040 atomic_long_read(&n->nr_slabs));
4041
4042out:
4043 spin_unlock_irqrestore(&n->list_lock, flags);
4044 return count;
4045}
4046
434e245d 4047static long validate_slab_cache(struct kmem_cache *s)
53e15af0
CL
4048{
4049 int node;
4050 unsigned long count = 0;
205ab99d 4051 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
434e245d
CL
4052 sizeof(unsigned long), GFP_KERNEL);
4053
4054 if (!map)
4055 return -ENOMEM;
53e15af0
CL
4056
4057 flush_all(s);
f64dc58c 4058 for_each_node_state(node, N_NORMAL_MEMORY) {
53e15af0
CL
4059 struct kmem_cache_node *n = get_node(s, node);
4060
434e245d 4061 count += validate_slab_node(s, n, map);
53e15af0 4062 }
434e245d 4063 kfree(map);
53e15af0
CL
4064 return count;
4065}
88a420e4 4066/*
672bba3a 4067 * Generate lists of code addresses where slabcache objects are allocated
88a420e4
CL
4068 * and freed.
4069 */
4070
4071struct location {
4072 unsigned long count;
ce71e27c 4073 unsigned long addr;
45edfa58
CL
4074 long long sum_time;
4075 long min_time;
4076 long max_time;
4077 long min_pid;
4078 long max_pid;
174596a0 4079 DECLARE_BITMAP(cpus, NR_CPUS);
45edfa58 4080 nodemask_t nodes;
88a420e4
CL
4081};
4082
4083struct loc_track {
4084 unsigned long max;
4085 unsigned long count;
4086 struct location *loc;
4087};
4088
4089static void free_loc_track(struct loc_track *t)
4090{
4091 if (t->max)
4092 free_pages((unsigned long)t->loc,
4093 get_order(sizeof(struct location) * t->max));
4094}
4095
68dff6a9 4096static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4
CL
4097{
4098 struct location *l;
4099 int order;
4100
88a420e4
CL
4101 order = get_order(sizeof(struct location) * max);
4102
68dff6a9 4103 l = (void *)__get_free_pages(flags, order);
88a420e4
CL
4104 if (!l)
4105 return 0;
4106
4107 if (t->count) {
4108 memcpy(l, t->loc, sizeof(struct location) * t->count);
4109 free_loc_track(t);
4110 }
4111 t->max = max;
4112 t->loc = l;
4113 return 1;
4114}
4115
4116static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa58 4117 const struct track *track)
88a420e4
CL
4118{
4119 long start, end, pos;
4120 struct location *l;
ce71e27c 4121 unsigned long caddr;
45edfa58 4122 unsigned long age = jiffies - track->when;
88a420e4
CL
4123
4124 start = -1;
4125 end = t->count;
4126
4127 for ( ; ; ) {
4128 pos = start + (end - start + 1) / 2;
4129
4130 /*
4131 * There is nothing at "end". If we end up there,
4132 * we need to insert the new element before "end".
4133 */
4134 if (pos == end)
4135 break;
4136
4137 caddr = t->loc[pos].addr;
45edfa58
CL
4138 if (track->addr == caddr) {
4139
4140 l = &t->loc[pos];
4141 l->count++;
4142 if (track->when) {
4143 l->sum_time += age;
4144 if (age < l->min_time)
4145 l->min_time = age;
4146 if (age > l->max_time)
4147 l->max_time = age;
4148
4149 if (track->pid < l->min_pid)
4150 l->min_pid = track->pid;
4151 if (track->pid > l->max_pid)
4152 l->max_pid = track->pid;
4153
174596a0
RR
4154 cpumask_set_cpu(track->cpu,
4155 to_cpumask(l->cpus));
45edfa58
CL
4156 }
4157 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
4158 return 1;
4159 }
4160
45edfa58 4161 if (track->addr < caddr)
88a420e4
CL
4162 end = pos;
4163 else
4164 start = pos;
4165 }
4166
4167 /*
672bba3a 4168 * Not found. Insert new tracking element.
88a420e4 4169 */
68dff6a9 4170 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4
CL
4171 return 0;
4172
4173 l = t->loc + pos;
4174 if (pos < t->count)
4175 memmove(l + 1, l,
4176 (t->count - pos) * sizeof(struct location));
4177 t->count++;
4178 l->count = 1;
45edfa58
CL
4179 l->addr = track->addr;
4180 l->sum_time = age;
4181 l->min_time = age;
4182 l->max_time = age;
4183 l->min_pid = track->pid;
4184 l->max_pid = track->pid;
174596a0
RR
4185 cpumask_clear(to_cpumask(l->cpus));
4186 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
45edfa58
CL
4187 nodes_clear(l->nodes);
4188 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
4189 return 1;
4190}
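/*
 * t->loc[] is kept sorted by call site address: the binary search above
 * finds an existing entry in O(log n), and a new entry is inserted in
 * place via the memmove() so the ordering is preserved.
 */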
4191
4192static void process_slab(struct loc_track *t, struct kmem_cache *s,
bbd7d57b 4193 struct page *page, enum track_item alloc,
a5dd5c11 4194 unsigned long *map)
88a420e4 4195{
a973e9dd 4196 void *addr = page_address(page);
88a420e4
CL
4197 void *p;
4198
39b26464 4199 bitmap_zero(map, page->objects);
5f80b13a 4200 get_map(s, page, map);
88a420e4 4201
224a88be 4202 for_each_object(p, s, addr, page->objects)
45edfa58
CL
4203 if (!test_bit(slab_index(p, s, addr), map))
4204 add_location(t, s, get_track(s, p, alloc));
88a420e4
CL
4205}
4206
4207static int list_locations(struct kmem_cache *s, char *buf,
4208 enum track_item alloc)
4209{
e374d483 4210 int len = 0;
88a420e4 4211 unsigned long i;
68dff6a9 4212 struct loc_track t = { 0, 0, NULL };
88a420e4 4213 int node;
bbd7d57b
ED
4214 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4215 sizeof(unsigned long), GFP_KERNEL);
88a420e4 4216
bbd7d57b
ED
4217 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4218 GFP_TEMPORARY)) {
4219 kfree(map);
68dff6a9 4220 return sprintf(buf, "Out of memory\n");
bbd7d57b 4221 }
88a420e4
CL
4222 /* Push back cpu slabs */
4223 flush_all(s);
4224
f64dc58c 4225 for_each_node_state(node, N_NORMAL_MEMORY) {
88a420e4
CL
4226 struct kmem_cache_node *n = get_node(s, node);
4227 unsigned long flags;
4228 struct page *page;
4229
9e86943b 4230 if (!atomic_long_read(&n->nr_slabs))
88a420e4
CL
4231 continue;
4232
4233 spin_lock_irqsave(&n->list_lock, flags);
4234 list_for_each_entry(page, &n->partial, lru)
bbd7d57b 4235 process_slab(&t, s, page, alloc, map);
88a420e4 4236 list_for_each_entry(page, &n->full, lru)
bbd7d57b 4237 process_slab(&t, s, page, alloc, map);
88a420e4
CL
4238 spin_unlock_irqrestore(&n->list_lock, flags);
4239 }
4240
4241 for (i = 0; i < t.count; i++) {
45edfa58 4242 struct location *l = &t.loc[i];
88a420e4 4243
9c246247 4244 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
88a420e4 4245 break;
e374d483 4246 len += sprintf(buf + len, "%7ld ", l->count);
45edfa58
CL
4247
4248 if (l->addr)
62c70bce 4249 len += sprintf(buf + len, "%pS", (void *)l->addr);
88a420e4 4250 else
e374d483 4251 len += sprintf(buf + len, "<not-available>");
45edfa58
CL
4252
4253 if (l->sum_time != l->min_time) {
e374d483 4254 len += sprintf(buf + len, " age=%ld/%ld/%ld",
f8bd2258
RZ
4255 l->min_time,
4256 (long)div_u64(l->sum_time, l->count),
4257 l->max_time);
45edfa58 4258 } else
e374d483 4259 len += sprintf(buf + len, " age=%ld",
45edfa58
CL
4260 l->min_time);
4261
4262 if (l->min_pid != l->max_pid)
e374d483 4263 len += sprintf(buf + len, " pid=%ld-%ld",
45edfa58
CL
4264 l->min_pid, l->max_pid);
4265 else
e374d483 4266 len += sprintf(buf + len, " pid=%ld",
45edfa58
CL
4267 l->min_pid);
4268
174596a0
RR
4269 if (num_online_cpus() > 1 &&
4270 !cpumask_empty(to_cpumask(l->cpus)) &&
e374d483
HH
4271 len < PAGE_SIZE - 60) {
4272 len += sprintf(buf + len, " cpus=");
4273 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
174596a0 4274 to_cpumask(l->cpus));
45edfa58
CL
4275 }
4276
62bc62a8 4277 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
e374d483
HH
4278 len < PAGE_SIZE - 60) {
4279 len += sprintf(buf + len, " nodes=");
4280 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
45edfa58
CL
4281 l->nodes);
4282 }
4283
e374d483 4284 len += sprintf(buf + len, "\n");
88a420e4
CL
4285 }
4286
4287 free_loc_track(&t);
bbd7d57b 4288 kfree(map);
88a420e4 4289 if (!t.count)
e374d483
HH
4290 len += sprintf(buf, "No data\n");
4291 return len;
88a420e4 4292}
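/*
 * The text built by list_locations() backs the alloc_calls and
 * free_calls sysfs attributes defined further down. Both return -ENOSYS
 * unless the cache has SLAB_STORE_USER set, since the tracking data
 * does not exist otherwise.
 */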
ab4d5ed5 4293#endif
88a420e4 4294
a5a84755
CL
4295#ifdef SLUB_RESILIENCY_TEST
4296static void resiliency_test(void)
4297{
4298 u8 *p;
4299
4300 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
4301
4302 printk(KERN_ERR "SLUB resiliency testing\n");
4303 printk(KERN_ERR "-----------------------\n");
4304 printk(KERN_ERR "A. Corruption after allocation\n");
4305
4306 p = kzalloc(16, GFP_KERNEL);
4307 p[16] = 0x12;
4308 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
4309 " 0x12->0x%p\n\n", p + 16);
4310
4311 validate_slab_cache(kmalloc_caches[4]);
4312
4313 /* Hmmm... The next two are dangerous */
4314 p = kzalloc(32, GFP_KERNEL);
4315 p[32 + sizeof(void *)] = 0x34;
4316 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
4317 " 0x34 -> -0x%p\n", p);
4318 printk(KERN_ERR
4319 "If allocated object is overwritten then not detectable\n\n");
4320
4321 validate_slab_cache(kmalloc_caches[5]);
4322 p = kzalloc(64, GFP_KERNEL);
4323 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4324 *p = 0x56;
4325 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4326 p);
4327 printk(KERN_ERR
4328 "If allocated object is overwritten then not detectable\n\n");
4329 validate_slab_cache(kmalloc_caches[6]);
4330
4331 printk(KERN_ERR "\nB. Corruption after free\n");
4332 p = kzalloc(128, GFP_KERNEL);
4333 kfree(p);
4334 *p = 0x78;
4335 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4336 validate_slab_cache(kmalloc_caches[7]);
4337
4338 p = kzalloc(256, GFP_KERNEL);
4339 kfree(p);
4340 p[50] = 0x9a;
4341 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
4342 p);
4343 validate_slab_cache(kmalloc_caches[8]);
4344
4345 p = kzalloc(512, GFP_KERNEL);
4346 kfree(p);
4347 p[512] = 0xab;
4348 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4349 validate_slab_cache(kmalloc_caches[9]);
4350}
4351#else
4352#ifdef CONFIG_SYSFS
4353static void resiliency_test(void) {}
4354#endif
4355#endif
4356
ab4d5ed5 4357#ifdef CONFIG_SYSFS
81819f0f 4358enum slab_stat_type {
205ab99d
CL
4359 SL_ALL, /* All slabs */
4360 SL_PARTIAL, /* Only partially allocated slabs */
4361 SL_CPU, /* Only slabs used for cpu caches */
4362 SL_OBJECTS, /* Determine allocated objects not slabs */
4363 SL_TOTAL /* Determine object capacity not slabs */
81819f0f
CL
4364};
4365
205ab99d 4366#define SO_ALL (1 << SL_ALL)
81819f0f
CL
4367#define SO_PARTIAL (1 << SL_PARTIAL)
4368#define SO_CPU (1 << SL_CPU)
4369#define SO_OBJECTS (1 << SL_OBJECTS)
205ab99d 4370#define SO_TOTAL (1 << SL_TOTAL)
81819f0f 4371
62e5c4b4
CG
4372static ssize_t show_slab_objects(struct kmem_cache *s,
4373 char *buf, unsigned long flags)
81819f0f
CL
4374{
4375 unsigned long total = 0;
81819f0f
CL
4376 int node;
4377 int x;
4378 unsigned long *nodes;
4379 unsigned long *per_cpu;
4380
4381 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
62e5c4b4
CG
4382 if (!nodes)
4383 return -ENOMEM;
81819f0f
CL
4384 per_cpu = nodes + nr_node_ids;
4385
205ab99d
CL
4386 if (flags & SO_CPU) {
4387 int cpu;
81819f0f 4388
205ab99d 4389 for_each_possible_cpu(cpu) {
9dfc6e68 4390 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
ec3ab083 4391 int node;
49e22585 4392 struct page *page;
dfb4f096 4393
bc6697d8 4394 page = ACCESS_ONCE(c->page);
ec3ab083
CL
4395 if (!page)
4396 continue;
205ab99d 4397
ec3ab083
CL
4398 node = page_to_nid(page);
4399 if (flags & SO_TOTAL)
4400 x = page->objects;
4401 else if (flags & SO_OBJECTS)
4402 x = page->inuse;
4403 else
4404 x = 1;
49e22585 4405
ec3ab083
CL
4406 total += x;
4407 nodes[node] += x;
4408
4409 page = ACCESS_ONCE(c->partial);
49e22585
CL
4410 if (page) {
4411 x = page->pobjects;
bc6697d8
ED
4412 total += x;
4413 nodes[node] += x;
49e22585 4414 }
ec3ab083 4415
bc6697d8 4416 per_cpu[node]++;
81819f0f
CL
4417 }
4418 }
4419
04d94879 4420 lock_memory_hotplug();
ab4d5ed5 4421#ifdef CONFIG_SLUB_DEBUG
205ab99d
CL
4422 if (flags & SO_ALL) {
4423 for_each_node_state(node, N_NORMAL_MEMORY) {
4424 struct kmem_cache_node *n = get_node(s, node);
4425
4426 if (flags & SO_TOTAL)
4427 x = atomic_long_read(&n->total_objects);
4428 else if (flags & SO_OBJECTS)
4429 x = atomic_long_read(&n->total_objects) -
4430 count_partial(n, count_free);
81819f0f 4431
81819f0f 4432 else
205ab99d 4433 x = atomic_long_read(&n->nr_slabs);
81819f0f
CL
4434 total += x;
4435 nodes[node] += x;
4436 }
4437
ab4d5ed5
CL
4438 } else
4439#endif
4440 if (flags & SO_PARTIAL) {
205ab99d
CL
4441 for_each_node_state(node, N_NORMAL_MEMORY) {
4442 struct kmem_cache_node *n = get_node(s, node);
81819f0f 4443
205ab99d
CL
4444 if (flags & SO_TOTAL)
4445 x = count_partial(n, count_total);
4446 else if (flags & SO_OBJECTS)
4447 x = count_partial(n, count_inuse);
81819f0f 4448 else
205ab99d 4449 x = n->nr_partial;
81819f0f
CL
4450 total += x;
4451 nodes[node] += x;
4452 }
4453 }
81819f0f
CL
4454 x = sprintf(buf, "%lu", total);
4455#ifdef CONFIG_NUMA
f64dc58c 4456 for_each_node_state(node, N_NORMAL_MEMORY)
81819f0f
CL
4457 if (nodes[node])
4458 x += sprintf(buf + x, " N%d=%lu",
4459 node, nodes[node]);
4460#endif
04d94879 4461 unlock_memory_hotplug();
81819f0f
CL
4462 kfree(nodes);
4463 return x + sprintf(buf + x, "\n");
4464}
4465
ab4d5ed5 4466#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
4467static int any_slab_objects(struct kmem_cache *s)
4468{
4469 int node;
81819f0f 4470
dfb4f096 4471 for_each_online_node(node) {
81819f0f
CL
4472 struct kmem_cache_node *n = get_node(s, node);
4473
dfb4f096
CL
4474 if (!n)
4475 continue;
4476
4ea33e2d 4477 if (atomic_long_read(&n->total_objects))
81819f0f
CL
4478 return 1;
4479 }
4480 return 0;
4481}
ab4d5ed5 4482#endif
81819f0f
CL
4483
4484#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
497888cf 4485#define to_slab(n) container_of(n, struct kmem_cache, kobj)
81819f0f
CL
4486
4487struct slab_attribute {
4488 struct attribute attr;
4489 ssize_t (*show)(struct kmem_cache *s, char *buf);
4490 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4491};
4492
4493#define SLAB_ATTR_RO(_name) \
ab067e99
VK
4494 static struct slab_attribute _name##_attr = \
4495 __ATTR(_name, 0400, _name##_show, NULL)
81819f0f
CL
4496
4497#define SLAB_ATTR(_name) \
4498 static struct slab_attribute _name##_attr = \
ab067e99 4499 __ATTR(_name, 0600, _name##_show, _name##_store)
81819f0f 4500
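/*
 * Each attribute declared with SLAB_ATTR()/SLAB_ATTR_RO() below shows up
 * as a file under /sys/kernel/slab/<cache>/ (the "slab" kset created in
 * slab_sysfs_init()), writable at mode 0600 or read-only at mode 0400.
 */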
81819f0f
CL
4501static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4502{
4503 return sprintf(buf, "%d\n", s->size);
4504}
4505SLAB_ATTR_RO(slab_size);
4506
4507static ssize_t align_show(struct kmem_cache *s, char *buf)
4508{
4509 return sprintf(buf, "%d\n", s->align);
4510}
4511SLAB_ATTR_RO(align);
4512
4513static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4514{
3b0efdfa 4515 return sprintf(buf, "%d\n", s->object_size);
81819f0f
CL
4516}
4517SLAB_ATTR_RO(object_size);
4518
4519static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4520{
834f3d11 4521 return sprintf(buf, "%d\n", oo_objects(s->oo));
81819f0f
CL
4522}
4523SLAB_ATTR_RO(objs_per_slab);
4524
06b285dc
CL
4525static ssize_t order_store(struct kmem_cache *s,
4526 const char *buf, size_t length)
4527{
0121c619
CL
4528 unsigned long order;
4529 int err;
4530
4531 err = strict_strtoul(buf, 10, &order);
4532 if (err)
4533 return err;
06b285dc
CL
4534
4535 if (order > slub_max_order || order < slub_min_order)
4536 return -EINVAL;
4537
4538 calculate_sizes(s, order);
4539 return length;
4540}
4541
81819f0f
CL
4542static ssize_t order_show(struct kmem_cache *s, char *buf)
4543{
834f3d11 4544 return sprintf(buf, "%d\n", oo_order(s->oo));
81819f0f 4545}
06b285dc 4546SLAB_ATTR(order);
81819f0f 4547
73d342b1
DR
4548static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4549{
4550 return sprintf(buf, "%lu\n", s->min_partial);
4551}
4552
4553static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4554 size_t length)
4555{
4556 unsigned long min;
4557 int err;
4558
4559 err = strict_strtoul(buf, 10, &min);
4560 if (err)
4561 return err;
4562
c0bdb232 4563 set_min_partial(s, min);
73d342b1
DR
4564 return length;
4565}
4566SLAB_ATTR(min_partial);
4567
49e22585
CL
4568static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4569{
4570 return sprintf(buf, "%u\n", s->cpu_partial);
4571}
4572
4573static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4574 size_t length)
4575{
4576 unsigned long objects;
4577 int err;
4578
4579 err = strict_strtoul(buf, 10, &objects);
4580 if (err)
4581 return err;
74ee4ef1
DR
4582 if (objects && kmem_cache_debug(s))
4583 return -EINVAL;
49e22585
CL
4584
4585 s->cpu_partial = objects;
4586 flush_all(s);
4587 return length;
4588}
4589SLAB_ATTR(cpu_partial);
4590
81819f0f
CL
4591static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4592{
62c70bce
JP
4593 if (!s->ctor)
4594 return 0;
4595 return sprintf(buf, "%pS\n", s->ctor);
81819f0f
CL
4596}
4597SLAB_ATTR_RO(ctor);
4598
81819f0f
CL
4599static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4600{
4601 return sprintf(buf, "%d\n", s->refcount - 1);
4602}
4603SLAB_ATTR_RO(aliases);
4604
81819f0f
CL
4605static ssize_t partial_show(struct kmem_cache *s, char *buf)
4606{
d9acf4b7 4607 return show_slab_objects(s, buf, SO_PARTIAL);
81819f0f
CL
4608}
4609SLAB_ATTR_RO(partial);
4610
4611static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4612{
d9acf4b7 4613 return show_slab_objects(s, buf, SO_CPU);
81819f0f
CL
4614}
4615SLAB_ATTR_RO(cpu_slabs);
4616
4617static ssize_t objects_show(struct kmem_cache *s, char *buf)
4618{
205ab99d 4619 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
81819f0f
CL
4620}
4621SLAB_ATTR_RO(objects);
4622
205ab99d
CL
4623static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4624{
4625 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4626}
4627SLAB_ATTR_RO(objects_partial);
4628
49e22585
CL
4629static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
4630{
4631 int objects = 0;
4632 int pages = 0;
4633 int cpu;
4634 int len;
4635
4636 for_each_online_cpu(cpu) {
4637 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4638
4639 if (page) {
4640 pages += page->pages;
4641 objects += page->pobjects;
4642 }
4643 }
4644
4645 len = sprintf(buf, "%d(%d)", objects, pages);
4646
4647#ifdef CONFIG_SMP
4648 for_each_online_cpu(cpu) {
4649 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4650
4651 if (page && len < PAGE_SIZE - 20)
4652 len += sprintf(buf + len, " C%d=%d(%d)", cpu,
4653 page->pobjects, page->pages);
4654 }
4655#endif
4656 return len + sprintf(buf + len, "\n");
4657}
4658SLAB_ATTR_RO(slabs_cpu_partial);
4659
a5a84755
CL
4660static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4661{
4662 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4663}
4664
4665static ssize_t reclaim_account_store(struct kmem_cache *s,
4666 const char *buf, size_t length)
4667{
4668 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4669 if (buf[0] == '1')
4670 s->flags |= SLAB_RECLAIM_ACCOUNT;
4671 return length;
4672}
4673SLAB_ATTR(reclaim_account);
4674
4675static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4676{
4677 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4678}
4679SLAB_ATTR_RO(hwcache_align);
4680
4681#ifdef CONFIG_ZONE_DMA
4682static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4683{
4684 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4685}
4686SLAB_ATTR_RO(cache_dma);
4687#endif
4688
4689static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4690{
4691 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4692}
4693SLAB_ATTR_RO(destroy_by_rcu);
4694
ab9a0f19
LJ
4695static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4696{
4697 return sprintf(buf, "%d\n", s->reserved);
4698}
4699SLAB_ATTR_RO(reserved);
4700
ab4d5ed5 4701#ifdef CONFIG_SLUB_DEBUG
a5a84755
CL
4702static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4703{
4704 return show_slab_objects(s, buf, SO_ALL);
4705}
4706SLAB_ATTR_RO(slabs);
4707
205ab99d
CL
4708static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4709{
4710 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4711}
4712SLAB_ATTR_RO(total_objects);
4713
81819f0f
CL
4714static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4715{
4716 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
4717}
4718
4719static ssize_t sanity_checks_store(struct kmem_cache *s,
4720 const char *buf, size_t length)
4721{
4722 s->flags &= ~SLAB_DEBUG_FREE;
b789ef51
CL
4723 if (buf[0] == '1') {
4724 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 4725 s->flags |= SLAB_DEBUG_FREE;
b789ef51 4726 }
81819f0f
CL
4727 return length;
4728}
4729SLAB_ATTR(sanity_checks);
4730
4731static ssize_t trace_show(struct kmem_cache *s, char *buf)
4732{
4733 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4734}
4735
4736static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4737 size_t length)
4738{
4739 s->flags &= ~SLAB_TRACE;
b789ef51
CL
4740 if (buf[0] == '1') {
4741 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 4742 s->flags |= SLAB_TRACE;
b789ef51 4743 }
81819f0f
CL
4744 return length;
4745}
4746SLAB_ATTR(trace);
4747
81819f0f
CL
4748static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4749{
4750 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4751}
4752
4753static ssize_t red_zone_store(struct kmem_cache *s,
4754 const char *buf, size_t length)
4755{
4756 if (any_slab_objects(s))
4757 return -EBUSY;
4758
4759 s->flags &= ~SLAB_RED_ZONE;
b789ef51
CL
4760 if (buf[0] == '1') {
4761 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 4762 s->flags |= SLAB_RED_ZONE;
b789ef51 4763 }
06b285dc 4764 calculate_sizes(s, -1);
81819f0f
CL
4765 return length;
4766}
4767SLAB_ATTR(red_zone);
4768
4769static ssize_t poison_show(struct kmem_cache *s, char *buf)
4770{
4771 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4772}
4773
4774static ssize_t poison_store(struct kmem_cache *s,
4775 const char *buf, size_t length)
4776{
4777 if (any_slab_objects(s))
4778 return -EBUSY;
4779
4780 s->flags &= ~SLAB_POISON;
b789ef51
CL
4781 if (buf[0] == '1') {
4782 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 4783 s->flags |= SLAB_POISON;
b789ef51 4784 }
06b285dc 4785 calculate_sizes(s, -1);
81819f0f
CL
4786 return length;
4787}
4788SLAB_ATTR(poison);
4789
4790static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4791{
4792 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4793}
4794
4795static ssize_t store_user_store(struct kmem_cache *s,
4796 const char *buf, size_t length)
4797{
4798 if (any_slab_objects(s))
4799 return -EBUSY;
4800
4801 s->flags &= ~SLAB_STORE_USER;
b789ef51
CL
4802 if (buf[0] == '1') {
4803 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 4804 s->flags |= SLAB_STORE_USER;
b789ef51 4805 }
06b285dc 4806 calculate_sizes(s, -1);
81819f0f
CL
4807 return length;
4808}
4809SLAB_ATTR(store_user);
4810
53e15af0
CL
4811static ssize_t validate_show(struct kmem_cache *s, char *buf)
4812{
4813 return 0;
4814}
4815
4816static ssize_t validate_store(struct kmem_cache *s,
4817 const char *buf, size_t length)
4818{
434e245d
CL
4819 int ret = -EINVAL;
4820
4821 if (buf[0] == '1') {
4822 ret = validate_slab_cache(s);
4823 if (ret >= 0)
4824 ret = length;
4825 }
4826 return ret;
53e15af0
CL
4827}
4828SLAB_ATTR(validate);
a5a84755
CL
4829
4830static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4831{
4832 if (!(s->flags & SLAB_STORE_USER))
4833 return -ENOSYS;
4834 return list_locations(s, buf, TRACK_ALLOC);
4835}
4836SLAB_ATTR_RO(alloc_calls);
4837
4838static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4839{
4840 if (!(s->flags & SLAB_STORE_USER))
4841 return -ENOSYS;
4842 return list_locations(s, buf, TRACK_FREE);
4843}
4844SLAB_ATTR_RO(free_calls);
4845#endif /* CONFIG_SLUB_DEBUG */
4846
4847#ifdef CONFIG_FAILSLAB
4848static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4849{
4850 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4851}
4852
4853static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4854 size_t length)
4855{
4856 s->flags &= ~SLAB_FAILSLAB;
4857 if (buf[0] == '1')
4858 s->flags |= SLAB_FAILSLAB;
4859 return length;
4860}
4861SLAB_ATTR(failslab);
ab4d5ed5 4862#endif
53e15af0 4863
2086d26a
CL
4864static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4865{
4866 return 0;
4867}
4868
4869static ssize_t shrink_store(struct kmem_cache *s,
4870 const char *buf, size_t length)
4871{
4872 if (buf[0] == '1') {
4873 int rc = kmem_cache_shrink(s);
4874
4875 if (rc)
4876 return rc;
4877 } else
4878 return -EINVAL;
4879 return length;
4880}
4881SLAB_ATTR(shrink);
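/*
 * Typical use from userspace (illustrative):
 *
 *	echo 1 > /sys/kernel/slab/<cache>/shrink
 *
 * Any value other than '1' is rejected by shrink_store() with -EINVAL.
 */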
4882
81819f0f 4883#ifdef CONFIG_NUMA
9824601e 4884static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
81819f0f 4885{
9824601e 4886 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
81819f0f
CL
4887}
4888
9824601e 4889static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
81819f0f
CL
4890 const char *buf, size_t length)
4891{
0121c619
CL
4892 unsigned long ratio;
4893 int err;
4894
4895 err = strict_strtoul(buf, 10, &ratio);
4896 if (err)
4897 return err;
4898
e2cb96b7 4899 if (ratio <= 100)
0121c619 4900 s->remote_node_defrag_ratio = ratio * 10;
81819f0f 4901
81819f0f
CL
4902 return length;
4903}
9824601e 4904SLAB_ATTR(remote_node_defrag_ratio);
81819f0f
CL
4905#endif
4906
8ff12cfc 4907#ifdef CONFIG_SLUB_STATS
8ff12cfc
CL
4908static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4909{
4910 unsigned long sum = 0;
4911 int cpu;
4912 int len;
4913 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
4914
4915 if (!data)
4916 return -ENOMEM;
4917
4918 for_each_online_cpu(cpu) {
9dfc6e68 4919 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
8ff12cfc
CL
4920
4921 data[cpu] = x;
4922 sum += x;
4923 }
4924
4925 len = sprintf(buf, "%lu", sum);
4926
50ef37b9 4927#ifdef CONFIG_SMP
8ff12cfc
CL
4928 for_each_online_cpu(cpu) {
4929 if (data[cpu] && len < PAGE_SIZE - 20)
50ef37b9 4930 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
8ff12cfc 4931 }
50ef37b9 4932#endif
8ff12cfc
CL
4933 kfree(data);
4934 return len + sprintf(buf + len, "\n");
4935}
4936
78eb00cc
DR
4937static void clear_stat(struct kmem_cache *s, enum stat_item si)
4938{
4939 int cpu;
4940
4941 for_each_online_cpu(cpu)
9dfc6e68 4942 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
78eb00cc
DR
4943}
4944
8ff12cfc
CL
4945#define STAT_ATTR(si, text) \
4946static ssize_t text##_show(struct kmem_cache *s, char *buf) \
4947{ \
4948 return show_stat(s, buf, si); \
4949} \
78eb00cc
DR
4950static ssize_t text##_store(struct kmem_cache *s, \
4951 const char *buf, size_t length) \
4952{ \
4953 if (buf[0] != '0') \
4954 return -EINVAL; \
4955 clear_stat(s, si); \
4956 return length; \
4957} \
4958SLAB_ATTR(text); \
8ff12cfc
CL
4959
4960STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4961STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4962STAT_ATTR(FREE_FASTPATH, free_fastpath);
4963STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4964STAT_ATTR(FREE_FROZEN, free_frozen);
4965STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4966STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4967STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4968STAT_ATTR(ALLOC_SLAB, alloc_slab);
4969STAT_ATTR(ALLOC_REFILL, alloc_refill);
e36a2652 4970STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
8ff12cfc
CL
4971STAT_ATTR(FREE_SLAB, free_slab);
4972STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4973STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4974STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4975STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4976STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4977STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
03e404af 4978STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
65c3376a 4979STAT_ATTR(ORDER_FALLBACK, order_fallback);
b789ef51
CL
4980STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
4981STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
49e22585
CL
4982STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
4983STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
8028dcea
AS
4984STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
4985STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
8ff12cfc
CL
4986#endif
4987
06428780 4988static struct attribute *slab_attrs[] = {
81819f0f
CL
4989 &slab_size_attr.attr,
4990 &object_size_attr.attr,
4991 &objs_per_slab_attr.attr,
4992 &order_attr.attr,
73d342b1 4993 &min_partial_attr.attr,
49e22585 4994 &cpu_partial_attr.attr,
81819f0f 4995 &objects_attr.attr,
205ab99d 4996 &objects_partial_attr.attr,
81819f0f
CL
4997 &partial_attr.attr,
4998 &cpu_slabs_attr.attr,
4999 &ctor_attr.attr,
81819f0f
CL
5000 &aliases_attr.attr,
5001 &align_attr.attr,
81819f0f
CL
5002 &hwcache_align_attr.attr,
5003 &reclaim_account_attr.attr,
5004 &destroy_by_rcu_attr.attr,
a5a84755 5005 &shrink_attr.attr,
ab9a0f19 5006 &reserved_attr.attr,
49e22585 5007 &slabs_cpu_partial_attr.attr,
ab4d5ed5 5008#ifdef CONFIG_SLUB_DEBUG
a5a84755
CL
5009 &total_objects_attr.attr,
5010 &slabs_attr.attr,
5011 &sanity_checks_attr.attr,
5012 &trace_attr.attr,
81819f0f
CL
5013 &red_zone_attr.attr,
5014 &poison_attr.attr,
5015 &store_user_attr.attr,
53e15af0 5016 &validate_attr.attr,
88a420e4
CL
5017 &alloc_calls_attr.attr,
5018 &free_calls_attr.attr,
ab4d5ed5 5019#endif
81819f0f
CL
5020#ifdef CONFIG_ZONE_DMA
5021 &cache_dma_attr.attr,
5022#endif
5023#ifdef CONFIG_NUMA
9824601e 5024 &remote_node_defrag_ratio_attr.attr,
8ff12cfc
CL
5025#endif
5026#ifdef CONFIG_SLUB_STATS
5027 &alloc_fastpath_attr.attr,
5028 &alloc_slowpath_attr.attr,
5029 &free_fastpath_attr.attr,
5030 &free_slowpath_attr.attr,
5031 &free_frozen_attr.attr,
5032 &free_add_partial_attr.attr,
5033 &free_remove_partial_attr.attr,
5034 &alloc_from_partial_attr.attr,
5035 &alloc_slab_attr.attr,
5036 &alloc_refill_attr.attr,
e36a2652 5037 &alloc_node_mismatch_attr.attr,
8ff12cfc
CL
5038 &free_slab_attr.attr,
5039 &cpuslab_flush_attr.attr,
5040 &deactivate_full_attr.attr,
5041 &deactivate_empty_attr.attr,
5042 &deactivate_to_head_attr.attr,
5043 &deactivate_to_tail_attr.attr,
5044 &deactivate_remote_frees_attr.attr,
03e404af 5045 &deactivate_bypass_attr.attr,
65c3376a 5046 &order_fallback_attr.attr,
b789ef51
CL
5047 &cmpxchg_double_fail_attr.attr,
5048 &cmpxchg_double_cpu_fail_attr.attr,
49e22585
CL
5049 &cpu_partial_alloc_attr.attr,
5050 &cpu_partial_free_attr.attr,
8028dcea
AS
5051 &cpu_partial_node_attr.attr,
5052 &cpu_partial_drain_attr.attr,
81819f0f 5053#endif
4c13dd3b
DM
5054#ifdef CONFIG_FAILSLAB
5055 &failslab_attr.attr,
5056#endif
5057
81819f0f
CL
5058 NULL
5059};
5060
5061static struct attribute_group slab_attr_group = {
5062 .attrs = slab_attrs,
5063};
5064
5065static ssize_t slab_attr_show(struct kobject *kobj,
5066 struct attribute *attr,
5067 char *buf)
5068{
5069 struct slab_attribute *attribute;
5070 struct kmem_cache *s;
5071 int err;
5072
5073 attribute = to_slab_attr(attr);
5074 s = to_slab(kobj);
5075
5076 if (!attribute->show)
5077 return -EIO;
5078
5079 err = attribute->show(s, buf);
5080
5081 return err;
5082}
5083
5084static ssize_t slab_attr_store(struct kobject *kobj,
5085 struct attribute *attr,
5086 const char *buf, size_t len)
5087{
5088 struct slab_attribute *attribute;
5089 struct kmem_cache *s;
5090 int err;
5091
5092 attribute = to_slab_attr(attr);
5093 s = to_slab(kobj);
5094
5095 if (!attribute->store)
5096 return -EIO;
5097
5098 err = attribute->store(s, buf, len);
5099
5100 return err;
5101}
5102
52cf25d0 5103static const struct sysfs_ops slab_sysfs_ops = {
81819f0f
CL
5104 .show = slab_attr_show,
5105 .store = slab_attr_store,
5106};
5107
5108static struct kobj_type slab_ktype = {
5109 .sysfs_ops = &slab_sysfs_ops,
5110};
5111
5112static int uevent_filter(struct kset *kset, struct kobject *kobj)
5113{
5114 struct kobj_type *ktype = get_ktype(kobj);
5115
5116 if (ktype == &slab_ktype)
5117 return 1;
5118 return 0;
5119}
5120
9cd43611 5121static const struct kset_uevent_ops slab_uevent_ops = {
81819f0f
CL
5122 .filter = uevent_filter,
5123};
5124
27c3a314 5125static struct kset *slab_kset;
81819f0f
CL
5126
5127#define ID_STR_LENGTH 64
5128
5129/* Create a unique string id for a slab cache:
6446faa2
CL
5130 *
5131 * Format :[flags-]size
81819f0f
CL
5132 */
5133static char *create_unique_id(struct kmem_cache *s)
5134{
5135 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5136 char *p = name;
5137
5138 BUG_ON(!name);
5139
5140 *p++ = ':';
5141 /*
5142 * First flags affecting slabcache operations. We will only
5143 * get here for aliasable slabs so we do not need to support
5144 * too many flags. The flags here must cover all flags that
5145 * are matched during merging to guarantee that the id is
5146 * unique.
5147 */
5148 if (s->flags & SLAB_CACHE_DMA)
5149 *p++ = 'd';
5150 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5151 *p++ = 'a';
5152 if (s->flags & SLAB_DEBUG_FREE)
5153 *p++ = 'F';
5a896d9e
VN
5154 if (!(s->flags & SLAB_NOTRACK))
5155 *p++ = 't';
81819f0f
CL
5156 if (p != name + 1)
5157 *p++ = '-';
5158 p += sprintf(p, "%07d", s->size);
5159 BUG_ON(p > name + ID_STR_LENGTH - 1);
5160 return name;
5161}
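/*
 * Example id (illustrative): a mergeable cache of size 192 with no DMA,
 * reclaim-account or debug flags and without SLAB_NOTRACK (so it gets
 * the 't') ends up as ":t-0000192", which is the kind of name the
 * /sys/kernel/slab symlinks for merged caches point at.
 */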
5162
5163static int sysfs_slab_add(struct kmem_cache *s)
5164{
5165 int err;
5166 const char *name;
45530c44 5167 int unmergeable = slab_unmergeable(s);
81819f0f 5168
81819f0f
CL
5169 if (unmergeable) {
5170 /*
5171 * The slab cache can never be merged, so we can use its proper name.
5172 * This is typically the case for debug situations. In that
5173 * case we can catch duplicate names easily.
5174 */
27c3a314 5175 sysfs_remove_link(&slab_kset->kobj, s->name);
81819f0f
CL
5176 name = s->name;
5177 } else {
5178 /*
5179 * Create a unique name for the slab as a target
5180 * for the symlinks.
5181 */
5182 name = create_unique_id(s);
5183 }
5184
27c3a314 5185 s->kobj.kset = slab_kset;
1eada11c
GKH
5186 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
5187 if (err) {
5188 kobject_put(&s->kobj);
81819f0f 5189 return err;
1eada11c 5190 }
81819f0f
CL
5191
5192 err = sysfs_create_group(&s->kobj, &slab_attr_group);
5788d8ad
XF
5193 if (err) {
5194 kobject_del(&s->kobj);
5195 kobject_put(&s->kobj);
81819f0f 5196 return err;
5788d8ad 5197 }
81819f0f
CL
5198 kobject_uevent(&s->kobj, KOBJ_ADD);
5199 if (!unmergeable) {
5200 /* Setup first alias */
5201 sysfs_slab_alias(s, s->name);
5202 kfree(name);
5203 }
5204 return 0;
5205}
5206
5207static void sysfs_slab_remove(struct kmem_cache *s)
5208{
97d06609 5209 if (slab_state < FULL)
2bce6485
CL
5210 /*
5211 * Sysfs has not been set up yet, so there is no need to remove the
5212 * cache from sysfs.
5213 */
5214 return;
5215
81819f0f
CL
5216 kobject_uevent(&s->kobj, KOBJ_REMOVE);
5217 kobject_del(&s->kobj);
151c602f 5218 kobject_put(&s->kobj);
81819f0f
CL
5219}
5220
5221/*
5222 * Need to buffer aliases during bootup until sysfs becomes
9f6c708e 5223 * available lest we lose that information.
81819f0f
CL
5224 */
5225struct saved_alias {
5226 struct kmem_cache *s;
5227 const char *name;
5228 struct saved_alias *next;
5229};
5230
5af328a5 5231static struct saved_alias *alias_list;
81819f0f
CL
5232
5233static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5234{
5235 struct saved_alias *al;
5236
97d06609 5237 if (slab_state == FULL) {
81819f0f
CL
5238 /*
5239 * If we have a leftover link then remove it.
5240 */
27c3a314
GKH
5241 sysfs_remove_link(&slab_kset->kobj, name);
5242 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
81819f0f
CL
5243 }
5244
5245 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5246 if (!al)
5247 return -ENOMEM;
5248
5249 al->s = s;
5250 al->name = name;
5251 al->next = alias_list;
5252 alias_list = al;
5253 return 0;
5254}
5255
5256static int __init slab_sysfs_init(void)
5257{
5b95a4ac 5258 struct kmem_cache *s;
81819f0f
CL
5259 int err;
5260
18004c5d 5261 mutex_lock(&slab_mutex);
2bce6485 5262
0ff21e46 5263 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
27c3a314 5264 if (!slab_kset) {
18004c5d 5265 mutex_unlock(&slab_mutex);
81819f0f
CL
5266 printk(KERN_ERR "Cannot register slab subsystem.\n");
5267 return -ENOSYS;
5268 }
5269
97d06609 5270 slab_state = FULL;
26a7bd03 5271
5b95a4ac 5272 list_for_each_entry(s, &slab_caches, list) {
26a7bd03 5273 err = sysfs_slab_add(s);
5d540fb7
CL
5274 if (err)
5275 printk(KERN_ERR "SLUB: Unable to add boot slab %s"
5276 " to sysfs\n", s->name);
26a7bd03 5277 }
81819f0f
CL
5278
5279 while (alias_list) {
5280 struct saved_alias *al = alias_list;
5281
5282 alias_list = alias_list->next;
5283 err = sysfs_slab_alias(al->s, al->name);
5d540fb7
CL
5284 if (err)
5285 printk(KERN_ERR "SLUB: Unable to add boot slab alias"
068ce415 5286 " %s to sysfs\n", al->name);
81819f0f
CL
5287 kfree(al);
5288 }
5289
18004c5d 5290 mutex_unlock(&slab_mutex);
81819f0f
CL
5291 resiliency_test();
5292 return 0;
5293}
5294
5295__initcall(slab_sysfs_init);
ab4d5ed5 5296#endif /* CONFIG_SYSFS */
57ed3eda
PE
5297
5298/*
5299 * The /proc/slabinfo ABI
5300 */
158a9624 5301#ifdef CONFIG_SLABINFO
0d7561c6 5302void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
57ed3eda
PE
5303{
5304 unsigned long nr_partials = 0;
5305 unsigned long nr_slabs = 0;
205ab99d
CL
5306 unsigned long nr_objs = 0;
5307 unsigned long nr_free = 0;
57ed3eda
PE
5308 int node;
5309
57ed3eda
PE
5310 for_each_online_node(node) {
5311 struct kmem_cache_node *n = get_node(s, node);
5312
5313 if (!n)
5314 continue;
5315
5316 nr_partials += n->nr_partial;
5317 nr_slabs += atomic_long_read(&n->nr_slabs);
205ab99d
CL
5318 nr_objs += atomic_long_read(&n->total_objects);
5319 nr_free += count_partial(n, count_free);
57ed3eda
PE
5320 }
5321
0d7561c6
GC
5322 sinfo->active_objs = nr_objs - nr_free;
5323 sinfo->num_objs = nr_objs;
5324 sinfo->active_slabs = nr_slabs;
5325 sinfo->num_slabs = nr_slabs;
5326 sinfo->objects_per_slab = oo_objects(s->oo);
5327 sinfo->cache_order = oo_order(s->oo);
57ed3eda
PE
5328}
5329
0d7561c6 5330void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
7b3c3a50 5331{
7b3c3a50
AD
5332}
5333
b7454ad3
GC
5334ssize_t slabinfo_write(struct file *file, const char __user *buffer,
5335 size_t count, loff_t *ppos)
7b3c3a50 5336{
b7454ad3 5337 return -EIO;
7b3c3a50 5338}
158a9624 5339#endif /* CONFIG_SLABINFO */