1 /*
2 * SLUB: A slab allocator that limits cache line use instead of queuing
3 * objects in per cpu and per node lists.
4 *
5 * The allocator synchronizes using per slab locks or atomic operations
6 * and only uses a centralized lock to manage a pool of partial slabs.
7 *
8 * (C) 2007 SGI, Christoph Lameter
9 * (C) 2011 Linux Foundation, Christoph Lameter
10 */
11
12 #include <linux/mm.h>
13 #include <linux/swap.h> /* struct reclaim_state */
14 #include <linux/module.h>
15 #include <linux/bit_spinlock.h>
16 #include <linux/interrupt.h>
17 #include <linux/bitops.h>
18 #include <linux/slab.h>
19 #include "slab.h"
20 #include <linux/proc_fs.h>
21 #include <linux/notifier.h>
22 #include <linux/seq_file.h>
23 #include <linux/kmemcheck.h>
24 #include <linux/cpu.h>
25 #include <linux/cpuset.h>
26 #include <linux/mempolicy.h>
27 #include <linux/ctype.h>
28 #include <linux/debugobjects.h>
29 #include <linux/kallsyms.h>
30 #include <linux/memory.h>
31 #include <linux/math64.h>
32 #include <linux/fault-inject.h>
33 #include <linux/stacktrace.h>
34 #include <linux/prefetch.h>
35 #include <linux/memcontrol.h>
36
37 #include <trace/events/kmem.h>
38
39 #include "internal.h"
40
41 /*
42 * Lock order:
43 * 1. slab_mutex (Global Mutex)
44 * 2. node->list_lock
45 * 3. slab_lock(page) (Only on some arches and for debugging)
46 *
47 * slab_mutex
48 *
49 * The role of the slab_mutex is to protect the list of all the slabs
50 * and to synchronize major metadata changes to slab cache structures.
51 *
52 * The slab_lock is only used for debugging and on arches that do not
53 * have the ability to do a cmpxchg_double. It only protects the second
54 * double word in the page struct. Meaning
55 * A. page->freelist -> List of object free in a page
56 * B. page->counters -> Counters of objects
57 * C. page->frozen -> frozen state
58 *
59 * If a slab is frozen then it is exempt from list management. It is not
60 * on any list. The processor that froze the slab is the one who can
61 * perform list operations on the page. Other processors may put objects
62 * onto the freelist but the processor that froze the slab is the only
63 * one that can retrieve the objects from the page's freelist.
64 *
65 * The list_lock protects the partial and full list on each node and
66 * the partial slab counter. If taken then no new slabs may be added or
67 * removed from the lists, nor may the number of partial slabs change.
68 * (Note that the total number of slabs is an atomic value that may be
69 * modified without taking the list lock).
70 *
71 * The list_lock is a centralized lock and thus we avoid taking it as
72 * much as possible. As long as SLUB does not have to handle partial
73 * slabs, operations can continue without any centralized lock. E.g.
74 * allocating a long series of objects that fill up slabs does not require
75 * the list lock.
76 * Interrupts are disabled during allocation and deallocation in order to
77 * make the slab allocator safe to use in the context of an irq. In addition
78 * interrupts are disabled to ensure that the processor does not change
79 * while handling per_cpu slabs, due to kernel preemption.
80 *
81 * SLUB assigns one slab for allocation to each processor.
82 * Allocations only occur from these slabs called cpu slabs.
83 *
84 * Slabs with free elements are kept on a partial list and during regular
85 * operations no list for full slabs is used. If an object in a full slab is
86 * freed then the slab will show up again on the partial lists.
87 * We track full slabs for debugging purposes though because otherwise we
88 * cannot scan all objects.
89 *
90 * Slabs are freed when they become empty. Teardown and setup is
91 * minimal so we rely on the page allocators per cpu caches for
92 * fast frees and allocs.
93 *
94 * Overloading of page flags that are otherwise used for LRU management.
95 *
96 * PageActive The slab is frozen and exempt from list processing.
97 * This means that the slab is dedicated to a purpose
98 * such as satisfying allocations for a specific
99 * processor. Objects may be freed in the slab while
100 * it is frozen but slab_free will then skip the usual
101 * list operations. It is up to the processor holding
102 * the slab to integrate the slab into the slab lists
103 * when the slab is no longer needed.
104 *
105 * One use of this flag is to mark slabs that are
106 * used for allocations. Then such a slab becomes a cpu
107 * slab. The cpu slab may be equipped with an additional
108 * freelist that allows lockless access to
109 * free objects in addition to the regular freelist
110 * that requires the slab lock.
111 *
112 * PageError Slab requires special handling due to debug
113 * options set. This moves slab handling out of
114 * the fast path and disables lockless freelists.
115 */
116
117 static inline int kmem_cache_debug(struct kmem_cache *s)
118 {
119 #ifdef CONFIG_SLUB_DEBUG
120 return unlikely(s->flags & SLAB_DEBUG_FLAGS);
121 #else
122 return 0;
123 #endif
124 }
125
126 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
127 {
128 #ifdef CONFIG_SLUB_CPU_PARTIAL
129 return !kmem_cache_debug(s);
130 #else
131 return false;
132 #endif
133 }
134
135 /*
136 * Issues still to be resolved:
137 *
138 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
139 *
140 * - Variable sizing of the per node arrays
141 */
142
143 /* Enable to test recovery from slab corruption on boot */
144 #undef SLUB_RESILIENCY_TEST
145
146 /* Enable to log cmpxchg failures */
147 #undef SLUB_DEBUG_CMPXCHG
148
149 /*
150 * Minimum number of partial slabs. These will be left on the partial
151 * lists even if they are empty. kmem_cache_shrink may reclaim them.
152 */
153 #define MIN_PARTIAL 5
154
155 /*
156 * Maximum number of desirable partial slabs.
157 * The existence of more partial slabs makes kmem_cache_shrink
158 * sort the partial list by the number of objects in use.
159 */
160 #define MAX_PARTIAL 10
161
162 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
163 SLAB_POISON | SLAB_STORE_USER)
164
165 /*
166 * Debugging flags that require metadata to be stored in the slab. These get
167 * disabled when slub_debug=O is used and a cache's min order increases with
168 * metadata.
169 */
170 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
171
172 /*
173 * Set of flags that will prevent slab merging
174 */
175 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
176 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
177 SLAB_FAILSLAB)
178
179 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
180 SLAB_CACHE_DMA | SLAB_NOTRACK)
181
182 #define OO_SHIFT 16
183 #define OO_MASK ((1 << OO_SHIFT) - 1)
184 #define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
185
186 /* Internal SLUB flags */
187 #define __OBJECT_POISON 0x80000000UL /* Poison object */
188 #define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */
189
190 #ifdef CONFIG_SMP
191 static struct notifier_block slab_notifier;
192 #endif
193
194 /*
195 * Tracking user of a slab.
196 */
197 #define TRACK_ADDRS_COUNT 16
198 struct track {
199 unsigned long addr; /* Called from address */
200 #ifdef CONFIG_STACKTRACE
201 unsigned long addrs[TRACK_ADDRS_COUNT]; /* Caller stack trace */
202 #endif
203 int cpu; /* Was running on cpu */
204 int pid; /* Pid context */
205 unsigned long when; /* When did the operation occur */
206 };
207
208 enum track_item { TRACK_ALLOC, TRACK_FREE };
209
210 #ifdef CONFIG_SYSFS
211 static int sysfs_slab_add(struct kmem_cache *);
212 static int sysfs_slab_alias(struct kmem_cache *, const char *);
213 static void sysfs_slab_remove(struct kmem_cache *);
214 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
215 #else
216 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
217 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
218 { return 0; }
219 static inline void sysfs_slab_remove(struct kmem_cache *s) { }
220
221 static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
222 #endif
223
224 static inline void stat(const struct kmem_cache *s, enum stat_item si)
225 {
226 #ifdef CONFIG_SLUB_STATS
227 __this_cpu_inc(s->cpu_slab->stat[si]);
228 #endif
229 }
230
231 /********************************************************************
232 * Core slab cache functions
233 *******************************************************************/
234
235 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
236 {
237 return s->node[node];
238 }
239
240 /* Verify that a pointer has an address that is valid within a slab page */
241 static inline int check_valid_pointer(struct kmem_cache *s,
242 struct page *page, const void *object)
243 {
244 void *base;
245
246 if (!object)
247 return 1;
248
249 base = page_address(page);
250 if (object < base || object >= base + page->objects * s->size ||
251 (object - base) % s->size) {
252 return 0;
253 }
254
255 return 1;
256 }
257
258 static inline void *get_freepointer(struct kmem_cache *s, void *object)
259 {
260 return *(void **)(object + s->offset);
261 }
262
263 static void prefetch_freepointer(const struct kmem_cache *s, void *object)
264 {
265 prefetch(object + s->offset);
266 }
267
268 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
269 {
270 void *p;
271
272 #ifdef CONFIG_DEBUG_PAGEALLOC
273 probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
274 #else
275 p = get_freepointer(s, object);
276 #endif
277 return p;
278 }
279
280 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
281 {
282 *(void **)(object + s->offset) = fp;
283 }
284
285 /* Loop over all objects in a slab */
286 #define for_each_object(__p, __s, __addr, __objects) \
287 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
288 __p += (__s)->size)
289
290 /* Determine object index from a given position */
291 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
292 {
293 return (p - addr) / s->size;
294 }
295
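/*
 * Editor's illustration (not part of the kernel): the free pointer lives
 * inside each free object at offset s->offset, so a slab's free objects
 * form a linked list threaded through the slab's own memory, and object
 * indices fall out of pointer arithmetic as in slab_index() above. The
 * sketch below is a minimal userspace model of that scheme; the 32-byte
 * object size, the zero free-pointer offset and all names are assumptions
 * for the demo, not SLUB's actual values.
 */
#if 0 /* standalone userspace sketch; compile separately, e.g. cc -std=c11 */
#include <stdio.h>
#include <string.h>

#define SLAB_BYTES 4096
#define OBJ_SIZE 32	/* assumed s->size, free pointer at offset 0 */

static unsigned char slab[SLAB_BYTES];

/* memcpy avoids alignment/aliasing concerns in portable userspace code */
static void *get_freepointer(void *object)
{
	void *p;

	memcpy(&p, object, sizeof(p));
	return p;
}

static void set_freepointer(void *object, void *fp)
{
	memcpy(object, &fp, sizeof(fp));
}

int main(void)
{
	void *freelist = NULL;
	void *p;

	/* Thread a freelist through every object in the slab. */
	for (p = slab; (unsigned char *)p < slab + SLAB_BYTES;
	     p = (unsigned char *)p + OBJ_SIZE) {
		set_freepointer(p, freelist);
		freelist = p;
	}

	/* Pop two objects; slab_index()-style arithmetic recovers indices. */
	p = freelist;
	freelist = get_freepointer(p);
	printf("object index %td\n", ((unsigned char *)p - slab) / OBJ_SIZE);
	p = freelist;
	freelist = get_freepointer(p);
	printf("object index %td\n", ((unsigned char *)p - slab) / OBJ_SIZE);
	/* list was built last-object-first, so index 127 pops before 126 */
	return 0;
}
#endif
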
296 static inline size_t slab_ksize(const struct kmem_cache *s)
297 {
298 #ifdef CONFIG_SLUB_DEBUG
299 /*
300 * Debugging requires use of the padding between object
301 * and whatever may come after it.
302 */
303 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
304 return s->object_size;
305
306 #endif
307 /*
308 * If we have the need to store the freelist pointer
309 * back there or track user information then we can
310 * only use the space before that information.
311 */
312 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
313 return s->inuse;
314 /*
315 * Else we can use all the padding etc for the allocation
316 */
317 return s->size;
318 }
319
320 static inline int order_objects(int order, unsigned long size, int reserved)
321 {
322 return ((PAGE_SIZE << order) - reserved) / size;
323 }
324
325 static inline struct kmem_cache_order_objects oo_make(int order,
326 unsigned long size, int reserved)
327 {
328 struct kmem_cache_order_objects x = {
329 (order << OO_SHIFT) + order_objects(order, size, reserved)
330 };
331
332 return x;
333 }
334
335 static inline int oo_order(struct kmem_cache_order_objects x)
336 {
337 return x.x >> OO_SHIFT;
338 }
339
340 static inline int oo_objects(struct kmem_cache_order_objects x)
341 {
342 return x.x & OO_MASK;
343 }
344
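/*
 * Editor's illustration (not part of the kernel): kmem_cache_order_objects
 * packs the page order and the object count into a single word using
 * OO_SHIFT/OO_MASK, and order_objects() computes how many objects fit in a
 * slab of a given order. A standalone sketch follows; PAGE_SIZE of 4096 and
 * the sample cache geometry are assumptions for the demo.
 */
#if 0 /* standalone userspace sketch of the order/objects packing */
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed */
#define OO_SHIFT 16
#define OO_MASK ((1UL << OO_SHIFT) - 1)

static unsigned long oo_make(int order, unsigned long size, int reserved)
{
	unsigned long objects = ((PAGE_SIZE << order) - reserved) / size;

	return ((unsigned long)order << OO_SHIFT) + objects;
}

int main(void)
{
	/* A hypothetical cache: 192-byte objects in an order-1 (8 KiB) slab. */
	unsigned long oo = oo_make(1, 192, 0);

	printf("order=%lu objects=%lu\n", oo >> OO_SHIFT, oo & OO_MASK);
	/* prints: order=1 objects=42 */
	return 0;
}
#endif
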
345 /*
346 * Per slab locking using the pagelock
347 */
348 static __always_inline void slab_lock(struct page *page)
349 {
350 bit_spin_lock(PG_locked, &page->flags);
351 }
352
353 static __always_inline void slab_unlock(struct page *page)
354 {
355 __bit_spin_unlock(PG_locked, &page->flags);
356 }
357
358 /* Interrupts must be disabled (for the fallback code to work right) */
359 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
360 void *freelist_old, unsigned long counters_old,
361 void *freelist_new, unsigned long counters_new,
362 const char *n)
363 {
364 VM_BUG_ON(!irqs_disabled());
365 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
366 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
367 if (s->flags & __CMPXCHG_DOUBLE) {
368 if (cmpxchg_double(&page->freelist, &page->counters,
369 freelist_old, counters_old,
370 freelist_new, counters_new))
371 return 1;
372 } else
373 #endif
374 {
375 slab_lock(page);
376 if (page->freelist == freelist_old && page->counters == counters_old) {
377 page->freelist = freelist_new;
378 page->counters = counters_new;
379 slab_unlock(page);
380 return 1;
381 }
382 slab_unlock(page);
383 }
384
385 cpu_relax();
386 stat(s, CMPXCHG_DOUBLE_FAIL);
387
388 #ifdef SLUB_DEBUG_CMPXCHG
389 printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
390 #endif
391
392 return 0;
393 }
394
395 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
396 void *freelist_old, unsigned long counters_old,
397 void *freelist_new, unsigned long counters_new,
398 const char *n)
399 {
400 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
401 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
402 if (s->flags & __CMPXCHG_DOUBLE) {
403 if (cmpxchg_double(&page->freelist, &page->counters,
404 freelist_old, counters_old,
405 freelist_new, counters_new))
406 return 1;
407 } else
408 #endif
409 {
410 unsigned long flags;
411
412 local_irq_save(flags);
413 slab_lock(page);
414 if (page->freelist == freelist_old && page->counters == counters_old) {
415 page->freelist = freelist_new;
416 page->counters = counters_new;
417 slab_unlock(page);
418 local_irq_restore(flags);
419 return 1;
420 }
421 slab_unlock(page);
422 local_irq_restore(flags);
423 }
424
425 cpu_relax();
426 stat(s, CMPXCHG_DOUBLE_FAIL);
427
428 #ifdef SLUB_DEBUG_CMPXCHG
429 printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
430 #endif
431
432 return 0;
433 }
434
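/*
 * Editor's illustration (not part of the kernel): the routines above update
 * page->freelist and page->counters as one unit, either with a hardware
 * cmpxchg_double or under slab_lock(). The standalone sketch below models
 * only the retry-loop shape with a C11 atomic two-word struct (which the
 * compiler may implement with an internal lock rather than a true
 * double-word cmpxchg); all names and the counters encoding are assumptions
 * for the demo.
 */
#if 0 /* standalone C11 sketch of the freelist+counters double-word update */
#include <stdatomic.h>
#include <stdio.h>

struct slub_state {
	void *freelist;
	unsigned long counters;
};

static _Atomic struct slub_state state;

/* Retry loop in the style of __cmpxchg_double_slab() callers. */
static void push_object(void *object)
{
	struct slub_state old, new;

	old = atomic_load(&state);
	do {
		*(void **)object = old.freelist;	/* set_freepointer() */
		new.freelist = object;
		new.counters = old.counters + 1;	/* stands in for the
							   packed inuse bits */
	} while (!atomic_compare_exchange_weak(&state, &old, new));
}

int main(void)
{
	void *slot_a = NULL, *slot_b = NULL;

	push_object(&slot_a);
	push_object(&slot_b);
	printf("counters=%lu top=%p\n", atomic_load(&state).counters,
	       atomic_load(&state).freelist);
	return 0;
}
#endif
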
435 #ifdef CONFIG_SLUB_DEBUG
436 /*
437 * Determine a map of objects in use on a page.
438 *
439 * The node's list_lock must be held to guarantee that the page does
440 * not vanish from under us.
441 */
442 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
443 {
444 void *p;
445 void *addr = page_address(page);
446
447 for (p = page->freelist; p; p = get_freepointer(s, p))
448 set_bit(slab_index(p, s, addr), map);
449 }
450
451 /*
452 * Debug settings:
453 */
454 #ifdef CONFIG_SLUB_DEBUG_ON
455 static int slub_debug = DEBUG_DEFAULT_FLAGS;
456 #else
457 static int slub_debug;
458 #endif
459
460 static char *slub_debug_slabs;
461 static int disable_higher_order_debug;
462
463 /*
464 * Object debugging
465 */
466 static void print_section(char *text, u8 *addr, unsigned int length)
467 {
468 print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
469 length, 1);
470 }
471
472 static struct track *get_track(struct kmem_cache *s, void *object,
473 enum track_item alloc)
474 {
475 struct track *p;
476
477 if (s->offset)
478 p = object + s->offset + sizeof(void *);
479 else
480 p = object + s->inuse;
481
482 return p + alloc;
483 }
484
485 static void set_track(struct kmem_cache *s, void *object,
486 enum track_item alloc, unsigned long addr)
487 {
488 struct track *p = get_track(s, object, alloc);
489
490 if (addr) {
491 #ifdef CONFIG_STACKTRACE
492 struct stack_trace trace;
493 int i;
494
495 trace.nr_entries = 0;
496 trace.max_entries = TRACK_ADDRS_COUNT;
497 trace.entries = p->addrs;
498 trace.skip = 3;
499 save_stack_trace(&trace);
500
501 /* See rant in lockdep.c */
502 if (trace.nr_entries != 0 &&
503 trace.entries[trace.nr_entries - 1] == ULONG_MAX)
504 trace.nr_entries--;
505
506 for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
507 p->addrs[i] = 0;
508 #endif
509 p->addr = addr;
510 p->cpu = smp_processor_id();
511 p->pid = current->pid;
512 p->when = jiffies;
513 } else
514 memset(p, 0, sizeof(struct track));
515 }
516
517 static void init_tracking(struct kmem_cache *s, void *object)
518 {
519 if (!(s->flags & SLAB_STORE_USER))
520 return;
521
522 set_track(s, object, TRACK_FREE, 0UL);
523 set_track(s, object, TRACK_ALLOC, 0UL);
524 }
525
526 static void print_track(const char *s, struct track *t)
527 {
528 if (!t->addr)
529 return;
530
531 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
532 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
533 #ifdef CONFIG_STACKTRACE
534 {
535 int i;
536 for (i = 0; i < TRACK_ADDRS_COUNT; i++)
537 if (t->addrs[i])
538 printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
539 else
540 break;
541 }
542 #endif
543 }
544
545 static void print_tracking(struct kmem_cache *s, void *object)
546 {
547 if (!(s->flags & SLAB_STORE_USER))
548 return;
549
550 print_track("Allocated", get_track(s, object, TRACK_ALLOC));
551 print_track("Freed", get_track(s, object, TRACK_FREE));
552 }
553
554 static void print_page_info(struct page *page)
555 {
556 printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
557 page, page->objects, page->inuse, page->freelist, page->flags);
558
559 }
560
561 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
562 {
563 va_list args;
564 char buf[100];
565
566 va_start(args, fmt);
567 vsnprintf(buf, sizeof(buf), fmt, args);
568 va_end(args);
569 printk(KERN_ERR "========================================"
570 "=====================================\n");
571 printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
572 printk(KERN_ERR "----------------------------------------"
573 "-------------------------------------\n\n");
574
575 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
576 }
577
578 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
579 {
580 va_list args;
581 char buf[100];
582
583 va_start(args, fmt);
584 vsnprintf(buf, sizeof(buf), fmt, args);
585 va_end(args);
586 printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
587 }
588
589 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
590 {
591 unsigned int off; /* Offset of last byte */
592 u8 *addr = page_address(page);
593
594 print_tracking(s, p);
595
596 print_page_info(page);
597
598 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
599 p, p - addr, get_freepointer(s, p));
600
601 if (p > addr + 16)
602 print_section("Bytes b4 ", p - 16, 16);
603
604 print_section("Object ", p, min_t(unsigned long, s->object_size,
605 PAGE_SIZE));
606 if (s->flags & SLAB_RED_ZONE)
607 print_section("Redzone ", p + s->object_size,
608 s->inuse - s->object_size);
609
610 if (s->offset)
611 off = s->offset + sizeof(void *);
612 else
613 off = s->inuse;
614
615 if (s->flags & SLAB_STORE_USER)
616 off += 2 * sizeof(struct track);
617
618 if (off != s->size)
619 /* Beginning of the filler is the free pointer */
620 print_section("Padding ", p + off, s->size - off);
621
622 dump_stack();
623 }
624
625 static void object_err(struct kmem_cache *s, struct page *page,
626 u8 *object, char *reason)
627 {
628 slab_bug(s, "%s", reason);
629 print_trailer(s, page, object);
630 }
631
632 static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
633 {
634 va_list args;
635 char buf[100];
636
637 va_start(args, fmt);
638 vsnprintf(buf, sizeof(buf), fmt, args);
639 va_end(args);
640 slab_bug(s, "%s", buf);
641 print_page_info(page);
642 dump_stack();
643 }
644
645 static void init_object(struct kmem_cache *s, void *object, u8 val)
646 {
647 u8 *p = object;
648
649 if (s->flags & __OBJECT_POISON) {
650 memset(p, POISON_FREE, s->object_size - 1);
651 p[s->object_size - 1] = POISON_END;
652 }
653
654 if (s->flags & SLAB_RED_ZONE)
655 memset(p + s->object_size, val, s->inuse - s->object_size);
656 }
657
658 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
659 void *from, void *to)
660 {
661 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
662 memset(from, data, to - from);
663 }
664
665 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
666 u8 *object, char *what,
667 u8 *start, unsigned int value, unsigned int bytes)
668 {
669 u8 *fault;
670 u8 *end;
671
672 fault = memchr_inv(start, value, bytes);
673 if (!fault)
674 return 1;
675
676 end = start + bytes;
677 while (end > fault && end[-1] == value)
678 end--;
679
680 slab_bug(s, "%s overwritten", what);
681 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
682 fault, end - 1, fault[0], value);
683 print_trailer(s, page, object);
684
685 restore_bytes(s, what, value, fault, end);
686 return 0;
687 }
688
689 /*
690 * Object layout:
691 *
692 * object address
693 * Bytes of the object to be managed.
694 * If the freepointer may overlay the object then the free
695 * pointer is the first word of the object.
696 *
697 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
698 * 0xa5 (POISON_END)
699 *
700 * object + s->object_size
701 * Padding to reach word boundary. This is also used for Redzoning.
702 * Padding is extended by another word if Redzoning is enabled and
703 * object_size == inuse.
704 *
705 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
706 * 0xcc (RED_ACTIVE) for objects in use.
707 *
708 * object + s->inuse
709 * Meta data starts here.
710 *
711 * A. Free pointer (if we cannot overwrite object on free)
712 * B. Tracking data for SLAB_STORE_USER
713 * C. Padding to reach required alignment boundary or at minimum
714 * one word if debugging is on to be able to detect writes
715 * before the word boundary.
716 *
717 * Padding is done using 0x5a (POISON_INUSE)
718 *
719 * object + s->size
720 * Nothing is used beyond s->size.
721 *
722 * If slabcaches are merged then the object_size and inuse boundaries are mostly
723 * ignored, and therefore no slab options that rely on these boundaries
724 * may be used with merged slabcaches.
725 */
726
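/*
 * Editor's illustration (not part of the kernel): a worked example of the
 * layout rules above for a hypothetical 24-byte object with red zoning,
 * poisoning (so the free pointer cannot overlay the object) and
 * SLAB_STORE_USER, on 64-bit, ignoring the final alignment step and
 * CONFIG_STACKTRACE. The struct and all values are assumptions for the demo.
 */
#if 0 /* standalone sketch of the debug object layout described above */
#include <stdio.h>

struct track_model {		/* models struct track w/o CONFIG_STACKTRACE */
	unsigned long addr;
	int cpu, pid;
	unsigned long when;
};

int main(void)
{
	unsigned long object_size = 24;
	/* object_size is already word aligned, so red zoning adds a word */
	unsigned long inuse = object_size + sizeof(void *);
	unsigned long offset = inuse;		/* free pointer after object */
	unsigned long track_off = offset + sizeof(void *);
	unsigned long size = track_off + 2 * sizeof(struct track_model);

	printf("object 0..%lu, red zone %lu..%lu, fp @%lu, tracks @%lu, size %lu\n",
	       object_size, object_size, inuse, offset, track_off, size);
	/* prints: object 0..24, red zone 24..32, fp @32, tracks @40, size 88 */
	return 0;
}
#endif
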
727 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
728 {
729 unsigned long off = s->inuse; /* The end of info */
730
731 if (s->offset)
732 /* Freepointer is placed after the object. */
733 off += sizeof(void *);
734
735 if (s->flags & SLAB_STORE_USER)
736 /* We also have user information there */
737 off += 2 * sizeof(struct track);
738
739 if (s->size == off)
740 return 1;
741
742 return check_bytes_and_report(s, page, p, "Object padding",
743 p + off, POISON_INUSE, s->size - off);
744 }
745
746 /* Check the pad bytes at the end of a slab page */
747 static int slab_pad_check(struct kmem_cache *s, struct page *page)
748 {
749 u8 *start;
750 u8 *fault;
751 u8 *end;
752 int length;
753 int remainder;
754
755 if (!(s->flags & SLAB_POISON))
756 return 1;
757
758 start = page_address(page);
759 length = (PAGE_SIZE << compound_order(page)) - s->reserved;
760 end = start + length;
761 remainder = length % s->size;
762 if (!remainder)
763 return 1;
764
765 fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
766 if (!fault)
767 return 1;
768 while (end > fault && end[-1] == POISON_INUSE)
769 end--;
770
771 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
772 print_section("Padding ", end - remainder, remainder);
773
774 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
775 return 0;
776 }
777
778 static int check_object(struct kmem_cache *s, struct page *page,
779 void *object, u8 val)
780 {
781 u8 *p = object;
782 u8 *endobject = object + s->object_size;
783
784 if (s->flags & SLAB_RED_ZONE) {
785 if (!check_bytes_and_report(s, page, object, "Redzone",
786 endobject, val, s->inuse - s->object_size))
787 return 0;
788 } else {
789 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
790 check_bytes_and_report(s, page, p, "Alignment padding",
791 endobject, POISON_INUSE, s->inuse - s->object_size);
792 }
793 }
794
795 if (s->flags & SLAB_POISON) {
796 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
797 (!check_bytes_and_report(s, page, p, "Poison", p,
798 POISON_FREE, s->object_size - 1) ||
799 !check_bytes_and_report(s, page, p, "Poison",
800 p + s->object_size - 1, POISON_END, 1)))
801 return 0;
802 /*
803 * check_pad_bytes cleans up on its own.
804 */
805 check_pad_bytes(s, page, p);
806 }
807
808 if (!s->offset && val == SLUB_RED_ACTIVE)
809 /*
810 * Object and freepointer overlap. Cannot check
811 * freepointer while object is allocated.
812 */
813 return 1;
814
815 /* Check free pointer validity */
816 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
817 object_err(s, page, p, "Freepointer corrupt");
818 /*
819 * No choice but to zap it and thus lose the remainder
820 * of the free objects in this slab. May cause
821 * another error because the object count is now wrong.
822 */
823 set_freepointer(s, p, NULL);
824 return 0;
825 }
826 return 1;
827 }
828
829 static int check_slab(struct kmem_cache *s, struct page *page)
830 {
831 int maxobj;
832
833 VM_BUG_ON(!irqs_disabled());
834
835 if (!PageSlab(page)) {
836 slab_err(s, page, "Not a valid slab page");
837 return 0;
838 }
839
840 maxobj = order_objects(compound_order(page), s->size, s->reserved);
841 if (page->objects > maxobj) {
842 slab_err(s, page, "objects %u > max %u",
843 s->name, page->objects, maxobj);
844 return 0;
845 }
846 if (page->inuse > page->objects) {
847 slab_err(s, page, "inuse %u > max %u",
848 s->name, page->inuse, page->objects);
849 return 0;
850 }
851 /* slab_pad_check() fixes things up after itself */
852 slab_pad_check(s, page);
853 return 1;
854 }
855
856 /*
857 * Determine if a certain object on a page is on the freelist. Must hold the
858 * slab lock to guarantee that the chains are in a consistent state.
859 */
860 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
861 {
862 int nr = 0;
863 void *fp;
864 void *object = NULL;
865 unsigned long max_objects;
866
867 fp = page->freelist;
868 while (fp && nr <= page->objects) {
869 if (fp == search)
870 return 1;
871 if (!check_valid_pointer(s, page, fp)) {
872 if (object) {
873 object_err(s, page, object,
874 "Freechain corrupt");
875 set_freepointer(s, object, NULL);
876 break;
877 } else {
878 slab_err(s, page, "Freepointer corrupt");
879 page->freelist = NULL;
880 page->inuse = page->objects;
881 slab_fix(s, "Freelist cleared");
882 return 0;
883 }
885 }
886 object = fp;
887 fp = get_freepointer(s, object);
888 nr++;
889 }
890
891 max_objects = order_objects(compound_order(page), s->size, s->reserved);
892 if (max_objects > MAX_OBJS_PER_PAGE)
893 max_objects = MAX_OBJS_PER_PAGE;
894
895 if (page->objects != max_objects) {
896 slab_err(s, page, "Wrong number of objects. Found %d but "
897 "should be %d", page->objects, max_objects);
898 page->objects = max_objects;
899 slab_fix(s, "Number of objects adjusted.");
900 }
901 if (page->inuse != page->objects - nr) {
902 slab_err(s, page, "Wrong object count. Counter is %d but "
903 "counted were %d", page->inuse, page->objects - nr);
904 page->inuse = page->objects - nr;
905 slab_fix(s, "Object count adjusted.");
906 }
907 return search == NULL;
908 }
909
910 static void trace(struct kmem_cache *s, struct page *page, void *object,
911 int alloc)
912 {
913 if (s->flags & SLAB_TRACE) {
914 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
915 s->name,
916 alloc ? "alloc" : "free",
917 object, page->inuse,
918 page->freelist);
919
920 if (!alloc)
921 print_section("Object ", (void *)object, s->object_size);
922
923 dump_stack();
924 }
925 }
926
927 /*
928 * Hooks for other subsystems that check memory allocations. In a typical
929 * production configuration these hooks should all produce no code at all.
930 */
931 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
932 {
933 flags &= gfp_allowed_mask;
934 lockdep_trace_alloc(flags);
935 might_sleep_if(flags & __GFP_WAIT);
936
937 return should_failslab(s->object_size, flags, s->flags);
938 }
939
940 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
941 {
942 flags &= gfp_allowed_mask;
943 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
944 kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
945 }
946
947 static inline void slab_free_hook(struct kmem_cache *s, void *x)
948 {
949 kmemleak_free_recursive(x, s->flags);
950
951 /*
952 * Trouble is that we may no longer disable interrupts in the fast path.
953 * So in order to make the debug calls that expect irqs to be
954 * disabled we need to disable interrupts temporarily.
955 */
956 #if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
957 {
958 unsigned long flags;
959
960 local_irq_save(flags);
961 kmemcheck_slab_free(s, x, s->object_size);
962 debug_check_no_locks_freed(x, s->object_size);
963 local_irq_restore(flags);
964 }
965 #endif
966 if (!(s->flags & SLAB_DEBUG_OBJECTS))
967 debug_check_no_obj_freed(x, s->object_size);
968 }
969
970 /*
971 * Tracking of fully allocated slabs for debugging purposes.
972 *
973 * list_lock must be held.
974 */
975 static void add_full(struct kmem_cache *s,
976 struct kmem_cache_node *n, struct page *page)
977 {
978 if (!(s->flags & SLAB_STORE_USER))
979 return;
980
981 list_add(&page->lru, &n->full);
982 }
983
984 /*
985 * list_lock must be held.
986 */
987 static void remove_full(struct kmem_cache *s, struct page *page)
988 {
989 if (!(s->flags & SLAB_STORE_USER))
990 return;
991
992 list_del(&page->lru);
993 }
994
995 /* Tracking of the number of slabs for debugging purposes */
996 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
997 {
998 struct kmem_cache_node *n = get_node(s, node);
999
1000 return atomic_long_read(&n->nr_slabs);
1001 }
1002
1003 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1004 {
1005 return atomic_long_read(&n->nr_slabs);
1006 }
1007
1008 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1009 {
1010 struct kmem_cache_node *n = get_node(s, node);
1011
1012 /*
1013 * May be called early in order to allocate a slab for the
1014 * kmem_cache_node structure. Solve the chicken-egg
1015 * dilemma by deferring the increment of the count during
1016 * bootstrap (see early_kmem_cache_node_alloc).
1017 */
1018 if (likely(n)) {
1019 atomic_long_inc(&n->nr_slabs);
1020 atomic_long_add(objects, &n->total_objects);
1021 }
1022 }
1023 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1024 {
1025 struct kmem_cache_node *n = get_node(s, node);
1026
1027 atomic_long_dec(&n->nr_slabs);
1028 atomic_long_sub(objects, &n->total_objects);
1029 }
1030
1031 /* Object debug checks for alloc/free paths */
1032 static void setup_object_debug(struct kmem_cache *s, struct page *page,
1033 void *object)
1034 {
1035 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1036 return;
1037
1038 init_object(s, object, SLUB_RED_INACTIVE);
1039 init_tracking(s, object);
1040 }
1041
1042 static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
1043 void *object, unsigned long addr)
1044 {
1045 if (!check_slab(s, page))
1046 goto bad;
1047
1048 if (!check_valid_pointer(s, page, object)) {
1049 object_err(s, page, object, "Freelist Pointer check fails");
1050 goto bad;
1051 }
1052
1053 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1054 goto bad;
1055
1056 /* Success. Perform special debug activities for allocs. */
1057 if (s->flags & SLAB_STORE_USER)
1058 set_track(s, object, TRACK_ALLOC, addr);
1059 trace(s, page, object, 1);
1060 init_object(s, object, SLUB_RED_ACTIVE);
1061 return 1;
1062
1063 bad:
1064 if (PageSlab(page)) {
1065 /*
1066 * If this is a slab page then let's do the best we can
1067 * to avoid issues in the future. Marking all objects
1068 * as used avoids touching the remaining objects.
1069 */
1070 slab_fix(s, "Marking all objects used");
1071 page->inuse = page->objects;
1072 page->freelist = NULL;
1073 }
1074 return 0;
1075 }
1076
1077 static noinline struct kmem_cache_node *free_debug_processing(
1078 struct kmem_cache *s, struct page *page, void *object,
1079 unsigned long addr, unsigned long *flags)
1080 {
1081 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1082
1083 spin_lock_irqsave(&n->list_lock, *flags);
1084 slab_lock(page);
1085
1086 if (!check_slab(s, page))
1087 goto fail;
1088
1089 if (!check_valid_pointer(s, page, object)) {
1090 slab_err(s, page, "Invalid object pointer 0x%p", object);
1091 goto fail;
1092 }
1093
1094 if (on_freelist(s, page, object)) {
1095 object_err(s, page, object, "Object already free");
1096 goto fail;
1097 }
1098
1099 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1100 goto out;
1101
1102 if (unlikely(s != page->slab_cache)) {
1103 if (!PageSlab(page)) {
1104 slab_err(s, page, "Attempt to free object(0x%p) "
1105 "outside of slab", object);
1106 } else if (!page->slab_cache) {
1107 printk(KERN_ERR
1108 "SLUB <none>: no slab for object 0x%p.\n",
1109 object);
1110 dump_stack();
1111 } else
1112 object_err(s, page, object,
1113 "page slab pointer corrupt.");
1114 goto fail;
1115 }
1116
1117 if (s->flags & SLAB_STORE_USER)
1118 set_track(s, object, TRACK_FREE, addr);
1119 trace(s, page, object, 0);
1120 init_object(s, object, SLUB_RED_INACTIVE);
1121 out:
1122 slab_unlock(page);
1123 /*
1124 * Keep the node's list_lock to preserve integrity
1125 * until the object is actually freed
1126 */
1127 return n;
1128
1129 fail:
1130 slab_unlock(page);
1131 spin_unlock_irqrestore(&n->list_lock, *flags);
1132 slab_fix(s, "Object at 0x%p not freed", object);
1133 return NULL;
1134 }
1135
1136 static int __init setup_slub_debug(char *str)
1137 {
1138 slub_debug = DEBUG_DEFAULT_FLAGS;
1139 if (*str++ != '=' || !*str)
1140 /*
1141 * No options specified. Switch on full debugging.
1142 */
1143 goto out;
1144
1145 if (*str == ',')
1146 /*
1147 * No options given, but a restriction on slabs follows. This means full
1148 * debugging for slabs matching a pattern.
1149 */
1150 goto check_slabs;
1151
1152 if (tolower(*str) == 'o') {
1153 /*
1154 * Avoid enabling debugging on caches if their minimum order
1155 * would increase as a result.
1156 */
1157 disable_higher_order_debug = 1;
1158 goto out;
1159 }
1160
1161 slub_debug = 0;
1162 if (*str == '-')
1163 /*
1164 * Switch off all debugging measures.
1165 */
1166 goto out;
1167
1168 /*
1169 * Determine which debug features should be switched on
1170 */
1171 for (; *str && *str != ','; str++) {
1172 switch (tolower(*str)) {
1173 case 'f':
1174 slub_debug |= SLAB_DEBUG_FREE;
1175 break;
1176 case 'z':
1177 slub_debug |= SLAB_RED_ZONE;
1178 break;
1179 case 'p':
1180 slub_debug |= SLAB_POISON;
1181 break;
1182 case 'u':
1183 slub_debug |= SLAB_STORE_USER;
1184 break;
1185 case 't':
1186 slub_debug |= SLAB_TRACE;
1187 break;
1188 case 'a':
1189 slub_debug |= SLAB_FAILSLAB;
1190 break;
1191 default:
1192 printk(KERN_ERR "slub_debug option '%c' "
1193 "unknown. skipped\n", *str);
1194 }
1195 }
1196
1197 check_slabs:
1198 if (*str == ',')
1199 slub_debug_slabs = str + 1;
1200 out:
1201 return 1;
1202 }
1203
1204 __setup("slub_debug", setup_slub_debug);
1205
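/*
 * Editor's note: example command lines accepted by the parser above, as
 * the code reads (a sketch of usage, not authoritative documentation):
 *
 *	slub_debug		full debugging (DEBUG_DEFAULT_FLAGS), all caches
 *	slub_debug=FZ		sanity checks (F) and red zoning (Z) everywhere
 *	slub_debug=,dentry	full debugging, but only for caches whose name
 *				starts with "dentry"
 *	slub_debug=P,kmalloc-64	poisoning for the kmalloc-64 cache only
 *	slub_debug=-		switch all debugging off
 *	slub_debug=O		full debugging, except it is dropped on caches
 *				whose minimum order the debug metadata would
 *				raise (see DEBUG_METADATA_FLAGS)
 */
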
1206 static unsigned long kmem_cache_flags(unsigned long object_size,
1207 unsigned long flags, const char *name,
1208 void (*ctor)(void *))
1209 {
1210 /*
1211 * Enable debugging if selected on the kernel commandline.
1212 */
1213 if (slub_debug && (!slub_debug_slabs ||
1214 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
1215 flags |= slub_debug;
1216
1217 return flags;
1218 }
1219 #else
1220 static inline void setup_object_debug(struct kmem_cache *s,
1221 struct page *page, void *object) {}
1222
1223 static inline int alloc_debug_processing(struct kmem_cache *s,
1224 struct page *page, void *object, unsigned long addr) { return 0; }
1225
1226 static inline struct kmem_cache_node *free_debug_processing(
1227 struct kmem_cache *s, struct page *page, void *object,
1228 unsigned long addr, unsigned long *flags) { return NULL; }
1229
1230 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1231 { return 1; }
1232 static inline int check_object(struct kmem_cache *s, struct page *page,
1233 void *object, u8 val) { return 1; }
1234 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1235 struct page *page) {}
1236 static inline void remove_full(struct kmem_cache *s, struct page *page) {}
1237 static inline unsigned long kmem_cache_flags(unsigned long object_size,
1238 unsigned long flags, const char *name,
1239 void (*ctor)(void *))
1240 {
1241 return flags;
1242 }
1243 #define slub_debug 0
1244
1245 #define disable_higher_order_debug 0
1246
1247 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1248 { return 0; }
1249 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1250 { return 0; }
1251 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1252 int objects) {}
1253 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1254 int objects) {}
1255
1256 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
1257 { return 0; }
1258
1259 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1260 void *object) {}
1261
1262 static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
1263
1264 #endif /* CONFIG_SLUB_DEBUG */
1265
1266 /*
1267 * Slab allocation and freeing
1268 */
1269 static inline struct page *alloc_slab_page(gfp_t flags, int node,
1270 struct kmem_cache_order_objects oo)
1271 {
1272 int order = oo_order(oo);
1273
1274 flags |= __GFP_NOTRACK;
1275
1276 if (node == NUMA_NO_NODE)
1277 return alloc_pages(flags, order);
1278 else
1279 return alloc_pages_exact_node(node, flags, order);
1280 }
1281
1282 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1283 {
1284 struct page *page;
1285 struct kmem_cache_order_objects oo = s->oo;
1286 gfp_t alloc_gfp;
1287
1288 flags &= gfp_allowed_mask;
1289
1290 if (flags & __GFP_WAIT)
1291 local_irq_enable();
1292
1293 flags |= s->allocflags;
1294
1295 /*
1296 * Let the initial higher-order allocation fail under memory pressure
1297 * so we fall back to the minimum order allocation.
1298 */
1299 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1300
1301 page = alloc_slab_page(alloc_gfp, node, oo);
1302 if (unlikely(!page)) {
1303 oo = s->min;
1304 /*
1305 * Allocation may have failed due to fragmentation.
1306 * Try a lower order alloc if possible
1307 */
1308 page = alloc_slab_page(flags, node, oo);
1309
1310 if (page)
1311 stat(s, ORDER_FALLBACK);
1312 }
1313
1314 if (kmemcheck_enabled && page
1315 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
1316 int pages = 1 << oo_order(oo);
1317
1318 kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
1319
1320 /*
1321 * Objects from caches that have a constructor don't get
1322 * cleared when they're allocated, so we need to do it here.
1323 */
1324 if (s->ctor)
1325 kmemcheck_mark_uninitialized_pages(page, pages);
1326 else
1327 kmemcheck_mark_unallocated_pages(page, pages);
1328 }
1329
1330 if (flags & __GFP_WAIT)
1331 local_irq_disable();
1332 if (!page)
1333 return NULL;
1334
1335 page->objects = oo_objects(oo);
1336 mod_zone_page_state(page_zone(page),
1337 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1338 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1339 1 << oo_order(oo));
1340
1341 return page;
1342 }
1343
1344 static void setup_object(struct kmem_cache *s, struct page *page,
1345 void *object)
1346 {
1347 setup_object_debug(s, page, object);
1348 if (unlikely(s->ctor))
1349 s->ctor(object);
1350 }
1351
1352 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1353 {
1354 struct page *page;
1355 void *start;
1356 void *last;
1357 void *p;
1358 int order;
1359
1360 BUG_ON(flags & GFP_SLAB_BUG_MASK);
1361
1362 page = allocate_slab(s,
1363 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1364 if (!page)
1365 goto out;
1366
1367 order = compound_order(page);
1368 inc_slabs_node(s, page_to_nid(page), page->objects);
1369 memcg_bind_pages(s, order);
1370 page->slab_cache = s;
1371 __SetPageSlab(page);
1372 if (page->pfmemalloc)
1373 SetPageSlabPfmemalloc(page);
1374
1375 start = page_address(page);
1376
1377 if (unlikely(s->flags & SLAB_POISON))
1378 memset(start, POISON_INUSE, PAGE_SIZE << order);
1379
1380 last = start;
1381 for_each_object(p, s, start, page->objects) {
1382 setup_object(s, page, last);
1383 set_freepointer(s, last, p);
1384 last = p;
1385 }
1386 setup_object(s, page, last);
1387 set_freepointer(s, last, NULL);
1388
1389 page->freelist = start;
1390 page->inuse = page->objects;
1391 page->frozen = 1;
1392 out:
1393 return page;
1394 }
1395
1396 static void __free_slab(struct kmem_cache *s, struct page *page)
1397 {
1398 int order = compound_order(page);
1399 int pages = 1 << order;
1400
1401 if (kmem_cache_debug(s)) {
1402 void *p;
1403
1404 slab_pad_check(s, page);
1405 for_each_object(p, s, page_address(page),
1406 page->objects)
1407 check_object(s, page, p, SLUB_RED_INACTIVE);
1408 }
1409
1410 kmemcheck_free_shadow(page, compound_order(page));
1411
1412 mod_zone_page_state(page_zone(page),
1413 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1414 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1415 -pages);
1416
1417 __ClearPageSlabPfmemalloc(page);
1418 __ClearPageSlab(page);
1419
1420 memcg_release_pages(s, order);
1421 page_mapcount_reset(page);
1422 if (current->reclaim_state)
1423 current->reclaim_state->reclaimed_slab += pages;
1424 __free_memcg_kmem_pages(page, order);
1425 }
1426
1427 #define need_reserve_slab_rcu \
1428 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1429
1430 static void rcu_free_slab(struct rcu_head *h)
1431 {
1432 struct page *page;
1433
1434 if (need_reserve_slab_rcu)
1435 page = virt_to_head_page(h);
1436 else
1437 page = container_of((struct list_head *)h, struct page, lru);
1438
1439 __free_slab(page->slab_cache, page);
1440 }
1441
1442 static void free_slab(struct kmem_cache *s, struct page *page)
1443 {
1444 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1445 struct rcu_head *head;
1446
1447 if (need_reserve_slab_rcu) {
1448 int order = compound_order(page);
1449 int offset = (PAGE_SIZE << order) - s->reserved;
1450
1451 VM_BUG_ON(s->reserved != sizeof(*head));
1452 head = page_address(page) + offset;
1453 } else {
1454 /*
1455 * RCU free overloads the RCU head over the LRU
1456 */
1457 head = (void *)&page->lru;
1458 }
1459
1460 call_rcu(head, rcu_free_slab);
1461 } else
1462 __free_slab(s, page);
1463 }
1464
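/*
 * Editor's illustration (not part of the kernel): free_slab() reuses the
 * storage of page->lru as an rcu_head when it fits (need_reserve_slab_rcu),
 * and rcu_free_slab() recovers the page with container_of(). The standalone
 * sketch below models that overlay-and-recover pattern; struct page_model
 * and the other names are assumptions for the demo.
 */
#if 0 /* standalone sketch of overloading one field and recovering the container */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct rcu_head { void *next; void (*func)(struct rcu_head *); };

struct page_model {
	unsigned long flags;
	struct list_head lru;	/* reused as rcu_head storage while freeing */
};

/* The inverse of the need_reserve_slab_rcu size check above. */
_Static_assert(sizeof(struct list_head) >= sizeof(struct rcu_head),
	       "lru too small: would need the reserved slab tail instead");

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void rcu_free_slab_model(struct rcu_head *h)
{
	struct page_model *page =
		container_of((struct list_head *)h, struct page_model, lru);

	printf("freeing page at %p\n", (void *)page);
}

int main(void)
{
	struct page_model page;
	/* Overload the lru field, as free_slab() does. */
	struct rcu_head *head = (struct rcu_head *)&page.lru;

	rcu_free_slab_model(head);	/* normally deferred via call_rcu() */
	return 0;
}
#endif
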
1465 static void discard_slab(struct kmem_cache *s, struct page *page)
1466 {
1467 dec_slabs_node(s, page_to_nid(page), page->objects);
1468 free_slab(s, page);
1469 }
1470
1471 /*
1472 * Management of partially allocated slabs.
1473 *
1474 * list_lock must be held.
1475 */
1476 static inline void add_partial(struct kmem_cache_node *n,
1477 struct page *page, int tail)
1478 {
1479 n->nr_partial++;
1480 if (tail == DEACTIVATE_TO_TAIL)
1481 list_add_tail(&page->lru, &n->partial);
1482 else
1483 list_add(&page->lru, &n->partial);
1484 }
1485
1486 /*
1487 * list_lock must be held.
1488 */
1489 static inline void remove_partial(struct kmem_cache_node *n,
1490 struct page *page)
1491 {
1492 list_del(&page->lru);
1493 n->nr_partial--;
1494 }
1495
1496 /*
1497 * Remove slab from the partial list, freeze it and
1498 * return the pointer to the freelist.
1499 *
1500 * Returns a list of objects or NULL if it fails.
1501 *
1502 * Must hold list_lock since we modify the partial list.
1503 */
1504 static inline void *acquire_slab(struct kmem_cache *s,
1505 struct kmem_cache_node *n, struct page *page,
1506 int mode, int *objects)
1507 {
1508 void *freelist;
1509 unsigned long counters;
1510 struct page new;
1511
1512 /*
1513 * Zap the freelist and set the frozen bit.
1514 * The old freelist is the list of objects for the
1515 * per cpu allocation list.
1516 */
1517 freelist = page->freelist;
1518 counters = page->counters;
1519 new.counters = counters;
1520 *objects = new.objects - new.inuse;
1521 if (mode) {
1522 new.inuse = page->objects;
1523 new.freelist = NULL;
1524 } else {
1525 new.freelist = freelist;
1526 }
1527
1528 VM_BUG_ON(new.frozen);
1529 new.frozen = 1;
1530
1531 if (!__cmpxchg_double_slab(s, page,
1532 freelist, counters,
1533 new.freelist, new.counters,
1534 "acquire_slab"))
1535 return NULL;
1536
1537 remove_partial(n, page);
1538 WARN_ON(!freelist);
1539 return freelist;
1540 }
1541
1542 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1543 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1544
1545 /*
1546 * Try to allocate a partial slab from a specific node.
1547 */
1548 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1549 struct kmem_cache_cpu *c, gfp_t flags)
1550 {
1551 struct page *page, *page2;
1552 void *object = NULL;
1553 int available = 0;
1554 int objects;
1555
1556 /*
1557 * Racy check. If we mistakenly see no partial slabs then we
1558 * just allocate an empty slab. If we mistakenly try to get a
1559 * partial slab and there is none available then get_partial_node()
1560 * will return NULL.
1561 */
1562 if (!n || !n->nr_partial)
1563 return NULL;
1564
1565 spin_lock(&n->list_lock);
1566 list_for_each_entry_safe(page, page2, &n->partial, lru) {
1567 void *t;
1568
1569 if (!pfmemalloc_match(page, flags))
1570 continue;
1571
1572 t = acquire_slab(s, n, page, object == NULL, &objects);
1573 if (!t)
1574 break;
1575
1576 available += objects;
1577 if (!object) {
1578 c->page = page;
1579 stat(s, ALLOC_FROM_PARTIAL);
1580 object = t;
1581 } else {
1582 put_cpu_partial(s, page, 0);
1583 stat(s, CPU_PARTIAL_NODE);
1584 }
1585 if (!kmem_cache_has_cpu_partial(s)
1586 || available > s->cpu_partial / 2)
1587 break;
1588
1589 }
1590 spin_unlock(&n->list_lock);
1591 return object;
1592 }
1593
1594 /*
1595 * Get a page from somewhere. Search in increasing NUMA distances.
1596 */
1597 static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1598 struct kmem_cache_cpu *c)
1599 {
1600 #ifdef CONFIG_NUMA
1601 struct zonelist *zonelist;
1602 struct zoneref *z;
1603 struct zone *zone;
1604 enum zone_type high_zoneidx = gfp_zone(flags);
1605 void *object;
1606 unsigned int cpuset_mems_cookie;
1607
1608 /*
1609 * The defrag ratio allows a configuration of the tradeoffs between
1610 * inter node defragmentation and node local allocations. A lower
1611 * defrag_ratio increases the tendency to do local allocations
1612 * instead of attempting to obtain partial slabs from other nodes.
1613 *
1614 * If the defrag_ratio is set to 0 then kmalloc() always
1615 * returns node local objects. If the ratio is higher then kmalloc()
1616 * may return off node objects because partial slabs are obtained
1617 * from other nodes and filled up.
1618 *
1619 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1620 * defrag_ratio = 1000) then nearly every allocation will
1621 * first attempt to defrag slab caches on other nodes. This means
1622 * scanning over all nodes to look for partial slabs which may be
1623 * expensive if we do it every time we are trying to find a slab
1624 * with available objects.
1625 */
1626 if (!s->remote_node_defrag_ratio ||
1627 get_cycles() % 1024 > s->remote_node_defrag_ratio)
1628 return NULL;
1629
1630 do {
1631 cpuset_mems_cookie = get_mems_allowed();
1632 zonelist = node_zonelist(slab_node(), flags);
1633 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1634 struct kmem_cache_node *n;
1635
1636 n = get_node(s, zone_to_nid(zone));
1637
1638 if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1639 n->nr_partial > s->min_partial) {
1640 object = get_partial_node(s, n, c, flags);
1641 if (object) {
1642 /*
1643 * Return the object even if
1644 * put_mems_allowed indicated that
1645 * the cpuset mems_allowed was
1646 * updated in parallel. It's a
1647 * harmless race between the alloc
1648 * and the cpuset update.
1649 */
1650 put_mems_allowed(cpuset_mems_cookie);
1651 return object;
1652 }
1653 }
1654 }
1655 } while (!put_mems_allowed(cpuset_mems_cookie));
1656 #endif
1657 return NULL;
1658 }
1659
1660 /*
1661 * Get a partial page, lock it and return it.
1662 */
1663 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1664 struct kmem_cache_cpu *c)
1665 {
1666 void *object;
1667 int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
1668
1669 object = get_partial_node(s, get_node(s, searchnode), c, flags);
1670 if (object || node != NUMA_NO_NODE)
1671 return object;
1672
1673 return get_any_partial(s, flags, c);
1674 }
1675
1676 #ifdef CONFIG_PREEMPT
1677 /*
1678 * Calculate the next globally unique transaction for disambiguation
1679 * during cmpxchg. The transactions start with the cpu number and are then
1680 * incremented by CONFIG_NR_CPUS.
1681 */
1682 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
1683 #else
1684 /*
1685 * No preemption supported, therefore there is also no need to check for
1686 * different cpus.
1687 */
1688 #define TID_STEP 1
1689 #endif
1690
1691 static inline unsigned long next_tid(unsigned long tid)
1692 {
1693 return tid + TID_STEP;
1694 }
1695
1696 static inline unsigned int tid_to_cpu(unsigned long tid)
1697 {
1698 return tid % TID_STEP;
1699 }
1700
1701 static inline unsigned long tid_to_event(unsigned long tid)
1702 {
1703 return tid / TID_STEP;
1704 }
1705
1706 static inline unsigned int init_tid(int cpu)
1707 {
1708 return cpu;
1709 }
1710
1711 static inline void note_cmpxchg_failure(const char *n,
1712 const struct kmem_cache *s, unsigned long tid)
1713 {
1714 #ifdef SLUB_DEBUG_CMPXCHG
1715 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1716
1717 printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
1718
1719 #ifdef CONFIG_PREEMPT
1720 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1721 printk("due to cpu change %d -> %d\n",
1722 tid_to_cpu(tid), tid_to_cpu(actual_tid));
1723 else
1724 #endif
1725 if (tid_to_event(tid) != tid_to_event(actual_tid))
1726 printk("due to cpu running other code. Event %ld->%ld\n",
1727 tid_to_event(tid), tid_to_event(actual_tid));
1728 else
1729 printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
1730 actual_tid, tid, next_tid(tid));
1731 #endif
1732 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
1733 }
1734
1735 static void init_kmem_cache_cpus(struct kmem_cache *s)
1736 {
1737 int cpu;
1738
1739 for_each_possible_cpu(cpu)
1740 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
1741 }
1742
1743 /*
1744 * Remove the cpu slab
1745 */
1746 static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
1747 {
1748 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
1749 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1750 int lock = 0;
1751 enum slab_modes l = M_NONE, m = M_NONE;
1752 void *nextfree;
1753 int tail = DEACTIVATE_TO_HEAD;
1754 struct page new;
1755 struct page old;
1756
1757 if (page->freelist) {
1758 stat(s, DEACTIVATE_REMOTE_FREES);
1759 tail = DEACTIVATE_TO_TAIL;
1760 }
1761
1762 /*
1763 * Stage one: Free all available per cpu objects back
1764 * to the page freelist while it is still frozen. Leave the
1765 * last one.
1766 *
1767 * There is no need to take the list_lock because the page
1768 * is still frozen.
1769 */
1770 while (freelist && (nextfree = get_freepointer(s, freelist))) {
1771 void *prior;
1772 unsigned long counters;
1773
1774 do {
1775 prior = page->freelist;
1776 counters = page->counters;
1777 set_freepointer(s, freelist, prior);
1778 new.counters = counters;
1779 new.inuse--;
1780 VM_BUG_ON(!new.frozen);
1781
1782 } while (!__cmpxchg_double_slab(s, page,
1783 prior, counters,
1784 freelist, new.counters,
1785 "drain percpu freelist"));
1786
1787 freelist = nextfree;
1788 }
1789
1790 /*
1791 * Stage two: Ensure that the page is unfrozen while the
1792 * list presence reflects the actual number of objects
1793 * during unfreeze.
1794 *
1795 * We setup the list membership and then perform a cmpxchg
1796 * with the count. If there is a mismatch then the page
1797 * is not unfrozen but the page is on the wrong list.
1798 *
1799 * Then we restart the process which may have to remove
1800 * the page from the list that we just put it on again
1801 * because the number of objects in the slab may have
1802 * changed.
1803 */
1804 redo:
1805
1806 old.freelist = page->freelist;
1807 old.counters = page->counters;
1808 VM_BUG_ON(!old.frozen);
1809
1810 /* Determine target state of the slab */
1811 new.counters = old.counters;
1812 if (freelist) {
1813 new.inuse--;
1814 set_freepointer(s, freelist, old.freelist);
1815 new.freelist = freelist;
1816 } else
1817 new.freelist = old.freelist;
1818
1819 new.frozen = 0;
1820
1821 if (!new.inuse && n->nr_partial > s->min_partial)
1822 m = M_FREE;
1823 else if (new.freelist) {
1824 m = M_PARTIAL;
1825 if (!lock) {
1826 lock = 1;
1827 /*
1828 * Taking the spinlock removes the possibility
1829 * that acquire_slab() will see a slab page that
1830 * is frozen
1831 */
1832 spin_lock(&n->list_lock);
1833 }
1834 } else {
1835 m = M_FULL;
1836 if (kmem_cache_debug(s) && !lock) {
1837 lock = 1;
1838 /*
1839 * This also ensures that the scanning of full
1840 * slabs from diagnostic functions will not see
1841 * any frozen slabs.
1842 */
1843 spin_lock(&n->list_lock);
1844 }
1845 }
1846
1847 if (l != m) {
1848
1849 if (l == M_PARTIAL)
1850
1851 remove_partial(n, page);
1852
1853 else if (l == M_FULL)
1854
1855 remove_full(s, page);
1856
1857 if (m == M_PARTIAL) {
1858
1859 add_partial(n, page, tail);
1860 stat(s, tail);
1861
1862 } else if (m == M_FULL) {
1863
1864 stat(s, DEACTIVATE_FULL);
1865 add_full(s, n, page);
1866
1867 }
1868 }
1869
1870 l = m;
1871 if (!__cmpxchg_double_slab(s, page,
1872 old.freelist, old.counters,
1873 new.freelist, new.counters,
1874 "unfreezing slab"))
1875 goto redo;
1876
1877 if (lock)
1878 spin_unlock(&n->list_lock);
1879
1880 if (m == M_FREE) {
1881 stat(s, DEACTIVATE_EMPTY);
1882 discard_slab(s, page);
1883 stat(s, FREE_SLAB);
1884 }
1885 }
1886
1887 /*
1888 * Unfreeze all the cpu partial slabs.
1889 *
1890 * This function must be called with interrupts disabled
1891 * for the cpu using c (or some other guarantee must be in place
1892 * to rule out concurrent accesses).
1893 */
1894 static void unfreeze_partials(struct kmem_cache *s,
1895 struct kmem_cache_cpu *c)
1896 {
1897 #ifdef CONFIG_SLUB_CPU_PARTIAL
1898 struct kmem_cache_node *n = NULL, *n2 = NULL;
1899 struct page *page, *discard_page = NULL;
1900
1901 while ((page = c->partial)) {
1902 struct page new;
1903 struct page old;
1904
1905 c->partial = page->next;
1906
1907 n2 = get_node(s, page_to_nid(page));
1908 if (n != n2) {
1909 if (n)
1910 spin_unlock(&n->list_lock);
1911
1912 n = n2;
1913 spin_lock(&n->list_lock);
1914 }
1915
1916 do {
1917
1918 old.freelist = page->freelist;
1919 old.counters = page->counters;
1920 VM_BUG_ON(!old.frozen);
1921
1922 new.counters = old.counters;
1923 new.freelist = old.freelist;
1924
1925 new.frozen = 0;
1926
1927 } while (!__cmpxchg_double_slab(s, page,
1928 old.freelist, old.counters,
1929 new.freelist, new.counters,
1930 "unfreezing slab"));
1931
1932 if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
1933 page->next = discard_page;
1934 discard_page = page;
1935 } else {
1936 add_partial(n, page, DEACTIVATE_TO_TAIL);
1937 stat(s, FREE_ADD_PARTIAL);
1938 }
1939 }
1940
1941 if (n)
1942 spin_unlock(&n->list_lock);
1943
1944 while (discard_page) {
1945 page = discard_page;
1946 discard_page = discard_page->next;
1947
1948 stat(s, DEACTIVATE_EMPTY);
1949 discard_slab(s, page);
1950 stat(s, FREE_SLAB);
1951 }
1952 #endif
1953 }
1954
1955 /*
1956 * Put a page that was just frozen (in __slab_free) into a partial page
1957 * slot if available. This is done without interrupts disabled and without
1958 * preemption disabled. The cmpxchg is racy and may put the partial page
1959 * onto a random cpu's partial slot.
1960 *
1961 * If we did not find a slot then simply move all the partials to the
1962 * per node partial list.
1963 */
1964 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
1965 {
1966 #ifdef CONFIG_SLUB_CPU_PARTIAL
1967 struct page *oldpage;
1968 int pages;
1969 int pobjects;
1970
1971 if (!s->cpu_partial)
1972 return;
1973
1974 do {
1975 pages = 0;
1976 pobjects = 0;
1977 oldpage = this_cpu_read(s->cpu_slab->partial);
1978
1979 if (oldpage) {
1980 pobjects = oldpage->pobjects;
1981 pages = oldpage->pages;
1982 if (drain && pobjects > s->cpu_partial) {
1983 unsigned long flags;
1984 /*
1985 * partial array is full. Move the existing
1986 * set to the per node partial list.
1987 */
1988 local_irq_save(flags);
1989 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
1990 local_irq_restore(flags);
1991 oldpage = NULL;
1992 pobjects = 0;
1993 pages = 0;
1994 stat(s, CPU_PARTIAL_DRAIN);
1995 }
1996 }
1997
1998 pages++;
1999 pobjects += page->objects - page->inuse;
2000
2001 page->pages = pages;
2002 page->pobjects = pobjects;
2003 page->next = oldpage;
2004
2005 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
2006 #endif
2007 }
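
/*
 * The cmpxchg loop above is the generic lock-free push idiom: read the
 * old per cpu head, link the new element in front of it and publish it
 * with a cmpxchg that only succeeds if the head is still unchanged.
 * A stripped-down sketch (struct node and push() are hypothetical):
 *
 *	struct node { struct node *next; };
 *	static DEFINE_PER_CPU(struct node *, head);
 *
 *	static void push(struct node *new)
 *	{
 *		struct node *old;
 *
 *		do {
 *			old = this_cpu_read(head);
 *			new->next = old;
 *		} while (this_cpu_cmpxchg(head, old, new) != old);
 *	}
 */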
2008
2009 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2010 {
2011 stat(s, CPUSLAB_FLUSH);
2012 deactivate_slab(s, c->page, c->freelist);
2013
2014 c->tid = next_tid(c->tid);
2015 c->page = NULL;
2016 c->freelist = NULL;
2017 }
2018
2019 /*
2020 * Flush cpu slab.
2021 *
2022 * Called from IPI handler with interrupts disabled.
2023 */
2024 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2025 {
2026 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2027
2028 if (likely(c)) {
2029 if (c->page)
2030 flush_slab(s, c);
2031
2032 unfreeze_partials(s, c);
2033 }
2034 }
2035
2036 static void flush_cpu_slab(void *d)
2037 {
2038 struct kmem_cache *s = d;
2039
2040 __flush_cpu_slab(s, smp_processor_id());
2041 }
2042
2043 static bool has_cpu_slab(int cpu, void *info)
2044 {
2045 struct kmem_cache *s = info;
2046 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2047
2048 return c->page || c->partial;
2049 }
2050
2051 static void flush_all(struct kmem_cache *s)
2052 {
2053 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
2054 }
2055
2056 /*
2057 * Check if the objects in a per cpu structure fit numa
2058 * locality expectations.
2059 */
2060 static inline int node_match(struct page *page, int node)
2061 {
2062 #ifdef CONFIG_NUMA
2063 if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
2064 return 0;
2065 #endif
2066 return 1;
2067 }
2068
2069 static int count_free(struct page *page)
2070 {
2071 return page->objects - page->inuse;
2072 }
2073
2074 static unsigned long count_partial(struct kmem_cache_node *n,
2075 int (*get_count)(struct page *))
2076 {
2077 unsigned long flags;
2078 unsigned long x = 0;
2079 struct page *page;
2080
2081 spin_lock_irqsave(&n->list_lock, flags);
2082 list_for_each_entry(page, &n->partial, lru)
2083 x += get_count(page);
2084 spin_unlock_irqrestore(&n->list_lock, flags);
2085 return x;
2086 }
2087
2088 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2089 {
2090 #ifdef CONFIG_SLUB_DEBUG
2091 return atomic_long_read(&n->total_objects);
2092 #else
2093 return 0;
2094 #endif
2095 }
2096
2097 static noinline void
2098 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2099 {
2100 int node;
2101
2102 printk(KERN_WARNING
2103 "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
2104 nid, gfpflags);
2105 printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
2106 "default order: %d, min order: %d\n", s->name, s->object_size,
2107 s->size, oo_order(s->oo), oo_order(s->min));
2108
2109 if (oo_order(s->min) > get_order(s->object_size))
2110 printk(KERN_WARNING " %s debugging increased min order, use "
2111 "slub_debug=O to disable.\n", s->name);
2112
2113 for_each_online_node(node) {
2114 struct kmem_cache_node *n = get_node(s, node);
2115 unsigned long nr_slabs;
2116 unsigned long nr_objs;
2117 unsigned long nr_free;
2118
2119 if (!n)
2120 continue;
2121
2122 nr_free = count_partial(n, count_free);
2123 nr_slabs = node_nr_slabs(n);
2124 nr_objs = node_nr_objs(n);
2125
2126 printk(KERN_WARNING
2127 " node %d: slabs: %ld, objs: %ld, free: %ld\n",
2128 node, nr_slabs, nr_objs, nr_free);
2129 }
2130 }
2131
2132 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2133 int node, struct kmem_cache_cpu **pc)
2134 {
2135 void *freelist;
2136 struct kmem_cache_cpu *c = *pc;
2137 struct page *page;
2138
2139 freelist = get_partial(s, flags, node, c);
2140
2141 if (freelist)
2142 return freelist;
2143
2144 page = new_slab(s, flags, node);
2145 if (page) {
2146 c = __this_cpu_ptr(s->cpu_slab);
2147 if (c->page)
2148 flush_slab(s, c);
2149
2150 /*
2151 * No other reference to the page yet so we can
2152 * muck around with it freely without cmpxchg
2153 */
2154 freelist = page->freelist;
2155 page->freelist = NULL;
2156
2157 stat(s, ALLOC_SLAB);
2158 c->page = page;
2159 *pc = c;
2160 } else
2161 freelist = NULL;
2162
2163 return freelist;
2164 }
2165
2166 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2167 {
2168 if (unlikely(PageSlabPfmemalloc(page)))
2169 return gfp_pfmemalloc_allowed(gfpflags);
2170
2171 return true;
2172 }
2173
2174 /*
2175 * Check the page->freelist of a page and either transfer the
2176 * freelist to the per cpu freelist or deactivate the page.
2177 *
2178 * The page is still frozen if the return value is not NULL.
2179 *
2180 * If this function returns NULL then the page has been unfrozen.
2181 *
2182 * This function must be called with interrupts disabled.
2183 */
2184 static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2185 {
2186 struct page new;
2187 unsigned long counters;
2188 void *freelist;
2189
2190 do {
2191 freelist = page->freelist;
2192 counters = page->counters;
2193
2194 new.counters = counters;
2195 VM_BUG_ON(!new.frozen);
2196
2197 new.inuse = page->objects;
2198 new.frozen = freelist != NULL;
2199
2200 } while (!__cmpxchg_double_slab(s, page,
2201 freelist, counters,
2202 NULL, new.counters,
2203 "get_freelist"));
2204
2205 return freelist;
2206 }
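
/*
 * For reference, __cmpxchg_double_slab() compares and replaces
 * page->freelist and page->counters as one atomic unit. Conceptually
 * (a sketch only; the real helper may use a double word cmpxchg or a
 * bit spinlock fallback):
 *
 *	if (page->freelist == old_freelist &&
 *	    page->counters == old_counters) {
 *		page->freelist = new_freelist;
 *		page->counters = new_counters;
 *		return true;
 *	}
 *	return false;
 *
 * On failure callers such as get_freelist() simply re-read
 * freelist/counters and retry.
 */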
2207
2208 /*
2209 * Slow path. The lockless freelist is empty or we need to perform
2210 * debugging duties.
2211 *
2212 * Processing is still very fast if new objects have been freed to the
2213 * regular freelist. In that case we simply take over the regular freelist
2214 * as the lockless freelist and zap the regular freelist.
2215 *
2216 * If that is not working then we fall back to the partial lists. We take the
2217 * first element of the freelist as the object to allocate now and move the
2218 * rest of the freelist to the lockless freelist.
2219 *
2220 * And if we were unable to get a new slab from the partial slab lists then
2221 * we need to allocate a new slab. This is the slowest path since it involves
2222 * a call to the page allocator and the setup of a new slab.
2223 */
2224 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2225 unsigned long addr, struct kmem_cache_cpu *c)
2226 {
2227 void *freelist;
2228 struct page *page;
2229 unsigned long flags;
2230
2231 local_irq_save(flags);
2232 #ifdef CONFIG_PREEMPT
2233 /*
2234 * We may have been preempted and rescheduled on a different
2235 * cpu before disabling interrupts. Need to reload cpu area
2236 * pointer.
2237 */
2238 c = this_cpu_ptr(s->cpu_slab);
2239 #endif
2240
2241 page = c->page;
2242 if (!page)
2243 goto new_slab;
2244 redo:
2245
2246 if (unlikely(!node_match(page, node))) {
2247 stat(s, ALLOC_NODE_MISMATCH);
2248 deactivate_slab(s, page, c->freelist);
2249 c->page = NULL;
2250 c->freelist = NULL;
2251 goto new_slab;
2252 }
2253
2254 /*
2255 * By rights, we should be searching for a slab page that was
2256 * PFMEMALLOC but right now, we are losing the pfmemalloc
2257 * information when the page leaves the per-cpu allocator
2258 */
2259 if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2260 deactivate_slab(s, page, c->freelist);
2261 c->page = NULL;
2262 c->freelist = NULL;
2263 goto new_slab;
2264 }
2265
2266 /* must check again c->freelist in case of cpu migration or IRQ */
2267 freelist = c->freelist;
2268 if (freelist)
2269 goto load_freelist;
2270
2271 stat(s, ALLOC_SLOWPATH);
2272
2273 freelist = get_freelist(s, page);
2274
2275 if (!freelist) {
2276 c->page = NULL;
2277 stat(s, DEACTIVATE_BYPASS);
2278 goto new_slab;
2279 }
2280
2281 stat(s, ALLOC_REFILL);
2282
2283 load_freelist:
2284 /*
2285 * freelist is pointing to the list of objects to be used.
2286 * page is pointing to the page from which the objects are obtained.
2287 * That page must be frozen for per cpu allocations to work.
2288 */
2289 VM_BUG_ON(!c->page->frozen);
2290 c->freelist = get_freepointer(s, freelist);
2291 c->tid = next_tid(c->tid);
2292 local_irq_restore(flags);
2293 return freelist;
2294
2295 new_slab:
2296
2297 if (c->partial) {
2298 page = c->page = c->partial;
2299 c->partial = page->next;
2300 stat(s, CPU_PARTIAL_ALLOC);
2301 c->freelist = NULL;
2302 goto redo;
2303 }
2304
2305 freelist = new_slab_objects(s, gfpflags, node, &c);
2306
2307 if (unlikely(!freelist)) {
2308 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
2309 slab_out_of_memory(s, gfpflags, node);
2310
2311 local_irq_restore(flags);
2312 return NULL;
2313 }
2314
2315 page = c->page;
2316 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2317 goto load_freelist;
2318
2319 /* Only entered in the debug case */
2320 if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
2321 goto new_slab; /* Slab failed checks. Next slab needed */
2322
2323 deactivate_slab(s, page, get_freepointer(s, freelist));
2324 c->page = NULL;
2325 c->freelist = NULL;
2326 local_irq_restore(flags);
2327 return freelist;
2328 }
2329
2330 /*
2331 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2332 * have the fastpath folded into their functions. So no function call
2333 * overhead for requests that can be satisfied on the fastpath.
2334 *
2335 * The fastpath works by first checking if the lockless freelist can be used.
2336 * If not then __slab_alloc is called for slow processing.
2337 *
2338 * Otherwise we can simply pick the next object from the lockless free list.
2339 */
2340 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2341 gfp_t gfpflags, int node, unsigned long addr)
2342 {
2343 void **object;
2344 struct kmem_cache_cpu *c;
2345 struct page *page;
2346 unsigned long tid;
2347
2348 if (slab_pre_alloc_hook(s, gfpflags))
2349 return NULL;
2350
2351 s = memcg_kmem_get_cache(s, gfpflags);
2352 redo:
2353 /*
2354 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2355 * enabled. We may switch back and forth between cpus while
2356 * reading from one cpu area. That does not matter as long
2357 * as we end up on the original cpu again when doing the cmpxchg.
2358 *
2359 * Preemption is disabled for the retrieval of the tid because that
2360 * must occur from the current processor. We cannot allow rescheduling
2361 * on a different processor between the determination of the pointer
2362 * and the retrieval of the tid.
2363 */
2364 preempt_disable();
2365 c = __this_cpu_ptr(s->cpu_slab);
2366
2367 /*
2368 * The transaction ids are globally unique per cpu and per operation on
2369 * a per cpu queue. Thus they guarantee that the cmpxchg_double
2370 * occurs on the right processor and that there was no operation on the
2371 * linked list in between.
2372 */
2373 tid = c->tid;
2374 preempt_enable();
2375
2376 object = c->freelist;
2377 page = c->page;
2378 if (unlikely(!object || !page || !node_match(page, node)))
2379 object = __slab_alloc(s, gfpflags, node, addr, c);
2380
2381 else {
2382 void *next_object = get_freepointer_safe(s, object);
2383
2384 /*
2385 * The cmpxchg will only match if there was no additional
2386 * operation and if we are on the right processor.
2387 *
2388 * The cmpxchg does the following atomically (without lock semantics!)
2389 * 1. Relocate first pointer to the current per cpu area.
2390 * 2. Verify that tid and freelist have not been changed
2391 * 3. If they were not changed replace tid and freelist
2392 *
2393 * Since this is without lock semantics the protection is only against
2394 * code executing on this cpu *not* from access by other cpus.
2395 */
2396 if (unlikely(!this_cpu_cmpxchg_double(
2397 s->cpu_slab->freelist, s->cpu_slab->tid,
2398 object, tid,
2399 next_object, next_tid(tid)))) {
2400
2401 note_cmpxchg_failure("slab_alloc", s, tid);
2402 goto redo;
2403 }
2404 prefetch_freepointer(s, next_object);
2405 stat(s, ALLOC_FASTPATH);
2406 }
2407
2408 if (unlikely(gfpflags & __GFP_ZERO) && object)
2409 memset(object, 0, s->object_size);
2410
2411 slab_post_alloc_hook(s, gfpflags, object);
2412
2413 return object;
2414 }
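
/*
 * Illustration of how the tid catches interference (a sketch, values
 * made up): a thread on cpu 0 reads tid == T and is then preempted.
 * Another allocation or free on cpu 0 advances the tid to T + TID_STEP
 * via next_tid(). When the first thread finally issues its
 * this_cpu_cmpxchg_double() it compares T against T + TID_STEP, the
 * compare fails, note_cmpxchg_failure() fires and the whole operation
 * is redone from the start.
 */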
2415
2416 static __always_inline void *slab_alloc(struct kmem_cache *s,
2417 gfp_t gfpflags, unsigned long addr)
2418 {
2419 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2420 }
2421
2422 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2423 {
2424 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2425
2426 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
2427
2428 return ret;
2429 }
2430 EXPORT_SYMBOL(kmem_cache_alloc);
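
/*
 * Typical usage by a cache owner (a sketch; "foo" and struct foo are
 * made up for illustration):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */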
2431
2432 #ifdef CONFIG_TRACING
2433 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2434 {
2435 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2436 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2437 return ret;
2438 }
2439 EXPORT_SYMBOL(kmem_cache_alloc_trace);
2440
2441 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
2442 {
2443 void *ret = kmalloc_order(size, flags, order);
2444 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
2445 return ret;
2446 }
2447 EXPORT_SYMBOL(kmalloc_order_trace);
2448 #endif
2449
2450 #ifdef CONFIG_NUMA
2451 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2452 {
2453 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2454
2455 trace_kmem_cache_alloc_node(_RET_IP_, ret,
2456 s->object_size, s->size, gfpflags, node);
2457
2458 return ret;
2459 }
2460 EXPORT_SYMBOL(kmem_cache_alloc_node);
2461
2462 #ifdef CONFIG_TRACING
2463 void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2464 gfp_t gfpflags,
2465 int node, size_t size)
2466 {
2467 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2468
2469 trace_kmalloc_node(_RET_IP_, ret,
2470 size, s->size, gfpflags, node);
2471 return ret;
2472 }
2473 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2474 #endif
2475 #endif
2476
2477 /*
2478 * Slow path handling. This may still be called frequently since objects
2479 * have a longer lifetime than the cpu slabs in most processing loads.
2480 *
2481 * So we still attempt to reduce cache line usage. Just take the slab
2482 * lock and free the item. If there is no additional partial page
2483 * handling required then we can return immediately.
2484 */
2485 static void __slab_free(struct kmem_cache *s, struct page *page,
2486 void *x, unsigned long addr)
2487 {
2488 void *prior;
2489 void **object = (void *)x;
2490 int was_frozen;
2491 struct page new;
2492 unsigned long counters;
2493 struct kmem_cache_node *n = NULL;
2494 unsigned long uninitialized_var(flags);
2495
2496 stat(s, FREE_SLOWPATH);
2497
2498 if (kmem_cache_debug(s) &&
2499 !(n = free_debug_processing(s, page, x, addr, &flags)))
2500 return;
2501
2502 do {
2503 if (unlikely(n)) {
2504 spin_unlock_irqrestore(&n->list_lock, flags);
2505 n = NULL;
2506 }
2507 prior = page->freelist;
2508 counters = page->counters;
2509 set_freepointer(s, object, prior);
2510 new.counters = counters;
2511 was_frozen = new.frozen;
2512 new.inuse--;
2513 if ((!new.inuse || !prior) && !was_frozen) {
2514
2515 if (kmem_cache_has_cpu_partial(s) && !prior)
2516
2517 /*
2518 * Slab was on no list before and will be partially empty.
2519 * We can defer the list move and instead freeze it.
2520 */
2521 new.frozen = 1;
2522
2523 else { /* Needs to be taken off a list */
2524
2525 n = get_node(s, page_to_nid(page));
2526 /*
2527 * Speculatively acquire the list_lock.
2528 * If the cmpxchg does not succeed then we may
2529 * drop the list_lock without any processing.
2530 *
2531 * Otherwise the list_lock will synchronize with
2532 * other processors updating the list of slabs.
2533 */
2534 spin_lock_irqsave(&n->list_lock, flags);
2535
2536 }
2537 }
2538
2539 } while (!cmpxchg_double_slab(s, page,
2540 prior, counters,
2541 object, new.counters,
2542 "__slab_free"));
2543
2544 if (likely(!n)) {
2545
2546 /*
2547 * If we just froze the page then put it onto the
2548 * per cpu partial list.
2549 */
2550 if (new.frozen && !was_frozen) {
2551 put_cpu_partial(s, page, 1);
2552 stat(s, CPU_PARTIAL_FREE);
2553 }
2554 /*
2555 * The list lock was not taken therefore no list
2556 * activity can be necessary.
2557 */
2558 if (was_frozen)
2559 stat(s, FREE_FROZEN);
2560 return;
2561 }
2562
2563 if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
2564 goto slab_empty;
2565
2566 /*
2567 * Objects left in the slab. If it was not on the partial list before
2568 * then add it.
2569 */
2570 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2571 if (kmem_cache_debug(s))
2572 remove_full(s, page);
2573 add_partial(n, page, DEACTIVATE_TO_TAIL);
2574 stat(s, FREE_ADD_PARTIAL);
2575 }
2576 spin_unlock_irqrestore(&n->list_lock, flags);
2577 return;
2578
2579 slab_empty:
2580 if (prior) {
2581 /*
2582 * Slab on the partial list.
2583 */
2584 remove_partial(n, page);
2585 stat(s, FREE_REMOVE_PARTIAL);
2586 } else
2587 /* Slab must be on the full list */
2588 remove_full(s, page);
2589
2590 spin_unlock_irqrestore(&n->list_lock, flags);
2591 stat(s, FREE_SLAB);
2592 discard_slab(s, page);
2593 }
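
/*
 * Summary of the outcomes above (a sketch):
 *  - the slab was frozen (it is some processor's cpu slab): only the
 *    freelist is updated, no list operation is needed (FREE_FROZEN);
 *  - the slab was full and cpu partial lists are available: freeze it
 *    and park it on this cpu's partial list (CPU_PARTIAL_FREE);
 *  - otherwise move it between the full and partial lists as needed
 *    under list_lock;
 *  - the slab became completely free and the node already holds
 *    enough partial slabs: remove it and discard it.
 */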
2594
2595 /*
2596 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2597 * can perform fastpath freeing without additional function calls.
2598 *
2599 * The fastpath is only possible if we are freeing to the current cpu slab
2600 * of this processor. This is typically the case if we have just allocated
2601 * the item before.
2602 *
2603 * If fastpath is not possible then fall back to __slab_free where we deal
2604 * with all sorts of special processing.
2605 */
2606 static __always_inline void slab_free(struct kmem_cache *s,
2607 struct page *page, void *x, unsigned long addr)
2608 {
2609 void **object = (void *)x;
2610 struct kmem_cache_cpu *c;
2611 unsigned long tid;
2612
2613 slab_free_hook(s, x);
2614
2615 redo:
2616 /*
2617 * Determine the current cpu's per cpu slab.
2618 * The cpu may change afterward. However that does not matter since
2619 * data is retrieved via this pointer. If we are on the same cpu
2620 * during the cmpxchg then the free will succeed.
2621 */
2622 preempt_disable();
2623 c = __this_cpu_ptr(s->cpu_slab);
2624
2625 tid = c->tid;
2626 preempt_enable();
2627
2628 if (likely(page == c->page)) {
2629 set_freepointer(s, object, c->freelist);
2630
2631 if (unlikely(!this_cpu_cmpxchg_double(
2632 s->cpu_slab->freelist, s->cpu_slab->tid,
2633 c->freelist, tid,
2634 object, next_tid(tid)))) {
2635
2636 note_cmpxchg_failure("slab_free", s, tid);
2637 goto redo;
2638 }
2639 stat(s, FREE_FASTPATH);
2640 } else
2641 __slab_free(s, page, x, addr);
2642
2643 }
2644
2645 void kmem_cache_free(struct kmem_cache *s, void *x)
2646 {
2647 s = cache_from_obj(s, x);
2648 if (!s)
2649 return;
2650 slab_free(s, virt_to_head_page(x), x, _RET_IP_);
2651 trace_kmem_cache_free(_RET_IP_, x);
2652 }
2653 EXPORT_SYMBOL(kmem_cache_free);
2654
2655 /*
2656 * Object placement in a slab is made very easy because we always start at
2657 * offset 0. If we tune the size of the object to the alignment then we can
2658 * get the required alignment by putting one properly sized object after
2659 * another.
2660 *
2661 * Notice that the allocation order determines the sizes of the per cpu
2662 * caches. Each processor has always one slab available for allocations.
2663 * Increasing the allocation order reduces the number of times that slabs
2664 * must be moved on and off the partial lists and is therefore a factor in
2665 * locking overhead.
2666 */
2667
2668 /*
2669 * Minimum / Maximum order of slab pages. This influences locking overhead
2670 * and slab fragmentation. A higher order reduces the number of partial slabs
2671 * and increases the number of allocations possible without having to
2672 * take the list_lock.
2673 */
2674 static int slub_min_order;
2675 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
2676 static int slub_min_objects;
2677
2678 /*
2679 * Merge control. If this is set then no merging of slab caches will occur.
2680 * (Could be removed. This was introduced to pacify the merge skeptics.)
2681 */
2682 static int slub_nomerge;
2683
2684 /*
2685 * Calculate the order of allocation given a slab object size.
2686 *
2687 * The order of allocation has significant impact on performance and other
2688 * system components. Generally order 0 allocations should be preferred since
2689 * order 0 does not cause fragmentation in the page allocator. Larger objects
2690 * be problematic to put into order 0 slabs because there may be too much
2691 * unused space left. We go to a higher order if more than 1/16th of the slab
2692 * would be wasted.
2693 *
2694 * In order to reach satisfactory performance we must ensure that a minimum
2695 * number of objects is in one slab. Otherwise we may generate too much
2696 * activity on the partial lists which requires taking the list_lock. This is
2697 * less of a concern for large slabs, though, as they are rarely used.
2698 *
2699 * slub_max_order specifies the order where we begin to stop considering the
2700 * number of objects in a slab as critical. If we reach slub_max_order then
2701 * we try to keep the page order as low as possible. So we accept more waste
2702 * of space in favor of a small page order.
2703 *
2704 * Higher order allocations also allow the placement of more objects in a
2705 * slab and thereby reduce object handling overhead. If the user has
2706 * requested a higher minimum order then we start with that one instead of
2707 * the smallest order which will fit the object.
2708 */
2709 static inline int slab_order(int size, int min_objects,
2710 int max_order, int fract_leftover, int reserved)
2711 {
2712 int order;
2713 int rem;
2714 int min_order = slub_min_order;
2715
2716 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
2717 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
2718
2719 for (order = max(min_order,
2720 fls(min_objects * size - 1) - PAGE_SHIFT);
2721 order <= max_order; order++) {
2722
2723 unsigned long slab_size = PAGE_SIZE << order;
2724
2725 if (slab_size < min_objects * size + reserved)
2726 continue;
2727
2728 rem = (slab_size - reserved) % size;
2729
2730 if (rem <= slab_size / fract_leftover)
2731 break;
2732
2733 }
2734
2735 return order;
2736 }
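
/*
 * Worked example (assuming PAGE_SIZE == 4096 and reserved == 0): for
 * size == 192, min_objects == 16 and fract_leftover == 16 the loop
 * starts at order fls(16 * 192 - 1) - PAGE_SHIFT == 12 - 12 == 0. An
 * order-0 slab fits 21 objects (21 * 192 == 4032) leaving 64 bytes
 * over, and 64 <= 4096 / 16, so order 0 is accepted.
 */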
2737
2738 static inline int calculate_order(int size, int reserved)
2739 {
2740 int order;
2741 int min_objects;
2742 int fraction;
2743 int max_objects;
2744
2745 /*
2746 * Attempt to find best configuration for a slab. This
2747 * works by first attempting to generate a layout with
2748 * the best configuration and backing off gradually.
2749 *
2750 * First we increase the acceptable waste in a slab. Then
2751 * we reduce the minimum objects required in a slab.
2752 */
2753 min_objects = slub_min_objects;
2754 if (!min_objects)
2755 min_objects = 4 * (fls(nr_cpu_ids) + 1);
2756 max_objects = order_objects(slub_max_order, size, reserved);
2757 min_objects = min(min_objects, max_objects);
2758
2759 while (min_objects > 1) {
2760 fraction = 16;
2761 while (fraction >= 4) {
2762 order = slab_order(size, min_objects,
2763 slub_max_order, fraction, reserved);
2764 if (order <= slub_max_order)
2765 return order;
2766 fraction /= 2;
2767 }
2768 min_objects--;
2769 }
2770
2771 /*
2772 * We were unable to place multiple objects in a slab. Now
2773 * lets see if we can place a single object there.
2774 */
2775 order = slab_order(size, 1, slub_max_order, 1, reserved);
2776 if (order <= slub_max_order)
2777 return order;
2778
2779 /*
2780 * Doh this slab cannot be placed using slub_max_order.
2781 */
2782 order = slab_order(size, 1, MAX_ORDER, 1, reserved);
2783 if (order < MAX_ORDER)
2784 return order;
2785 return -ENOSYS;
2786 }
2787
2788 static void
2789 init_kmem_cache_node(struct kmem_cache_node *n)
2790 {
2791 n->nr_partial = 0;
2792 spin_lock_init(&n->list_lock);
2793 INIT_LIST_HEAD(&n->partial);
2794 #ifdef CONFIG_SLUB_DEBUG
2795 atomic_long_set(&n->nr_slabs, 0);
2796 atomic_long_set(&n->total_objects, 0);
2797 INIT_LIST_HEAD(&n->full);
2798 #endif
2799 }
2800
2801 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
2802 {
2803 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2804 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
2805
2806 /*
2807 * Must align to double word boundary for the double cmpxchg
2808 * instructions to work; see __pcpu_double_call_return_bool().
2809 */
2810 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2811 2 * sizeof(void *));
2812
2813 if (!s->cpu_slab)
2814 return 0;
2815
2816 init_kmem_cache_cpus(s);
2817
2818 return 1;
2819 }
2820
2821 static struct kmem_cache *kmem_cache_node;
2822
2823 /*
2824 * No kmalloc_node yet so do it by hand. We know that this is the first
2825 * slab on the node for this slabcache. There are no concurrent accesses
2826 * possible.
2827 *
2828 * Note that this function only works on the kmem_cache_node cache
2829 * when allocating for the kmem_cache_node cache. This is used for bootstrapping
2830 * memory on a fresh node that has no slab structures yet.
2831 */
2832 static void early_kmem_cache_node_alloc(int node)
2833 {
2834 struct page *page;
2835 struct kmem_cache_node *n;
2836
2837 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
2838
2839 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
2840
2841 BUG_ON(!page);
2842 if (page_to_nid(page) != node) {
2843 printk(KERN_ERR "SLUB: Unable to allocate memory from "
2844 "node %d\n", node);
2845 printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2846 "in order to be able to continue\n");
2847 }
2848
2849 n = page->freelist;
2850 BUG_ON(!n);
2851 page->freelist = get_freepointer(kmem_cache_node, n);
2852 page->inuse = 1;
2853 page->frozen = 0;
2854 kmem_cache_node->node[node] = n;
2855 #ifdef CONFIG_SLUB_DEBUG
2856 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
2857 init_tracking(kmem_cache_node, n);
2858 #endif
2859 init_kmem_cache_node(n);
2860 inc_slabs_node(kmem_cache_node, node, page->objects);
2861
2862 add_partial(n, page, DEACTIVATE_TO_HEAD);
2863 }
2864
2865 static void free_kmem_cache_nodes(struct kmem_cache *s)
2866 {
2867 int node;
2868
2869 for_each_node_state(node, N_NORMAL_MEMORY) {
2870 struct kmem_cache_node *n = s->node[node];
2871
2872 if (n)
2873 kmem_cache_free(kmem_cache_node, n);
2874
2875 s->node[node] = NULL;
2876 }
2877 }
2878
2879 static int init_kmem_cache_nodes(struct kmem_cache *s)
2880 {
2881 int node;
2882
2883 for_each_node_state(node, N_NORMAL_MEMORY) {
2884 struct kmem_cache_node *n;
2885
2886 if (slab_state == DOWN) {
2887 early_kmem_cache_node_alloc(node);
2888 continue;
2889 }
2890 n = kmem_cache_alloc_node(kmem_cache_node,
2891 GFP_KERNEL, node);
2892
2893 if (!n) {
2894 free_kmem_cache_nodes(s);
2895 return 0;
2896 }
2897
2898 s->node[node] = n;
2899 init_kmem_cache_node(n);
2900 }
2901 return 1;
2902 }
2903
2904 static void set_min_partial(struct kmem_cache *s, unsigned long min)
2905 {
2906 if (min < MIN_PARTIAL)
2907 min = MIN_PARTIAL;
2908 else if (min > MAX_PARTIAL)
2909 min = MAX_PARTIAL;
2910 s->min_partial = min;
2911 }
2912
2913 /*
2914 * calculate_sizes() determines the order and the distribution of data within
2915 * a slab object.
2916 */
2917 static int calculate_sizes(struct kmem_cache *s, int forced_order)
2918 {
2919 unsigned long flags = s->flags;
2920 unsigned long size = s->object_size;
2921 int order;
2922
2923 /*
2924 * Round up object size to the next word boundary. We can only
2925 * place the free pointer at word boundaries and this determines
2926 * the possible location of the free pointer.
2927 */
2928 size = ALIGN(size, sizeof(void *));
2929
2930 #ifdef CONFIG_SLUB_DEBUG
2931 /*
2932 * Determine if we can poison the object itself. If the user of
2933 * the slab may touch the object after free or before allocation
2934 * then we should never poison the object itself.
2935 */
2936 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2937 !s->ctor)
2938 s->flags |= __OBJECT_POISON;
2939 else
2940 s->flags &= ~__OBJECT_POISON;
2941
2942
2943 /*
2944 * If we are Redzoning then check if there is some space between the
2945 * end of the object and the free pointer. If not then add an
2946 * additional word to have some bytes to store Redzone information.
2947 */
2948 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
2949 size += sizeof(void *);
2950 #endif
2951
2952 /*
2953 * With that we have determined the number of bytes in actual use
2954 * by the object. This is the potential offset to the free pointer.
2955 */
2956 s->inuse = size;
2957
2958 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2959 s->ctor)) {
2960 /*
2961 * Relocate free pointer after the object if it is not
2962 * permitted to overwrite the first word of the object on
2963 * kmem_cache_free.
2964 *
2965 * This is the case if we do RCU, have a constructor or
2966 * destructor or are poisoning the objects.
2967 */
2968 s->offset = size;
2969 size += sizeof(void *);
2970 }
2971
2972 #ifdef CONFIG_SLUB_DEBUG
2973 if (flags & SLAB_STORE_USER)
2974 /*
2975 * Need to store information about allocs and frees after
2976 * the object.
2977 */
2978 size += 2 * sizeof(struct track);
2979
2980 if (flags & SLAB_RED_ZONE)
2981 /*
2982 * Add some empty padding so that we can catch
2983 * overwrites from earlier objects rather than let
2984 * tracking information or the free pointer be
2985 * corrupted if a user writes before the start
2986 * of the object.
2987 */
2988 size += sizeof(void *);
2989 #endif
2990
2991 /*
2992 * SLUB stores one object immediately after another beginning from
2993 * offset 0. In order to align the objects we have to simply size
2994 * each object to conform to the alignment.
2995 */
2996 size = ALIGN(size, s->align);
2997 s->size = size;
2998 if (forced_order >= 0)
2999 order = forced_order;
3000 else
3001 order = calculate_order(size, s->reserved);
3002
3003 if (order < 0)
3004 return 0;
3005
3006 s->allocflags = 0;
3007 if (order)
3008 s->allocflags |= __GFP_COMP;
3009
3010 if (s->flags & SLAB_CACHE_DMA)
3011 s->allocflags |= GFP_DMA;
3012
3013 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3014 s->allocflags |= __GFP_RECLAIMABLE;
3015
3016 /*
3017 * Determine the number of objects per slab
3018 */
3019 s->oo = oo_make(order, size, s->reserved);
3020 s->min = oo_make(get_order(size), size, s->reserved);
3021 if (oo_objects(s->oo) > oo_objects(s->max))
3022 s->max = s->oo;
3023
3024 return !!oo_objects(s->oo);
3025 }
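
/*
 * Example layout computed above (a sketch; exact sizes depend on the
 * configuration) for a 64 bit cache with object_size == 24 and
 * SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER:
 *
 *	offset  0: the object itself (24 bytes, poisoned while free)
 *	offset 24: red zone word (sizeof(void *))
 *	offset 32: free pointer (s->offset == 32, relocated because
 *		   poisoning would overwrite the first word)
 *	offset 40: two struct track records (alloc and free)
 *	    then : one padding word to catch writes before the start
 *		   of the following object
 */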
3026
3027 static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
3028 {
3029 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
3030 s->reserved = 0;
3031
3032 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
3033 s->reserved = sizeof(struct rcu_head);
3034
3035 if (!calculate_sizes(s, -1))
3036 goto error;
3037 if (disable_higher_order_debug) {
3038 /*
3039 * Disable debugging flags that store metadata if the min slab
3040 * order increased.
3041 */
3042 if (get_order(s->size) > get_order(s->object_size)) {
3043 s->flags &= ~DEBUG_METADATA_FLAGS;
3044 s->offset = 0;
3045 if (!calculate_sizes(s, -1))
3046 goto error;
3047 }
3048 }
3049
3050 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3051 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3052 if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
3053 /* Enable fast mode */
3054 s->flags |= __CMPXCHG_DOUBLE;
3055 #endif
3056
3057 /*
3058 * The larger the object size is, the more pages we want on the partial
3059 * list to avoid pounding the page allocator excessively.
3060 */
3061 set_min_partial(s, ilog2(s->size) / 2);
3062
3063 /*
3064 * cpu_partial determines the maximum number of objects kept in the
3065 * per cpu partial lists of a processor.
3066 *
3067 * Per cpu partial lists mainly contain slabs that just have one
3068 * object freed. If they are used for allocation then they can be
3069 * filled up again with minimal effort. The slab will never hit the
3070 * per node partial lists and therefore no locking will be required.
3071 *
3072 * This setting also determines
3073 *
3074 * A) The number of objects from per cpu partial slabs dumped to the
3075 * per node list when we reach the limit.
3076 * B) The number of objects in cpu partial slabs to extract from the
3077 * per node list when we run out of per cpu objects. We only fetch 50%
3078 * to keep some capacity around for frees.
3079 */
3080 if (!kmem_cache_has_cpu_partial(s))
3081 s->cpu_partial = 0;
3082 else if (s->size >= PAGE_SIZE)
3083 s->cpu_partial = 2;
3084 else if (s->size >= 1024)
3085 s->cpu_partial = 6;
3086 else if (s->size >= 256)
3087 s->cpu_partial = 13;
3088 else
3089 s->cpu_partial = 30;
3090
3091 #ifdef CONFIG_NUMA
3092 s->remote_node_defrag_ratio = 1000;
3093 #endif
3094 if (!init_kmem_cache_nodes(s))
3095 goto error;
3096
3097 if (alloc_kmem_cache_cpus(s))
3098 return 0;
3099
3100 free_kmem_cache_nodes(s);
3101 error:
3102 if (flags & SLAB_PANIC)
3103 panic("Cannot create slab %s size=%lu realsize=%u "
3104 "order=%u offset=%u flags=%lx\n",
3105 s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
3106 s->offset, flags);
3107 return -EINVAL;
3108 }
3109
3110 static void list_slab_objects(struct kmem_cache *s, struct page *page,
3111 const char *text)
3112 {
3113 #ifdef CONFIG_SLUB_DEBUG
3114 void *addr = page_address(page);
3115 void *p;
3116 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3117 sizeof(long), GFP_ATOMIC);
3118 if (!map)
3119 return;
3120 slab_err(s, page, text, s->name);
3121 slab_lock(page);
3122
3123 get_map(s, page, map);
3124 for_each_object(p, s, addr, page->objects) {
3125
3126 if (!test_bit(slab_index(p, s, addr), map)) {
3127 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
3128 p, p - addr);
3129 print_tracking(s, p);
3130 }
3131 }
3132 slab_unlock(page);
3133 kfree(map);
3134 #endif
3135 }
3136
3137 /*
3138 * Attempt to free all partial slabs on a node.
3139 * This is called from kmem_cache_close(). We must be the last thread
3140 * using the cache and therefore we do not need to lock anymore.
3141 */
3142 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3143 {
3144 struct page *page, *h;
3145
3146 list_for_each_entry_safe(page, h, &n->partial, lru) {
3147 if (!page->inuse) {
3148 remove_partial(n, page);
3149 discard_slab(s, page);
3150 } else {
3151 list_slab_objects(s, page,
3152 "Objects remaining in %s on kmem_cache_close()");
3153 }
3154 }
3155 }
3156
3157 /*
3158 * Release all resources used by a slab cache.
3159 */
3160 static inline int kmem_cache_close(struct kmem_cache *s)
3161 {
3162 int node;
3163
3164 flush_all(s);
3165 /* Attempt to free all objects */
3166 for_each_node_state(node, N_NORMAL_MEMORY) {
3167 struct kmem_cache_node *n = get_node(s, node);
3168
3169 free_partial(s, n);
3170 if (n->nr_partial || slabs_node(s, node))
3171 return 1;
3172 }
3173 free_percpu(s->cpu_slab);
3174 free_kmem_cache_nodes(s);
3175 return 0;
3176 }
3177
3178 int __kmem_cache_shutdown(struct kmem_cache *s)
3179 {
3180 int rc = kmem_cache_close(s);
3181
3182 if (!rc) {
3183 /*
3184 * We do the same lock strategy around sysfs_slab_add, see
3185 * __kmem_cache_create. Because this is pretty much the last
3186 * operation we do and the lock will be released shortly after
3187 * that in slab_common.c, we could just move sysfs_slab_remove
3188 * to a later point in common code. We should do that when we
3189 * have a common sysfs framework for all allocators.
3190 */
3191 mutex_unlock(&slab_mutex);
3192 sysfs_slab_remove(s);
3193 mutex_lock(&slab_mutex);
3194 }
3195
3196 return rc;
3197 }
3198
3199 /********************************************************************
3200 * Kmalloc subsystem
3201 *******************************************************************/
3202
3203 static int __init setup_slub_min_order(char *str)
3204 {
3205 get_option(&str, &slub_min_order);
3206
3207 return 1;
3208 }
3209
3210 __setup("slub_min_order=", setup_slub_min_order);
3211
3212 static int __init setup_slub_max_order(char *str)
3213 {
3214 get_option(&str, &slub_max_order);
3215 slub_max_order = min(slub_max_order, MAX_ORDER - 1);
3216
3217 return 1;
3218 }
3219
3220 __setup("slub_max_order=", setup_slub_max_order);
3221
3222 static int __init setup_slub_min_objects(char *str)
3223 {
3224 get_option(&str, &slub_min_objects);
3225
3226 return 1;
3227 }
3228
3229 __setup("slub_min_objects=", setup_slub_min_objects);
3230
3231 static int __init setup_slub_nomerge(char *str)
3232 {
3233 slub_nomerge = 1;
3234 return 1;
3235 }
3236
3237 __setup("slub_nomerge", setup_slub_nomerge);
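
/*
 * All four knobs above are given on the kernel command line, e.g.
 * (values purely for illustration):
 *
 *	slub_min_order=3 slub_max_order=4 slub_min_objects=16 slub_nomerge
 */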
3238
3239 void *__kmalloc(size_t size, gfp_t flags)
3240 {
3241 struct kmem_cache *s;
3242 void *ret;
3243
3244 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3245 return kmalloc_large(size, flags);
3246
3247 s = kmalloc_slab(size, flags);
3248
3249 if (unlikely(ZERO_OR_NULL_PTR(s)))
3250 return s;
3251
3252 ret = slab_alloc(s, flags, _RET_IP_);
3253
3254 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3255
3256 return ret;
3257 }
3258 EXPORT_SYMBOL(__kmalloc);
3259
3260 #ifdef CONFIG_NUMA
3261 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3262 {
3263 struct page *page;
3264 void *ptr = NULL;
3265
3266 flags |= __GFP_COMP | __GFP_NOTRACK | __GFP_KMEMCG;
3267 page = alloc_pages_node(node, flags, get_order(size));
3268 if (page)
3269 ptr = page_address(page);
3270
3271 kmemleak_alloc(ptr, size, 1, flags);
3272 return ptr;
3273 }
3274
3275 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3276 {
3277 struct kmem_cache *s;
3278 void *ret;
3279
3280 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3281 ret = kmalloc_large_node(size, flags, node);
3282
3283 trace_kmalloc_node(_RET_IP_, ret,
3284 size, PAGE_SIZE << get_order(size),
3285 flags, node);
3286
3287 return ret;
3288 }
3289
3290 s = kmalloc_slab(size, flags);
3291
3292 if (unlikely(ZERO_OR_NULL_PTR(s)))
3293 return s;
3294
3295 ret = slab_alloc_node(s, flags, node, _RET_IP_);
3296
3297 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3298
3299 return ret;
3300 }
3301 EXPORT_SYMBOL(__kmalloc_node);
3302 #endif
3303
3304 size_t ksize(const void *object)
3305 {
3306 struct page *page;
3307
3308 if (unlikely(object == ZERO_SIZE_PTR))
3309 return 0;
3310
3311 page = virt_to_head_page(object);
3312
3313 if (unlikely(!PageSlab(page))) {
3314 WARN_ON(!PageCompound(page));
3315 return PAGE_SIZE << compound_order(page);
3316 }
3317
3318 return slab_ksize(page->slab_cache);
3319 }
3320 EXPORT_SYMBOL(ksize);
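
/*
 * Note that ksize() reports the usable size of an allocation, which
 * may exceed what was asked for because kmalloc rounds up to the next
 * cache size. A sketch:
 *
 *	char *p = kmalloc(100, GFP_KERNEL);
 *	size_t usable = ksize(p);	(128 here: served from kmalloc-128)
 *	kfree(p);
 */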
3321
3322 #ifdef CONFIG_SLUB_DEBUG
3323 bool verify_mem_not_deleted(const void *x)
3324 {
3325 struct page *page;
3326 void *object = (void *)x;
3327 unsigned long flags;
3328 bool rv;
3329
3330 if (unlikely(ZERO_OR_NULL_PTR(x)))
3331 return false;
3332
3333 local_irq_save(flags);
3334
3335 page = virt_to_head_page(x);
3336 if (unlikely(!PageSlab(page))) {
3337 /* maybe it was from stack? */
3338 rv = true;
3339 goto out_unlock;
3340 }
3341
3342 slab_lock(page);
3343 if (on_freelist(page->slab_cache, page, object)) {
3344 object_err(page->slab_cache, page, object, "Object is on free-list");
3345 rv = false;
3346 } else {
3347 rv = true;
3348 }
3349 slab_unlock(page);
3350
3351 out_unlock:
3352 local_irq_restore(flags);
3353 return rv;
3354 }
3355 EXPORT_SYMBOL(verify_mem_not_deleted);
3356 #endif
3357
3358 void kfree(const void *x)
3359 {
3360 struct page *page;
3361 void *object = (void *)x;
3362
3363 trace_kfree(_RET_IP_, x);
3364
3365 if (unlikely(ZERO_OR_NULL_PTR(x)))
3366 return;
3367
3368 page = virt_to_head_page(x);
3369 if (unlikely(!PageSlab(page))) {
3370 BUG_ON(!PageCompound(page));
3371 kmemleak_free(x);
3372 __free_memcg_kmem_pages(page, compound_order(page));
3373 return;
3374 }
3375 slab_free(page->slab_cache, page, object, _RET_IP_);
3376 }
3377 EXPORT_SYMBOL(kfree);
3378
3379 /*
3380 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
3381 * the remaining slabs by the number of items in use. The slabs with the
3382 * most items in use come first. New allocations will then fill those up
3383 * and thus they can be removed from the partial lists.
3384 *
3385 * The slabs with the least items are placed last. This results in them
3386 * being allocated from last, increasing the chance that their remaining
3387 * objects are freed and the slabs can then be discarded.
3388 */
3389 int kmem_cache_shrink(struct kmem_cache *s)
3390 {
3391 int node;
3392 int i;
3393 struct kmem_cache_node *n;
3394 struct page *page;
3395 struct page *t;
3396 int objects = oo_objects(s->max);
3397 struct list_head *slabs_by_inuse =
3398 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
3399 unsigned long flags;
3400
3401 if (!slabs_by_inuse)
3402 return -ENOMEM;
3403
3404 flush_all(s);
3405 for_each_node_state(node, N_NORMAL_MEMORY) {
3406 n = get_node(s, node);
3407
3408 if (!n->nr_partial)
3409 continue;
3410
3411 for (i = 0; i < objects; i++)
3412 INIT_LIST_HEAD(slabs_by_inuse + i);
3413
3414 spin_lock_irqsave(&n->list_lock, flags);
3415
3416 /*
3417 * Build lists indexed by the items in use in each slab.
3418 *
3419 * Note that concurrent frees may occur while we hold the
3420 * list_lock. page->inuse here is the upper limit.
3421 */
3422 list_for_each_entry_safe(page, t, &n->partial, lru) {
3423 list_move(&page->lru, slabs_by_inuse + page->inuse);
3424 if (!page->inuse)
3425 n->nr_partial--;
3426 }
3427
3428 /*
3429 * Rebuild the partial list with the slabs filled up most
3430 * first and the least used slabs at the end.
3431 */
3432 for (i = objects - 1; i > 0; i--)
3433 list_splice(slabs_by_inuse + i, n->partial.prev);
3434
3435 spin_unlock_irqrestore(&n->list_lock, flags);
3436
3437 /* Release empty slabs */
3438 list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
3439 discard_slab(s, page);
3440 }
3441
3442 kfree(slabs_by_inuse);
3443 return 0;
3444 }
3445 EXPORT_SYMBOL(kmem_cache_shrink);
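
/*
 * Example of the bucketing above (a sketch): with objects == 8 and
 * partial slabs with inuse counts {0, 5, 2, 5}, the pages land in
 * slabs_by_inuse[0], [5], [2] and [5]. Splicing buckets 7 down to 1
 * back in rebuilds the partial list as {5, 5, 2}, fullest first; the
 * empty slab stays in bucket 0 and is discarded afterwards.
 */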
3446
3447 static int slab_mem_going_offline_callback(void *arg)
3448 {
3449 struct kmem_cache *s;
3450
3451 mutex_lock(&slab_mutex);
3452 list_for_each_entry(s, &slab_caches, list)
3453 kmem_cache_shrink(s);
3454 mutex_unlock(&slab_mutex);
3455
3456 return 0;
3457 }
3458
3459 static void slab_mem_offline_callback(void *arg)
3460 {
3461 struct kmem_cache_node *n;
3462 struct kmem_cache *s;
3463 struct memory_notify *marg = arg;
3464 int offline_node;
3465
3466 offline_node = marg->status_change_nid_normal;
3467
3468 /*
3469 * If the node still has available memory, we still need its
3470 * kmem_cache_node structure, so there is nothing to tear down.
3471 */
3472 if (offline_node < 0)
3473 return;
3474
3475 mutex_lock(&slab_mutex);
3476 list_for_each_entry(s, &slab_caches, list) {
3477 n = get_node(s, offline_node);
3478 if (n) {
3479 /*
3480 * if n->nr_slabs > 0, slabs still exist on the node
3481 * that is going down. We were unable to free them,
3482 * and offline_pages() function shouldn't call this
3483 * callback. So, we must fail.
3484 */
3485 BUG_ON(slabs_node(s, offline_node));
3486
3487 s->node[offline_node] = NULL;
3488 kmem_cache_free(kmem_cache_node, n);
3489 }
3490 }
3491 mutex_unlock(&slab_mutex);
3492 }
3493
3494 static int slab_mem_going_online_callback(void *arg)
3495 {
3496 struct kmem_cache_node *n;
3497 struct kmem_cache *s;
3498 struct memory_notify *marg = arg;
3499 int nid = marg->status_change_nid_normal;
3500 int ret = 0;
3501
3502 /*
3503 * If the node's memory is already available, then kmem_cache_node is
3504 * already created. Nothing to do.
3505 */
3506 if (nid < 0)
3507 return 0;
3508
3509 /*
3510 * We are bringing a node online. No memory is available yet. We must
3511 * allocate a kmem_cache_node structure in order to bring the node
3512 * online.
3513 */
3514 mutex_lock(&slab_mutex);
3515 list_for_each_entry(s, &slab_caches, list) {
3516 /*
3517 * XXX: kmem_cache_alloc_node will fall back to other nodes
3518 * since memory is not yet available from the node that
3519 * is brought up.
3520 */
3521 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
3522 if (!n) {
3523 ret = -ENOMEM;
3524 goto out;
3525 }
3526 init_kmem_cache_node(n);
3527 s->node[nid] = n;
3528 }
3529 out:
3530 mutex_unlock(&slab_mutex);
3531 return ret;
3532 }
3533
3534 static int slab_memory_callback(struct notifier_block *self,
3535 unsigned long action, void *arg)
3536 {
3537 int ret = 0;
3538
3539 switch (action) {
3540 case MEM_GOING_ONLINE:
3541 ret = slab_mem_going_online_callback(arg);
3542 break;
3543 case MEM_GOING_OFFLINE:
3544 ret = slab_mem_going_offline_callback(arg);
3545 break;
3546 case MEM_OFFLINE:
3547 case MEM_CANCEL_ONLINE:
3548 slab_mem_offline_callback(arg);
3549 break;
3550 case MEM_ONLINE:
3551 case MEM_CANCEL_OFFLINE:
3552 break;
3553 }
3554 if (ret)
3555 ret = notifier_from_errno(ret);
3556 else
3557 ret = NOTIFY_OK;
3558 return ret;
3559 }
3560
3561 static struct notifier_block slab_memory_callback_nb = {
3562 .notifier_call = slab_memory_callback,
3563 .priority = SLAB_CALLBACK_PRI,
3564 };
3565
3566 /********************************************************************
3567 * Basic setup of slabs
3568 *******************************************************************/
3569
3570 /*
3571 * Used for early kmem_cache structures that were allocated using
3572 * the page allocator. Allocate them properly then fix up the pointers
3573 * that may be pointing to the wrong kmem_cache structure.
3574 */
3575
3576 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
3577 {
3578 int node;
3579 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
3580
3581 memcpy(s, static_cache, kmem_cache->object_size);
3582
3583 /*
3584 * This runs very early, and only the boot processor is supposed to be
3585 * up. Even if it weren't true, IRQs are not up so we couldn't fire
3586 * IPIs around.
3587 */
3588 __flush_cpu_slab(s, smp_processor_id());
3589 for_each_node_state(node, N_NORMAL_MEMORY) {
3590 struct kmem_cache_node *n = get_node(s, node);
3591 struct page *p;
3592
3593 if (n) {
3594 list_for_each_entry(p, &n->partial, lru)
3595 p->slab_cache = s;
3596
3597 #ifdef CONFIG_SLUB_DEBUG
3598 list_for_each_entry(p, &n->full, lru)
3599 p->slab_cache = s;
3600 #endif
3601 }
3602 }
3603 list_add(&s->list, &slab_caches);
3604 return s;
3605 }
3606
3607 void __init kmem_cache_init(void)
3608 {
3609 static __initdata struct kmem_cache boot_kmem_cache,
3610 boot_kmem_cache_node;
3611
3612 if (debug_guardpage_minorder())
3613 slub_max_order = 0;
3614
3615 kmem_cache_node = &boot_kmem_cache_node;
3616 kmem_cache = &boot_kmem_cache;
3617
3618 create_boot_cache(kmem_cache_node, "kmem_cache_node",
3619 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
3620
3621 register_hotmemory_notifier(&slab_memory_callback_nb);
3622
3623 /* Able to allocate the per node structures */
3624 slab_state = PARTIAL;
3625
3626 create_boot_cache(kmem_cache, "kmem_cache",
3627 offsetof(struct kmem_cache, node) +
3628 nr_node_ids * sizeof(struct kmem_cache_node *),
3629 SLAB_HWCACHE_ALIGN);
3630
3631 kmem_cache = bootstrap(&boot_kmem_cache);
3632
3633 /*
3634 * Allocate kmem_cache_node properly from the kmem_cache slab.
3635 * kmem_cache_node is separately allocated so no need to
3636 * update any list pointers.
3637 */
3638 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
3639
3640 /* Now we can use the kmem_cache to allocate kmalloc slabs */
3641 create_kmalloc_caches(0);
3642
3643 #ifdef CONFIG_SMP
3644 register_cpu_notifier(&slab_notifier);
3645 #endif
3646
3647 printk(KERN_INFO
3648 "SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d,"
3649 " CPUs=%d, Nodes=%d\n",
3650 cache_line_size(),
3651 slub_min_order, slub_max_order, slub_min_objects,
3652 nr_cpu_ids, nr_node_ids);
3653 }
3654
3655 void __init kmem_cache_init_late(void)
3656 {
3657 }
3658
3659 /*
3660 * Find a mergeable slab cache
3661 */
3662 static int slab_unmergeable(struct kmem_cache *s)
3663 {
3664 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3665 return 1;
3666
3667 if (s->ctor)
3668 return 1;
3669
3670 /*
3671 * We may have set a slab to be unmergeable during bootstrap.
3672 */
3673 if (s->refcount < 0)
3674 return 1;
3675
3676 return 0;
3677 }
3678
3679 static struct kmem_cache *find_mergeable(struct mem_cgroup *memcg, size_t size,
3680 size_t align, unsigned long flags, const char *name,
3681 void (*ctor)(void *))
3682 {
3683 struct kmem_cache *s;
3684
3685 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3686 return NULL;
3687
3688 if (ctor)
3689 return NULL;
3690
3691 size = ALIGN(size, sizeof(void *));
3692 align = calculate_alignment(flags, align, size);
3693 size = ALIGN(size, align);
3694 flags = kmem_cache_flags(size, flags, name, NULL);
3695
3696 list_for_each_entry(s, &slab_caches, list) {
3697 if (slab_unmergeable(s))
3698 continue;
3699
3700 if (size > s->size)
3701 continue;
3702
3703 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3704 continue;
3705 /*
3706 * Check if alignment is compatible.
3707 * Courtesy of Adrian Drzewiecki
3708 */
3709 if ((s->size & ~(align - 1)) != s->size)
3710 continue;
3711
3712 if (s->size - size >= sizeof(void *))
3713 continue;
3714
3715 if (!cache_match_memcg(s, memcg))
3716 continue;
3717
3718 return s;
3719 }
3720 return NULL;
3721 }
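
/*
 * Example of the criteria above (a sketch): a request for a 92 byte
 * cache with default flags and no ctor rounds up to 96 bytes. An
 * existing 96 byte cache with compatible flags passes every check
 * (96 <= 96, equal SLUB_MERGE_SAME bits, compatible alignment and
 * 96 - 96 < sizeof(void *)) and is reused instead of creating a
 * new cache.
 */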
3722
3723 struct kmem_cache *
3724 __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
3725 size_t align, unsigned long flags, void (*ctor)(void *))
3726 {
3727 struct kmem_cache *s;
3728
3729 s = find_mergeable(memcg, size, align, flags, name, ctor);
3730 if (s) {
3731 s->refcount++;
3732 /*
3733 * Adjust the object sizes so that we clear
3734 * the complete object on kzalloc.
3735 */
3736 s->object_size = max(s->object_size, (int)size);
3737 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3738
3739 if (sysfs_slab_alias(s, name)) {
3740 s->refcount--;
3741 s = NULL;
3742 }
3743 }
3744
3745 return s;
3746 }
3747
3748 int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
3749 {
3750 int err;
3751
3752 err = kmem_cache_open(s, flags);
3753 if (err)
3754 return err;
3755
3756 /* Mutex is not taken during early boot */
3757 if (slab_state <= UP)
3758 return 0;
3759
3760 memcg_propagate_slab_attrs(s);
3761 mutex_unlock(&slab_mutex);
3762 err = sysfs_slab_add(s);
3763 mutex_lock(&slab_mutex);
3764
3765 if (err)
3766 kmem_cache_close(s);
3767
3768 return err;
3769 }
3770
3771 #ifdef CONFIG_SMP
3772 /*
3773 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3774 * necessary.
3775 */
3776 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3777 unsigned long action, void *hcpu)
3778 {
3779 long cpu = (long)hcpu;
3780 struct kmem_cache *s;
3781 unsigned long flags;
3782
3783 switch (action) {
3784 case CPU_UP_CANCELED:
3785 case CPU_UP_CANCELED_FROZEN:
3786 case CPU_DEAD:
3787 case CPU_DEAD_FROZEN:
3788 mutex_lock(&slab_mutex);
3789 list_for_each_entry(s, &slab_caches, list) {
3790 local_irq_save(flags);
3791 __flush_cpu_slab(s, cpu);
3792 local_irq_restore(flags);
3793 }
3794 mutex_unlock(&slab_mutex);
3795 break;
3796 default:
3797 break;
3798 }
3799 return NOTIFY_OK;
3800 }
3801
3802 static struct notifier_block __cpuinitdata slab_notifier = {
3803 .notifier_call = slab_cpuup_callback
3804 };
3805
3806 #endif
3807
3808 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3809 {
3810 struct kmem_cache *s;
3811 void *ret;
3812
3813 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3814 return kmalloc_large(size, gfpflags);
3815
3816 s = kmalloc_slab(size, gfpflags);
3817
3818 if (unlikely(ZERO_OR_NULL_PTR(s)))
3819 return s;
3820
3821 ret = slab_alloc(s, gfpflags, caller);
3822
3823 /* Honor the call site pointer we received. */
3824 trace_kmalloc(caller, ret, size, s->size, gfpflags);
3825
3826 return ret;
3827 }
3828
3829 #ifdef CONFIG_NUMA
3830 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3831 int node, unsigned long caller)
3832 {
3833 struct kmem_cache *s;
3834 void *ret;
3835
3836 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3837 ret = kmalloc_large_node(size, gfpflags, node);
3838
3839 trace_kmalloc_node(caller, ret,
3840 size, PAGE_SIZE << get_order(size),
3841 gfpflags, node);
3842
3843 return ret;
3844 }
3845
3846 s = kmalloc_slab(size, gfpflags);
3847
3848 if (unlikely(ZERO_OR_NULL_PTR(s)))
3849 return s;
3850
3851 ret = slab_alloc_node(s, gfpflags, node, caller);
3852
3853 /* Honor the call site pointer we received. */
3854 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
3855
3856 return ret;
3857 }
3858 #endif
3859
3860 #ifdef CONFIG_SYSFS
3861 static int count_inuse(struct page *page)
3862 {
3863 return page->inuse;
3864 }
3865
3866 static int count_total(struct page *page)
3867 {
3868 return page->objects;
3869 }
3870 #endif
3871
3872 #ifdef CONFIG_SLUB_DEBUG
3873 static int validate_slab(struct kmem_cache *s, struct page *page,
3874 unsigned long *map)
3875 {
3876 void *p;
3877 void *addr = page_address(page);
3878
3879 if (!check_slab(s, page) ||
3880 !on_freelist(s, page, NULL))
3881 return 0;
3882
3883 /* Now we know that a valid freelist exists */
3884 bitmap_zero(map, page->objects);
3885
3886 get_map(s, page, map);
3887 for_each_object(p, s, addr, page->objects) {
3888 if (test_bit(slab_index(p, s, addr), map))
3889 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
3890 return 0;
3891 }
3892
3893 for_each_object(p, s, addr, page->objects)
3894 if (!test_bit(slab_index(p, s, addr), map))
3895 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
3896 return 0;
3897 return 1;
3898 }
3899
3900 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3901 unsigned long *map)
3902 {
3903 slab_lock(page);
3904 validate_slab(s, page, map);
3905 slab_unlock(page);
3906 }
3907
3908 static int validate_slab_node(struct kmem_cache *s,
3909 struct kmem_cache_node *n, unsigned long *map)
3910 {
3911 unsigned long count = 0;
3912 struct page *page;
3913 unsigned long flags;
3914
3915 spin_lock_irqsave(&n->list_lock, flags);
3916
3917 list_for_each_entry(page, &n->partial, lru) {
3918 validate_slab_slab(s, page, map);
3919 count++;
3920 }
3921 if (count != n->nr_partial)
3922 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3923 "counter=%ld\n", s->name, count, n->nr_partial);
3924
3925 if (!(s->flags & SLAB_STORE_USER))
3926 goto out;
3927
3928 list_for_each_entry(page, &n->full, lru) {
3929 validate_slab_slab(s, page, map);
3930 count++;
3931 }
3932 if (count != atomic_long_read(&n->nr_slabs))
3933 printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3934 "counter=%ld\n", s->name, count,
3935 atomic_long_read(&n->nr_slabs));
3936
3937 out:
3938 spin_unlock_irqrestore(&n->list_lock, flags);
3939 return count;
3940 }
3941
3942 static long validate_slab_cache(struct kmem_cache *s)
3943 {
3944 int node;
3945 unsigned long count = 0;
3946 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
3947 sizeof(unsigned long), GFP_KERNEL);
3948
3949 if (!map)
3950 return -ENOMEM;
3951
3952 flush_all(s);
3953 for_each_node_state(node, N_NORMAL_MEMORY) {
3954 struct kmem_cache_node *n = get_node(s, node);
3955
3956 count += validate_slab_node(s, n, map);
3957 }
3958 kfree(map);
3959 return count;
3960 }
3961 /*
3962 * Generate lists of code addresses where slabcache objects are allocated
3963 * and freed.
3964 */
3965
3966 struct location {
3967 unsigned long count;
3968 unsigned long addr;
3969 long long sum_time;
3970 long min_time;
3971 long max_time;
3972 long min_pid;
3973 long max_pid;
3974 DECLARE_BITMAP(cpus, NR_CPUS);
3975 nodemask_t nodes;
3976 };
3977
3978 struct loc_track {
3979 unsigned long max;
3980 unsigned long count;
3981 struct location *loc;
3982 };
3983
3984 static void free_loc_track(struct loc_track *t)
3985 {
3986 if (t->max)
3987 free_pages((unsigned long)t->loc,
3988 get_order(sizeof(struct location) * t->max));
3989 }
3990
3991 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
3992 {
3993 struct location *l;
3994 int order;
3995
3996 order = get_order(sizeof(struct location) * max);
3997
3998 l = (void *)__get_free_pages(flags, order);
3999 if (!l)
4000 return 0;
4001
4002 if (t->count) {
4003 memcpy(l, t->loc, sizeof(struct location) * t->count);
4004 free_loc_track(t);
4005 }
4006 t->max = max;
4007 t->loc = l;
4008 return 1;
4009 }
4010
4011 static int add_location(struct loc_track *t, struct kmem_cache *s,
4012 const struct track *track)
4013 {
4014 long start, end, pos;
4015 struct location *l;
4016 unsigned long caddr;
4017 unsigned long age = jiffies - track->when;
4018
4019 start = -1;
4020 end = t->count;
4021
4022 for ( ; ; ) {
4023 pos = start + (end - start + 1) / 2;
4024
4025 /*
4026 * There is nothing at "end". If we end up there
4027 * we need to insert the new element before end.
4028 */
4029 if (pos == end)
4030 break;
4031
4032 caddr = t->loc[pos].addr;
4033 if (track->addr == caddr) {
4034
4035 l = &t->loc[pos];
4036 l->count++;
4037 if (track->when) {
4038 l->sum_time += age;
4039 if (age < l->min_time)
4040 l->min_time = age;
4041 if (age > l->max_time)
4042 l->max_time = age;
4043
4044 if (track->pid < l->min_pid)
4045 l->min_pid = track->pid;
4046 if (track->pid > l->max_pid)
4047 l->max_pid = track->pid;
4048
4049 cpumask_set_cpu(track->cpu,
4050 to_cpumask(l->cpus));
4051 }
4052 node_set(page_to_nid(virt_to_page(track)), l->nodes);
4053 return 1;
4054 }
4055
4056 if (track->addr < caddr)
4057 end = pos;
4058 else
4059 start = pos;
4060 }
4061
4062 /*
4063 * Not found. Insert new tracking element.
4064 */
4065 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4066 return 0;
4067
4068 l = t->loc + pos;
4069 if (pos < t->count)
4070 memmove(l + 1, l,
4071 (t->count - pos) * sizeof(struct location));
4072 t->count++;
4073 l->count = 1;
4074 l->addr = track->addr;
4075 l->sum_time = age;
4076 l->min_time = age;
4077 l->max_time = age;
4078 l->min_pid = track->pid;
4079 l->max_pid = track->pid;
4080 cpumask_clear(to_cpumask(l->cpus));
4081 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4082 nodes_clear(l->nodes);
4083 node_set(page_to_nid(virt_to_page(track)), l->nodes);
4084 return 1;
4085 }
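/*
 * A rough illustration of the bisection above, assuming a table that
 * already holds the sorted addresses { 0x10, 0x20, 0x40 }:
 *
 * - track->addr == 0x20: pos settles on the matching slot 1 and only
 *   the count/age/pid/cpu statistics of that entry are updated.
 *
 * - track->addr == 0x30: the loop terminates with pos == end == 2, the
 *   tail entry 0x40 is shifted up by the memmove() and the new element
 *   is written at slot 2, keeping the array sorted by address.
 */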
4086
4087 static void process_slab(struct loc_track *t, struct kmem_cache *s,
4088 struct page *page, enum track_item alloc,
4089 unsigned long *map)
4090 {
4091 void *addr = page_address(page);
4092 void *p;
4093
4094 bitmap_zero(map, page->objects);
4095 get_map(s, page, map);
4096
4097 for_each_object(p, s, addr, page->objects)
4098 if (!test_bit(slab_index(p, s, addr), map))
4099 add_location(t, s, get_track(s, p, alloc));
4100 }
4101
4102 static int list_locations(struct kmem_cache *s, char *buf,
4103 enum track_item alloc)
4104 {
4105 int len = 0;
4106 unsigned long i;
4107 struct loc_track t = { 0, 0, NULL };
4108 int node;
4109 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4110 sizeof(unsigned long), GFP_KERNEL);
4111
4112 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4113 GFP_TEMPORARY)) {
4114 kfree(map);
4115 return sprintf(buf, "Out of memory\n");
4116 }
4117 /* Push back cpu slabs */
4118 flush_all(s);
4119
4120 for_each_node_state(node, N_NORMAL_MEMORY) {
4121 struct kmem_cache_node *n = get_node(s, node);
4122 unsigned long flags;
4123 struct page *page;
4124
4125 if (!atomic_long_read(&n->nr_slabs))
4126 continue;
4127
4128 spin_lock_irqsave(&n->list_lock, flags);
4129 list_for_each_entry(page, &n->partial, lru)
4130 process_slab(&t, s, page, alloc, map);
4131 list_for_each_entry(page, &n->full, lru)
4132 process_slab(&t, s, page, alloc, map);
4133 spin_unlock_irqrestore(&n->list_lock, flags);
4134 }
4135
4136 for (i = 0; i < t.count; i++) {
4137 struct location *l = &t.loc[i];
4138
4139 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4140 break;
4141 len += sprintf(buf + len, "%7ld ", l->count);
4142
4143 if (l->addr)
4144 len += sprintf(buf + len, "%pS", (void *)l->addr);
4145 else
4146 len += sprintf(buf + len, "<not-available>");
4147
4148 if (l->sum_time != l->min_time) {
4149 len += sprintf(buf + len, " age=%ld/%ld/%ld",
4150 l->min_time,
4151 (long)div_u64(l->sum_time, l->count),
4152 l->max_time);
4153 } else
4154 len += sprintf(buf + len, " age=%ld",
4155 l->min_time);
4156
4157 if (l->min_pid != l->max_pid)
4158 len += sprintf(buf + len, " pid=%ld-%ld",
4159 l->min_pid, l->max_pid);
4160 else
4161 len += sprintf(buf + len, " pid=%ld",
4162 l->min_pid);
4163
4164 if (num_online_cpus() > 1 &&
4165 !cpumask_empty(to_cpumask(l->cpus)) &&
4166 len < PAGE_SIZE - 60) {
4167 len += sprintf(buf + len, " cpus=");
4168 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
4169 to_cpumask(l->cpus));
4170 }
4171
4172 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4173 len < PAGE_SIZE - 60) {
4174 len += sprintf(buf + len, " nodes=");
4175 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
4176 l->nodes);
4177 }
4178
4179 len += sprintf(buf + len, "\n");
4180 }
4181
4182 free_loc_track(&t);
4183 kfree(map);
4184 if (!t.count)
4185 len += sprintf(buf, "No data\n");
4186 return len;
4187 }
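/*
 * Each location is emitted on one line of the sysfs buffer; an
 * illustrative example (all values made up):
 *
 *	    123 kmem_cache_alloc+0x5a/0x130 age=4/2000/12000 pid=1-512 cpus=0-3 nodes=0
 *
 * i.e. hit count, call site, min/avg/max object age in jiffies, the
 * pid range and the recorded cpu/node masks.
 */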
4188 #endif
4189
4190 #ifdef SLUB_RESILIENCY_TEST
4191 static void resiliency_test(void)
4192 {
4193 u8 *p;
4194
4195 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
4196
4197 printk(KERN_ERR "SLUB resiliency testing\n");
4198 printk(KERN_ERR "-----------------------\n");
4199 printk(KERN_ERR "A. Corruption after allocation\n");
4200
4201 p = kzalloc(16, GFP_KERNEL);
4202 p[16] = 0x12;
4203 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
4204 " 0x12->0x%p\n\n", p + 16);
4205
4206 validate_slab_cache(kmalloc_caches[4]);
4207
4208 /* Hmmm... The next two are dangerous */
4209 p = kzalloc(32, GFP_KERNEL);
4210 p[32 + sizeof(void *)] = 0x34;
4211 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
4212 " 0x34 -> -0x%p\n", p);
4213 printk(KERN_ERR
4214 "If allocated object is overwritten then not detectable\n\n");
4215
4216 validate_slab_cache(kmalloc_caches[5]);
4217 p = kzalloc(64, GFP_KERNEL);
4218 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4219 *p = 0x56;
4220 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4221 p);
4222 printk(KERN_ERR
4223 "If allocated object is overwritten then not detectable\n\n");
4224 validate_slab_cache(kmalloc_caches[6]);
4225
4226 printk(KERN_ERR "\nB. Corruption after free\n");
4227 p = kzalloc(128, GFP_KERNEL);
4228 kfree(p);
4229 *p = 0x78;
4230 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4231 validate_slab_cache(kmalloc_caches[7]);
4232
4233 p = kzalloc(256, GFP_KERNEL);
4234 kfree(p);
4235 p[50] = 0x9a;
4236 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
4237 p);
4238 validate_slab_cache(kmalloc_caches[8]);
4239
4240 p = kzalloc(512, GFP_KERNEL);
4241 kfree(p);
4242 p[512] = 0xab;
4243 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4244 validate_slab_cache(kmalloc_caches[9]);
4245 }
4246 #else
4247 #ifdef CONFIG_SYSFS
4248 static void resiliency_test(void) {}
4249 #endif
4250 #endif
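/*
 * The resiliency test is compile-time only: when SLUB_RESILIENCY_TEST
 * is defined (e.g. by adding a #define near the top of this file), it
 * is run from slab_sysfs_init(), deliberately corrupting objects and
 * then validating the kmalloc caches to exercise the detection paths.
 */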
4251
4252 #ifdef CONFIG_SYSFS
4253 enum slab_stat_type {
4254 SL_ALL, /* All slabs */
4255 SL_PARTIAL, /* Only partially allocated slabs */
4256 SL_CPU, /* Only slabs used for cpu caches */
4257 SL_OBJECTS, /* Determine allocated objects not slabs */
4258 SL_TOTAL /* Determine object capacity not slabs */
4259 };
4260
4261 #define SO_ALL (1 << SL_ALL)
4262 #define SO_PARTIAL (1 << SL_PARTIAL)
4263 #define SO_CPU (1 << SL_CPU)
4264 #define SO_OBJECTS (1 << SL_OBJECTS)
4265 #define SO_TOTAL (1 << SL_TOTAL)
4266
4267 static ssize_t show_slab_objects(struct kmem_cache *s,
4268 char *buf, unsigned long flags)
4269 {
4270 unsigned long total = 0;
4271 int node;
4272 int x;
4273 unsigned long *nodes;
4274 unsigned long *per_cpu;
4275
4276 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
4277 if (!nodes)
4278 return -ENOMEM;
4279 per_cpu = nodes + nr_node_ids;
4280
4281 if (flags & SO_CPU) {
4282 int cpu;
4283
4284 for_each_possible_cpu(cpu) {
4285 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
4286 int node;
4287 struct page *page;
4288
4289 page = ACCESS_ONCE(c->page);
4290 if (!page)
4291 continue;
4292
4293 node = page_to_nid(page);
4294 if (flags & SO_TOTAL)
4295 x = page->objects;
4296 else if (flags & SO_OBJECTS)
4297 x = page->inuse;
4298 else
4299 x = 1;
4300
4301 total += x;
4302 nodes[node] += x;
4303
4304 page = ACCESS_ONCE(c->partial);
4305 if (page) {
4306 x = page->pobjects;
4307 total += x;
4308 nodes[node] += x;
4309 }
4310
4311 per_cpu[node]++;
4312 }
4313 }
4314
4315 lock_memory_hotplug();
4316 #ifdef CONFIG_SLUB_DEBUG
4317 if (flags & SO_ALL) {
4318 for_each_node_state(node, N_NORMAL_MEMORY) {
4319 struct kmem_cache_node *n = get_node(s, node);
4320
4321 if (flags & SO_TOTAL)
4322 x = atomic_long_read(&n->total_objects);
4323 else if (flags & SO_OBJECTS)
4324 x = atomic_long_read(&n->total_objects) -
4325 count_partial(n, count_free);
4326
4327 else
4328 x = atomic_long_read(&n->nr_slabs);
4329 total += x;
4330 nodes[node] += x;
4331 }
4332
4333 } else
4334 #endif
4335 if (flags & SO_PARTIAL) {
4336 for_each_node_state(node, N_NORMAL_MEMORY) {
4337 struct kmem_cache_node *n = get_node(s, node);
4338
4339 if (flags & SO_TOTAL)
4340 x = count_partial(n, count_total);
4341 else if (flags & SO_OBJECTS)
4342 x = count_partial(n, count_inuse);
4343 else
4344 x = n->nr_partial;
4345 total += x;
4346 nodes[node] += x;
4347 }
4348 }
4349 x = sprintf(buf, "%lu", total);
4350 #ifdef CONFIG_NUMA
4351 for_each_node_state(node, N_NORMAL_MEMORY)
4352 if (nodes[node])
4353 x += sprintf(buf + x, " N%d=%lu",
4354 node, nodes[node]);
4355 #endif
4356 unlock_memory_hotplug();
4357 kfree(nodes);
4358 return x + sprintf(buf + x, "\n");
4359 }
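/*
 * The buffer produced above is a single line holding the total followed
 * by per-node breakdowns on NUMA builds, e.g. (illustrative values):
 *
 *	4096 N0=2048 N1=2048
 */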
4360
4361 #ifdef CONFIG_SLUB_DEBUG
4362 static int any_slab_objects(struct kmem_cache *s)
4363 {
4364 int node;
4365
4366 for_each_online_node(node) {
4367 struct kmem_cache_node *n = get_node(s, node);
4368
4369 if (!n)
4370 continue;
4371
4372 if (atomic_long_read(&n->total_objects))
4373 return 1;
4374 }
4375 return 0;
4376 }
4377 #endif
4378
4379 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4380 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
4381
4382 struct slab_attribute {
4383 struct attribute attr;
4384 ssize_t (*show)(struct kmem_cache *s, char *buf);
4385 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4386 };
4387
4388 #define SLAB_ATTR_RO(_name) \
4389 static struct slab_attribute _name##_attr = \
4390 __ATTR(_name, 0400, _name##_show, NULL)
4391
4392 #define SLAB_ATTR(_name) \
4393 static struct slab_attribute _name##_attr = \
4394 __ATTR(_name, 0600, _name##_show, _name##_store)
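/*
 * As an example of the expansion, SLAB_ATTR_RO(slab_size) below becomes
 *
 *	static struct slab_attribute slab_size_attr =
 *		__ATTR(slab_size, 0400, slab_size_show, NULL);
 *
 * i.e. a read-only sysfs file named "slab_size" backed by
 * slab_size_show().
 */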
4395
4396 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4397 {
4398 return sprintf(buf, "%d\n", s->size);
4399 }
4400 SLAB_ATTR_RO(slab_size);
4401
4402 static ssize_t align_show(struct kmem_cache *s, char *buf)
4403 {
4404 return sprintf(buf, "%d\n", s->align);
4405 }
4406 SLAB_ATTR_RO(align);
4407
4408 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4409 {
4410 return sprintf(buf, "%d\n", s->object_size);
4411 }
4412 SLAB_ATTR_RO(object_size);
4413
4414 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4415 {
4416 return sprintf(buf, "%d\n", oo_objects(s->oo));
4417 }
4418 SLAB_ATTR_RO(objs_per_slab);
4419
4420 static ssize_t order_store(struct kmem_cache *s,
4421 const char *buf, size_t length)
4422 {
4423 unsigned long order;
4424 int err;
4425
4426 err = strict_strtoul(buf, 10, &order);
4427 if (err)
4428 return err;
4429
4430 if (order > slub_max_order || order < slub_min_order)
4431 return -EINVAL;
4432
4433 calculate_sizes(s, order);
4434 return length;
4435 }
4436
4437 static ssize_t order_show(struct kmem_cache *s, char *buf)
4438 {
4439 return sprintf(buf, "%d\n", oo_order(s->oo));
4440 }
4441 SLAB_ATTR(order);
4442
4443 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4444 {
4445 return sprintf(buf, "%lu\n", s->min_partial);
4446 }
4447
4448 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4449 size_t length)
4450 {
4451 unsigned long min;
4452 int err;
4453
4454 err = strict_strtoul(buf, 10, &min);
4455 if (err)
4456 return err;
4457
4458 set_min_partial(s, min);
4459 return length;
4460 }
4461 SLAB_ATTR(min_partial);
4462
4463 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4464 {
4465 return sprintf(buf, "%u\n", s->cpu_partial);
4466 }
4467
4468 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4469 size_t length)
4470 {
4471 unsigned long objects;
4472 int err;
4473
4474 err = strict_strtoul(buf, 10, &objects);
4475 if (err)
4476 return err;
4477 if (objects && !kmem_cache_has_cpu_partial(s))
4478 return -EINVAL;
4479
4480 s->cpu_partial = objects;
4481 flush_all(s);
4482 return length;
4483 }
4484 SLAB_ATTR(cpu_partial);
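/*
 * Writing this attribute adjusts the per cpu partial limit at runtime;
 * the flush_all() pushes back existing cpu slabs so the new limit takes
 * effect immediately. Illustrative usage (cache name is an example):
 *
 *	# echo 0 > /sys/kernel/slab/kmalloc-64/cpu_partial
 */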
4485
4486 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4487 {
4488 if (!s->ctor)
4489 return 0;
4490 return sprintf(buf, "%pS\n", s->ctor);
4491 }
4492 SLAB_ATTR_RO(ctor);
4493
4494 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4495 {
4496 return sprintf(buf, "%d\n", s->refcount - 1);
4497 }
4498 SLAB_ATTR_RO(aliases);
4499
4500 static ssize_t partial_show(struct kmem_cache *s, char *buf)
4501 {
4502 return show_slab_objects(s, buf, SO_PARTIAL);
4503 }
4504 SLAB_ATTR_RO(partial);
4505
4506 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4507 {
4508 return show_slab_objects(s, buf, SO_CPU);
4509 }
4510 SLAB_ATTR_RO(cpu_slabs);
4511
4512 static ssize_t objects_show(struct kmem_cache *s, char *buf)
4513 {
4514 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
4515 }
4516 SLAB_ATTR_RO(objects);
4517
4518 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4519 {
4520 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4521 }
4522 SLAB_ATTR_RO(objects_partial);
4523
4524 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
4525 {
4526 int objects = 0;
4527 int pages = 0;
4528 int cpu;
4529 int len;
4530
4531 for_each_online_cpu(cpu) {
4532 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4533
4534 if (page) {
4535 pages += page->pages;
4536 objects += page->pobjects;
4537 }
4538 }
4539
4540 len = sprintf(buf, "%d(%d)", objects, pages);
4541
4542 #ifdef CONFIG_SMP
4543 for_each_online_cpu(cpu) {
4544 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4545
4546 if (page && len < PAGE_SIZE - 20)
4547 len += sprintf(buf + len, " C%d=%d(%d)", cpu,
4548 page->pobjects, page->pages);
4549 }
4550 #endif
4551 return len + sprintf(buf + len, "\n");
4552 }
4553 SLAB_ATTR_RO(slabs_cpu_partial);
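/*
 * Example of the format produced above (values illustrative): the
 * aggregate "objects(pages)" followed, on SMP, by the non-empty per
 * cpu partial lists:
 *
 *	153(9) C0=51(3) C1=102(6)
 */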
4554
4555 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4556 {
4557 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4558 }
4559
4560 static ssize_t reclaim_account_store(struct kmem_cache *s,
4561 const char *buf, size_t length)
4562 {
4563 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4564 if (buf[0] == '1')
4565 s->flags |= SLAB_RECLAIM_ACCOUNT;
4566 return length;
4567 }
4568 SLAB_ATTR(reclaim_account);
4569
4570 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4571 {
4572 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4573 }
4574 SLAB_ATTR_RO(hwcache_align);
4575
4576 #ifdef CONFIG_ZONE_DMA
4577 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4578 {
4579 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4580 }
4581 SLAB_ATTR_RO(cache_dma);
4582 #endif
4583
4584 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4585 {
4586 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4587 }
4588 SLAB_ATTR_RO(destroy_by_rcu);
4589
4590 static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4591 {
4592 return sprintf(buf, "%d\n", s->reserved);
4593 }
4594 SLAB_ATTR_RO(reserved);
4595
4596 #ifdef CONFIG_SLUB_DEBUG
4597 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4598 {
4599 return show_slab_objects(s, buf, SO_ALL);
4600 }
4601 SLAB_ATTR_RO(slabs);
4602
4603 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4604 {
4605 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4606 }
4607 SLAB_ATTR_RO(total_objects);
4608
4609 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4610 {
4611 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
4612 }
4613
4614 static ssize_t sanity_checks_store(struct kmem_cache *s,
4615 const char *buf, size_t length)
4616 {
4617 s->flags &= ~SLAB_DEBUG_FREE;
4618 if (buf[0] == '1') {
4619 s->flags &= ~__CMPXCHG_DOUBLE;
4620 s->flags |= SLAB_DEBUG_FREE;
4621 }
4622 return length;
4623 }
4624 SLAB_ATTR(sanity_checks);
4625
4626 static ssize_t trace_show(struct kmem_cache *s, char *buf)
4627 {
4628 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4629 }
4630
4631 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4632 size_t length)
4633 {
4634 s->flags &= ~SLAB_TRACE;
4635 if (buf[0] == '1') {
4636 s->flags &= ~__CMPXCHG_DOUBLE;
4637 s->flags |= SLAB_TRACE;
4638 }
4639 return length;
4640 }
4641 SLAB_ATTR(trace);
4642
4643 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4644 {
4645 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4646 }
4647
4648 static ssize_t red_zone_store(struct kmem_cache *s,
4649 const char *buf, size_t length)
4650 {
4651 if (any_slab_objects(s))
4652 return -EBUSY;
4653
4654 s->flags &= ~SLAB_RED_ZONE;
4655 if (buf[0] == '1') {
4656 s->flags &= ~__CMPXCHG_DOUBLE;
4657 s->flags |= SLAB_RED_ZONE;
4658 }
4659 calculate_sizes(s, -1);
4660 return length;
4661 }
4662 SLAB_ATTR(red_zone);
4663
4664 static ssize_t poison_show(struct kmem_cache *s, char *buf)
4665 {
4666 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4667 }
4668
4669 static ssize_t poison_store(struct kmem_cache *s,
4670 const char *buf, size_t length)
4671 {
4672 if (any_slab_objects(s))
4673 return -EBUSY;
4674
4675 s->flags &= ~SLAB_POISON;
4676 if (buf[0] == '1') {
4677 s->flags &= ~__CMPXCHG_DOUBLE;
4678 s->flags |= SLAB_POISON;
4679 }
4680 calculate_sizes(s, -1);
4681 return length;
4682 }
4683 SLAB_ATTR(poison);
4684
4685 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4686 {
4687 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4688 }
4689
4690 static ssize_t store_user_store(struct kmem_cache *s,
4691 const char *buf, size_t length)
4692 {
4693 if (any_slab_objects(s))
4694 return -EBUSY;
4695
4696 s->flags &= ~SLAB_STORE_USER;
4697 if (buf[0] == '1') {
4698 s->flags &= ~__CMPXCHG_DOUBLE;
4699 s->flags |= SLAB_STORE_USER;
4700 }
4701 calculate_sizes(s, -1);
4702 return length;
4703 }
4704 SLAB_ATTR(store_user);
4705
4706 static ssize_t validate_show(struct kmem_cache *s, char *buf)
4707 {
4708 return 0;
4709 }
4710
4711 static ssize_t validate_store(struct kmem_cache *s,
4712 const char *buf, size_t length)
4713 {
4714 int ret = -EINVAL;
4715
4716 if (buf[0] == '1') {
4717 ret = validate_slab_cache(s);
4718 if (ret >= 0)
4719 ret = length;
4720 }
4721 return ret;
4722 }
4723 SLAB_ATTR(validate);
4724
4725 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4726 {
4727 if (!(s->flags & SLAB_STORE_USER))
4728 return -ENOSYS;
4729 return list_locations(s, buf, TRACK_ALLOC);
4730 }
4731 SLAB_ATTR_RO(alloc_calls);
4732
4733 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4734 {
4735 if (!(s->flags & SLAB_STORE_USER))
4736 return -ENOSYS;
4737 return list_locations(s, buf, TRACK_FREE);
4738 }
4739 SLAB_ATTR_RO(free_calls);
4740 #endif /* CONFIG_SLUB_DEBUG */
4741
4742 #ifdef CONFIG_FAILSLAB
4743 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4744 {
4745 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4746 }
4747
4748 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4749 size_t length)
4750 {
4751 s->flags &= ~SLAB_FAILSLAB;
4752 if (buf[0] == '1')
4753 s->flags |= SLAB_FAILSLAB;
4754 return length;
4755 }
4756 SLAB_ATTR(failslab);
4757 #endif
4758
4759 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4760 {
4761 return 0;
4762 }
4763
4764 static ssize_t shrink_store(struct kmem_cache *s,
4765 const char *buf, size_t length)
4766 {
4767 if (buf[0] == '1') {
4768 int rc = kmem_cache_shrink(s);
4769
4770 if (rc)
4771 return rc;
4772 } else
4773 return -EINVAL;
4774 return length;
4775 }
4776 SLAB_ATTR(shrink);
4777
4778 #ifdef CONFIG_NUMA
4779 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
4780 {
4781 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
4782 }
4783
4784 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
4785 const char *buf, size_t length)
4786 {
4787 unsigned long ratio;
4788 int err;
4789
4790 err = strict_strtoul(buf, 10, &ratio);
4791 if (err)
4792 return err;
4793
4794 if (ratio <= 100)
4795 s->remote_node_defrag_ratio = ratio * 10;
4796
4797 return length;
4798 }
4799 SLAB_ATTR(remote_node_defrag_ratio);
4800 #endif
4801
4802 #ifdef CONFIG_SLUB_STATS
4803 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4804 {
4805 unsigned long sum = 0;
4806 int cpu;
4807 int len;
4808 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
4809
4810 if (!data)
4811 return -ENOMEM;
4812
4813 for_each_online_cpu(cpu) {
4814 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
4815
4816 data[cpu] = x;
4817 sum += x;
4818 }
4819
4820 len = sprintf(buf, "%lu", sum);
4821
4822 #ifdef CONFIG_SMP
4823 for_each_online_cpu(cpu) {
4824 if (data[cpu] && len < PAGE_SIZE - 20)
4825 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
4826 }
4827 #endif
4828 kfree(data);
4829 return len + sprintf(buf + len, "\n");
4830 }
4831
4832 static void clear_stat(struct kmem_cache *s, enum stat_item si)
4833 {
4834 int cpu;
4835
4836 for_each_online_cpu(cpu)
4837 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
4838 }
4839
4840 #define STAT_ATTR(si, text) \
4841 static ssize_t text##_show(struct kmem_cache *s, char *buf) \
4842 { \
4843 return show_stat(s, buf, si); \
4844 } \
4845 static ssize_t text##_store(struct kmem_cache *s, \
4846 const char *buf, size_t length) \
4847 { \
4848 if (buf[0] != '0') \
4849 return -EINVAL; \
4850 clear_stat(s, si); \
4851 return length; \
4852 } \
4853 SLAB_ATTR(text);
4854
4855 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4856 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4857 STAT_ATTR(FREE_FASTPATH, free_fastpath);
4858 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4859 STAT_ATTR(FREE_FROZEN, free_frozen);
4860 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4861 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4862 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4863 STAT_ATTR(ALLOC_SLAB, alloc_slab);
4864 STAT_ATTR(ALLOC_REFILL, alloc_refill);
4865 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
4866 STAT_ATTR(FREE_SLAB, free_slab);
4867 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4868 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4869 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4870 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4871 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4872 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
4873 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
4874 STAT_ATTR(ORDER_FALLBACK, order_fallback);
4875 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
4876 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
4877 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
4878 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
4879 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
4880 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
4881 #endif
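/*
 * With CONFIG_SLUB_STATS each counter above becomes a sysfs file that
 * prints the sum plus a per cpu breakdown and is reset by writing '0'.
 * Illustrative usage (cache name and numbers are made up):
 *
 *	# cat /sys/kernel/slab/kmalloc-64/alloc_fastpath
 *	81412 C0=40215 C1=41197
 *	# echo 0 > /sys/kernel/slab/kmalloc-64/alloc_fastpath
 */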
4882
4883 static struct attribute *slab_attrs[] = {
4884 &slab_size_attr.attr,
4885 &object_size_attr.attr,
4886 &objs_per_slab_attr.attr,
4887 &order_attr.attr,
4888 &min_partial_attr.attr,
4889 &cpu_partial_attr.attr,
4890 &objects_attr.attr,
4891 &objects_partial_attr.attr,
4892 &partial_attr.attr,
4893 &cpu_slabs_attr.attr,
4894 &ctor_attr.attr,
4895 &aliases_attr.attr,
4896 &align_attr.attr,
4897 &hwcache_align_attr.attr,
4898 &reclaim_account_attr.attr,
4899 &destroy_by_rcu_attr.attr,
4900 &shrink_attr.attr,
4901 &reserved_attr.attr,
4902 &slabs_cpu_partial_attr.attr,
4903 #ifdef CONFIG_SLUB_DEBUG
4904 &total_objects_attr.attr,
4905 &slabs_attr.attr,
4906 &sanity_checks_attr.attr,
4907 &trace_attr.attr,
4908 &red_zone_attr.attr,
4909 &poison_attr.attr,
4910 &store_user_attr.attr,
4911 &validate_attr.attr,
4912 &alloc_calls_attr.attr,
4913 &free_calls_attr.attr,
4914 #endif
4915 #ifdef CONFIG_ZONE_DMA
4916 &cache_dma_attr.attr,
4917 #endif
4918 #ifdef CONFIG_NUMA
4919 &remote_node_defrag_ratio_attr.attr,
4920 #endif
4921 #ifdef CONFIG_SLUB_STATS
4922 &alloc_fastpath_attr.attr,
4923 &alloc_slowpath_attr.attr,
4924 &free_fastpath_attr.attr,
4925 &free_slowpath_attr.attr,
4926 &free_frozen_attr.attr,
4927 &free_add_partial_attr.attr,
4928 &free_remove_partial_attr.attr,
4929 &alloc_from_partial_attr.attr,
4930 &alloc_slab_attr.attr,
4931 &alloc_refill_attr.attr,
4932 &alloc_node_mismatch_attr.attr,
4933 &free_slab_attr.attr,
4934 &cpuslab_flush_attr.attr,
4935 &deactivate_full_attr.attr,
4936 &deactivate_empty_attr.attr,
4937 &deactivate_to_head_attr.attr,
4938 &deactivate_to_tail_attr.attr,
4939 &deactivate_remote_frees_attr.attr,
4940 &deactivate_bypass_attr.attr,
4941 &order_fallback_attr.attr,
4942 &cmpxchg_double_fail_attr.attr,
4943 &cmpxchg_double_cpu_fail_attr.attr,
4944 &cpu_partial_alloc_attr.attr,
4945 &cpu_partial_free_attr.attr,
4946 &cpu_partial_node_attr.attr,
4947 &cpu_partial_drain_attr.attr,
4948 #endif
4949 #ifdef CONFIG_FAILSLAB
4950 &failslab_attr.attr,
4951 #endif
4952
4953 NULL
4954 };
4955
4956 static struct attribute_group slab_attr_group = {
4957 .attrs = slab_attrs,
4958 };
4959
4960 static ssize_t slab_attr_show(struct kobject *kobj,
4961 struct attribute *attr,
4962 char *buf)
4963 {
4964 struct slab_attribute *attribute;
4965 struct kmem_cache *s;
4966 int err;
4967
4968 attribute = to_slab_attr(attr);
4969 s = to_slab(kobj);
4970
4971 if (!attribute->show)
4972 return -EIO;
4973
4974 err = attribute->show(s, buf);
4975
4976 return err;
4977 }
4978
4979 static ssize_t slab_attr_store(struct kobject *kobj,
4980 struct attribute *attr,
4981 const char *buf, size_t len)
4982 {
4983 struct slab_attribute *attribute;
4984 struct kmem_cache *s;
4985 int err;
4986
4987 attribute = to_slab_attr(attr);
4988 s = to_slab(kobj);
4989
4990 if (!attribute->store)
4991 return -EIO;
4992
4993 err = attribute->store(s, buf, len);
4994 #ifdef CONFIG_MEMCG_KMEM
4995 if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
4996 int i;
4997
4998 mutex_lock(&slab_mutex);
4999 if (s->max_attr_size < len)
5000 s->max_attr_size = len;
5001
5002 /*
5003 * This is a best effort propagation, so this function's return
5004 * value will be determined by the parent cache only. This is
5005 * basically because not all attributes will have a well
5006 * defined semantics for rollbacks - most of the actions will
5007 * have permanent effects.
5008 *
5009 * Returning the error value of any of the children that fail
5010 * is not well defined, in the sense that users seeing the
5011 * error code won't be able to tell anything about the state of
5012 * the cache.
5013 *
5014 * Only returning the error code for the parent cache at least
5015 * has well defined semantics. The cache being written to
5016 * directly either failed or succeeded; on success we then loop
5017 * through the descendants with best-effort propagation.
5018 */
5019 for_each_memcg_cache_index(i) {
5020 struct kmem_cache *c = cache_from_memcg(s, i);
5021 if (c)
5022 attribute->store(c, buf, len);
5023 }
5024 mutex_unlock(&slab_mutex);
5025 }
5026 #endif
5027 return err;
5028 }
5029
5030 static void memcg_propagate_slab_attrs(struct kmem_cache *s)
5031 {
5032 #ifdef CONFIG_MEMCG_KMEM
5033 int i;
5034 char *buffer = NULL;
5035
5036 if (!is_root_cache(s))
5037 return;
5038
5039 /*
5040 * This means the cache has had no attributes written to it.
5041 * Therefore there is no point in copying default values around.
5042 */
5043 if (!s->max_attr_size)
5044 return;
5045
5046 for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
5047 char mbuf[64];
5048 char *buf;
5049 struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
5050
5051 if (!attr || !attr->store || !attr->show)
5052 continue;
5053
5054 /*
5055 * It is really bad that we have to allocate here, so we will
5056 * do it only as a fallback. If we actually allocate, though,
5057 * we can just use the allocated buffer until the end.
5058 *
5059 * Most of the slub attributes will tend to be very small in
5060 * size, but sysfs allows buffers up to a page, so page-sized
5061 * values can theoretically occur.
5062 */
5063 if (buffer)
5064 buf = buffer;
5065 else if (s->max_attr_size < ARRAY_SIZE(mbuf))
5066 buf = mbuf;
5067 else {
5068 buffer = (char *) get_zeroed_page(GFP_KERNEL);
5069 if (WARN_ON(!buffer))
5070 continue;
5071 buf = buffer;
5072 }
5073
5074 attr->show(s->memcg_params->root_cache, buf);
5075 attr->store(s, buf, strlen(buf));
5076 }
5077
5078 if (buffer)
5079 free_page((unsigned long)buffer);
5080 #endif
5081 }
5082
5083 static const struct sysfs_ops slab_sysfs_ops = {
5084 .show = slab_attr_show,
5085 .store = slab_attr_store,
5086 };
5087
5088 static struct kobj_type slab_ktype = {
5089 .sysfs_ops = &slab_sysfs_ops,
5090 };
5091
5092 static int uevent_filter(struct kset *kset, struct kobject *kobj)
5093 {
5094 struct kobj_type *ktype = get_ktype(kobj);
5095
5096 if (ktype == &slab_ktype)
5097 return 1;
5098 return 0;
5099 }
5100
5101 static const struct kset_uevent_ops slab_uevent_ops = {
5102 .filter = uevent_filter,
5103 };
5104
5105 static struct kset *slab_kset;
5106
5107 #define ID_STR_LENGTH 64
5108
5109 /* Create a unique string id for a slab cache:
5110 *
5111 * Format: :[flags-]size
5112 */
5113 static char *create_unique_id(struct kmem_cache *s)
5114 {
5115 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5116 char *p = name;
5117
5118 BUG_ON(!name);
5119
5120 *p++ = ':';
5121 /*
5122 * First flags affecting slabcache operations. We will only
5123 * get here for aliasable slabs so we do not need to support
5124 * too many flags. The flags here must cover all flags that
5125 * are matched during merging to guarantee that the id is
5126 * unique.
5127 */
5128 if (s->flags & SLAB_CACHE_DMA)
5129 *p++ = 'd';
5130 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5131 *p++ = 'a';
5132 if (s->flags & SLAB_DEBUG_FREE)
5133 *p++ = 'F';
5134 if (!(s->flags & SLAB_NOTRACK))
5135 *p++ = 't';
5136 if (p != name + 1)
5137 *p++ = '-';
5138 p += sprintf(p, "%07d", s->size);
5139
5140 #ifdef CONFIG_MEMCG_KMEM
5141 if (!is_root_cache(s))
5142 p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
5143 #endif
5144
5145 BUG_ON(p > name + ID_STR_LENGTH - 1);
5146 return name;
5147 }
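/*
 * As an illustrative example: a 192 byte DMA cache with sanity checks
 * enabled and tracking on (i.e. SLAB_NOTRACK clear) would get the id
 * ":dFt-0000192", while a plain tracked cache of the same size would
 * be named ":t-0000192".
 */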
5148
5149 static int sysfs_slab_add(struct kmem_cache *s)
5150 {
5151 int err;
5152 const char *name;
5153 int unmergeable = slab_unmergeable(s);
5154
5155 if (unmergeable) {
5156 /*
5157 * Slabcache can never be merged so we can use the name proper.
5158 * This is typically the case for debug situations. In that
5159 * case we can catch duplicate names easily.
5160 */
5161 sysfs_remove_link(&slab_kset->kobj, s->name);
5162 name = s->name;
5163 } else {
5164 /*
5165 * Create a unique name for the slab as a target
5166 * for the symlinks.
5167 */
5168 name = create_unique_id(s);
5169 }
5170
5171 s->kobj.kset = slab_kset;
5172 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
5173 if (err) {
5174 kobject_put(&s->kobj);
5175 return err;
5176 }
5177
5178 err = sysfs_create_group(&s->kobj, &slab_attr_group);
5179 if (err) {
5180 kobject_del(&s->kobj);
5181 kobject_put(&s->kobj);
5182 return err;
5183 }
5184 kobject_uevent(&s->kobj, KOBJ_ADD);
5185 if (!unmergeable) {
5186 /* Setup first alias */
5187 sysfs_slab_alias(s, s->name);
5188 kfree(name);
5189 }
5190 return 0;
5191 }
5192
5193 static void sysfs_slab_remove(struct kmem_cache *s)
5194 {
5195 if (slab_state < FULL)
5196 /*
5197 * Sysfs has not been setup yet so no need to remove the
5198 * cache from sysfs.
5199 */
5200 return;
5201
5202 kobject_uevent(&s->kobj, KOBJ_REMOVE);
5203 kobject_del(&s->kobj);
5204 kobject_put(&s->kobj);
5205 }
5206
5207 /*
5208 * Need to buffer aliases during bootup until sysfs becomes
5209 * available lest we lose that information.
5210 */
5211 struct saved_alias {
5212 struct kmem_cache *s;
5213 const char *name;
5214 struct saved_alias *next;
5215 };
5216
5217 static struct saved_alias *alias_list;
5218
5219 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5220 {
5221 struct saved_alias *al;
5222
5223 if (slab_state == FULL) {
5224 /*
5225 * If we have a leftover link then remove it.
5226 */
5227 sysfs_remove_link(&slab_kset->kobj, name);
5228 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5229 }
5230
5231 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5232 if (!al)
5233 return -ENOMEM;
5234
5235 al->s = s;
5236 al->name = name;
5237 al->next = alias_list;
5238 alias_list = al;
5239 return 0;
5240 }
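/*
 * For mergeable caches the net effect in sysfs is one kobject named
 * after the unique id plus one symlink per alias, e.g. (illustrative):
 *
 *	/sys/kernel/slab/kmalloc-192 -> :t-0000192
 */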
5241
5242 static int __init slab_sysfs_init(void)
5243 {
5244 struct kmem_cache *s;
5245 int err;
5246
5247 mutex_lock(&slab_mutex);
5248
5249 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
5250 if (!slab_kset) {
5251 mutex_unlock(&slab_mutex);
5252 printk(KERN_ERR "Cannot register slab subsystem.\n");
5253 return -ENOSYS;
5254 }
5255
5256 slab_state = FULL;
5257
5258 list_for_each_entry(s, &slab_caches, list) {
5259 err = sysfs_slab_add(s);
5260 if (err)
5261 printk(KERN_ERR "SLUB: Unable to add boot slab %s"
5262 " to sysfs\n", s->name);
5263 }
5264
5265 while (alias_list) {
5266 struct saved_alias *al = alias_list;
5267
5268 alias_list = alias_list->next;
5269 err = sysfs_slab_alias(al->s, al->name);
5270 if (err)
5271 printk(KERN_ERR "SLUB: Unable to add boot slab alias"
5272 " %s to sysfs\n", al->name);
5273 kfree(al);
5274 }
5275
5276 mutex_unlock(&slab_mutex);
5277 resiliency_test();
5278 return 0;
5279 }
5280
5281 __initcall(slab_sysfs_init);
5282 #endif /* CONFIG_SYSFS */
5283
5284 /*
5285 * The /proc/slabinfo ABI
5286 */
5287 #ifdef CONFIG_SLABINFO
5288 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
5289 {
5290 unsigned long nr_slabs = 0;
5291 unsigned long nr_objs = 0;
5292 unsigned long nr_free = 0;
5293 int node;
5294
5295 for_each_online_node(node) {
5296 struct kmem_cache_node *n = get_node(s, node);
5297
5298 if (!n)
5299 continue;
5300
5301 nr_slabs += node_nr_slabs(n);
5302 nr_objs += node_nr_objs(n);
5303 nr_free += count_partial(n, count_free);
5304 }
5305
5306 sinfo->active_objs = nr_objs - nr_free;
5307 sinfo->num_objs = nr_objs;
5308 sinfo->active_slabs = nr_slabs;
5309 sinfo->num_slabs = nr_slabs;
5310 sinfo->objects_per_slab = oo_objects(s->oo);
5311 sinfo->cache_order = oo_order(s->oo);
5312 }
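/*
 * These fields feed the common /proc/slabinfo formatter in
 * mm/slab_common.c; a resulting line might look like (values are
 * illustrative):
 *
 *	kmalloc-192     2210   2247    192   21    1 : tunables 0 0 0 : slabdata  107  107 0
 */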
5313
5314 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
5315 {
5316 }
5317
5318 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
5319 size_t count, loff_t *ppos)
5320 {
5321 return -EIO;
5322 }
5323 #endif /* CONFIG_SLABINFO */