]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - mm/slub.c
slab: make size_index_elem() unsigned int
[mirror_ubuntu-jammy-kernel.git] / mm / slub.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
81819f0f
CL
2/*
3 * SLUB: A slab allocator that limits cache line use instead of queuing
4 * objects in per cpu and per node lists.
5 *
881db7fb
CL
6 * The allocator synchronizes using per slab locks or atomic operatios
7 * and only uses a centralized lock to manage a pool of partial slabs.
81819f0f 8 *
cde53535 9 * (C) 2007 SGI, Christoph Lameter
881db7fb 10 * (C) 2011 Linux Foundation, Christoph Lameter
81819f0f
CL
11 */
12
13#include <linux/mm.h>
1eb5ac64 14#include <linux/swap.h> /* struct reclaim_state */
81819f0f
CL
15#include <linux/module.h>
16#include <linux/bit_spinlock.h>
17#include <linux/interrupt.h>
18#include <linux/bitops.h>
19#include <linux/slab.h>
97d06609 20#include "slab.h"
7b3c3a50 21#include <linux/proc_fs.h>
3ac38faa 22#include <linux/notifier.h>
81819f0f 23#include <linux/seq_file.h>
a79316c6 24#include <linux/kasan.h>
81819f0f
CL
25#include <linux/cpu.h>
26#include <linux/cpuset.h>
27#include <linux/mempolicy.h>
28#include <linux/ctype.h>
3ac7fe5a 29#include <linux/debugobjects.h>
81819f0f 30#include <linux/kallsyms.h>
b9049e23 31#include <linux/memory.h>
f8bd2258 32#include <linux/math64.h>
773ff60e 33#include <linux/fault-inject.h>
bfa71457 34#include <linux/stacktrace.h>
4de900b4 35#include <linux/prefetch.h>
2633d7a0 36#include <linux/memcontrol.h>
2482ddec 37#include <linux/random.h>
81819f0f 38
4a92379b
RK
39#include <trace/events/kmem.h>
40
072bb0aa
MG
41#include "internal.h"
42
81819f0f
CL
43/*
44 * Lock order:
18004c5d 45 * 1. slab_mutex (Global Mutex)
881db7fb
CL
46 * 2. node->list_lock
47 * 3. slab_lock(page) (Only on some arches and for debugging)
81819f0f 48 *
18004c5d 49 * slab_mutex
881db7fb 50 *
18004c5d 51 * The role of the slab_mutex is to protect the list of all the slabs
881db7fb
CL
52 * and to synchronize major metadata changes to slab cache structures.
53 *
54 * The slab_lock is only used for debugging and on arches that do not
55 * have the ability to do a cmpxchg_double. It only protects the second
56 * double word in the page struct. Meaning
57 * A. page->freelist -> List of object free in a page
58 * B. page->counters -> Counters of objects
59 * C. page->frozen -> frozen state
60 *
61 * If a slab is frozen then it is exempt from list management. It is not
62 * on any list. The processor that froze the slab is the one who can
63 * perform list operations on the page. Other processors may put objects
64 * onto the freelist but the processor that froze the slab is the only
65 * one that can retrieve the objects from the page's freelist.
81819f0f
CL
66 *
67 * The list_lock protects the partial and full list on each node and
68 * the partial slab counter. If taken then no new slabs may be added or
69 * removed from the lists nor make the number of partial slabs be modified.
70 * (Note that the total number of slabs is an atomic value that may be
71 * modified without taking the list lock).
72 *
73 * The list_lock is a centralized lock and thus we avoid taking it as
74 * much as possible. As long as SLUB does not have to handle partial
75 * slabs, operations can continue without any centralized lock. F.e.
76 * allocating a long series of objects that fill up slabs does not require
77 * the list lock.
81819f0f
CL
78 * Interrupts are disabled during allocation and deallocation in order to
79 * make the slab allocator safe to use in the context of an irq. In addition
80 * interrupts are disabled to ensure that the processor does not change
81 * while handling per_cpu slabs, due to kernel preemption.
82 *
83 * SLUB assigns one slab for allocation to each processor.
84 * Allocations only occur from these slabs called cpu slabs.
85 *
672bba3a
CL
86 * Slabs with free elements are kept on a partial list and during regular
87 * operations no list for full slabs is used. If an object in a full slab is
81819f0f 88 * freed then the slab will show up again on the partial lists.
672bba3a
CL
89 * We track full slabs for debugging purposes though because otherwise we
90 * cannot scan all objects.
81819f0f
CL
91 *
92 * Slabs are freed when they become empty. Teardown and setup is
93 * minimal so we rely on the page allocators per cpu caches for
94 * fast frees and allocs.
95 *
96 * Overloading of page flags that are otherwise used for LRU management.
97 *
4b6f0750
CL
98 * PageActive The slab is frozen and exempt from list processing.
99 * This means that the slab is dedicated to a purpose
100 * such as satisfying allocations for a specific
101 * processor. Objects may be freed in the slab while
102 * it is frozen but slab_free will then skip the usual
103 * list operations. It is up to the processor holding
104 * the slab to integrate the slab into the slab lists
105 * when the slab is no longer needed.
106 *
107 * One use of this flag is to mark slabs that are
108 * used for allocations. Then such a slab becomes a cpu
109 * slab. The cpu slab may be equipped with an additional
dfb4f096 110 * freelist that allows lockless access to
894b8788
CL
111 * free objects in addition to the regular freelist
112 * that requires the slab lock.
81819f0f
CL
113 *
114 * PageError Slab requires special handling due to debug
115 * options set. This moves slab handling out of
894b8788 116 * the fast path and disables lockless freelists.
81819f0f
CL
117 */
118
af537b0a
CL
119static inline int kmem_cache_debug(struct kmem_cache *s)
120{
5577bd8a 121#ifdef CONFIG_SLUB_DEBUG
af537b0a 122 return unlikely(s->flags & SLAB_DEBUG_FLAGS);
5577bd8a 123#else
af537b0a 124 return 0;
5577bd8a 125#endif
af537b0a 126}
5577bd8a 127
117d54df 128void *fixup_red_left(struct kmem_cache *s, void *p)
d86bd1be
JK
129{
130 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
131 p += s->red_left_pad;
132
133 return p;
134}
135
345c905d
JK
136static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
137{
138#ifdef CONFIG_SLUB_CPU_PARTIAL
139 return !kmem_cache_debug(s);
140#else
141 return false;
142#endif
143}
144
81819f0f
CL
145/*
146 * Issues still to be resolved:
147 *
81819f0f
CL
148 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
149 *
81819f0f
CL
150 * - Variable sizing of the per node arrays
151 */
152
153/* Enable to test recovery from slab corruption on boot */
154#undef SLUB_RESILIENCY_TEST
155
b789ef51
CL
156/* Enable to log cmpxchg failures */
157#undef SLUB_DEBUG_CMPXCHG
158
2086d26a
CL
159/*
160 * Mininum number of partial slabs. These will be left on the partial
161 * lists even if they are empty. kmem_cache_shrink may reclaim them.
162 */
76be8950 163#define MIN_PARTIAL 5
e95eed57 164
2086d26a
CL
165/*
166 * Maximum number of desirable partial slabs.
167 * The existence of more partial slabs makes kmem_cache_shrink
721ae22a 168 * sort the partial list by the number of objects in use.
2086d26a
CL
169 */
170#define MAX_PARTIAL 10
171
becfda68 172#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
81819f0f 173 SLAB_POISON | SLAB_STORE_USER)
672bba3a 174
149daaf3
LA
175/*
176 * These debug flags cannot use CMPXCHG because there might be consistency
177 * issues when checking or reading debug information
178 */
179#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
180 SLAB_TRACE)
181
182
fa5ec8a1 183/*
3de47213
DR
184 * Debugging flags that require metadata to be stored in the slab. These get
185 * disabled when slub_debug=O is used and a cache's min order increases with
186 * metadata.
fa5ec8a1 187 */
3de47213 188#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
fa5ec8a1 189
210b5c06
CG
190#define OO_SHIFT 16
191#define OO_MASK ((1 << OO_SHIFT) - 1)
50d5c41c 192#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
210b5c06 193
81819f0f 194/* Internal SLUB flags */
d50112ed 195/* Poison object */
4fd0b46e 196#define __OBJECT_POISON ((slab_flags_t __force)0x80000000U)
d50112ed 197/* Use cmpxchg_double */
4fd0b46e 198#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U)
81819f0f 199
02cbc874
CL
200/*
201 * Tracking user of a slab.
202 */
d6543e39 203#define TRACK_ADDRS_COUNT 16
02cbc874 204struct track {
ce71e27c 205 unsigned long addr; /* Called from address */
d6543e39
BG
206#ifdef CONFIG_STACKTRACE
207 unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
208#endif
02cbc874
CL
209 int cpu; /* Was running on cpu */
210 int pid; /* Pid context */
211 unsigned long when; /* When did the operation occur */
212};
213
214enum track_item { TRACK_ALLOC, TRACK_FREE };
215
ab4d5ed5 216#ifdef CONFIG_SYSFS
81819f0f
CL
217static int sysfs_slab_add(struct kmem_cache *);
218static int sysfs_slab_alias(struct kmem_cache *, const char *);
107dab5c 219static void memcg_propagate_slab_attrs(struct kmem_cache *s);
bf5eb3de 220static void sysfs_slab_remove(struct kmem_cache *s);
81819f0f 221#else
0c710013
CL
222static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
223static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
224 { return 0; }
107dab5c 225static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
bf5eb3de 226static inline void sysfs_slab_remove(struct kmem_cache *s) { }
81819f0f
CL
227#endif
228
4fdccdfb 229static inline void stat(const struct kmem_cache *s, enum stat_item si)
8ff12cfc
CL
230{
231#ifdef CONFIG_SLUB_STATS
88da03a6
CL
232 /*
233 * The rmw is racy on a preemptible kernel but this is acceptable, so
234 * avoid this_cpu_add()'s irq-disable overhead.
235 */
236 raw_cpu_inc(s->cpu_slab->stat[si]);
8ff12cfc
CL
237#endif
238}
239
81819f0f
CL
240/********************************************************************
241 * Core slab cache functions
242 *******************************************************************/
243
2482ddec
KC
244/*
245 * Returns freelist pointer (ptr). With hardening, this is obfuscated
246 * with an XOR of the address where the pointer is held and a per-cache
247 * random number.
248 */
249static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
250 unsigned long ptr_addr)
251{
252#ifdef CONFIG_SLAB_FREELIST_HARDENED
253 return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
254#else
255 return ptr;
256#endif
257}
258
259/* Returns the freelist pointer recorded at location ptr_addr. */
260static inline void *freelist_dereference(const struct kmem_cache *s,
261 void *ptr_addr)
262{
263 return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
264 (unsigned long)ptr_addr);
265}
266
7656c72b
CL
267static inline void *get_freepointer(struct kmem_cache *s, void *object)
268{
2482ddec 269 return freelist_dereference(s, object + s->offset);
7656c72b
CL
270}
271
0ad9500e
ED
272static void prefetch_freepointer(const struct kmem_cache *s, void *object)
273{
2482ddec
KC
274 if (object)
275 prefetch(freelist_dereference(s, object + s->offset));
0ad9500e
ED
276}
277
1393d9a1
CL
278static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
279{
2482ddec 280 unsigned long freepointer_addr;
1393d9a1
CL
281 void *p;
282
922d566c
JK
283 if (!debug_pagealloc_enabled())
284 return get_freepointer(s, object);
285
2482ddec
KC
286 freepointer_addr = (unsigned long)object + s->offset;
287 probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
288 return freelist_ptr(s, p, freepointer_addr);
1393d9a1
CL
289}
290
7656c72b
CL
291static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
292{
2482ddec
KC
293 unsigned long freeptr_addr = (unsigned long)object + s->offset;
294
ce6fa91b
AP
295#ifdef CONFIG_SLAB_FREELIST_HARDENED
296 BUG_ON(object == fp); /* naive detection of double free or corruption */
297#endif
298
2482ddec 299 *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
7656c72b
CL
300}
301
302/* Loop over all objects in a slab */
224a88be 303#define for_each_object(__p, __s, __addr, __objects) \
d86bd1be
JK
304 for (__p = fixup_red_left(__s, __addr); \
305 __p < (__addr) + (__objects) * (__s)->size; \
306 __p += (__s)->size)
7656c72b 307
54266640 308#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
d86bd1be
JK
309 for (__p = fixup_red_left(__s, __addr), __idx = 1; \
310 __idx <= __objects; \
311 __p += (__s)->size, __idx++)
54266640 312
7656c72b
CL
313/* Determine object index from a given position */
314static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
315{
316 return (p - addr) / s->size;
317}
318
ab9a0f19
LJ
319static inline int order_objects(int order, unsigned long size, int reserved)
320{
321 return ((PAGE_SIZE << order) - reserved) / size;
322}
323
834f3d11 324static inline struct kmem_cache_order_objects oo_make(int order,
ab9a0f19 325 unsigned long size, int reserved)
834f3d11
CL
326{
327 struct kmem_cache_order_objects x = {
ab9a0f19 328 (order << OO_SHIFT) + order_objects(order, size, reserved)
834f3d11
CL
329 };
330
331 return x;
332}
333
334static inline int oo_order(struct kmem_cache_order_objects x)
335{
210b5c06 336 return x.x >> OO_SHIFT;
834f3d11
CL
337}
338
339static inline int oo_objects(struct kmem_cache_order_objects x)
340{
210b5c06 341 return x.x & OO_MASK;
834f3d11
CL
342}
343
881db7fb
CL
344/*
345 * Per slab locking using the pagelock
346 */
347static __always_inline void slab_lock(struct page *page)
348{
48c935ad 349 VM_BUG_ON_PAGE(PageTail(page), page);
881db7fb
CL
350 bit_spin_lock(PG_locked, &page->flags);
351}
352
353static __always_inline void slab_unlock(struct page *page)
354{
48c935ad 355 VM_BUG_ON_PAGE(PageTail(page), page);
881db7fb
CL
356 __bit_spin_unlock(PG_locked, &page->flags);
357}
358
a0320865
DH
359static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
360{
361 struct page tmp;
362 tmp.counters = counters_new;
363 /*
364 * page->counters can cover frozen/inuse/objects as well
0139aa7b
JK
365 * as page->_refcount. If we assign to ->counters directly
366 * we run the risk of losing updates to page->_refcount, so
a0320865
DH
367 * be careful and only assign to the fields we need.
368 */
369 page->frozen = tmp.frozen;
370 page->inuse = tmp.inuse;
371 page->objects = tmp.objects;
372}
373
1d07171c
CL
374/* Interrupts must be disabled (for the fallback code to work right) */
375static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
376 void *freelist_old, unsigned long counters_old,
377 void *freelist_new, unsigned long counters_new,
378 const char *n)
379{
380 VM_BUG_ON(!irqs_disabled());
2565409f
HC
381#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
382 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
1d07171c 383 if (s->flags & __CMPXCHG_DOUBLE) {
cdcd6298 384 if (cmpxchg_double(&page->freelist, &page->counters,
0aa9a13d
DC
385 freelist_old, counters_old,
386 freelist_new, counters_new))
6f6528a1 387 return true;
1d07171c
CL
388 } else
389#endif
390 {
391 slab_lock(page);
d0e0ac97
CG
392 if (page->freelist == freelist_old &&
393 page->counters == counters_old) {
1d07171c 394 page->freelist = freelist_new;
a0320865 395 set_page_slub_counters(page, counters_new);
1d07171c 396 slab_unlock(page);
6f6528a1 397 return true;
1d07171c
CL
398 }
399 slab_unlock(page);
400 }
401
402 cpu_relax();
403 stat(s, CMPXCHG_DOUBLE_FAIL);
404
405#ifdef SLUB_DEBUG_CMPXCHG
f9f58285 406 pr_info("%s %s: cmpxchg double redo ", n, s->name);
1d07171c
CL
407#endif
408
6f6528a1 409 return false;
1d07171c
CL
410}
411
b789ef51
CL
412static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
413 void *freelist_old, unsigned long counters_old,
414 void *freelist_new, unsigned long counters_new,
415 const char *n)
416{
2565409f
HC
417#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
418 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
b789ef51 419 if (s->flags & __CMPXCHG_DOUBLE) {
cdcd6298 420 if (cmpxchg_double(&page->freelist, &page->counters,
0aa9a13d
DC
421 freelist_old, counters_old,
422 freelist_new, counters_new))
6f6528a1 423 return true;
b789ef51
CL
424 } else
425#endif
426 {
1d07171c
CL
427 unsigned long flags;
428
429 local_irq_save(flags);
881db7fb 430 slab_lock(page);
d0e0ac97
CG
431 if (page->freelist == freelist_old &&
432 page->counters == counters_old) {
b789ef51 433 page->freelist = freelist_new;
a0320865 434 set_page_slub_counters(page, counters_new);
881db7fb 435 slab_unlock(page);
1d07171c 436 local_irq_restore(flags);
6f6528a1 437 return true;
b789ef51 438 }
881db7fb 439 slab_unlock(page);
1d07171c 440 local_irq_restore(flags);
b789ef51
CL
441 }
442
443 cpu_relax();
444 stat(s, CMPXCHG_DOUBLE_FAIL);
445
446#ifdef SLUB_DEBUG_CMPXCHG
f9f58285 447 pr_info("%s %s: cmpxchg double redo ", n, s->name);
b789ef51
CL
448#endif
449
6f6528a1 450 return false;
b789ef51
CL
451}
452
41ecc55b 453#ifdef CONFIG_SLUB_DEBUG
5f80b13a
CL
454/*
455 * Determine a map of object in use on a page.
456 *
881db7fb 457 * Node listlock must be held to guarantee that the page does
5f80b13a
CL
458 * not vanish from under us.
459 */
460static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
461{
462 void *p;
463 void *addr = page_address(page);
464
465 for (p = page->freelist; p; p = get_freepointer(s, p))
466 set_bit(slab_index(p, s, addr), map);
467}
468
d86bd1be
JK
469static inline int size_from_object(struct kmem_cache *s)
470{
471 if (s->flags & SLAB_RED_ZONE)
472 return s->size - s->red_left_pad;
473
474 return s->size;
475}
476
477static inline void *restore_red_left(struct kmem_cache *s, void *p)
478{
479 if (s->flags & SLAB_RED_ZONE)
480 p -= s->red_left_pad;
481
482 return p;
483}
484
41ecc55b
CL
485/*
486 * Debug settings:
487 */
89d3c87e 488#if defined(CONFIG_SLUB_DEBUG_ON)
d50112ed 489static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
f0630fff 490#else
d50112ed 491static slab_flags_t slub_debug;
f0630fff 492#endif
41ecc55b
CL
493
494static char *slub_debug_slabs;
fa5ec8a1 495static int disable_higher_order_debug;
41ecc55b 496
a79316c6
AR
497/*
498 * slub is about to manipulate internal object metadata. This memory lies
499 * outside the range of the allocated object, so accessing it would normally
500 * be reported by kasan as a bounds error. metadata_access_enable() is used
501 * to tell kasan that these accesses are OK.
502 */
503static inline void metadata_access_enable(void)
504{
505 kasan_disable_current();
506}
507
508static inline void metadata_access_disable(void)
509{
510 kasan_enable_current();
511}
512
81819f0f
CL
513/*
514 * Object debugging
515 */
d86bd1be
JK
516
517/* Verify that a pointer has an address that is valid within a slab page */
518static inline int check_valid_pointer(struct kmem_cache *s,
519 struct page *page, void *object)
520{
521 void *base;
522
523 if (!object)
524 return 1;
525
526 base = page_address(page);
527 object = restore_red_left(s, object);
528 if (object < base || object >= base + page->objects * s->size ||
529 (object - base) % s->size) {
530 return 0;
531 }
532
533 return 1;
534}
535
aa2efd5e
DT
536static void print_section(char *level, char *text, u8 *addr,
537 unsigned int length)
81819f0f 538{
a79316c6 539 metadata_access_enable();
aa2efd5e 540 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
ffc79d28 541 length, 1);
a79316c6 542 metadata_access_disable();
81819f0f
CL
543}
544
81819f0f
CL
545static struct track *get_track(struct kmem_cache *s, void *object,
546 enum track_item alloc)
547{
548 struct track *p;
549
550 if (s->offset)
551 p = object + s->offset + sizeof(void *);
552 else
553 p = object + s->inuse;
554
555 return p + alloc;
556}
557
558static void set_track(struct kmem_cache *s, void *object,
ce71e27c 559 enum track_item alloc, unsigned long addr)
81819f0f 560{
1a00df4a 561 struct track *p = get_track(s, object, alloc);
81819f0f 562
81819f0f 563 if (addr) {
d6543e39
BG
564#ifdef CONFIG_STACKTRACE
565 struct stack_trace trace;
566 int i;
567
568 trace.nr_entries = 0;
569 trace.max_entries = TRACK_ADDRS_COUNT;
570 trace.entries = p->addrs;
571 trace.skip = 3;
a79316c6 572 metadata_access_enable();
d6543e39 573 save_stack_trace(&trace);
a79316c6 574 metadata_access_disable();
d6543e39
BG
575
576 /* See rant in lockdep.c */
577 if (trace.nr_entries != 0 &&
578 trace.entries[trace.nr_entries - 1] == ULONG_MAX)
579 trace.nr_entries--;
580
581 for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
582 p->addrs[i] = 0;
583#endif
81819f0f
CL
584 p->addr = addr;
585 p->cpu = smp_processor_id();
88e4ccf2 586 p->pid = current->pid;
81819f0f
CL
587 p->when = jiffies;
588 } else
589 memset(p, 0, sizeof(struct track));
590}
591
81819f0f
CL
592static void init_tracking(struct kmem_cache *s, void *object)
593{
24922684
CL
594 if (!(s->flags & SLAB_STORE_USER))
595 return;
596
ce71e27c
EGM
597 set_track(s, object, TRACK_FREE, 0UL);
598 set_track(s, object, TRACK_ALLOC, 0UL);
81819f0f
CL
599}
600
86609d33 601static void print_track(const char *s, struct track *t, unsigned long pr_time)
81819f0f
CL
602{
603 if (!t->addr)
604 return;
605
f9f58285 606 pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
86609d33 607 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
d6543e39
BG
608#ifdef CONFIG_STACKTRACE
609 {
610 int i;
611 for (i = 0; i < TRACK_ADDRS_COUNT; i++)
612 if (t->addrs[i])
f9f58285 613 pr_err("\t%pS\n", (void *)t->addrs[i]);
d6543e39
BG
614 else
615 break;
616 }
617#endif
24922684
CL
618}
619
620static void print_tracking(struct kmem_cache *s, void *object)
621{
86609d33 622 unsigned long pr_time = jiffies;
24922684
CL
623 if (!(s->flags & SLAB_STORE_USER))
624 return;
625
86609d33
CP
626 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
627 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
24922684
CL
628}
629
630static void print_page_info(struct page *page)
631{
f9f58285 632 pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
d0e0ac97 633 page, page->objects, page->inuse, page->freelist, page->flags);
24922684
CL
634
635}
636
637static void slab_bug(struct kmem_cache *s, char *fmt, ...)
638{
ecc42fbe 639 struct va_format vaf;
24922684 640 va_list args;
24922684
CL
641
642 va_start(args, fmt);
ecc42fbe
FF
643 vaf.fmt = fmt;
644 vaf.va = &args;
f9f58285 645 pr_err("=============================================================================\n");
ecc42fbe 646 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
f9f58285 647 pr_err("-----------------------------------------------------------------------------\n\n");
645df230 648
373d4d09 649 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
ecc42fbe 650 va_end(args);
81819f0f
CL
651}
652
24922684
CL
653static void slab_fix(struct kmem_cache *s, char *fmt, ...)
654{
ecc42fbe 655 struct va_format vaf;
24922684 656 va_list args;
24922684
CL
657
658 va_start(args, fmt);
ecc42fbe
FF
659 vaf.fmt = fmt;
660 vaf.va = &args;
661 pr_err("FIX %s: %pV\n", s->name, &vaf);
24922684 662 va_end(args);
24922684
CL
663}
664
665static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
81819f0f
CL
666{
667 unsigned int off; /* Offset of last byte */
a973e9dd 668 u8 *addr = page_address(page);
24922684
CL
669
670 print_tracking(s, p);
671
672 print_page_info(page);
673
f9f58285
FF
674 pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
675 p, p - addr, get_freepointer(s, p));
24922684 676
d86bd1be 677 if (s->flags & SLAB_RED_ZONE)
aa2efd5e
DT
678 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
679 s->red_left_pad);
d86bd1be 680 else if (p > addr + 16)
aa2efd5e 681 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
81819f0f 682
aa2efd5e
DT
683 print_section(KERN_ERR, "Object ", p,
684 min_t(unsigned long, s->object_size, PAGE_SIZE));
81819f0f 685 if (s->flags & SLAB_RED_ZONE)
aa2efd5e 686 print_section(KERN_ERR, "Redzone ", p + s->object_size,
3b0efdfa 687 s->inuse - s->object_size);
81819f0f 688
81819f0f
CL
689 if (s->offset)
690 off = s->offset + sizeof(void *);
691 else
692 off = s->inuse;
693
24922684 694 if (s->flags & SLAB_STORE_USER)
81819f0f 695 off += 2 * sizeof(struct track);
81819f0f 696
80a9201a
AP
697 off += kasan_metadata_size(s);
698
d86bd1be 699 if (off != size_from_object(s))
81819f0f 700 /* Beginning of the filler is the free pointer */
aa2efd5e
DT
701 print_section(KERN_ERR, "Padding ", p + off,
702 size_from_object(s) - off);
24922684
CL
703
704 dump_stack();
81819f0f
CL
705}
706
75c66def 707void object_err(struct kmem_cache *s, struct page *page,
81819f0f
CL
708 u8 *object, char *reason)
709{
3dc50637 710 slab_bug(s, "%s", reason);
24922684 711 print_trailer(s, page, object);
81819f0f
CL
712}
713
d0e0ac97
CG
714static void slab_err(struct kmem_cache *s, struct page *page,
715 const char *fmt, ...)
81819f0f
CL
716{
717 va_list args;
718 char buf[100];
719
24922684
CL
720 va_start(args, fmt);
721 vsnprintf(buf, sizeof(buf), fmt, args);
81819f0f 722 va_end(args);
3dc50637 723 slab_bug(s, "%s", buf);
24922684 724 print_page_info(page);
81819f0f
CL
725 dump_stack();
726}
727
f7cb1933 728static void init_object(struct kmem_cache *s, void *object, u8 val)
81819f0f
CL
729{
730 u8 *p = object;
731
d86bd1be
JK
732 if (s->flags & SLAB_RED_ZONE)
733 memset(p - s->red_left_pad, val, s->red_left_pad);
734
81819f0f 735 if (s->flags & __OBJECT_POISON) {
3b0efdfa
CL
736 memset(p, POISON_FREE, s->object_size - 1);
737 p[s->object_size - 1] = POISON_END;
81819f0f
CL
738 }
739
740 if (s->flags & SLAB_RED_ZONE)
3b0efdfa 741 memset(p + s->object_size, val, s->inuse - s->object_size);
81819f0f
CL
742}
743
24922684
CL
744static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
745 void *from, void *to)
746{
747 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
748 memset(from, data, to - from);
749}
750
751static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
752 u8 *object, char *what,
06428780 753 u8 *start, unsigned int value, unsigned int bytes)
24922684
CL
754{
755 u8 *fault;
756 u8 *end;
757
a79316c6 758 metadata_access_enable();
79824820 759 fault = memchr_inv(start, value, bytes);
a79316c6 760 metadata_access_disable();
24922684
CL
761 if (!fault)
762 return 1;
763
764 end = start + bytes;
765 while (end > fault && end[-1] == value)
766 end--;
767
768 slab_bug(s, "%s overwritten", what);
f9f58285 769 pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
24922684
CL
770 fault, end - 1, fault[0], value);
771 print_trailer(s, page, object);
772
773 restore_bytes(s, what, value, fault, end);
774 return 0;
81819f0f
CL
775}
776
81819f0f
CL
777/*
778 * Object layout:
779 *
780 * object address
781 * Bytes of the object to be managed.
782 * If the freepointer may overlay the object then the free
783 * pointer is the first word of the object.
672bba3a 784 *
81819f0f
CL
785 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
786 * 0xa5 (POISON_END)
787 *
3b0efdfa 788 * object + s->object_size
81819f0f 789 * Padding to reach word boundary. This is also used for Redzoning.
672bba3a 790 * Padding is extended by another word if Redzoning is enabled and
3b0efdfa 791 * object_size == inuse.
672bba3a 792 *
81819f0f
CL
793 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
794 * 0xcc (RED_ACTIVE) for objects in use.
795 *
796 * object + s->inuse
672bba3a
CL
797 * Meta data starts here.
798 *
81819f0f
CL
799 * A. Free pointer (if we cannot overwrite object on free)
800 * B. Tracking data for SLAB_STORE_USER
672bba3a 801 * C. Padding to reach required alignment boundary or at mininum
6446faa2 802 * one word if debugging is on to be able to detect writes
672bba3a
CL
803 * before the word boundary.
804 *
805 * Padding is done using 0x5a (POISON_INUSE)
81819f0f
CL
806 *
807 * object + s->size
672bba3a 808 * Nothing is used beyond s->size.
81819f0f 809 *
3b0efdfa 810 * If slabcaches are merged then the object_size and inuse boundaries are mostly
672bba3a 811 * ignored. And therefore no slab options that rely on these boundaries
81819f0f
CL
812 * may be used with merged slabcaches.
813 */
814
81819f0f
CL
815static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
816{
817 unsigned long off = s->inuse; /* The end of info */
818
819 if (s->offset)
820 /* Freepointer is placed after the object. */
821 off += sizeof(void *);
822
823 if (s->flags & SLAB_STORE_USER)
824 /* We also have user information there */
825 off += 2 * sizeof(struct track);
826
80a9201a
AP
827 off += kasan_metadata_size(s);
828
d86bd1be 829 if (size_from_object(s) == off)
81819f0f
CL
830 return 1;
831
24922684 832 return check_bytes_and_report(s, page, p, "Object padding",
d86bd1be 833 p + off, POISON_INUSE, size_from_object(s) - off);
81819f0f
CL
834}
835
39b26464 836/* Check the pad bytes at the end of a slab page */
81819f0f
CL
837static int slab_pad_check(struct kmem_cache *s, struct page *page)
838{
24922684
CL
839 u8 *start;
840 u8 *fault;
841 u8 *end;
5d682681 842 u8 *pad;
24922684
CL
843 int length;
844 int remainder;
81819f0f
CL
845
846 if (!(s->flags & SLAB_POISON))
847 return 1;
848
a973e9dd 849 start = page_address(page);
ab9a0f19 850 length = (PAGE_SIZE << compound_order(page)) - s->reserved;
39b26464
CL
851 end = start + length;
852 remainder = length % s->size;
81819f0f
CL
853 if (!remainder)
854 return 1;
855
5d682681 856 pad = end - remainder;
a79316c6 857 metadata_access_enable();
5d682681 858 fault = memchr_inv(pad, POISON_INUSE, remainder);
a79316c6 859 metadata_access_disable();
24922684
CL
860 if (!fault)
861 return 1;
862 while (end > fault && end[-1] == POISON_INUSE)
863 end--;
864
865 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
5d682681 866 print_section(KERN_ERR, "Padding ", pad, remainder);
24922684 867
5d682681 868 restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
24922684 869 return 0;
81819f0f
CL
870}
871
872static int check_object(struct kmem_cache *s, struct page *page,
f7cb1933 873 void *object, u8 val)
81819f0f
CL
874{
875 u8 *p = object;
3b0efdfa 876 u8 *endobject = object + s->object_size;
81819f0f
CL
877
878 if (s->flags & SLAB_RED_ZONE) {
d86bd1be
JK
879 if (!check_bytes_and_report(s, page, object, "Redzone",
880 object - s->red_left_pad, val, s->red_left_pad))
881 return 0;
882
24922684 883 if (!check_bytes_and_report(s, page, object, "Redzone",
3b0efdfa 884 endobject, val, s->inuse - s->object_size))
81819f0f 885 return 0;
81819f0f 886 } else {
3b0efdfa 887 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
3adbefee 888 check_bytes_and_report(s, page, p, "Alignment padding",
d0e0ac97
CG
889 endobject, POISON_INUSE,
890 s->inuse - s->object_size);
3adbefee 891 }
81819f0f
CL
892 }
893
894 if (s->flags & SLAB_POISON) {
f7cb1933 895 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
24922684 896 (!check_bytes_and_report(s, page, p, "Poison", p,
3b0efdfa 897 POISON_FREE, s->object_size - 1) ||
24922684 898 !check_bytes_and_report(s, page, p, "Poison",
3b0efdfa 899 p + s->object_size - 1, POISON_END, 1)))
81819f0f 900 return 0;
81819f0f
CL
901 /*
902 * check_pad_bytes cleans up on its own.
903 */
904 check_pad_bytes(s, page, p);
905 }
906
f7cb1933 907 if (!s->offset && val == SLUB_RED_ACTIVE)
81819f0f
CL
908 /*
909 * Object and freepointer overlap. Cannot check
910 * freepointer while object is allocated.
911 */
912 return 1;
913
914 /* Check free pointer validity */
915 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
916 object_err(s, page, p, "Freepointer corrupt");
917 /*
9f6c708e 918 * No choice but to zap it and thus lose the remainder
81819f0f 919 * of the free objects in this slab. May cause
672bba3a 920 * another error because the object count is now wrong.
81819f0f 921 */
a973e9dd 922 set_freepointer(s, p, NULL);
81819f0f
CL
923 return 0;
924 }
925 return 1;
926}
927
928static int check_slab(struct kmem_cache *s, struct page *page)
929{
39b26464
CL
930 int maxobj;
931
81819f0f
CL
932 VM_BUG_ON(!irqs_disabled());
933
934 if (!PageSlab(page)) {
24922684 935 slab_err(s, page, "Not a valid slab page");
81819f0f
CL
936 return 0;
937 }
39b26464 938
ab9a0f19 939 maxobj = order_objects(compound_order(page), s->size, s->reserved);
39b26464
CL
940 if (page->objects > maxobj) {
941 slab_err(s, page, "objects %u > max %u",
f6edde9c 942 page->objects, maxobj);
39b26464
CL
943 return 0;
944 }
945 if (page->inuse > page->objects) {
24922684 946 slab_err(s, page, "inuse %u > max %u",
f6edde9c 947 page->inuse, page->objects);
81819f0f
CL
948 return 0;
949 }
950 /* Slab_pad_check fixes things up after itself */
951 slab_pad_check(s, page);
952 return 1;
953}
954
955/*
672bba3a
CL
956 * Determine if a certain object on a page is on the freelist. Must hold the
957 * slab lock to guarantee that the chains are in a consistent state.
81819f0f
CL
958 */
959static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
960{
961 int nr = 0;
881db7fb 962 void *fp;
81819f0f 963 void *object = NULL;
f6edde9c 964 int max_objects;
81819f0f 965
881db7fb 966 fp = page->freelist;
39b26464 967 while (fp && nr <= page->objects) {
81819f0f
CL
968 if (fp == search)
969 return 1;
970 if (!check_valid_pointer(s, page, fp)) {
971 if (object) {
972 object_err(s, page, object,
973 "Freechain corrupt");
a973e9dd 974 set_freepointer(s, object, NULL);
81819f0f 975 } else {
24922684 976 slab_err(s, page, "Freepointer corrupt");
a973e9dd 977 page->freelist = NULL;
39b26464 978 page->inuse = page->objects;
24922684 979 slab_fix(s, "Freelist cleared");
81819f0f
CL
980 return 0;
981 }
982 break;
983 }
984 object = fp;
985 fp = get_freepointer(s, object);
986 nr++;
987 }
988
ab9a0f19 989 max_objects = order_objects(compound_order(page), s->size, s->reserved);
210b5c06
CG
990 if (max_objects > MAX_OBJS_PER_PAGE)
991 max_objects = MAX_OBJS_PER_PAGE;
224a88be
CL
992
993 if (page->objects != max_objects) {
756a025f
JP
994 slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
995 page->objects, max_objects);
224a88be
CL
996 page->objects = max_objects;
997 slab_fix(s, "Number of objects adjusted.");
998 }
39b26464 999 if (page->inuse != page->objects - nr) {
756a025f
JP
1000 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
1001 page->inuse, page->objects - nr);
39b26464 1002 page->inuse = page->objects - nr;
24922684 1003 slab_fix(s, "Object count adjusted.");
81819f0f
CL
1004 }
1005 return search == NULL;
1006}
1007
0121c619
CL
1008static void trace(struct kmem_cache *s, struct page *page, void *object,
1009 int alloc)
3ec09742
CL
1010{
1011 if (s->flags & SLAB_TRACE) {
f9f58285 1012 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
3ec09742
CL
1013 s->name,
1014 alloc ? "alloc" : "free",
1015 object, page->inuse,
1016 page->freelist);
1017
1018 if (!alloc)
aa2efd5e 1019 print_section(KERN_INFO, "Object ", (void *)object,
d0e0ac97 1020 s->object_size);
3ec09742
CL
1021
1022 dump_stack();
1023 }
1024}
1025
643b1138 1026/*
672bba3a 1027 * Tracking of fully allocated slabs for debugging purposes.
643b1138 1028 */
5cc6eee8
CL
1029static void add_full(struct kmem_cache *s,
1030 struct kmem_cache_node *n, struct page *page)
643b1138 1031{
5cc6eee8
CL
1032 if (!(s->flags & SLAB_STORE_USER))
1033 return;
1034
255d0884 1035 lockdep_assert_held(&n->list_lock);
643b1138 1036 list_add(&page->lru, &n->full);
643b1138
CL
1037}
1038
c65c1877 1039static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
643b1138 1040{
643b1138
CL
1041 if (!(s->flags & SLAB_STORE_USER))
1042 return;
1043
255d0884 1044 lockdep_assert_held(&n->list_lock);
643b1138 1045 list_del(&page->lru);
643b1138
CL
1046}
1047
0f389ec6
CL
1048/* Tracking of the number of slabs for debugging purposes */
1049static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1050{
1051 struct kmem_cache_node *n = get_node(s, node);
1052
1053 return atomic_long_read(&n->nr_slabs);
1054}
1055
26c02cf0
AB
1056static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1057{
1058 return atomic_long_read(&n->nr_slabs);
1059}
1060
205ab99d 1061static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
0f389ec6
CL
1062{
1063 struct kmem_cache_node *n = get_node(s, node);
1064
1065 /*
1066 * May be called early in order to allocate a slab for the
1067 * kmem_cache_node structure. Solve the chicken-egg
1068 * dilemma by deferring the increment of the count during
1069 * bootstrap (see early_kmem_cache_node_alloc).
1070 */
338b2642 1071 if (likely(n)) {
0f389ec6 1072 atomic_long_inc(&n->nr_slabs);
205ab99d
CL
1073 atomic_long_add(objects, &n->total_objects);
1074 }
0f389ec6 1075}
205ab99d 1076static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
0f389ec6
CL
1077{
1078 struct kmem_cache_node *n = get_node(s, node);
1079
1080 atomic_long_dec(&n->nr_slabs);
205ab99d 1081 atomic_long_sub(objects, &n->total_objects);
0f389ec6
CL
1082}
1083
1084/* Object debug checks for alloc/free paths */
3ec09742
CL
1085static void setup_object_debug(struct kmem_cache *s, struct page *page,
1086 void *object)
1087{
1088 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1089 return;
1090
f7cb1933 1091 init_object(s, object, SLUB_RED_INACTIVE);
3ec09742
CL
1092 init_tracking(s, object);
1093}
1094
becfda68 1095static inline int alloc_consistency_checks(struct kmem_cache *s,
d0e0ac97 1096 struct page *page,
ce71e27c 1097 void *object, unsigned long addr)
81819f0f
CL
1098{
1099 if (!check_slab(s, page))
becfda68 1100 return 0;
81819f0f 1101
81819f0f
CL
1102 if (!check_valid_pointer(s, page, object)) {
1103 object_err(s, page, object, "Freelist Pointer check fails");
becfda68 1104 return 0;
81819f0f
CL
1105 }
1106
f7cb1933 1107 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
becfda68
LA
1108 return 0;
1109
1110 return 1;
1111}
1112
1113static noinline int alloc_debug_processing(struct kmem_cache *s,
1114 struct page *page,
1115 void *object, unsigned long addr)
1116{
1117 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1118 if (!alloc_consistency_checks(s, page, object, addr))
1119 goto bad;
1120 }
81819f0f 1121
3ec09742
CL
1122 /* Success perform special debug activities for allocs */
1123 if (s->flags & SLAB_STORE_USER)
1124 set_track(s, object, TRACK_ALLOC, addr);
1125 trace(s, page, object, 1);
f7cb1933 1126 init_object(s, object, SLUB_RED_ACTIVE);
81819f0f 1127 return 1;
3ec09742 1128
81819f0f
CL
1129bad:
1130 if (PageSlab(page)) {
1131 /*
1132 * If this is a slab page then lets do the best we can
1133 * to avoid issues in the future. Marking all objects
672bba3a 1134 * as used avoids touching the remaining objects.
81819f0f 1135 */
24922684 1136 slab_fix(s, "Marking all objects used");
39b26464 1137 page->inuse = page->objects;
a973e9dd 1138 page->freelist = NULL;
81819f0f
CL
1139 }
1140 return 0;
1141}
1142
becfda68
LA
1143static inline int free_consistency_checks(struct kmem_cache *s,
1144 struct page *page, void *object, unsigned long addr)
81819f0f 1145{
81819f0f 1146 if (!check_valid_pointer(s, page, object)) {
70d71228 1147 slab_err(s, page, "Invalid object pointer 0x%p", object);
becfda68 1148 return 0;
81819f0f
CL
1149 }
1150
1151 if (on_freelist(s, page, object)) {
24922684 1152 object_err(s, page, object, "Object already free");
becfda68 1153 return 0;
81819f0f
CL
1154 }
1155
f7cb1933 1156 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
becfda68 1157 return 0;
81819f0f 1158
1b4f59e3 1159 if (unlikely(s != page->slab_cache)) {
3adbefee 1160 if (!PageSlab(page)) {
756a025f
JP
1161 slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1162 object);
1b4f59e3 1163 } else if (!page->slab_cache) {
f9f58285
FF
1164 pr_err("SLUB <none>: no slab for object 0x%p.\n",
1165 object);
70d71228 1166 dump_stack();
06428780 1167 } else
24922684
CL
1168 object_err(s, page, object,
1169 "page slab pointer corrupt.");
becfda68
LA
1170 return 0;
1171 }
1172 return 1;
1173}
1174
1175/* Supports checking bulk free of a constructed freelist */
1176static noinline int free_debug_processing(
1177 struct kmem_cache *s, struct page *page,
1178 void *head, void *tail, int bulk_cnt,
1179 unsigned long addr)
1180{
1181 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1182 void *object = head;
1183 int cnt = 0;
1184 unsigned long uninitialized_var(flags);
1185 int ret = 0;
1186
1187 spin_lock_irqsave(&n->list_lock, flags);
1188 slab_lock(page);
1189
1190 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1191 if (!check_slab(s, page))
1192 goto out;
1193 }
1194
1195next_object:
1196 cnt++;
1197
1198 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1199 if (!free_consistency_checks(s, page, object, addr))
1200 goto out;
81819f0f 1201 }
3ec09742 1202
3ec09742
CL
1203 if (s->flags & SLAB_STORE_USER)
1204 set_track(s, object, TRACK_FREE, addr);
1205 trace(s, page, object, 0);
81084651 1206 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
f7cb1933 1207 init_object(s, object, SLUB_RED_INACTIVE);
81084651
JDB
1208
1209 /* Reached end of constructed freelist yet? */
1210 if (object != tail) {
1211 object = get_freepointer(s, object);
1212 goto next_object;
1213 }
804aa132
LA
1214 ret = 1;
1215
5c2e4bbb 1216out:
81084651
JDB
1217 if (cnt != bulk_cnt)
1218 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1219 bulk_cnt, cnt);
1220
881db7fb 1221 slab_unlock(page);
282acb43 1222 spin_unlock_irqrestore(&n->list_lock, flags);
804aa132
LA
1223 if (!ret)
1224 slab_fix(s, "Object at 0x%p not freed", object);
1225 return ret;
81819f0f
CL
1226}
1227
41ecc55b
CL
1228static int __init setup_slub_debug(char *str)
1229{
f0630fff
CL
1230 slub_debug = DEBUG_DEFAULT_FLAGS;
1231 if (*str++ != '=' || !*str)
1232 /*
1233 * No options specified. Switch on full debugging.
1234 */
1235 goto out;
1236
1237 if (*str == ',')
1238 /*
1239 * No options but restriction on slabs. This means full
1240 * debugging for slabs matching a pattern.
1241 */
1242 goto check_slabs;
1243
1244 slub_debug = 0;
1245 if (*str == '-')
1246 /*
1247 * Switch off all debugging measures.
1248 */
1249 goto out;
1250
1251 /*
1252 * Determine which debug features should be switched on
1253 */
06428780 1254 for (; *str && *str != ','; str++) {
f0630fff
CL
1255 switch (tolower(*str)) {
1256 case 'f':
becfda68 1257 slub_debug |= SLAB_CONSISTENCY_CHECKS;
f0630fff
CL
1258 break;
1259 case 'z':
1260 slub_debug |= SLAB_RED_ZONE;
1261 break;
1262 case 'p':
1263 slub_debug |= SLAB_POISON;
1264 break;
1265 case 'u':
1266 slub_debug |= SLAB_STORE_USER;
1267 break;
1268 case 't':
1269 slub_debug |= SLAB_TRACE;
1270 break;
4c13dd3b
DM
1271 case 'a':
1272 slub_debug |= SLAB_FAILSLAB;
1273 break;
08303a73
CA
1274 case 'o':
1275 /*
1276 * Avoid enabling debugging on caches if its minimum
1277 * order would increase as a result.
1278 */
1279 disable_higher_order_debug = 1;
1280 break;
f0630fff 1281 default:
f9f58285
FF
1282 pr_err("slub_debug option '%c' unknown. skipped\n",
1283 *str);
f0630fff 1284 }
41ecc55b
CL
1285 }
1286
f0630fff 1287check_slabs:
41ecc55b
CL
1288 if (*str == ',')
1289 slub_debug_slabs = str + 1;
f0630fff 1290out:
41ecc55b
CL
1291 return 1;
1292}
1293
1294__setup("slub_debug", setup_slub_debug);
1295
d50112ed
AD
1296slab_flags_t kmem_cache_flags(unsigned long object_size,
1297 slab_flags_t flags, const char *name,
51cc5068 1298 void (*ctor)(void *))
41ecc55b
CL
1299{
1300 /*
e153362a 1301 * Enable debugging if selected on the kernel commandline.
41ecc55b 1302 */
c6f58d9b
CL
1303 if (slub_debug && (!slub_debug_slabs || (name &&
1304 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
3de47213 1305 flags |= slub_debug;
ba0268a8
CL
1306
1307 return flags;
41ecc55b 1308}
b4a64718 1309#else /* !CONFIG_SLUB_DEBUG */
3ec09742
CL
1310static inline void setup_object_debug(struct kmem_cache *s,
1311 struct page *page, void *object) {}
41ecc55b 1312
3ec09742 1313static inline int alloc_debug_processing(struct kmem_cache *s,
ce71e27c 1314 struct page *page, void *object, unsigned long addr) { return 0; }
41ecc55b 1315
282acb43 1316static inline int free_debug_processing(
81084651
JDB
1317 struct kmem_cache *s, struct page *page,
1318 void *head, void *tail, int bulk_cnt,
282acb43 1319 unsigned long addr) { return 0; }
41ecc55b 1320
41ecc55b
CL
1321static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1322 { return 1; }
1323static inline int check_object(struct kmem_cache *s, struct page *page,
f7cb1933 1324 void *object, u8 val) { return 1; }
5cc6eee8
CL
1325static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1326 struct page *page) {}
c65c1877
PZ
1327static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1328 struct page *page) {}
d50112ed
AD
1329slab_flags_t kmem_cache_flags(unsigned long object_size,
1330 slab_flags_t flags, const char *name,
51cc5068 1331 void (*ctor)(void *))
ba0268a8
CL
1332{
1333 return flags;
1334}
41ecc55b 1335#define slub_debug 0
0f389ec6 1336
fdaa45e9
IM
1337#define disable_higher_order_debug 0
1338
0f389ec6
CL
1339static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1340 { return 0; }
26c02cf0
AB
1341static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1342 { return 0; }
205ab99d
CL
1343static inline void inc_slabs_node(struct kmem_cache *s, int node,
1344 int objects) {}
1345static inline void dec_slabs_node(struct kmem_cache *s, int node,
1346 int objects) {}
7d550c56 1347
02e72cc6
AR
1348#endif /* CONFIG_SLUB_DEBUG */
1349
1350/*
1351 * Hooks for other subsystems that check memory allocations. In a typical
1352 * production configuration these hooks all should produce no code at all.
1353 */
d56791b3
RB
1354static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1355{
1356 kmemleak_alloc(ptr, size, 1, flags);
505f5dcb 1357 kasan_kmalloc_large(ptr, size, flags);
d56791b3
RB
1358}
1359
ee3ce779 1360static __always_inline void kfree_hook(void *x)
d56791b3
RB
1361{
1362 kmemleak_free(x);
ee3ce779 1363 kasan_kfree_large(x, _RET_IP_);
d56791b3
RB
1364}
1365
ee3ce779 1366static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
d56791b3 1367{
80a9201a
AP
1368 void *freeptr;
1369
d56791b3 1370 kmemleak_free_recursive(x, s->flags);
7d550c56 1371
02e72cc6
AR
1372 /*
1373 * Trouble is that we may no longer disable interrupts in the fast path
1374 * So in order to make the debug calls that expect irqs to be
1375 * disabled we need to disable interrupts temporarily.
1376 */
4675ff05 1377#ifdef CONFIG_LOCKDEP
02e72cc6
AR
1378 {
1379 unsigned long flags;
1380
1381 local_irq_save(flags);
02e72cc6
AR
1382 debug_check_no_locks_freed(x, s->object_size);
1383 local_irq_restore(flags);
1384 }
1385#endif
1386 if (!(s->flags & SLAB_DEBUG_OBJECTS))
1387 debug_check_no_obj_freed(x, s->object_size);
0316bec2 1388
80a9201a
AP
1389 freeptr = get_freepointer(s, x);
1390 /*
1391 * kasan_slab_free() may put x into memory quarantine, delaying its
1392 * reuse. In this case the object's freelist pointer is changed.
1393 */
ee3ce779 1394 kasan_slab_free(s, x, _RET_IP_);
80a9201a 1395 return freeptr;
02e72cc6 1396}
205ab99d 1397
81084651
JDB
1398static inline void slab_free_freelist_hook(struct kmem_cache *s,
1399 void *head, void *tail)
1400{
1401/*
1402 * Compiler cannot detect this function can be removed if slab_free_hook()
1403 * evaluates to nothing. Thus, catch all relevant config debug options here.
1404 */
4675ff05 1405#if defined(CONFIG_LOCKDEP) || \
81084651
JDB
1406 defined(CONFIG_DEBUG_KMEMLEAK) || \
1407 defined(CONFIG_DEBUG_OBJECTS_FREE) || \
1408 defined(CONFIG_KASAN)
1409
1410 void *object = head;
1411 void *tail_obj = tail ? : head;
80a9201a 1412 void *freeptr;
81084651
JDB
1413
1414 do {
80a9201a
AP
1415 freeptr = slab_free_hook(s, object);
1416 } while ((object != tail_obj) && (object = freeptr));
81084651
JDB
1417#endif
1418}
1419
588f8ba9
TG
1420static void setup_object(struct kmem_cache *s, struct page *page,
1421 void *object)
1422{
1423 setup_object_debug(s, page, object);
b3cbd9bf 1424 kasan_init_slab_obj(s, object);
588f8ba9
TG
1425 if (unlikely(s->ctor)) {
1426 kasan_unpoison_object_data(s, object);
1427 s->ctor(object);
1428 kasan_poison_object_data(s, object);
1429 }
1430}
1431
81819f0f
CL
1432/*
1433 * Slab allocation and freeing
1434 */
5dfb4175
VD
1435static inline struct page *alloc_slab_page(struct kmem_cache *s,
1436 gfp_t flags, int node, struct kmem_cache_order_objects oo)
65c3376a 1437{
5dfb4175 1438 struct page *page;
65c3376a
CL
1439 int order = oo_order(oo);
1440
2154a336 1441 if (node == NUMA_NO_NODE)
5dfb4175 1442 page = alloc_pages(flags, order);
65c3376a 1443 else
96db800f 1444 page = __alloc_pages_node(node, flags, order);
5dfb4175 1445
f3ccb2c4
VD
1446 if (page && memcg_charge_slab(page, flags, order, s)) {
1447 __free_pages(page, order);
1448 page = NULL;
1449 }
5dfb4175
VD
1450
1451 return page;
65c3376a
CL
1452}
1453
210e7a43
TG
1454#ifdef CONFIG_SLAB_FREELIST_RANDOM
1455/* Pre-initialize the random sequence cache */
1456static int init_cache_random_seq(struct kmem_cache *s)
1457{
1458 int err;
1459 unsigned long i, count = oo_objects(s->oo);
1460
a810007a
SR
1461 /* Bailout if already initialised */
1462 if (s->random_seq)
1463 return 0;
1464
210e7a43
TG
1465 err = cache_random_seq_create(s, count, GFP_KERNEL);
1466 if (err) {
1467 pr_err("SLUB: Unable to initialize free list for %s\n",
1468 s->name);
1469 return err;
1470 }
1471
1472 /* Transform to an offset on the set of pages */
1473 if (s->random_seq) {
1474 for (i = 0; i < count; i++)
1475 s->random_seq[i] *= s->size;
1476 }
1477 return 0;
1478}
1479
1480/* Initialize each random sequence freelist per cache */
1481static void __init init_freelist_randomization(void)
1482{
1483 struct kmem_cache *s;
1484
1485 mutex_lock(&slab_mutex);
1486
1487 list_for_each_entry(s, &slab_caches, list)
1488 init_cache_random_seq(s);
1489
1490 mutex_unlock(&slab_mutex);
1491}
1492
1493/* Get the next entry on the pre-computed freelist randomized */
1494static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1495 unsigned long *pos, void *start,
1496 unsigned long page_limit,
1497 unsigned long freelist_count)
1498{
1499 unsigned int idx;
1500
1501 /*
1502 * If the target page allocation failed, the number of objects on the
1503 * page might be smaller than the usual size defined by the cache.
1504 */
1505 do {
1506 idx = s->random_seq[*pos];
1507 *pos += 1;
1508 if (*pos >= freelist_count)
1509 *pos = 0;
1510 } while (unlikely(idx >= page_limit));
1511
1512 return (char *)start + idx;
1513}
1514
1515/* Shuffle the single linked freelist based on a random pre-computed sequence */
1516static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1517{
1518 void *start;
1519 void *cur;
1520 void *next;
1521 unsigned long idx, pos, page_limit, freelist_count;
1522
1523 if (page->objects < 2 || !s->random_seq)
1524 return false;
1525
1526 freelist_count = oo_objects(s->oo);
1527 pos = get_random_int() % freelist_count;
1528
1529 page_limit = page->objects * s->size;
1530 start = fixup_red_left(s, page_address(page));
1531
1532 /* First entry is used as the base of the freelist */
1533 cur = next_freelist_entry(s, page, &pos, start, page_limit,
1534 freelist_count);
1535 page->freelist = cur;
1536
1537 for (idx = 1; idx < page->objects; idx++) {
1538 setup_object(s, page, cur);
1539 next = next_freelist_entry(s, page, &pos, start, page_limit,
1540 freelist_count);
1541 set_freepointer(s, cur, next);
1542 cur = next;
1543 }
1544 setup_object(s, page, cur);
1545 set_freepointer(s, cur, NULL);
1546
1547 return true;
1548}
1549#else
1550static inline int init_cache_random_seq(struct kmem_cache *s)
1551{
1552 return 0;
1553}
1554static inline void init_freelist_randomization(void) { }
1555static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1556{
1557 return false;
1558}
1559#endif /* CONFIG_SLAB_FREELIST_RANDOM */
1560
81819f0f
CL
1561static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1562{
06428780 1563 struct page *page;
834f3d11 1564 struct kmem_cache_order_objects oo = s->oo;
ba52270d 1565 gfp_t alloc_gfp;
588f8ba9
TG
1566 void *start, *p;
1567 int idx, order;
210e7a43 1568 bool shuffle;
81819f0f 1569
7e0528da
CL
1570 flags &= gfp_allowed_mask;
1571
d0164adc 1572 if (gfpflags_allow_blocking(flags))
7e0528da
CL
1573 local_irq_enable();
1574
b7a49f0d 1575 flags |= s->allocflags;
e12ba74d 1576
ba52270d
PE
1577 /*
1578 * Let the initial higher-order allocation fail under memory pressure
1579 * so we fall-back to the minimum order allocation.
1580 */
1581 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
d0164adc 1582 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
444eb2a4 1583 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
ba52270d 1584
5dfb4175 1585 page = alloc_slab_page(s, alloc_gfp, node, oo);
65c3376a
CL
1586 if (unlikely(!page)) {
1587 oo = s->min;
80c3a998 1588 alloc_gfp = flags;
65c3376a
CL
1589 /*
1590 * Allocation may have failed due to fragmentation.
1591 * Try a lower order alloc if possible
1592 */
5dfb4175 1593 page = alloc_slab_page(s, alloc_gfp, node, oo);
588f8ba9
TG
1594 if (unlikely(!page))
1595 goto out;
1596 stat(s, ORDER_FALLBACK);
65c3376a 1597 }
5a896d9e 1598
834f3d11 1599 page->objects = oo_objects(oo);
81819f0f 1600
1f458cbf 1601 order = compound_order(page);
1b4f59e3 1602 page->slab_cache = s;
c03f94cc 1603 __SetPageSlab(page);
2f064f34 1604 if (page_is_pfmemalloc(page))
072bb0aa 1605 SetPageSlabPfmemalloc(page);
81819f0f
CL
1606
1607 start = page_address(page);
81819f0f
CL
1608
1609 if (unlikely(s->flags & SLAB_POISON))
1f458cbf 1610 memset(start, POISON_INUSE, PAGE_SIZE << order);
81819f0f 1611
0316bec2
AR
1612 kasan_poison_slab(page);
1613
210e7a43
TG
1614 shuffle = shuffle_freelist(s, page);
1615
1616 if (!shuffle) {
1617 for_each_object_idx(p, idx, s, start, page->objects) {
1618 setup_object(s, page, p);
1619 if (likely(idx < page->objects))
1620 set_freepointer(s, p, p + s->size);
1621 else
1622 set_freepointer(s, p, NULL);
1623 }
1624 page->freelist = fixup_red_left(s, start);
81819f0f 1625 }
81819f0f 1626
e6e82ea1 1627 page->inuse = page->objects;
8cb0a506 1628 page->frozen = 1;
588f8ba9 1629
81819f0f 1630out:
d0164adc 1631 if (gfpflags_allow_blocking(flags))
588f8ba9
TG
1632 local_irq_disable();
1633 if (!page)
1634 return NULL;
1635
7779f212 1636 mod_lruvec_page_state(page,
588f8ba9
TG
1637 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1638 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1639 1 << oo_order(oo));
1640
1641 inc_slabs_node(s, page_to_nid(page), page->objects);
1642
81819f0f
CL
1643 return page;
1644}
1645
588f8ba9
TG
1646static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1647{
1648 if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
bacdcb34 1649 gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
72baeef0
MH
1650 flags &= ~GFP_SLAB_BUG_MASK;
1651 pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
1652 invalid_mask, &invalid_mask, flags, &flags);
65b9de75 1653 dump_stack();
588f8ba9
TG
1654 }
1655
1656 return allocate_slab(s,
1657 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1658}
1659
81819f0f
CL
1660static void __free_slab(struct kmem_cache *s, struct page *page)
1661{
834f3d11
CL
1662 int order = compound_order(page);
1663 int pages = 1 << order;
81819f0f 1664
becfda68 1665 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
81819f0f
CL
1666 void *p;
1667
1668 slab_pad_check(s, page);
224a88be
CL
1669 for_each_object(p, s, page_address(page),
1670 page->objects)
f7cb1933 1671 check_object(s, page, p, SLUB_RED_INACTIVE);
81819f0f
CL
1672 }
1673
7779f212 1674 mod_lruvec_page_state(page,
81819f0f
CL
1675 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1676 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
06428780 1677 -pages);
81819f0f 1678
072bb0aa 1679 __ClearPageSlabPfmemalloc(page);
49bd5221 1680 __ClearPageSlab(page);
1f458cbf 1681
22b751c3 1682 page_mapcount_reset(page);
1eb5ac64
NP
1683 if (current->reclaim_state)
1684 current->reclaim_state->reclaimed_slab += pages;
27ee57c9
VD
1685 memcg_uncharge_slab(page, order, s);
1686 __free_pages(page, order);
81819f0f
CL
1687}
1688
da9a638c
LJ
1689#define need_reserve_slab_rcu \
1690 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1691
81819f0f
CL
1692static void rcu_free_slab(struct rcu_head *h)
1693{
1694 struct page *page;
1695
da9a638c
LJ
1696 if (need_reserve_slab_rcu)
1697 page = virt_to_head_page(h);
1698 else
1699 page = container_of((struct list_head *)h, struct page, lru);
1700
1b4f59e3 1701 __free_slab(page->slab_cache, page);
81819f0f
CL
1702}
1703
1704static void free_slab(struct kmem_cache *s, struct page *page)
1705{
5f0d5a3a 1706 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
da9a638c
LJ
1707 struct rcu_head *head;
1708
1709 if (need_reserve_slab_rcu) {
1710 int order = compound_order(page);
1711 int offset = (PAGE_SIZE << order) - s->reserved;
1712
1713 VM_BUG_ON(s->reserved != sizeof(*head));
1714 head = page_address(page) + offset;
1715 } else {
bc4f610d 1716 head = &page->rcu_head;
da9a638c 1717 }
81819f0f
CL
1718
1719 call_rcu(head, rcu_free_slab);
1720 } else
1721 __free_slab(s, page);
1722}
1723
1724static void discard_slab(struct kmem_cache *s, struct page *page)
1725{
205ab99d 1726 dec_slabs_node(s, page_to_nid(page), page->objects);
81819f0f
CL
1727 free_slab(s, page);
1728}
1729
1730/*
5cc6eee8 1731 * Management of partially allocated slabs.
81819f0f 1732 */
1e4dd946
SR
1733static inline void
1734__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
81819f0f 1735{
e95eed57 1736 n->nr_partial++;
136333d1 1737 if (tail == DEACTIVATE_TO_TAIL)
7c2e132c
CL
1738 list_add_tail(&page->lru, &n->partial);
1739 else
1740 list_add(&page->lru, &n->partial);
81819f0f
CL
1741}
1742
1e4dd946
SR
1743static inline void add_partial(struct kmem_cache_node *n,
1744 struct page *page, int tail)
62e346a8 1745{
c65c1877 1746 lockdep_assert_held(&n->list_lock);
1e4dd946
SR
1747 __add_partial(n, page, tail);
1748}
c65c1877 1749
1e4dd946
SR
1750static inline void remove_partial(struct kmem_cache_node *n,
1751 struct page *page)
1752{
1753 lockdep_assert_held(&n->list_lock);
52b4b950
DS
1754 list_del(&page->lru);
1755 n->nr_partial--;
1e4dd946
SR
1756}
1757
81819f0f 1758/*
7ced3719
CL
1759 * Remove slab from the partial list, freeze it and
1760 * return the pointer to the freelist.
81819f0f 1761 *
497b66f2 1762 * Returns a list of objects or NULL if it fails.
81819f0f 1763 */
497b66f2 1764static inline void *acquire_slab(struct kmem_cache *s,
acd19fd1 1765 struct kmem_cache_node *n, struct page *page,
633b0764 1766 int mode, int *objects)
81819f0f 1767{
2cfb7455
CL
1768 void *freelist;
1769 unsigned long counters;
1770 struct page new;
1771
c65c1877
PZ
1772 lockdep_assert_held(&n->list_lock);
1773
2cfb7455
CL
1774 /*
1775 * Zap the freelist and set the frozen bit.
1776 * The old freelist is the list of objects for the
1777 * per cpu allocation list.
1778 */
7ced3719
CL
1779 freelist = page->freelist;
1780 counters = page->counters;
1781 new.counters = counters;
633b0764 1782 *objects = new.objects - new.inuse;
23910c50 1783 if (mode) {
7ced3719 1784 new.inuse = page->objects;
23910c50
PE
1785 new.freelist = NULL;
1786 } else {
1787 new.freelist = freelist;
1788 }
2cfb7455 1789
a0132ac0 1790 VM_BUG_ON(new.frozen);
7ced3719 1791 new.frozen = 1;
2cfb7455 1792
7ced3719 1793 if (!__cmpxchg_double_slab(s, page,
2cfb7455 1794 freelist, counters,
02d7633f 1795 new.freelist, new.counters,
7ced3719 1796 "acquire_slab"))
7ced3719 1797 return NULL;
2cfb7455
CL
1798
1799 remove_partial(n, page);
7ced3719 1800 WARN_ON(!freelist);
49e22585 1801 return freelist;
81819f0f
CL
1802}
1803
633b0764 1804static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
8ba00bb6 1805static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
49e22585 1806
81819f0f 1807/*
672bba3a 1808 * Try to allocate a partial slab from a specific node.
81819f0f 1809 */
8ba00bb6
JK
1810static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1811 struct kmem_cache_cpu *c, gfp_t flags)
81819f0f 1812{
49e22585
CL
1813 struct page *page, *page2;
1814 void *object = NULL;
633b0764
JK
1815 int available = 0;
1816 int objects;
81819f0f
CL
1817
1818 /*
1819 * Racy check. If we mistakenly see no partial slabs then we
1820 * just allocate an empty slab. If we mistakenly try to get a
672bba3a
CL
1821 * partial slab and there is none available then get_partials()
1822 * will return NULL.
81819f0f
CL
1823 */
1824 if (!n || !n->nr_partial)
1825 return NULL;
1826
1827 spin_lock(&n->list_lock);
49e22585 1828 list_for_each_entry_safe(page, page2, &n->partial, lru) {
8ba00bb6 1829 void *t;
49e22585 1830
8ba00bb6
JK
1831 if (!pfmemalloc_match(page, flags))
1832 continue;
1833
633b0764 1834 t = acquire_slab(s, n, page, object == NULL, &objects);
49e22585
CL
1835 if (!t)
1836 break;
1837
633b0764 1838 available += objects;
12d79634 1839 if (!object) {
49e22585 1840 c->page = page;
49e22585 1841 stat(s, ALLOC_FROM_PARTIAL);
49e22585 1842 object = t;
49e22585 1843 } else {
633b0764 1844 put_cpu_partial(s, page, 0);
8028dcea 1845 stat(s, CPU_PARTIAL_NODE);
49e22585 1846 }
345c905d 1847 if (!kmem_cache_has_cpu_partial(s)
e6d0e1dc 1848 || available > slub_cpu_partial(s) / 2)
49e22585
CL
1849 break;
1850
497b66f2 1851 }
81819f0f 1852 spin_unlock(&n->list_lock);
497b66f2 1853 return object;
81819f0f
CL
1854}
1855
1856/*
672bba3a 1857 * Get a page from somewhere. Search in increasing NUMA distances.
81819f0f 1858 */
de3ec035 1859static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
acd19fd1 1860 struct kmem_cache_cpu *c)
81819f0f
CL
1861{
1862#ifdef CONFIG_NUMA
1863 struct zonelist *zonelist;
dd1a239f 1864 struct zoneref *z;
54a6eb5c
MG
1865 struct zone *zone;
1866 enum zone_type high_zoneidx = gfp_zone(flags);
497b66f2 1867 void *object;
cc9a6c87 1868 unsigned int cpuset_mems_cookie;
81819f0f
CL
1869
1870 /*
672bba3a
CL
1871 * The defrag ratio allows a configuration of the tradeoffs between
1872 * inter node defragmentation and node local allocations. A lower
1873 * defrag_ratio increases the tendency to do local allocations
1874 * instead of attempting to obtain partial slabs from other nodes.
81819f0f 1875 *
672bba3a
CL
1876 * If the defrag_ratio is set to 0 then kmalloc() always
1877 * returns node local objects. If the ratio is higher then kmalloc()
1878 * may return off node objects because partial slabs are obtained
1879 * from other nodes and filled up.
81819f0f 1880 *
43efd3ea
LP
1881 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
1882 * (which makes defrag_ratio = 1000) then every (well almost)
1883 * allocation will first attempt to defrag slab caches on other nodes.
1884 * This means scanning over all nodes to look for partial slabs which
1885 * may be expensive if we do it every time we are trying to find a slab
672bba3a 1886 * with available objects.
81819f0f 1887 */
9824601e
CL
1888 if (!s->remote_node_defrag_ratio ||
1889 get_cycles() % 1024 > s->remote_node_defrag_ratio)
81819f0f
CL
1890 return NULL;
1891
cc9a6c87 1892 do {
d26914d1 1893 cpuset_mems_cookie = read_mems_allowed_begin();
2a389610 1894 zonelist = node_zonelist(mempolicy_slab_node(), flags);
cc9a6c87
MG
1895 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1896 struct kmem_cache_node *n;
1897
1898 n = get_node(s, zone_to_nid(zone));
1899
dee2f8aa 1900 if (n && cpuset_zone_allowed(zone, flags) &&
cc9a6c87 1901 n->nr_partial > s->min_partial) {
8ba00bb6 1902 object = get_partial_node(s, n, c, flags);
cc9a6c87
MG
1903 if (object) {
1904 /*
d26914d1
MG
1905 * Don't check read_mems_allowed_retry()
1906 * here - if mems_allowed was updated in
1907 * parallel, that was a harmless race
1908 * between allocation and the cpuset
1909 * update
cc9a6c87 1910 */
cc9a6c87
MG
1911 return object;
1912 }
c0ff7453 1913 }
81819f0f 1914 }
d26914d1 1915 } while (read_mems_allowed_retry(cpuset_mems_cookie));
81819f0f
CL
1916#endif
1917 return NULL;
1918}
1919
1920/*
1921 * Get a partial page, lock it and return it.
1922 */
497b66f2 1923static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
acd19fd1 1924 struct kmem_cache_cpu *c)
81819f0f 1925{
497b66f2 1926 void *object;
a561ce00
JK
1927 int searchnode = node;
1928
1929 if (node == NUMA_NO_NODE)
1930 searchnode = numa_mem_id();
1931 else if (!node_present_pages(node))
1932 searchnode = node_to_mem_node(node);
81819f0f 1933
8ba00bb6 1934 object = get_partial_node(s, get_node(s, searchnode), c, flags);
497b66f2
CL
1935 if (object || node != NUMA_NO_NODE)
1936 return object;
81819f0f 1937
acd19fd1 1938 return get_any_partial(s, flags, c);
81819f0f
CL
1939}
1940
8a5ec0ba
CL
1941#ifdef CONFIG_PREEMPT
1942/*
1943 * Calculate the next globally unique transaction for disambiguiation
1944 * during cmpxchg. The transactions start with the cpu number and are then
1945 * incremented by CONFIG_NR_CPUS.
1946 */
1947#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
1948#else
1949/*
1950 * No preemption supported therefore also no need to check for
1951 * different cpus.
1952 */
1953#define TID_STEP 1
1954#endif
1955
1956static inline unsigned long next_tid(unsigned long tid)
1957{
1958 return tid + TID_STEP;
1959}
1960
1961static inline unsigned int tid_to_cpu(unsigned long tid)
1962{
1963 return tid % TID_STEP;
1964}
1965
1966static inline unsigned long tid_to_event(unsigned long tid)
1967{
1968 return tid / TID_STEP;
1969}
1970
1971static inline unsigned int init_tid(int cpu)
1972{
1973 return cpu;
1974}
1975
1976static inline void note_cmpxchg_failure(const char *n,
1977 const struct kmem_cache *s, unsigned long tid)
1978{
1979#ifdef SLUB_DEBUG_CMPXCHG
1980 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1981
f9f58285 1982 pr_info("%s %s: cmpxchg redo ", n, s->name);
8a5ec0ba
CL
1983
1984#ifdef CONFIG_PREEMPT
1985 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
f9f58285 1986 pr_warn("due to cpu change %d -> %d\n",
8a5ec0ba
CL
1987 tid_to_cpu(tid), tid_to_cpu(actual_tid));
1988 else
1989#endif
1990 if (tid_to_event(tid) != tid_to_event(actual_tid))
f9f58285 1991 pr_warn("due to cpu running other code. Event %ld->%ld\n",
8a5ec0ba
CL
1992 tid_to_event(tid), tid_to_event(actual_tid));
1993 else
f9f58285 1994 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
8a5ec0ba
CL
1995 actual_tid, tid, next_tid(tid));
1996#endif
4fdccdfb 1997 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
8a5ec0ba
CL
1998}
1999
788e1aad 2000static void init_kmem_cache_cpus(struct kmem_cache *s)
8a5ec0ba 2001{
8a5ec0ba
CL
2002 int cpu;
2003
2004 for_each_possible_cpu(cpu)
2005 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
8a5ec0ba 2006}
2cfb7455 2007
81819f0f
CL
2008/*
2009 * Remove the cpu slab
2010 */
d0e0ac97 2011static void deactivate_slab(struct kmem_cache *s, struct page *page,
d4ff6d35 2012 void *freelist, struct kmem_cache_cpu *c)
81819f0f 2013{
2cfb7455 2014 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2cfb7455
CL
2015 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2016 int lock = 0;
2017 enum slab_modes l = M_NONE, m = M_NONE;
2cfb7455 2018 void *nextfree;
136333d1 2019 int tail = DEACTIVATE_TO_HEAD;
2cfb7455
CL
2020 struct page new;
2021 struct page old;
2022
2023 if (page->freelist) {
84e554e6 2024 stat(s, DEACTIVATE_REMOTE_FREES);
136333d1 2025 tail = DEACTIVATE_TO_TAIL;
2cfb7455
CL
2026 }
2027
894b8788 2028 /*
2cfb7455
CL
2029 * Stage one: Free all available per cpu objects back
2030 * to the page freelist while it is still frozen. Leave the
2031 * last one.
2032 *
2033 * There is no need to take the list->lock because the page
2034 * is still frozen.
2035 */
2036 while (freelist && (nextfree = get_freepointer(s, freelist))) {
2037 void *prior;
2038 unsigned long counters;
2039
2040 do {
2041 prior = page->freelist;
2042 counters = page->counters;
2043 set_freepointer(s, freelist, prior);
2044 new.counters = counters;
2045 new.inuse--;
a0132ac0 2046 VM_BUG_ON(!new.frozen);
2cfb7455 2047
1d07171c 2048 } while (!__cmpxchg_double_slab(s, page,
2cfb7455
CL
2049 prior, counters,
2050 freelist, new.counters,
2051 "drain percpu freelist"));
2052
2053 freelist = nextfree;
2054 }
2055
894b8788 2056 /*
2cfb7455
CL
2057 * Stage two: Ensure that the page is unfrozen while the
2058 * list presence reflects the actual number of objects
2059 * during unfreeze.
2060 *
2061 * We setup the list membership and then perform a cmpxchg
2062 * with the count. If there is a mismatch then the page
2063 * is not unfrozen but the page is on the wrong list.
2064 *
2065 * Then we restart the process which may have to remove
2066 * the page from the list that we just put it on again
2067 * because the number of objects in the slab may have
2068 * changed.
894b8788 2069 */
2cfb7455 2070redo:
894b8788 2071
2cfb7455
CL
2072 old.freelist = page->freelist;
2073 old.counters = page->counters;
a0132ac0 2074 VM_BUG_ON(!old.frozen);
7c2e132c 2075
2cfb7455
CL
2076 /* Determine target state of the slab */
2077 new.counters = old.counters;
2078 if (freelist) {
2079 new.inuse--;
2080 set_freepointer(s, freelist, old.freelist);
2081 new.freelist = freelist;
2082 } else
2083 new.freelist = old.freelist;
2084
2085 new.frozen = 0;
2086
8a5b20ae 2087 if (!new.inuse && n->nr_partial >= s->min_partial)
2cfb7455
CL
2088 m = M_FREE;
2089 else if (new.freelist) {
2090 m = M_PARTIAL;
2091 if (!lock) {
2092 lock = 1;
2093 /*
2094 * Taking the spinlock removes the possiblity
2095 * that acquire_slab() will see a slab page that
2096 * is frozen
2097 */
2098 spin_lock(&n->list_lock);
2099 }
2100 } else {
2101 m = M_FULL;
2102 if (kmem_cache_debug(s) && !lock) {
2103 lock = 1;
2104 /*
2105 * This also ensures that the scanning of full
2106 * slabs from diagnostic functions will not see
2107 * any frozen slabs.
2108 */
2109 spin_lock(&n->list_lock);
2110 }
2111 }
2112
2113 if (l != m) {
2114
2115 if (l == M_PARTIAL)
2116
2117 remove_partial(n, page);
2118
2119 else if (l == M_FULL)
894b8788 2120
c65c1877 2121 remove_full(s, n, page);
2cfb7455
CL
2122
2123 if (m == M_PARTIAL) {
2124
2125 add_partial(n, page, tail);
136333d1 2126 stat(s, tail);
2cfb7455
CL
2127
2128 } else if (m == M_FULL) {
894b8788 2129
2cfb7455
CL
2130 stat(s, DEACTIVATE_FULL);
2131 add_full(s, n, page);
2132
2133 }
2134 }
2135
2136 l = m;
1d07171c 2137 if (!__cmpxchg_double_slab(s, page,
2cfb7455
CL
2138 old.freelist, old.counters,
2139 new.freelist, new.counters,
2140 "unfreezing slab"))
2141 goto redo;
2142
2cfb7455
CL
2143 if (lock)
2144 spin_unlock(&n->list_lock);
2145
2146 if (m == M_FREE) {
2147 stat(s, DEACTIVATE_EMPTY);
2148 discard_slab(s, page);
2149 stat(s, FREE_SLAB);
894b8788 2150 }
d4ff6d35
WY
2151
2152 c->page = NULL;
2153 c->freelist = NULL;
81819f0f
CL
2154}
2155
d24ac77f
JK
2156/*
2157 * Unfreeze all the cpu partial slabs.
2158 *
59a09917
CL
2159 * This function must be called with interrupts disabled
2160 * for the cpu using c (or some other guarantee must be there
2161 * to guarantee no concurrent accesses).
d24ac77f 2162 */
59a09917
CL
2163static void unfreeze_partials(struct kmem_cache *s,
2164 struct kmem_cache_cpu *c)
49e22585 2165{
345c905d 2166#ifdef CONFIG_SLUB_CPU_PARTIAL
43d77867 2167 struct kmem_cache_node *n = NULL, *n2 = NULL;
9ada1934 2168 struct page *page, *discard_page = NULL;
49e22585
CL
2169
2170 while ((page = c->partial)) {
49e22585
CL
2171 struct page new;
2172 struct page old;
2173
2174 c->partial = page->next;
43d77867
JK
2175
2176 n2 = get_node(s, page_to_nid(page));
2177 if (n != n2) {
2178 if (n)
2179 spin_unlock(&n->list_lock);
2180
2181 n = n2;
2182 spin_lock(&n->list_lock);
2183 }
49e22585
CL
2184
2185 do {
2186
2187 old.freelist = page->freelist;
2188 old.counters = page->counters;
a0132ac0 2189 VM_BUG_ON(!old.frozen);
49e22585
CL
2190
2191 new.counters = old.counters;
2192 new.freelist = old.freelist;
2193
2194 new.frozen = 0;
2195
d24ac77f 2196 } while (!__cmpxchg_double_slab(s, page,
49e22585
CL
2197 old.freelist, old.counters,
2198 new.freelist, new.counters,
2199 "unfreezing slab"));
2200
8a5b20ae 2201 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
9ada1934
SL
2202 page->next = discard_page;
2203 discard_page = page;
43d77867
JK
2204 } else {
2205 add_partial(n, page, DEACTIVATE_TO_TAIL);
2206 stat(s, FREE_ADD_PARTIAL);
49e22585
CL
2207 }
2208 }
2209
2210 if (n)
2211 spin_unlock(&n->list_lock);
9ada1934
SL
2212
2213 while (discard_page) {
2214 page = discard_page;
2215 discard_page = discard_page->next;
2216
2217 stat(s, DEACTIVATE_EMPTY);
2218 discard_slab(s, page);
2219 stat(s, FREE_SLAB);
2220 }
345c905d 2221#endif
49e22585
CL
2222}
2223
2224/*
2225 * Put a page that was just frozen (in __slab_free) into a partial page
0d2d5d40 2226 * slot if available.
49e22585
CL
2227 *
2228 * If we did not find a slot then simply move all the partials to the
2229 * per node partial list.
2230 */
633b0764 2231static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
49e22585 2232{
345c905d 2233#ifdef CONFIG_SLUB_CPU_PARTIAL
49e22585
CL
2234 struct page *oldpage;
2235 int pages;
2236 int pobjects;
2237
d6e0b7fa 2238 preempt_disable();
49e22585
CL
2239 do {
2240 pages = 0;
2241 pobjects = 0;
2242 oldpage = this_cpu_read(s->cpu_slab->partial);
2243
2244 if (oldpage) {
2245 pobjects = oldpage->pobjects;
2246 pages = oldpage->pages;
2247 if (drain && pobjects > s->cpu_partial) {
2248 unsigned long flags;
2249 /*
2250 * partial array is full. Move the existing
2251 * set to the per node partial list.
2252 */
2253 local_irq_save(flags);
59a09917 2254 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
49e22585 2255 local_irq_restore(flags);
e24fc410 2256 oldpage = NULL;
49e22585
CL
2257 pobjects = 0;
2258 pages = 0;
8028dcea 2259 stat(s, CPU_PARTIAL_DRAIN);
49e22585
CL
2260 }
2261 }
2262
2263 pages++;
2264 pobjects += page->objects - page->inuse;
2265
2266 page->pages = pages;
2267 page->pobjects = pobjects;
2268 page->next = oldpage;
2269
d0e0ac97
CG
2270 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2271 != oldpage);
d6e0b7fa
VD
2272 if (unlikely(!s->cpu_partial)) {
2273 unsigned long flags;
2274
2275 local_irq_save(flags);
2276 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2277 local_irq_restore(flags);
2278 }
2279 preempt_enable();
345c905d 2280#endif
49e22585
CL
2281}
2282
dfb4f096 2283static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0f 2284{
84e554e6 2285 stat(s, CPUSLAB_FLUSH);
d4ff6d35 2286 deactivate_slab(s, c->page, c->freelist, c);
c17dda40
CL
2287
2288 c->tid = next_tid(c->tid);
81819f0f
CL
2289}
2290
2291/*
2292 * Flush cpu slab.
6446faa2 2293 *
81819f0f
CL
2294 * Called from IPI handler with interrupts disabled.
2295 */
0c710013 2296static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
81819f0f 2297{
9dfc6e68 2298 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
81819f0f 2299
49e22585
CL
2300 if (likely(c)) {
2301 if (c->page)
2302 flush_slab(s, c);
2303
59a09917 2304 unfreeze_partials(s, c);
49e22585 2305 }
81819f0f
CL
2306}
2307
2308static void flush_cpu_slab(void *d)
2309{
2310 struct kmem_cache *s = d;
81819f0f 2311
dfb4f096 2312 __flush_cpu_slab(s, smp_processor_id());
81819f0f
CL
2313}
2314
a8364d55
GBY
2315static bool has_cpu_slab(int cpu, void *info)
2316{
2317 struct kmem_cache *s = info;
2318 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2319
a93cf07b 2320 return c->page || slub_percpu_partial(c);
a8364d55
GBY
2321}
2322
81819f0f
CL
2323static void flush_all(struct kmem_cache *s)
2324{
a8364d55 2325 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
81819f0f
CL
2326}
2327
a96a87bf
SAS
2328/*
2329 * Use the cpu notifier to insure that the cpu slabs are flushed when
2330 * necessary.
2331 */
2332static int slub_cpu_dead(unsigned int cpu)
2333{
2334 struct kmem_cache *s;
2335 unsigned long flags;
2336
2337 mutex_lock(&slab_mutex);
2338 list_for_each_entry(s, &slab_caches, list) {
2339 local_irq_save(flags);
2340 __flush_cpu_slab(s, cpu);
2341 local_irq_restore(flags);
2342 }
2343 mutex_unlock(&slab_mutex);
2344 return 0;
2345}
2346
dfb4f096
CL
2347/*
2348 * Check if the objects in a per cpu structure fit numa
2349 * locality expectations.
2350 */
57d437d2 2351static inline int node_match(struct page *page, int node)
dfb4f096
CL
2352{
2353#ifdef CONFIG_NUMA
4d7868e6 2354 if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
dfb4f096
CL
2355 return 0;
2356#endif
2357 return 1;
2358}
2359
9a02d699 2360#ifdef CONFIG_SLUB_DEBUG
781b2ba6
PE
2361static int count_free(struct page *page)
2362{
2363 return page->objects - page->inuse;
2364}
2365
9a02d699
DR
2366static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2367{
2368 return atomic_long_read(&n->total_objects);
2369}
2370#endif /* CONFIG_SLUB_DEBUG */
2371
2372#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
781b2ba6
PE
2373static unsigned long count_partial(struct kmem_cache_node *n,
2374 int (*get_count)(struct page *))
2375{
2376 unsigned long flags;
2377 unsigned long x = 0;
2378 struct page *page;
2379
2380 spin_lock_irqsave(&n->list_lock, flags);
2381 list_for_each_entry(page, &n->partial, lru)
2382 x += get_count(page);
2383 spin_unlock_irqrestore(&n->list_lock, flags);
2384 return x;
2385}
9a02d699 2386#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
26c02cf0 2387
781b2ba6
PE
2388static noinline void
2389slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2390{
9a02d699
DR
2391#ifdef CONFIG_SLUB_DEBUG
2392 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2393 DEFAULT_RATELIMIT_BURST);
781b2ba6 2394 int node;
fa45dc25 2395 struct kmem_cache_node *n;
781b2ba6 2396
9a02d699
DR
2397 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2398 return;
2399
5b3810e5
VB
2400 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2401 nid, gfpflags, &gfpflags);
f9f58285
FF
2402 pr_warn(" cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
2403 s->name, s->object_size, s->size, oo_order(s->oo),
2404 oo_order(s->min));
781b2ba6 2405
3b0efdfa 2406 if (oo_order(s->min) > get_order(s->object_size))
f9f58285
FF
2407 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
2408 s->name);
fa5ec8a1 2409
fa45dc25 2410 for_each_kmem_cache_node(s, node, n) {
781b2ba6
PE
2411 unsigned long nr_slabs;
2412 unsigned long nr_objs;
2413 unsigned long nr_free;
2414
26c02cf0
AB
2415 nr_free = count_partial(n, count_free);
2416 nr_slabs = node_nr_slabs(n);
2417 nr_objs = node_nr_objs(n);
781b2ba6 2418
f9f58285 2419 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
781b2ba6
PE
2420 node, nr_slabs, nr_objs, nr_free);
2421 }
9a02d699 2422#endif
781b2ba6
PE
2423}
2424
497b66f2
CL
2425static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2426 int node, struct kmem_cache_cpu **pc)
2427{
6faa6833 2428 void *freelist;
188fd063
CL
2429 struct kmem_cache_cpu *c = *pc;
2430 struct page *page;
497b66f2 2431
188fd063 2432 freelist = get_partial(s, flags, node, c);
497b66f2 2433
188fd063
CL
2434 if (freelist)
2435 return freelist;
2436
2437 page = new_slab(s, flags, node);
497b66f2 2438 if (page) {
7c8e0181 2439 c = raw_cpu_ptr(s->cpu_slab);
497b66f2
CL
2440 if (c->page)
2441 flush_slab(s, c);
2442
2443 /*
2444 * No other reference to the page yet so we can
2445 * muck around with it freely without cmpxchg
2446 */
6faa6833 2447 freelist = page->freelist;
497b66f2
CL
2448 page->freelist = NULL;
2449
2450 stat(s, ALLOC_SLAB);
497b66f2
CL
2451 c->page = page;
2452 *pc = c;
2453 } else
6faa6833 2454 freelist = NULL;
497b66f2 2455
6faa6833 2456 return freelist;
497b66f2
CL
2457}
2458
072bb0aa
MG
2459static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2460{
2461 if (unlikely(PageSlabPfmemalloc(page)))
2462 return gfp_pfmemalloc_allowed(gfpflags);
2463
2464 return true;
2465}
2466
213eeb9f 2467/*
d0e0ac97
CG
2468 * Check the page->freelist of a page and either transfer the freelist to the
2469 * per cpu freelist or deactivate the page.
213eeb9f
CL
2470 *
2471 * The page is still frozen if the return value is not NULL.
2472 *
2473 * If this function returns NULL then the page has been unfrozen.
d24ac77f
JK
2474 *
2475 * This function must be called with interrupt disabled.
213eeb9f
CL
2476 */
2477static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2478{
2479 struct page new;
2480 unsigned long counters;
2481 void *freelist;
2482
2483 do {
2484 freelist = page->freelist;
2485 counters = page->counters;
6faa6833 2486
213eeb9f 2487 new.counters = counters;
a0132ac0 2488 VM_BUG_ON(!new.frozen);
213eeb9f
CL
2489
2490 new.inuse = page->objects;
2491 new.frozen = freelist != NULL;
2492
d24ac77f 2493 } while (!__cmpxchg_double_slab(s, page,
213eeb9f
CL
2494 freelist, counters,
2495 NULL, new.counters,
2496 "get_freelist"));
2497
2498 return freelist;
2499}
2500
81819f0f 2501/*
894b8788
CL
2502 * Slow path. The lockless freelist is empty or we need to perform
2503 * debugging duties.
2504 *
894b8788
CL
2505 * Processing is still very fast if new objects have been freed to the
2506 * regular freelist. In that case we simply take over the regular freelist
2507 * as the lockless freelist and zap the regular freelist.
81819f0f 2508 *
894b8788
CL
2509 * If that is not working then we fall back to the partial lists. We take the
2510 * first element of the freelist as the object to allocate now and move the
2511 * rest of the freelist to the lockless freelist.
81819f0f 2512 *
894b8788 2513 * And if we were unable to get a new slab from the partial slab lists then
6446faa2
CL
2514 * we need to allocate a new slab. This is the slowest path since it involves
2515 * a call to the page allocator and the setup of a new slab.
a380a3c7
CL
2516 *
2517 * Version of __slab_alloc to use when we know that interrupts are
2518 * already disabled (which is the case for bulk allocation).
81819f0f 2519 */
a380a3c7 2520static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
ce71e27c 2521 unsigned long addr, struct kmem_cache_cpu *c)
81819f0f 2522{
6faa6833 2523 void *freelist;
f6e7def7 2524 struct page *page;
81819f0f 2525
f6e7def7
CL
2526 page = c->page;
2527 if (!page)
81819f0f 2528 goto new_slab;
49e22585 2529redo:
6faa6833 2530
57d437d2 2531 if (unlikely(!node_match(page, node))) {
a561ce00
JK
2532 int searchnode = node;
2533
2534 if (node != NUMA_NO_NODE && !node_present_pages(node))
2535 searchnode = node_to_mem_node(node);
2536
2537 if (unlikely(!node_match(page, searchnode))) {
2538 stat(s, ALLOC_NODE_MISMATCH);
d4ff6d35 2539 deactivate_slab(s, page, c->freelist, c);
a561ce00
JK
2540 goto new_slab;
2541 }
fc59c053 2542 }
6446faa2 2543
072bb0aa
MG
2544 /*
2545 * By rights, we should be searching for a slab page that was
2546 * PFMEMALLOC but right now, we are losing the pfmemalloc
2547 * information when the page leaves the per-cpu allocator
2548 */
2549 if (unlikely(!pfmemalloc_match(page, gfpflags))) {
d4ff6d35 2550 deactivate_slab(s, page, c->freelist, c);
072bb0aa
MG
2551 goto new_slab;
2552 }
2553
73736e03 2554 /* must check again c->freelist in case of cpu migration or IRQ */
6faa6833
CL
2555 freelist = c->freelist;
2556 if (freelist)
73736e03 2557 goto load_freelist;
03e404af 2558
f6e7def7 2559 freelist = get_freelist(s, page);
6446faa2 2560
6faa6833 2561 if (!freelist) {
03e404af
CL
2562 c->page = NULL;
2563 stat(s, DEACTIVATE_BYPASS);
fc59c053 2564 goto new_slab;
03e404af 2565 }
6446faa2 2566
84e554e6 2567 stat(s, ALLOC_REFILL);
6446faa2 2568
894b8788 2569load_freelist:
507effea
CL
2570 /*
2571 * freelist is pointing to the list of objects to be used.
2572 * page is pointing to the page from which the objects are obtained.
2573 * That page must be frozen for per cpu allocations to work.
2574 */
a0132ac0 2575 VM_BUG_ON(!c->page->frozen);
6faa6833 2576 c->freelist = get_freepointer(s, freelist);
8a5ec0ba 2577 c->tid = next_tid(c->tid);
6faa6833 2578 return freelist;
81819f0f 2579
81819f0f 2580new_slab:
2cfb7455 2581
a93cf07b
WY
2582 if (slub_percpu_partial(c)) {
2583 page = c->page = slub_percpu_partial(c);
2584 slub_set_percpu_partial(c, page);
49e22585 2585 stat(s, CPU_PARTIAL_ALLOC);
49e22585 2586 goto redo;
81819f0f
CL
2587 }
2588
188fd063 2589 freelist = new_slab_objects(s, gfpflags, node, &c);
01ad8a7b 2590
f4697436 2591 if (unlikely(!freelist)) {
9a02d699 2592 slab_out_of_memory(s, gfpflags, node);
f4697436 2593 return NULL;
81819f0f 2594 }
2cfb7455 2595
f6e7def7 2596 page = c->page;
5091b74a 2597 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
4b6f0750 2598 goto load_freelist;
2cfb7455 2599
497b66f2 2600 /* Only entered in the debug case */
d0e0ac97
CG
2601 if (kmem_cache_debug(s) &&
2602 !alloc_debug_processing(s, page, freelist, addr))
497b66f2 2603 goto new_slab; /* Slab failed checks. Next slab needed */
894b8788 2604
d4ff6d35 2605 deactivate_slab(s, page, get_freepointer(s, freelist), c);
6faa6833 2606 return freelist;
894b8788
CL
2607}
2608
a380a3c7
CL
2609/*
2610 * Another one that disabled interrupt and compensates for possible
2611 * cpu changes by refetching the per cpu area pointer.
2612 */
2613static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2614 unsigned long addr, struct kmem_cache_cpu *c)
2615{
2616 void *p;
2617 unsigned long flags;
2618
2619 local_irq_save(flags);
2620#ifdef CONFIG_PREEMPT
2621 /*
2622 * We may have been preempted and rescheduled on a different
2623 * cpu before disabling interrupts. Need to reload cpu area
2624 * pointer.
2625 */
2626 c = this_cpu_ptr(s->cpu_slab);
2627#endif
2628
2629 p = ___slab_alloc(s, gfpflags, node, addr, c);
2630 local_irq_restore(flags);
2631 return p;
2632}
2633
894b8788
CL
2634/*
2635 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2636 * have the fastpath folded into their functions. So no function call
2637 * overhead for requests that can be satisfied on the fastpath.
2638 *
2639 * The fastpath works by first checking if the lockless freelist can be used.
2640 * If not then __slab_alloc is called for slow processing.
2641 *
2642 * Otherwise we can simply pick the next object from the lockless free list.
2643 */
2b847c3c 2644static __always_inline void *slab_alloc_node(struct kmem_cache *s,
ce71e27c 2645 gfp_t gfpflags, int node, unsigned long addr)
894b8788 2646{
03ec0ed5 2647 void *object;
dfb4f096 2648 struct kmem_cache_cpu *c;
57d437d2 2649 struct page *page;
8a5ec0ba 2650 unsigned long tid;
1f84260c 2651
8135be5a
VD
2652 s = slab_pre_alloc_hook(s, gfpflags);
2653 if (!s)
773ff60e 2654 return NULL;
8a5ec0ba 2655redo:
8a5ec0ba
CL
2656 /*
2657 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2658 * enabled. We may switch back and forth between cpus while
2659 * reading from one cpu area. That does not matter as long
2660 * as we end up on the original cpu again when doing the cmpxchg.
7cccd80b 2661 *
9aabf810
JK
2662 * We should guarantee that tid and kmem_cache are retrieved on
2663 * the same cpu. It could be different if CONFIG_PREEMPT so we need
2664 * to check if it is matched or not.
8a5ec0ba 2665 */
9aabf810
JK
2666 do {
2667 tid = this_cpu_read(s->cpu_slab->tid);
2668 c = raw_cpu_ptr(s->cpu_slab);
859b7a0e
MR
2669 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2670 unlikely(tid != READ_ONCE(c->tid)));
9aabf810
JK
2671
2672 /*
2673 * Irqless object alloc/free algorithm used here depends on sequence
2674 * of fetching cpu_slab's data. tid should be fetched before anything
2675 * on c to guarantee that object and page associated with previous tid
2676 * won't be used with current tid. If we fetch tid first, object and
2677 * page could be one associated with next tid and our alloc/free
2678 * request will be failed. In this case, we will retry. So, no problem.
2679 */
2680 barrier();
8a5ec0ba 2681
8a5ec0ba
CL
2682 /*
2683 * The transaction ids are globally unique per cpu and per operation on
2684 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
2685 * occurs on the right processor and that there was no operation on the
2686 * linked list in between.
2687 */
8a5ec0ba 2688
9dfc6e68 2689 object = c->freelist;
57d437d2 2690 page = c->page;
8eae1492 2691 if (unlikely(!object || !node_match(page, node))) {
dfb4f096 2692 object = __slab_alloc(s, gfpflags, node, addr, c);
8eae1492
DH
2693 stat(s, ALLOC_SLOWPATH);
2694 } else {
0ad9500e
ED
2695 void *next_object = get_freepointer_safe(s, object);
2696
8a5ec0ba 2697 /*
25985edc 2698 * The cmpxchg will only match if there was no additional
8a5ec0ba
CL
2699 * operation and if we are on the right processor.
2700 *
d0e0ac97
CG
2701 * The cmpxchg does the following atomically (without lock
2702 * semantics!)
8a5ec0ba
CL
2703 * 1. Relocate first pointer to the current per cpu area.
2704 * 2. Verify that tid and freelist have not been changed
2705 * 3. If they were not changed replace tid and freelist
2706 *
d0e0ac97
CG
2707 * Since this is without lock semantics the protection is only
2708 * against code executing on this cpu *not* from access by
2709 * other cpus.
8a5ec0ba 2710 */
933393f5 2711 if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba
CL
2712 s->cpu_slab->freelist, s->cpu_slab->tid,
2713 object, tid,
0ad9500e 2714 next_object, next_tid(tid)))) {
8a5ec0ba
CL
2715
2716 note_cmpxchg_failure("slab_alloc", s, tid);
2717 goto redo;
2718 }
0ad9500e 2719 prefetch_freepointer(s, next_object);
84e554e6 2720 stat(s, ALLOC_FASTPATH);
894b8788 2721 }
8a5ec0ba 2722
74e2134f 2723 if (unlikely(gfpflags & __GFP_ZERO) && object)
3b0efdfa 2724 memset(object, 0, s->object_size);
d07dbea4 2725
03ec0ed5 2726 slab_post_alloc_hook(s, gfpflags, 1, &object);
5a896d9e 2727
894b8788 2728 return object;
81819f0f
CL
2729}
2730
2b847c3c
EG
2731static __always_inline void *slab_alloc(struct kmem_cache *s,
2732 gfp_t gfpflags, unsigned long addr)
2733{
2734 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2735}
2736
81819f0f
CL
2737void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2738{
2b847c3c 2739 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
5b882be4 2740
d0e0ac97
CG
2741 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2742 s->size, gfpflags);
5b882be4
EGM
2743
2744 return ret;
81819f0f
CL
2745}
2746EXPORT_SYMBOL(kmem_cache_alloc);
2747
0f24f128 2748#ifdef CONFIG_TRACING
4a92379b
RK
2749void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2750{
2b847c3c 2751 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
4a92379b 2752 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
505f5dcb 2753 kasan_kmalloc(s, ret, size, gfpflags);
4a92379b
RK
2754 return ret;
2755}
2756EXPORT_SYMBOL(kmem_cache_alloc_trace);
5b882be4
EGM
2757#endif
2758
81819f0f
CL
2759#ifdef CONFIG_NUMA
2760void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2761{
2b847c3c 2762 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
5b882be4 2763
ca2b84cb 2764 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3b0efdfa 2765 s->object_size, s->size, gfpflags, node);
5b882be4
EGM
2766
2767 return ret;
81819f0f
CL
2768}
2769EXPORT_SYMBOL(kmem_cache_alloc_node);
81819f0f 2770
0f24f128 2771#ifdef CONFIG_TRACING
4a92379b 2772void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
5b882be4 2773 gfp_t gfpflags,
4a92379b 2774 int node, size_t size)
5b882be4 2775{
2b847c3c 2776 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
4a92379b
RK
2777
2778 trace_kmalloc_node(_RET_IP_, ret,
2779 size, s->size, gfpflags, node);
0316bec2 2780
505f5dcb 2781 kasan_kmalloc(s, ret, size, gfpflags);
4a92379b 2782 return ret;
5b882be4 2783}
4a92379b 2784EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
5b882be4 2785#endif
5d1f57e4 2786#endif
5b882be4 2787
81819f0f 2788/*
94e4d712 2789 * Slow path handling. This may still be called frequently since objects
894b8788 2790 * have a longer lifetime than the cpu slabs in most processing loads.
81819f0f 2791 *
894b8788
CL
2792 * So we still attempt to reduce cache line usage. Just take the slab
2793 * lock and free the item. If there is no additional partial page
2794 * handling required then we can return immediately.
81819f0f 2795 */
894b8788 2796static void __slab_free(struct kmem_cache *s, struct page *page,
81084651
JDB
2797 void *head, void *tail, int cnt,
2798 unsigned long addr)
2799
81819f0f
CL
2800{
2801 void *prior;
2cfb7455 2802 int was_frozen;
2cfb7455
CL
2803 struct page new;
2804 unsigned long counters;
2805 struct kmem_cache_node *n = NULL;
61728d1e 2806 unsigned long uninitialized_var(flags);
81819f0f 2807
8a5ec0ba 2808 stat(s, FREE_SLOWPATH);
81819f0f 2809
19c7ff9e 2810 if (kmem_cache_debug(s) &&
282acb43 2811 !free_debug_processing(s, page, head, tail, cnt, addr))
80f08c19 2812 return;
6446faa2 2813
2cfb7455 2814 do {
837d678d
JK
2815 if (unlikely(n)) {
2816 spin_unlock_irqrestore(&n->list_lock, flags);
2817 n = NULL;
2818 }
2cfb7455
CL
2819 prior = page->freelist;
2820 counters = page->counters;
81084651 2821 set_freepointer(s, tail, prior);
2cfb7455
CL
2822 new.counters = counters;
2823 was_frozen = new.frozen;
81084651 2824 new.inuse -= cnt;
837d678d 2825 if ((!new.inuse || !prior) && !was_frozen) {
49e22585 2826
c65c1877 2827 if (kmem_cache_has_cpu_partial(s) && !prior) {
49e22585
CL
2828
2829 /*
d0e0ac97
CG
2830 * Slab was on no list before and will be
2831 * partially empty
2832 * We can defer the list move and instead
2833 * freeze it.
49e22585
CL
2834 */
2835 new.frozen = 1;
2836
c65c1877 2837 } else { /* Needs to be taken off a list */
49e22585 2838
b455def2 2839 n = get_node(s, page_to_nid(page));
49e22585
CL
2840 /*
2841 * Speculatively acquire the list_lock.
2842 * If the cmpxchg does not succeed then we may
2843 * drop the list_lock without any processing.
2844 *
2845 * Otherwise the list_lock will synchronize with
2846 * other processors updating the list of slabs.
2847 */
2848 spin_lock_irqsave(&n->list_lock, flags);
2849
2850 }
2cfb7455 2851 }
81819f0f 2852
2cfb7455
CL
2853 } while (!cmpxchg_double_slab(s, page,
2854 prior, counters,
81084651 2855 head, new.counters,
2cfb7455 2856 "__slab_free"));
81819f0f 2857
2cfb7455 2858 if (likely(!n)) {
49e22585
CL
2859
2860 /*
2861 * If we just froze the page then put it onto the
2862 * per cpu partial list.
2863 */
8028dcea 2864 if (new.frozen && !was_frozen) {
49e22585 2865 put_cpu_partial(s, page, 1);
8028dcea
AS
2866 stat(s, CPU_PARTIAL_FREE);
2867 }
49e22585 2868 /*
2cfb7455
CL
2869 * The list lock was not taken therefore no list
2870 * activity can be necessary.
2871 */
b455def2
L
2872 if (was_frozen)
2873 stat(s, FREE_FROZEN);
2874 return;
2875 }
81819f0f 2876
8a5b20ae 2877 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
837d678d
JK
2878 goto slab_empty;
2879
81819f0f 2880 /*
837d678d
JK
2881 * Objects left in the slab. If it was not on the partial list before
2882 * then add it.
81819f0f 2883 */
345c905d
JK
2884 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2885 if (kmem_cache_debug(s))
c65c1877 2886 remove_full(s, n, page);
837d678d
JK
2887 add_partial(n, page, DEACTIVATE_TO_TAIL);
2888 stat(s, FREE_ADD_PARTIAL);
8ff12cfc 2889 }
80f08c19 2890 spin_unlock_irqrestore(&n->list_lock, flags);
81819f0f
CL
2891 return;
2892
2893slab_empty:
a973e9dd 2894 if (prior) {
81819f0f 2895 /*
6fbabb20 2896 * Slab on the partial list.
81819f0f 2897 */
5cc6eee8 2898 remove_partial(n, page);
84e554e6 2899 stat(s, FREE_REMOVE_PARTIAL);
c65c1877 2900 } else {
6fbabb20 2901 /* Slab must be on the full list */
c65c1877
PZ
2902 remove_full(s, n, page);
2903 }
2cfb7455 2904
80f08c19 2905 spin_unlock_irqrestore(&n->list_lock, flags);
84e554e6 2906 stat(s, FREE_SLAB);
81819f0f 2907 discard_slab(s, page);
81819f0f
CL
2908}
2909
894b8788
CL
2910/*
2911 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2912 * can perform fastpath freeing without additional function calls.
2913 *
2914 * The fastpath is only possible if we are freeing to the current cpu slab
2915 * of this processor. This typically the case if we have just allocated
2916 * the item before.
2917 *
2918 * If fastpath is not possible then fall back to __slab_free where we deal
2919 * with all sorts of special processing.
81084651
JDB
2920 *
2921 * Bulk free of a freelist with several objects (all pointing to the
2922 * same page) possible by specifying head and tail ptr, plus objects
2923 * count (cnt). Bulk free indicated by tail pointer being set.
894b8788 2924 */
80a9201a
AP
2925static __always_inline void do_slab_free(struct kmem_cache *s,
2926 struct page *page, void *head, void *tail,
2927 int cnt, unsigned long addr)
894b8788 2928{
81084651 2929 void *tail_obj = tail ? : head;
dfb4f096 2930 struct kmem_cache_cpu *c;
8a5ec0ba 2931 unsigned long tid;
8a5ec0ba
CL
2932redo:
2933 /*
2934 * Determine the currently cpus per cpu slab.
2935 * The cpu may change afterward. However that does not matter since
2936 * data is retrieved via this pointer. If we are on the same cpu
2ae44005 2937 * during the cmpxchg then the free will succeed.
8a5ec0ba 2938 */
9aabf810
JK
2939 do {
2940 tid = this_cpu_read(s->cpu_slab->tid);
2941 c = raw_cpu_ptr(s->cpu_slab);
859b7a0e
MR
2942 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2943 unlikely(tid != READ_ONCE(c->tid)));
c016b0bd 2944
9aabf810
JK
2945 /* Same with comment on barrier() in slab_alloc_node() */
2946 barrier();
c016b0bd 2947
442b06bc 2948 if (likely(page == c->page)) {
81084651 2949 set_freepointer(s, tail_obj, c->freelist);
8a5ec0ba 2950
933393f5 2951 if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba
CL
2952 s->cpu_slab->freelist, s->cpu_slab->tid,
2953 c->freelist, tid,
81084651 2954 head, next_tid(tid)))) {
8a5ec0ba
CL
2955
2956 note_cmpxchg_failure("slab_free", s, tid);
2957 goto redo;
2958 }
84e554e6 2959 stat(s, FREE_FASTPATH);
894b8788 2960 } else
81084651 2961 __slab_free(s, page, head, tail_obj, cnt, addr);
894b8788 2962
894b8788
CL
2963}
2964
80a9201a
AP
2965static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2966 void *head, void *tail, int cnt,
2967 unsigned long addr)
2968{
2969 slab_free_freelist_hook(s, head, tail);
2970 /*
2971 * slab_free_freelist_hook() could have put the items into quarantine.
2972 * If so, no need to free them.
2973 */
5f0d5a3a 2974 if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
80a9201a
AP
2975 return;
2976 do_slab_free(s, page, head, tail, cnt, addr);
2977}
2978
2979#ifdef CONFIG_KASAN
2980void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
2981{
2982 do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
2983}
2984#endif
2985
81819f0f
CL
2986void kmem_cache_free(struct kmem_cache *s, void *x)
2987{
b9ce5ef4
GC
2988 s = cache_from_obj(s, x);
2989 if (!s)
79576102 2990 return;
81084651 2991 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
ca2b84cb 2992 trace_kmem_cache_free(_RET_IP_, x);
81819f0f
CL
2993}
2994EXPORT_SYMBOL(kmem_cache_free);
2995
d0ecd894 2996struct detached_freelist {
fbd02630 2997 struct page *page;
d0ecd894
JDB
2998 void *tail;
2999 void *freelist;
3000 int cnt;
376bf125 3001 struct kmem_cache *s;
d0ecd894 3002};
fbd02630 3003
d0ecd894
JDB
3004/*
3005 * This function progressively scans the array with free objects (with
3006 * a limited look ahead) and extract objects belonging to the same
3007 * page. It builds a detached freelist directly within the given
3008 * page/objects. This can happen without any need for
3009 * synchronization, because the objects are owned by running process.
3010 * The freelist is build up as a single linked list in the objects.
3011 * The idea is, that this detached freelist can then be bulk
3012 * transferred to the real freelist(s), but only requiring a single
3013 * synchronization primitive. Look ahead in the array is limited due
3014 * to performance reasons.
3015 */
376bf125
JDB
3016static inline
3017int build_detached_freelist(struct kmem_cache *s, size_t size,
3018 void **p, struct detached_freelist *df)
d0ecd894
JDB
3019{
3020 size_t first_skipped_index = 0;
3021 int lookahead = 3;
3022 void *object;
ca257195 3023 struct page *page;
fbd02630 3024
d0ecd894
JDB
3025 /* Always re-init detached_freelist */
3026 df->page = NULL;
fbd02630 3027
d0ecd894
JDB
3028 do {
3029 object = p[--size];
ca257195 3030 /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
d0ecd894 3031 } while (!object && size);
3eed034d 3032
d0ecd894
JDB
3033 if (!object)
3034 return 0;
fbd02630 3035
ca257195
JDB
3036 page = virt_to_head_page(object);
3037 if (!s) {
3038 /* Handle kalloc'ed objects */
3039 if (unlikely(!PageSlab(page))) {
3040 BUG_ON(!PageCompound(page));
3041 kfree_hook(object);
4949148a 3042 __free_pages(page, compound_order(page));
ca257195
JDB
3043 p[size] = NULL; /* mark object processed */
3044 return size;
3045 }
3046 /* Derive kmem_cache from object */
3047 df->s = page->slab_cache;
3048 } else {
3049 df->s = cache_from_obj(s, object); /* Support for memcg */
3050 }
376bf125 3051
d0ecd894 3052 /* Start new detached freelist */
ca257195 3053 df->page = page;
376bf125 3054 set_freepointer(df->s, object, NULL);
d0ecd894
JDB
3055 df->tail = object;
3056 df->freelist = object;
3057 p[size] = NULL; /* mark object processed */
3058 df->cnt = 1;
3059
3060 while (size) {
3061 object = p[--size];
3062 if (!object)
3063 continue; /* Skip processed objects */
3064
3065 /* df->page is always set at this point */
3066 if (df->page == virt_to_head_page(object)) {
3067 /* Opportunity build freelist */
376bf125 3068 set_freepointer(df->s, object, df->freelist);
d0ecd894
JDB
3069 df->freelist = object;
3070 df->cnt++;
3071 p[size] = NULL; /* mark object processed */
3072
3073 continue;
fbd02630 3074 }
d0ecd894
JDB
3075
3076 /* Limit look ahead search */
3077 if (!--lookahead)
3078 break;
3079
3080 if (!first_skipped_index)
3081 first_skipped_index = size + 1;
fbd02630 3082 }
d0ecd894
JDB
3083
3084 return first_skipped_index;
3085}
3086
d0ecd894 3087/* Note that interrupts must be enabled when calling this function. */
376bf125 3088void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
d0ecd894
JDB
3089{
3090 if (WARN_ON(!size))
3091 return;
3092
3093 do {
3094 struct detached_freelist df;
3095
3096 size = build_detached_freelist(s, size, p, &df);
84582c8a 3097 if (!df.page)
d0ecd894
JDB
3098 continue;
3099
376bf125 3100 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
d0ecd894 3101 } while (likely(size));
484748f0
CL
3102}
3103EXPORT_SYMBOL(kmem_cache_free_bulk);
3104
994eb764 3105/* Note that interrupts must be enabled when calling this function. */
865762a8
JDB
3106int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3107 void **p)
484748f0 3108{
994eb764
JDB
3109 struct kmem_cache_cpu *c;
3110 int i;
3111
03ec0ed5
JDB
3112 /* memcg and kmem_cache debug support */
3113 s = slab_pre_alloc_hook(s, flags);
3114 if (unlikely(!s))
3115 return false;
994eb764
JDB
3116 /*
3117 * Drain objects in the per cpu slab, while disabling local
3118 * IRQs, which protects against PREEMPT and interrupts
3119 * handlers invoking normal fastpath.
3120 */
3121 local_irq_disable();
3122 c = this_cpu_ptr(s->cpu_slab);
3123
3124 for (i = 0; i < size; i++) {
3125 void *object = c->freelist;
3126
ebe909e0 3127 if (unlikely(!object)) {
ebe909e0
JDB
3128 /*
3129 * Invoking slow path likely have side-effect
3130 * of re-populating per CPU c->freelist
3131 */
87098373 3132 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
ebe909e0 3133 _RET_IP_, c);
87098373
CL
3134 if (unlikely(!p[i]))
3135 goto error;
3136
ebe909e0
JDB
3137 c = this_cpu_ptr(s->cpu_slab);
3138 continue; /* goto for-loop */
3139 }
994eb764
JDB
3140 c->freelist = get_freepointer(s, object);
3141 p[i] = object;
3142 }
3143 c->tid = next_tid(c->tid);
3144 local_irq_enable();
3145
3146 /* Clear memory outside IRQ disabled fastpath loop */
3147 if (unlikely(flags & __GFP_ZERO)) {
3148 int j;
3149
3150 for (j = 0; j < i; j++)
3151 memset(p[j], 0, s->object_size);
3152 }
3153
03ec0ed5
JDB
3154 /* memcg and kmem_cache debug support */
3155 slab_post_alloc_hook(s, flags, size, p);
865762a8 3156 return i;
87098373 3157error:
87098373 3158 local_irq_enable();
03ec0ed5
JDB
3159 slab_post_alloc_hook(s, flags, i, p);
3160 __kmem_cache_free_bulk(s, i, p);
865762a8 3161 return 0;
484748f0
CL
3162}
3163EXPORT_SYMBOL(kmem_cache_alloc_bulk);
3164
3165
81819f0f 3166/*
672bba3a
CL
3167 * Object placement in a slab is made very easy because we always start at
3168 * offset 0. If we tune the size of the object to the alignment then we can
3169 * get the required alignment by putting one properly sized object after
3170 * another.
81819f0f
CL
3171 *
3172 * Notice that the allocation order determines the sizes of the per cpu
3173 * caches. Each processor has always one slab available for allocations.
3174 * Increasing the allocation order reduces the number of times that slabs
672bba3a 3175 * must be moved on and off the partial lists and is therefore a factor in
81819f0f 3176 * locking overhead.
81819f0f
CL
3177 */
3178
3179/*
3180 * Mininum / Maximum order of slab pages. This influences locking overhead
3181 * and slab fragmentation. A higher order reduces the number of partial slabs
3182 * and increases the number of allocations possible without having to
3183 * take the list_lock.
3184 */
3185static int slub_min_order;
114e9e89 3186static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
9b2cd506 3187static int slub_min_objects;
81819f0f 3188
81819f0f
CL
3189/*
3190 * Calculate the order of allocation given an slab object size.
3191 *
672bba3a
CL
3192 * The order of allocation has significant impact on performance and other
3193 * system components. Generally order 0 allocations should be preferred since
3194 * order 0 does not cause fragmentation in the page allocator. Larger objects
3195 * be problematic to put into order 0 slabs because there may be too much
c124f5b5 3196 * unused space left. We go to a higher order if more than 1/16th of the slab
672bba3a
CL
3197 * would be wasted.
3198 *
3199 * In order to reach satisfactory performance we must ensure that a minimum
3200 * number of objects is in one slab. Otherwise we may generate too much
3201 * activity on the partial lists which requires taking the list_lock. This is
3202 * less a concern for large slabs though which are rarely used.
81819f0f 3203 *
672bba3a
CL
3204 * slub_max_order specifies the order where we begin to stop considering the
3205 * number of objects in a slab as critical. If we reach slub_max_order then
3206 * we try to keep the page order as low as possible. So we accept more waste
3207 * of space in favor of a small page order.
81819f0f 3208 *
672bba3a
CL
3209 * Higher order allocations also allow the placement of more objects in a
3210 * slab and thereby reduce object handling overhead. If the user has
3211 * requested a higher mininum order then we start with that one instead of
3212 * the smallest order which will fit the object.
81819f0f 3213 */
5e6d444e 3214static inline int slab_order(int size, int min_objects,
ab9a0f19 3215 int max_order, int fract_leftover, int reserved)
81819f0f
CL
3216{
3217 int order;
3218 int rem;
6300ea75 3219 int min_order = slub_min_order;
81819f0f 3220
ab9a0f19 3221 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
210b5c06 3222 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
39b26464 3223
9f835703 3224 for (order = max(min_order, get_order(min_objects * size + reserved));
5e6d444e 3225 order <= max_order; order++) {
81819f0f 3226
5e6d444e 3227 unsigned long slab_size = PAGE_SIZE << order;
81819f0f 3228
ab9a0f19 3229 rem = (slab_size - reserved) % size;
81819f0f 3230
5e6d444e 3231 if (rem <= slab_size / fract_leftover)
81819f0f 3232 break;
81819f0f 3233 }
672bba3a 3234
81819f0f
CL
3235 return order;
3236}
3237
ab9a0f19 3238static inline int calculate_order(int size, int reserved)
5e6d444e
CL
3239{
3240 int order;
3241 int min_objects;
3242 int fraction;
e8120ff1 3243 int max_objects;
5e6d444e
CL
3244
3245 /*
3246 * Attempt to find best configuration for a slab. This
3247 * works by first attempting to generate a layout with
3248 * the best configuration and backing off gradually.
3249 *
422ff4d7 3250 * First we increase the acceptable waste in a slab. Then
5e6d444e
CL
3251 * we reduce the minimum objects required in a slab.
3252 */
3253 min_objects = slub_min_objects;
9b2cd506
CL
3254 if (!min_objects)
3255 min_objects = 4 * (fls(nr_cpu_ids) + 1);
ab9a0f19 3256 max_objects = order_objects(slub_max_order, size, reserved);
e8120ff1
ZY
3257 min_objects = min(min_objects, max_objects);
3258
5e6d444e 3259 while (min_objects > 1) {
c124f5b5 3260 fraction = 16;
5e6d444e
CL
3261 while (fraction >= 4) {
3262 order = slab_order(size, min_objects,
ab9a0f19 3263 slub_max_order, fraction, reserved);
5e6d444e
CL
3264 if (order <= slub_max_order)
3265 return order;
3266 fraction /= 2;
3267 }
5086c389 3268 min_objects--;
5e6d444e
CL
3269 }
3270
3271 /*
3272 * We were unable to place multiple objects in a slab. Now
3273 * lets see if we can place a single object there.
3274 */
ab9a0f19 3275 order = slab_order(size, 1, slub_max_order, 1, reserved);
5e6d444e
CL
3276 if (order <= slub_max_order)
3277 return order;
3278
3279 /*
3280 * Doh this slab cannot be placed using slub_max_order.
3281 */
ab9a0f19 3282 order = slab_order(size, 1, MAX_ORDER, 1, reserved);
818cf590 3283 if (order < MAX_ORDER)
5e6d444e
CL
3284 return order;
3285 return -ENOSYS;
3286}
3287
5595cffc 3288static void
4053497d 3289init_kmem_cache_node(struct kmem_cache_node *n)
81819f0f
CL
3290{
3291 n->nr_partial = 0;
81819f0f
CL
3292 spin_lock_init(&n->list_lock);
3293 INIT_LIST_HEAD(&n->partial);
8ab1372f 3294#ifdef CONFIG_SLUB_DEBUG
0f389ec6 3295 atomic_long_set(&n->nr_slabs, 0);
02b71b70 3296 atomic_long_set(&n->total_objects, 0);
643b1138 3297 INIT_LIST_HEAD(&n->full);
8ab1372f 3298#endif
81819f0f
CL
3299}
3300
55136592 3301static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4c93c355 3302{
6c182dc0 3303 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
95a05b42 3304 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
4c93c355 3305
8a5ec0ba 3306 /*
d4d84fef
CM
3307 * Must align to double word boundary for the double cmpxchg
3308 * instructions to work; see __pcpu_double_call_return_bool().
8a5ec0ba 3309 */
d4d84fef
CM
3310 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3311 2 * sizeof(void *));
8a5ec0ba
CL
3312
3313 if (!s->cpu_slab)
3314 return 0;
3315
3316 init_kmem_cache_cpus(s);
4c93c355 3317
8a5ec0ba 3318 return 1;
4c93c355 3319}
4c93c355 3320
51df1142
CL
3321static struct kmem_cache *kmem_cache_node;
3322
81819f0f
CL
3323/*
3324 * No kmalloc_node yet so do it by hand. We know that this is the first
3325 * slab on the node for this slabcache. There are no concurrent accesses
3326 * possible.
3327 *
721ae22a
ZYW
3328 * Note that this function only works on the kmem_cache_node
3329 * when allocating for the kmem_cache_node. This is used for bootstrapping
4c93c355 3330 * memory on a fresh node that has no slab structures yet.
81819f0f 3331 */
55136592 3332static void early_kmem_cache_node_alloc(int node)
81819f0f
CL
3333{
3334 struct page *page;
3335 struct kmem_cache_node *n;
3336
51df1142 3337 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
81819f0f 3338
51df1142 3339 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
81819f0f
CL
3340
3341 BUG_ON(!page);
a2f92ee7 3342 if (page_to_nid(page) != node) {
f9f58285
FF
3343 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3344 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
a2f92ee7
CL
3345 }
3346
81819f0f
CL
3347 n = page->freelist;
3348 BUG_ON(!n);
51df1142 3349 page->freelist = get_freepointer(kmem_cache_node, n);
e6e82ea1 3350 page->inuse = 1;
8cb0a506 3351 page->frozen = 0;
51df1142 3352 kmem_cache_node->node[node] = n;
8ab1372f 3353#ifdef CONFIG_SLUB_DEBUG
f7cb1933 3354 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
51df1142 3355 init_tracking(kmem_cache_node, n);
8ab1372f 3356#endif
505f5dcb
AP
3357 kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
3358 GFP_KERNEL);
4053497d 3359 init_kmem_cache_node(n);
51df1142 3360 inc_slabs_node(kmem_cache_node, node, page->objects);
6446faa2 3361
67b6c900 3362 /*
1e4dd946
SR
3363 * No locks need to be taken here as it has just been
3364 * initialized and there is no concurrent access.
67b6c900 3365 */
1e4dd946 3366 __add_partial(n, page, DEACTIVATE_TO_HEAD);
81819f0f
CL
3367}
3368
3369static void free_kmem_cache_nodes(struct kmem_cache *s)
3370{
3371 int node;
fa45dc25 3372 struct kmem_cache_node *n;
81819f0f 3373
fa45dc25 3374 for_each_kmem_cache_node(s, node, n) {
81819f0f 3375 s->node[node] = NULL;
ea37df54 3376 kmem_cache_free(kmem_cache_node, n);
81819f0f
CL
3377 }
3378}
3379
52b4b950
DS
3380void __kmem_cache_release(struct kmem_cache *s)
3381{
210e7a43 3382 cache_random_seq_destroy(s);
52b4b950
DS
3383 free_percpu(s->cpu_slab);
3384 free_kmem_cache_nodes(s);
3385}
3386
55136592 3387static int init_kmem_cache_nodes(struct kmem_cache *s)
81819f0f
CL
3388{
3389 int node;
81819f0f 3390
f64dc58c 3391 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
3392 struct kmem_cache_node *n;
3393
73367bd8 3394 if (slab_state == DOWN) {
55136592 3395 early_kmem_cache_node_alloc(node);
73367bd8
AD
3396 continue;
3397 }
51df1142 3398 n = kmem_cache_alloc_node(kmem_cache_node,
55136592 3399 GFP_KERNEL, node);
81819f0f 3400
73367bd8
AD
3401 if (!n) {
3402 free_kmem_cache_nodes(s);
3403 return 0;
81819f0f 3404 }
73367bd8 3405
4053497d 3406 init_kmem_cache_node(n);
ea37df54 3407 s->node[node] = n;
81819f0f
CL
3408 }
3409 return 1;
3410}
81819f0f 3411
c0bdb232 3412static void set_min_partial(struct kmem_cache *s, unsigned long min)
3b89d7d8
DR
3413{
3414 if (min < MIN_PARTIAL)
3415 min = MIN_PARTIAL;
3416 else if (min > MAX_PARTIAL)
3417 min = MAX_PARTIAL;
3418 s->min_partial = min;
3419}
3420
e6d0e1dc
WY
3421static void set_cpu_partial(struct kmem_cache *s)
3422{
3423#ifdef CONFIG_SLUB_CPU_PARTIAL
3424 /*
3425 * cpu_partial determined the maximum number of objects kept in the
3426 * per cpu partial lists of a processor.
3427 *
3428 * Per cpu partial lists mainly contain slabs that just have one
3429 * object freed. If they are used for allocation then they can be
3430 * filled up again with minimal effort. The slab will never hit the
3431 * per node partial lists and therefore no locking will be required.
3432 *
3433 * This setting also determines
3434 *
3435 * A) The number of objects from per cpu partial slabs dumped to the
3436 * per node list when we reach the limit.
3437 * B) The number of objects in cpu partial slabs to extract from the
3438 * per node list when we run out of per cpu objects. We only fetch
3439 * 50% to keep some capacity around for frees.
3440 */
3441 if (!kmem_cache_has_cpu_partial(s))
3442 s->cpu_partial = 0;
3443 else if (s->size >= PAGE_SIZE)
3444 s->cpu_partial = 2;
3445 else if (s->size >= 1024)
3446 s->cpu_partial = 6;
3447 else if (s->size >= 256)
3448 s->cpu_partial = 13;
3449 else
3450 s->cpu_partial = 30;
3451#endif
3452}
3453
81819f0f
CL
3454/*
3455 * calculate_sizes() determines the order and the distribution of data within
3456 * a slab object.
3457 */
06b285dc 3458static int calculate_sizes(struct kmem_cache *s, int forced_order)
81819f0f 3459{
d50112ed 3460 slab_flags_t flags = s->flags;
80a9201a 3461 size_t size = s->object_size;
834f3d11 3462 int order;
81819f0f 3463
d8b42bf5
CL
3464 /*
3465 * Round up object size to the next word boundary. We can only
3466 * place the free pointer at word boundaries and this determines
3467 * the possible location of the free pointer.
3468 */
3469 size = ALIGN(size, sizeof(void *));
3470
3471#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
3472 /*
3473 * Determine if we can poison the object itself. If the user of
3474 * the slab may touch the object after free or before allocation
3475 * then we should never poison the object itself.
3476 */
5f0d5a3a 3477 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
c59def9f 3478 !s->ctor)
81819f0f
CL
3479 s->flags |= __OBJECT_POISON;
3480 else
3481 s->flags &= ~__OBJECT_POISON;
3482
81819f0f
CL
3483
3484 /*
672bba3a 3485 * If we are Redzoning then check if there is some space between the
81819f0f 3486 * end of the object and the free pointer. If not then add an
672bba3a 3487 * additional word to have some bytes to store Redzone information.
81819f0f 3488 */
3b0efdfa 3489 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
81819f0f 3490 size += sizeof(void *);
41ecc55b 3491#endif
81819f0f
CL
3492
3493 /*
672bba3a
CL
3494 * With that we have determined the number of bytes in actual use
3495 * by the object. This is the potential offset to the free pointer.
81819f0f
CL
3496 */
3497 s->inuse = size;
3498
5f0d5a3a 3499 if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
c59def9f 3500 s->ctor)) {
81819f0f
CL
3501 /*
3502 * Relocate free pointer after the object if it is not
3503 * permitted to overwrite the first word of the object on
3504 * kmem_cache_free.
3505 *
3506 * This is the case if we do RCU, have a constructor or
3507 * destructor or are poisoning the objects.
3508 */
3509 s->offset = size;
3510 size += sizeof(void *);
3511 }
3512
c12b3c62 3513#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
3514 if (flags & SLAB_STORE_USER)
3515 /*
3516 * Need to store information about allocs and frees after
3517 * the object.
3518 */
3519 size += 2 * sizeof(struct track);
80a9201a 3520#endif
81819f0f 3521
80a9201a
AP
3522 kasan_cache_create(s, &size, &s->flags);
3523#ifdef CONFIG_SLUB_DEBUG
d86bd1be 3524 if (flags & SLAB_RED_ZONE) {
81819f0f
CL
3525 /*
3526 * Add some empty padding so that we can catch
3527 * overwrites from earlier objects rather than let
3528 * tracking information or the free pointer be
0211a9c8 3529 * corrupted if a user writes before the start
81819f0f
CL
3530 * of the object.
3531 */
3532 size += sizeof(void *);
d86bd1be
JK
3533
3534 s->red_left_pad = sizeof(void *);
3535 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3536 size += s->red_left_pad;
3537 }
41ecc55b 3538#endif
672bba3a 3539
81819f0f
CL
3540 /*
3541 * SLUB stores one object immediately after another beginning from
3542 * offset 0. In order to align the objects we have to simply size
3543 * each object to conform to the alignment.
3544 */
45906855 3545 size = ALIGN(size, s->align);
81819f0f 3546 s->size = size;
06b285dc
CL
3547 if (forced_order >= 0)
3548 order = forced_order;
3549 else
ab9a0f19 3550 order = calculate_order(size, s->reserved);
81819f0f 3551
834f3d11 3552 if (order < 0)
81819f0f
CL
3553 return 0;
3554
b7a49f0d 3555 s->allocflags = 0;
834f3d11 3556 if (order)
b7a49f0d
CL
3557 s->allocflags |= __GFP_COMP;
3558
3559 if (s->flags & SLAB_CACHE_DMA)
2c59dd65 3560 s->allocflags |= GFP_DMA;
b7a49f0d
CL
3561
3562 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3563 s->allocflags |= __GFP_RECLAIMABLE;
3564
81819f0f
CL
3565 /*
3566 * Determine the number of objects per slab
3567 */
ab9a0f19
LJ
3568 s->oo = oo_make(order, size, s->reserved);
3569 s->min = oo_make(get_order(size), size, s->reserved);
205ab99d
CL
3570 if (oo_objects(s->oo) > oo_objects(s->max))
3571 s->max = s->oo;
81819f0f 3572
834f3d11 3573 return !!oo_objects(s->oo);
81819f0f
CL
3574}
3575
d50112ed 3576static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
81819f0f 3577{
8a13a4cc 3578 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
ab9a0f19 3579 s->reserved = 0;
2482ddec
KC
3580#ifdef CONFIG_SLAB_FREELIST_HARDENED
3581 s->random = get_random_long();
3582#endif
81819f0f 3583
5f0d5a3a 3584 if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
da9a638c 3585 s->reserved = sizeof(struct rcu_head);
81819f0f 3586
06b285dc 3587 if (!calculate_sizes(s, -1))
81819f0f 3588 goto error;
3de47213
DR
3589 if (disable_higher_order_debug) {
3590 /*
3591 * Disable debugging flags that store metadata if the min slab
3592 * order increased.
3593 */
3b0efdfa 3594 if (get_order(s->size) > get_order(s->object_size)) {
3de47213
DR
3595 s->flags &= ~DEBUG_METADATA_FLAGS;
3596 s->offset = 0;
3597 if (!calculate_sizes(s, -1))
3598 goto error;
3599 }
3600 }
81819f0f 3601
2565409f
HC
3602#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3603 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
149daaf3 3604 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
b789ef51
CL
3605 /* Enable fast mode */
3606 s->flags |= __CMPXCHG_DOUBLE;
3607#endif
3608
3b89d7d8
DR
3609 /*
3610 * The larger the object size is, the more pages we want on the partial
3611 * list to avoid pounding the page allocator excessively.
3612 */
49e22585
CL
3613 set_min_partial(s, ilog2(s->size) / 2);
3614
e6d0e1dc 3615 set_cpu_partial(s);
49e22585 3616
81819f0f 3617#ifdef CONFIG_NUMA
e2cb96b7 3618 s->remote_node_defrag_ratio = 1000;
81819f0f 3619#endif
210e7a43
TG
3620
3621 /* Initialize the pre-computed randomized freelist if slab is up */
3622 if (slab_state >= UP) {
3623 if (init_cache_random_seq(s))
3624 goto error;
3625 }
3626
55136592 3627 if (!init_kmem_cache_nodes(s))
dfb4f096 3628 goto error;
81819f0f 3629
55136592 3630 if (alloc_kmem_cache_cpus(s))
278b1bb1 3631 return 0;
ff12059e 3632
4c93c355 3633 free_kmem_cache_nodes(s);
81819f0f
CL
3634error:
3635 if (flags & SLAB_PANIC)
756a025f
JP
3636 panic("Cannot create slab %s size=%lu realsize=%u order=%u offset=%u flags=%lx\n",
3637 s->name, (unsigned long)s->size, s->size,
4fd0b46e 3638 oo_order(s->oo), s->offset, (unsigned long)flags);
278b1bb1 3639 return -EINVAL;
81819f0f 3640}
81819f0f 3641
33b12c38
CL
3642static void list_slab_objects(struct kmem_cache *s, struct page *page,
3643 const char *text)
3644{
3645#ifdef CONFIG_SLUB_DEBUG
3646 void *addr = page_address(page);
3647 void *p;
a5dd5c11
NK
3648 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3649 sizeof(long), GFP_ATOMIC);
bbd7d57b
ED
3650 if (!map)
3651 return;
945cf2b6 3652 slab_err(s, page, text, s->name);
33b12c38 3653 slab_lock(page);
33b12c38 3654
5f80b13a 3655 get_map(s, page, map);
33b12c38
CL
3656 for_each_object(p, s, addr, page->objects) {
3657
3658 if (!test_bit(slab_index(p, s, addr), map)) {
f9f58285 3659 pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
33b12c38
CL
3660 print_tracking(s, p);
3661 }
3662 }
3663 slab_unlock(page);
bbd7d57b 3664 kfree(map);
33b12c38
CL
3665#endif
3666}
3667
81819f0f 3668/*
599870b1 3669 * Attempt to free all partial slabs on a node.
52b4b950
DS
3670 * This is called from __kmem_cache_shutdown(). We must take list_lock
3671 * because sysfs file might still access partial list after the shutdowning.
81819f0f 3672 */
599870b1 3673static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
81819f0f 3674{
60398923 3675 LIST_HEAD(discard);
81819f0f
CL
3676 struct page *page, *h;
3677
52b4b950
DS
3678 BUG_ON(irqs_disabled());
3679 spin_lock_irq(&n->list_lock);
33b12c38 3680 list_for_each_entry_safe(page, h, &n->partial, lru) {
81819f0f 3681 if (!page->inuse) {
52b4b950 3682 remove_partial(n, page);
60398923 3683 list_add(&page->lru, &discard);
33b12c38
CL
3684 } else {
3685 list_slab_objects(s, page,
52b4b950 3686 "Objects remaining in %s on __kmem_cache_shutdown()");
599870b1 3687 }
33b12c38 3688 }
52b4b950 3689 spin_unlock_irq(&n->list_lock);
60398923
CW
3690
3691 list_for_each_entry_safe(page, h, &discard, lru)
3692 discard_slab(s, page);
81819f0f
CL
3693}
3694
3695/*
672bba3a 3696 * Release all resources used by a slab cache.
81819f0f 3697 */
52b4b950 3698int __kmem_cache_shutdown(struct kmem_cache *s)
81819f0f
CL
3699{
3700 int node;
fa45dc25 3701 struct kmem_cache_node *n;
81819f0f
CL
3702
3703 flush_all(s);
81819f0f 3704 /* Attempt to free all objects */
fa45dc25 3705 for_each_kmem_cache_node(s, node, n) {
599870b1
CL
3706 free_partial(s, n);
3707 if (n->nr_partial || slabs_node(s, node))
81819f0f
CL
3708 return 1;
3709 }
bf5eb3de 3710 sysfs_slab_remove(s);
81819f0f
CL
3711 return 0;
3712}
3713
81819f0f
CL
3714/********************************************************************
3715 * Kmalloc subsystem
3716 *******************************************************************/
3717
81819f0f
CL
3718static int __init setup_slub_min_order(char *str)
3719{
06428780 3720 get_option(&str, &slub_min_order);
81819f0f
CL
3721
3722 return 1;
3723}
3724
3725__setup("slub_min_order=", setup_slub_min_order);
3726
3727static int __init setup_slub_max_order(char *str)
3728{
06428780 3729 get_option(&str, &slub_max_order);
818cf590 3730 slub_max_order = min(slub_max_order, MAX_ORDER - 1);
81819f0f
CL
3731
3732 return 1;
3733}
3734
3735__setup("slub_max_order=", setup_slub_max_order);
3736
3737static int __init setup_slub_min_objects(char *str)
3738{
06428780 3739 get_option(&str, &slub_min_objects);
81819f0f
CL
3740
3741 return 1;
3742}
3743
3744__setup("slub_min_objects=", setup_slub_min_objects);
3745
81819f0f
CL
3746void *__kmalloc(size_t size, gfp_t flags)
3747{
aadb4bc4 3748 struct kmem_cache *s;
5b882be4 3749 void *ret;
81819f0f 3750
95a05b42 3751 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
eada35ef 3752 return kmalloc_large(size, flags);
aadb4bc4 3753
2c59dd65 3754 s = kmalloc_slab(size, flags);
aadb4bc4
CL
3755
3756 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
3757 return s;
3758
2b847c3c 3759 ret = slab_alloc(s, flags, _RET_IP_);
5b882be4 3760
ca2b84cb 3761 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
5b882be4 3762
505f5dcb 3763 kasan_kmalloc(s, ret, size, flags);
0316bec2 3764
5b882be4 3765 return ret;
81819f0f
CL
3766}
3767EXPORT_SYMBOL(__kmalloc);
3768
5d1f57e4 3769#ifdef CONFIG_NUMA
f619cfe1
CL
3770static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3771{
b1eeab67 3772 struct page *page;
e4f7c0b4 3773 void *ptr = NULL;
f619cfe1 3774
75f296d9 3775 flags |= __GFP_COMP;
4949148a 3776 page = alloc_pages_node(node, flags, get_order(size));
f619cfe1 3777 if (page)
e4f7c0b4
CM
3778 ptr = page_address(page);
3779
d56791b3 3780 kmalloc_large_node_hook(ptr, size, flags);
e4f7c0b4 3781 return ptr;
f619cfe1
CL
3782}
3783
81819f0f
CL
3784void *__kmalloc_node(size_t size, gfp_t flags, int node)
3785{
aadb4bc4 3786 struct kmem_cache *s;
5b882be4 3787 void *ret;
81819f0f 3788
95a05b42 3789 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
5b882be4
EGM
3790 ret = kmalloc_large_node(size, flags, node);
3791
ca2b84cb
EGM
3792 trace_kmalloc_node(_RET_IP_, ret,
3793 size, PAGE_SIZE << get_order(size),
3794 flags, node);
5b882be4
EGM
3795
3796 return ret;
3797 }
aadb4bc4 3798
2c59dd65 3799 s = kmalloc_slab(size, flags);
aadb4bc4
CL
3800
3801 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
3802 return s;
3803
2b847c3c 3804 ret = slab_alloc_node(s, flags, node, _RET_IP_);
5b882be4 3805
ca2b84cb 3806 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
5b882be4 3807
505f5dcb 3808 kasan_kmalloc(s, ret, size, flags);
0316bec2 3809
5b882be4 3810 return ret;
81819f0f
CL
3811}
3812EXPORT_SYMBOL(__kmalloc_node);
3813#endif
3814
ed18adc1
KC
3815#ifdef CONFIG_HARDENED_USERCOPY
3816/*
afcc90f8
KC
3817 * Rejects incorrectly sized objects and objects that are to be copied
3818 * to/from userspace but do not fall entirely within the containing slab
3819 * cache's usercopy region.
ed18adc1
KC
3820 *
3821 * Returns NULL if check passes, otherwise const char * to name of cache
3822 * to indicate an error.
3823 */
f4e6e289
KC
3824void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
3825 bool to_user)
ed18adc1
KC
3826{
3827 struct kmem_cache *s;
3828 unsigned long offset;
3829 size_t object_size;
3830
3831 /* Find object and usable object size. */
3832 s = page->slab_cache;
ed18adc1
KC
3833
3834 /* Reject impossible pointers. */
3835 if (ptr < page_address(page))
f4e6e289
KC
3836 usercopy_abort("SLUB object not in SLUB page?!", NULL,
3837 to_user, 0, n);
ed18adc1
KC
3838
3839 /* Find offset within object. */
3840 offset = (ptr - page_address(page)) % s->size;
3841
3842 /* Adjust for redzone and reject if within the redzone. */
3843 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
3844 if (offset < s->red_left_pad)
f4e6e289
KC
3845 usercopy_abort("SLUB object in left red zone",
3846 s->name, to_user, offset, n);
ed18adc1
KC
3847 offset -= s->red_left_pad;
3848 }
3849
afcc90f8
KC
3850 /* Allow address range falling entirely within usercopy region. */
3851 if (offset >= s->useroffset &&
3852 offset - s->useroffset <= s->usersize &&
3853 n <= s->useroffset - offset + s->usersize)
f4e6e289 3854 return;
ed18adc1 3855
afcc90f8
KC
3856 /*
3857 * If the copy is still within the allocated object, produce
3858 * a warning instead of rejecting the copy. This is intended
3859 * to be a temporary method to find any missing usercopy
3860 * whitelists.
3861 */
3862 object_size = slab_ksize(s);
2d891fbc
KC
3863 if (usercopy_fallback &&
3864 offset <= object_size && n <= object_size - offset) {
afcc90f8
KC
3865 usercopy_warn("SLUB object", s->name, to_user, offset, n);
3866 return;
3867 }
ed18adc1 3868
f4e6e289 3869 usercopy_abort("SLUB object", s->name, to_user, offset, n);
ed18adc1
KC
3870}
3871#endif /* CONFIG_HARDENED_USERCOPY */
3872
0316bec2 3873static size_t __ksize(const void *object)
81819f0f 3874{
272c1d21 3875 struct page *page;
81819f0f 3876
ef8b4520 3877 if (unlikely(object == ZERO_SIZE_PTR))
272c1d21
CL
3878 return 0;
3879
294a80a8 3880 page = virt_to_head_page(object);
294a80a8 3881
76994412
PE
3882 if (unlikely(!PageSlab(page))) {
3883 WARN_ON(!PageCompound(page));
294a80a8 3884 return PAGE_SIZE << compound_order(page);
76994412 3885 }
81819f0f 3886
1b4f59e3 3887 return slab_ksize(page->slab_cache);
81819f0f 3888}
0316bec2
AR
3889
3890size_t ksize(const void *object)
3891{
3892 size_t size = __ksize(object);
3893 /* We assume that ksize callers could use whole allocated area,
4ebb31a4
AP
3894 * so we need to unpoison this area.
3895 */
3896 kasan_unpoison_shadow(object, size);
0316bec2
AR
3897 return size;
3898}
b1aabecd 3899EXPORT_SYMBOL(ksize);
81819f0f
CL
3900
3901void kfree(const void *x)
3902{
81819f0f 3903 struct page *page;
5bb983b0 3904 void *object = (void *)x;
81819f0f 3905
2121db74
PE
3906 trace_kfree(_RET_IP_, x);
3907
2408c550 3908 if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0f
CL
3909 return;
3910
b49af68f 3911 page = virt_to_head_page(x);
aadb4bc4 3912 if (unlikely(!PageSlab(page))) {
0937502a 3913 BUG_ON(!PageCompound(page));
47adccce 3914 kfree_hook(object);
4949148a 3915 __free_pages(page, compound_order(page));
aadb4bc4
CL
3916 return;
3917 }
81084651 3918 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
81819f0f
CL
3919}
3920EXPORT_SYMBOL(kfree);
3921
832f37f5
VD
3922#define SHRINK_PROMOTE_MAX 32
3923
2086d26a 3924/*
832f37f5
VD
3925 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
3926 * up most to the head of the partial lists. New allocations will then
3927 * fill those up and thus they can be removed from the partial lists.
672bba3a
CL
3928 *
3929 * The slabs with the least items are placed last. This results in them
3930 * being allocated from last increasing the chance that the last objects
3931 * are freed in them.
2086d26a 3932 */
c9fc5864 3933int __kmem_cache_shrink(struct kmem_cache *s)
2086d26a
CL
3934{
3935 int node;
3936 int i;
3937 struct kmem_cache_node *n;
3938 struct page *page;
3939 struct page *t;
832f37f5
VD
3940 struct list_head discard;
3941 struct list_head promote[SHRINK_PROMOTE_MAX];
2086d26a 3942 unsigned long flags;
ce3712d7 3943 int ret = 0;
2086d26a 3944
2086d26a 3945 flush_all(s);
fa45dc25 3946 for_each_kmem_cache_node(s, node, n) {
832f37f5
VD
3947 INIT_LIST_HEAD(&discard);
3948 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
3949 INIT_LIST_HEAD(promote + i);
2086d26a
CL
3950
3951 spin_lock_irqsave(&n->list_lock, flags);
3952
3953 /*
832f37f5 3954 * Build lists of slabs to discard or promote.
2086d26a 3955 *
672bba3a
CL
3956 * Note that concurrent frees may occur while we hold the
3957 * list_lock. page->inuse here is the upper limit.
2086d26a
CL
3958 */
3959 list_for_each_entry_safe(page, t, &n->partial, lru) {
832f37f5
VD
3960 int free = page->objects - page->inuse;
3961
3962 /* Do not reread page->inuse */
3963 barrier();
3964
3965 /* We do not keep full slabs on the list */
3966 BUG_ON(free <= 0);
3967
3968 if (free == page->objects) {
3969 list_move(&page->lru, &discard);
69cb8e6b 3970 n->nr_partial--;
832f37f5
VD
3971 } else if (free <= SHRINK_PROMOTE_MAX)
3972 list_move(&page->lru, promote + free - 1);
2086d26a
CL
3973 }
3974
2086d26a 3975 /*
832f37f5
VD
3976 * Promote the slabs filled up most to the head of the
3977 * partial list.
2086d26a 3978 */
832f37f5
VD
3979 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
3980 list_splice(promote + i, &n->partial);
2086d26a 3981
2086d26a 3982 spin_unlock_irqrestore(&n->list_lock, flags);
69cb8e6b
CL
3983
3984 /* Release empty slabs */
832f37f5 3985 list_for_each_entry_safe(page, t, &discard, lru)
69cb8e6b 3986 discard_slab(s, page);
ce3712d7
VD
3987
3988 if (slabs_node(s, node))
3989 ret = 1;
2086d26a
CL
3990 }
3991
ce3712d7 3992 return ret;
2086d26a 3993}
2086d26a 3994
c9fc5864 3995#ifdef CONFIG_MEMCG
01fb58bc
TH
3996static void kmemcg_cache_deact_after_rcu(struct kmem_cache *s)
3997{
50862ce7
TH
3998 /*
3999 * Called with all the locks held after a sched RCU grace period.
4000 * Even if @s becomes empty after shrinking, we can't know that @s
4001 * doesn't have allocations already in-flight and thus can't
4002 * destroy @s until the associated memcg is released.
4003 *
4004 * However, let's remove the sysfs files for empty caches here.
4005 * Each cache has a lot of interface files which aren't
4006 * particularly useful for empty draining caches; otherwise, we can
4007 * easily end up with millions of unnecessary sysfs files on
4008 * systems which have a lot of memory and transient cgroups.
4009 */
4010 if (!__kmem_cache_shrink(s))
4011 sysfs_slab_remove(s);
01fb58bc
TH
4012}
4013
c9fc5864
TH
4014void __kmemcg_cache_deactivate(struct kmem_cache *s)
4015{
4016 /*
4017 * Disable empty slabs caching. Used to avoid pinning offline
4018 * memory cgroups by kmem pages that can be freed.
4019 */
e6d0e1dc 4020 slub_set_cpu_partial(s, 0);
c9fc5864
TH
4021 s->min_partial = 0;
4022
4023 /*
4024 * s->cpu_partial is checked locklessly (see put_cpu_partial), so
01fb58bc 4025 * we have to make sure the change is visible before shrinking.
c9fc5864 4026 */
01fb58bc 4027 slab_deactivate_memcg_cache_rcu_sched(s, kmemcg_cache_deact_after_rcu);
c9fc5864
TH
4028}
4029#endif
4030
b9049e23
YG
4031static int slab_mem_going_offline_callback(void *arg)
4032{
4033 struct kmem_cache *s;
4034
18004c5d 4035 mutex_lock(&slab_mutex);
b9049e23 4036 list_for_each_entry(s, &slab_caches, list)
c9fc5864 4037 __kmem_cache_shrink(s);
18004c5d 4038 mutex_unlock(&slab_mutex);
b9049e23
YG
4039
4040 return 0;
4041}
4042
4043static void slab_mem_offline_callback(void *arg)
4044{
4045 struct kmem_cache_node *n;
4046 struct kmem_cache *s;
4047 struct memory_notify *marg = arg;
4048 int offline_node;
4049
b9d5ab25 4050 offline_node = marg->status_change_nid_normal;
b9049e23
YG
4051
4052 /*
4053 * If the node still has available memory. we need kmem_cache_node
4054 * for it yet.
4055 */
4056 if (offline_node < 0)
4057 return;
4058
18004c5d 4059 mutex_lock(&slab_mutex);
b9049e23
YG
4060 list_for_each_entry(s, &slab_caches, list) {
4061 n = get_node(s, offline_node);
4062 if (n) {
4063 /*
4064 * if n->nr_slabs > 0, slabs still exist on the node
4065 * that is going down. We were unable to free them,
c9404c9c 4066 * and offline_pages() function shouldn't call this
b9049e23
YG
4067 * callback. So, we must fail.
4068 */
0f389ec6 4069 BUG_ON(slabs_node(s, offline_node));
b9049e23
YG
4070
4071 s->node[offline_node] = NULL;
8de66a0c 4072 kmem_cache_free(kmem_cache_node, n);
b9049e23
YG
4073 }
4074 }
18004c5d 4075 mutex_unlock(&slab_mutex);
b9049e23
YG
4076}
4077
4078static int slab_mem_going_online_callback(void *arg)
4079{
4080 struct kmem_cache_node *n;
4081 struct kmem_cache *s;
4082 struct memory_notify *marg = arg;
b9d5ab25 4083 int nid = marg->status_change_nid_normal;
b9049e23
YG
4084 int ret = 0;
4085
4086 /*
4087 * If the node's memory is already available, then kmem_cache_node is
4088 * already created. Nothing to do.
4089 */
4090 if (nid < 0)
4091 return 0;
4092
4093 /*
0121c619 4094 * We are bringing a node online. No memory is available yet. We must
b9049e23
YG
4095 * allocate a kmem_cache_node structure in order to bring the node
4096 * online.
4097 */
18004c5d 4098 mutex_lock(&slab_mutex);
b9049e23
YG
4099 list_for_each_entry(s, &slab_caches, list) {
4100 /*
4101 * XXX: kmem_cache_alloc_node will fallback to other nodes
4102 * since memory is not yet available from the node that
4103 * is brought up.
4104 */
8de66a0c 4105 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
b9049e23
YG
4106 if (!n) {
4107 ret = -ENOMEM;
4108 goto out;
4109 }
4053497d 4110 init_kmem_cache_node(n);
b9049e23
YG
4111 s->node[nid] = n;
4112 }
4113out:
18004c5d 4114 mutex_unlock(&slab_mutex);
b9049e23
YG
4115 return ret;
4116}
4117
4118static int slab_memory_callback(struct notifier_block *self,
4119 unsigned long action, void *arg)
4120{
4121 int ret = 0;
4122
4123 switch (action) {
4124 case MEM_GOING_ONLINE:
4125 ret = slab_mem_going_online_callback(arg);
4126 break;
4127 case MEM_GOING_OFFLINE:
4128 ret = slab_mem_going_offline_callback(arg);
4129 break;
4130 case MEM_OFFLINE:
4131 case MEM_CANCEL_ONLINE:
4132 slab_mem_offline_callback(arg);
4133 break;
4134 case MEM_ONLINE:
4135 case MEM_CANCEL_OFFLINE:
4136 break;
4137 }
dc19f9db
KH
4138 if (ret)
4139 ret = notifier_from_errno(ret);
4140 else
4141 ret = NOTIFY_OK;
b9049e23
YG
4142 return ret;
4143}
4144
3ac38faa
AM
4145static struct notifier_block slab_memory_callback_nb = {
4146 .notifier_call = slab_memory_callback,
4147 .priority = SLAB_CALLBACK_PRI,
4148};
b9049e23 4149
81819f0f
CL
4150/********************************************************************
4151 * Basic setup of slabs
4152 *******************************************************************/
4153
51df1142
CL
4154/*
4155 * Used for early kmem_cache structures that were allocated using
dffb4d60
CL
4156 * the page allocator. Allocate them properly then fix up the pointers
4157 * that may be pointing to the wrong kmem_cache structure.
51df1142
CL
4158 */
4159
dffb4d60 4160static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
51df1142
CL
4161{
4162 int node;
dffb4d60 4163 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
fa45dc25 4164 struct kmem_cache_node *n;
51df1142 4165
dffb4d60 4166 memcpy(s, static_cache, kmem_cache->object_size);
51df1142 4167
7d557b3c
GC
4168 /*
4169 * This runs very early, and only the boot processor is supposed to be
4170 * up. Even if it weren't true, IRQs are not up so we couldn't fire
4171 * IPIs around.
4172 */
4173 __flush_cpu_slab(s, smp_processor_id());
fa45dc25 4174 for_each_kmem_cache_node(s, node, n) {
51df1142
CL
4175 struct page *p;
4176
fa45dc25
CL
4177 list_for_each_entry(p, &n->partial, lru)
4178 p->slab_cache = s;
51df1142 4179
607bf324 4180#ifdef CONFIG_SLUB_DEBUG
fa45dc25
CL
4181 list_for_each_entry(p, &n->full, lru)
4182 p->slab_cache = s;
51df1142 4183#endif
51df1142 4184 }
f7ce3190 4185 slab_init_memcg_params(s);
dffb4d60 4186 list_add(&s->list, &slab_caches);
510ded33 4187 memcg_link_cache(s);
dffb4d60 4188 return s;
51df1142
CL
4189}
4190
81819f0f
CL
4191void __init kmem_cache_init(void)
4192{
dffb4d60
CL
4193 static __initdata struct kmem_cache boot_kmem_cache,
4194 boot_kmem_cache_node;
51df1142 4195
fc8d8620
SG
4196 if (debug_guardpage_minorder())
4197 slub_max_order = 0;
4198
dffb4d60
CL
4199 kmem_cache_node = &boot_kmem_cache_node;
4200 kmem_cache = &boot_kmem_cache;
51df1142 4201
dffb4d60 4202 create_boot_cache(kmem_cache_node, "kmem_cache_node",
8eb8284b 4203 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
b9049e23 4204
3ac38faa 4205 register_hotmemory_notifier(&slab_memory_callback_nb);
81819f0f
CL
4206
4207 /* Able to allocate the per node structures */
4208 slab_state = PARTIAL;
4209
dffb4d60
CL
4210 create_boot_cache(kmem_cache, "kmem_cache",
4211 offsetof(struct kmem_cache, node) +
4212 nr_node_ids * sizeof(struct kmem_cache_node *),
8eb8284b 4213 SLAB_HWCACHE_ALIGN, 0, 0);
8a13a4cc 4214
dffb4d60 4215 kmem_cache = bootstrap(&boot_kmem_cache);
81819f0f 4216
51df1142
CL
4217 /*
4218 * Allocate kmem_cache_node properly from the kmem_cache slab.
4219 * kmem_cache_node is separately allocated so no need to
4220 * update any list pointers.
4221 */
dffb4d60 4222 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
51df1142
CL
4223
4224 /* Now we can use the kmem_cache to allocate kmalloc slabs */
34cc6990 4225 setup_kmalloc_cache_index_table();
f97d5f63 4226 create_kmalloc_caches(0);
81819f0f 4227
210e7a43
TG
4228 /* Setup random freelists for each cache */
4229 init_freelist_randomization();
4230
a96a87bf
SAS
4231 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4232 slub_cpu_dead);
81819f0f 4233
9b130ad5 4234 pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%u, Nodes=%d\n",
f97d5f63 4235 cache_line_size(),
81819f0f
CL
4236 slub_min_order, slub_max_order, slub_min_objects,
4237 nr_cpu_ids, nr_node_ids);
4238}
4239
7e85ee0c
PE
4240void __init kmem_cache_init_late(void)
4241{
7e85ee0c
PE
4242}
4243
2633d7a0 4244struct kmem_cache *
f4957d5b 4245__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
d50112ed 4246 slab_flags_t flags, void (*ctor)(void *))
81819f0f 4247{
426589f5 4248 struct kmem_cache *s, *c;
81819f0f 4249
a44cb944 4250 s = find_mergeable(size, align, flags, name, ctor);
81819f0f
CL
4251 if (s) {
4252 s->refcount++;
84d0ddd6 4253
81819f0f
CL
4254 /*
4255 * Adjust the object sizes so that we clear
4256 * the complete object on kzalloc.
4257 */
3b0efdfa 4258 s->object_size = max(s->object_size, (int)size);
81819f0f 4259 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
6446faa2 4260
426589f5 4261 for_each_memcg_cache(c, s) {
84d0ddd6
VD
4262 c->object_size = s->object_size;
4263 c->inuse = max_t(int, c->inuse,
4264 ALIGN(size, sizeof(void *)));
4265 }
4266
7b8f3b66 4267 if (sysfs_slab_alias(s, name)) {
7b8f3b66 4268 s->refcount--;
cbb79694 4269 s = NULL;
7b8f3b66 4270 }
a0e1d1be 4271 }
6446faa2 4272
cbb79694
CL
4273 return s;
4274}
84c1cf62 4275
d50112ed 4276int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
cbb79694 4277{
aac3a166
PE
4278 int err;
4279
4280 err = kmem_cache_open(s, flags);
4281 if (err)
4282 return err;
20cea968 4283
45530c44
CL
4284 /* Mutex is not taken during early boot */
4285 if (slab_state <= UP)
4286 return 0;
4287
107dab5c 4288 memcg_propagate_slab_attrs(s);
aac3a166 4289 err = sysfs_slab_add(s);
aac3a166 4290 if (err)
52b4b950 4291 __kmem_cache_release(s);
20cea968 4292
aac3a166 4293 return err;
81819f0f 4294}
81819f0f 4295
ce71e27c 4296void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
81819f0f 4297{
aadb4bc4 4298 struct kmem_cache *s;
94b528d0 4299 void *ret;
aadb4bc4 4300
95a05b42 4301 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
eada35ef
PE
4302 return kmalloc_large(size, gfpflags);
4303
2c59dd65 4304 s = kmalloc_slab(size, gfpflags);
81819f0f 4305
2408c550 4306 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 4307 return s;
81819f0f 4308
2b847c3c 4309 ret = slab_alloc(s, gfpflags, caller);
94b528d0 4310
25985edc 4311 /* Honor the call site pointer we received. */
ca2b84cb 4312 trace_kmalloc(caller, ret, size, s->size, gfpflags);
94b528d0
EGM
4313
4314 return ret;
81819f0f
CL
4315}
4316
5d1f57e4 4317#ifdef CONFIG_NUMA
81819f0f 4318void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
ce71e27c 4319 int node, unsigned long caller)
81819f0f 4320{
aadb4bc4 4321 struct kmem_cache *s;
94b528d0 4322 void *ret;
aadb4bc4 4323
95a05b42 4324 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
d3e14aa3
XF
4325 ret = kmalloc_large_node(size, gfpflags, node);
4326
4327 trace_kmalloc_node(caller, ret,
4328 size, PAGE_SIZE << get_order(size),
4329 gfpflags, node);
4330
4331 return ret;
4332 }
eada35ef 4333
2c59dd65 4334 s = kmalloc_slab(size, gfpflags);
81819f0f 4335
2408c550 4336 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 4337 return s;
81819f0f 4338
2b847c3c 4339 ret = slab_alloc_node(s, gfpflags, node, caller);
94b528d0 4340
25985edc 4341 /* Honor the call site pointer we received. */
ca2b84cb 4342 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
94b528d0
EGM
4343
4344 return ret;
81819f0f 4345}
5d1f57e4 4346#endif
81819f0f 4347
ab4d5ed5 4348#ifdef CONFIG_SYSFS
205ab99d
CL
4349static int count_inuse(struct page *page)
4350{
4351 return page->inuse;
4352}
4353
4354static int count_total(struct page *page)
4355{
4356 return page->objects;
4357}
ab4d5ed5 4358#endif
205ab99d 4359
ab4d5ed5 4360#ifdef CONFIG_SLUB_DEBUG
434e245d
CL
4361static int validate_slab(struct kmem_cache *s, struct page *page,
4362 unsigned long *map)
53e15af0
CL
4363{
4364 void *p;
a973e9dd 4365 void *addr = page_address(page);
53e15af0
CL
4366
4367 if (!check_slab(s, page) ||
4368 !on_freelist(s, page, NULL))
4369 return 0;
4370
4371 /* Now we know that a valid freelist exists */
39b26464 4372 bitmap_zero(map, page->objects);
53e15af0 4373
5f80b13a
CL
4374 get_map(s, page, map);
4375 for_each_object(p, s, addr, page->objects) {
4376 if (test_bit(slab_index(p, s, addr), map))
4377 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4378 return 0;
53e15af0
CL
4379 }
4380
224a88be 4381 for_each_object(p, s, addr, page->objects)
7656c72b 4382 if (!test_bit(slab_index(p, s, addr), map))
37d57443 4383 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
53e15af0
CL
4384 return 0;
4385 return 1;
4386}
4387
434e245d
CL
4388static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4389 unsigned long *map)
53e15af0 4390{
881db7fb
CL
4391 slab_lock(page);
4392 validate_slab(s, page, map);
4393 slab_unlock(page);
53e15af0
CL
4394}
4395
434e245d
CL
4396static int validate_slab_node(struct kmem_cache *s,
4397 struct kmem_cache_node *n, unsigned long *map)
53e15af0
CL
4398{
4399 unsigned long count = 0;
4400 struct page *page;
4401 unsigned long flags;
4402
4403 spin_lock_irqsave(&n->list_lock, flags);
4404
4405 list_for_each_entry(page, &n->partial, lru) {
434e245d 4406 validate_slab_slab(s, page, map);
53e15af0
CL
4407 count++;
4408 }
4409 if (count != n->nr_partial)
f9f58285
FF
4410 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4411 s->name, count, n->nr_partial);
53e15af0
CL
4412
4413 if (!(s->flags & SLAB_STORE_USER))
4414 goto out;
4415
4416 list_for_each_entry(page, &n->full, lru) {
434e245d 4417 validate_slab_slab(s, page, map);
53e15af0
CL
4418 count++;
4419 }
4420 if (count != atomic_long_read(&n->nr_slabs))
f9f58285
FF
4421 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4422 s->name, count, atomic_long_read(&n->nr_slabs));
53e15af0
CL
4423
4424out:
4425 spin_unlock_irqrestore(&n->list_lock, flags);
4426 return count;
4427}
4428
434e245d 4429static long validate_slab_cache(struct kmem_cache *s)
53e15af0
CL
4430{
4431 int node;
4432 unsigned long count = 0;
205ab99d 4433 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
434e245d 4434 sizeof(unsigned long), GFP_KERNEL);
fa45dc25 4435 struct kmem_cache_node *n;
434e245d
CL
4436
4437 if (!map)
4438 return -ENOMEM;
53e15af0
CL
4439
4440 flush_all(s);
fa45dc25 4441 for_each_kmem_cache_node(s, node, n)
434e245d 4442 count += validate_slab_node(s, n, map);
434e245d 4443 kfree(map);
53e15af0
CL
4444 return count;
4445}
88a420e4 4446/*
672bba3a 4447 * Generate lists of code addresses where slabcache objects are allocated
88a420e4
CL
4448 * and freed.
4449 */
4450
4451struct location {
4452 unsigned long count;
ce71e27c 4453 unsigned long addr;
45edfa58
CL
4454 long long sum_time;
4455 long min_time;
4456 long max_time;
4457 long min_pid;
4458 long max_pid;
174596a0 4459 DECLARE_BITMAP(cpus, NR_CPUS);
45edfa58 4460 nodemask_t nodes;
88a420e4
CL
4461};
4462
4463struct loc_track {
4464 unsigned long max;
4465 unsigned long count;
4466 struct location *loc;
4467};
4468
4469static void free_loc_track(struct loc_track *t)
4470{
4471 if (t->max)
4472 free_pages((unsigned long)t->loc,
4473 get_order(sizeof(struct location) * t->max));
4474}
4475
68dff6a9 4476static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4
CL
4477{
4478 struct location *l;
4479 int order;
4480
88a420e4
CL
4481 order = get_order(sizeof(struct location) * max);
4482
68dff6a9 4483 l = (void *)__get_free_pages(flags, order);
88a420e4
CL
4484 if (!l)
4485 return 0;
4486
4487 if (t->count) {
4488 memcpy(l, t->loc, sizeof(struct location) * t->count);
4489 free_loc_track(t);
4490 }
4491 t->max = max;
4492 t->loc = l;
4493 return 1;
4494}
4495
4496static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa58 4497 const struct track *track)
88a420e4
CL
4498{
4499 long start, end, pos;
4500 struct location *l;
ce71e27c 4501 unsigned long caddr;
45edfa58 4502 unsigned long age = jiffies - track->when;
88a420e4
CL
4503
4504 start = -1;
4505 end = t->count;
4506
4507 for ( ; ; ) {
4508 pos = start + (end - start + 1) / 2;
4509
4510 /*
4511 * There is nothing at "end". If we end up there
4512 * we need to add something to before end.
4513 */
4514 if (pos == end)
4515 break;
4516
4517 caddr = t->loc[pos].addr;
45edfa58
CL
4518 if (track->addr == caddr) {
4519
4520 l = &t->loc[pos];
4521 l->count++;
4522 if (track->when) {
4523 l->sum_time += age;
4524 if (age < l->min_time)
4525 l->min_time = age;
4526 if (age > l->max_time)
4527 l->max_time = age;
4528
4529 if (track->pid < l->min_pid)
4530 l->min_pid = track->pid;
4531 if (track->pid > l->max_pid)
4532 l->max_pid = track->pid;
4533
174596a0
RR
4534 cpumask_set_cpu(track->cpu,
4535 to_cpumask(l->cpus));
45edfa58
CL
4536 }
4537 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
4538 return 1;
4539 }
4540
45edfa58 4541 if (track->addr < caddr)
88a420e4
CL
4542 end = pos;
4543 else
4544 start = pos;
4545 }
4546
4547 /*
672bba3a 4548 * Not found. Insert new tracking element.
88a420e4 4549 */
68dff6a9 4550 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4
CL
4551 return 0;
4552
4553 l = t->loc + pos;
4554 if (pos < t->count)
4555 memmove(l + 1, l,
4556 (t->count - pos) * sizeof(struct location));
4557 t->count++;
4558 l->count = 1;
45edfa58
CL
4559 l->addr = track->addr;
4560 l->sum_time = age;
4561 l->min_time = age;
4562 l->max_time = age;
4563 l->min_pid = track->pid;
4564 l->max_pid = track->pid;
174596a0
RR
4565 cpumask_clear(to_cpumask(l->cpus));
4566 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
45edfa58
CL
4567 nodes_clear(l->nodes);
4568 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
4569 return 1;
4570}
4571
4572static void process_slab(struct loc_track *t, struct kmem_cache *s,
bbd7d57b 4573 struct page *page, enum track_item alloc,
a5dd5c11 4574 unsigned long *map)
88a420e4 4575{
a973e9dd 4576 void *addr = page_address(page);
88a420e4
CL
4577 void *p;
4578
39b26464 4579 bitmap_zero(map, page->objects);
5f80b13a 4580 get_map(s, page, map);
88a420e4 4581
224a88be 4582 for_each_object(p, s, addr, page->objects)
45edfa58
CL
4583 if (!test_bit(slab_index(p, s, addr), map))
4584 add_location(t, s, get_track(s, p, alloc));
88a420e4
CL
4585}
4586
4587static int list_locations(struct kmem_cache *s, char *buf,
4588 enum track_item alloc)
4589{
e374d483 4590 int len = 0;
88a420e4 4591 unsigned long i;
68dff6a9 4592 struct loc_track t = { 0, 0, NULL };
88a420e4 4593 int node;
bbd7d57b
ED
4594 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4595 sizeof(unsigned long), GFP_KERNEL);
fa45dc25 4596 struct kmem_cache_node *n;
88a420e4 4597
bbd7d57b 4598 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
0ee931c4 4599 GFP_KERNEL)) {
bbd7d57b 4600 kfree(map);
68dff6a9 4601 return sprintf(buf, "Out of memory\n");
bbd7d57b 4602 }
88a420e4
CL
4603 /* Push back cpu slabs */
4604 flush_all(s);
4605
fa45dc25 4606 for_each_kmem_cache_node(s, node, n) {
88a420e4
CL
4607 unsigned long flags;
4608 struct page *page;
4609
9e86943b 4610 if (!atomic_long_read(&n->nr_slabs))
88a420e4
CL
4611 continue;
4612
4613 spin_lock_irqsave(&n->list_lock, flags);
4614 list_for_each_entry(page, &n->partial, lru)
bbd7d57b 4615 process_slab(&t, s, page, alloc, map);
88a420e4 4616 list_for_each_entry(page, &n->full, lru)
bbd7d57b 4617 process_slab(&t, s, page, alloc, map);
88a420e4
CL
4618 spin_unlock_irqrestore(&n->list_lock, flags);
4619 }
4620
4621 for (i = 0; i < t.count; i++) {
45edfa58 4622 struct location *l = &t.loc[i];
88a420e4 4623
9c246247 4624 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
88a420e4 4625 break;
e374d483 4626 len += sprintf(buf + len, "%7ld ", l->count);
45edfa58
CL
4627
4628 if (l->addr)
62c70bce 4629 len += sprintf(buf + len, "%pS", (void *)l->addr);
88a420e4 4630 else
e374d483 4631 len += sprintf(buf + len, "<not-available>");
45edfa58
CL
4632
4633 if (l->sum_time != l->min_time) {
e374d483 4634 len += sprintf(buf + len, " age=%ld/%ld/%ld",
f8bd2258
RZ
4635 l->min_time,
4636 (long)div_u64(l->sum_time, l->count),
4637 l->max_time);
45edfa58 4638 } else
e374d483 4639 len += sprintf(buf + len, " age=%ld",
45edfa58
CL
4640 l->min_time);
4641
4642 if (l->min_pid != l->max_pid)
e374d483 4643 len += sprintf(buf + len, " pid=%ld-%ld",
45edfa58
CL
4644 l->min_pid, l->max_pid);
4645 else
e374d483 4646 len += sprintf(buf + len, " pid=%ld",
45edfa58
CL
4647 l->min_pid);
4648
174596a0
RR
4649 if (num_online_cpus() > 1 &&
4650 !cpumask_empty(to_cpumask(l->cpus)) &&
5024c1d7
TH
4651 len < PAGE_SIZE - 60)
4652 len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4653 " cpus=%*pbl",
4654 cpumask_pr_args(to_cpumask(l->cpus)));
45edfa58 4655
62bc62a8 4656 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
5024c1d7
TH
4657 len < PAGE_SIZE - 60)
4658 len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4659 " nodes=%*pbl",
4660 nodemask_pr_args(&l->nodes));
45edfa58 4661
e374d483 4662 len += sprintf(buf + len, "\n");
88a420e4
CL
4663 }
4664
4665 free_loc_track(&t);
bbd7d57b 4666 kfree(map);
88a420e4 4667 if (!t.count)
e374d483
HH
4668 len += sprintf(buf, "No data\n");
4669 return len;
88a420e4 4670}
ab4d5ed5 4671#endif
88a420e4 4672
a5a84755 4673#ifdef SLUB_RESILIENCY_TEST
c07b8183 4674static void __init resiliency_test(void)
a5a84755
CL
4675{
4676 u8 *p;
4677
95a05b42 4678 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
a5a84755 4679
f9f58285
FF
4680 pr_err("SLUB resiliency testing\n");
4681 pr_err("-----------------------\n");
4682 pr_err("A. Corruption after allocation\n");
a5a84755
CL
4683
4684 p = kzalloc(16, GFP_KERNEL);
4685 p[16] = 0x12;
f9f58285
FF
4686 pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4687 p + 16);
a5a84755
CL
4688
4689 validate_slab_cache(kmalloc_caches[4]);
4690
4691 /* Hmmm... The next two are dangerous */
4692 p = kzalloc(32, GFP_KERNEL);
4693 p[32 + sizeof(void *)] = 0x34;
f9f58285
FF
4694 pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
4695 p);
4696 pr_err("If allocated object is overwritten then not detectable\n\n");
a5a84755
CL
4697
4698 validate_slab_cache(kmalloc_caches[5]);
4699 p = kzalloc(64, GFP_KERNEL);
4700 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4701 *p = 0x56;
f9f58285
FF
4702 pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4703 p);
4704 pr_err("If allocated object is overwritten then not detectable\n\n");
a5a84755
CL
4705 validate_slab_cache(kmalloc_caches[6]);
4706
f9f58285 4707 pr_err("\nB. Corruption after free\n");
a5a84755
CL
4708 p = kzalloc(128, GFP_KERNEL);
4709 kfree(p);
4710 *p = 0x78;
f9f58285 4711 pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
a5a84755
CL
4712 validate_slab_cache(kmalloc_caches[7]);
4713
4714 p = kzalloc(256, GFP_KERNEL);
4715 kfree(p);
4716 p[50] = 0x9a;
f9f58285 4717 pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
a5a84755
CL
4718 validate_slab_cache(kmalloc_caches[8]);
4719
4720 p = kzalloc(512, GFP_KERNEL);
4721 kfree(p);
4722 p[512] = 0xab;
f9f58285 4723 pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
a5a84755
CL
4724 validate_slab_cache(kmalloc_caches[9]);
4725}
4726#else
4727#ifdef CONFIG_SYSFS
4728static void resiliency_test(void) {};
4729#endif
4730#endif
4731
ab4d5ed5 4732#ifdef CONFIG_SYSFS
81819f0f 4733enum slab_stat_type {
205ab99d
CL
4734 SL_ALL, /* All slabs */
4735 SL_PARTIAL, /* Only partially allocated slabs */
4736 SL_CPU, /* Only slabs used for cpu caches */
4737 SL_OBJECTS, /* Determine allocated objects not slabs */
4738 SL_TOTAL /* Determine object capacity not slabs */
81819f0f
CL
4739};
4740
205ab99d 4741#define SO_ALL (1 << SL_ALL)
81819f0f
CL
4742#define SO_PARTIAL (1 << SL_PARTIAL)
4743#define SO_CPU (1 << SL_CPU)
4744#define SO_OBJECTS (1 << SL_OBJECTS)
205ab99d 4745#define SO_TOTAL (1 << SL_TOTAL)
81819f0f 4746
1663f26d
TH
4747#ifdef CONFIG_MEMCG
4748static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
4749
4750static int __init setup_slub_memcg_sysfs(char *str)
4751{
4752 int v;
4753
4754 if (get_option(&str, &v) > 0)
4755 memcg_sysfs_enabled = v;
4756
4757 return 1;
4758}
4759
4760__setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
4761#endif
4762
62e5c4b4
CG
4763static ssize_t show_slab_objects(struct kmem_cache *s,
4764 char *buf, unsigned long flags)
81819f0f
CL
4765{
4766 unsigned long total = 0;
81819f0f
CL
4767 int node;
4768 int x;
4769 unsigned long *nodes;
81819f0f 4770
e35e1a97 4771 nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
62e5c4b4
CG
4772 if (!nodes)
4773 return -ENOMEM;
81819f0f 4774
205ab99d
CL
4775 if (flags & SO_CPU) {
4776 int cpu;
81819f0f 4777
205ab99d 4778 for_each_possible_cpu(cpu) {
d0e0ac97
CG
4779 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4780 cpu);
ec3ab083 4781 int node;
49e22585 4782 struct page *page;
dfb4f096 4783
4db0c3c2 4784 page = READ_ONCE(c->page);
ec3ab083
CL
4785 if (!page)
4786 continue;
205ab99d 4787
ec3ab083
CL
4788 node = page_to_nid(page);
4789 if (flags & SO_TOTAL)
4790 x = page->objects;
4791 else if (flags & SO_OBJECTS)
4792 x = page->inuse;
4793 else
4794 x = 1;
49e22585 4795
ec3ab083
CL
4796 total += x;
4797 nodes[node] += x;
4798
a93cf07b 4799 page = slub_percpu_partial_read_once(c);
49e22585 4800 if (page) {
8afb1474
LZ
4801 node = page_to_nid(page);
4802 if (flags & SO_TOTAL)
4803 WARN_ON_ONCE(1);
4804 else if (flags & SO_OBJECTS)
4805 WARN_ON_ONCE(1);
4806 else
4807 x = page->pages;
bc6697d8
ED
4808 total += x;
4809 nodes[node] += x;
49e22585 4810 }
81819f0f
CL
4811 }
4812 }
4813
bfc8c901 4814 get_online_mems();
ab4d5ed5 4815#ifdef CONFIG_SLUB_DEBUG
205ab99d 4816 if (flags & SO_ALL) {
fa45dc25
CL
4817 struct kmem_cache_node *n;
4818
4819 for_each_kmem_cache_node(s, node, n) {
205ab99d 4820
d0e0ac97
CG
4821 if (flags & SO_TOTAL)
4822 x = atomic_long_read(&n->total_objects);
4823 else if (flags & SO_OBJECTS)
4824 x = atomic_long_read(&n->total_objects) -
4825 count_partial(n, count_free);
81819f0f 4826 else
205ab99d 4827 x = atomic_long_read(&n->nr_slabs);
81819f0f
CL
4828 total += x;
4829 nodes[node] += x;
4830 }
4831
ab4d5ed5
CL
4832 } else
4833#endif
4834 if (flags & SO_PARTIAL) {
fa45dc25 4835 struct kmem_cache_node *n;
81819f0f 4836
fa45dc25 4837 for_each_kmem_cache_node(s, node, n) {
205ab99d
CL
4838 if (flags & SO_TOTAL)
4839 x = count_partial(n, count_total);
4840 else if (flags & SO_OBJECTS)
4841 x = count_partial(n, count_inuse);
81819f0f 4842 else
205ab99d 4843 x = n->nr_partial;
81819f0f
CL
4844 total += x;
4845 nodes[node] += x;
4846 }
4847 }
81819f0f
CL
4848 x = sprintf(buf, "%lu", total);
4849#ifdef CONFIG_NUMA
fa45dc25 4850 for (node = 0; node < nr_node_ids; node++)
81819f0f
CL
4851 if (nodes[node])
4852 x += sprintf(buf + x, " N%d=%lu",
4853 node, nodes[node]);
4854#endif
bfc8c901 4855 put_online_mems();
81819f0f
CL
4856 kfree(nodes);
4857 return x + sprintf(buf + x, "\n");
4858}
4859
ab4d5ed5 4860#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
4861static int any_slab_objects(struct kmem_cache *s)
4862{
4863 int node;
fa45dc25 4864 struct kmem_cache_node *n;
81819f0f 4865
fa45dc25 4866 for_each_kmem_cache_node(s, node, n)
4ea33e2d 4867 if (atomic_long_read(&n->total_objects))
81819f0f 4868 return 1;
fa45dc25 4869
81819f0f
CL
4870 return 0;
4871}
ab4d5ed5 4872#endif
81819f0f
CL
4873
4874#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
497888cf 4875#define to_slab(n) container_of(n, struct kmem_cache, kobj)
81819f0f
CL
4876
4877struct slab_attribute {
4878 struct attribute attr;
4879 ssize_t (*show)(struct kmem_cache *s, char *buf);
4880 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4881};
4882
4883#define SLAB_ATTR_RO(_name) \
ab067e99
VK
4884 static struct slab_attribute _name##_attr = \
4885 __ATTR(_name, 0400, _name##_show, NULL)
81819f0f
CL
4886
4887#define SLAB_ATTR(_name) \
4888 static struct slab_attribute _name##_attr = \
ab067e99 4889 __ATTR(_name, 0600, _name##_show, _name##_store)
81819f0f 4890
81819f0f
CL
4891static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4892{
4893 return sprintf(buf, "%d\n", s->size);
4894}
4895SLAB_ATTR_RO(slab_size);
4896
4897static ssize_t align_show(struct kmem_cache *s, char *buf)
4898{
4899 return sprintf(buf, "%d\n", s->align);
4900}
4901SLAB_ATTR_RO(align);
4902
4903static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4904{
3b0efdfa 4905 return sprintf(buf, "%d\n", s->object_size);
81819f0f
CL
4906}
4907SLAB_ATTR_RO(object_size);
4908
4909static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4910{
834f3d11 4911 return sprintf(buf, "%d\n", oo_objects(s->oo));
81819f0f
CL
4912}
4913SLAB_ATTR_RO(objs_per_slab);
4914
06b285dc
CL
4915static ssize_t order_store(struct kmem_cache *s,
4916 const char *buf, size_t length)
4917{
0121c619
CL
4918 unsigned long order;
4919 int err;
4920
3dbb95f7 4921 err = kstrtoul(buf, 10, &order);
0121c619
CL
4922 if (err)
4923 return err;
06b285dc
CL
4924
4925 if (order > slub_max_order || order < slub_min_order)
4926 return -EINVAL;
4927
4928 calculate_sizes(s, order);
4929 return length;
4930}
4931
81819f0f
CL
4932static ssize_t order_show(struct kmem_cache *s, char *buf)
4933{
834f3d11 4934 return sprintf(buf, "%d\n", oo_order(s->oo));
81819f0f 4935}
06b285dc 4936SLAB_ATTR(order);
81819f0f 4937
73d342b1
DR
4938static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4939{
4940 return sprintf(buf, "%lu\n", s->min_partial);
4941}
4942
4943static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4944 size_t length)
4945{
4946 unsigned long min;
4947 int err;
4948
3dbb95f7 4949 err = kstrtoul(buf, 10, &min);
73d342b1
DR
4950 if (err)
4951 return err;
4952
c0bdb232 4953 set_min_partial(s, min);
73d342b1
DR
4954 return length;
4955}
4956SLAB_ATTR(min_partial);
4957
49e22585
CL
4958static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4959{
e6d0e1dc 4960 return sprintf(buf, "%u\n", slub_cpu_partial(s));
49e22585
CL
4961}
4962
4963static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4964 size_t length)
4965{
4966 unsigned long objects;
4967 int err;
4968
3dbb95f7 4969 err = kstrtoul(buf, 10, &objects);
49e22585
CL
4970 if (err)
4971 return err;
345c905d 4972 if (objects && !kmem_cache_has_cpu_partial(s))
74ee4ef1 4973 return -EINVAL;
49e22585 4974
e6d0e1dc 4975 slub_set_cpu_partial(s, objects);
49e22585
CL
4976 flush_all(s);
4977 return length;
4978}
4979SLAB_ATTR(cpu_partial);
4980
81819f0f
CL
4981static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4982{
62c70bce
JP
4983 if (!s->ctor)
4984 return 0;
4985 return sprintf(buf, "%pS\n", s->ctor);
81819f0f
CL
4986}
4987SLAB_ATTR_RO(ctor);
4988
81819f0f
CL
4989static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4990{
4307c14f 4991 return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
81819f0f
CL
4992}
4993SLAB_ATTR_RO(aliases);
4994
81819f0f
CL
4995static ssize_t partial_show(struct kmem_cache *s, char *buf)
4996{
d9acf4b7 4997 return show_slab_objects(s, buf, SO_PARTIAL);
81819f0f
CL
4998}
4999SLAB_ATTR_RO(partial);
5000
5001static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5002{
d9acf4b7 5003 return show_slab_objects(s, buf, SO_CPU);
81819f0f
CL
5004}
5005SLAB_ATTR_RO(cpu_slabs);
5006
5007static ssize_t objects_show(struct kmem_cache *s, char *buf)
5008{
205ab99d 5009 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
81819f0f
CL
5010}
5011SLAB_ATTR_RO(objects);
5012
205ab99d
CL
5013static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5014{
5015 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5016}
5017SLAB_ATTR_RO(objects_partial);
5018
49e22585
CL
5019static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5020{
5021 int objects = 0;
5022 int pages = 0;
5023 int cpu;
5024 int len;
5025
5026 for_each_online_cpu(cpu) {
a93cf07b
WY
5027 struct page *page;
5028
5029 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
49e22585
CL
5030
5031 if (page) {
5032 pages += page->pages;
5033 objects += page->pobjects;
5034 }
5035 }
5036
5037 len = sprintf(buf, "%d(%d)", objects, pages);
5038
5039#ifdef CONFIG_SMP
5040 for_each_online_cpu(cpu) {
a93cf07b
WY
5041 struct page *page;
5042
5043 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
49e22585
CL
5044
5045 if (page && len < PAGE_SIZE - 20)
5046 len += sprintf(buf + len, " C%d=%d(%d)", cpu,
5047 page->pobjects, page->pages);
5048 }
5049#endif
5050 return len + sprintf(buf + len, "\n");
5051}
5052SLAB_ATTR_RO(slabs_cpu_partial);
5053
a5a84755
CL
5054static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5055{
5056 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
5057}
5058
5059static ssize_t reclaim_account_store(struct kmem_cache *s,
5060 const char *buf, size_t length)
5061{
5062 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
5063 if (buf[0] == '1')
5064 s->flags |= SLAB_RECLAIM_ACCOUNT;
5065 return length;
5066}
5067SLAB_ATTR(reclaim_account);
5068
5069static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5070{
5071 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5072}
5073SLAB_ATTR_RO(hwcache_align);
5074
5075#ifdef CONFIG_ZONE_DMA
5076static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5077{
5078 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5079}
5080SLAB_ATTR_RO(cache_dma);
5081#endif
5082
8eb8284b
DW
5083static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5084{
5085 return sprintf(buf, "%zu\n", s->usersize);
5086}
5087SLAB_ATTR_RO(usersize);
5088
a5a84755
CL
5089static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5090{
5f0d5a3a 5091 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
a5a84755
CL
5092}
5093SLAB_ATTR_RO(destroy_by_rcu);
5094
ab9a0f19
LJ
5095static ssize_t reserved_show(struct kmem_cache *s, char *buf)
5096{
5097 return sprintf(buf, "%d\n", s->reserved);
5098}
5099SLAB_ATTR_RO(reserved);
5100
ab4d5ed5 5101#ifdef CONFIG_SLUB_DEBUG
a5a84755
CL
5102static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5103{
5104 return show_slab_objects(s, buf, SO_ALL);
5105}
5106SLAB_ATTR_RO(slabs);
5107
205ab99d
CL
5108static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5109{
5110 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5111}
5112SLAB_ATTR_RO(total_objects);
5113
81819f0f
CL
5114static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5115{
becfda68 5116 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
81819f0f
CL
5117}
5118
5119static ssize_t sanity_checks_store(struct kmem_cache *s,
5120 const char *buf, size_t length)
5121{
becfda68 5122 s->flags &= ~SLAB_CONSISTENCY_CHECKS;
b789ef51
CL
5123 if (buf[0] == '1') {
5124 s->flags &= ~__CMPXCHG_DOUBLE;
becfda68 5125 s->flags |= SLAB_CONSISTENCY_CHECKS;
b789ef51 5126 }
81819f0f
CL
5127 return length;
5128}
5129SLAB_ATTR(sanity_checks);
5130
5131static ssize_t trace_show(struct kmem_cache *s, char *buf)
5132{
5133 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5134}
5135
5136static ssize_t trace_store(struct kmem_cache *s, const char *buf,
5137 size_t length)
5138{
c9e16131
CL
5139 /*
5140 * Tracing a merged cache is going to give confusing results
5141 * as well as cause other issues like converting a mergeable
5142 * cache into an umergeable one.
5143 */
5144 if (s->refcount > 1)
5145 return -EINVAL;
5146
81819f0f 5147 s->flags &= ~SLAB_TRACE;
b789ef51
CL
5148 if (buf[0] == '1') {
5149 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 5150 s->flags |= SLAB_TRACE;
b789ef51 5151 }
81819f0f
CL
5152 return length;
5153}
5154SLAB_ATTR(trace);
5155
81819f0f
CL
5156static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5157{
5158 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5159}
5160
5161static ssize_t red_zone_store(struct kmem_cache *s,
5162 const char *buf, size_t length)
5163{
5164 if (any_slab_objects(s))
5165 return -EBUSY;
5166
5167 s->flags &= ~SLAB_RED_ZONE;
b789ef51 5168 if (buf[0] == '1') {
81819f0f 5169 s->flags |= SLAB_RED_ZONE;
b789ef51 5170 }
06b285dc 5171 calculate_sizes(s, -1);
81819f0f
CL
5172 return length;
5173}
5174SLAB_ATTR(red_zone);
5175
5176static ssize_t poison_show(struct kmem_cache *s, char *buf)
5177{
5178 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
5179}
5180
5181static ssize_t poison_store(struct kmem_cache *s,
5182 const char *buf, size_t length)
5183{
5184 if (any_slab_objects(s))
5185 return -EBUSY;
5186
5187 s->flags &= ~SLAB_POISON;
b789ef51 5188 if (buf[0] == '1') {
81819f0f 5189 s->flags |= SLAB_POISON;
b789ef51 5190 }
06b285dc 5191 calculate_sizes(s, -1);
81819f0f
CL
5192 return length;
5193}
5194SLAB_ATTR(poison);
5195
5196static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5197{
5198 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5199}
5200
5201static ssize_t store_user_store(struct kmem_cache *s,
5202 const char *buf, size_t length)
5203{
5204 if (any_slab_objects(s))
5205 return -EBUSY;
5206
5207 s->flags &= ~SLAB_STORE_USER;
b789ef51
CL
5208 if (buf[0] == '1') {
5209 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 5210 s->flags |= SLAB_STORE_USER;
b789ef51 5211 }
06b285dc 5212 calculate_sizes(s, -1);
81819f0f
CL
5213 return length;
5214}
5215SLAB_ATTR(store_user);
5216
53e15af0
CL
5217static ssize_t validate_show(struct kmem_cache *s, char *buf)
5218{
5219 return 0;
5220}
5221
5222static ssize_t validate_store(struct kmem_cache *s,
5223 const char *buf, size_t length)
5224{
434e245d
CL
5225 int ret = -EINVAL;
5226
5227 if (buf[0] == '1') {
5228 ret = validate_slab_cache(s);
5229 if (ret >= 0)
5230 ret = length;
5231 }
5232 return ret;
53e15af0
CL
5233}
5234SLAB_ATTR(validate);
a5a84755
CL
5235
5236static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
5237{
5238 if (!(s->flags & SLAB_STORE_USER))
5239 return -ENOSYS;
5240 return list_locations(s, buf, TRACK_ALLOC);
5241}
5242SLAB_ATTR_RO(alloc_calls);
5243
5244static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
5245{
5246 if (!(s->flags & SLAB_STORE_USER))
5247 return -ENOSYS;
5248 return list_locations(s, buf, TRACK_FREE);
5249}
5250SLAB_ATTR_RO(free_calls);
5251#endif /* CONFIG_SLUB_DEBUG */
5252
5253#ifdef CONFIG_FAILSLAB
5254static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5255{
5256 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
5257}
5258
5259static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
5260 size_t length)
5261{
c9e16131
CL
5262 if (s->refcount > 1)
5263 return -EINVAL;
5264
a5a84755
CL
5265 s->flags &= ~SLAB_FAILSLAB;
5266 if (buf[0] == '1')
5267 s->flags |= SLAB_FAILSLAB;
5268 return length;
5269}
5270SLAB_ATTR(failslab);
ab4d5ed5 5271#endif
53e15af0 5272
2086d26a
CL
5273static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5274{
5275 return 0;
5276}
5277
5278static ssize_t shrink_store(struct kmem_cache *s,
5279 const char *buf, size_t length)
5280{
832f37f5
VD
5281 if (buf[0] == '1')
5282 kmem_cache_shrink(s);
5283 else
2086d26a
CL
5284 return -EINVAL;
5285 return length;
5286}
5287SLAB_ATTR(shrink);
5288
81819f0f 5289#ifdef CONFIG_NUMA
9824601e 5290static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
81819f0f 5291{
9824601e 5292 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
81819f0f
CL
5293}
5294
9824601e 5295static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
81819f0f
CL
5296 const char *buf, size_t length)
5297{
0121c619
CL
5298 unsigned long ratio;
5299 int err;
5300
3dbb95f7 5301 err = kstrtoul(buf, 10, &ratio);
0121c619
CL
5302 if (err)
5303 return err;
5304
e2cb96b7 5305 if (ratio <= 100)
0121c619 5306 s->remote_node_defrag_ratio = ratio * 10;
81819f0f 5307
81819f0f
CL
5308 return length;
5309}
9824601e 5310SLAB_ATTR(remote_node_defrag_ratio);
81819f0f
CL
5311#endif
5312
8ff12cfc 5313#ifdef CONFIG_SLUB_STATS
8ff12cfc
CL
5314static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5315{
5316 unsigned long sum = 0;
5317 int cpu;
5318 int len;
5319 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
5320
5321 if (!data)
5322 return -ENOMEM;
5323
5324 for_each_online_cpu(cpu) {
9dfc6e68 5325 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
8ff12cfc
CL
5326
5327 data[cpu] = x;
5328 sum += x;
5329 }
5330
5331 len = sprintf(buf, "%lu", sum);
5332
50ef37b9 5333#ifdef CONFIG_SMP
8ff12cfc
CL
5334 for_each_online_cpu(cpu) {
5335 if (data[cpu] && len < PAGE_SIZE - 20)
50ef37b9 5336 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
8ff12cfc 5337 }
50ef37b9 5338#endif
8ff12cfc
CL
5339 kfree(data);
5340 return len + sprintf(buf + len, "\n");
5341}
5342
78eb00cc
DR
5343static void clear_stat(struct kmem_cache *s, enum stat_item si)
5344{
5345 int cpu;
5346
5347 for_each_online_cpu(cpu)
9dfc6e68 5348 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
78eb00cc
DR
5349}
5350
8ff12cfc
CL
5351#define STAT_ATTR(si, text) \
5352static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5353{ \
5354 return show_stat(s, buf, si); \
5355} \
78eb00cc
DR
5356static ssize_t text##_store(struct kmem_cache *s, \
5357 const char *buf, size_t length) \
5358{ \
5359 if (buf[0] != '0') \
5360 return -EINVAL; \
5361 clear_stat(s, si); \
5362 return length; \
5363} \
5364SLAB_ATTR(text); \
8ff12cfc
CL
5365
5366STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5367STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5368STAT_ATTR(FREE_FASTPATH, free_fastpath);
5369STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5370STAT_ATTR(FREE_FROZEN, free_frozen);
5371STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5372STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5373STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5374STAT_ATTR(ALLOC_SLAB, alloc_slab);
5375STAT_ATTR(ALLOC_REFILL, alloc_refill);
e36a2652 5376STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
8ff12cfc
CL
5377STAT_ATTR(FREE_SLAB, free_slab);
5378STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5379STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5380STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5381STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5382STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5383STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
03e404af 5384STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
65c3376a 5385STAT_ATTR(ORDER_FALLBACK, order_fallback);
b789ef51
CL
5386STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5387STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
49e22585
CL
5388STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5389STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
8028dcea
AS
5390STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5391STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
8ff12cfc
CL
5392#endif
5393
06428780 5394static struct attribute *slab_attrs[] = {
81819f0f
CL
5395 &slab_size_attr.attr,
5396 &object_size_attr.attr,
5397 &objs_per_slab_attr.attr,
5398 &order_attr.attr,
73d342b1 5399 &min_partial_attr.attr,
49e22585 5400 &cpu_partial_attr.attr,
81819f0f 5401 &objects_attr.attr,
205ab99d 5402 &objects_partial_attr.attr,
81819f0f
CL
5403 &partial_attr.attr,
5404 &cpu_slabs_attr.attr,
5405 &ctor_attr.attr,
81819f0f
CL
5406 &aliases_attr.attr,
5407 &align_attr.attr,
81819f0f
CL
5408 &hwcache_align_attr.attr,
5409 &reclaim_account_attr.attr,
5410 &destroy_by_rcu_attr.attr,
a5a84755 5411 &shrink_attr.attr,
ab9a0f19 5412 &reserved_attr.attr,
49e22585 5413 &slabs_cpu_partial_attr.attr,
ab4d5ed5 5414#ifdef CONFIG_SLUB_DEBUG
a5a84755
CL
5415 &total_objects_attr.attr,
5416 &slabs_attr.attr,
5417 &sanity_checks_attr.attr,
5418 &trace_attr.attr,
81819f0f
CL
5419 &red_zone_attr.attr,
5420 &poison_attr.attr,
5421 &store_user_attr.attr,
53e15af0 5422 &validate_attr.attr,
88a420e4
CL
5423 &alloc_calls_attr.attr,
5424 &free_calls_attr.attr,
ab4d5ed5 5425#endif
81819f0f
CL
5426#ifdef CONFIG_ZONE_DMA
5427 &cache_dma_attr.attr,
5428#endif
5429#ifdef CONFIG_NUMA
9824601e 5430 &remote_node_defrag_ratio_attr.attr,
8ff12cfc
CL
5431#endif
5432#ifdef CONFIG_SLUB_STATS
5433 &alloc_fastpath_attr.attr,
5434 &alloc_slowpath_attr.attr,
5435 &free_fastpath_attr.attr,
5436 &free_slowpath_attr.attr,
5437 &free_frozen_attr.attr,
5438 &free_add_partial_attr.attr,
5439 &free_remove_partial_attr.attr,
5440 &alloc_from_partial_attr.attr,
5441 &alloc_slab_attr.attr,
5442 &alloc_refill_attr.attr,
e36a2652 5443 &alloc_node_mismatch_attr.attr,
8ff12cfc
CL
5444 &free_slab_attr.attr,
5445 &cpuslab_flush_attr.attr,
5446 &deactivate_full_attr.attr,
5447 &deactivate_empty_attr.attr,
5448 &deactivate_to_head_attr.attr,
5449 &deactivate_to_tail_attr.attr,
5450 &deactivate_remote_frees_attr.attr,
03e404af 5451 &deactivate_bypass_attr.attr,
65c3376a 5452 &order_fallback_attr.attr,
b789ef51
CL
5453 &cmpxchg_double_fail_attr.attr,
5454 &cmpxchg_double_cpu_fail_attr.attr,
49e22585
CL
5455 &cpu_partial_alloc_attr.attr,
5456 &cpu_partial_free_attr.attr,
8028dcea
AS
5457 &cpu_partial_node_attr.attr,
5458 &cpu_partial_drain_attr.attr,
81819f0f 5459#endif
4c13dd3b
DM
5460#ifdef CONFIG_FAILSLAB
5461 &failslab_attr.attr,
5462#endif
8eb8284b 5463 &usersize_attr.attr,
4c13dd3b 5464
81819f0f
CL
5465 NULL
5466};
5467
1fdaaa23 5468static const struct attribute_group slab_attr_group = {
81819f0f
CL
5469 .attrs = slab_attrs,
5470};
5471
5472static ssize_t slab_attr_show(struct kobject *kobj,
5473 struct attribute *attr,
5474 char *buf)
5475{
5476 struct slab_attribute *attribute;
5477 struct kmem_cache *s;
5478 int err;
5479
5480 attribute = to_slab_attr(attr);
5481 s = to_slab(kobj);
5482
5483 if (!attribute->show)
5484 return -EIO;
5485
5486 err = attribute->show(s, buf);
5487
5488 return err;
5489}
5490
5491static ssize_t slab_attr_store(struct kobject *kobj,
5492 struct attribute *attr,
5493 const char *buf, size_t len)
5494{
5495 struct slab_attribute *attribute;
5496 struct kmem_cache *s;
5497 int err;
5498
5499 attribute = to_slab_attr(attr);
5500 s = to_slab(kobj);
5501
5502 if (!attribute->store)
5503 return -EIO;
5504
5505 err = attribute->store(s, buf, len);
127424c8 5506#ifdef CONFIG_MEMCG
107dab5c 5507 if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
426589f5 5508 struct kmem_cache *c;
81819f0f 5509
107dab5c
GC
5510 mutex_lock(&slab_mutex);
5511 if (s->max_attr_size < len)
5512 s->max_attr_size = len;
5513
ebe945c2
GC
5514 /*
5515 * This is a best effort propagation, so this function's return
5516 * value will be determined by the parent cache only. This is
5517 * basically because not all attributes will have a well
5518 * defined semantics for rollbacks - most of the actions will
5519 * have permanent effects.
5520 *
5521 * Returning the error value of any of the children that fail
5522 * is not 100 % defined, in the sense that users seeing the
5523 * error code won't be able to know anything about the state of
5524 * the cache.
5525 *
5526 * Only returning the error code for the parent cache at least
5527 * has well defined semantics. The cache being written to
5528 * directly either failed or succeeded, in which case we loop
5529 * through the descendants with best-effort propagation.
5530 */
426589f5
VD
5531 for_each_memcg_cache(c, s)
5532 attribute->store(c, buf, len);
107dab5c
GC
5533 mutex_unlock(&slab_mutex);
5534 }
5535#endif
81819f0f
CL
5536 return err;
5537}
5538
107dab5c
GC
5539static void memcg_propagate_slab_attrs(struct kmem_cache *s)
5540{
127424c8 5541#ifdef CONFIG_MEMCG
107dab5c
GC
5542 int i;
5543 char *buffer = NULL;
93030d83 5544 struct kmem_cache *root_cache;
107dab5c 5545
93030d83 5546 if (is_root_cache(s))
107dab5c
GC
5547 return;
5548
f7ce3190 5549 root_cache = s->memcg_params.root_cache;
93030d83 5550
107dab5c
GC
5551 /*
5552 * This mean this cache had no attribute written. Therefore, no point
5553 * in copying default values around
5554 */
93030d83 5555 if (!root_cache->max_attr_size)
107dab5c
GC
5556 return;
5557
5558 for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
5559 char mbuf[64];
5560 char *buf;
5561 struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
478fe303 5562 ssize_t len;
107dab5c
GC
5563
5564 if (!attr || !attr->store || !attr->show)
5565 continue;
5566
5567 /*
5568 * It is really bad that we have to allocate here, so we will
5569 * do it only as a fallback. If we actually allocate, though,
5570 * we can just use the allocated buffer until the end.
5571 *
5572 * Most of the slub attributes will tend to be very small in
5573 * size, but sysfs allows buffers up to a page, so they can
5574 * theoretically happen.
5575 */
5576 if (buffer)
5577 buf = buffer;
93030d83 5578 else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
107dab5c
GC
5579 buf = mbuf;
5580 else {
5581 buffer = (char *) get_zeroed_page(GFP_KERNEL);
5582 if (WARN_ON(!buffer))
5583 continue;
5584 buf = buffer;
5585 }
5586
478fe303
TG
5587 len = attr->show(root_cache, buf);
5588 if (len > 0)
5589 attr->store(s, buf, len);
107dab5c
GC
5590 }
5591
5592 if (buffer)
5593 free_page((unsigned long)buffer);
5594#endif
5595}
5596
41a21285
CL
5597static void kmem_cache_release(struct kobject *k)
5598{
5599 slab_kmem_cache_release(to_slab(k));
5600}
5601
52cf25d0 5602static const struct sysfs_ops slab_sysfs_ops = {
81819f0f
CL
5603 .show = slab_attr_show,
5604 .store = slab_attr_store,
5605};
5606
5607static struct kobj_type slab_ktype = {
5608 .sysfs_ops = &slab_sysfs_ops,
41a21285 5609 .release = kmem_cache_release,
81819f0f
CL
5610};
5611
5612static int uevent_filter(struct kset *kset, struct kobject *kobj)
5613{
5614 struct kobj_type *ktype = get_ktype(kobj);
5615
5616 if (ktype == &slab_ktype)
5617 return 1;
5618 return 0;
5619}
5620
9cd43611 5621static const struct kset_uevent_ops slab_uevent_ops = {
81819f0f
CL
5622 .filter = uevent_filter,
5623};
5624
27c3a314 5625static struct kset *slab_kset;
81819f0f 5626
9a41707b
VD
5627static inline struct kset *cache_kset(struct kmem_cache *s)
5628{
127424c8 5629#ifdef CONFIG_MEMCG
9a41707b 5630 if (!is_root_cache(s))
f7ce3190 5631 return s->memcg_params.root_cache->memcg_kset;
9a41707b
VD
5632#endif
5633 return slab_kset;
5634}
5635
81819f0f
CL
5636#define ID_STR_LENGTH 64
5637
5638/* Create a unique string id for a slab cache:
6446faa2
CL
5639 *
5640 * Format :[flags-]size
81819f0f
CL
5641 */
5642static char *create_unique_id(struct kmem_cache *s)
5643{
5644 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5645 char *p = name;
5646
5647 BUG_ON(!name);
5648
5649 *p++ = ':';
5650 /*
5651 * First flags affecting slabcache operations. We will only
5652 * get here for aliasable slabs so we do not need to support
5653 * too many flags. The flags here must cover all flags that
5654 * are matched during merging to guarantee that the id is
5655 * unique.
5656 */
5657 if (s->flags & SLAB_CACHE_DMA)
5658 *p++ = 'd';
5659 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5660 *p++ = 'a';
becfda68 5661 if (s->flags & SLAB_CONSISTENCY_CHECKS)
81819f0f 5662 *p++ = 'F';
230e9fc2
VD
5663 if (s->flags & SLAB_ACCOUNT)
5664 *p++ = 'A';
81819f0f
CL
5665 if (p != name + 1)
5666 *p++ = '-';
5667 p += sprintf(p, "%07d", s->size);
2633d7a0 5668
81819f0f
CL
5669 BUG_ON(p > name + ID_STR_LENGTH - 1);
5670 return name;
5671}
5672
3b7b3140
TH
5673static void sysfs_slab_remove_workfn(struct work_struct *work)
5674{
5675 struct kmem_cache *s =
5676 container_of(work, struct kmem_cache, kobj_remove_work);
5677
5678 if (!s->kobj.state_in_sysfs)
5679 /*
5680 * For a memcg cache, this may be called during
5681 * deactivation and again on shutdown. Remove only once.
5682 * A cache is never shut down before deactivation is
5683 * complete, so no need to worry about synchronization.
5684 */
f6ba4880 5685 goto out;
3b7b3140
TH
5686
5687#ifdef CONFIG_MEMCG
5688 kset_unregister(s->memcg_kset);
5689#endif
5690 kobject_uevent(&s->kobj, KOBJ_REMOVE);
5691 kobject_del(&s->kobj);
f6ba4880 5692out:
3b7b3140
TH
5693 kobject_put(&s->kobj);
5694}
5695
81819f0f
CL
5696static int sysfs_slab_add(struct kmem_cache *s)
5697{
5698 int err;
5699 const char *name;
1663f26d 5700 struct kset *kset = cache_kset(s);
45530c44 5701 int unmergeable = slab_unmergeable(s);
81819f0f 5702
3b7b3140
TH
5703 INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
5704
1663f26d
TH
5705 if (!kset) {
5706 kobject_init(&s->kobj, &slab_ktype);
5707 return 0;
5708 }
5709
11066386
MC
5710 if (!unmergeable && disable_higher_order_debug &&
5711 (slub_debug & DEBUG_METADATA_FLAGS))
5712 unmergeable = 1;
5713
81819f0f
CL
5714 if (unmergeable) {
5715 /*
5716 * Slabcache can never be merged so we can use the name proper.
5717 * This is typically the case for debug situations. In that
5718 * case we can catch duplicate names easily.
5719 */
27c3a314 5720 sysfs_remove_link(&slab_kset->kobj, s->name);
81819f0f
CL
5721 name = s->name;
5722 } else {
5723 /*
5724 * Create a unique name for the slab as a target
5725 * for the symlinks.
5726 */
5727 name = create_unique_id(s);
5728 }
5729
1663f26d 5730 s->kobj.kset = kset;
26e4f205 5731 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
54b6a731 5732 if (err)
80da026a 5733 goto out;
81819f0f
CL
5734
5735 err = sysfs_create_group(&s->kobj, &slab_attr_group);
54b6a731
DJ
5736 if (err)
5737 goto out_del_kobj;
9a41707b 5738
127424c8 5739#ifdef CONFIG_MEMCG
1663f26d 5740 if (is_root_cache(s) && memcg_sysfs_enabled) {
9a41707b
VD
5741 s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
5742 if (!s->memcg_kset) {
54b6a731
DJ
5743 err = -ENOMEM;
5744 goto out_del_kobj;
9a41707b
VD
5745 }
5746 }
5747#endif
5748
81819f0f
CL
5749 kobject_uevent(&s->kobj, KOBJ_ADD);
5750 if (!unmergeable) {
5751 /* Setup first alias */
5752 sysfs_slab_alias(s, s->name);
81819f0f 5753 }
54b6a731
DJ
5754out:
5755 if (!unmergeable)
5756 kfree(name);
5757 return err;
5758out_del_kobj:
5759 kobject_del(&s->kobj);
54b6a731 5760 goto out;
81819f0f
CL
5761}
5762
bf5eb3de 5763static void sysfs_slab_remove(struct kmem_cache *s)
81819f0f 5764{
97d06609 5765 if (slab_state < FULL)
2bce6485
CL
5766 /*
5767 * Sysfs has not been setup yet so no need to remove the
5768 * cache from sysfs.
5769 */
5770 return;
5771
3b7b3140
TH
5772 kobject_get(&s->kobj);
5773 schedule_work(&s->kobj_remove_work);
bf5eb3de
TH
5774}
5775
5776void sysfs_slab_release(struct kmem_cache *s)
5777{
5778 if (slab_state >= FULL)
5779 kobject_put(&s->kobj);
81819f0f
CL
5780}
5781
5782/*
5783 * Need to buffer aliases during bootup until sysfs becomes
9f6c708e 5784 * available lest we lose that information.
81819f0f
CL
5785 */
5786struct saved_alias {
5787 struct kmem_cache *s;
5788 const char *name;
5789 struct saved_alias *next;
5790};
5791
5af328a5 5792static struct saved_alias *alias_list;
81819f0f
CL
5793
5794static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5795{
5796 struct saved_alias *al;
5797
97d06609 5798 if (slab_state == FULL) {
81819f0f
CL
5799 /*
5800 * If we have a leftover link then remove it.
5801 */
27c3a314
GKH
5802 sysfs_remove_link(&slab_kset->kobj, name);
5803 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
81819f0f
CL
5804 }
5805
5806 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5807 if (!al)
5808 return -ENOMEM;
5809
5810 al->s = s;
5811 al->name = name;
5812 al->next = alias_list;
5813 alias_list = al;
5814 return 0;
5815}
5816
5817static int __init slab_sysfs_init(void)
5818{
5b95a4ac 5819 struct kmem_cache *s;
81819f0f
CL
5820 int err;
5821
18004c5d 5822 mutex_lock(&slab_mutex);
2bce6485 5823
0ff21e46 5824 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
27c3a314 5825 if (!slab_kset) {
18004c5d 5826 mutex_unlock(&slab_mutex);
f9f58285 5827 pr_err("Cannot register slab subsystem.\n");
81819f0f
CL
5828 return -ENOSYS;
5829 }
5830
97d06609 5831 slab_state = FULL;
26a7bd03 5832
5b95a4ac 5833 list_for_each_entry(s, &slab_caches, list) {
26a7bd03 5834 err = sysfs_slab_add(s);
5d540fb7 5835 if (err)
f9f58285
FF
5836 pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
5837 s->name);
26a7bd03 5838 }
81819f0f
CL
5839
5840 while (alias_list) {
5841 struct saved_alias *al = alias_list;
5842
5843 alias_list = alias_list->next;
5844 err = sysfs_slab_alias(al->s, al->name);
5d540fb7 5845 if (err)
f9f58285
FF
5846 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
5847 al->name);
81819f0f
CL
5848 kfree(al);
5849 }
5850
18004c5d 5851 mutex_unlock(&slab_mutex);
81819f0f
CL
5852 resiliency_test();
5853 return 0;
5854}
5855
5856__initcall(slab_sysfs_init);
ab4d5ed5 5857#endif /* CONFIG_SYSFS */
57ed3eda
PE
5858
5859/*
5860 * The /proc/slabinfo ABI
5861 */
5b365771 5862#ifdef CONFIG_SLUB_DEBUG
0d7561c6 5863void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
57ed3eda 5864{
57ed3eda 5865 unsigned long nr_slabs = 0;
205ab99d
CL
5866 unsigned long nr_objs = 0;
5867 unsigned long nr_free = 0;
57ed3eda 5868 int node;
fa45dc25 5869 struct kmem_cache_node *n;
57ed3eda 5870
fa45dc25 5871 for_each_kmem_cache_node(s, node, n) {
c17fd13e
WL
5872 nr_slabs += node_nr_slabs(n);
5873 nr_objs += node_nr_objs(n);
205ab99d 5874 nr_free += count_partial(n, count_free);
57ed3eda
PE
5875 }
5876
0d7561c6
GC
5877 sinfo->active_objs = nr_objs - nr_free;
5878 sinfo->num_objs = nr_objs;
5879 sinfo->active_slabs = nr_slabs;
5880 sinfo->num_slabs = nr_slabs;
5881 sinfo->objects_per_slab = oo_objects(s->oo);
5882 sinfo->cache_order = oo_order(s->oo);
57ed3eda
PE
5883}
5884
0d7561c6 5885void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
7b3c3a50 5886{
7b3c3a50
AD
5887}
5888
b7454ad3
GC
5889ssize_t slabinfo_write(struct file *file, const char __user *buffer,
5890 size_t count, loff_t *ppos)
7b3c3a50 5891{
b7454ad3 5892 return -EIO;
7b3c3a50 5893}
5b365771 5894#endif /* CONFIG_SLUB_DEBUG */