git.proxmox.com Git - mirror_spl.git/blame - module/spl/spl-kmem.c
Remove get_vmalloc_info() wrapper
1/*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
715f6251 6 * UCRL-CODE-235197
7 *
716154c5 8 * This file is part of the SPL, Solaris Porting Layer.
3d6af2dd 9 * For details, see <http://zfsonlinux.org/>.
715f6251 10 *
716154c5
BB
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
715f6251 17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
716154c5
BB
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Kmem Implementation.
25\*****************************************************************************/
715f6251 26
f4b37741 27#include <sys/kmem.h>
55abb092 28#include <spl-debug.h>
f1ca4da6 29
b17edc10
BB
30#ifdef SS_DEBUG_SUBSYS
31#undef SS_DEBUG_SUBSYS
937879f1 32#endif
33
b17edc10 34#define SS_DEBUG_SUBSYS SS_KMEM
937879f1 35
a073aeb0
BB
36/*
37 * Within the scope of spl-kmem.c file the kmem_cache_* definitions
38 * are removed to allow access to the real Linux slab allocator.
39 */
40#undef kmem_cache_destroy
41#undef kmem_cache_create
42#undef kmem_cache_alloc
43#undef kmem_cache_free
44
45
0936c344
BB
46/*
47 * Cache expiration was implemented because it was part of the default Solaris
48 * kmem_cache behavior. The idea is that per-cpu objects which haven't been
49 * accessed in several seconds should be returned to the cache. On the other
50 * hand Linux slabs never move objects back to the slabs unless there is
89aa9705
RY
51 * memory pressure on the system. By default the Linux method is enabled
52 * because it has been shown to improve responsiveness on low memory systems.
53 * This policy may be changed by setting KMC_EXPIRE_AGE or KMC_EXPIRE_MEM.
0936c344 54 */
89aa9705 55unsigned int spl_kmem_cache_expire = KMC_EXPIRE_MEM;
0936c344
BB
56EXPORT_SYMBOL(spl_kmem_cache_expire);
57module_param(spl_kmem_cache_expire, uint, 0644);
58MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)");
59
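/*
 * Illustrative sketch (not part of the original source): how the bitmask
 * above is consumed.  KMC_EXPIRE_AGE (0x1) selects the Solaris-style ageing
 * of idle per-cpu objects while KMC_EXPIRE_MEM (0x2) defers reclaim until
 * the system is under memory pressure; the helper name is hypothetical.
 */
static inline int
spl_expire_by_age_example(void)
{
	return ((spl_kmem_cache_expire & KMC_EXPIRE_AGE) != 0);
}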
376dc35e 60/*
c1aef269
BB
61 * The default behavior is to report the number of objects remaining in the
62 * cache. This allows the Linux VM to repeatedly reclaim objects from the
63 * cache when memory is low to satisfy other memory allocations. Alternatively,
64 * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
65 * is reclaimed. This may increase the likelihood of out of memory events.
376dc35e 66 */
c1aef269 67unsigned int spl_kmem_cache_reclaim = 0;
376dc35e
BB
68module_param(spl_kmem_cache_reclaim, uint, 0644);
69MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");
70
bdfbe594
AV
71unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
72module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
73MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");
74
75unsigned int spl_kmem_cache_obj_per_slab_min = SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN;
76module_param(spl_kmem_cache_obj_per_slab_min, uint, 0644);
77MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab_min,
78 "Minimal number of objects per slab");
79
80unsigned int spl_kmem_cache_max_size = 32;
81module_param(spl_kmem_cache_max_size, uint, 0644);
82MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
83
f2297b5a
BB
84/*
85 * For small objects the Linux slab allocator should be used to make the most
86 * efficient use of the memory. However, large objects are not supported by
87 * the Linux slab and therefore the SPL implementation is preferred. A cutoff
88 * of 16K was determined to be optimal for architectures using 4K pages.
89 */
90#if PAGE_SIZE == 4096
91unsigned int spl_kmem_cache_slab_limit = 16384;
92#else
a073aeb0 93unsigned int spl_kmem_cache_slab_limit = 0;
f2297b5a 94#endif
a073aeb0
BB
95module_param(spl_kmem_cache_slab_limit, uint, 0644);
96MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
97 "Objects less than N bytes use the Linux slab");
98
99unsigned int spl_kmem_cache_kmem_limit = (PAGE_SIZE / 4);
100module_param(spl_kmem_cache_kmem_limit, uint, 0644);
101MODULE_PARM_DESC(spl_kmem_cache_kmem_limit,
102 "Objects less than N bytes use the kmalloc");
103
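/*
 * Illustrative sketch (simplified from the logic in spl_kmem_cache_create()
 * further down): how the two limits above pick a backing store when the
 * caller does not force one with KMC_KMEM, KMC_VMEM, or KMC_SLAB.  The
 * helper name is hypothetical and the real code compares the aligned
 * per-object size, not the raw object size.
 */
static inline int
spl_backing_store_example(size_t obj_size)
{
	if (spl_kmem_cache_slab_limit &&
	    obj_size <= (size_t)spl_kmem_cache_slab_limit)
		return (KMC_SLAB);	/* native Linux slab */

	if (obj_size <= spl_kmem_cache_kmem_limit)
		return (KMC_KMEM);	/* kmalloc backed SPL slab */

	return (KMC_VMEM);		/* vmalloc backed SPL slab */
}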
36b313da
BB
104/*
105 * The minimum amount of memory measured in pages to be free at all
106 * times on the system. This is similar to Linux's zone->pages_min
ecc39810 107 * multiplied by the number of zones and is sized based on that.
36b313da
BB
108 */
109pgcnt_t minfree = 0;
110EXPORT_SYMBOL(minfree);
111
112/*
113 * The desired amount of memory measured in pages to be free at all
114 * times on the system. This is similar to Linux's zone->pages_low
ecc39810 115 * multiplied by the number of zones and is sized based on that.
36b313da 116 * Assuming all zones are being used roughly equally, when we drop
ecc39810 117 * below this threshold asynchronous page reclamation is triggered.
36b313da
BB
118 */
119pgcnt_t desfree = 0;
120EXPORT_SYMBOL(desfree);
121
122/*
123 * When above this amount of memory measured in pages the system is
124 * determined to have enough free memory. This is similar to Linux's
ecc39810 125 * zone->pages_high multiplied by the number of zones and is sized based
36b313da 126 * on that. Assuming all zones are being used roughly equally, when
ecc39810 127 * asynchronous page reclamation reaches this threshold it stops.
36b313da
BB
128 */
129pgcnt_t lotsfree = 0;
130EXPORT_SYMBOL(lotsfree);
131
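/*
 * Illustrative sanity check (not in the original source): taken together the
 * watermarks above are expected to satisfy minfree < desfree < lotsfree,
 * mirroring the Linux zone min/low/high watermarks they are sized from.
 */
static inline int
spl_watermarks_ordered_example(void)
{
	return ((minfree < desfree) && (desfree < lotsfree));
}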
132/* Unused, always 0 in this implementation */
133pgcnt_t needfree = 0;
134EXPORT_SYMBOL(needfree);
135
36b313da
BB
136pgcnt_t swapfs_minfree = 0;
137EXPORT_SYMBOL(swapfs_minfree);
138
139pgcnt_t swapfs_reserve = 0;
140EXPORT_SYMBOL(swapfs_reserve);
141
36b313da
BB
142vmem_t *heap_arena = NULL;
143EXPORT_SYMBOL(heap_arena);
144
145vmem_t *zio_alloc_arena = NULL;
146EXPORT_SYMBOL(zio_alloc_arena);
147
148vmem_t *zio_arena = NULL;
149EXPORT_SYMBOL(zio_arena);
150
5232d256
BB
151#ifdef HAVE_PGDAT_HELPERS
152# ifndef HAVE_FIRST_ONLINE_PGDAT
96dded38 153first_online_pgdat_t first_online_pgdat_fn = SYMBOL_POISON;
d1ff2312 154EXPORT_SYMBOL(first_online_pgdat_fn);
5232d256 155# endif /* HAVE_FIRST_ONLINE_PGDAT */
36b313da 156
5232d256 157# ifndef HAVE_NEXT_ONLINE_PGDAT
96dded38 158next_online_pgdat_t next_online_pgdat_fn = SYMBOL_POISON;
d1ff2312 159EXPORT_SYMBOL(next_online_pgdat_fn);
5232d256 160# endif /* HAVE_NEXT_ONLINE_PGDAT */
36b313da 161
5232d256 162# ifndef HAVE_NEXT_ZONE
96dded38 163next_zone_t next_zone_fn = SYMBOL_POISON;
d1ff2312 164EXPORT_SYMBOL(next_zone_fn);
5232d256
BB
165# endif /* HAVE_NEXT_ZONE */
166
167#else /* HAVE_PGDAT_HELPERS */
168
169# ifndef HAVE_PGDAT_LIST
170struct pglist_data *pgdat_list_addr = SYMBOL_POISON;
171EXPORT_SYMBOL(pgdat_list_addr);
172# endif /* HAVE_PGDAT_LIST */
173
174#endif /* HAVE_PGDAT_HELPERS */
36b313da 175
6ae7fef5 176#ifdef NEED_GET_ZONE_COUNTS
e11d6c5f 177# ifndef HAVE_GET_ZONE_COUNTS
96dded38 178get_zone_counts_t get_zone_counts_fn = SYMBOL_POISON;
d1ff2312 179EXPORT_SYMBOL(get_zone_counts_fn);
96dded38 180# endif /* HAVE_GET_ZONE_COUNTS */
4ab13d3b 181
e11d6c5f 182unsigned long
6ae7fef5 183spl_global_page_state(spl_zone_stat_item_t item)
4ab13d3b
BB
184{
185 unsigned long active;
186 unsigned long inactive;
187 unsigned long free;
188
6ae7fef5
BB
189 get_zone_counts(&active, &inactive, &free);
190 switch (item) {
191 case SPL_NR_FREE_PAGES: return free;
192 case SPL_NR_INACTIVE: return inactive;
193 case SPL_NR_ACTIVE: return active;
194 default: ASSERT(0); /* Unsupported */
e11d6c5f
BB
195 }
196
6ae7fef5
BB
197 return 0;
198}
199#else
200# ifdef HAVE_GLOBAL_PAGE_STATE
201unsigned long
202spl_global_page_state(spl_zone_stat_item_t item)
203{
204 unsigned long pages = 0;
205
206 switch (item) {
207 case SPL_NR_FREE_PAGES:
208# ifdef HAVE_ZONE_STAT_ITEM_NR_FREE_PAGES
209 pages += global_page_state(NR_FREE_PAGES);
210# endif
211 break;
212 case SPL_NR_INACTIVE:
213# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE
214 pages += global_page_state(NR_INACTIVE);
215# endif
216# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_ANON
217 pages += global_page_state(NR_INACTIVE_ANON);
218# endif
219# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_FILE
220 pages += global_page_state(NR_INACTIVE_FILE);
221# endif
222 break;
223 case SPL_NR_ACTIVE:
224# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE
225 pages += global_page_state(NR_ACTIVE);
226# endif
227# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_ANON
228 pages += global_page_state(NR_ACTIVE_ANON);
229# endif
230# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_FILE
231 pages += global_page_state(NR_ACTIVE_FILE);
232# endif
233 break;
234 default:
235 ASSERT(0); /* Unsupported */
e11d6c5f
BB
236 }
237
6ae7fef5
BB
238 return pages;
239}
96dded38 240# else
6ae7fef5 241# error "Both global_page_state() and get_zone_counts() unavailable"
96dded38 242# endif /* HAVE_GLOBAL_PAGE_STATE */
6ae7fef5 243#endif /* NEED_GET_ZONE_COUNTS */
e11d6c5f 244EXPORT_SYMBOL(spl_global_page_state);
4ab13d3b 245
e76f4bf1
BB
246#ifndef HAVE_SHRINK_DCACHE_MEMORY
247shrink_dcache_memory_t shrink_dcache_memory_fn = SYMBOL_POISON;
248EXPORT_SYMBOL(shrink_dcache_memory_fn);
249#endif /* HAVE_SHRINK_DCACHE_MEMORY */
250
251#ifndef HAVE_SHRINK_ICACHE_MEMORY
252shrink_icache_memory_t shrink_icache_memory_fn = SYMBOL_POISON;
253EXPORT_SYMBOL(shrink_icache_memory_fn);
254#endif /* HAVE_SHRINK_ICACHE_MEMORY */
255
e11d6c5f
BB
256pgcnt_t
257spl_kmem_availrmem(void)
258{
4ab13d3b 259 /* The amount of easily available memory */
6ae7fef5
BB
260 return (spl_global_page_state(SPL_NR_FREE_PAGES) +
261 spl_global_page_state(SPL_NR_INACTIVE));
4ab13d3b
BB
262}
263EXPORT_SYMBOL(spl_kmem_availrmem);
264
265size_t
266vmem_size(vmem_t *vmp, int typemask)
267{
e1310afa
BB
268 ASSERT3P(vmp, ==, NULL);
269 ASSERT3S(typemask & VMEM_ALLOC, ==, VMEM_ALLOC);
270 ASSERT3S(typemask & VMEM_FREE, ==, VMEM_FREE);
d1ff2312 271
e1310afa 272 return (VMALLOC_TOTAL);
4ab13d3b
BB
273}
274EXPORT_SYMBOL(vmem_size);
4ab13d3b 275
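/*
 * Illustrative usage (not in the original file): Solaris consumers pass a
 * NULL arena to size the heap.  With this shim the typemask must include
 * both VMEM_ALLOC and VMEM_FREE and the answer is always VMALLOC_TOTAL.
 */
static inline size_t
spl_heap_size_example(void)
{
	return (vmem_size(NULL, VMEM_ALLOC | VMEM_FREE));
}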
b868e22f
BB
276int
277kmem_debugging(void)
278{
279 return 0;
280}
281EXPORT_SYMBOL(kmem_debugging);
282
283#ifndef HAVE_KVASPRINTF
284/* Simplified asprintf. */
285char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
286{
287 unsigned int len;
288 char *p;
289 va_list aq;
290
291 va_copy(aq, ap);
292 len = vsnprintf(NULL, 0, fmt, aq);
293 va_end(aq);
294
295 p = kmalloc(len+1, gfp);
296 if (!p)
297 return NULL;
298
299 vsnprintf(p, len+1, fmt, ap);
300
301 return p;
302}
303EXPORT_SYMBOL(kvasprintf);
304#endif /* HAVE_KVASPRINTF */
305
e6de04b7
BB
306char *
307kmem_vasprintf(const char *fmt, va_list ap)
308{
309 va_list aq;
310 char *ptr;
311
e6de04b7 312 do {
2c762de8 313 va_copy(aq, ap);
e6de04b7 314 ptr = kvasprintf(GFP_KERNEL, fmt, aq);
2c762de8 315 va_end(aq);
e6de04b7 316 } while (ptr == NULL);
e6de04b7
BB
317
318 return ptr;
319}
320EXPORT_SYMBOL(kmem_vasprintf);
321
b868e22f
BB
322char *
323kmem_asprintf(const char *fmt, ...)
324{
e6de04b7 325 va_list ap;
b868e22f
BB
326 char *ptr;
327
b868e22f 328 do {
2c762de8 329 va_start(ap, fmt);
e6de04b7 330 ptr = kvasprintf(GFP_KERNEL, fmt, ap);
2c762de8 331 va_end(ap);
b868e22f 332 } while (ptr == NULL);
b868e22f
BB
333
334 return ptr;
335}
336EXPORT_SYMBOL(kmem_asprintf);
337
10129680
BB
338static char *
339__strdup(const char *str, int flags)
340{
341 char *ptr;
342 int n;
343
344 n = strlen(str);
345 ptr = kmalloc_nofail(n + 1, flags);
346 if (ptr)
347 memcpy(ptr, str, n + 1);
348
349 return ptr;
350}
351
352char *
353strdup(const char *str)
354{
355 return __strdup(str, KM_SLEEP);
356}
357EXPORT_SYMBOL(strdup);
358
359void
360strfree(char *str)
361{
41f84a8d 362 kfree(str);
10129680
BB
363}
364EXPORT_SYMBOL(strfree);
365
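/*
 * Illustrative usage (not in the original file): kmem_asprintf() retries
 * until the allocation succeeds so the result is never NULL, and the string
 * is released with strfree().  The format string is purely an example.
 */
static void
spl_asprintf_example(int unit)
{
	char *name;

	name = kmem_asprintf("spl-example-%d", unit);
	/* ... use the name, e.g. for a cache or taskq ... */
	strfree(name);
}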
f1ca4da6 366/*
2fb9b26a 367 * Memory allocation interfaces and debugging for basic kmem_*
055ffd98
BB
368 * and vmem_* style memory allocation. When DEBUG_KMEM is enabled
369 * the SPL will keep track of the total memory allocated, and
370 * report any memory leaked when the module is unloaded.
f1ca4da6 371 */
372#ifdef DEBUG_KMEM
d04c8a56 373
f1ca4da6 374/* Shim layer memory accounting */
d04c8a56 375# ifdef HAVE_ATOMIC64_T
550f1705 376atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
a0f6da3d 377unsigned long long kmem_alloc_max = 0;
550f1705 378atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
a0f6da3d 379unsigned long long vmem_alloc_max = 0;
10129680 380# else /* HAVE_ATOMIC64_T */
d04c8a56
BB
381atomic_t kmem_alloc_used = ATOMIC_INIT(0);
382unsigned long long kmem_alloc_max = 0;
383atomic_t vmem_alloc_used = ATOMIC_INIT(0);
384unsigned long long vmem_alloc_max = 0;
10129680 385# endif /* HAVE_ATOMIC64_T */
79b31f36 386
ff449ac4 387EXPORT_SYMBOL(kmem_alloc_used);
388EXPORT_SYMBOL(kmem_alloc_max);
389EXPORT_SYMBOL(vmem_alloc_used);
390EXPORT_SYMBOL(vmem_alloc_max);
ff449ac4 391
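/*
 * Illustrative sketch (the real kmem_alloc_used_{add,sub,read}() helpers used
 * below are assumed to live in the SPL headers): with HAVE_ATOMIC64_T the
 * accounting wraps the atomic64_t counter, otherwise the atomic_t fallback
 * declared above is used.  The example_ names are hypothetical.
 */
#ifdef HAVE_ATOMIC64_T
#define	example_kmem_used_add(size)	atomic64_add((size), &kmem_alloc_used)
#define	example_kmem_used_read()	atomic64_read(&kmem_alloc_used)
#else
#define	example_kmem_used_add(size)	atomic_add((size), &kmem_alloc_used)
#define	example_kmem_used_read()	atomic_read(&kmem_alloc_used)
#endif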
055ffd98
BB
392/* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
393 * but also the location of every alloc and free. When the SPL module is
394 * unloaded a list of all leaked addresses and where they were allocated
395 * will be dumped to the console. Enabling this feature has a significant
396 * impact on performance but it makes finding memory leaks straightforward.
397 *
398 * Not surprisingly with debugging enabled the xmem_locks are very highly
399 * contended particularly on xfree(). If we want to run with this detailed
400 * debugging enabled for anything other than debugging we need to minimize
401 * the contention by moving to a lock per xmem_table entry model.
a0f6da3d 402 */
055ffd98 403# ifdef DEBUG_KMEM_TRACKING
a0f6da3d 404
405# define KMEM_HASH_BITS 10
406# define KMEM_TABLE_SIZE (1 << KMEM_HASH_BITS)
407
408# define VMEM_HASH_BITS 10
409# define VMEM_TABLE_SIZE (1 << VMEM_HASH_BITS)
410
411typedef struct kmem_debug {
412 struct hlist_node kd_hlist; /* Hash node linkage */
413 struct list_head kd_list; /* List of all allocations */
414 void *kd_addr; /* Allocation pointer */
415 size_t kd_size; /* Allocation size */
416 const char *kd_func; /* Allocation function */
417 int kd_line; /* Allocation line */
418} kmem_debug_t;
419
d6a26c6a 420spinlock_t kmem_lock;
421struct hlist_head kmem_table[KMEM_TABLE_SIZE];
422struct list_head kmem_list;
423
13cdca65 424spinlock_t vmem_lock;
425struct hlist_head vmem_table[VMEM_TABLE_SIZE];
426struct list_head vmem_list;
427
d6a26c6a 428EXPORT_SYMBOL(kmem_lock);
429EXPORT_SYMBOL(kmem_table);
430EXPORT_SYMBOL(kmem_list);
431
13cdca65 432EXPORT_SYMBOL(vmem_lock);
433EXPORT_SYMBOL(vmem_table);
434EXPORT_SYMBOL(vmem_list);
a0f6da3d 435
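/*
 * Illustrative sketch (the real report is produced at module unload, outside
 * this section): walking kmem_list under kmem_lock is what allows every
 * outstanding allocation to be attributed to the function and line that
 * made it.  The function name is hypothetical.
 */
static void
spl_dump_kmem_leaks_example(void)
{
	kmem_debug_t *kd;
	unsigned long flags;

	spin_lock_irqsave(&kmem_lock, flags);
	list_for_each_entry(kd, &kmem_list, kd_list)
		printk(KERN_WARNING "leaked %zu bytes at %p (%s:%d)\n",
		    kd->kd_size, kd->kd_addr, kd->kd_func, kd->kd_line);
	spin_unlock_irqrestore(&kmem_lock, flags);
}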
436static kmem_debug_t *
973e8269 437kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, const void *addr)
a0f6da3d 438{
439 struct hlist_head *head;
440 struct hlist_node *node;
441 struct kmem_debug *p;
442 unsigned long flags;
b17edc10 443 SENTRY;
a0f6da3d 444
445 spin_lock_irqsave(lock, flags);
446
b1424add
BB
447 head = &table[hash_ptr((void *)addr, bits)];
448 hlist_for_each(node, head) {
449 p = list_entry(node, struct kmem_debug, kd_hlist);
a0f6da3d 450 if (p->kd_addr == addr) {
451 hlist_del_init(&p->kd_hlist);
452 list_del_init(&p->kd_list);
453 spin_unlock_irqrestore(lock, flags);
454 return p;
455 }
456 }
457
458 spin_unlock_irqrestore(lock, flags);
459
b17edc10 460 SRETURN(NULL);
a0f6da3d 461}
462
463void *
464kmem_alloc_track(size_t size, int flags, const char *func, int line,
465 int node_alloc, int node)
466{
467 void *ptr = NULL;
468 kmem_debug_t *dptr;
469 unsigned long irq_flags;
b17edc10 470 SENTRY;
a0f6da3d 471
10129680 472 /* Function may be called with KM_NOSLEEP so failure is possible */
c89fdee4 473 dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
a0f6da3d 474 flags & ~__GFP_ZERO);
475
10129680 476 if (unlikely(dptr == NULL)) {
b17edc10 477 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
3cb77549
BB
478 "kmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
479 sizeof(kmem_debug_t), flags, func, line,
480 kmem_alloc_used_read(), kmem_alloc_max);
a0f6da3d 481 } else {
10129680
BB
482 /*
483 * Marked unlikely because we should never be doing this,
484 * we tolerate up to 2 pages but a single page is best.
485 */
23d91792 486 if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
b17edc10 487 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "large "
3cb77549
BB
488 "kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
489 (unsigned long long) size, flags, func, line,
d04c8a56 490 kmem_alloc_used_read(), kmem_alloc_max);
5198ea0e
BB
491 spl_debug_dumpstack(NULL);
492 }
a0f6da3d 493
10129680
BB
494 /*
495 * We use __strdup() below because the string pointed to by
c8e60837 496 * __FUNCTION__ might not be available by the time we want
10129680
BB
497 * to print it since the module might have been unloaded.
498 * This can only fail in the KM_NOSLEEP case.
499 */
500 dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
c8e60837 501 if (unlikely(dptr->kd_func == NULL)) {
502 kfree(dptr);
b17edc10 503 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
10129680 504 "debug __strdup() at %s:%d failed (%lld/%llu)\n",
3cb77549 505 func, line, kmem_alloc_used_read(), kmem_alloc_max);
c8e60837 506 goto out;
507 }
508
a0f6da3d 509 /* Use the correct allocator */
510 if (node_alloc) {
511 ASSERT(!(flags & __GFP_ZERO));
c89fdee4 512 ptr = kmalloc_node_nofail(size, flags, node);
a0f6da3d 513 } else if (flags & __GFP_ZERO) {
c89fdee4 514 ptr = kzalloc_nofail(size, flags & ~__GFP_ZERO);
a0f6da3d 515 } else {
c89fdee4 516 ptr = kmalloc_nofail(size, flags);
a0f6da3d 517 }
518
519 if (unlikely(ptr == NULL)) {
c8e60837 520 kfree(dptr->kd_func);
a0f6da3d 521 kfree(dptr);
b17edc10 522 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "kmem_alloc"
3cb77549
BB
523 "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
524 (unsigned long long) size, flags, func, line,
d04c8a56 525 kmem_alloc_used_read(), kmem_alloc_max);
a0f6da3d 526 goto out;
527 }
528
d04c8a56
BB
529 kmem_alloc_used_add(size);
530 if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
531 kmem_alloc_max = kmem_alloc_used_read();
a0f6da3d 532
533 INIT_HLIST_NODE(&dptr->kd_hlist);
534 INIT_LIST_HEAD(&dptr->kd_list);
535
536 dptr->kd_addr = ptr;
537 dptr->kd_size = size;
a0f6da3d 538 dptr->kd_line = line;
539
540 spin_lock_irqsave(&kmem_lock, irq_flags);
b1424add 541 hlist_add_head(&dptr->kd_hlist,
a0f6da3d 542 &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
543 list_add_tail(&dptr->kd_list, &kmem_list);
544 spin_unlock_irqrestore(&kmem_lock, irq_flags);
545
b17edc10 546 SDEBUG_LIMIT(SD_INFO,
3cb77549
BB
547 "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
548 (unsigned long long) size, flags, func, line, ptr,
549 kmem_alloc_used_read(), kmem_alloc_max);
a0f6da3d 550 }
551out:
b17edc10 552 SRETURN(ptr);
a0f6da3d 553}
554EXPORT_SYMBOL(kmem_alloc_track);
555
556void
973e8269 557kmem_free_track(const void *ptr, size_t size)
a0f6da3d 558{
559 kmem_debug_t *dptr;
b17edc10 560 SENTRY;
a0f6da3d 561
562 ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
563 (unsigned long long) size);
564
565 dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
566
10129680
BB
567 /* Must exist in hash due to kmem_alloc() */
568 ASSERT(dptr);
a0f6da3d 569
570 /* Size must match */
571 ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
572 "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
573 (unsigned long long) size, dptr->kd_func, dptr->kd_line);
574
d04c8a56 575 kmem_alloc_used_sub(size);
b17edc10 576 SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
d04c8a56 577 (unsigned long long) size, kmem_alloc_used_read(),
a0f6da3d 578 kmem_alloc_max);
579
c8e60837 580 kfree(dptr->kd_func);
581
b1424add 582 memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
a0f6da3d 583 kfree(dptr);
584
b1424add 585 memset((void *)ptr, 0x5a, size);
a0f6da3d 586 kfree(ptr);
587
b17edc10 588 SEXIT;
a0f6da3d 589}
590EXPORT_SYMBOL(kmem_free_track);
591
592void *
593vmem_alloc_track(size_t size, int flags, const char *func, int line)
594{
595 void *ptr = NULL;
596 kmem_debug_t *dptr;
597 unsigned long irq_flags;
b17edc10 598 SENTRY;
a0f6da3d 599
600 ASSERT(flags & KM_SLEEP);
601
10129680 602 /* Function may be called with KM_NOSLEEP so failure is possible */
ef1c7a06
BB
603 dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
604 flags & ~__GFP_ZERO);
10129680 605 if (unlikely(dptr == NULL)) {
b17edc10 606 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
3cb77549
BB
607 "vmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
608 sizeof(kmem_debug_t), flags, func, line,
609 vmem_alloc_used_read(), vmem_alloc_max);
a0f6da3d 610 } else {
10129680
BB
611 /*
612 * We use __strdup() below because the string pointed to by
c8e60837 613 * __FUNCTION__ might not be available by the time we want
10129680
BB
614 * to print it, since the module might have been unloaded.
615 * This can never fail because we have already asserted
616 * that flags is KM_SLEEP.
617 */
618 dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
c8e60837 619 if (unlikely(dptr->kd_func == NULL)) {
620 kfree(dptr);
b17edc10 621 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
10129680 622 "debug __strdup() at %s:%d failed (%lld/%llu)\n",
3cb77549 623 func, line, vmem_alloc_used_read(), vmem_alloc_max);
c8e60837 624 goto out;
625 }
626
10129680
BB
627 /* Use the correct allocator */
628 if (flags & __GFP_ZERO) {
629 ptr = vzalloc_nofail(size, flags & ~__GFP_ZERO);
630 } else {
631 ptr = vmalloc_nofail(size, flags);
632 }
a0f6da3d 633
634 if (unlikely(ptr == NULL)) {
c8e60837 635 kfree(dptr->kd_func);
a0f6da3d 636 kfree(dptr);
b17edc10 637 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "vmem_alloc"
3cb77549
BB
638 "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
639 (unsigned long long) size, flags, func, line,
d04c8a56 640 vmem_alloc_used_read(), vmem_alloc_max);
a0f6da3d 641 goto out;
642 }
643
d04c8a56
BB
644 vmem_alloc_used_add(size);
645 if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
646 vmem_alloc_max = vmem_alloc_used_read();
a0f6da3d 647
648 INIT_HLIST_NODE(&dptr->kd_hlist);
649 INIT_LIST_HEAD(&dptr->kd_list);
650
651 dptr->kd_addr = ptr;
652 dptr->kd_size = size;
a0f6da3d 653 dptr->kd_line = line;
654
655 spin_lock_irqsave(&vmem_lock, irq_flags);
b1424add 656 hlist_add_head(&dptr->kd_hlist,
a0f6da3d 657 &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
658 list_add_tail(&dptr->kd_list, &vmem_list);
659 spin_unlock_irqrestore(&vmem_lock, irq_flags);
660
b17edc10 661 SDEBUG_LIMIT(SD_INFO,
3cb77549
BB
662 "vmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
663 (unsigned long long) size, flags, func, line,
664 ptr, vmem_alloc_used_read(), vmem_alloc_max);
a0f6da3d 665 }
666out:
b17edc10 667 SRETURN(ptr);
a0f6da3d 668}
669EXPORT_SYMBOL(vmem_alloc_track);
670
671void
973e8269 672vmem_free_track(const void *ptr, size_t size)
a0f6da3d 673{
674 kmem_debug_t *dptr;
b17edc10 675 SENTRY;
a0f6da3d 676
677 ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
678 (unsigned long long) size);
679
680 dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
10129680
BB
681
682 /* Must exist in hash due to vmem_alloc() */
683 ASSERT(dptr);
a0f6da3d 684
685 /* Size must match */
686 ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
687 "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
688 (unsigned long long) size, dptr->kd_func, dptr->kd_line);
689
d04c8a56 690 vmem_alloc_used_sub(size);
b17edc10 691 SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
d04c8a56 692 (unsigned long long) size, vmem_alloc_used_read(),
a0f6da3d 693 vmem_alloc_max);
694
c8e60837 695 kfree(dptr->kd_func);
696
b1424add 697 memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
a0f6da3d 698 kfree(dptr);
699
b1424add 700 memset((void *)ptr, 0x5a, size);
a0f6da3d 701 vfree(ptr);
702
b17edc10 703 SEXIT;
a0f6da3d 704}
705EXPORT_SYMBOL(vmem_free_track);
706
707# else /* DEBUG_KMEM_TRACKING */
708
709void *
710kmem_alloc_debug(size_t size, int flags, const char *func, int line,
711 int node_alloc, int node)
712{
713 void *ptr;
b17edc10 714 SENTRY;
a0f6da3d 715
10129680
BB
716 /*
717 * Marked unlikely because we should never be doing this,
718 * we tolerate up to 2 pages but a single page is best.
719 */
23d91792 720 if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
b17edc10 721 SDEBUG(SD_CONSOLE | SD_WARNING,
10129680 722 "large kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
3cb77549 723 (unsigned long long) size, flags, func, line,
d04c8a56 724 kmem_alloc_used_read(), kmem_alloc_max);
377e12f1 725 spl_debug_dumpstack(NULL);
5198ea0e 726 }
a0f6da3d 727
728 /* Use the correct allocator */
729 if (node_alloc) {
730 ASSERT(!(flags & __GFP_ZERO));
c89fdee4 731 ptr = kmalloc_node_nofail(size, flags, node);
a0f6da3d 732 } else if (flags & __GFP_ZERO) {
c89fdee4 733 ptr = kzalloc_nofail(size, flags & (~__GFP_ZERO));
a0f6da3d 734 } else {
c89fdee4 735 ptr = kmalloc_nofail(size, flags);
a0f6da3d 736 }
737
10129680 738 if (unlikely(ptr == NULL)) {
b17edc10 739 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
3cb77549
BB
740 "kmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
741 (unsigned long long) size, flags, func, line,
d04c8a56 742 kmem_alloc_used_read(), kmem_alloc_max);
a0f6da3d 743 } else {
d04c8a56
BB
744 kmem_alloc_used_add(size);
745 if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
746 kmem_alloc_max = kmem_alloc_used_read();
a0f6da3d 747
b17edc10 748 SDEBUG_LIMIT(SD_INFO,
3cb77549
BB
749 "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
750 (unsigned long long) size, flags, func, line, ptr,
10129680 751 kmem_alloc_used_read(), kmem_alloc_max);
a0f6da3d 752 }
10129680 753
b17edc10 754 SRETURN(ptr);
a0f6da3d 755}
756EXPORT_SYMBOL(kmem_alloc_debug);
757
758void
973e8269 759kmem_free_debug(const void *ptr, size_t size)
a0f6da3d 760{
b17edc10 761 SENTRY;
a0f6da3d 762
763 ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
764 (unsigned long long) size);
765
d04c8a56 766 kmem_alloc_used_sub(size);
b17edc10 767 SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
d04c8a56 768 (unsigned long long) size, kmem_alloc_used_read(),
a0f6da3d 769 kmem_alloc_max);
a0f6da3d 770 kfree(ptr);
771
b17edc10 772 SEXIT;
a0f6da3d 773}
774EXPORT_SYMBOL(kmem_free_debug);
775
776void *
777vmem_alloc_debug(size_t size, int flags, const char *func, int line)
778{
779 void *ptr;
b17edc10 780 SENTRY;
a0f6da3d 781
782 ASSERT(flags & KM_SLEEP);
783
10129680
BB
784 /* Use the correct allocator */
785 if (flags & __GFP_ZERO) {
786 ptr = vzalloc_nofail(size, flags & (~__GFP_ZERO));
787 } else {
788 ptr = vmalloc_nofail(size, flags);
789 }
790
791 if (unlikely(ptr == NULL)) {
b17edc10 792 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
3cb77549
BB
793 "vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
794 (unsigned long long) size, flags, func, line,
d04c8a56 795 vmem_alloc_used_read(), vmem_alloc_max);
a0f6da3d 796 } else {
d04c8a56
BB
797 vmem_alloc_used_add(size);
798 if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
799 vmem_alloc_max = vmem_alloc_used_read();
a0f6da3d 800
b17edc10 801 SDEBUG_LIMIT(SD_INFO, "vmem_alloc(%llu, 0x%x) = %p "
a0f6da3d 802 "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
d04c8a56 803 vmem_alloc_used_read(), vmem_alloc_max);
a0f6da3d 804 }
805
b17edc10 806 SRETURN(ptr);
a0f6da3d 807}
808EXPORT_SYMBOL(vmem_alloc_debug);
809
810void
973e8269 811vmem_free_debug(const void *ptr, size_t size)
a0f6da3d 812{
b17edc10 813 SENTRY;
a0f6da3d 814
815 ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
816 (unsigned long long) size);
817
d04c8a56 818 vmem_alloc_used_sub(size);
b17edc10 819 SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
d04c8a56 820 (unsigned long long) size, vmem_alloc_used_read(),
a0f6da3d 821 vmem_alloc_max);
a0f6da3d 822 vfree(ptr);
823
b17edc10 824 SEXIT;
a0f6da3d 825}
826EXPORT_SYMBOL(vmem_free_debug);
827
828# endif /* DEBUG_KMEM_TRACKING */
829#endif /* DEBUG_KMEM */
830
10129680
BB
831/*
832 * Slab allocation interfaces
833 *
834 * While the Linux slab implementation was inspired by the Solaris
ecc39810 835 * implementation I cannot use it to emulate the Solaris APIs. I
10129680
BB
836 * require two features which are not provided by the Linux slab.
837 *
838 * 1) Constructors AND destructors. Recent versions of the Linux
839 * kernel have removed support for destructors. This is a deal
840 * breaker for the SPL which contains particularly expensive
841 * initializers for mutex's, condition variables, etc. We also
842 * require a minimal level of cleanup for these data types unlike
843 * many Linux data types which do not need to be explicitly destroyed.
844 *
845 * 2) Virtual address space backed slab. Callers of the Solaris slab
846 * expect it to work well for both small and very large allocations.
847 * Because of memory fragmentation the Linux slab which is backed
848 * by kmalloc'ed memory performs very badly when confronted with
849 * large numbers of large allocations. Basing the slab on the
ecc39810 850 * virtual address space removes the need for contiguous pages
10129680
BB
851 * and greatly improves performance for large allocations.
852 *
853 * For these reasons, the SPL has its own slab implementation with
854 * the needed features. It is not as highly optimized as either the
855 * Solaris or Linux slabs, but it should get me most of what is
856 * needed until it can be optimized or obsoleted by another approach.
857 *
858 * One serious concern I do have about this method is the relatively
859 * small virtual address space on 32bit arches. This will seriously
860 * constrain the size of the slab caches and their performance.
861 *
862 * XXX: Improve the partial slab list by carefully maintaining a
863 * strict ordering of fullest to emptiest slabs based on
ecc39810 864 * the slab reference count. This guarantees that when freeing
10129680
BB
865 * slabs back to the system we need only linearly traverse the
866 * last N slabs in the list to discover all the freeable slabs.
867 *
868 * XXX: NUMA awareness for optionally allocating memory close to a
ecc39810 869 * particular core. This can be advantageous if you know the slab
10129680
BB
870 * object will be short lived and primarily accessed from one core.
871 *
872 * XXX: Slab coloring may also yield performance improvements and would
873 * be desirable to implement.
874 */
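/*
 * Illustrative usage sketch (not part of this file): creating an SPL backed
 * cache for a type with a non-trivial initializer.  The constructor and
 * destructor prototypes follow the (obj, private, flags) and (obj, private)
 * call sites used below; the exact typedefs live in the SPL headers and the
 * my_node_* names are hypothetical.  Such a cache is later torn down with
 * spl_kmem_cache_destroy().
 */
typedef struct my_node {
	struct list_head	mn_list;
	int			mn_value;
} my_node_t;

static int
my_node_ctor(void *buf, void *private, int flags)
{
	my_node_t *mn = buf;

	INIT_LIST_HEAD(&mn->mn_list);
	mn->mn_value = 0;

	return (0);
}

static void
my_node_dtor(void *buf, void *private)
{
	my_node_t *mn = buf;

	/* Nothing expensive to tear down in this example */
	mn->mn_value = -1;
}

static spl_kmem_cache_t *
my_node_cache_create_example(void)
{
	/* Passing 0 for align and flags lets the SPL pick sane defaults */
	return (spl_kmem_cache_create("my_node_cache", sizeof (my_node_t), 0,
	    my_node_ctor, my_node_dtor, NULL, NULL, NULL, 0));
}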
875
876struct list_head spl_kmem_cache_list; /* List of caches */
877struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
a10287e0 878taskq_t *spl_kmem_cache_taskq; /* Task queue for ageing / reclaim */
10129680 879
d4899f47 880static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
10129680 881
a55bcaad 882SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker);
495bd532
BB
883SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
884 spl_kmem_cache_generic_shrinker, KMC_DEFAULT_SEEKS);
10129680 885
a1502d76 886static void *
887kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
fece7c99 888{
a1502d76 889 void *ptr;
f1ca4da6 890
8b45dda2
BB
891 ASSERT(ISP2(size));
892
500e95c8 893 if (skc->skc_flags & KMC_KMEM)
ae16ed99
CC
894 ptr = (void *)__get_free_pages(flags | __GFP_COMP,
895 get_order(size));
500e95c8 896 else
617f79de
BB
897 ptr = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
898
8b45dda2
BB
899 /* Resulting allocated memory will be page aligned */
900 ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
fece7c99 901
a1502d76 902 return ptr;
903}
fece7c99 904
a1502d76 905static void
906kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
907{
8b45dda2
BB
908 ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
909 ASSERT(ISP2(size));
910
06089b9e
BB
911 /*
912 * The Linux direct reclaim path uses this out of band value to
913 * determine if forward progress is being made. Normally this is
914 * incremented by kmem_freepages() which is part of the various
915 * Linux slab implementations. However, since we are using none
916 * of that infrastructure we are responsible for incrementing it.
917 */
918 if (current->reclaim_state)
919 current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
920
8b45dda2
BB
921 if (skc->skc_flags & KMC_KMEM)
922 free_pages((unsigned long)ptr, get_order(size));
923 else
924 vfree(ptr);
925}
926
927/*
928 * Required space for each aligned sks.
929 */
930static inline uint32_t
931spl_sks_size(spl_kmem_cache_t *skc)
932{
933 return P2ROUNDUP_TYPED(sizeof(spl_kmem_slab_t),
934 skc->skc_obj_align, uint32_t);
935}
936
937/*
938 * Required space for each aligned object.
939 */
940static inline uint32_t
941spl_obj_size(spl_kmem_cache_t *skc)
942{
943 uint32_t align = skc->skc_obj_align;
944
945 return P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
946 P2ROUNDUP_TYPED(sizeof(spl_kmem_obj_t), align, uint32_t);
947}
948
949/*
950 * Lookup the spl_kmem_object_t for an object given that object.
951 */
952static inline spl_kmem_obj_t *
953spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
954{
955 return obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
956 skc->skc_obj_align, uint32_t);
957}
958
959/*
960 * Required space for each offslab object taking into account alignment
961 * restrictions and the power-of-two requirement of kv_alloc().
962 */
963static inline uint32_t
964spl_offslab_size(spl_kmem_cache_t *skc)
965{
87f8055a 966 return 1UL << (fls64(spl_obj_size(skc)) + 1);
fece7c99 967}
968
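/*
 * Worked example (assuming the default 8 byte alignment and 4 KiB pages): an
 * on-slab cache of 512 byte objects pays P2ROUNDUP(sizeof(spl_kmem_obj_t), 8)
 * bytes of per-object overhead, so spl_obj_size() is 512 plus that overhead
 * and spl_slab_size() below picks the smallest power-of-two slab that holds
 * spl_kmem_cache_obj_per_slab such objects after the aligned slab header.
 */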
ea3e6ca9
BB
969/*
970 * It's important that we pack the spl_kmem_obj_t structure and the
48e0606a
BB
971 * actual objects into one large address space to minimize the number
972 * of calls to the allocator. It is far better to do a few large
973 * allocations and then subdivide them ourselves. Now which allocator
974 * we use requires balancing a few trade-offs.
975 *
976 * For small objects we use kmem_alloc() because as long as you are
977 * only requesting a small number of pages (ideally just one) it's cheap.
978 * However, when you start requesting multiple pages with kmem_alloc()
ecc39810 979 * it gets increasingly expensive since it requires contiguous pages.
48e0606a 980 * For this reason we shift to vmem_alloc() for slabs of large objects
ecc39810 981 * which removes the need for contiguous pages. We do not use
48e0606a
BB
982 * vmem_alloc() in all cases because there is significant locking
983 * overhead in __get_vm_area_node(). This function takes a single
ecc39810 984 * global lock when acquiring an available virtual address range which
48e0606a
BB
985 * serializes all vmem_alloc()'s for all slab caches. Using slightly
986 * different allocation functions for small and large objects should
987 * give us the best of both worlds.
988 *
989 * KMC_ONSLAB KMC_OFFSLAB
990 *
991 * +------------------------+ +-----------------+
992 * | spl_kmem_slab_t --+-+ | | spl_kmem_slab_t |---+-+
993 * | skc_obj_size <-+ | | +-----------------+ | |
994 * | spl_kmem_obj_t | | | |
995 * | skc_obj_size <---+ | +-----------------+ | |
996 * | spl_kmem_obj_t | | | skc_obj_size | <-+ |
997 * | ... v | | spl_kmem_obj_t | |
998 * +------------------------+ +-----------------+ v
999 */
fece7c99 1000static spl_kmem_slab_t *
a1502d76 1001spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
fece7c99 1002{
1003 spl_kmem_slab_t *sks;
a1502d76 1004 spl_kmem_obj_t *sko, *n;
1005 void *base, *obj;
8b45dda2
BB
1006 uint32_t obj_size, offslab_size = 0;
1007 int i, rc = 0;
48e0606a 1008
a1502d76 1009 base = kv_alloc(skc, skc->skc_slab_size, flags);
1010 if (base == NULL)
b17edc10 1011 SRETURN(NULL);
fece7c99 1012
a1502d76 1013 sks = (spl_kmem_slab_t *)base;
1014 sks->sks_magic = SKS_MAGIC;
1015 sks->sks_objs = skc->skc_slab_objs;
1016 sks->sks_age = jiffies;
1017 sks->sks_cache = skc;
1018 INIT_LIST_HEAD(&sks->sks_list);
1019 INIT_LIST_HEAD(&sks->sks_free_list);
1020 sks->sks_ref = 0;
8b45dda2 1021 obj_size = spl_obj_size(skc);
48e0606a 1022
8d177c18 1023 if (skc->skc_flags & KMC_OFFSLAB)
8b45dda2 1024 offslab_size = spl_offslab_size(skc);
fece7c99 1025
1026 for (i = 0; i < sks->sks_objs; i++) {
a1502d76 1027 if (skc->skc_flags & KMC_OFFSLAB) {
8b45dda2 1028 obj = kv_alloc(skc, offslab_size, flags);
a1502d76 1029 if (!obj)
b17edc10 1030 SGOTO(out, rc = -ENOMEM);
a1502d76 1031 } else {
8b45dda2 1032 obj = base + spl_sks_size(skc) + (i * obj_size);
a1502d76 1033 }
1034
8b45dda2
BB
1035 ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
1036 sko = spl_sko_from_obj(skc, obj);
fece7c99 1037 sko->sko_addr = obj;
1038 sko->sko_magic = SKO_MAGIC;
1039 sko->sko_slab = sks;
1040 INIT_LIST_HEAD(&sko->sko_list);
fece7c99 1041 list_add_tail(&sko->sko_list, &sks->sks_free_list);
1042 }
1043
fece7c99 1044 list_for_each_entry(sko, &sks->sks_free_list, sko_list)
1045 if (skc->skc_ctor)
1046 skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);
2fb9b26a 1047out:
a1502d76 1048 if (rc) {
1049 if (skc->skc_flags & KMC_OFFSLAB)
48e0606a
BB
1050 list_for_each_entry_safe(sko, n, &sks->sks_free_list,
1051 sko_list)
8b45dda2 1052 kv_free(skc, sko->sko_addr, offslab_size);
fece7c99 1053
a1502d76 1054 kv_free(skc, base, skc->skc_slab_size);
1055 sks = NULL;
fece7c99 1056 }
1057
b17edc10 1058 SRETURN(sks);
fece7c99 1059}
1060
ea3e6ca9
BB
1061/*
1062 * Remove a slab from complete or partial list, it must be called with
1063 * the 'skc->skc_lock' held but the actual free must be performed
1064 * outside the lock to prevent deadlocking on vmem addresses.
fece7c99 1065 */
f1ca4da6 1066static void
ea3e6ca9
BB
1067spl_slab_free(spl_kmem_slab_t *sks,
1068 struct list_head *sks_list, struct list_head *sko_list)
1069{
2fb9b26a 1070 spl_kmem_cache_t *skc;
b17edc10 1071 SENTRY;
57d86234 1072
2fb9b26a 1073 ASSERT(sks->sks_magic == SKS_MAGIC);
4afaaefa 1074 ASSERT(sks->sks_ref == 0);
d6a26c6a 1075
fece7c99 1076 skc = sks->sks_cache;
1077 ASSERT(skc->skc_magic == SKC_MAGIC);
d46630e0 1078 ASSERT(spin_is_locked(&skc->skc_lock));
f1ca4da6 1079
1a944a7d
BB
1080 /*
1081 * Update slab/objects counters in the cache, then remove the
1082 * slab from the skc->skc_partial_list. Finally add the slab
1083 * and all its objects in to the private work lists where the
1084 * destructors will be called and the memory freed to the system.
1085 */
fece7c99 1086 skc->skc_obj_total -= sks->sks_objs;
1087 skc->skc_slab_total--;
1088 list_del(&sks->sks_list);
ea3e6ca9 1089 list_add(&sks->sks_list, sks_list);
1a944a7d
BB
1090 list_splice_init(&sks->sks_free_list, sko_list);
1091
b17edc10 1092 SEXIT;
2fb9b26a 1093}
d6a26c6a 1094
ea3e6ca9
BB
1095/*
1096 * Traverses all the partial slabs attached to a cache and frees those
1097 * which are currently empty, and have not been touched for
37db7d8c
BB
1098 * skc_delay seconds to avoid thrashing. The count argument is
1099 * passed to optionally cap the number of slabs reclaimed, a count
1100 * of zero means try and reclaim everything. When flag is set we
1101 * always free an available slab regardless of age.
ea3e6ca9
BB
1102 */
1103static void
37db7d8c 1104spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
2fb9b26a 1105{
1106 spl_kmem_slab_t *sks, *m;
ea3e6ca9
BB
1107 spl_kmem_obj_t *sko, *n;
1108 LIST_HEAD(sks_list);
1109 LIST_HEAD(sko_list);
8b45dda2
BB
1110 uint32_t size = 0;
1111 int i = 0;
b17edc10 1112 SENTRY;
2fb9b26a 1113
2fb9b26a 1114 /*
ea3e6ca9
BB
1115 * Move empty slabs and objects which have not been touched in
1116 * skc_delay seconds on to private lists to be freed outside
1a944a7d
BB
1117 * the spin lock. This delay time is important to avoid thrashing;
1118 * however, when flag is set the delay will not be used.
2fb9b26a 1119 */
ea3e6ca9 1120 spin_lock(&skc->skc_lock);
1a944a7d
BB
1121 list_for_each_entry_safe_reverse(sks,m,&skc->skc_partial_list,sks_list){
1122 /*
1123 * All empty slabs are at the end of skc->skc_partial_list,
1124 * therefore once a non-empty slab is found we can stop
1125 * scanning. Additionally, stop when reaching the target
ecc39810 1126 * reclaim 'count' if a non-zero threshold is given.
1a944a7d 1127 */
cef7605c 1128 if ((sks->sks_ref > 0) || (count && i >= count))
37db7d8c
BB
1129 break;
1130
37db7d8c 1131 if (time_after(jiffies,sks->sks_age+skc->skc_delay*HZ)||flag) {
ea3e6ca9 1132 spl_slab_free(sks, &sks_list, &sko_list);
37db7d8c
BB
1133 i++;
1134 }
ea3e6ca9
BB
1135 }
1136 spin_unlock(&skc->skc_lock);
1137
1138 /*
1a944a7d
BB
1139 * The following two loops ensure all the object destructors are
1140 * run, any offslab objects are freed, and the slabs themselves
1141 * are freed. This is all done outside the skc->skc_lock since
1142 * this allows the destructor to sleep, and allows us to perform
1143 * a conditional reschedule when a freeing a large number of
1144 * objects and slabs back to the system.
ea3e6ca9 1145 */
1a944a7d 1146 if (skc->skc_flags & KMC_OFFSLAB)
8b45dda2 1147 size = spl_offslab_size(skc);
ea3e6ca9 1148
1a944a7d
BB
1149 list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
1150 ASSERT(sko->sko_magic == SKO_MAGIC);
1151
1152 if (skc->skc_dtor)
1153 skc->skc_dtor(sko->sko_addr, skc->skc_private);
1154
1155 if (skc->skc_flags & KMC_OFFSLAB)
ea3e6ca9 1156 kv_free(skc, sko->sko_addr, size);
2fb9b26a 1157 }
1158
37db7d8c 1159 list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
1a944a7d 1160 ASSERT(sks->sks_magic == SKS_MAGIC);
ea3e6ca9 1161 kv_free(skc, sks, skc->skc_slab_size);
37db7d8c 1162 }
ea3e6ca9 1163
b17edc10 1164 SEXIT;
f1ca4da6 1165}
1166
ed316348
BB
1167static spl_kmem_emergency_t *
1168spl_emergency_search(struct rb_root *root, void *obj)
1169{
1170 struct rb_node *node = root->rb_node;
1171 spl_kmem_emergency_t *ske;
1172 unsigned long address = (unsigned long)obj;
1173
1174 while (node) {
1175 ske = container_of(node, spl_kmem_emergency_t, ske_node);
1176
1177 if (address < (unsigned long)ske->ske_obj)
1178 node = node->rb_left;
1179 else if (address > (unsigned long)ske->ske_obj)
1180 node = node->rb_right;
1181 else
1182 return ske;
1183 }
1184
1185 return NULL;
1186}
1187
1188static int
1189spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
1190{
1191 struct rb_node **new = &(root->rb_node), *parent = NULL;
1192 spl_kmem_emergency_t *ske_tmp;
1193 unsigned long address = (unsigned long)ske->ske_obj;
1194
1195 while (*new) {
1196 ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
1197
1198 parent = *new;
1199 if (address < (unsigned long)ske_tmp->ske_obj)
1200 new = &((*new)->rb_left);
1201 else if (address > (unsigned long)ske_tmp->ske_obj)
1202 new = &((*new)->rb_right);
1203 else
1204 return 0;
1205 }
1206
1207 rb_link_node(&ske->ske_node, parent, new);
1208 rb_insert_color(&ske->ske_node, root);
1209
1210 return 1;
1211}
1212
e2dcc6e2 1213/*
ed316348 1214 * Allocate a single emergency object and track it in a red black tree.
e2dcc6e2
BB
1215 */
1216static int
1217spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
1218{
1219 spl_kmem_emergency_t *ske;
1220 int empty;
1221 SENTRY;
1222
1223 /* Last chance use a partial slab if one now exists */
1224 spin_lock(&skc->skc_lock);
1225 empty = list_empty(&skc->skc_partial_list);
1226 spin_unlock(&skc->skc_lock);
1227 if (!empty)
1228 SRETURN(-EEXIST);
1229
1230 ske = kmalloc(sizeof(*ske), flags);
1231 if (ske == NULL)
1232 SRETURN(-ENOMEM);
1233
1234 ske->ske_obj = kmalloc(skc->skc_obj_size, flags);
1235 if (ske->ske_obj == NULL) {
1236 kfree(ske);
1237 SRETURN(-ENOMEM);
1238 }
1239
e2dcc6e2 1240 spin_lock(&skc->skc_lock);
ed316348
BB
1241 empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
1242 if (likely(empty)) {
1243 skc->skc_obj_total++;
1244 skc->skc_obj_emergency++;
1245 if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
1246 skc->skc_obj_emergency_max = skc->skc_obj_emergency;
1247 }
e2dcc6e2
BB
1248 spin_unlock(&skc->skc_lock);
1249
ed316348
BB
1250 if (unlikely(!empty)) {
1251 kfree(ske->ske_obj);
1252 kfree(ske);
1253 SRETURN(-EINVAL);
1254 }
1255
1256 if (skc->skc_ctor)
1257 skc->skc_ctor(ske->ske_obj, skc->skc_private, flags);
1258
e2dcc6e2
BB
1259 *obj = ske->ske_obj;
1260
1261 SRETURN(0);
1262}
1263
1264/*
ed316348 1265 * Locate the passed object in the red black tree and free it.
e2dcc6e2
BB
1266 */
1267static int
1268spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
1269{
ed316348 1270 spl_kmem_emergency_t *ske;
e2dcc6e2
BB
1271 SENTRY;
1272
1273 spin_lock(&skc->skc_lock);
ed316348
BB
1274 ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
1275 if (likely(ske)) {
1276 rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
1277 skc->skc_obj_emergency--;
1278 skc->skc_obj_total--;
e2dcc6e2
BB
1279 }
1280 spin_unlock(&skc->skc_lock);
1281
ed316348 1282 if (unlikely(ske == NULL))
e2dcc6e2
BB
1283 SRETURN(-ENOENT);
1284
1285 if (skc->skc_dtor)
1286 skc->skc_dtor(ske->ske_obj, skc->skc_private);
1287
1288 kfree(ske->ske_obj);
1289 kfree(ske);
1290
1291 SRETURN(0);
1292}
1293
d4899f47
BB
1294/*
1295 * Release objects from the per-cpu magazine back to their slab. The flush
1296 * argument contains the max number of entries to remove from the magazine.
1297 */
1298static void
1299__spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
1300{
1301 int i, count = MIN(flush, skm->skm_avail);
1302 SENTRY;
1303
1304 ASSERT(skc->skc_magic == SKC_MAGIC);
1305 ASSERT(skm->skm_magic == SKM_MAGIC);
1306 ASSERT(spin_is_locked(&skc->skc_lock));
1307
1308 for (i = 0; i < count; i++)
1309 spl_cache_shrink(skc, skm->skm_objs[i]);
1310
1311 skm->skm_avail -= count;
1312 memmove(skm->skm_objs, &(skm->skm_objs[count]),
1313 sizeof(void *) * skm->skm_avail);
1314
1315 SEXIT;
1316}
1317
1318static void
1319spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
1320{
1321 spin_lock(&skc->skc_lock);
1322 __spl_cache_flush(skc, skm, flush);
1323 spin_unlock(&skc->skc_lock);
1324}
1325
ea3e6ca9
BB
1326static void
1327spl_magazine_age(void *data)
f1ca4da6 1328{
a10287e0
BB
1329 spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
1330 spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
9b1b8e4c
BB
1331
1332 ASSERT(skm->skm_magic == SKM_MAGIC);
a10287e0 1333 ASSERT(skm->skm_cpu == smp_processor_id());
d4899f47
BB
1334 ASSERT(irqs_disabled());
1335
1336 /* There are no available objects or they are too young to age out */
1337 if ((skm->skm_avail == 0) ||
1338 time_before(jiffies, skm->skm_age + skc->skc_delay * HZ))
1339 return;
f1ca4da6 1340
d4899f47
BB
1341 /*
1342 * Because we're executing in interrupt context we may have
1343 * interrupted the holder of this lock. To avoid a potential
1344 * deadlock return if the lock is contended.
1345 */
1346 if (!spin_trylock(&skc->skc_lock))
1347 return;
1348
1349 __spl_cache_flush(skc, skm, skm->skm_refill);
1350 spin_unlock(&skc->skc_lock);
ea3e6ca9 1351}
4efd4118 1352
ea3e6ca9 1353/*
a10287e0
BB
1354 * Called regularly to keep a downward pressure on the cache.
1355 *
1356 * Objects older than skc->skc_delay seconds in the per-cpu magazines will
1357 * be returned to the caches. This is done to prevent idle magazines from
1358 * holding memory which could be better used elsewhere. The delay is
1359 * present to prevent thrashing the magazine.
1360 *
1361 * The newly released objects may result in empty partial slabs. Those
1362 * slabs should be released to the system. Otherwise moving the objects
1363 * out of the magazines is just wasted work.
ea3e6ca9
BB
1364 */
1365static void
1366spl_cache_age(void *data)
1367{
a10287e0
BB
1368 spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
1369 taskqid_t id = 0;
ea3e6ca9
BB
1370
1371 ASSERT(skc->skc_magic == SKC_MAGIC);
a10287e0 1372
0936c344
BB
1373 /* Dynamically disabled at run time */
1374 if (!(spl_kmem_cache_expire & KMC_EXPIRE_AGE))
1375 return;
1376
a10287e0 1377 atomic_inc(&skc->skc_ref);
a073aeb0
BB
1378
1379 if (!(skc->skc_flags & KMC_NOMAGAZINE))
50e41ab1 1380 on_each_cpu(spl_magazine_age, skc, 1);
a073aeb0 1381
37db7d8c 1382 spl_slab_reclaim(skc, skc->skc_reap, 0);
ea3e6ca9 1383
a10287e0
BB
1384 while (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && !id) {
1385 id = taskq_dispatch_delay(
1386 spl_kmem_cache_taskq, spl_cache_age, skc, TQ_SLEEP,
1387 ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
1388
1389 /* Destroy issued after dispatch immediately cancel it */
1390 if (test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && id)
1391 taskq_cancel_id(spl_kmem_cache_taskq, id);
1392 }
1393
1394 spin_lock(&skc->skc_lock);
1395 skc->skc_taskqid = id;
1396 spin_unlock(&skc->skc_lock);
1397
1398 atomic_dec(&skc->skc_ref);
2fb9b26a 1399}
f1ca4da6 1400
ea3e6ca9 1401/*
8b45dda2 1402 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
bdfbe594 1403 * When on-slab we want to target spl_kmem_cache_obj_per_slab. However,
ea3e6ca9
BB
1404 * for very small objects we may end up with more than this so as not
1405 * to waste space in the minimal allocation of a single page. Also for
bdfbe594 1406 * very large objects we may use as few as spl_kmem_cache_obj_per_slab_min,
ea3e6ca9
BB
1407 * anything lower than this and we will fail.
1408 */
48e0606a
BB
1409static int
1410spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
1411{
8b45dda2 1412 uint32_t sks_size, obj_size, max_size;
48e0606a
BB
1413
1414 if (skc->skc_flags & KMC_OFFSLAB) {
bdfbe594 1415 *objs = spl_kmem_cache_obj_per_slab;
ceb38728
BB
1416 *size = P2ROUNDUP(sizeof(spl_kmem_slab_t), PAGE_SIZE);
1417 SRETURN(0);
48e0606a 1418 } else {
8b45dda2
BB
1419 sks_size = spl_sks_size(skc);
1420 obj_size = spl_obj_size(skc);
ea3e6ca9
BB
1421
1422 if (skc->skc_flags & KMC_KMEM)
aa600d8a 1423 max_size = ((uint32_t)1 << (MAX_ORDER-3)) * PAGE_SIZE;
ea3e6ca9 1424 else
bdfbe594 1425 max_size = (spl_kmem_cache_max_size * 1024 * 1024);
48e0606a 1426
8b45dda2
BB
1427 /* Power of two sized slab */
1428 for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
ea3e6ca9 1429 *objs = (*size - sks_size) / obj_size;
bdfbe594 1430 if (*objs >= spl_kmem_cache_obj_per_slab)
b17edc10 1431 SRETURN(0);
ea3e6ca9 1432 }
48e0606a 1433
ea3e6ca9 1434 /*
8b45dda2 1435 * Unable to satisfy target objects per slab, fall back to
ea3e6ca9
BB
1436 * allocating a maximally sized slab and assuming it can
1437 * contain the minimum objects count use it. If not fail.
1438 */
1439 *size = max_size;
1440 *objs = (*size - sks_size) / obj_size;
bdfbe594 1441 if (*objs >= (spl_kmem_cache_obj_per_slab_min))
b17edc10 1442 SRETURN(0);
48e0606a
BB
1443 }
1444
b17edc10 1445 SRETURN(-ENOSPC);
48e0606a
BB
1446}
1447
ea3e6ca9
BB
1448/*
1449 * Make a guess at reasonable per-cpu magazine size based on the size of
1450 * each object and the cost of caching N of them in each magazine. Long
1451 * term this should really adapt based on an observed usage heuristic.
1452 */
4afaaefa 1453static int
1454spl_magazine_size(spl_kmem_cache_t *skc)
1455{
8b45dda2
BB
1456 uint32_t obj_size = spl_obj_size(skc);
1457 int size;
b17edc10 1458 SENTRY;
4afaaefa 1459
ea3e6ca9 1460 /* Per-magazine sizes below assume a 4Kib page size */
8b45dda2 1461 if (obj_size > (PAGE_SIZE * 256))
ea3e6ca9 1462 size = 4; /* Minimum 4Mib per-magazine */
8b45dda2 1463 else if (obj_size > (PAGE_SIZE * 32))
ea3e6ca9 1464 size = 16; /* Minimum 2Mib per-magazine */
8b45dda2 1465 else if (obj_size > (PAGE_SIZE))
ea3e6ca9 1466 size = 64; /* Minimum 256Kib per-magazine */
8b45dda2 1467 else if (obj_size > (PAGE_SIZE / 4))
ea3e6ca9 1468 size = 128; /* Minimum 128Kib per-magazine */
4afaaefa 1469 else
ea3e6ca9 1470 size = 256;
4afaaefa 1471
b17edc10 1472 SRETURN(size);
4afaaefa 1473}
1474
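/*
 * Worked example (assuming 4 KiB pages): an object whose aligned size is
 * 512 bytes falls through to the final case above, so each per-cpu magazine
 * holds up to 256 object pointers and can cache roughly 128 KiB of objects
 * per cpu before they are flushed back to their slabs.
 */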
ea3e6ca9 1475/*
ecc39810 1476 * Allocate a per-cpu magazine to associate with a specific core.
ea3e6ca9 1477 */
4afaaefa 1478static spl_kmem_magazine_t *
08850edd 1479spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
4afaaefa 1480{
1481 spl_kmem_magazine_t *skm;
1482 int size = sizeof(spl_kmem_magazine_t) +
1483 sizeof(void *) * skc->skc_mag_size;
b17edc10 1484 SENTRY;
4afaaefa 1485
08850edd 1486 skm = kmem_alloc_node(size, KM_SLEEP, cpu_to_node(cpu));
4afaaefa 1487 if (skm) {
1488 skm->skm_magic = SKM_MAGIC;
1489 skm->skm_avail = 0;
1490 skm->skm_size = skc->skc_mag_size;
1491 skm->skm_refill = skc->skc_mag_refill;
9b1b8e4c 1492 skm->skm_cache = skc;
ea3e6ca9 1493 skm->skm_age = jiffies;
08850edd 1494 skm->skm_cpu = cpu;
4afaaefa 1495 }
1496
b17edc10 1497 SRETURN(skm);
4afaaefa 1498}
1499
ea3e6ca9 1500/*
ecc39810 1501 * Free a per-cpu magazine associated with a specific core.
ea3e6ca9 1502 */
4afaaefa 1503static void
1504spl_magazine_free(spl_kmem_magazine_t *skm)
1505{
a0f6da3d 1506 int size = sizeof(spl_kmem_magazine_t) +
1507 sizeof(void *) * skm->skm_size;
1508
b17edc10 1509 SENTRY;
4afaaefa 1510 ASSERT(skm->skm_magic == SKM_MAGIC);
1511 ASSERT(skm->skm_avail == 0);
a0f6da3d 1512
1513 kmem_free(skm, size);
b17edc10 1514 SEXIT;
4afaaefa 1515}
1516
ea3e6ca9
BB
1517/*
1518 * Create all per-cpu magazines of reasonable sizes.
1519 */
4afaaefa 1520static int
1521spl_magazine_create(spl_kmem_cache_t *skc)
1522{
37db7d8c 1523 int i;
b17edc10 1524 SENTRY;
4afaaefa 1525
a073aeb0
BB
1526 if (skc->skc_flags & KMC_NOMAGAZINE)
1527 SRETURN(0);
1528
4afaaefa 1529 skc->skc_mag_size = spl_magazine_size(skc);
ea3e6ca9 1530 skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
4afaaefa 1531
37db7d8c 1532 for_each_online_cpu(i) {
08850edd 1533 skc->skc_mag[i] = spl_magazine_alloc(skc, i);
37db7d8c
BB
1534 if (!skc->skc_mag[i]) {
1535 for (i--; i >= 0; i--)
1536 spl_magazine_free(skc->skc_mag[i]);
4afaaefa 1537
b17edc10 1538 SRETURN(-ENOMEM);
37db7d8c
BB
1539 }
1540 }
4afaaefa 1541
b17edc10 1542 SRETURN(0);
4afaaefa 1543}
1544
ea3e6ca9
BB
1545/*
1546 * Destroy all per-cpu magazines.
1547 */
4afaaefa 1548static void
1549spl_magazine_destroy(spl_kmem_cache_t *skc)
1550{
37db7d8c
BB
1551 spl_kmem_magazine_t *skm;
1552 int i;
b17edc10 1553 SENTRY;
37db7d8c 1554
a073aeb0
BB
1555 if (skc->skc_flags & KMC_NOMAGAZINE) {
1556 SEXIT;
1557 return;
1558 }
1559
37db7d8c
BB
1560 for_each_online_cpu(i) {
1561 skm = skc->skc_mag[i];
d4899f47 1562 spl_cache_flush(skc, skm, skm->skm_avail);
37db7d8c
BB
1563 spl_magazine_free(skm);
1564 }
1565
b17edc10 1566 SEXIT;
4afaaefa 1567}
1568
ea3e6ca9
BB
1569/*
1570 * Create an object cache based on the following arguments:
1571 * name cache name
1572 * size cache object size
1573 * align cache object alignment
1574 * ctor cache object constructor
1575 * dtor cache object destructor
1576 * reclaim cache object reclaim
1577 * priv cache private data for ctor/dtor/reclaim
1578 * vmp unused must be NULL
1579 * flags
1580 * KMC_NOTOUCH Disable cache object aging (unsupported)
1581 * KMC_NODEBUG Disable debugging (unsupported)
ea3e6ca9
BB
1582 * KMC_NOHASH Disable hashing (unsupported)
1583 * KMC_QCACHE Disable qcache (unsupported)
a073aeb0 1584 * KMC_NOMAGAZINE Enabled for kmem/vmem, Disabled for Linux slab
ea3e6ca9
BB
1585 * KMC_KMEM Force kmem backed cache
1586 * KMC_VMEM Force vmem backed cache
a073aeb0 1587 * KMC_SLAB Force Linux slab backed cache
ea3e6ca9
BB
1588 * KMC_OFFSLAB Locate objects off the slab
1589 */
2fb9b26a 1590spl_kmem_cache_t *
1591spl_kmem_cache_create(char *name, size_t size, size_t align,
1592 spl_kmem_ctor_t ctor,
1593 spl_kmem_dtor_t dtor,
1594 spl_kmem_reclaim_t reclaim,
1595 void *priv, void *vmp, int flags)
1596{
1597 spl_kmem_cache_t *skc;
296a8e59 1598 int rc;
b17edc10 1599 SENTRY;
937879f1 1600
a1502d76 1601 ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
1602 ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
1603 ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);
48e0606a 1604 ASSERT(vmp == NULL);
a1502d76 1605
296a8e59 1606 might_sleep();
0a6fd143 1607
296a8e59
BB
1608 /*
1609 * Allocate memory for a new cache and initialize it. Unfortunately,
5198ea0e
BB
1610 * this usually ends up being a large allocation of ~32k because
1611 * we need to allocate enough memory for the worst case number of
1612 * cpus in the magazine, skc_mag[NR_CPUS]. Because of this we
296a8e59
BB
1613 * explicitly pass KM_NODEBUG to suppress the kmem warning.
1614 */
1615 skc = kmem_zalloc(sizeof(*skc), KM_SLEEP| KM_NODEBUG);
e9d7a2be 1616 if (skc == NULL)
b17edc10 1617 SRETURN(NULL);
d61e12af 1618
2fb9b26a 1619 skc->skc_magic = SKC_MAGIC;
2fb9b26a 1620 skc->skc_name_size = strlen(name) + 1;
296a8e59 1621 skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, KM_SLEEP);
2fb9b26a 1622 if (skc->skc_name == NULL) {
1623 kmem_free(skc, sizeof(*skc));
b17edc10 1624 SRETURN(NULL);
2fb9b26a 1625 }
1626 strncpy(skc->skc_name, name, skc->skc_name_size);
1627
e9d7a2be 1628 skc->skc_ctor = ctor;
1629 skc->skc_dtor = dtor;
1630 skc->skc_reclaim = reclaim;
2fb9b26a 1631 skc->skc_private = priv;
1632 skc->skc_vmp = vmp;
a073aeb0 1633 skc->skc_linux_cache = NULL;
2fb9b26a 1634 skc->skc_flags = flags;
1635 skc->skc_obj_size = size;
48e0606a 1636 skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
2fb9b26a 1637 skc->skc_delay = SPL_KMEM_CACHE_DELAY;
37db7d8c 1638 skc->skc_reap = SPL_KMEM_CACHE_REAP;
ea3e6ca9 1639 atomic_set(&skc->skc_ref, 0);
2fb9b26a 1640
2fb9b26a 1641 INIT_LIST_HEAD(&skc->skc_list);
1642 INIT_LIST_HEAD(&skc->skc_complete_list);
1643 INIT_LIST_HEAD(&skc->skc_partial_list);
ed316348 1644 skc->skc_emergency_tree = RB_ROOT;
d46630e0 1645 spin_lock_init(&skc->skc_lock);
e2dcc6e2 1646 init_waitqueue_head(&skc->skc_waitq);
e9d7a2be 1647 skc->skc_slab_fail = 0;
1648 skc->skc_slab_create = 0;
1649 skc->skc_slab_destroy = 0;
2fb9b26a 1650 skc->skc_slab_total = 0;
1651 skc->skc_slab_alloc = 0;
1652 skc->skc_slab_max = 0;
1653 skc->skc_obj_total = 0;
1654 skc->skc_obj_alloc = 0;
1655 skc->skc_obj_max = 0;
165f13c3 1656 skc->skc_obj_deadlock = 0;
e2dcc6e2
BB
1657 skc->skc_obj_emergency = 0;
1658 skc->skc_obj_emergency_max = 0;
a1502d76 1659
a073aeb0
BB
1660 /*
1661 * Verify the requested alignment restriction is sane.
1662 */
48e0606a 1663 if (align) {
8b45dda2 1664 VERIFY(ISP2(align));
a073aeb0
BB
1665 VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
1666 VERIFY3U(align, <=, PAGE_SIZE);
48e0606a
BB
1667 skc->skc_obj_align = align;
1668 }
1669
a073aeb0
BB
1670 /*
1671 * When no specific type of slab is requested (kmem, vmem, or
1672 * linuxslab) then select a cache type based on the object size
1673 * and default tunables.
1674 */
1675 if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB))) {
1676
1677 /*
1678 * Objects smaller than spl_kmem_cache_slab_limit can
1679 * use the Linux slab for better space-efficiency. By
1680 * default this functionality is disabled until its
1681 * performance characteristics are fully understood.
1682 */
1683 if (spl_kmem_cache_slab_limit &&
1684 size <= (size_t)spl_kmem_cache_slab_limit)
1685 skc->skc_flags |= KMC_SLAB;
1686
1687 /*
1688 * Small objects, less than spl_kmem_cache_kmem_limit per
1689 * object, should use kmem because their slabs are small.
1690 */
1691 else if (spl_obj_size(skc) <= spl_kmem_cache_kmem_limit)
a1502d76 1692 skc->skc_flags |= KMC_KMEM;
a073aeb0
BB
1693
1694 /*
1695 * All other objects are considered large and are placed
1696 * on vmem backed slabs.
1697 */
8b45dda2 1698 else
a1502d76 1699 skc->skc_flags |= KMC_VMEM;
a1502d76 1700 }
1701
a073aeb0
BB
1702 /*
1703 * Given the type of slab, allocate the required resources.
1704 */
1705 if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
1706 rc = spl_slab_size(skc,
1707 &skc->skc_slab_objs, &skc->skc_slab_size);
1708 if (rc)
1709 SGOTO(out, rc);
1710
1711 rc = spl_magazine_create(skc);
1712 if (rc)
1713 SGOTO(out, rc);
1714 } else {
1715 skc->skc_linux_cache = kmem_cache_create(
1716 skc->skc_name, size, align, 0, NULL);
1717 if (skc->skc_linux_cache == NULL)
1718 SGOTO(out, rc = ENOMEM);
4afaaefa 1719
a073aeb0
BB
1720 kmem_cache_set_allocflags(skc, __GFP_COMP);
1721 skc->skc_flags |= KMC_NOMAGAZINE;
1722 }
2fb9b26a 1723
0936c344
BB
1724 if (spl_kmem_cache_expire & KMC_EXPIRE_AGE)
1725 skc->skc_taskqid = taskq_dispatch_delay(spl_kmem_cache_taskq,
1726 spl_cache_age, skc, TQ_SLEEP,
1727 ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
ea3e6ca9 1728
2fb9b26a 1729 down_write(&spl_kmem_cache_sem);
e9d7a2be 1730 list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
2fb9b26a 1731 up_write(&spl_kmem_cache_sem);
1732
b17edc10 1733 SRETURN(skc);
48e0606a
BB
1734out:
1735 kmem_free(skc->skc_name, skc->skc_name_size);
1736 kmem_free(skc, sizeof(*skc));
b17edc10 1737 SRETURN(NULL);
f1ca4da6 1738}
2fb9b26a 1739EXPORT_SYMBOL(spl_kmem_cache_create);
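
/*
 * Illustrative usage sketch (not part of the original file): creating a
 * cache of fixed-size objects with the API documented above.  The object
 * type and all my_* names are hypothetical; the constructor is assumed to
 * follow the Solaris-style convention of returning 0 on success, matching
 * how skc_ctor/skc_dtor are invoked elsewhere in this file.
 */
typedef struct my_obj {
	int	mo_state;
} my_obj_t;

static int
my_obj_ctor(void *obj, void *priv, int flags)
{
	/* Prepare the raw object memory for use. */
	((my_obj_t *)obj)->mo_state = 0;
	return (0);
}

static void
my_obj_dtor(void *obj, void *priv)
{
	/* Tear down any per-object state before the memory is reclaimed. */
}

static spl_kmem_cache_t *
my_cache_create(void)
{
	/* Passing 0 for align and flags lets the size-based defaults
	 * above pick a kmem, vmem, or Linux slab backed cache; the
	 * reclaim, priv, and vmp arguments are not needed here. */
	return (spl_kmem_cache_create("my_obj_cache", sizeof(my_obj_t), 0,
	    my_obj_ctor, my_obj_dtor, NULL, NULL, NULL, 0));
}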
f1ca4da6 1740
2b354302
BB
1741/*
1742 * Register a move callback for cache defragmentation.
1743 * XXX: Unimplemented but harmless to stub out for now.
1744 */
1745void
6576a1a7 1746spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
2b354302
BB
1747 kmem_cbrc_t (move)(void *, void *, size_t, void *))
1748{
1749 ASSERT(move != NULL);
1750}
1751EXPORT_SYMBOL(spl_kmem_cache_set_move);
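
/*
 * Illustrative sketch (not part of the original file): the shape of a move
 * callback matching the prototype above.  Because defragmentation is
 * unimplemented here the callback is never invoked; KMEM_CBRC_NO is assumed
 * to be among the kmem_cbrc_t values provided by sys/kmem.h, as in the
 * Solaris API this stub mirrors.
 */
static kmem_cbrc_t
my_obj_move(void *old_obj, void *new_obj, size_t size, void *priv)
{
	/* Decline the move; nothing acts on this until the XXX above
	 * is implemented. */
	return (KMEM_CBRC_NO);
}

/* Registered (and currently ignored) via:
 *	spl_kmem_cache_set_move(my_cache, my_obj_move);
 */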
1752
ea3e6ca9 1753/*
ecc39810 1754 * Destroy a cache and all objects associated with the cache.
ea3e6ca9 1755 */
2fb9b26a 1756void
1757spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
f1ca4da6 1758{
ea3e6ca9 1759 DECLARE_WAIT_QUEUE_HEAD(wq);
a10287e0 1760 taskqid_t id;
b17edc10 1761 SENTRY;
f1ca4da6 1762
e9d7a2be 1763 ASSERT(skc->skc_magic == SKC_MAGIC);
a073aeb0 1764 ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB));
e9d7a2be 1765
1766 down_write(&spl_kmem_cache_sem);
1767 list_del_init(&skc->skc_list);
1768 up_write(&spl_kmem_cache_sem);
2fb9b26a 1769
a10287e0 1770 /* Cancel and wait for any pending delayed tasks */
64c075c3 1771 VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
9b1b8e4c 1772
a10287e0
BB
1773 spin_lock(&skc->skc_lock);
1774 id = skc->skc_taskqid;
1775 spin_unlock(&skc->skc_lock);
1776
1777 taskq_cancel_id(spl_kmem_cache_taskq, id);
ea3e6ca9
BB
1778
1779 /* Wait until all current callers complete; this is mainly
1780 * to catch the case where a low memory situation triggers a
1781 * cache reaping action which races with this destroy. */
1782 wait_event(wq, atomic_read(&skc->skc_ref) == 0);
1783
a073aeb0
BB
1784 if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
1785 spl_magazine_destroy(skc);
1786 spl_slab_reclaim(skc, 0, 1);
1787 } else {
1788 ASSERT(skc->skc_flags & KMC_SLAB);
1789 kmem_cache_destroy(skc->skc_linux_cache);
1790 }
1791
d46630e0 1792 spin_lock(&skc->skc_lock);
d6a26c6a 1793
2fb9b26a 1794 /* Validate there are no objects in use and free all the
4afaaefa 1795 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. */
ea3e6ca9
BB
1796 ASSERT3U(skc->skc_slab_alloc, ==, 0);
1797 ASSERT3U(skc->skc_obj_alloc, ==, 0);
1798 ASSERT3U(skc->skc_slab_total, ==, 0);
1799 ASSERT3U(skc->skc_obj_total, ==, 0);
e2dcc6e2 1800 ASSERT3U(skc->skc_obj_emergency, ==, 0);
2fb9b26a 1801 ASSERT(list_empty(&skc->skc_complete_list));
a1502d76 1802
2fb9b26a 1803 kmem_free(skc->skc_name, skc->skc_name_size);
d46630e0 1804 spin_unlock(&skc->skc_lock);
ff449ac4 1805
4afaaefa 1806 kmem_free(skc, sizeof(*skc));
2fb9b26a 1807
b17edc10 1808 SEXIT;
f1ca4da6 1809}
2fb9b26a 1810EXPORT_SYMBOL(spl_kmem_cache_destroy);
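
/*
 * Illustrative usage sketch (not part of the original file): tearing down
 * the hypothetical cache from the earlier sketch.  Every object must have
 * been freed back to the cache first, per the skc_obj_alloc and
 * skc_slab_alloc assertions above.
 */
static void
my_cache_destroy(spl_kmem_cache_t *my_cache)
{
	spl_kmem_cache_destroy(my_cache);
}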
f1ca4da6 1811
ea3e6ca9
BB
1812/*
1813 * Allocate an object from a slab attached to the cache. This is used to
1814 * repopulate the per-cpu magazine caches in batches when they run low.
1815 */
4afaaefa 1816static void *
1817spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
f1ca4da6 1818{
2fb9b26a 1819 spl_kmem_obj_t *sko;
f1ca4da6 1820
e9d7a2be 1821 ASSERT(skc->skc_magic == SKC_MAGIC);
1822 ASSERT(sks->sks_magic == SKS_MAGIC);
4afaaefa 1823 ASSERT(spin_is_locked(&skc->skc_lock));
2fb9b26a 1824
a1502d76 1825 sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
4afaaefa 1826 ASSERT(sko->sko_magic == SKO_MAGIC);
1827 ASSERT(sko->sko_addr != NULL);
2fb9b26a 1828
a1502d76 1829 /* Remove from sks_free_list */
4afaaefa 1830 list_del_init(&sko->sko_list);
2fb9b26a 1831
4afaaefa 1832 sks->sks_age = jiffies;
1833 sks->sks_ref++;
1834 skc->skc_obj_alloc++;
2fb9b26a 1835
4afaaefa 1836 /* Track max obj usage statistics */
1837 if (skc->skc_obj_alloc > skc->skc_obj_max)
1838 skc->skc_obj_max = skc->skc_obj_alloc;
2fb9b26a 1839
4afaaefa 1840 /* Track max slab usage statistics */
1841 if (sks->sks_ref == 1) {
1842 skc->skc_slab_alloc++;
f1ca4da6 1843
4afaaefa 1844 if (skc->skc_slab_alloc > skc->skc_slab_max)
1845 skc->skc_slab_max = skc->skc_slab_alloc;
2fb9b26a 1846 }
1847
4afaaefa 1848 return sko->sko_addr;
1849}
c30df9c8 1850
ea3e6ca9 1851/*
e2dcc6e2
BB
1852 * Generic slab allocation function run by the global work queues.
1853 * It is responsible for allocating a new slab, linking it into the list
1854 * of partial slabs, and then waking any waiters.
4afaaefa 1855 */
e2dcc6e2
BB
1856static void
1857spl_cache_grow_work(void *data)
4afaaefa 1858{
33e94ef1 1859 spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
e2dcc6e2 1860 spl_kmem_cache_t *skc = ska->ska_cache;
e9d7a2be 1861 spl_kmem_slab_t *sks;
e2dcc6e2
BB
1862
1863 sks = spl_slab_alloc(skc, ska->ska_flags | __GFP_NORETRY | KM_NODEBUG);
1864 spin_lock(&skc->skc_lock);
1865 if (sks) {
1866 skc->skc_slab_total++;
1867 skc->skc_obj_total += sks->sks_objs;
1868 list_add_tail(&sks->sks_list, &skc->skc_partial_list);
1869 }
1870
1871 atomic_dec(&skc->skc_ref);
1872 clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
165f13c3 1873 clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
e2dcc6e2
BB
1874 wake_up_all(&skc->skc_waitq);
1875 spin_unlock(&skc->skc_lock);
1876
1877 kfree(ska);
1878}
1879
1880/*
1881 * Returns non-zero when a new slab should be available.
1882 */
1883static int
1884spl_cache_grow_wait(spl_kmem_cache_t *skc)
1885{
1886 return !test_bit(KMC_BIT_GROWING, &skc->skc_flags);
1887}
1888
1889/*
a073aeb0
BB
1890 * No available objects on any slabs, create a new slab. Note that this
1891 * functionality is disabled for KMC_SLAB caches which are backed by the
1892 * Linux slab.
e2dcc6e2
BB
1893 */
1894static int
1895spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
1896{
165f13c3 1897 int remaining, rc;
b17edc10 1898 SENTRY;
f1ca4da6 1899
e9d7a2be 1900 ASSERT(skc->skc_magic == SKC_MAGIC);
a073aeb0 1901 ASSERT((skc->skc_flags & KMC_SLAB) == 0);
ea3e6ca9 1902 might_sleep();
e2dcc6e2 1903 *obj = NULL;
e9d7a2be 1904
ea3e6ca9 1905 /*
dc1b3022
BB
1906 * Before allocating a new slab wait for any reaping to complete and
1907 * then return so the local magazine can be rechecked for new objects.
ea3e6ca9 1908 */
dc1b3022 1909 if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
2fc44f66
NB
1910 rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
1911 TASK_UNINTERRUPTIBLE);
dc1b3022
BB
1912 SRETURN(rc ? rc : -EAGAIN);
1913 }
2fb9b26a 1914
e2dcc6e2
BB
1915 /*
1916 * This is handled by dispatching a work request to the global work
1917 * queue. This allows us to asynchronously allocate a new slab while
1918 * retaining the ability to safely fall back to smaller synchronous
1919 * allocations to ensure forward progress is always maintained.
1920 */
1921 if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
1922 spl_kmem_alloc_t *ska;
4afaaefa 1923
e2dcc6e2
BB
1924 ska = kmalloc(sizeof(*ska), flags);
1925 if (ska == NULL) {
1926 clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
1927 wake_up_all(&skc->skc_waitq);
1928 SRETURN(-ENOMEM);
1929 }
4afaaefa 1930
e2dcc6e2
BB
1931 atomic_inc(&skc->skc_ref);
1932 ska->ska_cache = skc;
043f9b57 1933 ska->ska_flags = flags & ~__GFP_FS;
33e94ef1
BB
1934 taskq_init_ent(&ska->ska_tqe);
1935 taskq_dispatch_ent(spl_kmem_cache_taskq,
1936 spl_cache_grow_work, ska, 0, &ska->ska_tqe);
e2dcc6e2
BB
1937 }
1938
1939 /*
165f13c3
BB
1940 * The goal here is to only detect the rare case where a virtual slab
1941 * allocation has deadlocked. We must be careful to minimize the use
1942 * of emergency objects which are more expensive to track. Therefore,
1943 * we set a very long timeout for the asynchronous allocation and if
1944 * the timeout is reached the cache is flagged as deadlocked. From
1945 * this point only new emergency objects will be allocated until the
1946 * asynchronous allocation completes and clears the deadlocked flag.
e2dcc6e2 1947 */
165f13c3
BB
1948 if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
1949 rc = spl_emergency_alloc(skc, flags, obj);
1950 } else {
1951 remaining = wait_event_timeout(skc->skc_waitq,
1952 spl_cache_grow_wait(skc), HZ);
1953
1954 if (!remaining && test_bit(KMC_BIT_VMEM, &skc->skc_flags)) {
1955 spin_lock(&skc->skc_lock);
1956 if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
1957 set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
1958 skc->skc_obj_deadlock++;
1959 }
1960 spin_unlock(&skc->skc_lock);
1961 }
cb5c2ace 1962
165f13c3 1963 rc = -ENOMEM;
cb5c2ace 1964 }
e2dcc6e2
BB
1965
1966 SRETURN(rc);
f1ca4da6 1967}
1968
ea3e6ca9 1969/*
e2dcc6e2
BB
1970 * Refill a per-cpu magazine with objects from the slabs for this cache.
1971 * Ideally the magazine can be repopulated using existing objects which have
1972 * been released; however, if we are unable to locate enough free objects, new
1973 * slabs of objects will be created. On success NULL is returned, otherwise
1974 * the address of a single emergency object is returned for use by the caller.
ea3e6ca9 1975 */
e2dcc6e2 1976static void *
4afaaefa 1977spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
f1ca4da6 1978{
e9d7a2be 1979 spl_kmem_slab_t *sks;
e2dcc6e2
BB
1980 int count = 0, rc, refill;
1981 void *obj = NULL;
b17edc10 1982 SENTRY;
f1ca4da6 1983
e9d7a2be 1984 ASSERT(skc->skc_magic == SKC_MAGIC);
1985 ASSERT(skm->skm_magic == SKM_MAGIC);
1986
e9d7a2be 1987 refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
d46630e0 1988 spin_lock(&skc->skc_lock);
ff449ac4 1989
4afaaefa 1990 while (refill > 0) {
ea3e6ca9 1991 /* No slabs available we may need to grow the cache */
4afaaefa 1992 if (list_empty(&skc->skc_partial_list)) {
1993 spin_unlock(&skc->skc_lock);
ff449ac4 1994
e2dcc6e2
BB
1995 local_irq_enable();
1996 rc = spl_cache_grow(skc, flags, &obj);
1997 local_irq_disable();
1998
1999 /* Emergency object for immediate use by caller */
2000 if (rc == 0 && obj != NULL)
2001 SRETURN(obj);
2002
2003 if (rc)
b17edc10 2004 SGOTO(out, rc);
4afaaefa 2005
2006 /* Rescheduled to a different CPU, skm is not local */
2007 if (skm != skc->skc_mag[smp_processor_id()])
b17edc10 2008 SGOTO(out, rc);
e9d7a2be 2009
2010 /* Potentially rescheduled to the same CPU but
ecc39810 2011 * allocations may have occurred from this CPU while
e9d7a2be 2012 * we were sleeping so recalculate max refill. */
2013 refill = MIN(refill, skm->skm_size - skm->skm_avail);
4afaaefa 2014
2015 spin_lock(&skc->skc_lock);
2016 continue;
2017 }
d46630e0 2018
4afaaefa 2019 /* Grab the next available slab */
2020 sks = list_entry((&skc->skc_partial_list)->next,
2021 spl_kmem_slab_t, sks_list);
2022 ASSERT(sks->sks_magic == SKS_MAGIC);
2023 ASSERT(sks->sks_ref < sks->sks_objs);
2024 ASSERT(!list_empty(&sks->sks_free_list));
d46630e0 2025
4afaaefa 2026 /* Consume as many objects as needed to refill the requested
e9d7a2be 2027 * cache. We must also be careful not to overfill it. */
e2dcc6e2 2028 while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++count) {
e9d7a2be 2029 ASSERT(skm->skm_avail < skm->skm_size);
e2dcc6e2 2030 ASSERT(count < skm->skm_size);
4afaaefa 2031 skm->skm_objs[skm->skm_avail++] = spl_cache_obj(skc, sks);
e9d7a2be 2032 }
f1ca4da6 2033
4afaaefa 2034 /* Move slab to skc_complete_list when full */
2035 if (sks->sks_ref == sks->sks_objs) {
2036 list_del(&sks->sks_list);
2037 list_add(&sks->sks_list, &skc->skc_complete_list);
2fb9b26a 2038 }
2039 }
57d86234 2040
4afaaefa 2041 spin_unlock(&skc->skc_lock);
2042out:
e2dcc6e2 2043 SRETURN(NULL);
4afaaefa 2044}
2045
ea3e6ca9
BB
2046/*
2047 * Release an object back to the slab from which it came.
2048 */
4afaaefa 2049static void
2050spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
2051{
e9d7a2be 2052 spl_kmem_slab_t *sks = NULL;
4afaaefa 2053 spl_kmem_obj_t *sko = NULL;
b17edc10 2054 SENTRY;
4afaaefa 2055
e9d7a2be 2056 ASSERT(skc->skc_magic == SKC_MAGIC);
4afaaefa 2057 ASSERT(spin_is_locked(&skc->skc_lock));
2058
8b45dda2 2059 sko = spl_sko_from_obj(skc, obj);
a1502d76 2060 ASSERT(sko->sko_magic == SKO_MAGIC);
4afaaefa 2061 sks = sko->sko_slab;
a1502d76 2062 ASSERT(sks->sks_magic == SKS_MAGIC);
2fb9b26a 2063 ASSERT(sks->sks_cache == skc);
2fb9b26a 2064 list_add(&sko->sko_list, &sks->sks_free_list);
d6a26c6a 2065
2fb9b26a 2066 sks->sks_age = jiffies;
4afaaefa 2067 sks->sks_ref--;
2fb9b26a 2068 skc->skc_obj_alloc--;
f1ca4da6 2069
2fb9b26a 2070 /* Move slab to skc_partial_list when no longer full. Slabs
4afaaefa 2071 * are added to the head to keep the partial list in quasi-full
2072 * sorted order. Fuller at the head, emptier at the tail. */
2073 if (sks->sks_ref == (sks->sks_objs - 1)) {
2fb9b26a 2074 list_del(&sks->sks_list);
2075 list_add(&sks->sks_list, &skc->skc_partial_list);
2076 }
f1ca4da6 2077
ecc39810 2078 /* Move empty slabs to the end of the partial list so
4afaaefa 2079 * they can be easily found and freed during reclamation. */
2080 if (sks->sks_ref == 0) {
2fb9b26a 2081 list_del(&sks->sks_list);
2082 list_add_tail(&sks->sks_list, &skc->skc_partial_list);
2083 skc->skc_slab_alloc--;
2084 }
2085
b17edc10 2086 SEXIT;
4afaaefa 2087}
2088
ea3e6ca9
BB
2089/*
2090 * Allocate an object from the per-cpu magazine, or if the magazine
2091 * is empty directly allocate from a slab and repopulate the magazine.
2092 */
4afaaefa 2093void *
2094spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
2095{
2096 spl_kmem_magazine_t *skm;
4afaaefa 2097 void *obj = NULL;
b17edc10 2098 SENTRY;
4afaaefa 2099
e9d7a2be 2100 ASSERT(skc->skc_magic == SKC_MAGIC);
ea3e6ca9
BB
2101 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
2102 ASSERT(flags & KM_SLEEP);
a073aeb0 2103
ea3e6ca9 2104 atomic_inc(&skc->skc_ref);
a073aeb0
BB
2105
2106 /*
2107 * Allocate directly from a Linux slab. All optimizations are left
2108 * to the underlying cache; we only need to guarantee that KM_SLEEP
2109 * callers will never fail.
2110 */
2111 if (skc->skc_flags & KMC_SLAB) {
2112 struct kmem_cache *slc = skc->skc_linux_cache;
2113
2114 do {
2115 obj = kmem_cache_alloc(slc, flags | __GFP_COMP);
2116 if (obj && skc->skc_ctor)
2117 skc->skc_ctor(obj, skc->skc_private, flags);
2118
2119 } while ((obj == NULL) && !(flags & KM_NOSLEEP));
2120
2121 atomic_dec(&skc->skc_ref);
2122 SRETURN(obj);
2123 }
2124
429fe89c 2125 local_irq_disable();
4afaaefa 2126
2127restart:
2128 /* Safe to update per-cpu structure without lock, but
ecc39810 2129 * in the restart case we must be careful to reacquire
4afaaefa 2130 * the local magazine since this may have changed
2131 * when we need to grow the cache. */
2132 skm = skc->skc_mag[smp_processor_id()];
e9d7a2be 2133 ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n",
2134 skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm,
2135 skm->skm_size, skm->skm_refill, skm->skm_avail);
4afaaefa 2136
2137 if (likely(skm->skm_avail)) {
2138 /* Object available in CPU cache, use it */
2139 obj = skm->skm_objs[--skm->skm_avail];
ea3e6ca9 2140 skm->skm_age = jiffies;
4afaaefa 2141 } else {
e2dcc6e2
BB
2142 obj = spl_cache_refill(skc, skm, flags);
2143 if (obj == NULL)
2144 SGOTO(restart, obj = NULL);
4afaaefa 2145 }
2146
429fe89c 2147 local_irq_enable();
fece7c99 2148 ASSERT(obj);
8b45dda2 2149 ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
4afaaefa 2150
2151 /* Pre-emptively migrate object to CPU L1 cache */
2152 prefetchw(obj);
ea3e6ca9 2153 atomic_dec(&skc->skc_ref);
4afaaefa 2154
b17edc10 2155 SRETURN(obj);
4afaaefa 2156}
2157EXPORT_SYMBOL(spl_kmem_cache_alloc);
2158
ea3e6ca9
BB
2159/*
2160 * Free an object back to the local per-cpu magazine; there is no
2161 * guarantee that this is the same magazine the object was originally
2162 * allocated from. We may need to flush entire magazines back to the
2163 * slabs to make space.
2164 */
4afaaefa 2165void
2166spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
2167{
2168 spl_kmem_magazine_t *skm;
2169 unsigned long flags;
b17edc10 2170 SENTRY;
4afaaefa 2171
e9d7a2be 2172 ASSERT(skc->skc_magic == SKC_MAGIC);
ea3e6ca9
BB
2173 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
2174 atomic_inc(&skc->skc_ref);
e2dcc6e2 2175
a073aeb0
BB
2176 /*
2177 * Free the object back to the underlying Linux slab.
2178 */
2179 if (skc->skc_flags & KMC_SLAB) {
2180 if (skc->skc_dtor)
2181 skc->skc_dtor(obj, skc->skc_private);
2182
2183 kmem_cache_free(skc->skc_linux_cache, obj);
2184 goto out;
2185 }
2186
e2dcc6e2 2187 /*
a1af8fb1
BB
2188 * Only virtual slabs may have emergency objects and these objects
2189 * are guaranteed to have physical addresses. They must be removed
2190 * from the tree of emergency objects and then freed.
e2dcc6e2 2191 */
a1af8fb1
BB
2192 if ((skc->skc_flags & KMC_VMEM) && !kmem_virt(obj))
2193 SGOTO(out, spl_emergency_free(skc, obj));
e2dcc6e2 2194
4afaaefa 2195 local_irq_save(flags);
2196
2197 /* Safe to update per-cpu structure without lock, but
2198 * since no remote memory allocation tracking is performed
2199 * it is entirely possible to allocate an object from one
2200 * CPU cache and return it to another. */
2201 skm = skc->skc_mag[smp_processor_id()];
e9d7a2be 2202 ASSERT(skm->skm_magic == SKM_MAGIC);
4afaaefa 2203
2204 /* Per-CPU cache full, flush it to make space */
2205 if (unlikely(skm->skm_avail >= skm->skm_size))
d4899f47 2206 spl_cache_flush(skc, skm, skm->skm_refill);
4afaaefa 2207
2208 /* Available space in cache, use it */
2209 skm->skm_objs[skm->skm_avail++] = obj;
2210
2211 local_irq_restore(flags);
e2dcc6e2 2212out:
ea3e6ca9 2213 atomic_dec(&skc->skc_ref);
4afaaefa 2214
b17edc10 2215 SEXIT;
f1ca4da6 2216}
2fb9b26a 2217EXPORT_SYMBOL(spl_kmem_cache_free);
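
/*
 * Illustrative usage sketch (not part of the original file): a KM_SLEEP
 * allocation paired with the matching free, using the hypothetical
 * my_obj_t cache from the sketch following spl_kmem_cache_create() above.
 */
static void
my_cache_use(spl_kmem_cache_t *my_cache)
{
	my_obj_t *obj;

	/* KM_SLEEP callers may block but are guaranteed an object. */
	obj = spl_kmem_cache_alloc(my_cache, KM_SLEEP);
	obj->mo_state = 1;

	/* Objects must be returned to the cache they were taken from. */
	spl_kmem_cache_free(my_cache, obj);
}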
5c2bb9b2 2218
ea3e6ca9 2219/*
ecc39810
BB
2220 * The generic shrinker function for all caches. Under Linux a shrinker
2221 * may not be tightly coupled with a slab cache. In fact Linux always
2222 * systematically tries calling all registered shrinker callbacks which
ea3e6ca9
BB
2223 * report that they contain unused objects. Because of this we only
2224 * register one shrinker function in the shim layer for all slab caches.
2225 * We always attempt to shrink all caches when this generic shrinker
2226 * is called. The shrinker should return the number of free objects
2227 * in the cache when called with nr_to_scan == 0 but not attempt to
2228 * free any objects. When nr_to_scan > 0 it is a request that nr_to_scan
cef7605c
PS
2229 * objects should be freed, which differs from Solaris semantics.
2230 * Solaris semantics are to free all available objects which may (and
2231 * probably will) be more objects than the requested nr_to_scan.
ea3e6ca9 2232 */
a55bcaad
BB
2233static int
2234__spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
2235 struct shrink_control *sc)
2fb9b26a 2236{
e9d7a2be 2237 spl_kmem_cache_t *skc;
376dc35e 2238 int alloc = 0;
5c2bb9b2 2239
e9d7a2be 2240 down_read(&spl_kmem_cache_sem);
ea3e6ca9 2241 list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
a55bcaad 2242 if (sc->nr_to_scan)
cef7605c
PS
2243 spl_kmem_cache_reap_now(skc,
2244 MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
ea3e6ca9
BB
2245
2246 /*
376dc35e 2247 * Presume everything alloc'ed is reclaimable; this ensures
ea3e6ca9
BB
2248 * we are called again with nr_to_scan > 0 so we can try to
2249 * reclaim. The exact number is not important either, so
2250 * we forgo taking this already highly contended lock.
2251 */
376dc35e 2252 alloc += skc->skc_obj_alloc;
ea3e6ca9 2253 }
e9d7a2be 2254 up_read(&spl_kmem_cache_sem);
2fb9b26a 2255
b9b37153 2256 /*
376dc35e
BB
2257 * When KMC_RECLAIM_ONCE is set allow only a single reclaim pass.
2258 * This functionality only exists to work around a rare issue where
2259 * shrink_slab() is repeatedly invoked by many cores causing the
2260 * system to thrash.
b9b37153 2261 */
376dc35e
BB
2262 if ((spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE) && sc->nr_to_scan)
2263 return (-1);
b9b37153 2264
aa363c5c 2265 return (MAX(alloc, 0));
5c2bb9b2 2266}
5c2bb9b2 2267
a55bcaad
BB
2268SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
2269
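/*
 * Worked example (illustrative, not part of the original file): the helper
 * below mirrors the per-cache scan count computed in the loop above.  With
 * sc->nr_to_scan = 128 and a cache holding 32 objects per slab, fls64(32)
 * returns 6, so MAX(128 >> 6, 1) = 2 objects are requested from that cache
 * via spl_kmem_cache_reap_now().
 */
static inline unsigned long
spl_example_scan_count(unsigned long nr_to_scan, uint32_t slab_objs)
{
	return (MAX(nr_to_scan >> fls64(slab_objs), 1));
}
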
ea3e6ca9
BB
2270/*
2271 * Call the registered reclaim function for a cache. Depending on how
2272 * many and which objects are released it may simply repopulate the
2273 * local magazine which will then need to age-out. Objects which cannot
2274 * fit in the magazine will be released back to their slabs which will
2275 * also need to age out before being released. This is all just best
2276 * effort and we do not want to thrash creating and destroying slabs.
2277 */
57d86234 2278void
cef7605c 2279spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
57d86234 2280{
b17edc10 2281 SENTRY;
e9d7a2be 2282
2283 ASSERT(skc->skc_magic == SKC_MAGIC);
ea3e6ca9 2284 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
2fb9b26a 2285
a073aeb0
BB
2286 atomic_inc(&skc->skc_ref);
2287
2288 /*
2289 * Execute the registered reclaim callback if it exists. The
2290 * per-cpu caches will be drained when KMC_EXPIRE_MEM is set.
2291 */
2292 if (skc->skc_flags & KMC_SLAB) {
2293 if (skc->skc_reclaim)
2294 skc->skc_reclaim(skc->skc_private);
2295
2296 if (spl_kmem_cache_expire & KMC_EXPIRE_MEM)
2297 kmem_cache_shrink(skc->skc_linux_cache);
2298
2299 SGOTO(out, 0);
ea3e6ca9 2300 }
2fb9b26a 2301
a073aeb0
BB
2302 /*
2303 * Prevent concurrent cache reaping when contended.
2304 */
2305 if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
2306 SGOTO(out, 0);
4afaaefa 2307
b78d4b9d
BB
2308 /*
2309 * When a reclaim function is available it may be invoked repeatedly
2310 * until at least a single slab can be freed. This ensures that we
2311 * do free memory back to the system. This helps minimize the chance
2312 * of an OOM event when the bulk of memory is used by the slab.
2313 *
2314 * When free slabs are already available the reclaim callback will be
2315 * skipped. Additionally, if no forward progress is detected despite
2316 * a reclaim function the cache will be skipped to avoid deadlock.
2317 *
2318 * Longer term this would be the correct place to add the code which
2319 * repacks the slabs in order to minimize fragmentation.
2320 */
2321 if (skc->skc_reclaim) {
2322 uint64_t objects = UINT64_MAX;
2323 int do_reclaim;
2324
2325 do {
2326 spin_lock(&skc->skc_lock);
2327 do_reclaim =
2328 (skc->skc_slab_total > 0) &&
2329 ((skc->skc_slab_total - skc->skc_slab_alloc) == 0) &&
2330 (skc->skc_obj_alloc < objects);
2331
2332 objects = skc->skc_obj_alloc;
2333 spin_unlock(&skc->skc_lock);
2334
2335 if (do_reclaim)
2336 skc->skc_reclaim(skc->skc_private);
2337
2338 } while (do_reclaim);
2339 }
4afaaefa 2340
0936c344
BB
2341 /* Reclaim from the magazine then the slabs ignoring age and delay. */
2342 if (spl_kmem_cache_expire & KMC_EXPIRE_MEM) {
2343 spl_kmem_magazine_t *skm;
251e7a77 2344 unsigned long irq_flags;
0936c344 2345
251e7a77
RY
2346 local_irq_save(irq_flags);
2347 skm = skc->skc_mag[smp_processor_id()];
2348 spl_cache_flush(skc, skm, skm->skm_avail);
2349 local_irq_restore(irq_flags);
0936c344
BB
2350 }
2351
c0e0fc14 2352 spl_slab_reclaim(skc, count, 1);
ea3e6ca9 2353 clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
e3020723 2354 smp_wmb();
dc1b3022 2355 wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
a073aeb0 2356out:
ea3e6ca9 2357 atomic_dec(&skc->skc_ref);
4afaaefa 2358
b17edc10 2359 SEXIT;
57d86234 2360}
2fb9b26a 2361EXPORT_SYMBOL(spl_kmem_cache_reap_now);
57d86234 2362
ea3e6ca9
BB
2363/*
2364 * Reap all free slabs from all registered caches.
2365 */
f1b59d26 2366void
2fb9b26a 2367spl_kmem_reap(void)
937879f1 2368{
a55bcaad
BB
2369 struct shrink_control sc;
2370
2371 sc.nr_to_scan = KMC_REAP_CHUNK;
2372 sc.gfp_mask = GFP_KERNEL;
2373
2374 __spl_kmem_cache_generic_shrinker(NULL, &sc);
f1ca4da6 2375}
2fb9b26a 2376EXPORT_SYMBOL(spl_kmem_reap);
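
/*
 * Illustrative usage sketch (not part of the original file): manually
 * forcing a reap.  skc->skc_reap (SPL_KMEM_CACHE_REAP, set at create time
 * above) serves as the per-cache batch size; spl_kmem_reap() then walks
 * every registered cache through the generic shrinker.  The cache pointer
 * name is hypothetical.
 */
static void
my_cache_reap(spl_kmem_cache_t *my_cache)
{
	/* Reap free slabs from one specific cache... */
	spl_kmem_cache_reap_now(my_cache, my_cache->skc_reap);

	/* ...or from every registered cache. */
	spl_kmem_reap();
}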
5d86345d 2377
ff449ac4 2378#if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
c6dc93d6 2379static char *
4afaaefa 2380spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
d6a26c6a 2381{
e9d7a2be 2382 int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
d6a26c6a 2383 int i, flag = 1;
2384
2385 ASSERT(str != NULL && len >= 17);
e9d7a2be 2386 memset(str, 0, len);
d6a26c6a 2387
2388 /* Check for a fully printable string, and while we are at
2389 * it place the printable characters in the passed buffer. */
2390 for (i = 0; i < size; i++) {
e9d7a2be 2391 str[i] = ((char *)(kd->kd_addr))[i];
2392 if (isprint(str[i])) {
2393 continue;
2394 } else {
2395 /* Minimum number of printable characters found
2396 * to make it worthwhile to print this as ascii. */
2397 if (i > min)
2398 break;
2399
2400 flag = 0;
2401 break;
2402 }
d6a26c6a 2403 }
2404
2405 if (!flag) {
2406 sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
2407 *((uint8_t *)kd->kd_addr),
2408 *((uint8_t *)kd->kd_addr + 2),
2409 *((uint8_t *)kd->kd_addr + 4),
2410 *((uint8_t *)kd->kd_addr + 6),
2411 *((uint8_t *)kd->kd_addr + 8),
2412 *((uint8_t *)kd->kd_addr + 10),
2413 *((uint8_t *)kd->kd_addr + 12),
2414 *((uint8_t *)kd->kd_addr + 14));
2415 }
2416
2417 return str;
2418}
2419
a1502d76 2420static int
2421spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
2422{
2423 int i;
b17edc10 2424 SENTRY;
a1502d76 2425
2426 spin_lock_init(lock);
2427 INIT_LIST_HEAD(list);
2428
2429 for (i = 0; i < size; i++)
2430 INIT_HLIST_HEAD(&kmem_table[i]);
2431
b17edc10 2432 SRETURN(0);
a1502d76 2433}
2434
ff449ac4 2435static void
2436spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
5d86345d 2437{
2fb9b26a 2438 unsigned long flags;
2439 kmem_debug_t *kd;
2440 char str[17];
b17edc10 2441 SENTRY;
2fb9b26a 2442
ff449ac4 2443 spin_lock_irqsave(lock, flags);
2444 if (!list_empty(list))
a0f6da3d 2445 printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
2446 "size", "data", "func", "line");
2fb9b26a 2447
ff449ac4 2448 list_for_each_entry(kd, list, kd_list)
a0f6da3d 2449 printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
b6b2acc6 2450 (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
2fb9b26a 2451 kd->kd_func, kd->kd_line);
2452
ff449ac4 2453 spin_unlock_irqrestore(lock, flags);
b17edc10 2454 SEXIT;
ff449ac4 2455}
2456#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
a1502d76 2457#define spl_kmem_init_tracking(list, lock, size)
ff449ac4 2458#define spl_kmem_fini_tracking(list, lock)
2459#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
2460
36b313da
BB
2461static void
2462spl_kmem_init_globals(void)
2463{
2464 struct zone *zone;
2465
2466 /* For now all zones are included; it may be wise to restrict
2467 * this to normal and highmem zones if we see problems. */
2468 for_each_zone(zone) {
2469
2470 if (!populated_zone(zone))
2471 continue;
2472
baf2979e
BB
2473 minfree += min_wmark_pages(zone);
2474 desfree += low_wmark_pages(zone);
2475 lotsfree += high_wmark_pages(zone);
36b313da 2476 }
4ab13d3b
BB
2477
2478 /* Solaris default values */
96dded38
BB
2479 swapfs_minfree = MAX(2*1024*1024 >> PAGE_SHIFT, physmem >> 3);
2480 swapfs_reserve = MIN(4*1024*1024 >> PAGE_SHIFT, physmem >> 4);
36b313da
BB
2481}
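
/*
 * Worked example (illustrative, not part of the original file): with 4 KiB
 * pages, 2*1024*1024 >> PAGE_SHIFT is 512 pages and 4*1024*1024 >>
 * PAGE_SHIFT is 1024 pages.  On a 4 GiB machine (physmem = 1048576 pages)
 * this yields swapfs_minfree = MAX(512, 131072) = 131072 pages (512 MiB)
 * and swapfs_reserve = MIN(1024, 65536) = 1024 pages (4 MiB).
 */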
2482
d1ff2312
BB
2483/*
2484 * Called at module init when it is safe to use spl_kallsyms_lookup_name()
2485 */
2486int
2487spl_kmem_init_kallsyms_lookup(void)
2488{
5232d256
BB
2489#ifdef HAVE_PGDAT_HELPERS
2490# ifndef HAVE_FIRST_ONLINE_PGDAT
d1ff2312
BB
2491 first_online_pgdat_fn = (first_online_pgdat_t)
2492 spl_kallsyms_lookup_name("first_online_pgdat");
e11d6c5f
BB
2493 if (!first_online_pgdat_fn) {
2494 printk(KERN_ERR "Error: Unknown symbol first_online_pgdat\n");
d1ff2312 2495 return -EFAULT;
e11d6c5f 2496 }
5232d256 2497# endif /* HAVE_FIRST_ONLINE_PGDAT */
d1ff2312 2498
5232d256 2499# ifndef HAVE_NEXT_ONLINE_PGDAT
d1ff2312
BB
2500 next_online_pgdat_fn = (next_online_pgdat_t)
2501 spl_kallsyms_lookup_name("next_online_pgdat");
e11d6c5f
BB
2502 if (!next_online_pgdat_fn) {
2503 printk(KERN_ERR "Error: Unknown symbol next_online_pgdat\n");
d1ff2312 2504 return -EFAULT;
e11d6c5f 2505 }
5232d256 2506# endif /* HAVE_NEXT_ONLINE_PGDAT */
d1ff2312 2507
5232d256 2508# ifndef HAVE_NEXT_ZONE
d1ff2312
BB
2509 next_zone_fn = (next_zone_t)
2510 spl_kallsyms_lookup_name("next_zone");
e11d6c5f
BB
2511 if (!next_zone_fn) {
2512 printk(KERN_ERR "Error: Unknown symbol next_zone\n");
d1ff2312 2513 return -EFAULT;
e11d6c5f 2514 }
5232d256
BB
2515# endif /* HAVE_NEXT_ZONE */
2516
2517#else /* HAVE_PGDAT_HELPERS */
2518
2519# ifndef HAVE_PGDAT_LIST
124ca8a5 2520 pgdat_list_addr = *(struct pglist_data **)
5232d256
BB
2521 spl_kallsyms_lookup_name("pgdat_list");
2522 if (!pgdat_list_addr) {
2523 printk(KERN_ERR "Error: Unknown symbol pgdat_list\n");
2524 return -EFAULT;
2525 }
2526# endif /* HAVE_PGDAT_LIST */
2527#endif /* HAVE_PGDAT_HELPERS */
d1ff2312 2528
6ae7fef5 2529#if defined(NEED_GET_ZONE_COUNTS) && !defined(HAVE_GET_ZONE_COUNTS)
d1ff2312
BB
2530 get_zone_counts_fn = (get_zone_counts_t)
2531 spl_kallsyms_lookup_name("get_zone_counts");
e11d6c5f
BB
2532 if (!get_zone_counts_fn) {
2533 printk(KERN_ERR "Error: Unknown symbol get_zone_counts\n");
d1ff2312 2534 return -EFAULT;
e11d6c5f 2535 }
6ae7fef5 2536#endif /* NEED_GET_ZONE_COUNTS && !HAVE_GET_ZONE_COUNTS */
d1ff2312
BB
2537
2538 /*
2539 * It is now safe to initialize the global tunings which rely on
2540 * the use of the for_each_zone() macro. This macro in turns
2541 * depends on the *_pgdat symbols which are now available.
2542 */
2543 spl_kmem_init_globals();
2544
e76f4bf1 2545#ifndef HAVE_SHRINK_DCACHE_MEMORY
fe71c0e5 2546 /* When shrink_dcache_memory_fn == NULL support is disabled */
e76f4bf1 2547 shrink_dcache_memory_fn = (shrink_dcache_memory_t)
fe71c0e5 2548 spl_kallsyms_lookup_name("shrink_dcache_memory");
e76f4bf1
BB
2549#endif /* HAVE_SHRINK_DCACHE_MEMORY */
2550
2551#ifndef HAVE_SHRINK_ICACHE_MEMORY
fe71c0e5 2552 /* When shrink_icache_memory_fn == NULL support is disabled */
e76f4bf1 2553 shrink_icache_memory_fn = (shrink_icache_memory_t)
fe71c0e5 2554 spl_kallsyms_lookup_name("shrink_icache_memory");
e76f4bf1
BB
2555#endif /* HAVE_SHRINK_ICACHE_MEMORY */
2556
d1ff2312
BB
2557 return 0;
2558}
2559
a1502d76 2560int
2561spl_kmem_init(void)
2562{
2563 int rc = 0;
b17edc10 2564 SENTRY;
a1502d76 2565
a1502d76 2566#ifdef DEBUG_KMEM
d04c8a56
BB
2567 kmem_alloc_used_set(0);
2568 vmem_alloc_used_set(0);
a1502d76 2569
2570 spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
2571 spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
2572#endif
5c7a0369
TC
2573
2574 init_rwsem(&spl_kmem_cache_sem);
2575 INIT_LIST_HEAD(&spl_kmem_cache_list);
2576 spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
2577 1, maxclsyspri, 1, 32, TASKQ_PREPOPULATE);
2578
2579 spl_register_shrinker(&spl_kmem_cache_shrinker);
2580
b17edc10 2581 SRETURN(rc);
a1502d76 2582}
2583
ff449ac4 2584void
2585spl_kmem_fini(void)
2586{
ab4e74cc
BB
2587 SENTRY;
2588
2589 spl_unregister_shrinker(&spl_kmem_cache_shrinker);
2590 taskq_destroy(spl_kmem_cache_taskq);
2591
ff449ac4 2592#ifdef DEBUG_KMEM
2593 /* Display all unreclaimed memory addresses, including the
2594 * allocation size and the first few bytes of what's located
2595 * at that address to aid in debugging. Performance is not
2596 * a serious concern here since it is module unload time. */
d04c8a56 2597 if (kmem_alloc_used_read() != 0)
b17edc10 2598 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
3cb77549
BB
2599 "kmem leaked %ld/%ld bytes\n",
2600 kmem_alloc_used_read(), kmem_alloc_max);
ff449ac4 2601
2fb9b26a 2602
d04c8a56 2603 if (vmem_alloc_used_read() != 0)
b17edc10 2604 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
3cb77549
BB
2605 "vmem leaked %ld/%ld bytes\n",
2606 vmem_alloc_used_read(), vmem_alloc_max);
2fb9b26a 2607
ff449ac4 2608 spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
2609 spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
2610#endif /* DEBUG_KMEM */
2fb9b26a 2611
b17edc10 2612 SEXIT;
5d86345d 2613}