Remove compat includes from sys/types.h
diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
index 16eb4f884364e83597a61578c9aee9831bac9b95..502f5365b67ec848e8e4be1f280ebb52c0de7461 100644
--- a/module/spl/spl-kmem.c
+++ b/module/spl/spl-kmem.c
@@ -6,7 +6,7 @@
  *  UCRL-CODE-235197
  *
  *  This file is part of the SPL, Solaris Porting Layer.
- *  For details, see <http://github.com/behlendorf/spl/>.
+ *  For details, see <http://zfsonlinux.org/>.
  *
  *  The SPL is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License as published by the
 \*****************************************************************************/
 
 #include <sys/kmem.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_KMEM
+#include <linux/mm_compat.h>
+#include <linux/wait_compat.h>
 
 /*
- * The minimum amount of memory measured in pages to be free at all
- * times on the system.  This is similar to Linux's zone->pages_min
- * multiplied by the number of zones and is sized based on that.
+ * Within the scope of the spl-kmem.c file the kmem_cache_* definitions
+ * are removed to allow access to the real Linux slab allocator.
  */
-pgcnt_t minfree = 0;
-EXPORT_SYMBOL(minfree);
+#undef kmem_cache_destroy
+#undef kmem_cache_create
+#undef kmem_cache_alloc
+#undef kmem_cache_free
+
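
For context, a minimal illustration (assumed, not taken from the actual SPL headers) of the macro shadowing that makes the #undef block above necessary: SPL's sys/kmem.h is expected to redirect the generic kmem_cache_* names to the SPL cache implementation, so this file has to drop those macros to reach the native Linux slab allocator.

/*
 * Hypothetical sketch only; the real definitions live in SPL's sys/kmem.h
 * and may differ in argument lists and spelling.
 */
#define kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) \
        spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl)
#define kmem_cache_alloc(skc, flags)    spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj)       spl_kmem_cache_free(skc, obj)
#define kmem_cache_destroy(skc)         spl_kmem_cache_destroy(skc)

/*
 * After the #undef block above, a call such as
 * kmem_cache_create("foo", 64, 0, 0, NULL) resolves to the kernel's own
 * slab allocator rather than the SPL wrapper, which is exactly what the
 * KMC_SLAB support added by this patch needs.
 */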
 
 /*
- * The desired amount of memory measured in pages to be free at all
- * times on the system.  This is similar to Linux's zone->pages_low
- * multiplied by the number of zones and is sized based on that.
- * Assuming all zones are being used roughly equally, when we drop
- * below this threshold asynchronous page reclamation is triggered.
+ * Cache expiration was implemented because it was part of the default Solaris
+ * kmem_cache behavior.  The idea is that per-cpu objects which haven't been
+ * accessed in several seconds should be returned to the cache.  On the other
+ * hand Linux slabs never move objects back to the slabs unless there is
+ * memory pressure on the system.  By default the Linux method is enabled
+ * because it has been shown to improve responsiveness on low memory systems.
+ * This policy may be changed by setting KMC_EXPIRE_AGE or KMC_EXPIRE_MEM.
  */
-pgcnt_t desfree = 0;
-EXPORT_SYMBOL(desfree);
+unsigned int spl_kmem_cache_expire = KMC_EXPIRE_MEM;
+EXPORT_SYMBOL(spl_kmem_cache_expire);
+module_param(spl_kmem_cache_expire, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)");
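
As an illustration of how this tunable gates the ageing path, a hedged sketch (the flag values are inferred from the "(0x1)"/"(0x2)" hints in the description above, and the example_* helper is invented; the real check appears later in spl_cache_age()):

#define KMC_EXPIRE_AGE  0x1     /* assumed: age idle objects out of magazines */
#define KMC_EXPIRE_MEM  0x2     /* assumed: wait for memory pressure (default) */

/* Sketch: the periodic ageing task is a no-op unless age-based
 * expiration has been enabled through the module parameter. */
static int
example_expire_by_age(void)
{
        return ((spl_kmem_cache_expire & KMC_EXPIRE_AGE) != 0);
}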
 
 /*
- * When above this amount of memory measures in pages the system is
- * determined to have enough free memory.  This is similar to Linux's
- * zone->pages_high multiplied by the number of zones and is sized based
- * on that.  Assuming all zones are being used roughly equally, when
- * asynchronous page reclamation reaches this threshold it stops.
+ * The default behavior is to report the number of objects remaining in the
+ * cache.  This allows the Linux VM to repeatedly reclaim objects from the
+ * cache when memory is low to satisfy other memory allocations.  Alternately,
+ * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
+ * is reclaimed.  This may increase the likelihood of out of memory events.
  */
-pgcnt_t lotsfree = 0;
-EXPORT_SYMBOL(lotsfree);
+unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
+module_param(spl_kmem_cache_reclaim, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");
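
A rough sketch of the behavior described above (not the actual SPL shrinker callback; the example_* helper is invented and the KMC_RECLAIM_ONCE value is assumed from the description):

#define KMC_RECLAIM_ONCE        0x1     /* assumed from the parameter description */

/* Sketch: how many objects to advertise to the VM as reclaimable. */
static unsigned long
example_shrinker_count(unsigned long objs_in_cache)
{
        /*
         * With KMC_RECLAIM_ONCE only a single pass worth of work is
         * reported, so the VM stops after one call; otherwise the full
         * object count is reported and the VM may keep calling back
         * while memory remains low.
         */
        if (spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE)
                return (objs_in_cache ? 1 : 0);

        return (objs_in_cache);
}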
 
-/* Unused always 0 in this implementation */
-pgcnt_t needfree = 0;
-EXPORT_SYMBOL(needfree);
+unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
+module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");
 
-pgcnt_t swapfs_minfree = 0;
-EXPORT_SYMBOL(swapfs_minfree);
+unsigned int spl_kmem_cache_obj_per_slab_min = SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN;
+module_param(spl_kmem_cache_obj_per_slab_min, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab_min,
+    "Minimal number of objects per slab");
 
-pgcnt_t swapfs_reserve = 0;
-EXPORT_SYMBOL(swapfs_reserve);
+unsigned int spl_kmem_cache_max_size = 32;
+module_param(spl_kmem_cache_max_size, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
+
+/*
+ * For small objects the Linux slab allocator should be used to make the most
+ * efficient use of the memory.  However, large objects are not supported by
+ * the Linux slab and therefore the SPL implementation is preferred.  A cutoff
+ * of 16K was determined to be optimal for architectures using 4K pages.
+ */
+#if PAGE_SIZE == 4096
+unsigned int spl_kmem_cache_slab_limit = 16384;
+#else
+unsigned int spl_kmem_cache_slab_limit = 0;
+#endif
+module_param(spl_kmem_cache_slab_limit, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
+    "Objects less than N bytes use the Linux slab");
+
+unsigned int spl_kmem_cache_kmem_limit = (PAGE_SIZE / 4);
+module_param(spl_kmem_cache_kmem_limit, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_kmem_limit,
+    "Objects less than N bytes use the kmalloc");
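
Taken together, these limits (plus the KMC_SLAB/KMC_KMEM/KMC_VMEM flags used elsewhere in this file) drive the backing-store choice made later in spl_kmem_cache_create(); a condensed restatement of that selection, with an invented example_* helper:

/*
 * Simplified sketch of the selection logic found later in
 * spl_kmem_cache_create().  "size" is the caller-requested object size,
 * "obj_size" includes the per-object SPL overhead.
 */
static int
example_pick_backing_store(size_t size, size_t obj_size)
{
        if (spl_kmem_cache_slab_limit &&
            size <= (size_t)spl_kmem_cache_slab_limit)
                return (KMC_SLAB);      /* native Linux slab */

        if (obj_size <= spl_kmem_cache_kmem_limit)
                return (KMC_KMEM);      /* kmalloc backed SPL slab */

        return (KMC_VMEM);              /* vmalloc backed SPL slab */
}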
 
 vmem_t *heap_arena = NULL;
 EXPORT_SYMBOL(heap_arena);
@@ -80,147 +105,14 @@ EXPORT_SYMBOL(zio_alloc_arena);
 vmem_t *zio_arena = NULL;
 EXPORT_SYMBOL(zio_arena);
 
-#ifndef HAVE_GET_VMALLOC_INFO
-get_vmalloc_info_t get_vmalloc_info_fn = SYMBOL_POISON;
-EXPORT_SYMBOL(get_vmalloc_info_fn);
-#endif /* HAVE_GET_VMALLOC_INFO */
-
-#ifdef HAVE_PGDAT_HELPERS
-# ifndef HAVE_FIRST_ONLINE_PGDAT
-first_online_pgdat_t first_online_pgdat_fn = SYMBOL_POISON;
-EXPORT_SYMBOL(first_online_pgdat_fn);
-# endif /* HAVE_FIRST_ONLINE_PGDAT */
-
-# ifndef HAVE_NEXT_ONLINE_PGDAT
-next_online_pgdat_t next_online_pgdat_fn = SYMBOL_POISON;
-EXPORT_SYMBOL(next_online_pgdat_fn);
-# endif /* HAVE_NEXT_ONLINE_PGDAT */
-
-# ifndef HAVE_NEXT_ZONE
-next_zone_t next_zone_fn = SYMBOL_POISON;
-EXPORT_SYMBOL(next_zone_fn);
-# endif /* HAVE_NEXT_ZONE */
-
-#else /* HAVE_PGDAT_HELPERS */
-
-# ifndef HAVE_PGDAT_LIST
-struct pglist_data *pgdat_list_addr = SYMBOL_POISON;
-EXPORT_SYMBOL(pgdat_list_addr);
-# endif /* HAVE_PGDAT_LIST */
-
-#endif /* HAVE_PGDAT_HELPERS */
-
-#ifdef NEED_GET_ZONE_COUNTS
-# ifndef HAVE_GET_ZONE_COUNTS
-get_zone_counts_t get_zone_counts_fn = SYMBOL_POISON;
-EXPORT_SYMBOL(get_zone_counts_fn);
-# endif /* HAVE_GET_ZONE_COUNTS */
-
-unsigned long
-spl_global_page_state(spl_zone_stat_item_t item)
-{
-       unsigned long active;
-       unsigned long inactive;
-       unsigned long free;
-
-       get_zone_counts(&active, &inactive, &free);
-       switch (item) {
-       case SPL_NR_FREE_PAGES: return free;
-       case SPL_NR_INACTIVE:   return inactive;
-       case SPL_NR_ACTIVE:     return active;
-       default:                ASSERT(0); /* Unsupported */
-       }
-
-       return 0;
-}
-#else
-# ifdef HAVE_GLOBAL_PAGE_STATE
-unsigned long
-spl_global_page_state(spl_zone_stat_item_t item)
-{
-       unsigned long pages = 0;
-
-       switch (item) {
-       case SPL_NR_FREE_PAGES:
-#  ifdef HAVE_ZONE_STAT_ITEM_NR_FREE_PAGES
-               pages += global_page_state(NR_FREE_PAGES);
-#  endif
-               break;
-       case SPL_NR_INACTIVE:
-#  ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE
-               pages += global_page_state(NR_INACTIVE);
-#  endif
-#  ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_ANON
-               pages += global_page_state(NR_INACTIVE_ANON);
-#  endif
-#  ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_FILE
-               pages += global_page_state(NR_INACTIVE_FILE);
-#  endif
-               break;
-       case SPL_NR_ACTIVE:
-#  ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE
-               pages += global_page_state(NR_ACTIVE);
-#  endif
-#  ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_ANON
-               pages += global_page_state(NR_ACTIVE_ANON);
-#  endif
-#  ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_FILE
-               pages += global_page_state(NR_ACTIVE_FILE);
-#  endif
-               break;
-       default:
-               ASSERT(0); /* Unsupported */
-       }
-
-       return pages;
-}
-# else
-#  error "Both global_page_state() and get_zone_counts() unavailable"
-# endif /* HAVE_GLOBAL_PAGE_STATE */
-#endif /* NEED_GET_ZONE_COUNTS */
-EXPORT_SYMBOL(spl_global_page_state);
-
-#if !defined(HAVE_INVALIDATE_INODES) && !defined(HAVE_INVALIDATE_INODES_CHECK)
-invalidate_inodes_t invalidate_inodes_fn = SYMBOL_POISON;
-EXPORT_SYMBOL(invalidate_inodes_fn);
-#endif /* !HAVE_INVALIDATE_INODES && !HAVE_INVALIDATE_INODES_CHECK */
-
-#ifndef HAVE_SHRINK_DCACHE_MEMORY
-shrink_dcache_memory_t shrink_dcache_memory_fn = SYMBOL_POISON;
-EXPORT_SYMBOL(shrink_dcache_memory_fn);
-#endif /* HAVE_SHRINK_DCACHE_MEMORY */
-
-#ifndef HAVE_SHRINK_ICACHE_MEMORY
-shrink_icache_memory_t shrink_icache_memory_fn = SYMBOL_POISON;
-EXPORT_SYMBOL(shrink_icache_memory_fn);
-#endif /* HAVE_SHRINK_ICACHE_MEMORY */
-
-pgcnt_t
-spl_kmem_availrmem(void)
-{
-       /* The amount of easily available memory */
-       return (spl_global_page_state(SPL_NR_FREE_PAGES) +
-               spl_global_page_state(SPL_NR_INACTIVE));
-}
-EXPORT_SYMBOL(spl_kmem_availrmem);
-
 size_t
 vmem_size(vmem_t *vmp, int typemask)
 {
-        struct vmalloc_info vmi;
-       size_t size = 0;
-
-       ASSERT(vmp == NULL);
-       ASSERT(typemask & (VMEM_ALLOC | VMEM_FREE));
+       ASSERT3P(vmp, ==, NULL);
+       ASSERT3S(typemask & VMEM_ALLOC, ==, VMEM_ALLOC);
+       ASSERT3S(typemask & VMEM_FREE, ==, VMEM_FREE);
 
-       get_vmalloc_info(&vmi);
-       if (typemask & VMEM_ALLOC)
-               size += (size_t)vmi.used;
-
-       if (typemask & VMEM_FREE)
-               size += (size_t)(VMALLOC_TOTAL - vmi.used);
-
-       return size;
+       return (VMALLOC_TOTAL);
 }
 EXPORT_SYMBOL(vmem_size);
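
Usage note: with the simplified implementation above the typemask no longer changes the result, so a hypothetical caller passing both required bits simply learns the size of the kernel's vmalloc arena:

/* Hypothetical caller; both type bits must be set to satisfy the ASSERTs. */
static size_t
example_vmem_arena_size(void)
{
        return (vmem_size(NULL, VMEM_ALLOC | VMEM_FREE));
}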
 
@@ -231,29 +123,6 @@ kmem_debugging(void)
 }
 EXPORT_SYMBOL(kmem_debugging);
 
-#ifndef HAVE_KVASPRINTF
-/* Simplified asprintf. */
-char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
-{
-       unsigned int len;
-       char *p;
-       va_list aq;
-
-       va_copy(aq, ap);
-       len = vsnprintf(NULL, 0, fmt, aq);
-       va_end(aq);
-
-       p = kmalloc(len+1, gfp);
-       if (!p)
-               return NULL;
-
-       vsnprintf(p, len+1, fmt, ap);
-
-       return p;
-}
-EXPORT_SYMBOL(kvasprintf);
-#endif /* HAVE_KVASPRINTF */
-
 char *
 kmem_vasprintf(const char *fmt, va_list ap)
 {
@@ -391,12 +260,12 @@ kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, const void *
        struct hlist_node *node;
        struct kmem_debug *p;
        unsigned long flags;
-       SENTRY;
 
        spin_lock_irqsave(lock, flags);
 
-       head = &table[hash_ptr(addr, bits)];
-       hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
+       head = &table[hash_ptr((void *)addr, bits)];
+       hlist_for_each(node, head) {
+               p = list_entry(node, struct kmem_debug, kd_hlist);
                if (p->kd_addr == addr) {
                        hlist_del_init(&p->kd_hlist);
                        list_del_init(&p->kd_list);
@@ -407,7 +276,7 @@ kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, const void *
 
        spin_unlock_irqrestore(lock, flags);
 
-       SRETURN(NULL);
+       return (NULL);
 }
 
 void *
@@ -417,28 +286,26 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
        void *ptr = NULL;
        kmem_debug_t *dptr;
        unsigned long irq_flags;
-       SENTRY;
 
        /* Function may be called with KM_NOSLEEP so failure is possible */
        dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
            flags & ~__GFP_ZERO);
 
        if (unlikely(dptr == NULL)) {
-               SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
-                   "kmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
-                   sizeof(kmem_debug_t), flags, func, line,
-                   kmem_alloc_used_read(), kmem_alloc_max);
+               printk(KERN_WARNING "debug kmem_alloc(%ld, 0x%x) at %s:%d "
+                   "failed (%lld/%llu)\n", sizeof(kmem_debug_t), flags,
+                   func, line, kmem_alloc_used_read(), kmem_alloc_max);
        } else {
                /*
                 * Marked unlikely because we should never be doing this,
                 * we tolerate up to 2 pages but a single page is best.
                 */
                if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
-                       SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "large "
-                           "kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
-                           (unsigned long long) size, flags, func, line,
+                       printk(KERN_WARNING "large kmem_alloc(%llu, 0x%x) "
+                           "at %s:%d failed (%lld/%llu)\n",
+                           (unsigned long long)size, flags, func, line,
                            kmem_alloc_used_read(), kmem_alloc_max);
-                       spl_debug_dumpstack(NULL);
+                       spl_dumpstack();
                }
 
                /*
@@ -450,9 +317,9 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
                dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
                if (unlikely(dptr->kd_func == NULL)) {
                        kfree(dptr);
-                       SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
-                           "debug __strdup() at %s:%d failed (%lld/%llu)\n",
-                           func, line, kmem_alloc_used_read(), kmem_alloc_max);
+                       printk(KERN_WARNING "debug __strdup() at %s:%d "
+                           "failed (%lld/%llu)\n", func, line,
+                           kmem_alloc_used_read(), kmem_alloc_max);
                        goto out;
                }
 
@@ -469,8 +336,8 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
                if (unlikely(ptr == NULL)) {
                        kfree(dptr->kd_func);
                        kfree(dptr);
-                       SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "kmem_alloc"
-                           "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+                       printk(KERN_WARNING "kmem_alloc(%llu, 0x%x) "
+                           "at %s:%d failed (%lld/%llu)\n",
                            (unsigned long long) size, flags, func, line,
                            kmem_alloc_used_read(), kmem_alloc_max);
                        goto out;
@@ -488,18 +355,13 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
                dptr->kd_line = line;
 
                spin_lock_irqsave(&kmem_lock, irq_flags);
-               hlist_add_head_rcu(&dptr->kd_hlist,
+               hlist_add_head(&dptr->kd_hlist,
                    &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
                list_add_tail(&dptr->kd_list, &kmem_list);
                spin_unlock_irqrestore(&kmem_lock, irq_flags);
-
-               SDEBUG_LIMIT(SD_INFO,
-                   "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
-                   (unsigned long long) size, flags, func, line, ptr,
-                   kmem_alloc_used_read(), kmem_alloc_max);
        }
 out:
-       SRETURN(ptr);
+       return (ptr);
 }
 EXPORT_SYMBOL(kmem_alloc_track);
 
@@ -507,14 +369,12 @@ void
 kmem_free_track(const void *ptr, size_t size)
 {
        kmem_debug_t *dptr;
-       SENTRY;
 
        ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
            (unsigned long long) size);
 
-       dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
-
        /* Must exist in hash due to kmem_alloc() */
+       dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
        ASSERT(dptr);
 
        /* Size must match */
@@ -523,19 +383,13 @@ kmem_free_track(const void *ptr, size_t size)
            (unsigned long long) size, dptr->kd_func, dptr->kd_line);
 
        kmem_alloc_used_sub(size);
-       SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
-           (unsigned long long) size, kmem_alloc_used_read(),
-           kmem_alloc_max);
-
        kfree(dptr->kd_func);
 
-       memset(dptr, 0x5a, sizeof(kmem_debug_t));
+       memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
        kfree(dptr);
 
-       memset(ptr, 0x5a, size);
+       memset((void *)ptr, 0x5a, size);
        kfree(ptr);
-
-       SEXIT;
 }
 EXPORT_SYMBOL(kmem_free_track);
 
@@ -545,7 +399,6 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
        void *ptr = NULL;
        kmem_debug_t *dptr;
        unsigned long irq_flags;
-       SENTRY;
 
        ASSERT(flags & KM_SLEEP);
 
@@ -553,8 +406,8 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
        dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
            flags & ~__GFP_ZERO);
        if (unlikely(dptr == NULL)) {
-               SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
-                   "vmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
+               printk(KERN_WARNING "debug vmem_alloc(%ld, 0x%x) "
+                   "at %s:%d failed (%lld/%llu)\n",
                    sizeof(kmem_debug_t), flags, func, line,
                    vmem_alloc_used_read(), vmem_alloc_max);
        } else {
@@ -568,9 +421,9 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
                dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
                if (unlikely(dptr->kd_func == NULL)) {
                        kfree(dptr);
-                       SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
-                           "debug __strdup() at %s:%d failed (%lld/%llu)\n",
-                           func, line, vmem_alloc_used_read(), vmem_alloc_max);
+                       printk(KERN_WARNING "debug __strdup() at %s:%d "
+                           "failed (%lld/%llu)\n", func, line,
+                           vmem_alloc_used_read(), vmem_alloc_max);
                        goto out;
                }
 
@@ -584,8 +437,8 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
                if (unlikely(ptr == NULL)) {
                        kfree(dptr->kd_func);
                        kfree(dptr);
-                       SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "vmem_alloc"
-                           "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+                       printk(KERN_WARNING "vmem_alloc (%llu, 0x%x) "
+                           "at %s:%d failed (%lld/%llu)\n",
                            (unsigned long long) size, flags, func, line,
                            vmem_alloc_used_read(), vmem_alloc_max);
                        goto out;
@@ -603,18 +456,13 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
                dptr->kd_line = line;
 
                spin_lock_irqsave(&vmem_lock, irq_flags);
-               hlist_add_head_rcu(&dptr->kd_hlist,
+               hlist_add_head(&dptr->kd_hlist,
                    &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
                list_add_tail(&dptr->kd_list, &vmem_list);
                spin_unlock_irqrestore(&vmem_lock, irq_flags);
-
-               SDEBUG_LIMIT(SD_INFO,
-                   "vmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
-                   (unsigned long long) size, flags, func, line,
-                   ptr, vmem_alloc_used_read(), vmem_alloc_max);
        }
 out:
-       SRETURN(ptr);
+       return (ptr);
 }
 EXPORT_SYMBOL(vmem_alloc_track);
 
@@ -622,14 +470,12 @@ void
 vmem_free_track(const void *ptr, size_t size)
 {
        kmem_debug_t *dptr;
-       SENTRY;
 
        ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
            (unsigned long long) size);
 
-       dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
-
        /* Must exist in hash due to vmem_alloc() */
+       dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
        ASSERT(dptr);
 
        /* Size must match */
@@ -638,19 +484,13 @@ vmem_free_track(const void *ptr, size_t size)
            (unsigned long long) size, dptr->kd_func, dptr->kd_line);
 
        vmem_alloc_used_sub(size);
-       SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
-           (unsigned long long) size, vmem_alloc_used_read(),
-           vmem_alloc_max);
-
        kfree(dptr->kd_func);
 
-       memset(dptr, 0x5a, sizeof(kmem_debug_t));
+       memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
        kfree(dptr);
 
-       memset(ptr, 0x5a, size);
+       memset((void *)ptr, 0x5a, size);
        vfree(ptr);
-
-       SEXIT;
 }
 EXPORT_SYMBOL(vmem_free_track);
 
@@ -661,18 +501,17 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
     int node_alloc, int node)
 {
        void *ptr;
-       SENTRY;
 
        /*
         * Marked unlikely because we should never be doing this,
         * we tolerate up to 2 pages but a single page is best.
         */
        if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
-               SDEBUG(SD_CONSOLE | SD_WARNING,
+               printk(KERN_WARNING
                    "large kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
-                   (unsigned long long) size, flags, func, line,
-                   kmem_alloc_used_read(), kmem_alloc_max);
-               dump_stack();
+                   (unsigned long long)size, flags, func, line,
+                   (unsigned long long)kmem_alloc_used_read(), kmem_alloc_max);
+               spl_dumpstack();
        }
 
        /* Use the correct allocator */
@@ -686,40 +525,26 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
        }
 
        if (unlikely(ptr == NULL)) {
-               SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+               printk(KERN_WARNING
                    "kmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
-                   (unsigned long long) size, flags, func, line,
-                   kmem_alloc_used_read(), kmem_alloc_max);
+                   (unsigned long long)size, flags, func, line,
+                   (unsigned long long)kmem_alloc_used_read(), kmem_alloc_max);
        } else {
                kmem_alloc_used_add(size);
                if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
                        kmem_alloc_max = kmem_alloc_used_read();
-
-               SDEBUG_LIMIT(SD_INFO,
-                   "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
-                   (unsigned long long) size, flags, func, line, ptr,
-                   kmem_alloc_used_read(), kmem_alloc_max);
        }
 
-       SRETURN(ptr);
+       return (ptr);
 }
 EXPORT_SYMBOL(kmem_alloc_debug);
 
 void
 kmem_free_debug(const void *ptr, size_t size)
 {
-       SENTRY;
-
-       ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
-           (unsigned long long) size);
-
+       ASSERT(ptr || size > 0);
        kmem_alloc_used_sub(size);
-       SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
-           (unsigned long long) size, kmem_alloc_used_read(),
-           kmem_alloc_max);
        kfree(ptr);
-
-       SEXIT;
 }
 EXPORT_SYMBOL(kmem_free_debug);
 
@@ -727,7 +552,6 @@ void *
 vmem_alloc_debug(size_t size, int flags, const char *func, int line)
 {
        void *ptr;
-       SENTRY;
 
        ASSERT(flags & KM_SLEEP);
 
@@ -739,39 +563,26 @@ vmem_alloc_debug(size_t size, int flags, const char *func, int line)
        }
 
        if (unlikely(ptr == NULL)) {
-               SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+               printk(KERN_WARNING
                    "vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
-                   (unsigned long long) size, flags, func, line,
-                   vmem_alloc_used_read(), vmem_alloc_max);
+                   (unsigned long long)size, flags, func, line,
+                   (unsigned long long)vmem_alloc_used_read(), vmem_alloc_max);
        } else {
                vmem_alloc_used_add(size);
                if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
                        vmem_alloc_max = vmem_alloc_used_read();
-
-               SDEBUG_LIMIT(SD_INFO, "vmem_alloc(%llu, 0x%x) = %p "
-                   "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
-                   vmem_alloc_used_read(), vmem_alloc_max);
        }
 
-       SRETURN(ptr);
+       return (ptr);
 }
 EXPORT_SYMBOL(vmem_alloc_debug);
 
 void
 vmem_free_debug(const void *ptr, size_t size)
 {
-       SENTRY;
-
-       ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
-           (unsigned long long) size);
-
+       ASSERT(ptr || size > 0);
        vmem_alloc_used_sub(size);
-       SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
-           (unsigned long long) size, vmem_alloc_used_read(),
-           vmem_alloc_max);
        vfree(ptr);
-
-       SEXIT;
 }
 EXPORT_SYMBOL(vmem_free_debug);
 
@@ -825,9 +636,9 @@ EXPORT_SYMBOL(vmem_free_debug);
 
 struct list_head spl_kmem_cache_list;   /* List of caches */
 struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
+taskq_t *spl_kmem_cache_taskq;          /* Task queue for ageing / reclaim */
 
-static int spl_cache_flush(spl_kmem_cache_t *skc,
-                           spl_kmem_magazine_t *skm, int flush);
+static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
 
 SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker);
 SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
@@ -840,31 +651,12 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
 
        ASSERT(ISP2(size));
 
-       if (skc->skc_flags & KMC_KMEM) {
-               ptr = (void *)__get_free_pages(flags, get_order(size));
-       } else {
-               /*
-                * As part of vmalloc() an __pte_alloc_kernel() allocation
-                * may occur.  This internal allocation does not honor the
-                * gfp flags passed to vmalloc().  This means even when
-                * vmalloc(GFP_NOFS) is called it is possible synchronous
-                * reclaim will occur.  This reclaim can trigger file IO
-                * which can result in a deadlock.  This issue can be avoided
-                * by explicitly setting PF_MEMALLOC on the process to
-                * subvert synchronous reclaim.  The following bug has
-                * been filed at kernel.org to track the issue.
-                *
-                * https://bugzilla.kernel.org/show_bug.cgi?id=30702
-                */
-               if (!(flags & __GFP_FS))
-                       current->flags |= PF_MEMALLOC;
-
+       if (skc->skc_flags & KMC_KMEM)
+               ptr = (void *)__get_free_pages(flags | __GFP_COMP,
+                   get_order(size));
+       else
                ptr = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
 
-               if (!(flags & __GFP_FS))
-                       current->flags &= ~PF_MEMALLOC;
-       }
-
        /* Resulting allocated memory will be page aligned */
        ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
 
@@ -932,7 +724,7 @@ spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
 static inline uint32_t
 spl_offslab_size(spl_kmem_cache_t *skc)
 {
-       return 1UL << (highbit(spl_obj_size(skc)) + 1);
+       return 1UL << (fls64(spl_obj_size(skc)) + 1);
 }
 
 /*
@@ -977,7 +769,7 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
 
        base = kv_alloc(skc, skc->skc_slab_size, flags);
        if (base == NULL)
-               SRETURN(NULL);
+               return (NULL);
 
        sks = (spl_kmem_slab_t *)base;
        sks->sks_magic = SKS_MAGIC;
@@ -995,8 +787,10 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
        for (i = 0; i < sks->sks_objs; i++) {
                if (skc->skc_flags & KMC_OFFSLAB) {
                        obj = kv_alloc(skc, offslab_size, flags);
-                       if (!obj)
-                               SGOTO(out, rc = -ENOMEM);
+                       if (!obj) {
+                               rc = -ENOMEM;
+                               goto out;
+                       }
                } else {
                        obj = base + spl_sks_size(skc) + (i * obj_size);
                }
@@ -1010,9 +804,6 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
                list_add_tail(&sko->sko_list, &sks->sks_free_list);
        }
 
-       list_for_each_entry(sko, &sks->sks_free_list, sko_list)
-               if (skc->skc_ctor)
-                       skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);
 out:
        if (rc) {
                if (skc->skc_flags & KMC_OFFSLAB)
@@ -1024,7 +815,7 @@ out:
                sks = NULL;
        }
 
-       SRETURN(sks);
+       return (sks);
 }
 
 /*
@@ -1037,7 +828,6 @@ spl_slab_free(spl_kmem_slab_t *sks,
              struct list_head *sks_list, struct list_head *sko_list)
 {
        spl_kmem_cache_t *skc;
-       SENTRY;
 
        ASSERT(sks->sks_magic == SKS_MAGIC);
        ASSERT(sks->sks_ref == 0);
@@ -1057,8 +847,6 @@ spl_slab_free(spl_kmem_slab_t *sks,
        list_del(&sks->sks_list);
        list_add(&sks->sks_list, sks_list);
        list_splice_init(&sks->sks_free_list, sko_list);
-
-       SEXIT;
 }
 
 /*
@@ -1078,7 +866,6 @@ spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
        LIST_HEAD(sko_list);
        uint32_t size = 0;
        int i = 0;
-       SENTRY;
 
        /*
         * Move empty slabs and objects which have not been touched in
@@ -1118,156 +905,245 @@ spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
        list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
                ASSERT(sko->sko_magic == SKO_MAGIC);
 
-               if (skc->skc_dtor)
-                       skc->skc_dtor(sko->sko_addr, skc->skc_private);
-
                if (skc->skc_flags & KMC_OFFSLAB)
                        kv_free(skc, sko->sko_addr, size);
-
-               cond_resched();
        }
 
        list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
                ASSERT(sks->sks_magic == SKS_MAGIC);
                kv_free(skc, sks, skc->skc_slab_size);
-               cond_resched();
+       }
+}
+
+static spl_kmem_emergency_t *
+spl_emergency_search(struct rb_root *root, void *obj)
+{
+       struct rb_node *node = root->rb_node;
+       spl_kmem_emergency_t *ske;
+       unsigned long address = (unsigned long)obj;
+
+       while (node) {
+               ske = container_of(node, spl_kmem_emergency_t, ske_node);
+
+               if (address < (unsigned long)ske->ske_obj)
+                       node = node->rb_left;
+               else if (address > (unsigned long)ske->ske_obj)
+                       node = node->rb_right;
+               else
+                       return ske;
        }
 
-       SEXIT;
+       return NULL;
+}
+
+static int
+spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
+{
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+       spl_kmem_emergency_t *ske_tmp;
+       unsigned long address = (unsigned long)ske->ske_obj;
+
+       while (*new) {
+               ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
+
+               parent = *new;
+               if (address < (unsigned long)ske_tmp->ske_obj)
+                       new = &((*new)->rb_left);
+               else if (address > (unsigned long)ske_tmp->ske_obj)
+                       new = &((*new)->rb_right);
+               else
+                       return 0;
+       }
+
+       rb_link_node(&ske->ske_node, parent, new);
+       rb_insert_color(&ske->ske_node, root);
+
+       return 1;
 }
 
 /*
- * Allocate a single emergency object for use by the caller.
+ * Allocate a single emergency object and track it in a red black tree.
  */
 static int
 spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
 {
        spl_kmem_emergency_t *ske;
        int empty;
-       SENTRY;
 
        /* Last chance use a partial slab if one now exists */
        spin_lock(&skc->skc_lock);
        empty = list_empty(&skc->skc_partial_list);
        spin_unlock(&skc->skc_lock);
        if (!empty)
-               SRETURN(-EEXIST);
+               return (-EEXIST);
 
        ske = kmalloc(sizeof(*ske), flags);
        if (ske == NULL)
-               SRETURN(-ENOMEM);
+               return (-ENOMEM);
 
        ske->ske_obj = kmalloc(skc->skc_obj_size, flags);
        if (ske->ske_obj == NULL) {
                kfree(ske);
-               SRETURN(-ENOMEM);
+               return (-ENOMEM);
        }
 
-       if (skc->skc_ctor)
-               skc->skc_ctor(ske->ske_obj, skc->skc_private, flags);
-
        spin_lock(&skc->skc_lock);
-       skc->skc_obj_total++;
-       skc->skc_obj_emergency++;
-       if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
-               skc->skc_obj_emergency_max = skc->skc_obj_emergency;
-
-       list_add(&ske->ske_list, &skc->skc_emergency_list);
+       empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
+       if (likely(empty)) {
+               skc->skc_obj_total++;
+               skc->skc_obj_emergency++;
+               if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
+                       skc->skc_obj_emergency_max = skc->skc_obj_emergency;
+       }
        spin_unlock(&skc->skc_lock);
 
+       if (unlikely(!empty)) {
+               kfree(ske->ske_obj);
+               kfree(ske);
+               return (-EINVAL);
+       }
+
        *obj = ske->ske_obj;
 
-       SRETURN(0);
+       return (0);
 }
 
 /*
- * Free the passed object if it is an emergency object or a normal slab
- * object.  Currently this is done by walking what should be a short list of
- * emergency objects.  If this proves to be too inefficient we can replace
- * the simple list with a hash.
+ * Locate the passed object in the red black tree and free it.
  */
 static int
 spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
 {
-       spl_kmem_emergency_t *m, *n, *ske = NULL;
-       SENTRY;
+       spl_kmem_emergency_t *ske;
 
        spin_lock(&skc->skc_lock);
-       list_for_each_entry_safe(m, n, &skc->skc_emergency_list, ske_list) {
-               if (m->ske_obj == obj) {
-                       list_del(&m->ske_list);
-                       skc->skc_obj_emergency--;
-                       skc->skc_obj_total--;
-                       ske = m;
-                       break;
-               }
+       ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
+       if (likely(ske)) {
+               rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
+               skc->skc_obj_emergency--;
+               skc->skc_obj_total--;
        }
        spin_unlock(&skc->skc_lock);
 
-       if (ske == NULL)
-               SRETURN(-ENOENT);
-
-       if (skc->skc_dtor)
-               skc->skc_dtor(ske->ske_obj, skc->skc_private);
+       if (unlikely(ske == NULL))
+               return (-ENOENT);
 
        kfree(ske->ske_obj);
        kfree(ske);
 
-       SRETURN(0);
+       return (0);
 }
 
 /*
- * Called regularly on all caches to age objects out of the magazines
- * which have not been access in skc->skc_delay seconds.  This prevents
- * idle magazines from holding memory which might be better used by
- * other caches or parts of the system.  The delay is present to
- * prevent thrashing the magazine.
+ * Release objects from the per-cpu magazine back to their slab.  The flush
+ * argument contains the max number of entries to remove from the magazine.
  */
+static void
+__spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
+{
+       int i, count = MIN(flush, skm->skm_avail);
+
+       ASSERT(skc->skc_magic == SKC_MAGIC);
+       ASSERT(skm->skm_magic == SKM_MAGIC);
+       ASSERT(spin_is_locked(&skc->skc_lock));
+
+       for (i = 0; i < count; i++)
+               spl_cache_shrink(skc, skm->skm_objs[i]);
+
+       skm->skm_avail -= count;
+       memmove(skm->skm_objs, &(skm->skm_objs[count]),
+               sizeof(void *) * skm->skm_avail);
+}
+
+static void
+spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
+{
+       spin_lock(&skc->skc_lock);
+       __spl_cache_flush(skc, skm, flush);
+       spin_unlock(&skc->skc_lock);
+}
+
 static void
 spl_magazine_age(void *data)
 {
-       spl_kmem_magazine_t *skm =
-               spl_get_work_data(data, spl_kmem_magazine_t, skm_work.work);
-       spl_kmem_cache_t *skc = skm->skm_cache;
+       spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
+       spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
 
        ASSERT(skm->skm_magic == SKM_MAGIC);
-       ASSERT(skc->skc_magic == SKC_MAGIC);
-       ASSERT(skc->skc_mag[skm->skm_cpu] == skm);
+       ASSERT(skm->skm_cpu == smp_processor_id());
+       ASSERT(irqs_disabled());
+
+       /* There are no available objects or they are too young to age out */
+       if ((skm->skm_avail == 0) ||
+           time_before(jiffies, skm->skm_age + skc->skc_delay * HZ))
+               return;
 
-       if (skm->skm_avail > 0 &&
-           time_after(jiffies, skm->skm_age + skc->skc_delay * HZ))
-               (void)spl_cache_flush(skc, skm, skm->skm_refill);
+       /*
+        * Because we're executing in interrupt context we may have
+        * interrupted the holder of this lock.  To avoid a potential
+        * deadlock return if the lock is contended.
+        */
+       if (!spin_trylock(&skc->skc_lock))
+               return;
 
-       if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
-               schedule_delayed_work_on(skm->skm_cpu, &skm->skm_work,
-                                        skc->skc_delay / 3 * HZ);
+       __spl_cache_flush(skc, skm, skm->skm_refill);
+       spin_unlock(&skc->skc_lock);
 }
 
 /*
- * Called regularly to keep a downward pressure on the size of idle
- * magazines and to release free slabs from the cache.  This function
- * never calls the registered reclaim function, that only occurs
- * under memory pressure or with a direct call to spl_kmem_reap().
+ * Called regularly to keep a downward pressure on the cache.
+ *
+ * Objects older than skc->skc_delay seconds in the per-cpu magazines will
+ * be returned to the caches.  This is done to prevent idle magazines from
+ * holding memory which could be better used elsewhere.  The delay is
+ * present to prevent thrashing the magazine.
+ *
+ * The newly released objects may result in empty partial slabs.  Those
+ * slabs should be released to the system.  Otherwise moving the objects
+ * out of the magazines is just wasted work.
  */
 static void
 spl_cache_age(void *data)
 {
-       spl_kmem_cache_t *skc =
-               spl_get_work_data(data, spl_kmem_cache_t, skc_work.work);
+       spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
+       taskqid_t id = 0;
 
        ASSERT(skc->skc_magic == SKC_MAGIC);
+
+       /* Dynamically disabled at run time */
+       if (!(spl_kmem_cache_expire & KMC_EXPIRE_AGE))
+               return;
+
+       atomic_inc(&skc->skc_ref);
+
+       if (!(skc->skc_flags & KMC_NOMAGAZINE))
+               on_each_cpu(spl_magazine_age, skc, 1);
+
        spl_slab_reclaim(skc, skc->skc_reap, 0);
 
-       if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
-               schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
+       while (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && !id) {
+               id = taskq_dispatch_delay(
+                   spl_kmem_cache_taskq, spl_cache_age, skc, TQ_SLEEP,
+                   ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
+
+               /* Destroy issued after dispatch immediately cancel it */
+               if (test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && id)
+                       taskq_cancel_id(spl_kmem_cache_taskq, id);
+       }
+
+       spin_lock(&skc->skc_lock);
+       skc->skc_taskqid = id;
+       spin_unlock(&skc->skc_lock);
+
+       atomic_dec(&skc->skc_ref);
 }
 
 /*
  * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
- * When on-slab we want to target SPL_KMEM_CACHE_OBJ_PER_SLAB.  However,
+ * When on-slab we want to target spl_kmem_cache_obj_per_slab.  However,
  * for very small objects we may end up with more than this so as not
  * to waste space in the minimal allocation of a single page.  Also for
- * very large objects we may use as few as SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN,
+ * very large objects we may use as few as spl_kmem_cache_obj_per_slab_min,
  * lower than this and we will fail.
  */
 static int
@@ -1276,8 +1152,9 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
        uint32_t sks_size, obj_size, max_size;
 
        if (skc->skc_flags & KMC_OFFSLAB) {
-               *objs = SPL_KMEM_CACHE_OBJ_PER_SLAB;
-               *size = sizeof(spl_kmem_slab_t);
+               *objs = spl_kmem_cache_obj_per_slab;
+               *size = P2ROUNDUP(sizeof(spl_kmem_slab_t), PAGE_SIZE);
+               return (0);
        } else {
                sks_size = spl_sks_size(skc);
                obj_size = spl_obj_size(skc);
@@ -1285,13 +1162,13 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
                if (skc->skc_flags & KMC_KMEM)
                        max_size = ((uint32_t)1 << (MAX_ORDER-3)) * PAGE_SIZE;
                else
-                       max_size = (32 * 1024 * 1024);
+                       max_size = (spl_kmem_cache_max_size * 1024 * 1024);
 
                /* Power of two sized slab */
                for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
                        *objs = (*size - sks_size) / obj_size;
-                       if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB)
-                               SRETURN(0);
+                       if (*objs >= spl_kmem_cache_obj_per_slab)
+                               return (0);
                }
 
                /*
@@ -1301,11 +1178,11 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
                 */
                *size = max_size;
                *objs = (*size - sks_size) / obj_size;
-               if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN)
-                       SRETURN(0);
+               if (*objs >= (spl_kmem_cache_obj_per_slab_min))
+                       return (0);
        }
 
-       SRETURN(-ENOSPC);
+       return (-ENOSPC);
 }
 
 /*
@@ -1318,7 +1195,6 @@ spl_magazine_size(spl_kmem_cache_t *skc)
 {
        uint32_t obj_size = spl_obj_size(skc);
        int size;
-       SENTRY;
 
        /* Per-magazine sizes below assume a 4Kib page size */
        if (obj_size > (PAGE_SIZE * 256))
@@ -1332,7 +1208,7 @@ spl_magazine_size(spl_kmem_cache_t *skc)
        else
                size = 256;
 
-       SRETURN(size);
+       return (size);
 }
 
 /*
@@ -1344,7 +1220,6 @@ spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
        spl_kmem_magazine_t *skm;
        int size = sizeof(spl_kmem_magazine_t) +
                   sizeof(void *) * skc->skc_mag_size;
-       SENTRY;
 
        skm = kmem_alloc_node(size, KM_SLEEP, cpu_to_node(cpu));
        if (skm) {
@@ -1353,12 +1228,11 @@ spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
                skm->skm_size = skc->skc_mag_size;
                skm->skm_refill = skc->skc_mag_refill;
                skm->skm_cache = skc;
-               spl_init_delayed_work(&skm->skm_work, spl_magazine_age, skm);
                skm->skm_age = jiffies;
                skm->skm_cpu = cpu;
        }
 
-       SRETURN(skm);
+       return (skm);
 }
 
 /*
@@ -1370,12 +1244,10 @@ spl_magazine_free(spl_kmem_magazine_t *skm)
        int size = sizeof(spl_kmem_magazine_t) +
                   sizeof(void *) * skm->skm_size;
 
-       SENTRY;
        ASSERT(skm->skm_magic == SKM_MAGIC);
        ASSERT(skm->skm_avail == 0);
 
        kmem_free(skm, size);
-       SEXIT;
 }
 
 /*
@@ -1385,7 +1257,9 @@ static int
 spl_magazine_create(spl_kmem_cache_t *skc)
 {
        int i;
-       SENTRY;
+
+       if (skc->skc_flags & KMC_NOMAGAZINE)
+               return (0);
 
        skc->skc_mag_size = spl_magazine_size(skc);
        skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
@@ -1396,16 +1270,11 @@ spl_magazine_create(spl_kmem_cache_t *skc)
                        for (i--; i >= 0; i--)
                                spl_magazine_free(skc->skc_mag[i]);
 
-                       SRETURN(-ENOMEM);
+                       return (-ENOMEM);
                }
        }
 
-       /* Only after everything is allocated schedule magazine work */
-       for_each_online_cpu(i)
-               schedule_delayed_work_on(i, &skc->skc_mag[i]->skm_work,
-                                        skc->skc_delay / 3 * HZ);
-
-       SRETURN(0);
+       return (0);
 }
 
 /*
@@ -1416,15 +1285,15 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
 {
        spl_kmem_magazine_t *skm;
        int i;
-       SENTRY;
+
+       if (skc->skc_flags & KMC_NOMAGAZINE)
+               return;
 
         for_each_online_cpu(i) {
                skm = skc->skc_mag[i];
-               (void)spl_cache_flush(skc, skm, skm->skm_avail);
+               spl_cache_flush(skc, skm, skm->skm_avail);
                spl_magazine_free(skm);
         }
-
-       SEXIT;
 }
 
 /*
@@ -1440,11 +1309,12 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
  * flags
  *     KMC_NOTOUCH     Disable cache object aging (unsupported)
  *     KMC_NODEBUG     Disable debugging (unsupported)
- *     KMC_NOMAGAZINE  Disable magazine (unsupported)
  *     KMC_NOHASH      Disable hashing (unsupported)
  *     KMC_QCACHE      Disable qcache (unsupported)
+ *     KMC_NOMAGAZINE  Enabled for kmem/vmem, Disabled for Linux slab
  *     KMC_KMEM        Force kmem backed cache
  *     KMC_VMEM        Force vmem backed cache
+ *     KMC_SLAB        Force Linux slab backed cache
  *     KMC_OFFSLAB     Locate objects off the slab
  */
 spl_kmem_cache_t *
@@ -1455,36 +1325,35 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
                       void *priv, void *vmp, int flags)
 {
         spl_kmem_cache_t *skc;
-       int rc, kmem_flags = KM_SLEEP;
-       SENTRY;
+       int rc;
 
-       ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
-       ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
-       ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);
+       /*
+        * Unsupported flags
+        */
+       ASSERT0(flags & KMC_NOMAGAZINE);
+       ASSERT0(flags & KMC_NOHASH);
+       ASSERT0(flags & KMC_QCACHE);
        ASSERT(vmp == NULL);
 
-        /* We may be called when there is a non-zero preempt_count or
-         * interrupts are disabled is which case we must not sleep.
-        */
-       if (current_thread_info()->preempt_count || irqs_disabled())
-               kmem_flags = KM_NOSLEEP;
+       might_sleep();
 
-       /* Allocate memory for a new cache an initialize it.  Unfortunately,
+       /*
+        * Allocate memory for a new cache and initialize it.  Unfortunately,
         * this usually ends up being a large allocation of ~32k because
         * we need to allocate enough memory for the worst case number of
         * cpus in the magazine, skc_mag[NR_CPUS].  Because of this we
-        * explicitly pass KM_NODEBUG to suppress the kmem warning */
-       skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc),
-                                             kmem_flags | KM_NODEBUG);
+        * explicitly pass KM_NODEBUG to suppress the kmem warning
+        */
+       skc = kmem_zalloc(sizeof(*skc), KM_SLEEP | KM_NODEBUG);
        if (skc == NULL)
-               SRETURN(NULL);
+               return (NULL);
 
        skc->skc_magic = SKC_MAGIC;
        skc->skc_name_size = strlen(name) + 1;
-       skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags);
+       skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, KM_SLEEP);
        if (skc->skc_name == NULL) {
                kmem_free(skc, sizeof(*skc));
-               SRETURN(NULL);
+               return (NULL);
        }
        strncpy(skc->skc_name, name, skc->skc_name_size);
 
@@ -1493,6 +1362,7 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
        skc->skc_reclaim = reclaim;
        skc->skc_private = priv;
        skc->skc_vmp = vmp;
+       skc->skc_linux_cache = NULL;
        skc->skc_flags = flags;
        skc->skc_obj_size = size;
        skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
@@ -1503,7 +1373,7 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
        INIT_LIST_HEAD(&skc->skc_list);
        INIT_LIST_HEAD(&skc->skc_complete_list);
        INIT_LIST_HEAD(&skc->skc_partial_list);
-       INIT_LIST_HEAD(&skc->skc_emergency_list);
+       skc->skc_emergency_tree = RB_ROOT;
        spin_lock_init(&skc->skc_lock);
        init_waitqueue_head(&skc->skc_waitq);
        skc->skc_slab_fail = 0;
@@ -1515,44 +1385,90 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
        skc->skc_obj_total = 0;
        skc->skc_obj_alloc = 0;
        skc->skc_obj_max = 0;
+       skc->skc_obj_deadlock = 0;
        skc->skc_obj_emergency = 0;
        skc->skc_obj_emergency_max = 0;
 
+       /*
+        * Verify the requested alignment restriction is sane.
+        */
        if (align) {
                VERIFY(ISP2(align));
-               VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN); /* Min alignment */
-               VERIFY3U(align, <=, PAGE_SIZE);            /* Max alignment */
+               VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
+               VERIFY3U(align, <=, PAGE_SIZE);
                skc->skc_obj_align = align;
        }
 
-       /* If none passed select a cache type based on object size */
-       if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM))) {
-               if (spl_obj_size(skc) < (PAGE_SIZE / 8))
+       /*
+        * When no specific type of slab is requested (kmem, vmem, or
+        * Linux slab), select a cache type based on the object size
+        * and default tunables.
+        */
+       if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB))) {
+
+               /*
+                * Objects smaller than spl_kmem_cache_slab_limit can
+                * use the Linux slab for better space-efficiency.  By
+                * default this functionality is disabled until its
+                * performance characteristics are fully understood.
+                */
+               if (spl_kmem_cache_slab_limit &&
+                   size <= (size_t)spl_kmem_cache_slab_limit)
+                       skc->skc_flags |= KMC_SLAB;
+
+               /*
+                * Small objects, less than spl_kmem_cache_kmem_limit per
+                * object should use kmem because their slabs are small.
+                */
+               else if (spl_obj_size(skc) <= spl_kmem_cache_kmem_limit)
                        skc->skc_flags |= KMC_KMEM;
+
+               /*
+                * All other objects are considered large and are placed
+                * on vmem backed slabs.
+                */
                else
                        skc->skc_flags |= KMC_VMEM;
        }
 
-       rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size);
-       if (rc)
-               SGOTO(out, rc);
+       /*
+        * Given the type of slab allocate the required resources.
+        */
+       if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
+               rc = spl_slab_size(skc,
+                   &skc->skc_slab_objs, &skc->skc_slab_size);
+               if (rc)
+                       goto out;
 
-       rc = spl_magazine_create(skc);
-       if (rc)
-               SGOTO(out, rc);
+               rc = spl_magazine_create(skc);
+               if (rc)
+                       goto out;
+       } else {
+               skc->skc_linux_cache = kmem_cache_create(
+                   skc->skc_name, size, align, 0, NULL);
+               if (skc->skc_linux_cache == NULL) {
+                       rc = ENOMEM;
+                       goto out;
+               }
 
-       spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc);
-       schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
+               kmem_cache_set_allocflags(skc, __GFP_COMP);
+               skc->skc_flags |= KMC_NOMAGAZINE;
+       }
+
+       if (spl_kmem_cache_expire & KMC_EXPIRE_AGE)
+               skc->skc_taskqid = taskq_dispatch_delay(spl_kmem_cache_taskq,
+                   spl_cache_age, skc, TQ_SLEEP,
+                   ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
 
        down_write(&spl_kmem_cache_sem);
        list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
        up_write(&spl_kmem_cache_sem);
 
-       SRETURN(skc);
+       return (skc);
 out:
        kmem_free(skc->skc_name, skc->skc_name_size);
        kmem_free(skc, sizeof(*skc));
-       SRETURN(NULL);
+       return (NULL);
 }
 EXPORT_SYMBOL(spl_kmem_cache_create);
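
To make the flag table above concrete, a hedged usage sketch (not taken from this diff; the ctor/dtor/reclaim/priv/vmp arguments elided by the hunk header are assumed to be NULL here, and error handling is minimal):

typedef struct my_obj {
        int     mo_busy;
} my_obj_t;

static spl_kmem_cache_t *my_cache;

static int
example_cache_usage(void)
{
        my_obj_t *obj;

        /* Passing 0 for flags lets the object size and the tunables above
         * pick between kmem, vmem, and Linux slab backing. */
        my_cache = spl_kmem_cache_create("example_cache", sizeof (my_obj_t),
            0, NULL, NULL, NULL, NULL, NULL, 0);
        if (my_cache == NULL)
                return (-ENOMEM);

        obj = spl_kmem_cache_alloc(my_cache, KM_SLEEP);
        /* ... use obj ... */
        spl_kmem_cache_free(my_cache, obj);

        spl_kmem_cache_destroy(my_cache);
        return (0);
}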
 
@@ -1575,30 +1491,37 @@ void
 spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
 {
        DECLARE_WAIT_QUEUE_HEAD(wq);
-       int i;
-       SENTRY;
+       taskqid_t id;
 
        ASSERT(skc->skc_magic == SKC_MAGIC);
+       ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB));
 
        down_write(&spl_kmem_cache_sem);
        list_del_init(&skc->skc_list);
        up_write(&spl_kmem_cache_sem);
 
-       /* Cancel any and wait for any pending delayed work */
+       /* Cancel any and wait for any pending delayed tasks */
        VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
-       cancel_delayed_work_sync(&skc->skc_work);
-       for_each_online_cpu(i)
-               cancel_delayed_work_sync(&skc->skc_mag[i]->skm_work);
 
-       flush_scheduled_work();
+       spin_lock(&skc->skc_lock);
+       id = skc->skc_taskqid;
+       spin_unlock(&skc->skc_lock);
+
+       taskq_cancel_id(spl_kmem_cache_taskq, id);
 
        /* Wait until all current callers complete, this is mainly
         * to catch the case where a low memory situation triggers a
         * cache reaping action which races with this destroy. */
        wait_event(wq, atomic_read(&skc->skc_ref) == 0);
 
-       spl_magazine_destroy(skc);
-       spl_slab_reclaim(skc, 0, 1);
+       if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
+               spl_magazine_destroy(skc);
+               spl_slab_reclaim(skc, 0, 1);
+       } else {
+               ASSERT(skc->skc_flags & KMC_SLAB);
+               kmem_cache_destroy(skc->skc_linux_cache);
+       }
+
        spin_lock(&skc->skc_lock);
 
        /* Validate there are no objects in use and free all the
@@ -1609,14 +1532,11 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
        ASSERT3U(skc->skc_obj_total, ==, 0);
        ASSERT3U(skc->skc_obj_emergency, ==, 0);
        ASSERT(list_empty(&skc->skc_complete_list));
-       ASSERT(list_empty(&skc->skc_emergency_list));
 
        kmem_free(skc->skc_name, skc->skc_name_size);
        spin_unlock(&skc->skc_lock);
 
        kmem_free(skc, sizeof(*skc));
-
-       SEXIT;
 }
 EXPORT_SYMBOL(spl_kmem_cache_destroy);
 
@@ -1667,8 +1587,7 @@ spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
 static void
 spl_cache_grow_work(void *data)
 {
-       spl_kmem_alloc_t *ska =
-               spl_get_work_data(data, spl_kmem_alloc_t, ska_work.work);
+       spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
        spl_kmem_cache_t *skc = ska->ska_cache;
        spl_kmem_slab_t *sks;
 
@@ -1682,6 +1601,7 @@ spl_cache_grow_work(void *data)
 
        atomic_dec(&skc->skc_ref);
        clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
+       clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
        wake_up_all(&skc->skc_waitq);
        spin_unlock(&skc->skc_lock);
 
@@ -1698,25 +1618,29 @@ spl_cache_grow_wait(spl_kmem_cache_t *skc)
 }
 
 /*
- * No available objects on any slabs, create a new slab.
+ * No available objects on any slabs, create a new slab.  Note that this
+ * functionality is disabled for KMC_SLAB caches which are backed by the
+ * Linux slab.
  */
 static int
 spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
 {
-       int remaining, rc = 0;
-       SENTRY;
+       int remaining, rc;
 
        ASSERT(skc->skc_magic == SKC_MAGIC);
+       ASSERT((skc->skc_flags & KMC_SLAB) == 0);
        might_sleep();
        *obj = NULL;
 
        /*
-        * Before allocating a new slab check if the slab is being reaped.
-        * If it is there is a good chance we can wait until it finishes
-        * and then use one of the newly freed but not aged-out slabs.
+        * Before allocating a new slab wait for any reaping to complete and
+        * then return so the local magazine can be rechecked for new objects.
         */
-       if (test_bit(KMC_BIT_REAPING, &skc->skc_flags))
-               SRETURN(-EAGAIN);
+       if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
+               rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
+                   TASK_UNINTERRUPTIBLE);
+               return (rc ? rc : -EAGAIN);
+       }
 
        /*
         * This is handled by dispatching a work request to the global work
@@ -1731,26 +1655,45 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
                if (ska == NULL) {
                        clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
                        wake_up_all(&skc->skc_waitq);
-                       SRETURN(-ENOMEM);
+                       return (-ENOMEM);
                }
 
                atomic_inc(&skc->skc_ref);
                ska->ska_cache = skc;
-               ska->ska_flags = flags;
-               spl_init_delayed_work(&ska->ska_work, spl_cache_grow_work, ska);
-               schedule_delayed_work(&ska->ska_work, 0);
+               ska->ska_flags = flags & ~__GFP_FS;
+               taskq_init_ent(&ska->ska_tqe);
+               taskq_dispatch_ent(spl_kmem_cache_taskq,
+                   spl_cache_grow_work, ska, 0, &ska->ska_tqe);
        }
 
        /*
-        * Allow a single timer tick before falling back to synchronously
-        * allocating the minimum about of memory required by the caller.
+        * The goal here is to only detect the rare case where a virtual slab
+        * allocation has deadlocked.  We must be careful to minimize the use
+        * of emergency objects which are more expensive to track.  Therefore,
+        * we set a very long timeout for the asynchronous allocation and if
+        * the timeout is reached the cache is flagged as deadlocked.  From
+        * this point only new emergency objects will be allocated until the
+        * asynchronous allocation completes and clears the deadlocked flag.
         */
-       remaining = wait_event_timeout(skc->skc_waitq,
-                                      spl_cache_grow_wait(skc), 1);
-       if (remaining == 0)
+       if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
                rc = spl_emergency_alloc(skc, flags, obj);
+       } else {
+               remaining = wait_event_timeout(skc->skc_waitq,
+                                              spl_cache_grow_wait(skc), HZ);
+
+               if (!remaining && test_bit(KMC_BIT_VMEM, &skc->skc_flags)) {
+                       spin_lock(&skc->skc_lock);
+                       if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
+                               set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
+                               skc->skc_obj_deadlock++;
+                       }
+                       spin_unlock(&skc->skc_lock);
+               }
 
-       SRETURN(rc);
+               rc = -ENOMEM;
+       }
+
+       return (rc);
 }
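The two halves of the KMC_BIT_DEADLOCKED handshake are easier to follow side
by side; a minimal sketch using the flag and wait queue from the patch (the
KMC_BIT_VMEM/KMC_BIT_GROWING checks, locking and accounting are omitted):

    /* Synchronous path (spl_cache_grow): flag the cache when the timed
     * wait for the asynchronous slab allocation expires. */
    if (!wait_event_timeout(skc->skc_waitq, spl_cache_grow_wait(skc), HZ))
            set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);

    /* Worker path (spl_cache_grow_work): clear the flag once the slab
     * allocation finally completes and wake any waiters. */
    clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
    wake_up_all(&skc->skc_waitq);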
 
 /*
@@ -1766,7 +1709,6 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
        spl_kmem_slab_t *sks;
        int count = 0, rc, refill;
        void *obj = NULL;
-       SENTRY;
 
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(skm->skm_magic == SKM_MAGIC);
@@ -1785,14 +1727,14 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
 
                        /* Emergency object for immediate use by caller */
                        if (rc == 0 && obj != NULL)
-                               SRETURN(obj);
+                               return (obj);
 
                        if (rc)
-                               SGOTO(out, rc);
+                               goto out;
 
                        /* Rescheduled to different CPU skm is not local */
                        if (skm != skc->skc_mag[smp_processor_id()])
-                               SGOTO(out, rc);
+                               goto out;
 
                        /* Potentially rescheduled to the same CPU but
                         * allocations may have occurred from this CPU while
@@ -1827,7 +1769,7 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
 
        spin_unlock(&skc->skc_lock);
 out:
-       SRETURN(NULL);
+       return (NULL);
 }
 
 /*
@@ -1838,7 +1780,6 @@ spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
 {
        spl_kmem_slab_t *sks = NULL;
        spl_kmem_obj_t *sko = NULL;
-       SENTRY;
 
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(spin_is_locked(&skc->skc_lock));
@@ -1869,44 +1810,6 @@ spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
                list_add_tail(&sks->sks_list, &skc->skc_partial_list);
                skc->skc_slab_alloc--;
        }
-
-       SEXIT;
-}
-
-/*
- * Release a batch of objects from a per-cpu magazine back to their
- * respective slabs.  This occurs when we exceed the magazine size,
- * are under memory pressure, when the cache is idle, or during
- * cache cleanup.  The flush argument contains the number of entries
- * to remove from the magazine.
- */
-static int
-spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
-{
-       int i, count = MIN(flush, skm->skm_avail);
-       SENTRY;
-
-       ASSERT(skc->skc_magic == SKC_MAGIC);
-       ASSERT(skm->skm_magic == SKM_MAGIC);
-
-       /*
-        * XXX: Currently we simply return objects from the magazine to
-        * the slabs in fifo order.  The ideal thing to do from a memory
-        * fragmentation standpoint is to cheaply determine the set of
-        * objects in the magazine which will result in the largest
-        * number of free slabs if released from the magazine.
-        */
-       spin_lock(&skc->skc_lock);
-       for (i = 0; i < count; i++)
-               spl_cache_shrink(skc, skm->skm_objs[i]);
-
-       skm->skm_avail -= count;
-       memmove(skm->skm_objs, &(skm->skm_objs[count]),
-               sizeof(void *) * skm->skm_avail);
-
-       spin_unlock(&skc->skc_lock);
-
-       SRETURN(count);
 }
 
 /*
@@ -1917,15 +1820,30 @@ void *
 spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
 {
        spl_kmem_magazine_t *skm;
-       unsigned long irq_flags;
        void *obj = NULL;
-       SENTRY;
 
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
        ASSERT(flags & KM_SLEEP);
+
        atomic_inc(&skc->skc_ref);
-       local_irq_save(irq_flags);
+
+       /*
+        * Allocate directly from a Linux slab.  All optimizations are left
+        * to the underlying cache; we only need to guarantee that KM_SLEEP
+        * callers will never fail.
+        */
+       if (skc->skc_flags & KMC_SLAB) {
+               struct kmem_cache *slc = skc->skc_linux_cache;
+
+               do {
+                       obj = kmem_cache_alloc(slc, flags | __GFP_COMP);
+               } while ((obj == NULL) && !(flags & KM_NOSLEEP));
+
+               goto ret;
+       }
+
+       local_irq_disable();
 
 restart:
        /* Safe to update per-cpu structure without lock, but
@@ -1933,9 +1851,7 @@ restart:
         * the local magazine since this may have changed
         * when we need to grow the cache. */
        skm = skc->skc_mag[smp_processor_id()];
-       ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n",
-               skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm,
-               skm->skm_size, skm->skm_refill, skm->skm_avail);
+       ASSERT(skm->skm_magic == SKM_MAGIC);
 
        if (likely(skm->skm_avail)) {
                /* Object available in CPU cache, use it */
@@ -1944,19 +1860,27 @@ restart:
        } else {
                obj = spl_cache_refill(skc, skm, flags);
                if (obj == NULL)
-                       SGOTO(restart, obj = NULL);
+                       goto restart;
        }
 
-       local_irq_restore(irq_flags);
+       local_irq_enable();
        ASSERT(obj);
        ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
 
+ret:
        /* Pre-emptively migrate object to CPU L1 cache */
-       prefetchw(obj);
+       if (obj) {
+               if (skc->skc_ctor)
+                       skc->skc_ctor(obj, skc->skc_private, flags);
+               else
+                       prefetchw(obj);
+       }
+
        atomic_dec(&skc->skc_ref);
 
-       SRETURN(obj);
+       return (obj);
 }
 EXPORT_SYMBOL(spl_kmem_cache_alloc);
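The skc_ctor hook invoked above (and the matching skc_dtor invoked in
spl_kmem_cache_free() below) follows the Solaris constructor convention; a
hypothetical pair, with the object type and names invented purely for
illustration and the int return assumed from that convention:

    typedef struct example_obj {
            int     eo_state;
    } example_obj_t;

    /* Runs on every object handed out by spl_kmem_cache_alloc(). */
    static int
    example_ctor(void *buf, void *priv, int flags)
    {
            example_obj_t *eo = buf;

            eo->eo_state = 0;
            return (0);
    }

    /* Runs on every object passed back to spl_kmem_cache_free(). */
    static void
    example_dtor(void *buf, void *priv)
    {
            example_obj_t *eo = buf;

            eo->eo_state = -1;
    }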
 
 /*
@@ -1970,18 +1894,34 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 {
        spl_kmem_magazine_t *skm;
        unsigned long flags;
-       SENTRY;
 
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
        atomic_inc(&skc->skc_ref);
 
        /*
-        * Emergency objects are never part of the virtual address space
-        * so if we get a virtual address we can optimize this check out.
+        * Run the registered destructor, if any.
+        */
+       if (skc->skc_dtor)
+               skc->skc_dtor(obj, skc->skc_private);
+
+       /*
+        * Free the object back to the underlying Linux slab.
+        */
+       if (skc->skc_flags & KMC_SLAB) {
+               kmem_cache_free(skc->skc_linux_cache, obj);
+               goto out;
+       }
+
+       /*
+        * Only virtual slabs may have emergency objects and these objects
+        * are guaranteed to have physical addresses.  They must be removed
+        * from the tree of emergency objects and then freed.
         */
-       if (!kmem_virt(obj) && !spl_emergency_free(skc, obj))
-               SGOTO(out, 0);
+       if ((skc->skc_flags & KMC_VMEM) && !kmem_virt(obj)) {
+               spl_emergency_free(skc, obj);
+               goto out;
+       }
 
        local_irq_save(flags);
 
@@ -1994,7 +1934,7 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 
        /* Per-CPU cache full, flush it to make space */
        if (unlikely(skm->skm_avail >= skm->skm_size))
-               (void)spl_cache_flush(skc, skm, skm->skm_refill);
+               spl_cache_flush(skc, skm, skm->skm_refill);
 
        /* Available space in cache, use it */
        skm->skm_objs[skm->skm_avail++] = obj;
@@ -2002,8 +1942,6 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
        local_irq_restore(flags);
 out:
        atomic_dec(&skc->skc_ref);
-
-       SEXIT;
 }
 EXPORT_SYMBOL(spl_kmem_cache_free);
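Putting the exported interface together, a hypothetical consumer (reusing
the illustrative example_ctor/example_dtor above; the
spl_kmem_cache_create() argument order is assumed to follow the Solaris
kmem_cache_create() convention of name, size, align, ctor, dtor, reclaim,
private, vmem source and flags):

    static void
    example_cache_usage(void)
    {
            spl_kmem_cache_t *cache;
            example_obj_t *eo;

            cache = spl_kmem_cache_create("example_cache",
                sizeof(example_obj_t), 0, example_ctor, example_dtor,
                NULL, NULL, NULL, 0);

            /* KM_SLEEP allocations are guaranteed to succeed. */
            eo = spl_kmem_cache_alloc(cache, KM_SLEEP);
            spl_kmem_cache_free(cache, eo);

            /* Blocks until outstanding callers drop their skc_ref. */
            spl_kmem_cache_destroy(cache);
    }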
 
@@ -2014,37 +1952,61 @@ EXPORT_SYMBOL(spl_kmem_cache_free);
  * report that they contain unused objects.  Because of this we only
  * register one shrinker function in the shim layer for all slab caches.
  * We always attempt to shrink all caches when this generic shrinker
- * is called.  The shrinker should return the number of free objects
- * in the cache when called with nr_to_scan == 0 but not attempt to
- * free any objects.  When nr_to_scan > 0 it is a request that nr_to_scan
- * objects should be freed, which differs from Solaris semantics.
- * Solaris semantics are to free all available objects which may (and
- * probably will) be more objects than the requested nr_to_scan.
+ * is called.
+ *
+ * If sc->nr_to_scan is zero, the caller is requesting a query of the
+ * number of objects which can potentially be freed.  If it is nonzero,
+ * the request is to free that many objects.
+ *
+ * Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
+ * in struct shrinker and also require the shrinker to return the number
+ * of objects freed.
+ *
+ * Older kernels require the shrinker to return the number of freeable
+ * objects following the freeing of nr_to_scan objects.
+ *
+ * Linux semantics differ from those under Solaris, which are to
+ * free all available objects which may (and probably will) be more
+ * objects than the requested nr_to_scan.
  */
-static int
+static spl_shrinker_t
 __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
     struct shrink_control *sc)
 {
        spl_kmem_cache_t *skc;
-       int unused = 0;
+       int alloc = 0;
 
        down_read(&spl_kmem_cache_sem);
        list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
-               if (sc->nr_to_scan)
+               if (sc->nr_to_scan) {
+#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
+                       uint64_t oldalloc = skc->skc_obj_alloc;
                        spl_kmem_cache_reap_now(skc,
                           MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
-
-               /*
-                * Presume everything alloc'ed in reclaimable, this ensures
-                * we are called again with nr_to_scan > 0 so can try and
-                * reclaim.  The exact number is not important either so
-                * we forgo taking this already highly contented lock.
-                */
-               unused += skc->skc_obj_alloc;
+                       if (oldalloc > skc->skc_obj_alloc)
+                               alloc += oldalloc - skc->skc_obj_alloc;
+#else
+                       spl_kmem_cache_reap_now(skc,
+                          MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
+                       alloc += skc->skc_obj_alloc;
+#endif /* HAVE_SPLIT_SHRINKER_CALLBACK */
+               } else {
+                       /* Request to query number of freeable objects */
+                       alloc += skc->skc_obj_alloc;
+               }
        }
        up_read(&spl_kmem_cache_sem);
 
-       return (unused * sysctl_vfs_cache_pressure) / 100;
+       /*
+        * When KMC_RECLAIM_ONCE is set, allow only a single reclaim pass.
+        * This functionality only exists to work around a rare issue where
+        * shrink_slab() is repeatedly invoked by many cores, causing the
+        * system to thrash.
+        */
+       if ((spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE) && sc->nr_to_scan)
+               return (SHRINK_STOP);
+
+       return (MAX(alloc, 0));
 }
 
 SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
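For reference, the split callbacks on Linux >= 3.12 that the comment above
refers to look roughly as follows; the SPL hides the difference behind the
spl_shrinker_t and SPL_SHRINKER_CALLBACK_WRAPPER compatibility macros, and
the names and counter below are invented for illustration only.

    static atomic_long_t example_nr_cached = ATOMIC_LONG_INIT(0);

    /* Query pass: report how many objects could be freed. */
    static unsigned long
    example_count_objects(struct shrinker *shrink, struct shrink_control *sc)
    {
            return (atomic_long_read(&example_nr_cached));
    }

    /* Scan pass: free up to sc->nr_to_scan objects and return the number
     * actually freed, or SHRINK_STOP when no progress can be made. */
    static unsigned long
    example_scan_objects(struct shrinker *shrink, struct shrink_control *sc)
    {
            unsigned long freed = 0;

            while (freed < sc->nr_to_scan &&
                atomic_long_add_unless(&example_nr_cached, -1, 0))
                    freed++;        /* drop one cached object */

            return (freed ? freed : SHRINK_STOP);
    }

    static struct shrinker example_shrinker = {
            .count_objects  = example_count_objects,
            .scan_objects   = example_scan_objects,
            .seeks          = DEFAULT_SEEKS,
    };

The structure would then be passed to register_shrinker(); older kernels
instead use the single combined callback shown in this function.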
@@ -2060,18 +2022,30 @@ SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
 void
 spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
 {
-       SENTRY;
-
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
 
-       /* Prevent concurrent cache reaping when contended */
-       if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
-               SEXIT;
-               return;
+       atomic_inc(&skc->skc_ref);
+
+       /*
+        * Execute the registered reclaim callback if it exists.  The
+        * per-cpu caches will be drained when KMC_EXPIRE_MEM is set.
+        */
+       if (skc->skc_flags & KMC_SLAB) {
+               if (skc->skc_reclaim)
+                       skc->skc_reclaim(skc->skc_private);
+
+               if (spl_kmem_cache_expire & KMC_EXPIRE_MEM)
+                       kmem_cache_shrink(skc->skc_linux_cache);
+
+               goto out;
        }
 
-       atomic_inc(&skc->skc_ref);
+       /*
+        * Prevent concurrent cache reaping when contended.
+        */
+       if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
+               goto out;
 
        /*
         * When a reclaim function is available it may be invoked repeatedly
@@ -2106,12 +2080,23 @@ spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
                } while (do_reclaim);
        }
 
-       /* Reclaim from the cache, ignoring it's age and delay. */
+       /* Reclaim from the magazine then the slabs, ignoring age and delay. */
+       if (spl_kmem_cache_expire & KMC_EXPIRE_MEM) {
+               spl_kmem_magazine_t *skm;
+               unsigned long irq_flags;
+
+               local_irq_save(irq_flags);
+               skm = skc->skc_mag[smp_processor_id()];
+               spl_cache_flush(skc, skm, skm->skm_avail);
+               local_irq_restore(irq_flags);
+       }
+
        spl_slab_reclaim(skc, count, 1);
        clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
+       smp_wmb();
+       wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
+out:
        atomic_dec(&skc->skc_ref);
-
-       SEXIT;
 }
 EXPORT_SYMBOL(spl_kmem_cache_reap_now);
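The REAPING bit doubles as the wait condition for spl_cache_grow(); the two
halves of that handshake, condensed from the hunks above:

    /* Grower: sleep until any in-progress reap finishes, then recheck
     * the local magazine before allocating a new slab. */
    spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING, TASK_UNINTERRUPTIBLE);

    /* Reaper: clear the bit and publish the store before waking any
     * task sleeping on it. */
    clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
    smp_wmb();
    wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);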
 
@@ -2126,7 +2111,7 @@ spl_kmem_reap(void)
        sc.nr_to_scan = KMC_REAP_CHUNK;
        sc.gfp_mask = GFP_KERNEL;
 
-       __spl_kmem_cache_generic_shrinker(NULL, &sc);
+       (void) __spl_kmem_cache_generic_shrinker(NULL, &sc);
 }
 EXPORT_SYMBOL(spl_kmem_reap);
 
@@ -2176,7 +2161,6 @@ static int
 spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
 {
        int i;
-       SENTRY;
 
        spin_lock_init(lock);
        INIT_LIST_HEAD(list);
@@ -2184,7 +2168,7 @@ spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
        for (i = 0; i < size; i++)
                INIT_HLIST_HEAD(&kmem_table[i]);
 
-       SRETURN(0);
+       return (0);
 }
 
 static void
@@ -2193,7 +2177,6 @@ spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
        unsigned long flags;
        kmem_debug_t *kd;
        char str[17];
-       SENTRY;
 
        spin_lock_irqsave(lock, flags);
        if (!list_empty(list))
@@ -2206,140 +2189,16 @@ spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
                       kd->kd_func, kd->kd_line);
 
        spin_unlock_irqrestore(lock, flags);
-       SEXIT;
 }
 #else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
 #define spl_kmem_init_tracking(list, lock, size)
 #define spl_kmem_fini_tracking(list, lock)
 #endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
 
-static void
-spl_kmem_init_globals(void)
-{
-       struct zone *zone;
-
-       /* For now all zones are includes, it may be wise to restrict
-        * this to normal and highmem zones if we see problems. */
-        for_each_zone(zone) {
-
-                if (!populated_zone(zone))
-                        continue;
-
-               minfree += min_wmark_pages(zone);
-               desfree += low_wmark_pages(zone);
-               lotsfree += high_wmark_pages(zone);
-       }
-
-       /* Solaris default values */
-       swapfs_minfree = MAX(2*1024*1024 >> PAGE_SHIFT, physmem >> 3);
-       swapfs_reserve = MIN(4*1024*1024 >> PAGE_SHIFT, physmem >> 4);
-}
-
-/*
- * Called at module init when it is safe to use spl_kallsyms_lookup_name()
- */
-int
-spl_kmem_init_kallsyms_lookup(void)
-{
-#ifndef HAVE_GET_VMALLOC_INFO
-       get_vmalloc_info_fn = (get_vmalloc_info_t)
-               spl_kallsyms_lookup_name("get_vmalloc_info");
-       if (!get_vmalloc_info_fn) {
-               printk(KERN_ERR "Error: Unknown symbol get_vmalloc_info\n");
-               return -EFAULT;
-       }
-#endif /* HAVE_GET_VMALLOC_INFO */
-
-#ifdef HAVE_PGDAT_HELPERS
-# ifndef HAVE_FIRST_ONLINE_PGDAT
-       first_online_pgdat_fn = (first_online_pgdat_t)
-               spl_kallsyms_lookup_name("first_online_pgdat");
-       if (!first_online_pgdat_fn) {
-               printk(KERN_ERR "Error: Unknown symbol first_online_pgdat\n");
-               return -EFAULT;
-       }
-# endif /* HAVE_FIRST_ONLINE_PGDAT */
-
-# ifndef HAVE_NEXT_ONLINE_PGDAT
-       next_online_pgdat_fn = (next_online_pgdat_t)
-               spl_kallsyms_lookup_name("next_online_pgdat");
-       if (!next_online_pgdat_fn) {
-               printk(KERN_ERR "Error: Unknown symbol next_online_pgdat\n");
-               return -EFAULT;
-       }
-# endif /* HAVE_NEXT_ONLINE_PGDAT */
-
-# ifndef HAVE_NEXT_ZONE
-       next_zone_fn = (next_zone_t)
-               spl_kallsyms_lookup_name("next_zone");
-       if (!next_zone_fn) {
-               printk(KERN_ERR "Error: Unknown symbol next_zone\n");
-               return -EFAULT;
-       }
-# endif /* HAVE_NEXT_ZONE */
-
-#else /* HAVE_PGDAT_HELPERS */
-
-# ifndef HAVE_PGDAT_LIST
-       pgdat_list_addr = *(struct pglist_data **)
-               spl_kallsyms_lookup_name("pgdat_list");
-       if (!pgdat_list_addr) {
-               printk(KERN_ERR "Error: Unknown symbol pgdat_list\n");
-               return -EFAULT;
-       }
-# endif /* HAVE_PGDAT_LIST */
-#endif /* HAVE_PGDAT_HELPERS */
-
-#if defined(NEED_GET_ZONE_COUNTS) && !defined(HAVE_GET_ZONE_COUNTS)
-       get_zone_counts_fn = (get_zone_counts_t)
-               spl_kallsyms_lookup_name("get_zone_counts");
-       if (!get_zone_counts_fn) {
-               printk(KERN_ERR "Error: Unknown symbol get_zone_counts\n");
-               return -EFAULT;
-       }
-#endif  /* NEED_GET_ZONE_COUNTS && !HAVE_GET_ZONE_COUNTS */
-
-       /*
-        * It is now safe to initialize the global tunings which rely on
-        * the use of the for_each_zone() macro.  This macro in turns
-        * depends on the *_pgdat symbols which are now available.
-        */
-       spl_kmem_init_globals();
-
-#if !defined(HAVE_INVALIDATE_INODES) && !defined(HAVE_INVALIDATE_INODES_CHECK)
-       invalidate_inodes_fn = (invalidate_inodes_t)
-               spl_kallsyms_lookup_name("invalidate_inodes");
-       if (!invalidate_inodes_fn) {
-               printk(KERN_ERR "Error: Unknown symbol invalidate_inodes\n");
-               return -EFAULT;
-       }
-#endif /* !HAVE_INVALIDATE_INODES && !HAVE_INVALIDATE_INODES_CHECK */
-
-#ifndef HAVE_SHRINK_DCACHE_MEMORY
-       /* When shrink_dcache_memory_fn == NULL support is disabled */
-       shrink_dcache_memory_fn = (shrink_dcache_memory_t)
-               spl_kallsyms_lookup_name("shrink_dcache_memory");
-#endif /* HAVE_SHRINK_DCACHE_MEMORY */
-
-#ifndef HAVE_SHRINK_ICACHE_MEMORY
-       /* When shrink_icache_memory_fn == NULL support is disabled */
-       shrink_icache_memory_fn = (shrink_icache_memory_t)
-               spl_kallsyms_lookup_name("shrink_icache_memory");
-#endif /* HAVE_SHRINK_ICACHE_MEMORY */
-
-       return 0;
-}
-
 int
 spl_kmem_init(void)
 {
        int rc = 0;
-       SENTRY;
-
-       init_rwsem(&spl_kmem_cache_sem);
-       INIT_LIST_HEAD(&spl_kmem_cache_list);
-
-       spl_register_shrinker(&spl_kmem_cache_shrinker);
 
 #ifdef DEBUG_KMEM
        kmem_alloc_used_set(0);
@@ -2348,34 +2207,37 @@ spl_kmem_init(void)
        spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
        spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
 #endif
-       SRETURN(rc);
+
+       init_rwsem(&spl_kmem_cache_sem);
+       INIT_LIST_HEAD(&spl_kmem_cache_list);
+       spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
+           1, maxclsyspri, 1, 32, TASKQ_PREPOPULATE);
+
+       spl_register_shrinker(&spl_kmem_cache_shrinker);
+
+       return (rc);
 }
 
 void
 spl_kmem_fini(void)
 {
+       spl_unregister_shrinker(&spl_kmem_cache_shrinker);
+       taskq_destroy(spl_kmem_cache_taskq);
+
 #ifdef DEBUG_KMEM
        /* Display all unreclaimed memory addresses, including the
         * allocation size and the first few bytes of what's located
         * at that address to aid in debugging.  Performance is not
         * a serious concern here since it is module unload time. */
        if (kmem_alloc_used_read() != 0)
-               SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
-                   "kmem leaked %ld/%ld bytes\n",
+               printk(KERN_WARNING "kmem leaked %ld/%llu bytes\n",
                    kmem_alloc_used_read(), kmem_alloc_max);
 
-
        if (vmem_alloc_used_read() != 0)
-               SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
-                   "vmem leaked %ld/%ld bytes\n",
+               printk(KERN_WARNING "vmem leaked %ld/%llu bytes\n",
                    vmem_alloc_used_read(), vmem_alloc_max);
 
        spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
        spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
 #endif /* DEBUG_KMEM */
-       SENTRY;
-
-       spl_unregister_shrinker(&spl_kmem_cache_shrinker);
-
-       SEXIT;
 }
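The taskq which replaces the old delayed-work machinery is touched in four
places in this file; gathered here for reference, with the arguments exactly
as they appear in the hunks above:

    /* Module init: a single-threaded, pre-populated work queue. */
    spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
        1, maxclsyspri, 1, 32, TASKQ_PREPOPULATE);

    /* Cache grow: queue an asynchronous slab allocation. */
    taskq_dispatch_ent(spl_kmem_cache_taskq,
        spl_cache_grow_work, ska, 0, &ska->ska_tqe);

    /* Cache destroy: cancel any pending delayed task for this cache. */
    taskq_cancel_id(spl_kmem_cache_taskq, id);

    /* Module fini: tear the queue down after the shrinker is gone. */
    taskq_destroy(spl_kmem_cache_taskq);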