kmem_cache: Call constructor/destructor on each alloc/free
author Richard Yao <ryao@gentoo.org>
Tue, 17 Jun 2014 23:37:02 +0000 (19:37 -0400)
committer Brian Behlendorf <behlendorf1@llnl.gov>
Tue, 28 Oct 2014 16:21:08 +0000 (09:21 -0700)
This has a few benefits. First, it fixes a regression that "Rework
generic memory allocation interfaces" appears to have triggered in
splat's slab_reap and slab_age tests. Second, it makes porting code from
Illumos to ZFSOnLinux easier. Third, it has the side effect of making
reclaim from slab caches that specify reclaim functions an order of
magnitude faster: because destructors now run at free time, reclaim no
longer has to invoke them on every object it frees. The splat slab_reap
test usually took 30 to 40 seconds; with this change, it takes 3 to 4.

Signed-off-by: Richard Yao <ryao@gentoo.org>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #369
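
For context, here is a minimal sketch of what the new behavior means for a
consumer of the SPL kmem cache interface. It is not part of the commit:
the object type, field, and callback names below are hypothetical, and only
the spl_kmem_cache_* calls and KM_SLEEP come from the SPL interface itself.
After this patch the registered constructor runs on every allocation and
the destructor on every free, matching Illumos kmem_cache semantics,
instead of running when slabs are created and reclaimed.

#include <sys/kmem.h>

/* Hypothetical cached object, for illustration only. */
typedef struct my_obj {
	int mo_state;
} my_obj_t;

/* After this patch, this runs on EVERY spl_kmem_cache_alloc()... */
static int
my_obj_ctor(void *buf, void *priv, int flags)
{
	my_obj_t *mo = buf;

	mo->mo_state = 0;
	return (0);
}

/* ...and this runs on EVERY spl_kmem_cache_free(). */
static void
my_obj_dtor(void *buf, void *priv)
{
	my_obj_t *mo = buf;

	/* Tear down anything the constructor set up. */
	mo->mo_state = -1;
}

static void
example(void)
{
	spl_kmem_cache_t *skc;
	my_obj_t *mo;

	skc = spl_kmem_cache_create("my_obj_cache", sizeof (my_obj_t),
	    0, my_obj_ctor, my_obj_dtor, NULL, NULL, NULL, 0);

	mo = spl_kmem_cache_alloc(skc, KM_SLEEP);	/* ctor runs here */
	spl_kmem_cache_free(skc, mo);			/* dtor runs here */

	spl_kmem_cache_destroy(skc);
}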

module/spl/spl-kmem.c

index 6054f11d8dff9858febbbc090e2c224f310c3022..65aa277399dd9e34f1fcb9c0acdd88606d9107d9 100644
@@ -866,9 +866,6 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
                list_add_tail(&sko->sko_list, &sks->sks_free_list);
        }
 
-       list_for_each_entry(sko, &sks->sks_free_list, sko_list)
-               if (skc->skc_ctor)
-                       skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);
 out:
        if (rc) {
                if (skc->skc_flags & KMC_OFFSLAB)
@@ -974,9 +971,6 @@ spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
        list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
                ASSERT(sko->sko_magic == SKO_MAGIC);
 
-               if (skc->skc_dtor)
-                       skc->skc_dtor(sko->sko_addr, skc->skc_private);
-
                if (skc->skc_flags & KMC_OFFSLAB)
                        kv_free(skc, sko->sko_addr, size);
        }
@@ -1078,9 +1072,6 @@ spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
                SRETURN(-EINVAL);
        }
 
-       if (skc->skc_ctor)
-               skc->skc_ctor(ske->ske_obj, skc->skc_private, flags);
-
        *obj = ske->ske_obj;
 
        SRETURN(0);
@@ -1107,9 +1098,6 @@ spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
        if (unlikely(ske == NULL))
                SRETURN(-ENOENT);
 
-       if (skc->skc_dtor)
-               skc->skc_dtor(ske->ske_obj, skc->skc_private);
-
        kfree(ske->ske_obj);
        kfree(ske);
 
@@ -1938,13 +1926,9 @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
 
                do {
                        obj = kmem_cache_alloc(slc, flags | __GFP_COMP);
-                       if (obj && skc->skc_ctor)
-                               skc->skc_ctor(obj, skc->skc_private, flags);
-
                } while ((obj == NULL) && !(flags & KM_NOSLEEP));
 
-               atomic_dec(&skc->skc_ref);
-               SRETURN(obj);
+               goto ret;
        }
 
        local_irq_disable();
@@ -1973,12 +1957,20 @@ restart:
        ASSERT(obj);
        ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
 
+ret:
        /* Pre-emptively migrate object to CPU L1 cache */
-       prefetchw(obj);
+       if (obj) {
+               if (skc->skc_ctor)
+                       skc->skc_ctor(obj, skc->skc_private, flags);
+               else
+                       prefetchw(obj);
+       }
+
        atomic_dec(&skc->skc_ref);
 
        SRETURN(obj);
 }
+
 EXPORT_SYMBOL(spl_kmem_cache_alloc);
 
 /*
@@ -1998,13 +1990,16 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
        atomic_inc(&skc->skc_ref);
 
+       /*
+        * Run the destructor
+        */
+       if (skc->skc_dtor)
+               skc->skc_dtor(obj, skc->skc_private);
+
        /*
         * Free the object from the underlying Linux slab.
         */
        if (skc->skc_flags & KMC_SLAB) {
-               if (skc->skc_dtor)
-                       skc->skc_dtor(obj, skc->skc_private);
-
                kmem_cache_free(skc->skc_linux_cache, obj);
                goto out;
        }