Invert minclsyspri and maxclsyspri

diff --git a/module/spl/spl-kmem-cache.c b/module/spl/spl-kmem-cache.c
index 38fee703db218df5b22106d429b21660eb3ea07d..a83c9f3ae8c146ac19d9289f1dec5919266f484a 100644
@@ -459,9 +459,9 @@ spl_emergency_search(struct rb_root *root, void *obj)
        while (node) {
                ske = container_of(node, spl_kmem_emergency_t, ske_node);
 
-               if (address < (unsigned long)ske->ske_obj)
+               if (address < ske->ske_obj)
                        node = node->rb_left;
-               else if (address > (unsigned long)ske->ske_obj)
+               else if (address > ske->ske_obj)
                        node = node->rb_right;
                else
                        return (ske);
@@ -475,15 +475,15 @@ spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
 {
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        spl_kmem_emergency_t *ske_tmp;
-       unsigned long address = (unsigned long)ske->ske_obj;
+       unsigned long address = ske->ske_obj;
 
        while (*new) {
                ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
 
                parent = *new;
-               if (address < (unsigned long)ske_tmp->ske_obj)
+               if (address < ske_tmp->ske_obj)
                        new = &((*new)->rb_left);
-               else if (address > (unsigned long)ske_tmp->ske_obj)
+               else if (address > ske_tmp->ske_obj)
                        new = &((*new)->rb_right);
                else
                        return (0);
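
In the two hunks above, the (unsigned long) casts vanish because the ske_obj
field itself changes type: it now holds the raw unsigned long address returned
by __get_free_pages() (introduced in the next hunk) rather than a void * from
kmalloc(). A sketch of the resulting tracking struct; the layout is inferred
from this diff, not copied from the header:

    /* Sketch only: inferred from this diff, not verbatim from the header. */
    typedef struct spl_kmem_emergency {
            struct rb_node  ske_node;   /* emergency tree linkage, keyed by address */
            unsigned long   ske_obj;    /* buffer address; was a void * before this change */
    } spl_kmem_emergency_t;
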
@@ -503,6 +503,7 @@ spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
 {
        gfp_t lflags = kmem_flags_convert(flags);
        spl_kmem_emergency_t *ske;
+       int order = get_order(skc->skc_obj_size);
        int empty;
 
        /* Last chance use a partial slab if one now exists */
@@ -516,8 +517,8 @@ spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
        if (ske == NULL)
                return (-ENOMEM);
 
-       ske->ske_obj = kmalloc(skc->skc_obj_size, lflags);
-       if (ske->ske_obj == NULL) {
+       ske->ske_obj = __get_free_pages(lflags, order);
+       if (ske->ske_obj == 0) {
                kfree(ske);
                return (-ENOMEM);
        }
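
__get_free_pages() hands out physically contiguous pages and takes an order,
i.e. log2 of the page count, so get_order() rounds skc_obj_size up to the next
power-of-two number of pages. A small userspace sketch of that rounding
(get_order_sketch and the 4 KiB page size are my assumptions, not kernel code):

    #include <stdio.h>

    /* Userspace sketch of the kernel's get_order(): the smallest order such
     * that (1 << order) pages cover the requested size.  4 KiB pages assumed. */
    #define PAGE_SIZE_SKETCH 4096UL

    static int get_order_sketch(unsigned long size)
    {
            int order = 0;
            unsigned long pages = (size + PAGE_SIZE_SKETCH - 1) / PAGE_SIZE_SKETCH;

            while ((1UL << order) < pages)
                    order++;
            return (order);
    }

    int main(void)
    {
            /* A 96 KiB object needs order 5: 32 pages, i.e. 128 KiB. */
            printf("order(96 KiB) = %d\n", get_order_sketch(96 * 1024));
            return (0);
    }

The same order is recomputed from skc_obj_size in spl_emergency_free() below,
so the __get_free_pages()/free_pages() pair always agrees.
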
@@ -533,12 +534,12 @@ spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
        spin_unlock(&skc->skc_lock);
 
        if (unlikely(!empty)) {
-               kfree(ske->ske_obj);
+               free_pages(ske->ske_obj, order);
                kfree(ske);
                return (-EINVAL);
        }
 
-       *obj = ske->ske_obj;
+       *obj = (void *)ske->ske_obj;
 
        return (0);
 }
@@ -550,6 +551,7 @@ static int
 spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
 {
        spl_kmem_emergency_t *ske;
+       int order = get_order(skc->skc_obj_size);
 
        spin_lock(&skc->skc_lock);
        ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
@@ -563,7 +565,7 @@ spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
        if (ske == NULL)
                return (-ENOENT);
 
-       kfree(ske->ske_obj);
+       free_pages(ske->ske_obj, order);
        kfree(ske);
 
        return (0);
@@ -1401,8 +1403,6 @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
 
-       atomic_inc(&skc->skc_ref);
-
        /*
         * Allocate directly from a Linux slab.  All optimizations are left
         * to the underlying cache we only need to guarantee that KM_SLEEP
@@ -1455,8 +1455,6 @@ ret:
                        prefetchw(obj);
        }
 
-       atomic_dec(&skc->skc_ref);
-
        return (obj);
 }
 EXPORT_SYMBOL(spl_kmem_cache_alloc);
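
Dropping the atomic_inc()/atomic_dec() pair removes two atomic operations on a
cache line shared by every CPU from each allocation; the counting is
presumably redundant because a caller must keep the cache alive across its own
alloc/free calls anyway. spl_kmem_cache_free() below gets the same treatment,
which is also why its goto out sites collapse into plain returns once the out:
label loses its atomic_dec(). The cost being avoided is classic cache-line
bouncing; a hedged userspace illustration, not SPL code (compile with
-pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Four threads hammering one shared counter, the way every
     * spl_kmem_cache_alloc()/free() pair used to hammer skc_ref. */
    static atomic_long refcnt;

    static void *worker(void *arg)
    {
            (void)arg;
            for (long i = 0; i < 10000000L; i++) {
                    atomic_fetch_add(&refcnt, 1);   /* was atomic_inc(&skc->skc_ref) */
                    atomic_fetch_sub(&refcnt, 1);   /* was atomic_dec(&skc->skc_ref) */
            }
            return (NULL);
    }

    int main(void)
    {
            pthread_t t[4];

            for (int i = 0; i < 4; i++)
                    pthread_create(&t[i], NULL, worker, NULL);
            for (int i = 0; i < 4; i++)
                    pthread_join(t[i], NULL);
            printf("final refcnt = %ld\n", (long)atomic_load(&refcnt));
            return (0);
    }
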
@@ -1477,7 +1475,6 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
-       atomic_inc(&skc->skc_ref);
 
        /*
         * Run the destructor
@@ -1490,7 +1487,7 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
         */
        if (skc->skc_flags & KMC_SLAB) {
                kmem_cache_free(skc->skc_linux_cache, obj);
-               goto out;
+               return;
        }
 
        /*
@@ -1505,7 +1502,7 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
                spin_unlock(&skc->skc_lock);
 
                if (do_emergency && (spl_emergency_free(skc, obj) == 0))
-                       goto out;
+                       return;
        }
 
        local_irq_save(flags);
@@ -1536,8 +1533,6 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 
        if (do_reclaim)
                spl_slab_reclaim(skc);
-out:
-       atomic_dec(&skc->skc_ref);
 }
 EXPORT_SYMBOL(spl_kmem_cache_free);
 
@@ -1572,6 +1567,12 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
        spl_kmem_cache_t *skc;
        int alloc = 0;
 
+       /*
+        * No shrinking in a transaction context.  Can cause deadlocks.
+        */
+       if (sc->nr_to_scan && spl_fstrans_check())
+               return (SHRINK_STOP);
+
        down_read(&spl_kmem_cache_sem);
        list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
                if (sc->nr_to_scan) {
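
spl_fstrans_check() reads a per-task flag meaning "this thread is inside a
filesystem transaction"; if reclaim ran the shrinker from such a context it
could re-enter the filesystem and deadlock, so the scan is refused with
SHRINK_STOP. The nr_to_scan test keeps count-only invocations (nr_to_scan ==
0) working, since merely counting objects is safe. A userspace sketch of the
guard pattern; every *_sketch name is mine:

    #include <stdbool.h>
    #include <stdio.h>

    #define SHRINK_STOP_SKETCH (~0UL)   /* the kernel's SHRINK_STOP is also ~0UL */

    /* Per-thread "inside a filesystem transaction" flag, standing in for the
     * task flag that spl_fstrans_check() tests. */
    static _Thread_local bool fstrans_active;

    static unsigned long shrinker_scan_sketch(unsigned long nr_to_scan)
    {
            if (nr_to_scan && fstrans_active)
                    return (SHRINK_STOP_SKETCH);    /* defer reclaim entirely */
            /* ... normal slab reaping would happen here ... */
            return (0);
    }

    int main(void)
    {
            fstrans_active = true;
            printf("scan inside transaction -> %lx\n", shrinker_scan_sketch(128));
            return (0);
    }
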
@@ -1717,7 +1718,9 @@ spl_kmem_cache_init(void)
        init_rwsem(&spl_kmem_cache_sem);
        INIT_LIST_HEAD(&spl_kmem_cache_list);
        spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
-           spl_kmem_cache_kmem_threads, maxclsyspri, 1, 32, TASKQ_PREPOPULATE);
+           spl_kmem_cache_kmem_threads, maxclsyspri,
+           spl_kmem_cache_kmem_threads * 8, INT_MAX,
+           TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
        spl_register_shrinker(&spl_kmem_cache_shrinker);
 
        return (0);
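
The taskq change preallocates nthreads * 8 task entries instead of 32, lifts
the entry cap to INT_MAX, and makes the queue dynamic so worker threads are
spawned on demand up to the configured maximum. An annotated form of the call;
the argument roles follow the SPL taskq_create(name, nthreads, pri, minalloc,
maxalloc, flags) prototype, and the comments are mine (kernel-only code, shown
for illustration):

    spl_kmem_cache_taskq = taskq_create(
        "spl_kmem_cache",                   /* thread name prefix */
        spl_kmem_cache_kmem_threads,        /* ceiling on worker threads */
        maxclsyspri,                        /* thread priority */
        spl_kmem_cache_kmem_threads * 8,    /* minalloc: prepopulated entries */
        INT_MAX,                            /* maxalloc: effectively unbounded */
        TASKQ_PREPOPULATE | TASKQ_DYNAMIC); /* prealloc now, threads on demand */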