Imported Upstream version 0.6.5.3
[mirror_spl-debian.git] module/spl/spl-kmem-cache.c
index cd3e543ba08ebed0e3d75c13f38d9d558b9fe8f3..5a8493fe4c3c73515d0af77a2e6a06d48523a66c 100644
@@ -805,15 +805,18 @@ spl_magazine_create(spl_kmem_cache_t *skc)
        if (skc->skc_flags & KMC_NOMAGAZINE)
                return (0);
 
+       skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
+           num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
        skc->skc_mag_size = spl_magazine_size(skc);
        skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
 
-       for_each_online_cpu(i) {
+       for_each_possible_cpu(i) {
                skc->skc_mag[i] = spl_magazine_alloc(skc, i);
                if (!skc->skc_mag[i]) {
                        for (i--; i >= 0; i--)
                                spl_magazine_free(skc->skc_mag[i]);
 
+                       kfree(skc->skc_mag);
                        return (-ENOMEM);
                }
        }
@@ -833,11 +836,13 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
        if (skc->skc_flags & KMC_NOMAGAZINE)
                return;
 
-       for_each_online_cpu(i) {
+       for_each_possible_cpu(i) {
                skm = skc->skc_mag[i];
                spl_cache_flush(skc, skm, skm->skm_avail);
                spl_magazine_free(skm);
        }
+
+       kfree(skc->skc_mag);
 }
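
Note on the two hunks above: the magazine pointer array is now sized with num_possible_cpus() and walked with for_each_possible_cpu(), so a slot exists even for CPUs that are offline at create time, and spl_magazine_destroy() frees the same array. A minimal standalone sketch of that pattern, using hypothetical my_* names rather than the SPL code itself:

/* Sketch only: one pointer slot per possible CPU, unwound on failure. */
#include <linux/slab.h>
#include <linux/cpumask.h>

struct my_mag { int mm_avail; };
struct my_cache { struct my_mag **mc_mags; };

static int
my_mags_create(struct my_cache *mc)
{
	int i;

	mc->mc_mags = kzalloc(sizeof (struct my_mag *) *
	    num_possible_cpus(), GFP_KERNEL);
	if (mc->mc_mags == NULL)
		return (-ENOMEM);

	for_each_possible_cpu(i) {
		mc->mc_mags[i] = kzalloc(sizeof (struct my_mag), GFP_KERNEL);
		if (mc->mc_mags[i] == NULL) {
			/* Assumes possible CPU ids are 0..N-1, as above. */
			while (--i >= 0)
				kfree(mc->mc_mags[i]);
			kfree(mc->mc_mags);
			return (-ENOMEM);
		}
	}

	return (0);
}

static void
my_mags_destroy(struct my_cache *mc)
{
	int i;

	for_each_possible_cpu(i)
		kfree(mc->mc_mags[i]);
	kfree(mc->mc_mags);
}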
 
 /*
@@ -880,12 +885,6 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
 
        might_sleep();
 
-       /*
-        * Allocate memory for a new cache and initialize it.  Unfortunately,
-        * this usually ends up being a large allocation of ~32k because
-        * we need to allocate enough memory for the worst case number of
-        * cpus in the magazine, skc_mag[NR_CPUS].
-        */
        skc = kzalloc(sizeof (*skc), lflags);
        if (skc == NULL)
                return (NULL);
@@ -986,13 +985,23 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
                if (rc)
                        goto out;
        } else {
+               unsigned long slabflags = 0;
+
                if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE)) {
                        rc = EINVAL;
                        goto out;
                }
 
+#if defined(SLAB_USERCOPY)
+               /*
+                * Required for PAX-enabled kernels if the slab is to be
+                * used for copying between user and kernel space.
+                */
+               slabflags |= SLAB_USERCOPY;
+#endif
+
                skc->skc_linux_cache = kmem_cache_create(
-                   skc->skc_name, size, align, 0, NULL);
+                   skc->skc_name, size, align, slabflags, NULL);
                if (skc->skc_linux_cache == NULL) {
                        rc = ENOMEM;
                        goto out;
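
For reference, slabflags is built up conditionally because SLAB_USERCOPY exists only on PaX/grsecurity-patched kernels; on a mainline kernel the #if compiles away and kmem_cache_create() is called with flags of 0, as before. A hedged sketch of the same compile-time pattern against the stock slab API, with an illustrative my_obj cache:

#include <linux/slab.h>

struct my_obj { char mo_buf[128]; };

static struct kmem_cache *my_obj_cache;

static int
my_obj_cache_init(void)
{
	unsigned long flags = 0;

#if defined(SLAB_USERCOPY)
	/* PaX/grsecurity only: whitelist this slab for user copies. */
	flags |= SLAB_USERCOPY;
#endif

	my_obj_cache = kmem_cache_create("my_obj_cache",
	    sizeof (struct my_obj), 0, flags, NULL);

	return (my_obj_cache ? 0 : -ENOMEM);
}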
@@ -1403,8 +1412,6 @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
 
-       atomic_inc(&skc->skc_ref);
-
        /*
         * Allocate directly from a Linux slab.  All optimizations are left
         * to the underlying cache we only need to guarantee that KM_SLEEP
@@ -1457,8 +1464,6 @@ ret:
                        prefetchw(obj);
        }
 
-       atomic_dec(&skc->skc_ref);
-
        return (obj);
 }
 EXPORT_SYMBOL(spl_kmem_cache_alloc);
@@ -1479,7 +1484,6 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
-       atomic_inc(&skc->skc_ref);
 
        /*
         * Run the destructor
@@ -1492,7 +1496,7 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
         */
        if (skc->skc_flags & KMC_SLAB) {
                kmem_cache_free(skc->skc_linux_cache, obj);
-               goto out;
+               return;
        }
 
        /*
@@ -1507,7 +1511,7 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
                spin_unlock(&skc->skc_lock);
 
                if (do_emergency && (spl_emergency_free(skc, obj) == 0))
-                       goto out;
+                       return;
        }
 
        local_irq_save(flags);
@@ -1538,8 +1542,6 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 
        if (do_reclaim)
                spl_slab_reclaim(skc);
-out:
-       atomic_dec(&skc->skc_ref);
 }
 EXPORT_SYMBOL(spl_kmem_cache_free);
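
The alloc and free hunks above drop the atomic_inc()/atomic_dec() of skc_ref that every allocation and free used to perform. That counter only guarded against a cache being destroyed while calls were still in flight, which callers must rule out themselves, and the ASSERT on KMC_BIT_DESTROY still catches misuse in debug builds, so the per-call atomics were hot-path overhead. A hedged consumer-side sketch of that lifetime contract, assuming the spl_kmem_cache_* entry points exported by this file and the nine-argument create signature used in this SPL version (my_node names are illustrative):

#include <sys/kmem.h>
#include <sys/kmem_cache.h>	/* header names per this SPL version */

typedef struct my_node { int mn_key; } my_node_t;

static spl_kmem_cache_t *my_node_cache;

static int
my_nodes_init(void)
{
	my_node_cache = spl_kmem_cache_create("my_node_cache",
	    sizeof (my_node_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	return (my_node_cache ? 0 : -ENOMEM);
}

static void
my_nodes_use(void)
{
	my_node_t *n = spl_kmem_cache_alloc(my_node_cache, KM_SLEEP);

	n->mn_key = 42;
	spl_kmem_cache_free(my_node_cache, n);
}

static void
my_nodes_fini(void)
{
	/* Legal only once no allocs or frees can still be running. */
	spl_kmem_cache_destroy(my_node_cache);
}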
 
@@ -1725,7 +1727,9 @@ spl_kmem_cache_init(void)
        init_rwsem(&spl_kmem_cache_sem);
        INIT_LIST_HEAD(&spl_kmem_cache_list);
        spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
-           spl_kmem_cache_kmem_threads, maxclsyspri, 1, 32, TASKQ_PREPOPULATE);
+           spl_kmem_cache_kmem_threads, maxclsyspri,
+           spl_kmem_cache_kmem_threads * 8, INT_MAX,
+           TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
        spl_register_shrinker(&spl_kmem_cache_shrinker);
 
        return (0);
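
The init hunk above turns the cache taskq from a small fixed queue (minalloc 1, maxalloc 32) into a dynamic one whose worker threads are spawned on demand up to spl_kmem_cache_kmem_threads and whose pending-entry pool is effectively unbounded, so deferred slab work no longer backs up behind a tiny prepopulated queue. A hedged sketch of the taskq API as used here, with hypothetical my_* names:

#include <linux/kernel.h>
#include <sys/taskq.h>

static taskq_t *my_tq;

static void
my_work(void *arg)
{
	/* deferred work goes here */
}

static int
my_tq_init(int nthreads)
{
	my_tq = taskq_create("my_tq", nthreads, maxclsyspri,
	    nthreads * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
	if (my_tq == NULL)
		return (-ENOMEM);

	/* Queue one unit of work and wait for the taskq to drain. */
	if (taskq_dispatch(my_tq, my_work, NULL, TQ_SLEEP) == 0)
		return (-ENOMEM);
	taskq_wait(my_tq);

	return (0);
}

static void
my_tq_fini(void)
{
	taskq_destroy(my_tq);
}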