if (skc->skc_flags & KMC_NOMAGAZINE)
return (0);
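+ /*
+ * Size the magazine pointer array at runtime for every possible
+ * CPU rather than the static worst case of skc_mag[NR_CPUS].
+ */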
+ skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
+ num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
skc->skc_mag_size = spl_magazine_size(skc);
skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
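+ /*
+ * Allocate a magazine for every possible CPU, not just those
+ * currently online, so a CPU hotplugged later already has one.
+ */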
- for_each_online_cpu(i) {
+ for_each_possible_cpu(i) {
skc->skc_mag[i] = spl_magazine_alloc(skc, i);
if (!skc->skc_mag[i]) {
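+ /* Unwind the magazines allocated so far, then the array itself. */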
for (i--; i >= 0; i--)
spl_magazine_free(skc->skc_mag[i]);
+ kfree(skc->skc_mag);
return (-ENOMEM);
}
}
if (skc->skc_flags & KMC_NOMAGAZINE)
return;
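+ /*
+ * Flush any cached objects back to the slab layer, then free
+ * each per-CPU magazine and the pointer array itself.
+ */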
- for_each_online_cpu(i) {
+ for_each_possible_cpu(i) {
skm = skc->skc_mag[i];
spl_cache_flush(skc, skm, skm->skm_avail);
spl_magazine_free(skm);
}
+
+ kfree(skc->skc_mag);
}
might_sleep();
- /*
- * Allocate memory for a new cache and initialize it. Unfortunately,
- * this usually ends up being a large allocation of ~32k because
- * we need to allocate enough memory for the worst case number of
- * cpus in the magazine, skc_mag[NR_CPUS].
- */
skc = kzalloc(sizeof (*skc), lflags);
if (skc == NULL)
return (NULL);
if (rc)
goto out;
} else {
+ unsigned long slabflags = 0;
+
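+ /*
+ * The Linux slab allocator cannot back objects larger than
+ * SPL_MAX_KMEM_ORDER_NR_PAGES pages, so fail creation for them.
+ */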
if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE)) {
rc = EINVAL;
goto out;
}
+#if defined(SLAB_USERCOPY)
+ /*
+ * Required for PAX-enabled kernels if the slab is to be
+ * used for copying between user and kernel space.
+ */
+ slabflags |= SLAB_USERCOPY;
+#endif
+
skc->skc_linux_cache = kmem_cache_create(
- skc->skc_name, size, align, 0, NULL);
+ skc->skc_name, size, align, slabflags, NULL);
if (skc->skc_linux_cache == NULL) {
rc = ENOMEM;
goto out;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
- atomic_inc(&skc->skc_ref);
-
/*
* Allocate directly from a Linux slab. All optimizations are left
* to the underlying cache; we only need to guarantee that KM_SLEEP
prefetchw(obj);
}
- atomic_dec(&skc->skc_ref);
-
return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
- atomic_inc(&skc->skc_ref);
/*
* Run the destructor
*/
if (skc->skc_flags & KMC_SLAB) {
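+ /* Slab-backed objects go straight back to the Linux slab. */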
kmem_cache_free(skc->skc_linux_cache, obj);
- goto out;
+ return;
}
spin_unlock(&skc->skc_lock);
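+ /* Emergency objects bypass the magazines and are freed immediately. */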
if (do_emergency && (spl_emergency_free(skc, obj) == 0))
- goto out;
+ return;
}
local_irq_save(flags);
if (do_reclaim)
spl_slab_reclaim(skc);
-out:
- atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_free);
init_rwsem(&spl_kmem_cache_sem);
INIT_LIST_HEAD(&spl_kmem_cache_list);
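+ /*
+ * Run the cache taskq with TASKQ_DYNAMIC so worker threads are
+ * spawned and reaped on demand, with a deeper prepopulated task
+ * list and no cap on outstanding tasks.
+ */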
spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
- spl_kmem_cache_kmem_threads, maxclsyspri, 1, 32, TASKQ_PREPOPULATE);
+ spl_kmem_cache_kmem_threads, maxclsyspri,
+ spl_kmem_cache_kmem_threads * 8, INT_MAX,
+ TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
spl_register_shrinker(&spl_kmem_cache_shrinker);
return (0);