@@ ... @@ spl_emergency_search(struct rb_root *root, void *obj)
	while (node) {
ske = container_of(node, spl_kmem_emergency_t, ske_node);
- if (address < (unsigned long)ske->ske_obj)
+ if (address < ske->ske_obj)
node = node->rb_left;
- else if (address > (unsigned long)ske->ske_obj)
+ else if (address > ske->ske_obj)
node = node->rb_right;
else
return (ske);
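
Note: both hunks above only compile because of a companion type change that is not part of this excerpt: ske_obj becomes an unsigned long (the return type of __get_free_pages()) instead of a void *. A sketch of the assumed structure:

    typedef struct spl_kmem_emergency {
        struct rb_node  ske_node;   /* red-black tree linkage */
        unsigned long   ske_obj;    /* buffer address, was void * */
    } spl_kmem_emergency_t;

Keying the tree on the integer address directly is what makes the remaining casts removable as well.
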
@@ ... @@ spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
spl_kmem_emergency_t *ske_tmp;
- unsigned long address = (unsigned long)ske->ske_obj;
+ unsigned long address = ske->ske_obj;
while (*new) {
ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
parent = *new;
- if (address < (unsigned long)ske_tmp->ske_obj)
+ if (address < ske_tmp->ske_obj)
new = &((*new)->rb_left);
- else if (address > (unsigned long)ske_tmp->ske_obj)
+ else if (address > ske_tmp->ske_obj)
new = &((*new)->rb_right);
else
return (0);
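
Note: returning 0 here reports a duplicate address. The successful-insert tail is elided above; it presumably finishes with the standard Linux rb-tree idiom (the return value of 1 is an assumption):

    /* No duplicate found: link the node where the walk ended. */
    rb_link_node(&ske->ske_node, parent, new);
    rb_insert_color(&ske->ske_node, root);
    return (1);
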
@@ ... @@ spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
gfp_t lflags = kmem_flags_convert(flags);
spl_kmem_emergency_t *ske;
+ int order = get_order(skc->skc_obj_size);
int empty;
/* Last chance use a partial slab if one now exists */
@@ ... @@
	ske = kmalloc(sizeof (*ske), lflags);
	if (ske == NULL)
		return (-ENOMEM);
- ske->ske_obj = kmalloc(skc->skc_obj_size, lflags);
- if (ske->ske_obj == NULL) {
+ ske->ske_obj = __get_free_pages(lflags, order);
+ if (ske->ske_obj == 0) {
kfree(ske);
return (-ENOMEM);
}
@@ ... @@
	spin_lock(&skc->skc_lock);
	empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
@@ ... @@
	spin_unlock(&skc->skc_lock);
if (unlikely(!empty)) {
- kfree(ske->ske_obj);
+ free_pages(ske->ske_obj, order);
kfree(ske);
return (-EINVAL);
}
- *obj = ske->ske_obj;
+ *obj = (void *)ske->ske_obj;
return (0);
}
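
Note: with the page allocator the size is expressed as an order, and get_order() rounds up to the smallest order whose span covers the request, so allocation and free must derive it from the same skc_obj_size. A minimal sketch of the pairing rule (size, addr, and the GFP flags here are hypothetical):

    int order = get_order(size);    /* with 4 KiB pages: 4096 -> 0, 4097 -> 1 */
    unsigned long addr = __get_free_pages(GFP_KERNEL, order);

    if (addr == 0)                  /* the page allocator returns 0, not NULL */
        return (-ENOMEM);
    /* ... use (void *)addr ... */
    free_pages(addr, order);        /* must pass the identical order */
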
@@ ... @@
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_emergency_t *ske;
+ int order = get_order(skc->skc_obj_size);
spin_lock(&skc->skc_lock);
ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
	if (ske == NULL) {
		spin_unlock(&skc->skc_lock);
		return (-ENOENT);
	}
@@ ... @@
- kfree(ske->ske_obj);
+ free_pages(ske->ske_obj, order);
kfree(ske);
return (0);
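
Note: the elided success path above has to unlink the object and drop the lock before the pages are released; a sketch of the assumed steps (the two counter names are assumptions, not shown in this excerpt):

    rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
    skc->skc_obj_emergency--;   /* assumed accounting fields */
    skc->skc_obj_total--;
    spin_unlock(&skc->skc_lock);

free_pages() can then run without the cache lock held.
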
@@ ... @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
	ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
- atomic_inc(&skc->skc_ref);
-
	/*
	 * Allocate directly from a Linux slab.  All optimizations are left
	 * to the underlying cache; we only need to guarantee that KM_SLEEP
	 * callers will never fail.
	 */
@@ ... @@
	prefetchw(obj);
}
- atomic_dec(&skc->skc_ref);
-
return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
@@ ... @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
	ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
- atomic_inc(&skc->skc_ref);
	/*
	 * Run the destructor.
	 */
	if (skc->skc_dtor)
		skc->skc_dtor(obj, skc->skc_private);
@@ ... @@
	if (skc->skc_flags & KMC_SLAB) {
kmem_cache_free(skc->skc_linux_cache, obj);
- goto out;
+ return;
}
@@ ... @@
	spin_unlock(&skc->skc_lock);
if (do_emergency && (spl_emergency_free(skc, obj) == 0))
- goto out;
+ return;
}
local_irq_save(flags);
@@ ... @@
	if (do_reclaim)
		spl_slab_reclaim(skc);
-out:
- atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_free);
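
Note: spl_kmem_cache_alloc() and spl_kmem_cache_free() drop the same pattern, and removing the atomic_dec() at the out: label is what allows the goto-to-return rewrites above. The removed pattern, for reference:

    atomic_inc(&skc->skc_ref);  /* pin the cache for the call */
    /* ... allocate or free one object ... */
    atomic_dec(&skc->skc_ref);  /* presumably drained by spl_kmem_cache_destroy() */

After this change the KMC_BIT_DESTROY assertion is the only guard, so callers must guarantee the cache outlives every in-flight call.
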
@@ ... @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink, struct shrink_control *sc)
	spl_kmem_cache_t *skc;
int alloc = 0;
+ /*
+ * No shrinking in a transaction context. Can cause deadlocks.
+ */
+ if (sc->nr_to_scan && spl_fstrans_check())
+ return (SHRINK_STOP);
+
down_read(&spl_kmem_cache_sem);
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
if (sc->nr_to_scan) {
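
Note: SHRINK_STOP ends this shrinker's scan for the current reclaim pass. spl_fstrans_check() reads a per-task flag that SPL sets around filesystem-transaction code; the convention it pairs with looks like:

    fstrans_cookie_t cookie;

    cookie = spl_fstrans_mark();    /* allocations here must not recurse into reclaim */
    /* ... transaction work that may allocate ... */
    spl_fstrans_unmark(cookie);

Shrinking a kmem cache from inside such a section could wait on the very transaction doing the allocation, hence the deadlock the new comment mentions.
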
@@ ... @@ spl_kmem_cache_init(void)
	init_rwsem(&spl_kmem_cache_sem);
INIT_LIST_HEAD(&spl_kmem_cache_list);
spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
- spl_kmem_cache_kmem_threads, maxclsyspri, 1, 32, TASKQ_PREPOPULATE);
+ spl_kmem_cache_kmem_threads, maxclsyspri,
+ spl_kmem_cache_kmem_threads * 8, INT_MAX,
+ TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
spl_register_shrinker(&spl_kmem_cache_shrinker);
return (0);
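
Note: the SPL signature is taskq_create(name, nthreads, pri, minalloc, maxalloc, flags). The old call capped queued entries at 32; the new one prepopulates eight entries per thread, removes the ceiling, and lets TASKQ_DYNAMIC create worker threads on demand. A hypothetical four-thread call for comparison:

    taskq_t *tq = taskq_create("example_taskq", 4, maxclsyspri,
        4 * 8,      /* minalloc, prepopulated by TASKQ_PREPOPULATE */
        INT_MAX,    /* maxalloc, no limit on pending entries */
        TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
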