* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
- * For details, see <http://github.com/behlendorf/spl/>.
+ * For details, see <http://zfsonlinux.org/>.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
#define SS_DEBUG_SUBSYS SS_KMEM
+/*
+ * Cache expiration was implemented because it was part of the default Solaris
+ * kmem_cache behavior. The idea is that per-cpu objects which haven't been
+ * accessed in several seconds should be returned to their slabs. The native
+ * Linux slab implementation, on the other hand, never moves objects back to
+ * the slabs unless there is memory pressure on the system. By default both
+ * methods are disabled, but they may be enabled by setting KMC_EXPIRE_AGE or
+ * KMC_EXPIRE_MEM.
+ */
+unsigned int spl_kmem_cache_expire = 0;
+EXPORT_SYMBOL(spl_kmem_cache_expire);
+module_param(spl_kmem_cache_expire, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)");
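+/*
+ * For example, both methods could be enabled at run time with a write to
+ * the module parameter (a sketch, assuming the standard sysfs path):
+ *
+ *   echo 3 >/sys/module/spl/parameters/spl_kmem_cache_expire
+ *
+ * where 3 = KMC_EXPIRE_AGE (0x1) | KMC_EXPIRE_MEM (0x2).
+ */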
+
/*
* The minimum amount of memory measured in pages to be free at all
* times on the system. This is similar to Linux's zone->pages_min
#endif /* NEED_GET_ZONE_COUNTS */
EXPORT_SYMBOL(spl_global_page_state);
-#if !defined(HAVE_INVALIDATE_INODES) && !defined(HAVE_INVALIDATE_INODES_CHECK)
-invalidate_inodes_t invalidate_inodes_fn = SYMBOL_POISON;
-EXPORT_SYMBOL(invalidate_inodes_fn);
-#endif /* !HAVE_INVALIDATE_INODES && !HAVE_INVALIDATE_INODES_CHECK */
-
#ifndef HAVE_SHRINK_DCACHE_MEMORY
shrink_dcache_memory_t shrink_dcache_memory_fn = SYMBOL_POISON;
EXPORT_SYMBOL(shrink_dcache_memory_fn);
struct list_head spl_kmem_cache_list; /* List of caches */
struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
+taskq_t *spl_kmem_cache_taskq; /* Task queue for ageing / reclaim */
-static int spl_cache_flush(spl_kmem_cache_t *skc,
- spl_kmem_magazine_t *skm, int flush);
+static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker);
SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
SEXIT;
}
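+/*
+ * Locate the passed object in the red-black tree of emergency objects,
+ * which is keyed by object address. Returns the matching entry or NULL.
+ * The caller is expected to hold skc->skc_lock.
+ */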
+static spl_kmem_emergency_t *
+spl_emergency_search(struct rb_root *root, void *obj)
+{
+ struct rb_node *node = root->rb_node;
+ spl_kmem_emergency_t *ske;
+ unsigned long address = (unsigned long)obj;
+
+ while (node) {
+ ske = container_of(node, spl_kmem_emergency_t, ske_node);
+
+ if (address < (unsigned long)ske->ske_obj)
+ node = node->rb_left;
+ else if (address > (unsigned long)ske->ske_obj)
+ node = node->rb_right;
+ else
+ return ske;
+ }
+
+ return NULL;
+}
+
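+/*
+ * Insert an emergency object into the red-black tree, keyed by its
+ * address. Returns 1 on success, or 0 if an object with the same
+ * address is already present. Called with skc->skc_lock held.
+ */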
+static int
+spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+ spl_kmem_emergency_t *ske_tmp;
+ unsigned long address = (unsigned long)ske->ske_obj;
+
+ while (*new) {
+ ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
+
+ parent = *new;
+ if (address < (unsigned long)ske_tmp->ske_obj)
+ new = &((*new)->rb_left);
+ else if (address > (unsigned long)ske_tmp->ske_obj)
+ new = &((*new)->rb_right);
+ else
+ return 0;
+ }
+
+ rb_link_node(&ske->ske_node, parent, new);
+ rb_insert_color(&ske->ske_node, root);
+
+ return 1;
+}
+
/*
- * Allocate a single emergency object for use by the caller.
+ * Allocate a single emergency object and track it in a red-black tree.
*/
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
SRETURN(-ENOMEM);
}
- if (skc->skc_ctor)
- skc->skc_ctor(ske->ske_obj, skc->skc_private, flags);
-
spin_lock(&skc->skc_lock);
- skc->skc_obj_total++;
- skc->skc_obj_emergency++;
- if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
- skc->skc_obj_emergency_max = skc->skc_obj_emergency;
-
- list_add(&ske->ske_list, &skc->skc_emergency_list);
+ empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
+ if (likely(empty)) {
+ skc->skc_obj_total++;
+ skc->skc_obj_emergency++;
+ if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
+ skc->skc_obj_emergency_max = skc->skc_obj_emergency;
+ }
spin_unlock(&skc->skc_lock);
+ if (unlikely(!empty)) {
+ kfree(ske->ske_obj);
+ kfree(ske);
+ SRETURN(-EINVAL);
+ }
+
+ if (skc->skc_ctor)
+ skc->skc_ctor(ske->ske_obj, skc->skc_private, flags);
+
*obj = ske->ske_obj;
SRETURN(0);
}
/*
- * Free the passed object if it is an emergency object or a normal slab
- * object. Currently this is done by walking what should be a short list of
- * emergency objects. If this proves to be too inefficient we can replace
- * the simple list with a hash.
+ * Locate the passed object in the red-black tree and free it.
*/
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
- spl_kmem_emergency_t *m, *n, *ske = NULL;
+ spl_kmem_emergency_t *ske;
SENTRY;
spin_lock(&skc->skc_lock);
- list_for_each_entry_safe(m, n, &skc->skc_emergency_list, ske_list) {
- if (m->ske_obj == obj) {
- list_del(&m->ske_list);
- skc->skc_obj_emergency--;
- skc->skc_obj_total--;
- ske = m;
- break;
- }
+ ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
+ if (likely(ske)) {
+ rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
+ skc->skc_obj_emergency--;
+ skc->skc_obj_total--;
}
spin_unlock(&skc->skc_lock);
- if (ske == NULL)
+ if (unlikely(ske == NULL))
SRETURN(-ENOENT);
if (skc->skc_dtor)
}
/*
- * Called regularly on all caches to age objects out of the magazines
- * which have not been access in skc->skc_delay seconds. This prevents
- * idle magazines from holding memory which might be better used by
- * other caches or parts of the system. The delay is present to
- * prevent thrashing the magazine.
+ * Release objects from the per-cpu magazine back to their slab. The flush
+ * argument contains the max number of entries to remove from the magazine.
*/
+static void
+__spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
+{
+ int i, count = MIN(flush, skm->skm_avail);
+ SENTRY;
+
+ ASSERT(skc->skc_magic == SKC_MAGIC);
+ ASSERT(skm->skm_magic == SKM_MAGIC);
+ ASSERT(spin_is_locked(&skc->skc_lock));
+
+ for (i = 0; i < count; i++)
+ spl_cache_shrink(skc, skm->skm_objs[i]);
+
+ skm->skm_avail -= count;
+ memmove(skm->skm_objs, &(skm->skm_objs[count]),
+ sizeof(void *) * skm->skm_avail);
+
+ SEXIT;
+}
+
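+/*
+ * Locked wrapper around __spl_cache_flush(). Callers which already hold
+ * skc->skc_lock, such as spl_magazine_age(), use __spl_cache_flush()
+ * directly.
+ */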
+static void
+spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
+{
+ spin_lock(&skc->skc_lock);
+ __spl_cache_flush(skc, skm, flush);
+ spin_unlock(&skc->skc_lock);
+}
+
static void
spl_magazine_age(void *data)
{
- spl_kmem_magazine_t *skm =
- spl_get_work_data(data, spl_kmem_magazine_t, skm_work.work);
- spl_kmem_cache_t *skc = skm->skm_cache;
+ spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
+ spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
ASSERT(skm->skm_magic == SKM_MAGIC);
- ASSERT(skc->skc_magic == SKC_MAGIC);
- ASSERT(skc->skc_mag[skm->skm_cpu] == skm);
+ ASSERT(skm->skm_cpu == smp_processor_id());
+ ASSERT(irqs_disabled());
+
+ /* There are no available objects or they are too young to age out */
+ if ((skm->skm_avail == 0) ||
+ time_before(jiffies, skm->skm_age + skc->skc_delay * HZ))
+ return;
- if (skm->skm_avail > 0 &&
- time_after(jiffies, skm->skm_age + skc->skc_delay * HZ))
- (void)spl_cache_flush(skc, skm, skm->skm_refill);
+ /*
+ * Because we're executing in interrupt context we may have
+ * interrupted the holder of this lock. To avoid a potential
+ * deadlock, return if the lock is contended.
+ */
+ if (!spin_trylock(&skc->skc_lock))
+ return;
- if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
- schedule_delayed_work_on(skm->skm_cpu, &skm->skm_work,
- skc->skc_delay / 3 * HZ);
+ __spl_cache_flush(skc, skm, skm->skm_refill);
+ spin_unlock(&skc->skc_lock);
}
/*
- * Called regularly to keep a downward pressure on the size of idle
- * magazines and to release free slabs from the cache. This function
- * never calls the registered reclaim function, that only occurs
- * under memory pressure or with a direct call to spl_kmem_reap().
+ * Called regularly to keep a downward pressure on the cache.
+ *
+ * Objects older than skc->skc_delay seconds in the per-cpu magazines will
+ * be returned to their slabs. This is done to prevent idle magazines from
+ * holding memory which could be better used elsewhere. The delay is
+ * present to prevent thrashing the magazine.
+ *
+ * The newly released objects may result in empty partial slabs. Those
+ * slabs are released back to the system; otherwise, moving the objects
+ * out of the magazines would be wasted work.
*/
static void
spl_cache_age(void *data)
{
- spl_kmem_cache_t *skc =
- spl_get_work_data(data, spl_kmem_cache_t, skc_work.work);
+ spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
+ taskqid_t id = 0;
ASSERT(skc->skc_magic == SKC_MAGIC);
+
+ /* Dynamically disabled at run time */
+ if (!(spl_kmem_cache_expire & KMC_EXPIRE_AGE))
+ return;
+
+ atomic_inc(&skc->skc_ref);
+ spl_on_each_cpu(spl_magazine_age, skc, 1);
spl_slab_reclaim(skc, skc->skc_reap, 0);
- if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
- schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
+ while (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && !id) {
+ id = taskq_dispatch_delay(
+ spl_kmem_cache_taskq, spl_cache_age, skc, TQ_SLEEP,
+ ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
+
+ /* If destroy was issued after dispatch, cancel it immediately */
+ if (test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && id)
+ taskq_cancel_id(spl_kmem_cache_taskq, id);
+ }
+
+ spin_lock(&skc->skc_lock);
+ skc->skc_taskqid = id;
+ spin_unlock(&skc->skc_lock);
+
+ atomic_dec(&skc->skc_ref);
}
/*
skm->skm_size = skc->skc_mag_size;
skm->skm_refill = skc->skc_mag_refill;
skm->skm_cache = skc;
- spl_init_delayed_work(&skm->skm_work, spl_magazine_age, skm);
skm->skm_age = jiffies;
skm->skm_cpu = cpu;
}
}
}
- /* Only after everything is allocated schedule magazine work */
- for_each_online_cpu(i)
- schedule_delayed_work_on(i, &skc->skc_mag[i]->skm_work,
- skc->skc_delay / 3 * HZ);
-
SRETURN(0);
}
for_each_online_cpu(i) {
skm = skc->skc_mag[i];
- (void)spl_cache_flush(skc, skm, skm->skm_avail);
+ spl_cache_flush(skc, skm, skm->skm_avail);
spl_magazine_free(skm);
}
void *priv, void *vmp, int flags)
{
spl_kmem_cache_t *skc;
- int rc, kmem_flags = KM_SLEEP;
+ int rc;
SENTRY;
ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);
ASSERT(vmp == NULL);
- /* We may be called when there is a non-zero preempt_count or
- * interrupts are disabled is which case we must not sleep.
- */
- if (current_thread_info()->preempt_count || irqs_disabled())
- kmem_flags = KM_NOSLEEP;
+ might_sleep();
- /* Allocate memory for a new cache an initialize it. Unfortunately,
+ /*
+ * Allocate memory for a new cache and initialize it. Unfortunately,
* this usually ends up being a large allocation of ~32k because
* we need to allocate enough memory for the worst case number of
* cpus in the magazine, skc_mag[NR_CPUS]. Because of this we
- * explicitly pass KM_NODEBUG to suppress the kmem warning */
- skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc),
- kmem_flags | KM_NODEBUG);
+ * explicitly pass KM_NODEBUG to suppress the kmem warning
+ */
+ skc = kmem_zalloc(sizeof(*skc), KM_SLEEP | KM_NODEBUG);
if (skc == NULL)
SRETURN(NULL);
skc->skc_magic = SKC_MAGIC;
skc->skc_name_size = strlen(name) + 1;
- skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags);
+ skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, KM_SLEEP);
if (skc->skc_name == NULL) {
kmem_free(skc, sizeof(*skc));
SRETURN(NULL);
INIT_LIST_HEAD(&skc->skc_list);
INIT_LIST_HEAD(&skc->skc_complete_list);
INIT_LIST_HEAD(&skc->skc_partial_list);
- INIT_LIST_HEAD(&skc->skc_emergency_list);
+ skc->skc_emergency_tree = RB_ROOT;
spin_lock_init(&skc->skc_lock);
init_waitqueue_head(&skc->skc_waitq);
skc->skc_slab_fail = 0;
if (rc)
SGOTO(out, rc);
- spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc);
- schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
+ if (spl_kmem_cache_expire & KMC_EXPIRE_AGE)
+ skc->skc_taskqid = taskq_dispatch_delay(spl_kmem_cache_taskq,
+ spl_cache_age, skc, TQ_SLEEP,
+ ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
down_write(&spl_kmem_cache_sem);
list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
DECLARE_WAIT_QUEUE_HEAD(wq);
- int i;
+ taskqid_t id;
SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
list_del_init(&skc->skc_list);
up_write(&spl_kmem_cache_sem);
- /* Cancel any and wait for any pending delayed work */
+ /* Cancel and wait for any pending delayed tasks */
VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
- cancel_delayed_work_sync(&skc->skc_work);
- for_each_online_cpu(i)
- cancel_delayed_work_sync(&skc->skc_mag[i]->skm_work);
- flush_scheduled_work();
+ spin_lock(&skc->skc_lock);
+ id = skc->skc_taskqid;
+ spin_unlock(&skc->skc_lock);
+
+ taskq_cancel_id(spl_kmem_cache_taskq, id);
/* Wait until all current callers complete, this is mainly
* to catch the case where a low memory situation triggers a
ASSERT3U(skc->skc_obj_total, ==, 0);
ASSERT3U(skc->skc_obj_emergency, ==, 0);
ASSERT(list_empty(&skc->skc_complete_list));
- ASSERT(list_empty(&skc->skc_emergency_list));
kmem_free(skc->skc_name, skc->skc_name_size);
spin_unlock(&skc->skc_lock);
static void
spl_cache_grow_work(void *data)
{
- spl_kmem_alloc_t *ska =
- spl_get_work_data(data, spl_kmem_alloc_t, ska_work.work);
+ spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
spl_kmem_cache_t *skc = ska->ska_cache;
spl_kmem_slab_t *sks;
return !test_bit(KMC_BIT_GROWING, &skc->skc_flags);
}
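+/*
+ * Bit-wait callback passed to wait_on_bit() below; it simply yields the
+ * cpu until KMC_BIT_REAPING is cleared and the waiter is woken.
+ */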
+static int
+spl_cache_reclaim_wait(void *word)
+{
+ schedule();
+ return 0;
+}
+
/*
* No available objects on any slabs, create a new slab.
*/
*obj = NULL;
/*
- * Before allocating a new slab check if the slab is being reaped.
- * If it is there is a good chance we can wait until it finishes
- * and then use one of the newly freed but not aged-out slabs.
+ * Before allocating a new slab, wait for any reaping to complete and
+ * then return so the local magazine can be rechecked for new objects.
*/
- if (test_bit(KMC_BIT_REAPING, &skc->skc_flags))
- SRETURN(-EAGAIN);
+ if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
+ rc = wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
+ spl_cache_reclaim_wait, TASK_UNINTERRUPTIBLE);
+ SRETURN(rc ? rc : -EAGAIN);
+ }
/*
* This is handled by dispatching a work request to the global work
atomic_inc(&skc->skc_ref);
ska->ska_cache = skc;
- ska->ska_flags = flags;
- spl_init_delayed_work(&ska->ska_work, spl_cache_grow_work, ska);
- schedule_delayed_work(&ska->ska_work, 0);
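+ /*
+ * __GFP_FS is masked off here, presumably so the deferred allocation
+ * cannot re-enter the filesystem and deadlock during reclaim.
+ */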
+ ska->ska_flags = flags & ~__GFP_FS;
+ taskq_init_ent(&ska->ska_tqe);
+ taskq_dispatch_ent(spl_kmem_cache_taskq,
+ spl_cache_grow_work, ska, 0, &ska->ska_tqe);
}
/*
SEXIT;
}
-/*
- * Release a batch of objects from a per-cpu magazine back to their
- * respective slabs. This occurs when we exceed the magazine size,
- * are under memory pressure, when the cache is idle, or during
- * cache cleanup. The flush argument contains the number of entries
- * to remove from the magazine.
- */
-static int
-spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
-{
- int i, count = MIN(flush, skm->skm_avail);
- SENTRY;
-
- ASSERT(skc->skc_magic == SKC_MAGIC);
- ASSERT(skm->skm_magic == SKM_MAGIC);
-
- /*
- * XXX: Currently we simply return objects from the magazine to
- * the slabs in fifo order. The ideal thing to do from a memory
- * fragmentation standpoint is to cheaply determine the set of
- * objects in the magazine which will result in the largest
- * number of free slabs if released from the magazine.
- */
- spin_lock(&skc->skc_lock);
- for (i = 0; i < count; i++)
- spl_cache_shrink(skc, skm->skm_objs[i]);
-
- skm->skm_avail -= count;
- memmove(skm->skm_objs, &(skm->skm_objs[count]),
- sizeof(void *) * skm->skm_avail);
-
- spin_unlock(&skc->skc_lock);
-
- SRETURN(count);
-}
-
/*
* Allocate an object from the per-cpu magazine, or if the magazine
* is empty directly allocate from a slab and repopulate the magazine.
atomic_inc(&skc->skc_ref);
/*
- * Emergency objects are never part of the virtual address space
- * so if we get a virtual address we can optimize this check out.
+ * Only virtual slabs may have emergency objects, and these objects
+ * are guaranteed to have physical addresses. They must be removed
+ * from the tree of emergency objects and then freed.
*/
- if (!kmem_virt(obj) && !spl_emergency_free(skc, obj))
- SGOTO(out, 0);
+ if ((skc->skc_flags & KMC_VMEM) && !kmem_virt(obj))
+ SGOTO(out, spl_emergency_free(skc, obj));
local_irq_save(flags);
/* Per-CPU cache full, flush it to make space */
if (unlikely(skm->skm_avail >= skm->skm_size))
- (void)spl_cache_flush(skc, skm, skm->skm_refill);
+ spl_cache_flush(skc, skm, skm->skm_refill);
/* Available space in cache, use it */
skm->skm_objs[skm->skm_avail++] = obj;
} while (do_reclaim);
}
- /* Reclaim from the cache, ignoring it's age and delay. */
+ /* Reclaim from the magazines, then the slabs, ignoring age and delay. */
+ if (spl_kmem_cache_expire & KMC_EXPIRE_MEM) {
+ spl_kmem_magazine_t *skm;
+ int i;
+
+ for_each_online_cpu(i) {
+ skm = skc->skc_mag[i];
+ spl_cache_flush(skc, skm, skm->skm_avail);
+ }
+ }
+
spl_slab_reclaim(skc, count, 1);
clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
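+ /*
+ * Ensure the cleared REAPING bit is visible before waking any thread
+ * blocked in wait_on_bit() waiting for reaping to complete.
+ */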
+ smp_mb__after_clear_bit();
+ wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
+
atomic_dec(&skc->skc_ref);
SEXIT;
*/
spl_kmem_init_globals();
-#if !defined(HAVE_INVALIDATE_INODES) && !defined(HAVE_INVALIDATE_INODES_CHECK)
- invalidate_inodes_fn = (invalidate_inodes_t)
- spl_kallsyms_lookup_name("invalidate_inodes");
- if (!invalidate_inodes_fn) {
- printk(KERN_ERR "Error: Unknown symbol invalidate_inodes\n");
- return -EFAULT;
- }
-#endif /* !HAVE_INVALIDATE_INODES && !HAVE_INVALIDATE_INODES_CHECK */
-
#ifndef HAVE_SHRINK_DCACHE_MEMORY
/* When shrink_dcache_memory_fn == NULL support is disabled */
shrink_dcache_memory_fn = (shrink_dcache_memory_t)
init_rwsem(&spl_kmem_cache_sem);
INIT_LIST_HEAD(&spl_kmem_cache_list);
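+ /*
+ * Single threaded taskq shared by all caches; it runs the magazine
+ * ageing, slab reclaim, and asynchronous cache growth tasks.
+ */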
+ spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
+ 1, maxclsyspri, 1, 32, TASKQ_PREPOPULATE);
spl_register_shrinker(&spl_kmem_cache_shrinker);
SENTRY;
spl_unregister_shrinker(&spl_kmem_cache_shrinker);
+ taskq_destroy(spl_kmem_cache_taskq);
SEXIT;
}