* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
- * For details, see <http://github.com/behlendorf/spl/>.
+ * For details, see <http://zfsonlinux.org/>.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
#define SS_DEBUG_SUBSYS SS_KMEM
+/*
+ * Cache expiration was implemented because it was part of the default Solaris
+ * kmem_cache behavior. The idea is that per-cpu objects which haven't been
+ * accessed in several seconds should be returned to the cache. On the other
+ * hand Linux slabs never move objects back to the slabs unless there is
+ * memory pressure on the system. By default both methods are disabled, but
+ * may be enabled by setting KMC_EXPIRE_AGE or KMC_EXPIRE_MEM.
+ */
+unsigned int spl_kmem_cache_expire = 0;
+EXPORT_SYMBOL(spl_kmem_cache_expire);
+module_param(spl_kmem_cache_expire, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)");
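+/*
+ * As an illustration, both methods could be enabled together by loading
+ * the module with spl_kmem_cache_expire=0x3, or at run time by writing
+ * to /sys/module/spl/parameters/spl_kmem_cache_expire (the 0644
+ * permissions above make the parameter writable by root).
+ */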
+
/*
* The minimum amount of memory measured in pages to be free at all
* times on the system. This is similar to Linux's zone->pages_min
- * multipled by the number of zones and is sized based on that.
+ * multiplied by the number of zones and is sized based on that.
*/
pgcnt_t minfree = 0;
EXPORT_SYMBOL(minfree);
/*
* The desired amount of memory measured in pages to be free at all
* times on the system. This is similar to Linux's zone->pages_low
- * multipled by the number of zones and is sized based on that.
+ * multiplied by the number of zones and is sized based on that.
* Assuming all zones are being used roughly equally, when we drop
- * below this threshold async page reclamation is triggered.
+ * below this threshold asynchronous page reclamation is triggered.
*/
pgcnt_t desfree = 0;
EXPORT_SYMBOL(desfree);
/*
- * When above this amount of memory measures in pages the system is
+ * When above this amount of memory measured in pages the system is
* determined to have enough free memory. This is similar to Linux's
- * zone->pages_high multipled by the number of zones and is sized based
+ * zone->pages_high multiplied by the number of zones and is sized based
* on that. Assuming all zones are being used roughly equally, when
- * async page reclamation reaches this threshold it stops.
+ * asynchronous page reclamation reaches this threshold it stops.
*/
pgcnt_t lotsfree = 0;
EXPORT_SYMBOL(lotsfree);
#endif /* NEED_GET_ZONE_COUNTS */
EXPORT_SYMBOL(spl_global_page_state);
-#ifndef HAVE_INVALIDATE_INODES
-invalidate_inodes_t invalidate_inodes_fn = SYMBOL_POISON;
-EXPORT_SYMBOL(invalidate_inodes_fn);
-#endif /* HAVE_INVALIDATE_INODES */
-
#ifndef HAVE_SHRINK_DCACHE_MEMORY
shrink_dcache_memory_t shrink_dcache_memory_fn = SYMBOL_POISON;
EXPORT_SYMBOL(shrink_dcache_memory_fn);
EXPORT_SYMBOL(vmem_list);
static kmem_debug_t *
-kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, void *addr)
+kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, const void *addr)
{
struct hlist_head *head;
struct hlist_node *node;
EXPORT_SYMBOL(kmem_alloc_track);
void
-kmem_free_track(void *ptr, size_t size)
+kmem_free_track(const void *ptr, size_t size)
{
kmem_debug_t *dptr;
SENTRY;
EXPORT_SYMBOL(vmem_alloc_track);
void
-vmem_free_track(void *ptr, size_t size)
+vmem_free_track(const void *ptr, size_t size)
{
kmem_debug_t *dptr;
SENTRY;
"large kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
(unsigned long long) size, flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
- spl_debug_dumpstack(NULL);
+ dump_stack();
}
/* Use the correct allocator */
EXPORT_SYMBOL(kmem_alloc_debug);
void
-kmem_free_debug(void *ptr, size_t size)
+kmem_free_debug(const void *ptr, size_t size)
{
SENTRY;
EXPORT_SYMBOL(vmem_alloc_debug);
void
-vmem_free_debug(void *ptr, size_t size)
+vmem_free_debug(const void *ptr, size_t size)
{
SENTRY;
* Slab allocation interfaces
*
* While the Linux slab implementation was inspired by the Solaris
- * implemenation I cannot use it to emulate the Solaris APIs. I
+ * implementation I cannot use it to emulate the Solaris APIs. I
* require two features which are not provided by the Linux slab.
*
* 1) Constructors AND destructors. Recent versions of the Linux
* Because of memory fragmentation the Linux slab which is backed
* by kmalloc'ed memory performs very badly when confronted with
* large numbers of large allocations. Basing the slab on the
- * virtual address space removes the need for contigeous pages
+ * virtual address space removes the need for contiguous pages
- * and greatly improve performance for large allocations.
+ * and greatly improves performance for large allocations.
*
* For these reasons, the SPL has its own slab implementation with
*
* XXX: Improve the partial slab list by carefully maintaining a
* strict ordering of fullest to emptiest slabs based on
- * the slab reference count. This gaurentees the when freeing
+ * the slab reference count. This guarantees that when freeing
* slabs back to the system we need only linearly traverse the
* last N slabs in the list to discover all the freeable slabs.
*
* XXX: NUMA awareness for optionally allocating memory close to a
- * particular core. This can be adventageous if you know the slab
+ * particular core. This can be advantageous if you know the slab
* object will be short lived and primarily accessed from one core.
*
* XXX: Slab coloring may also yield performance improvements and would
struct list_head spl_kmem_cache_list; /* List of caches */
struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
+taskq_t *spl_kmem_cache_taskq; /* Task queue for ageing / reclaim */
-static int spl_cache_flush(spl_kmem_cache_t *skc,
- spl_kmem_magazine_t *skm, int flush);
+static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker);
SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
ASSERT(ISP2(size));
- if (skc->skc_flags & KMC_KMEM) {
+ if (skc->skc_flags & KMC_KMEM)
ptr = (void *)__get_free_pages(flags, get_order(size));
- } else {
- /*
- * As part of vmalloc() an __pte_alloc_kernel() allocation
- * may occur. This internal allocation does not honor the
- * gfp flags passed to vmalloc(). This means even when
- * vmalloc(GFP_NOFS) is called it is possible synchronous
- * reclaim will occur. This reclaim can trigger file IO
- * which can result in a deadlock. This issue can be avoided
- * by explicitly setting PF_MEMALLOC on the process to
- * subvert synchronous reclaim. The following bug has
- * been filed at kernel.org to track the issue.
- *
- * https://bugzilla.kernel.org/show_bug.cgi?id=30702
- */
- if (!(flags & __GFP_FS))
- current->flags |= PF_MEMALLOC;
-
+ else
ptr = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
- if (!(flags & __GFP_FS))
- current->flags &= ~PF_MEMALLOC;
- }
-
/* Resulting allocated memory will be page aligned */
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
ASSERT(ISP2(size));
+ /*
+ * The Linux direct reclaim path uses this out of band value to
+ * determine if forward progress is being made. Normally this is
+ * incremented by kmem_freepages() which is part of the various
+ * Linux slab implementations. However, since we are using none
+ * of that infrastructure we are responsible for incrementing it.
+ */
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
+
if (skc->skc_flags & KMC_KMEM)
free_pages((unsigned long)ptr, get_order(size));
else
* For small objects we use kmem_alloc() because as long as you are
- * only requesting a small number of pages (ideally just one) its cheap.
+ * only requesting a small number of pages (ideally just one) it's cheap.
* However, when you start requesting multiple pages with kmem_alloc()
- * it gets increasingly expensive since it requires contigeous pages.
+ * it gets increasingly expensive since it requires contiguous pages.
* For this reason we shift to vmem_alloc() for slabs of large objects
- * which removes the need for contigeous pages. We do not use
+ * which removes the need for contiguous pages. We do not use
* vmem_alloc() in all cases because there is significant locking
* overhead in __get_vm_area_node(). This function takes a single
- * global lock when aquiring an available virtual address range which
+ * global lock when acquiring an available virtual address range which
* serializes all vmem_alloc()'s for all slab caches. Using slightly
* different allocation functions for small and large objects should
* give us the best of both worlds.
* All empty slabs are at the end of skc->skc_partial_list,
* therefore once a non-empty slab is found we can stop
* scanning. Additionally, stop when reaching the target
- * reclaim 'count' if a non-zero threshhold is given.
+ * reclaim 'count' if a non-zero threshold is given.
*/
- if ((sks->sks_ref > 0) || (count && i > count))
+ if ((sks->sks_ref > 0) || (count && i >= count))
break;
if (time_after(jiffies,sks->sks_age+skc->skc_delay*HZ)||flag) {
SEXIT;
}
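+/*
+ * Emergency objects are tracked in a red black tree keyed by the
+ * address of the allocated object. Returns the matching tracking
+ * structure, or NULL if the object is not an emergency allocation.
+ */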
+static spl_kmem_emergency_t *
+spl_emergency_search(struct rb_root *root, void *obj)
+{
+ struct rb_node *node = root->rb_node;
+ spl_kmem_emergency_t *ske;
+ unsigned long address = (unsigned long)obj;
+
+ while (node) {
+ ske = container_of(node, spl_kmem_emergency_t, ske_node);
+
+ if (address < (unsigned long)ske->ske_obj)
+ node = node->rb_left;
+ else if (address > (unsigned long)ske->ske_obj)
+ node = node->rb_right;
+ else
+ return ske;
+ }
+
+ return NULL;
+}
+
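+/*
+ * Insert a new emergency object tracking structure into the red black
+ * tree. Returns 1 on success, or 0 when an entry for the address
+ * already exists, in which case the caller must free the duplicate.
+ */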
+static int
+spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+ spl_kmem_emergency_t *ske_tmp;
+ unsigned long address = (unsigned long)ske->ske_obj;
+
+ while (*new) {
+ ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
+
+ parent = *new;
+ if (address < (unsigned long)ske_tmp->ske_obj)
+ new = &((*new)->rb_left);
+ else if (address > (unsigned long)ske_tmp->ske_obj)
+ new = &((*new)->rb_right);
+ else
+ return 0;
+ }
+
+ rb_link_node(&ske->ske_node, parent, new);
+ rb_insert_color(&ske->ske_node, root);
+
+ return 1;
+}
+
+/*
+ * Allocate a single emergency object and track it in a red black tree.
+ */
+static int
+spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
+{
+ spl_kmem_emergency_t *ske;
+ int empty;
+ SENTRY;
+
+ /* Last chance, use a partial slab if one now exists */
+ spin_lock(&skc->skc_lock);
+ empty = list_empty(&skc->skc_partial_list);
+ spin_unlock(&skc->skc_lock);
+ if (!empty)
+ SRETURN(-EEXIST);
+
+ ske = kmalloc(sizeof(*ske), flags);
+ if (ske == NULL)
+ SRETURN(-ENOMEM);
+
+ ske->ske_obj = kmalloc(skc->skc_obj_size, flags);
+ if (ske->ske_obj == NULL) {
+ kfree(ske);
+ SRETURN(-ENOMEM);
+ }
+
+ spin_lock(&skc->skc_lock);
+ empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
+ if (likely(empty)) {
+ skc->skc_obj_total++;
+ skc->skc_obj_emergency++;
+ if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
+ skc->skc_obj_emergency_max = skc->skc_obj_emergency;
+ }
+ spin_unlock(&skc->skc_lock);
+
+ if (unlikely(!empty)) {
+ kfree(ske->ske_obj);
+ kfree(ske);
+ SRETURN(-EINVAL);
+ }
+
+ if (skc->skc_ctor)
+ skc->skc_ctor(ske->ske_obj, skc->skc_private, flags);
+
+ *obj = ske->ske_obj;
+
+ SRETURN(0);
+}
+
+/*
+ * Locate the passed object in the red black tree and free it.
+ */
+static int
+spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
+{
+ spl_kmem_emergency_t *ske;
+ SENTRY;
+
+ spin_lock(&skc->skc_lock);
+ ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
+ if (likely(ske)) {
+ rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
+ skc->skc_obj_emergency--;
+ skc->skc_obj_total--;
+ }
+ spin_unlock(&skc->skc_lock);
+
+ if (unlikely(ske == NULL))
+ SRETURN(-ENOENT);
+
+ if (skc->skc_dtor)
+ skc->skc_dtor(ske->ske_obj, skc->skc_private);
+
+ kfree(ske->ske_obj);
+ kfree(ske);
+
+ SRETURN(0);
+}
+
/*
- * Called regularly on all caches to age objects out of the magazines
- * which have not been access in skc->skc_delay seconds. This prevents
- * idle magazines from holding memory which might be better used by
- * other caches or parts of the system. The delay is present to
- * prevent thrashing the magazine.
+ * Release objects from the per-cpu magazine back to their slab. The flush
+ * argument contains the max number of entries to remove from the magazine.
*/
+static void
+__spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
+{
+ int i, count = MIN(flush, skm->skm_avail);
+ SENTRY;
+
+ ASSERT(skc->skc_magic == SKC_MAGIC);
+ ASSERT(skm->skm_magic == SKM_MAGIC);
+ ASSERT(spin_is_locked(&skc->skc_lock));
+
+ for (i = 0; i < count; i++)
+ spl_cache_shrink(skc, skm->skm_objs[i]);
+
+ skm->skm_avail -= count;
+ memmove(skm->skm_objs, &(skm->skm_objs[count]),
+ sizeof(void *) * skm->skm_avail);
+
+ SEXIT;
+}
+
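+/*
+ * Identical to __spl_cache_flush() above, but takes skc->skc_lock for
+ * callers which do not already hold the cache lock.
+ */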
+static void
+spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
+{
+ spin_lock(&skc->skc_lock);
+ __spl_cache_flush(skc, skm, flush);
+ spin_unlock(&skc->skc_lock);
+}
+
static void
spl_magazine_age(void *data)
{
- spl_kmem_magazine_t *skm =
- spl_get_work_data(data, spl_kmem_magazine_t, skm_work.work);
- spl_kmem_cache_t *skc = skm->skm_cache;
- int i = smp_processor_id();
+ spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
+ spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
ASSERT(skm->skm_magic == SKM_MAGIC);
- ASSERT(skc->skc_magic == SKC_MAGIC);
- ASSERT(skc->skc_mag[i] == skm);
+ ASSERT(skm->skm_cpu == smp_processor_id());
+ ASSERT(irqs_disabled());
+
+ /* There are no available objects or they are too young to age out */
+ if ((skm->skm_avail == 0) ||
+ time_before(jiffies, skm->skm_age + skc->skc_delay * HZ))
+ return;
- if (skm->skm_avail > 0 &&
- time_after(jiffies, skm->skm_age + skc->skc_delay * HZ))
- (void)spl_cache_flush(skc, skm, skm->skm_refill);
+ /*
+ * Because we're executing in interrupt context we may have
+ * interrupted the holder of this lock. To avoid a potential
+ * deadlock return if the lock is contended.
+ */
+ if (!spin_trylock(&skc->skc_lock))
+ return;
- if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
- schedule_delayed_work_on(i, &skm->skm_work,
- skc->skc_delay / 3 * HZ);
+ __spl_cache_flush(skc, skm, skm->skm_refill);
+ spin_unlock(&skc->skc_lock);
}
/*
- * Called regularly to keep a downward pressure on the size of idle
- * magazines and to release free slabs from the cache. This function
- * never calls the registered reclaim function, that only occures
- * under memory pressure or with a direct call to spl_kmem_reap().
+ * Called regularly to keep a downward pressure on the cache.
+ *
+ * Objects older than skc->skc_delay seconds in the per-cpu magazines will
+ * be returned to the caches. This is done to prevent idle magazines from
+ * holding memory which could be better used elsewhere. The delay is
+ * present to prevent thrashing the magazine.
+ *
+ * The newly released objects may result in empty partial slabs. Those
+ * slabs should be released to the system. Otherwise moving the objects
+ * out of the magazines is just wasted work.
*/
static void
spl_cache_age(void *data)
{
- spl_kmem_cache_t *skc =
- spl_get_work_data(data, spl_kmem_cache_t, skc_work.work);
+ spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
+ taskqid_t id = 0;
ASSERT(skc->skc_magic == SKC_MAGIC);
+
+ /* Dynamically disabled at run time */
+ if (!(spl_kmem_cache_expire & KMC_EXPIRE_AGE))
+ return;
+
+ atomic_inc(&skc->skc_ref);
+ spl_on_each_cpu(spl_magazine_age, skc, 1);
spl_slab_reclaim(skc, skc->skc_reap, 0);
- if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
- schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
+ while (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && !id) {
+ id = taskq_dispatch_delay(
+ spl_kmem_cache_taskq, spl_cache_age, skc, TQ_SLEEP,
+ ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
+
+ /* If a destroy was issued after dispatch, immediately cancel it */
+ if (test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && id)
+ taskq_cancel_id(spl_kmem_cache_taskq, id);
+ }
+
+ spin_lock(&skc->skc_lock);
+ skc->skc_taskqid = id;
+ spin_unlock(&skc->skc_lock);
+
+ atomic_dec(&skc->skc_ref);
}
/*
}
/*
- * Allocate a per-cpu magazine to assoicate with a specific core.
+ * Allocate a per-cpu magazine to associate with a specific core.
*/
static spl_kmem_magazine_t *
-spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
+spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
spl_kmem_magazine_t *skm;
int size = sizeof(spl_kmem_magazine_t) +
sizeof(void *) * skc->skc_mag_size;
SENTRY;
- skm = kmem_alloc_node(size, KM_SLEEP, node);
+ skm = kmem_alloc_node(size, KM_SLEEP, cpu_to_node(cpu));
if (skm) {
skm->skm_magic = SKM_MAGIC;
skm->skm_avail = 0;
skm->skm_size = skc->skc_mag_size;
skm->skm_refill = skc->skc_mag_refill;
skm->skm_cache = skc;
- spl_init_delayed_work(&skm->skm_work, spl_magazine_age, skm);
skm->skm_age = jiffies;
+ skm->skm_cpu = cpu;
}
SRETURN(skm);
}
/*
- * Free a per-cpu magazine assoicated with a specific core.
+ * Free a per-cpu magazine associated with a specific core.
*/
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
for_each_online_cpu(i) {
- skc->skc_mag[i] = spl_magazine_alloc(skc, cpu_to_node(i));
+ skc->skc_mag[i] = spl_magazine_alloc(skc, i);
if (!skc->skc_mag[i]) {
for (i--; i >= 0; i--)
spl_magazine_free(skc->skc_mag[i]);
}
}
- /* Only after everything is allocated schedule magazine work */
- for_each_online_cpu(i)
- schedule_delayed_work_on(i, &skc->skc_mag[i]->skm_work,
- skc->skc_delay / 3 * HZ);
-
SRETURN(0);
}
for_each_online_cpu(i) {
skm = skc->skc_mag[i];
- (void)spl_cache_flush(skc, skm, skm->skm_avail);
+ spl_cache_flush(skc, skm, skm->skm_avail);
spl_magazine_free(skm);
}
void *priv, void *vmp, int flags)
{
spl_kmem_cache_t *skc;
- int rc, kmem_flags = KM_SLEEP;
+ int rc;
SENTRY;
ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);
ASSERT(vmp == NULL);
- /* We may be called when there is a non-zero preempt_count or
- * interrupts are disabled is which case we must not sleep.
- */
- if (current_thread_info()->preempt_count || irqs_disabled())
- kmem_flags = KM_NOSLEEP;
+ might_sleep();
- /* Allocate memry for a new cache an initialize it. Unfortunately,
+ /*
+ * Allocate memory for a new cache and initialize it. Unfortunately,
* this usually ends up being a large allocation of ~32k because
* we need to allocate enough memory for the worst case number of
* cpus in the magazine, skc_mag[NR_CPUS]. Because of this we
- * explicitly pass KM_NODEBUG to suppress the kmem warning */
- skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc),
- kmem_flags | KM_NODEBUG);
+ * explicitly pass KM_NODEBUG to suppress the kmem warning
+ */
+ skc = kmem_zalloc(sizeof(*skc), KM_SLEEP | KM_NODEBUG);
if (skc == NULL)
SRETURN(NULL);
skc->skc_magic = SKC_MAGIC;
skc->skc_name_size = strlen(name) + 1;
- skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags);
+ skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, KM_SLEEP);
if (skc->skc_name == NULL) {
kmem_free(skc, sizeof(*skc));
SRETURN(NULL);
INIT_LIST_HEAD(&skc->skc_list);
INIT_LIST_HEAD(&skc->skc_complete_list);
INIT_LIST_HEAD(&skc->skc_partial_list);
+ skc->skc_emergency_tree = RB_ROOT;
spin_lock_init(&skc->skc_lock);
+ init_waitqueue_head(&skc->skc_waitq);
skc->skc_slab_fail = 0;
skc->skc_slab_create = 0;
skc->skc_slab_destroy = 0;
skc->skc_obj_total = 0;
skc->skc_obj_alloc = 0;
skc->skc_obj_max = 0;
+ skc->skc_obj_deadlock = 0;
+ skc->skc_obj_emergency = 0;
+ skc->skc_obj_emergency_max = 0;
if (align) {
VERIFY(ISP2(align));
if (rc)
SGOTO(out, rc);
- spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc);
- schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
+ if (spl_kmem_cache_expire & KMC_EXPIRE_AGE)
+ skc->skc_taskqid = taskq_dispatch_delay(spl_kmem_cache_taskq,
+ spl_cache_age, skc, TQ_SLEEP,
+ ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
down_write(&spl_kmem_cache_sem);
list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
* XXX: Unimplemented but harmless to stub out for now.
*/
void
-spl_kmem_cache_set_move(kmem_cache_t *skc,
+spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
ASSERT(move != NULL);
EXPORT_SYMBOL(spl_kmem_cache_set_move);
/*
- * Destroy a cache and all objects assoicated with the cache.
+ * Destroy a cache and all objects associated with the cache.
*/
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
DECLARE_WAIT_QUEUE_HEAD(wq);
- int i;
+ taskqid_t id;
SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
list_del_init(&skc->skc_list);
up_write(&spl_kmem_cache_sem);
- /* Cancel any and wait for any pending delayed work */
+ /* Cancel and wait for any pending delayed tasks */
VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
- cancel_delayed_work_sync(&skc->skc_work);
- for_each_online_cpu(i)
- cancel_delayed_work_sync(&skc->skc_mag[i]->skm_work);
- flush_scheduled_work();
+ spin_lock(&skc->skc_lock);
+ id = skc->skc_taskqid;
+ spin_unlock(&skc->skc_lock);
+
+ taskq_cancel_id(spl_kmem_cache_taskq, id);
/* Wait until all current callers complete, this is mainly
* to catch the case where a low memory situation triggers a
ASSERT3U(skc->skc_obj_alloc, ==, 0);
ASSERT3U(skc->skc_slab_total, ==, 0);
ASSERT3U(skc->skc_obj_total, ==, 0);
+ ASSERT3U(skc->skc_obj_emergency, ==, 0);
ASSERT(list_empty(&skc->skc_complete_list));
kmem_free(skc->skc_name, skc->skc_name_size);
}
/*
- * No available objects on any slabsi, create a new slab. Since this
- * is an expensive operation we do it without holding the spinlock and
- * only briefly aquire it when we link in the fully allocated and
- * constructed slab.
+ * Generic slab allocation function to be run by the global work queue.
+ * It is responsible for allocating a new slab, linking it into the list
+ * of partial slabs, and then waking any waiters.
*/
-static spl_kmem_slab_t *
-spl_cache_grow(spl_kmem_cache_t *skc, int flags)
+static void
+spl_cache_grow_work(void *data)
{
+ spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
+ spl_kmem_cache_t *skc = ska->ska_cache;
spl_kmem_slab_t *sks;
+
+ sks = spl_slab_alloc(skc, ska->ska_flags | __GFP_NORETRY | KM_NODEBUG);
+ spin_lock(&skc->skc_lock);
+ if (sks) {
+ skc->skc_slab_total++;
+ skc->skc_obj_total += sks->sks_objs;
+ list_add_tail(&sks->sks_list, &skc->skc_partial_list);
+ }
+
+ atomic_dec(&skc->skc_ref);
+ clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
+ clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
+ wake_up_all(&skc->skc_waitq);
+ spin_unlock(&skc->skc_lock);
+
+ kfree(ska);
+}
+
+/*
+ * Returns non-zero when a new slab should be available.
+ */
+static int
+spl_cache_grow_wait(spl_kmem_cache_t *skc)
+{
+ return !test_bit(KMC_BIT_GROWING, &skc->skc_flags);
+}
+
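+/*
+ * Callback passed to wait_on_bit(), schedule away until woken by
+ * the bit waker, see wake_up_bit() in spl_kmem_cache_reap_now().
+ */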
+static int
+spl_cache_reclaim_wait(void *word)
+{
+ schedule();
+ return 0;
+}
+
+/*
+ * No available objects on any slabs, create a new slab.
+ */
+static int
+spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
+{
+ int remaining, rc;
SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
- local_irq_enable();
might_sleep();
+ *obj = NULL;
/*
- * Before allocating a new slab check if the slab is being reaped.
- * If it is there is a good chance we can wait until it finishes
- * and then use one of the newly freed but not aged-out slabs.
+ * Before allocating a new slab wait for any reaping to complete and
+ * then return so the local magazine can be rechecked for new objects.
*/
if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
- schedule();
- SGOTO(out, sks= NULL);
+ rc = wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
+ spl_cache_reclaim_wait, TASK_UNINTERRUPTIBLE);
+ SRETURN(rc ? rc : -EAGAIN);
}
- /* Allocate a new slab for the cache */
- sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | KM_NODEBUG);
- if (sks == NULL)
- SGOTO(out, sks = NULL);
+ /*
+ * This is handled by dispatching a work request to the global work
+ * queue. This allows us to asynchronously allocate a new slab while
+ * retaining the ability to safely fall back to a smaller synchronous
+ * allocations to ensure forward progress is always maintained.
+ */
+ if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
+ spl_kmem_alloc_t *ska;
- /* Link the new empty slab in to the end of skc_partial_list. */
- spin_lock(&skc->skc_lock);
- skc->skc_slab_total++;
- skc->skc_obj_total += sks->sks_objs;
- list_add_tail(&sks->sks_list, &skc->skc_partial_list);
- spin_unlock(&skc->skc_lock);
-out:
- local_irq_disable();
+ ska = kmalloc(sizeof(*ska), flags);
+ if (ska == NULL) {
+ clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
+ wake_up_all(&skc->skc_waitq);
+ SRETURN(-ENOMEM);
+ }
- SRETURN(sks);
+ atomic_inc(&skc->skc_ref);
+ ska->ska_cache = skc;
+ ska->ska_flags = flags & ~__GFP_FS;
+ taskq_init_ent(&ska->ska_tqe);
+ taskq_dispatch_ent(spl_kmem_cache_taskq,
+ spl_cache_grow_work, ska, 0, &ska->ska_tqe);
+ }
+
+ /*
+ * The goal here is to only detect the rare case where a virtual slab
+ * allocation has deadlocked. We must be careful to minimize the use
+ * of emergency objects which are more expensive to track. Therefore,
+ * we set a very long timeout for the asynchronous allocation and if
+ * the timeout is reached the cache is flagged as deadlocked. From
+ * this point only new emergency objects will be allocated until the
+ * asynchronous allocation completes and clears the deadlocked flag.
+ */
+ if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
+ rc = spl_emergency_alloc(skc, flags, obj);
+ } else {
+ remaining = wait_event_timeout(skc->skc_waitq,
+ spl_cache_grow_wait(skc), HZ);
+
+ if (!remaining && test_bit(KMC_BIT_VMEM, &skc->skc_flags)) {
+ spin_lock(&skc->skc_lock);
+ if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
+ set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
+ skc->skc_obj_deadlock++;
+ }
+ spin_unlock(&skc->skc_lock);
+ }
+
+ rc = -ENOMEM;
+ }
+
+ SRETURN(rc);
}
/*
- * Refill a per-cpu magazine with objects from the slabs for this
- * cache. Ideally the magazine can be repopulated using existing
- * objects which have been released, however if we are unable to
- * locate enough free objects new slabs of objects will be created.
+ * Refill a per-cpu magazine with objects from the slabs for this cache.
+ * Ideally the magazine can be repopulated using existing objects which have
+ * been released, however if we are unable to locate enough free objects new
+ * slabs of objects will be created. On success NULL is returned, otherwise
+ * the address of a single emergency object is returned for use by the caller.
*/
-static int
+static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
spl_kmem_slab_t *sks;
- int rc = 0, refill;
+ int count = 0, rc, refill;
+ void *obj = NULL;
SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
if (list_empty(&skc->skc_partial_list)) {
spin_unlock(&skc->skc_lock);
- sks = spl_cache_grow(skc, flags);
- if (!sks)
+ local_irq_enable();
+ rc = spl_cache_grow(skc, flags, &obj);
+ local_irq_disable();
+
+ /* Emergency object for immediate use by caller */
+ if (rc == 0 && obj != NULL)
+ SRETURN(obj);
+
+ if (rc)
SGOTO(out, rc);
/* Rescheduled to different CPU skm is not local */
SGOTO(out, rc);
/* Potentially rescheduled to the same CPU but
- * allocations may have occured from this CPU while
+ * allocations may have occurred from this CPU while
* we were sleeping so recalculate max refill. */
refill = MIN(refill, skm->skm_size - skm->skm_avail);
/* Consume as many objects as needed to refill the requested
* cache. We must also be careful not to overfill it. */
- while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++rc) {
+ while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++count) {
ASSERT(skm->skm_avail < skm->skm_size);
- ASSERT(rc < skm->skm_size);
+ ASSERT(count < skm->skm_size);
skm->skm_objs[skm->skm_avail++]=spl_cache_obj(skc,sks);
}
spin_unlock(&skc->skc_lock);
out:
- /* Returns the number of entries added to cache */
- SRETURN(rc);
+ SRETURN(NULL);
}
/*
list_add(&sks->sks_list, &skc->skc_partial_list);
}
- /* Move emply slabs to the end of the partial list so
+ /* Move empty slabs to the end of the partial list so
* they can be easily found and freed during reclamation. */
if (sks->sks_ref == 0) {
list_del(&sks->sks_list);
SEXIT;
}
-/*
- * Release a batch of objects from a per-cpu magazine back to their
- * respective slabs. This occurs when we exceed the magazine size,
- * are under memory pressure, when the cache is idle, or during
- * cache cleanup. The flush argument contains the number of entries
- * to remove from the magazine.
- */
-static int
-spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
-{
- int i, count = MIN(flush, skm->skm_avail);
- SENTRY;
-
- ASSERT(skc->skc_magic == SKC_MAGIC);
- ASSERT(skm->skm_magic == SKM_MAGIC);
-
- /*
- * XXX: Currently we simply return objects from the magazine to
- * the slabs in fifo order. The ideal thing to do from a memory
- * fragmentation standpoint is to cheaply determine the set of
- * objects in the magazine which will result in the largest
- * number of free slabs if released from the magazine.
- */
- spin_lock(&skc->skc_lock);
- for (i = 0; i < count; i++)
- spl_cache_shrink(skc, skm->skm_objs[i]);
-
- skm->skm_avail -= count;
- memmove(skm->skm_objs, &(skm->skm_objs[count]),
- sizeof(void *) * skm->skm_avail);
-
- spin_unlock(&skc->skc_lock);
-
- SRETURN(count);
-}
-
/*
* Allocate an object from the per-cpu magazine, or if the magazine
* is empty directly allocate from a slab and repopulate the magazine.
restart:
/* Safe to update per-cpu structure without lock, but
- * in the restart case we must be careful to reaquire
+ * in the restart case we must be careful to reacquire
* the local magazine since this may have changed
* when we need to grow the cache. */
skm = skc->skc_mag[smp_processor_id()];
obj = skm->skm_objs[--skm->skm_avail];
skm->skm_age = jiffies;
} else {
- /* Per-CPU cache empty, directly allocate from
- * the slab and refill the per-CPU cache. */
- (void)spl_cache_refill(skc, skm, flags);
- SGOTO(restart, obj = NULL);
+ obj = spl_cache_refill(skc, skm, flags);
+ if (obj == NULL)
+ SGOTO(restart, obj = NULL);
}
local_irq_restore(irq_flags);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
atomic_inc(&skc->skc_ref);
+
+ /*
+ * Only virtual slabs may have emergency objects and these objects
+ * are guaranteed to have physical addresses. They must be removed
+ * from the tree of emergency objects and then freed.
+ */
+ if ((skc->skc_flags & KMC_VMEM) && !kmem_virt(obj))
+ SGOTO(out, spl_emergency_free(skc, obj));
+
local_irq_save(flags);
/* Safe to update per-cpu structure without lock, but
/* Per-CPU cache full, flush it to make space */
if (unlikely(skm->skm_avail >= skm->skm_size))
- (void)spl_cache_flush(skc, skm, skm->skm_refill);
+ spl_cache_flush(skc, skm, skm->skm_refill);
/* Available space in cache, use it */
skm->skm_objs[skm->skm_avail++] = obj;
local_irq_restore(flags);
+out:
atomic_dec(&skc->skc_ref);
SEXIT;
EXPORT_SYMBOL(spl_kmem_cache_free);
/*
- * The generic shrinker function for all caches. Under linux a shrinker
- * may not be tightly coupled with a slab cache. In fact linux always
- * systematically trys calling all registered shrinker callbacks which
+ * The generic shrinker function for all caches. Under Linux a shrinker
+ * may not be tightly coupled with a slab cache. In fact Linux always
+ * systematically tries calling all registered shrinker callbacks which
* report that they contain unused objects. Because of this we only
* register one shrinker function in the shim layer for all slab caches.
* We always attempt to shrink all caches when this generic shrinker
* is called. The shrinker should return the number of free objects
* in the cache when called with nr_to_scan == 0 but not attempt to
* free any objects. When nr_to_scan > 0 it is a request that nr_to_scan
- * objects should be freed, because Solaris semantics are to free
- * all available objects we may free more objects than requested.
+ * objects should be freed, which differs from Solaris semantics.
+ * Solaris semantics are to free all available objects which may (and
+ * probably will) be more objects than the requested nr_to_scan.
*/
static int
__spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
down_read(&spl_kmem_cache_sem);
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
if (sc->nr_to_scan)
- spl_kmem_cache_reap_now(skc);
+ spl_kmem_cache_reap_now(skc,
+ MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
/*
- * Presume everything alloc'ed in reclaimable, this ensures
+ * Presume everything alloc'ed is reclaimable, this ensures
* effort and we do not want to thrash creating and destroying slabs.
*/
void
-spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
+spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
{
SENTRY;
atomic_inc(&skc->skc_ref);
- if (skc->skc_reclaim)
- skc->skc_reclaim(skc->skc_private);
+ /*
+ * When a reclaim function is available it may be invoked repeatedly
+ * until at least a single slab can be freed. This ensures that we
+ * do free memory back to the system. This helps minimize the chance
+ * of an OOM event when the bulk of memory is used by the slab.
+ *
+ * When free slabs are already available the reclaim callback will be
+ * skipped. Additionally, if no forward progress is detected despite
+ * a reclaim function the cache will be skipped to avoid deadlock.
+ *
+ * Longer term this would be the correct place to add the code which
+ * repacks the slabs in order to minimize fragmentation.
+ */
+ if (skc->skc_reclaim) {
+ uint64_t objects = UINT64_MAX;
+ int do_reclaim;
- spl_slab_reclaim(skc, skc->skc_reap, 0);
+ do {
+ spin_lock(&skc->skc_lock);
+ do_reclaim =
+ (skc->skc_slab_total > 0) &&
+ ((skc->skc_slab_total - skc->skc_slab_alloc) == 0) &&
+ (skc->skc_obj_alloc < objects);
+
+ objects = skc->skc_obj_alloc;
+ spin_unlock(&skc->skc_lock);
+
+ if (do_reclaim)
+ skc->skc_reclaim(skc->skc_private);
+
+ } while (do_reclaim);
+ }
+
+ /* Reclaim from the magazine then the slabs ignoring age and delay. */
+ if (spl_kmem_cache_expire & KMC_EXPIRE_MEM) {
+ spl_kmem_magazine_t *skm;
+ int i;
+
+ for_each_online_cpu(i) {
+ skm = skc->skc_mag[i];
+ spl_cache_flush(skc, skm, skm->skm_avail);
+ }
+ }
+
+ spl_slab_reclaim(skc, count, 1);
clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
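+ /* The barrier ensures the cleared bit is visible before any
+ * waiters blocked in spl_cache_reclaim_wait() are woken */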
+ smp_mb__after_clear_bit();
+ wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
+
atomic_dec(&skc->skc_ref);
SEXIT;
*/
spl_kmem_init_globals();
-#ifndef HAVE_INVALIDATE_INODES
- invalidate_inodes_fn = (invalidate_inodes_t)
- spl_kallsyms_lookup_name("invalidate_inodes");
- if (!invalidate_inodes_fn) {
- printk(KERN_ERR "Error: Unknown symbol invalidate_inodes\n");
- return -EFAULT;
- }
-#endif /* HAVE_INVALIDATE_INODES */
-
#ifndef HAVE_SHRINK_DCACHE_MEMORY
+ /* When shrink_dcache_memory_fn == NULL support is disabled */
shrink_dcache_memory_fn = (shrink_dcache_memory_t)
- spl_kallsyms_lookup_name("shrink_dcache_memory");
- if (!shrink_dcache_memory_fn) {
- printk(KERN_ERR "Error: Unknown symbol shrink_dcache_memory\n");
- return -EFAULT;
- }
+ spl_kallsyms_lookup_name("shrink_dcache_memory");
#endif /* HAVE_SHRINK_DCACHE_MEMORY */
#ifndef HAVE_SHRINK_ICACHE_MEMORY
+ /* When shrink_icache_memory_fn == NULL support is disabled */
shrink_icache_memory_fn = (shrink_icache_memory_t)
- spl_kallsyms_lookup_name("shrink_icache_memory");
- if (!shrink_icache_memory_fn) {
- printk(KERN_ERR "Error: Unknown symbol shrink_icache_memory\n");
- return -EFAULT;
- }
+ spl_kallsyms_lookup_name("shrink_icache_memory");
#endif /* HAVE_SHRINK_ICACHE_MEMORY */
return 0;
init_rwsem(&spl_kmem_cache_sem);
INIT_LIST_HEAD(&spl_kmem_cache_list);
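+ /* Single threaded taskq used for periodic cache ageing via
+ * spl_cache_age() and asynchronous slab growth via
+ * spl_cache_grow_work() */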
+ spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
+ 1, maxclsyspri, 1, 32, TASKQ_PREPOPULATE);
spl_register_shrinker(&spl_kmem_cache_shrinker);
SENTRY;
spl_unregister_shrinker(&spl_kmem_cache_shrinker);
+ taskq_destroy(spl_kmem_cache_taskq);
SEXIT;
}