-/*
- * This file is part of the SPL: Solaris Porting Layer.
- *
- * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
- * Produced at Lawrence Livermore National Laboratory
- * Written by:
- * Brian Behlendorf <behlendorf1@llnl.gov>,
- * Herb Wartens <wartens2@llnl.gov>,
- * Jim Garlick <garlick@llnl.gov>
+/*****************************************************************************\
+ * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
+ * Copyright (C) 2007 The Regents of the University of California.
+ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
- * This is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This file is part of the SPL, Solaris Porting Layer.
+ * For details, see <http://zfsonlinux.org/>.
*
- * This is distributed in the hope that it will be useful, but WITHOUT
+ * The SPL is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
+ * with the SPL. If not, see <http://www.gnu.org/licenses/>.
+ *****************************************************************************
+ * Solaris Porting Layer (SPL) Kmem Implementation.
+\*****************************************************************************/
#include <sys/kmem.h>
+#include <spl-debug.h>
-#ifdef DEBUG_SUBSYSTEM
-# undef DEBUG_SUBSYSTEM
+#ifdef SS_DEBUG_SUBSYS
+#undef SS_DEBUG_SUBSYS
#endif
-#define DEBUG_SUBSYSTEM S_KMEM
+#define SS_DEBUG_SUBSYS SS_KMEM
+
+/*
+ * Cache expiration was implemented because it was part of the default Solaris
+ * kmem_cache behavior. The idea is that per-cpu objects which haven't been
+ * accessed in several seconds should be returned to the cache. On the other
+ * hand Linux slabs never move objects back to the slabs unless there is
+ * memory pressure on the system. By default both methods are disabled, but
+ * may be enabled by setting KMC_EXPIRE_AGE or KMC_EXPIRE_MEM.
+ */
+unsigned int spl_kmem_cache_expire = 0;
+EXPORT_SYMBOL(spl_kmem_cache_expire);
+module_param(spl_kmem_cache_expire, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)");
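+
+/*
+ * Illustrative usage (not part of this change): the two methods may be
+ * combined by OR-ing the bits, e.g. at module load time
+ *
+ *   modprobe spl spl_kmem_cache_expire=3    (KMC_EXPIRE_AGE | KMC_EXPIRE_MEM)
+ *
+ * or at run time via the standard module parameter path, assuming sysfs:
+ *
+ *   echo 3 > /sys/module/spl/parameters/spl_kmem_cache_expire
+ */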
/*
* The minimum amount of memory measured in pages to be free at all
* times on the system. This is similar to Linux's zone->pages_min
- * multipled by the number of zones and is sized based on that.
+ * multiplied by the number of zones and is sized based on that.
*/
pgcnt_t minfree = 0;
EXPORT_SYMBOL(minfree);
/*
* The desired amount of memory measured in pages to be free at all
* times on the system. This is similar to Linux's zone->pages_low
- * multipled by the number of zones and is sized based on that.
+ * multiplied by the number of zones and is sized based on that.
* Assuming all zones are being used roughly equally, when we drop
- * below this threshold async page reclamation is triggered.
+ * below this threshold asynchronous page reclamation is triggered.
*/
pgcnt_t desfree = 0;
EXPORT_SYMBOL(desfree);
/*
- * When above this amount of memory measures in pages the system is
+ * When above this amount of memory measured in pages the system is
* determined to have enough free memory. This is similar to Linux's
- * zone->pages_high multipled by the number of zones and is sized based
+ * zone->pages_high multiplied by the number of zones and is sized based
* on that. Assuming all zones are being used roughly equally, when
- * async page reclamation reaches this threshold it stops.
+ * asynchronous page reclamation reaches this threshold it stops.
*/
pgcnt_t lotsfree = 0;
EXPORT_SYMBOL(lotsfree);
#endif /* NEED_GET_ZONE_COUNTS */
EXPORT_SYMBOL(spl_global_page_state);
+#ifndef HAVE_SHRINK_DCACHE_MEMORY
+shrink_dcache_memory_t shrink_dcache_memory_fn = SYMBOL_POISON;
+EXPORT_SYMBOL(shrink_dcache_memory_fn);
+#endif /* HAVE_SHRINK_DCACHE_MEMORY */
+
+#ifndef HAVE_SHRINK_ICACHE_MEMORY
+shrink_icache_memory_t shrink_icache_memory_fn = SYMBOL_POISON;
+EXPORT_SYMBOL(shrink_icache_memory_fn);
+#endif /* HAVE_SHRINK_ICACHE_MEMORY */
+
pgcnt_t
spl_kmem_availrmem(void)
{
}
EXPORT_SYMBOL(vmem_size);
+int
+kmem_debugging(void)
+{
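+	/* The Solaris style kmem debugging flags are not supported, report disabled. */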
+ return 0;
+}
+EXPORT_SYMBOL(kmem_debugging);
+
+#ifndef HAVE_KVASPRINTF
+/* Simplified asprintf. */
+char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
+{
+ unsigned int len;
+ char *p;
+ va_list aq;
+
+ va_copy(aq, ap);
+ len = vsnprintf(NULL, 0, fmt, aq);
+ va_end(aq);
+
+ p = kmalloc(len+1, gfp);
+ if (!p)
+ return NULL;
+
+ vsnprintf(p, len+1, fmt, ap);
+
+ return p;
+}
+EXPORT_SYMBOL(kvasprintf);
+#endif /* HAVE_KVASPRINTF */
+
+char *
+kmem_vasprintf(const char *fmt, va_list ap)
+{
+ va_list aq;
+ char *ptr;
+
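+	/* KM_SLEEP semantics: the allocation must not fail, so retry until it succeeds. */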
+ do {
+ va_copy(aq, ap);
+ ptr = kvasprintf(GFP_KERNEL, fmt, aq);
+ va_end(aq);
+ } while (ptr == NULL);
+
+ return ptr;
+}
+EXPORT_SYMBOL(kmem_vasprintf);
+
+char *
+kmem_asprintf(const char *fmt, ...)
+{
+ va_list ap;
+ char *ptr;
+
+ do {
+ va_start(ap, fmt);
+ ptr = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+ } while (ptr == NULL);
+
+ return ptr;
+}
+EXPORT_SYMBOL(kmem_asprintf);
+
+static char *
+__strdup(const char *str, int flags)
+{
+ char *ptr;
+ int n;
+
+ n = strlen(str);
+ ptr = kmalloc_nofail(n + 1, flags);
+ if (ptr)
+ memcpy(ptr, str, n + 1);
+
+ return ptr;
+}
+
+char *
+strdup(const char *str)
+{
+ return __strdup(str, KM_SLEEP);
+}
+EXPORT_SYMBOL(strdup);
+
+void
+strfree(char *str)
+{
+ kfree(str);
+}
+EXPORT_SYMBOL(strfree);
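+
+/*
+ * Illustrative only: kmem_asprintf() and kmem_vasprintf() retry until the
+ * allocation succeeds, so the returned string is not expected to be NULL;
+ * it must be released with strfree().  A minimal sketch, where the label
+ * and 'id' are hypothetical:
+ *
+ *	char *name = kmem_asprintf("spl-cache-%d", id);
+ *	...
+ *	strfree(name);
+ */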
+
/*
* Memory allocation interfaces and debugging for basic kmem_*
- * and vmem_* style memory allocation. When DEBUG_KMEM is enable
- * all allocations will be tracked when they are allocated and
- * freed. When the SPL module is unload a list of all leaked
- * addresses and where they were allocated will be dumped to the
- * console. Enabling this feature has a significant impant on
- * performance but it makes finding memory leaks staight forward.
+ * and vmem_* style memory allocation. When DEBUG_KMEM is enabled
+ * the SPL will keep track of the total memory allocated, and
+ * report any memory leaked when the module is unloaded.
*/
#ifdef DEBUG_KMEM
+
/* Shim layer memory accounting */
+# ifdef HAVE_ATOMIC64_T
atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long kmem_alloc_max = 0;
atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long vmem_alloc_max = 0;
-int kmem_warning_flag = 1;
+# else /* HAVE_ATOMIC64_T */
+atomic_t kmem_alloc_used = ATOMIC_INIT(0);
+unsigned long long kmem_alloc_max = 0;
+atomic_t vmem_alloc_used = ATOMIC_INIT(0);
+unsigned long long vmem_alloc_max = 0;
+# endif /* HAVE_ATOMIC64_T */
EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);
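+
+/*
+ * The kmem_alloc_used_add()/sub()/read() helpers used below are assumed to
+ * be provided by the kmem header as thin wrappers which hide the atomic64_t
+ * versus atomic_t difference selected above, roughly (sketch only):
+ *
+ *   #ifdef HAVE_ATOMIC64_T
+ *   # define kmem_alloc_used_read()  atomic64_read(&kmem_alloc_used)
+ *   #else
+ *   # define kmem_alloc_used_read()  atomic_read(&kmem_alloc_used)
+ *   #endif
+ */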
-EXPORT_SYMBOL(kmem_warning_flag);
-
-# ifdef DEBUG_KMEM_TRACKING
-/* XXX - Not to surprisingly with debugging enabled the xmem_locks are very
- * highly contended particularly on xfree(). If we want to run with this
- * detailed debugging enabled for anything other than debugging we need to
- * minimize the contention by moving to a lock per xmem_table entry model.
+/* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
+ * but also the location of every alloc and free. When the SPL module is
+ * unloaded a list of all leaked addresses and where they were allocated
+ * will be dumped to the console. Enabling this feature has a significant
+ * impact on performance but it makes finding memory leaks straightforward.
+ *
+ * Not surprisingly with debugging enabled the xmem_locks are very highly
+ * contended particularly on xfree(). If we want to run with this detailed
+ * debugging enabled for anything other than debugging we need to minimize
+ * the contention by moving to a lock per xmem_table entry model.
*/
+# ifdef DEBUG_KMEM_TRACKING
# define KMEM_HASH_BITS 10
# define KMEM_TABLE_SIZE (1 << KMEM_HASH_BITS)
EXPORT_SYMBOL(vmem_lock);
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);
-# endif
-
-int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
-#else
-int kmem_set_warning(int flag) { return 0; }
-#endif
-EXPORT_SYMBOL(kmem_set_warning);
-
-/*
- * Slab allocation interfaces
- *
- * While the Linux slab implementation was inspired by the Solaris
- * implemenation I cannot use it to emulate the Solaris APIs. I
- * require two features which are not provided by the Linux slab.
- *
- * 1) Constructors AND destructors. Recent versions of the Linux
- * kernel have removed support for destructors. This is a deal
- * breaker for the SPL which contains particularly expensive
- * initializers for mutex's, condition variables, etc. We also
- * require a minimal level of cleanup for these data types unlike
- * many Linux data type which do need to be explicitly destroyed.
- *
- * 2) Virtual address space backed slab. Callers of the Solaris slab
- * expect it to work well for both small are very large allocations.
- * Because of memory fragmentation the Linux slab which is backed
- * by kmalloc'ed memory performs very badly when confronted with
- * large numbers of large allocations. Basing the slab on the
- * virtual address space removes the need for contigeous pages
- * and greatly improve performance for large allocations.
- *
- * For these reasons, the SPL has its own slab implementation with
- * the needed features. It is not as highly optimized as either the
- * Solaris or Linux slabs, but it should get me most of what is
- * needed until it can be optimized or obsoleted by another approach.
- *
- * One serious concern I do have about this method is the relatively
- * small virtual address space on 32bit arches. This will seriously
- * constrain the size of the slab caches and their performance.
- *
- * XXX: Improve the partial slab list by carefully maintaining a
- * strict ordering of fullest to emptiest slabs based on
- * the slab reference count. This gaurentees the when freeing
- * slabs back to the system we need only linearly traverse the
- * last N slabs in the list to discover all the freeable slabs.
- *
- * XXX: NUMA awareness for optionally allocating memory close to a
- * particular core. This can be adventageous if you know the slab
- * object will be short lived and primarily accessed from one core.
- *
- * XXX: Slab coloring may also yield performance improvements and would
- * be desirable to implement.
- */
-
-struct list_head spl_kmem_cache_list; /* List of caches */
-struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
-
-static int spl_cache_flush(spl_kmem_cache_t *skc,
- spl_kmem_magazine_t *skm, int flush);
-
-#ifdef HAVE_SET_SHRINKER
-static struct shrinker *spl_kmem_cache_shrinker;
-#else
-static int spl_kmem_cache_generic_shrinker(int nr_to_scan,
- unsigned int gfp_mask);
-static struct shrinker spl_kmem_cache_shrinker = {
- .shrink = spl_kmem_cache_generic_shrinker,
- .seeks = KMC_DEFAULT_SEEKS,
-};
-#endif
-
-#ifdef DEBUG_KMEM
-# ifdef DEBUG_KMEM_TRACKING
static kmem_debug_t *
-kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits,
- void *addr)
+kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, const void *addr)
{
struct hlist_head *head;
struct hlist_node *node;
struct kmem_debug *p;
unsigned long flags;
- ENTRY;
+ SENTRY;
spin_lock_irqsave(lock, flags);
spin_unlock_irqrestore(lock, flags);
- RETURN(NULL);
+ SRETURN(NULL);
}
void *
void *ptr = NULL;
kmem_debug_t *dptr;
unsigned long irq_flags;
- ENTRY;
+ SENTRY;
- dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t),
+ /* Function may be called with KM_NOSLEEP so failure is possible */
+ dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
flags & ~__GFP_ZERO);
- if (dptr == NULL) {
- CWARN("kmem_alloc(%ld, 0x%x) debug failed\n",
- sizeof(kmem_debug_t), flags);
+ if (unlikely(dptr == NULL)) {
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
+ "kmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ sizeof(kmem_debug_t), flags, func, line,
+ kmem_alloc_used_read(), kmem_alloc_max);
} else {
- /* Marked unlikely because we should never be doing this,
- * we tolerate to up 2 pages but a single page is best. */
- if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)
- CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
- (unsigned long long) size, flags,
- atomic64_read(&kmem_alloc_used), kmem_alloc_max);
-
- /* We use kstrdup() below because the string pointed to by
+ /*
+ * Marked unlikely because we should never be doing this,
+	 * we tolerate up to 2 pages but a single page is best.
+ */
+ if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "large "
+ "kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
+ kmem_alloc_used_read(), kmem_alloc_max);
+ spl_debug_dumpstack(NULL);
+ }
+
+ /*
+ * We use __strdup() below because the string pointed to by
* __FUNCTION__ might not be available by the time we want
- * to print it since the module might have been unloaded. */
- dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
+ * to print it since the module might have been unloaded.
+ * This can only fail in the KM_NOSLEEP case.
+ */
+ dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
if (unlikely(dptr->kd_func == NULL)) {
kfree(dptr);
- CWARN("kstrdup() failed in kmem_alloc(%llu, 0x%x) "
- "(%lld/%llu)\n", (unsigned long long) size, flags,
- atomic64_read(&kmem_alloc_used), kmem_alloc_max);
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ "debug __strdup() at %s:%d failed (%lld/%llu)\n",
+ func, line, kmem_alloc_used_read(), kmem_alloc_max);
goto out;
}
/* Use the correct allocator */
if (node_alloc) {
ASSERT(!(flags & __GFP_ZERO));
- ptr = kmalloc_node(size, flags, node);
+ ptr = kmalloc_node_nofail(size, flags, node);
} else if (flags & __GFP_ZERO) {
- ptr = kzalloc(size, flags & ~__GFP_ZERO);
+ ptr = kzalloc_nofail(size, flags & ~__GFP_ZERO);
} else {
- ptr = kmalloc(size, flags);
+ ptr = kmalloc_nofail(size, flags);
}
if (unlikely(ptr == NULL)) {
kfree(dptr->kd_func);
kfree(dptr);
- CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
- (unsigned long long) size, flags,
- atomic64_read(&kmem_alloc_used), kmem_alloc_max);
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "kmem_alloc"
+ "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
+ kmem_alloc_used_read(), kmem_alloc_max);
goto out;
}
- atomic64_add(size, &kmem_alloc_used);
- if (unlikely(atomic64_read(&kmem_alloc_used) >
- kmem_alloc_max))
- kmem_alloc_max =
- atomic64_read(&kmem_alloc_used);
+ kmem_alloc_used_add(size);
+ if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
+ kmem_alloc_max = kmem_alloc_used_read();
INIT_HLIST_NODE(&dptr->kd_hlist);
INIT_LIST_HEAD(&dptr->kd_list);
list_add_tail(&dptr->kd_list, &kmem_list);
spin_unlock_irqrestore(&kmem_lock, irq_flags);
- CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p "
- "(%lld/%llu)\n", (unsigned long long) size, flags,
- ptr, atomic64_read(&kmem_alloc_used),
- kmem_alloc_max);
+ SDEBUG_LIMIT(SD_INFO,
+ "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line, ptr,
+ kmem_alloc_used_read(), kmem_alloc_max);
}
out:
- RETURN(ptr);
+ SRETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_track);
void
-kmem_free_track(void *ptr, size_t size)
+kmem_free_track(const void *ptr, size_t size)
{
kmem_debug_t *dptr;
- ENTRY;
+ SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
- ASSERT(dptr); /* Must exist in hash due to kmem_alloc() */
+ /* Must exist in hash due to kmem_alloc() */
+ ASSERT(dptr);
/* Size must match */
ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
"kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
(unsigned long long) size, dptr->kd_func, dptr->kd_line);
- atomic64_sub(size, &kmem_alloc_used);
-
- CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
- (unsigned long long) size, atomic64_read(&kmem_alloc_used),
+ kmem_alloc_used_sub(size);
+ SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
+ (unsigned long long) size, kmem_alloc_used_read(),
kmem_alloc_max);
kfree(dptr->kd_func);
memset(ptr, 0x5a, size);
kfree(ptr);
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(kmem_free_track);
void *ptr = NULL;
kmem_debug_t *dptr;
unsigned long irq_flags;
- ENTRY;
+ SENTRY;
ASSERT(flags & KM_SLEEP);
- dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t), flags);
- if (dptr == NULL) {
- CWARN("vmem_alloc(%ld, 0x%x) debug failed\n",
- sizeof(kmem_debug_t), flags);
+	/* Failure is not expected here since KM_SLEEP is asserted above */
+ dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
+ flags & ~__GFP_ZERO);
+ if (unlikely(dptr == NULL)) {
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
+ "vmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ sizeof(kmem_debug_t), flags, func, line,
+ vmem_alloc_used_read(), vmem_alloc_max);
} else {
- /* We use kstrdup() below because the string pointed to by
+ /*
+ * We use __strdup() below because the string pointed to by
* __FUNCTION__ might not be available by the time we want
- * to print it, since the module might have been unloaded. */
- dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
+ * to print it, since the module might have been unloaded.
+ * This can never fail because we have already asserted
+ * that flags is KM_SLEEP.
+ */
+ dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
if (unlikely(dptr->kd_func == NULL)) {
kfree(dptr);
- CWARN("kstrdup() failed in vmem_alloc(%llu, 0x%x) "
- "(%lld/%llu)\n", (unsigned long long) size, flags,
- atomic64_read(&vmem_alloc_used), vmem_alloc_max);
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ "debug __strdup() at %s:%d failed (%lld/%llu)\n",
+ func, line, vmem_alloc_used_read(), vmem_alloc_max);
goto out;
}
- ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO,
- PAGE_KERNEL);
+ /* Use the correct allocator */
+ if (flags & __GFP_ZERO) {
+ ptr = vzalloc_nofail(size, flags & ~__GFP_ZERO);
+ } else {
+ ptr = vmalloc_nofail(size, flags);
+ }
if (unlikely(ptr == NULL)) {
kfree(dptr->kd_func);
kfree(dptr);
- CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
- (unsigned long long) size, flags,
- atomic64_read(&vmem_alloc_used), vmem_alloc_max);
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "vmem_alloc"
+ "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
+ vmem_alloc_used_read(), vmem_alloc_max);
goto out;
}
- if (flags & __GFP_ZERO)
- memset(ptr, 0, size);
-
- atomic64_add(size, &vmem_alloc_used);
- if (unlikely(atomic64_read(&vmem_alloc_used) >
- vmem_alloc_max))
- vmem_alloc_max =
- atomic64_read(&vmem_alloc_used);
+ vmem_alloc_used_add(size);
+ if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
+ vmem_alloc_max = vmem_alloc_used_read();
INIT_HLIST_NODE(&dptr->kd_hlist);
INIT_LIST_HEAD(&dptr->kd_list);
list_add_tail(&dptr->kd_list, &vmem_list);
spin_unlock_irqrestore(&vmem_lock, irq_flags);
- CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p "
- "(%lld/%llu)\n", (unsigned long long) size, flags,
- ptr, atomic64_read(&vmem_alloc_used),
- vmem_alloc_max);
+ SDEBUG_LIMIT(SD_INFO,
+ "vmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
+ ptr, vmem_alloc_used_read(), vmem_alloc_max);
}
out:
- RETURN(ptr);
+ SRETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_track);
void
-vmem_free_track(void *ptr, size_t size)
+vmem_free_track(const void *ptr, size_t size)
{
kmem_debug_t *dptr;
- ENTRY;
+ SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
- ASSERT(dptr); /* Must exist in hash due to vmem_alloc() */
+
+ /* Must exist in hash due to vmem_alloc() */
+ ASSERT(dptr);
/* Size must match */
ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
"kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
(unsigned long long) size, dptr->kd_func, dptr->kd_line);
- atomic64_sub(size, &vmem_alloc_used);
- CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
- (unsigned long long) size, atomic64_read(&vmem_alloc_used),
+ vmem_alloc_used_sub(size);
+ SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
+ (unsigned long long) size, vmem_alloc_used_read(),
vmem_alloc_max);
kfree(dptr->kd_func);
memset(ptr, 0x5a, size);
vfree(ptr);
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(vmem_free_track);
int node_alloc, int node)
{
void *ptr;
- ENTRY;
+ SENTRY;
- /* Marked unlikely because we should never be doing this,
- * we tolerate to up 2 pages but a single page is best. */
- if (unlikely(size > (PAGE_SIZE * 2)) && kmem_warning_flag)
- CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
- (unsigned long long) size, flags,
- atomic64_read(&kmem_alloc_used), kmem_alloc_max);
+ /*
+ * Marked unlikely because we should never be doing this,
+	 * we tolerate up to 2 pages but a single page is best.
+ */
+ if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
+ SDEBUG(SD_CONSOLE | SD_WARNING,
+ "large kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
+ kmem_alloc_used_read(), kmem_alloc_max);
+ dump_stack();
+ }
/* Use the correct allocator */
if (node_alloc) {
ASSERT(!(flags & __GFP_ZERO));
- ptr = kmalloc_node(size, flags, node);
+ ptr = kmalloc_node_nofail(size, flags, node);
} else if (flags & __GFP_ZERO) {
- ptr = kzalloc(size, flags & (~__GFP_ZERO));
+ ptr = kzalloc_nofail(size, flags & (~__GFP_ZERO));
} else {
- ptr = kmalloc(size, flags);
+ ptr = kmalloc_nofail(size, flags);
}
- if (ptr == NULL) {
- CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
- (unsigned long long) size, flags,
- atomic64_read(&kmem_alloc_used), kmem_alloc_max);
+ if (unlikely(ptr == NULL)) {
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ "kmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
+ kmem_alloc_used_read(), kmem_alloc_max);
} else {
- atomic64_add(size, &kmem_alloc_used);
- if (unlikely(atomic64_read(&kmem_alloc_used) > kmem_alloc_max))
- kmem_alloc_max = atomic64_read(&kmem_alloc_used);
-
- CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p "
- "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
- atomic64_read(&kmem_alloc_used), kmem_alloc_max);
+ kmem_alloc_used_add(size);
+ if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
+ kmem_alloc_max = kmem_alloc_used_read();
+
+ SDEBUG_LIMIT(SD_INFO,
+ "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line, ptr,
+ kmem_alloc_used_read(), kmem_alloc_max);
}
- RETURN(ptr);
+
+ SRETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_debug);
void
-kmem_free_debug(void *ptr, size_t size)
+kmem_free_debug(const void *ptr, size_t size)
{
- ENTRY;
+ SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
- atomic64_sub(size, &kmem_alloc_used);
-
- CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
- (unsigned long long) size, atomic64_read(&kmem_alloc_used),
+ kmem_alloc_used_sub(size);
+ SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
+ (unsigned long long) size, kmem_alloc_used_read(),
kmem_alloc_max);
-
- memset(ptr, 0x5a, size);
kfree(ptr);
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(kmem_free_debug);
vmem_alloc_debug(size_t size, int flags, const char *func, int line)
{
void *ptr;
- ENTRY;
+ SENTRY;
ASSERT(flags & KM_SLEEP);
- ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO,
- PAGE_KERNEL);
- if (ptr == NULL) {
- CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
- (unsigned long long) size, flags,
- atomic64_read(&vmem_alloc_used), vmem_alloc_max);
+ /* Use the correct allocator */
+ if (flags & __GFP_ZERO) {
+ ptr = vzalloc_nofail(size, flags & (~__GFP_ZERO));
} else {
- if (flags & __GFP_ZERO)
- memset(ptr, 0, size);
-
- atomic64_add(size, &vmem_alloc_used);
+ ptr = vmalloc_nofail(size, flags);
+ }
- if (unlikely(atomic64_read(&vmem_alloc_used) > vmem_alloc_max))
- vmem_alloc_max = atomic64_read(&vmem_alloc_used);
+ if (unlikely(ptr == NULL)) {
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ "vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
+ vmem_alloc_used_read(), vmem_alloc_max);
+ } else {
+ vmem_alloc_used_add(size);
+ if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
+ vmem_alloc_max = vmem_alloc_used_read();
- CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p "
+ SDEBUG_LIMIT(SD_INFO, "vmem_alloc(%llu, 0x%x) = %p "
"(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
- atomic64_read(&vmem_alloc_used), vmem_alloc_max);
+ vmem_alloc_used_read(), vmem_alloc_max);
}
- RETURN(ptr);
+ SRETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_debug);
void
-vmem_free_debug(void *ptr, size_t size)
+vmem_free_debug(const void *ptr, size_t size)
{
- ENTRY;
+ SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
- atomic64_sub(size, &vmem_alloc_used);
-
- CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
- (unsigned long long) size, atomic64_read(&vmem_alloc_used),
+ vmem_alloc_used_sub(size);
+ SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
+ (unsigned long long) size, vmem_alloc_used_read(),
vmem_alloc_max);
-
- memset(ptr, 0x5a, size);
vfree(ptr);
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(vmem_free_debug);
# endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */
+/*
+ * Slab allocation interfaces
+ *
+ * While the Linux slab implementation was inspired by the Solaris
+ * implementation I cannot use it to emulate the Solaris APIs. I
+ * require two features which are not provided by the Linux slab.
+ *
+ * 1) Constructors AND destructors. Recent versions of the Linux
+ * kernel have removed support for destructors. This is a deal
+ * breaker for the SPL which contains particularly expensive
+ * initializers for mutex's, condition variables, etc. We also
+ * require a minimal level of cleanup for these data types unlike
+ * many Linux data types which do not need to be explicitly destroyed.
+ *
+ * 2) Virtual address space backed slab. Callers of the Solaris slab
+ * expect it to work well for both small and very large allocations.
+ * Because of memory fragmentation the Linux slab which is backed
+ * by kmalloc'ed memory performs very badly when confronted with
+ * large numbers of large allocations. Basing the slab on the
+ * virtual address space removes the need for contiguous pages
+ * and greatly improves performance for large allocations.
+ *
+ * For these reasons, the SPL has its own slab implementation with
+ * the needed features. It is not as highly optimized as either the
+ * Solaris or Linux slabs, but it should get me most of what is
+ * needed until it can be optimized or obsoleted by another approach.
+ *
+ * One serious concern I do have about this method is the relatively
+ * small virtual address space on 32bit arches. This will seriously
+ * constrain the size of the slab caches and their performance.
+ *
+ * XXX: Improve the partial slab list by carefully maintaining a
+ * strict ordering of fullest to emptiest slabs based on
+ * the slab reference count. This guarantees that when freeing
+ * slabs back to the system we need only linearly traverse the
+ * last N slabs in the list to discover all the freeable slabs.
+ *
+ * XXX: NUMA awareness for optionally allocating memory close to a
+ * particular core. This can be advantageous if you know the slab
+ * object will be short lived and primarily accessed from one core.
+ *
+ * XXX: Slab coloring may also yield performance improvements and would
+ * be desirable to implement.
+ */
+
+struct list_head spl_kmem_cache_list; /* List of caches */
+struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
+taskq_t *spl_kmem_cache_taskq; /* Task queue for ageing / reclaim */
+
+static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
+
+SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker);
+SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
+ spl_kmem_cache_generic_shrinker, KMC_DEFAULT_SEEKS);
+
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
void *ptr;
- if (skc->skc_flags & KMC_KMEM) {
- if (size > (2 * PAGE_SIZE)) {
- ptr = (void *)__get_free_pages(flags, get_order(size));
- } else
- ptr = kmem_alloc(size, flags);
- } else {
- ptr = vmem_alloc(size, flags);
- }
+ ASSERT(ISP2(size));
+
+ if (skc->skc_flags & KMC_KMEM)
+ ptr = (void *)__get_free_pages(flags, get_order(size));
+ else
+ ptr = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
+
+ /* Resulting allocated memory will be page aligned */
+ ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
return ptr;
}
static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
- if (skc->skc_flags & KMC_KMEM) {
- if (size > (2 * PAGE_SIZE))
- free_pages((unsigned long)ptr, get_order(size));
- else
- kmem_free(ptr, size);
- } else {
- vmem_free(ptr, size);
- }
+ ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
+ ASSERT(ISP2(size));
+
+ /*
+ * The Linux direct reclaim path uses this out of band value to
+ * determine if forward progress is being made. Normally this is
+ * incremented by kmem_freepages() which is part of the various
+ * Linux slab implementations. However, since we are using none
+ * of that infrastructure we are responsible for incrementing it.
+ */
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
+
+ if (skc->skc_flags & KMC_KMEM)
+ free_pages((unsigned long)ptr, get_order(size));
+ else
+ vfree(ptr);
+}
+
+/*
+ * Required space for each aligned sks.
+ */
+static inline uint32_t
+spl_sks_size(spl_kmem_cache_t *skc)
+{
+ return P2ROUNDUP_TYPED(sizeof(spl_kmem_slab_t),
+ skc->skc_obj_align, uint32_t);
+}
+
+/*
+ * Required space for each aligned object.
+ */
+static inline uint32_t
+spl_obj_size(spl_kmem_cache_t *skc)
+{
+ uint32_t align = skc->skc_obj_align;
+
+ return P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
+ P2ROUNDUP_TYPED(sizeof(spl_kmem_obj_t), align, uint32_t);
+}
+
+/*
+ * Lookup the spl_kmem_obj_t for an object, given a pointer to that object.
+ */
+static inline spl_kmem_obj_t *
+spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
+{
+ return obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
+ skc->skc_obj_align, uint32_t);
+}
+
+/*
+ * Required space for each offslab object taking into account alignment
+ * restrictions and the power-of-two requirement of kv_alloc().
+ */
+static inline uint32_t
+spl_offslab_size(spl_kmem_cache_t *skc)
+{
+ return 1UL << (highbit(spl_obj_size(skc)) + 1);
}
/*
* For small objects we use kmem_alloc() because as long as you are
- * only requesting a small number of pages (ideally just one) its cheap.
+ * only requesting a small number of pages (ideally just one) it's cheap.
* However, when you start requesting multiple pages with kmem_alloc()
- * it gets increasingly expensive since it requires contigeous pages.
+ * it gets increasingly expensive since it requires contiguous pages.
* For this reason we shift to vmem_alloc() for slabs of large objects
- * which removes the need for contigeous pages. We do not use
+ * which removes the need for contiguous pages. We do not use
* vmem_alloc() in all cases because there is significant locking
* overhead in __get_vm_area_node(). This function takes a single
- * global lock when aquiring an available virtual address range which
+ * global lock when acquiring an available virtual address range which
* serializes all vmem_alloc()'s for all slab caches. Using slightly
* different allocation functions for small and large objects should
* give us the best of both worlds.
spl_kmem_slab_t *sks;
spl_kmem_obj_t *sko, *n;
void *base, *obj;
- int i, align, size, rc = 0;
+ uint32_t obj_size, offslab_size = 0;
+ int i, rc = 0;
base = kv_alloc(skc, skc->skc_slab_size, flags);
if (base == NULL)
- RETURN(NULL);
+ SRETURN(NULL);
sks = (spl_kmem_slab_t *)base;
sks->sks_magic = SKS_MAGIC;
INIT_LIST_HEAD(&sks->sks_list);
INIT_LIST_HEAD(&sks->sks_free_list);
sks->sks_ref = 0;
+ obj_size = spl_obj_size(skc);
- align = skc->skc_obj_align;
- size = P2ROUNDUP(skc->skc_obj_size, align) +
- P2ROUNDUP(sizeof(spl_kmem_obj_t), align);
+ if (skc->skc_flags & KMC_OFFSLAB)
+ offslab_size = spl_offslab_size(skc);
for (i = 0; i < sks->sks_objs; i++) {
if (skc->skc_flags & KMC_OFFSLAB) {
- obj = kv_alloc(skc, size, flags);
+ obj = kv_alloc(skc, offslab_size, flags);
if (!obj)
- GOTO(out, rc = -ENOMEM);
+ SGOTO(out, rc = -ENOMEM);
} else {
- obj = base +
- P2ROUNDUP(sizeof(spl_kmem_slab_t), align) +
- (i * size);
+ obj = base + spl_sks_size(skc) + (i * obj_size);
}
- sko = obj + P2ROUNDUP(skc->skc_obj_size, align);
+ ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
+ sko = spl_sko_from_obj(skc, obj);
sko->sko_addr = obj;
sko->sko_magic = SKO_MAGIC;
sko->sko_slab = sks;
if (skc->skc_flags & KMC_OFFSLAB)
list_for_each_entry_safe(sko, n, &sks->sks_free_list,
sko_list)
- kv_free(skc, sko->sko_addr, size);
+ kv_free(skc, sko->sko_addr, offslab_size);
kv_free(skc, base, skc->skc_slab_size);
sks = NULL;
}
- RETURN(sks);
+ SRETURN(sks);
}
/*
struct list_head *sks_list, struct list_head *sko_list)
{
spl_kmem_cache_t *skc;
- ENTRY;
+ SENTRY;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_ref == 0);
list_add(&sks->sks_list, sks_list);
list_splice_init(&sks->sks_free_list, sko_list);
- EXIT;
+ SEXIT;
}
/*
spl_kmem_obj_t *sko, *n;
LIST_HEAD(sks_list);
LIST_HEAD(sko_list);
- int size = 0, i = 0;
- ENTRY;
+ uint32_t size = 0;
+ int i = 0;
+ SENTRY;
/*
* Move empty slabs and objects which have not been touched in
* All empty slabs are at the end of skc->skc_partial_list,
* therefore once a non-empty slab is found we can stop
* scanning. Additionally, stop when reaching the target
- * reclaim 'count' if a non-zero threshhold is given.
+ * reclaim 'count' if a non-zero threshold is given.
*/
- if ((sks->sks_ref > 0) || (count && i > count))
+ if ((sks->sks_ref > 0) || (count && i >= count))
break;
if (time_after(jiffies,sks->sks_age+skc->skc_delay*HZ)||flag) {
* objects and slabs back to the system.
*/
if (skc->skc_flags & KMC_OFFSLAB)
- size = P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) +
- P2ROUNDUP(sizeof(spl_kmem_obj_t), skc->skc_obj_align);
+ size = spl_offslab_size(skc);
list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
ASSERT(sko->sko_magic == SKO_MAGIC);
cond_resched();
}
- EXIT;
+ SEXIT;
+}
+
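+/*
+ * Emergency objects are tracked in a red-black tree keyed by the object's
+ * address; spl_emergency_free() relies on this to locate the tracking
+ * structure for a given object.
+ */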
+static spl_kmem_emergency_t *
+spl_emergency_search(struct rb_root *root, void *obj)
+{
+ struct rb_node *node = root->rb_node;
+ spl_kmem_emergency_t *ske;
+ unsigned long address = (unsigned long)obj;
+
+ while (node) {
+ ske = container_of(node, spl_kmem_emergency_t, ske_node);
+
+ if (address < (unsigned long)ske->ske_obj)
+ node = node->rb_left;
+ else if (address > (unsigned long)ske->ske_obj)
+ node = node->rb_right;
+ else
+ return ske;
+ }
+
+ return NULL;
+}
+
+static int
+spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+ spl_kmem_emergency_t *ske_tmp;
+ unsigned long address = (unsigned long)ske->ske_obj;
+
+ while (*new) {
+ ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
+
+ parent = *new;
+ if (address < (unsigned long)ske_tmp->ske_obj)
+ new = &((*new)->rb_left);
+ else if (address > (unsigned long)ske_tmp->ske_obj)
+ new = &((*new)->rb_right);
+ else
+ return 0;
+ }
+
+ rb_link_node(&ske->ske_node, parent, new);
+ rb_insert_color(&ske->ske_node, root);
+
+ return 1;
}
/*
- * Called regularly on all caches to age objects out of the magazines
- * which have not been access in skc->skc_delay seconds. This prevents
- * idle magazines from holding memory which might be better used by
- * other caches or parts of the system. The delay is present to
- * prevent thrashing the magazine.
+ * Allocate a single emergency object and track it in a red black tree.
+ */
+static int
+spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
+{
+ spl_kmem_emergency_t *ske;
+ int empty;
+ SENTRY;
+
+	/* Last chance: use a partial slab if one now exists */
+ spin_lock(&skc->skc_lock);
+ empty = list_empty(&skc->skc_partial_list);
+ spin_unlock(&skc->skc_lock);
+ if (!empty)
+ SRETURN(-EEXIST);
+
+ ske = kmalloc(sizeof(*ske), flags);
+ if (ske == NULL)
+ SRETURN(-ENOMEM);
+
+ ske->ske_obj = kmalloc(skc->skc_obj_size, flags);
+ if (ske->ske_obj == NULL) {
+ kfree(ske);
+ SRETURN(-ENOMEM);
+ }
+
+ spin_lock(&skc->skc_lock);
+ empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
+ if (likely(empty)) {
+ skc->skc_obj_total++;
+ skc->skc_obj_emergency++;
+ if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
+ skc->skc_obj_emergency_max = skc->skc_obj_emergency;
+ }
+ spin_unlock(&skc->skc_lock);
+
+ if (unlikely(!empty)) {
+ kfree(ske->ske_obj);
+ kfree(ske);
+ SRETURN(-EINVAL);
+ }
+
+ if (skc->skc_ctor)
+ skc->skc_ctor(ske->ske_obj, skc->skc_private, flags);
+
+ *obj = ske->ske_obj;
+
+ SRETURN(0);
+}
+
+/*
+ * Locate the passed object in the red black tree and free it.
+ */
+static int
+spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
+{
+ spl_kmem_emergency_t *ske;
+ SENTRY;
+
+ spin_lock(&skc->skc_lock);
+ ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
+ if (likely(ske)) {
+ rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
+ skc->skc_obj_emergency--;
+ skc->skc_obj_total--;
+ }
+ spin_unlock(&skc->skc_lock);
+
+ if (unlikely(ske == NULL))
+ SRETURN(-ENOENT);
+
+ if (skc->skc_dtor)
+ skc->skc_dtor(ske->ske_obj, skc->skc_private);
+
+ kfree(ske->ske_obj);
+ kfree(ske);
+
+ SRETURN(0);
+}
+
+/*
+ * Release objects from the per-cpu magazine back to their slab. The flush
+ * argument contains the max number of entries to remove from the magazine.
*/
+static void
+__spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
+{
+ int i, count = MIN(flush, skm->skm_avail);
+ SENTRY;
+
+ ASSERT(skc->skc_magic == SKC_MAGIC);
+ ASSERT(skm->skm_magic == SKM_MAGIC);
+ ASSERT(spin_is_locked(&skc->skc_lock));
+
+ for (i = 0; i < count; i++)
+ spl_cache_shrink(skc, skm->skm_objs[i]);
+
+ skm->skm_avail -= count;
+ memmove(skm->skm_objs, &(skm->skm_objs[count]),
+ sizeof(void *) * skm->skm_avail);
+
+ SEXIT;
+}
+
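+/* As above, but acquires skc->skc_lock for callers which do not hold it. */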
+static void
+spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
+{
+ spin_lock(&skc->skc_lock);
+ __spl_cache_flush(skc, skm, flush);
+ spin_unlock(&skc->skc_lock);
+}
+
static void
spl_magazine_age(void *data)
{
- spl_kmem_magazine_t *skm =
- spl_get_work_data(data, spl_kmem_magazine_t, skm_work.work);
- spl_kmem_cache_t *skc = skm->skm_cache;
- int i = smp_processor_id();
+ spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
+ spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
ASSERT(skm->skm_magic == SKM_MAGIC);
- ASSERT(skc->skc_magic == SKC_MAGIC);
- ASSERT(skc->skc_mag[i] == skm);
+ ASSERT(skm->skm_cpu == smp_processor_id());
+ ASSERT(irqs_disabled());
+
+ /* There are no available objects or they are too young to age out */
+ if ((skm->skm_avail == 0) ||
+ time_before(jiffies, skm->skm_age + skc->skc_delay * HZ))
+ return;
- if (skm->skm_avail > 0 &&
- time_after(jiffies, skm->skm_age + skc->skc_delay * HZ))
- (void)spl_cache_flush(skc, skm, skm->skm_refill);
+ /*
+ * Because we're executing in interrupt context we may have
+ * interrupted the holder of this lock. To avoid a potential
+ * deadlock return if the lock is contended.
+ */
+ if (!spin_trylock(&skc->skc_lock))
+ return;
- if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
- schedule_delayed_work_on(i, &skm->skm_work,
- skc->skc_delay / 3 * HZ);
+ __spl_cache_flush(skc, skm, skm->skm_refill);
+ spin_unlock(&skc->skc_lock);
}
/*
- * Called regularly to keep a downward pressure on the size of idle
- * magazines and to release free slabs from the cache. This function
- * never calls the registered reclaim function, that only occures
- * under memory pressure or with a direct call to spl_kmem_reap().
+ * Called regularly to keep a downward pressure on the cache.
+ *
+ * Objects older than skc->skc_delay seconds in the per-cpu magazines will
+ * be returned to the caches. This is done to prevent idle magazines from
+ * holding memory which could be better used elsewhere. The delay is
+ * present to prevent thrashing the magazine.
+ *
+ * The newly released objects may result in empty partial slabs. Those
+ * slabs should be released to the system. Otherwise moving the objects
+ * out of the magazines is just wasted work.
*/
static void
spl_cache_age(void *data)
{
- spl_kmem_cache_t *skc =
- spl_get_work_data(data, spl_kmem_cache_t, skc_work.work);
+ spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
+ taskqid_t id = 0;
ASSERT(skc->skc_magic == SKC_MAGIC);
+
+ /* Dynamically disabled at run time */
+ if (!(spl_kmem_cache_expire & KMC_EXPIRE_AGE))
+ return;
+
+ atomic_inc(&skc->skc_ref);
+ spl_on_each_cpu(spl_magazine_age, skc, 1);
spl_slab_reclaim(skc, skc->skc_reap, 0);
- if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
- schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
+ while (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && !id) {
+ id = taskq_dispatch_delay(
+ spl_kmem_cache_taskq, spl_cache_age, skc, TQ_SLEEP,
+ ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
+
+ /* Destroy issued after dispatch immediately cancel it */
+ if (test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && id)
+ taskq_cancel_id(spl_kmem_cache_taskq, id);
+ }
+
+ spin_lock(&skc->skc_lock);
+ skc->skc_taskqid = id;
+ spin_unlock(&skc->skc_lock);
+
+ atomic_dec(&skc->skc_ref);
}
/*
- * Size a slab based on the size of each aliged object plus spl_kmem_obj_t.
+ * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
* When on-slab we want to target SPL_KMEM_CACHE_OBJ_PER_SLAB. However,
* for very small objects we may end up with more than this so as not
* to waste space in the minimal allocation of a single page. Also for
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
- int sks_size, obj_size, max_size, align;
+ uint32_t sks_size, obj_size, max_size;
if (skc->skc_flags & KMC_OFFSLAB) {
*objs = SPL_KMEM_CACHE_OBJ_PER_SLAB;
*size = sizeof(spl_kmem_slab_t);
} else {
- align = skc->skc_obj_align;
- sks_size = P2ROUNDUP(sizeof(spl_kmem_slab_t), align);
- obj_size = P2ROUNDUP(skc->skc_obj_size, align) +
- P2ROUNDUP(sizeof(spl_kmem_obj_t), align);
+ sks_size = spl_sks_size(skc);
+ obj_size = spl_obj_size(skc);
if (skc->skc_flags & KMC_KMEM)
- max_size = ((uint64_t)1 << (MAX_ORDER-1)) * PAGE_SIZE;
+ max_size = ((uint32_t)1 << (MAX_ORDER-3)) * PAGE_SIZE;
else
max_size = (32 * 1024 * 1024);
- for (*size = PAGE_SIZE; *size <= max_size; *size += PAGE_SIZE) {
+ /* Power of two sized slab */
+ for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
*objs = (*size - sks_size) / obj_size;
if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB)
- RETURN(0);
+ SRETURN(0);
}
/*
- * Unable to satisfy target objets per slab, fallback to
+ * Unable to satisfy target objects per slab, fall back to
* allocating a maximally sized slab and assuming it can
* contain the minimum objects count use it. If not fail.
*/
*size = max_size;
*objs = (*size - sks_size) / obj_size;
if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN)
- RETURN(0);
+ SRETURN(0);
}
- RETURN(-ENOSPC);
+ SRETURN(-ENOSPC);
}
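+
+/*
+ * Sizing example (illustrative numbers only): assuming a 4 KiB page, an
+ * aligned object size of 1 KiB, a 64 byte slab header and a target of 32
+ * objects per slab, the loop above doubles the slab size until it reaches
+ * 64 KiB, the first power of two where (65536 - 64) / 1024 = 63 objects
+ * meets the target.
+ */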
/*
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
- int size, align = skc->skc_obj_align;
- ENTRY;
+ uint32_t obj_size = spl_obj_size(skc);
+ int size;
+ SENTRY;
/* Per-magazine sizes below assume a 4Kib page size */
- if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 256))
+ if (obj_size > (PAGE_SIZE * 256))
size = 4; /* Minimum 4Mib per-magazine */
- else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 32))
+ else if (obj_size > (PAGE_SIZE * 32))
size = 16; /* Minimum 2Mib per-magazine */
- else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE))
+ else if (obj_size > (PAGE_SIZE))
size = 64; /* Minimum 256Kib per-magazine */
- else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE / 4))
+ else if (obj_size > (PAGE_SIZE / 4))
size = 128; /* Minimum 128Kib per-magazine */
else
size = 256;
- RETURN(size);
+ SRETURN(size);
}
/*
- * Allocate a per-cpu magazine to assoicate with a specific core.
+ * Allocate a per-cpu magazine to associate with a specific core.
*/
static spl_kmem_magazine_t *
-spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
+spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
spl_kmem_magazine_t *skm;
int size = sizeof(spl_kmem_magazine_t) +
sizeof(void *) * skc->skc_mag_size;
- ENTRY;
+ SENTRY;
- skm = kmem_alloc_node(size, GFP_KERNEL | __GFP_NOFAIL, node);
+ skm = kmem_alloc_node(size, KM_SLEEP, cpu_to_node(cpu));
if (skm) {
skm->skm_magic = SKM_MAGIC;
skm->skm_avail = 0;
skm->skm_size = skc->skc_mag_size;
skm->skm_refill = skc->skc_mag_refill;
skm->skm_cache = skc;
- spl_init_delayed_work(&skm->skm_work, spl_magazine_age, skm);
skm->skm_age = jiffies;
+ skm->skm_cpu = cpu;
}
- RETURN(skm);
+ SRETURN(skm);
}
/*
- * Free a per-cpu magazine assoicated with a specific core.
+ * Free a per-cpu magazine associated with a specific core.
*/
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
int size = sizeof(spl_kmem_magazine_t) +
sizeof(void *) * skm->skm_size;
- ENTRY;
+ SENTRY;
ASSERT(skm->skm_magic == SKM_MAGIC);
ASSERT(skm->skm_avail == 0);
kmem_free(skm, size);
- EXIT;
+ SEXIT;
}
/*
spl_magazine_create(spl_kmem_cache_t *skc)
{
int i;
- ENTRY;
+ SENTRY;
skc->skc_mag_size = spl_magazine_size(skc);
skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
for_each_online_cpu(i) {
- skc->skc_mag[i] = spl_magazine_alloc(skc, cpu_to_node(i));
+ skc->skc_mag[i] = spl_magazine_alloc(skc, i);
if (!skc->skc_mag[i]) {
for (i--; i >= 0; i--)
spl_magazine_free(skc->skc_mag[i]);
- RETURN(-ENOMEM);
+ SRETURN(-ENOMEM);
}
}
- /* Only after everything is allocated schedule magazine work */
- for_each_online_cpu(i)
- schedule_delayed_work_on(i, &skc->skc_mag[i]->skm_work,
- skc->skc_delay / 3 * HZ);
-
- RETURN(0);
+ SRETURN(0);
}
/*
{
spl_kmem_magazine_t *skm;
int i;
- ENTRY;
+ SENTRY;
for_each_online_cpu(i) {
skm = skc->skc_mag[i];
- (void)spl_cache_flush(skc, skm, skm->skm_avail);
+ spl_cache_flush(skc, skm, skm->skm_avail);
spl_magazine_free(skm);
}
- EXIT;
+ SEXIT;
}
/*
void *priv, void *vmp, int flags)
{
spl_kmem_cache_t *skc;
- int rc, kmem_flags = KM_SLEEP;
- ENTRY;
+ int rc;
+ SENTRY;
ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);
ASSERT(vmp == NULL);
- /* We may be called when there is a non-zero preempt_count or
- * interrupts are disabled is which case we must not sleep.
- */
- if (current_thread_info()->preempt_count || irqs_disabled())
- kmem_flags = KM_NOSLEEP;
+ might_sleep();
- /* Allocate new cache memory and initialize. */
- skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc), kmem_flags);
+ /*
+	 * Allocate memory for a new cache and initialize it. Unfortunately,
+ * this usually ends up being a large allocation of ~32k because
+ * we need to allocate enough memory for the worst case number of
+ * cpus in the magazine, skc_mag[NR_CPUS]. Because of this we
+ * explicitly pass KM_NODEBUG to suppress the kmem warning
+ */
+	skc = kmem_zalloc(sizeof(*skc), KM_SLEEP | KM_NODEBUG);
if (skc == NULL)
- RETURN(NULL);
+ SRETURN(NULL);
skc->skc_magic = SKC_MAGIC;
skc->skc_name_size = strlen(name) + 1;
- skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags);
+ skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, KM_SLEEP);
if (skc->skc_name == NULL) {
kmem_free(skc, sizeof(*skc));
- RETURN(NULL);
+ SRETURN(NULL);
}
strncpy(skc->skc_name, name, skc->skc_name_size);
INIT_LIST_HEAD(&skc->skc_list);
INIT_LIST_HEAD(&skc->skc_complete_list);
INIT_LIST_HEAD(&skc->skc_partial_list);
+ skc->skc_emergency_tree = RB_ROOT;
spin_lock_init(&skc->skc_lock);
+ init_waitqueue_head(&skc->skc_waitq);
skc->skc_slab_fail = 0;
skc->skc_slab_create = 0;
skc->skc_slab_destroy = 0;
skc->skc_obj_total = 0;
skc->skc_obj_alloc = 0;
skc->skc_obj_max = 0;
+ skc->skc_obj_deadlock = 0;
+ skc->skc_obj_emergency = 0;
+ skc->skc_obj_emergency_max = 0;
if (align) {
- ASSERT((align & (align - 1)) == 0); /* Power of two */
- ASSERT(align >= SPL_KMEM_CACHE_ALIGN); /* Minimum size */
+ VERIFY(ISP2(align));
+ VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN); /* Min alignment */
+ VERIFY3U(align, <=, PAGE_SIZE); /* Max alignment */
skc->skc_obj_align = align;
}
/* If none passed select a cache type based on object size */
if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM))) {
- if (P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) <
- (PAGE_SIZE / 8)) {
+ if (spl_obj_size(skc) < (PAGE_SIZE / 8))
skc->skc_flags |= KMC_KMEM;
- } else {
+ else
skc->skc_flags |= KMC_VMEM;
- }
}
rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size);
if (rc)
- GOTO(out, rc);
+ SGOTO(out, rc);
rc = spl_magazine_create(skc);
if (rc)
- GOTO(out, rc);
+ SGOTO(out, rc);
- spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc);
- schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
+ if (spl_kmem_cache_expire & KMC_EXPIRE_AGE)
+ skc->skc_taskqid = taskq_dispatch_delay(spl_kmem_cache_taskq,
+ spl_cache_age, skc, TQ_SLEEP,
+ ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
down_write(&spl_kmem_cache_sem);
list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
up_write(&spl_kmem_cache_sem);
- RETURN(skc);
+ SRETURN(skc);
out:
kmem_free(skc->skc_name, skc->skc_name_size);
kmem_free(skc, sizeof(*skc));
- RETURN(NULL);
+ SRETURN(NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
/*
- * Destroy a cache and all objects assoicated with the cache.
+ * Register a move callback for cache defragmentation.
+ * XXX: Unimplemented but harmless to stub out for now.
+ */
+void
+spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
+ kmem_cbrc_t (move)(void *, void *, size_t, void *))
+{
+ ASSERT(move != NULL);
+}
+EXPORT_SYMBOL(spl_kmem_cache_set_move);
+
+/*
+ * Destroy a cache and all objects associated with the cache.
*/
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
DECLARE_WAIT_QUEUE_HEAD(wq);
- int i;
- ENTRY;
+ taskqid_t id;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
list_del_init(&skc->skc_list);
up_write(&spl_kmem_cache_sem);
- /* Cancel any and wait for any pending delayed work */
- ASSERT(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
- cancel_delayed_work(&skc->skc_work);
- for_each_online_cpu(i)
- cancel_delayed_work(&skc->skc_mag[i]->skm_work);
+ /* Cancel any and wait for any pending delayed tasks */
+ VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
- flush_scheduled_work();
+ spin_lock(&skc->skc_lock);
+ id = skc->skc_taskqid;
+ spin_unlock(&skc->skc_lock);
+
+ taskq_cancel_id(spl_kmem_cache_taskq, id);
/* Wait until all current callers complete, this is mainly
* to catch the case where a low memory situation triggers a
ASSERT3U(skc->skc_obj_alloc, ==, 0);
ASSERT3U(skc->skc_slab_total, ==, 0);
ASSERT3U(skc->skc_obj_total, ==, 0);
+ ASSERT3U(skc->skc_obj_emergency, ==, 0);
ASSERT(list_empty(&skc->skc_complete_list));
kmem_free(skc->skc_name, skc->skc_name_size);
kmem_free(skc, sizeof(*skc));
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
}
/*
- * No available objects on any slabsi, create a new slab. Since this
- * is an expensive operation we do it without holding the spinlock and
- * only briefly aquire it when we link in the fully allocated and
- * constructed slab.
+ * Generic slab allocation function to be run by the global work queues.
+ * It is responsible for allocating a new slab, linking it into the list
+ * of partial slabs, and then waking any waiters.
*/
-static spl_kmem_slab_t *
-spl_cache_grow(spl_kmem_cache_t *skc, int flags)
+static void
+spl_cache_grow_work(void *data)
{
+ spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
+ spl_kmem_cache_t *skc = ska->ska_cache;
spl_kmem_slab_t *sks;
- ENTRY;
+
+ sks = spl_slab_alloc(skc, ska->ska_flags | __GFP_NORETRY | KM_NODEBUG);
+ spin_lock(&skc->skc_lock);
+ if (sks) {
+ skc->skc_slab_total++;
+ skc->skc_obj_total += sks->sks_objs;
+ list_add_tail(&sks->sks_list, &skc->skc_partial_list);
+ }
+
+ atomic_dec(&skc->skc_ref);
+ clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
+ clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
+ wake_up_all(&skc->skc_waitq);
+ spin_unlock(&skc->skc_lock);
+
+ kfree(ska);
+}
+
+/*
+ * Returns non-zero when a new slab should be available.
+ */
+static int
+spl_cache_grow_wait(spl_kmem_cache_t *skc)
+{
+ return !test_bit(KMC_BIT_GROWING, &skc->skc_flags);
+}
+
+static int
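+/*
+ * wait_on_bit() action function: reschedule and return 0 so the wait
+ * continues until KMC_BIT_REAPING has been cleared by the reaping thread.
+ */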
+spl_cache_reclaim_wait(void *word)
+{
+ schedule();
+ return 0;
+}
+
+/*
+ * No available objects on any slabs, create a new slab.
+ */
+static int
+spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
+{
+ int remaining, rc;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
- local_irq_enable();
might_sleep();
+ *obj = NULL;
/*
- * Before allocating a new slab check if the slab is being reaped.
- * If it is there is a good chance we can wait until it finishes
- * and then use one of the newly freed but not aged-out slabs.
+ * Before allocating a new slab wait for any reaping to complete and
+ * then return so the local magazine can be rechecked for new objects.
*/
if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
- schedule();
- GOTO(out, sks= NULL);
+ rc = wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
+ spl_cache_reclaim_wait, TASK_UNINTERRUPTIBLE);
+ SRETURN(rc ? rc : -EAGAIN);
}
- /* Allocate a new slab for the cache */
- sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | __GFP_NOWARN);
- if (sks == NULL)
- GOTO(out, sks = NULL);
+ /*
+ * This is handled by dispatching a work request to the global work
+ * queue. This allows us to asynchronously allocate a new slab while
+	 * retaining the ability to safely fall back to smaller synchronous
+ * allocations to ensure forward progress is always maintained.
+ */
+ if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
+ spl_kmem_alloc_t *ska;
+
+ ska = kmalloc(sizeof(*ska), flags);
+ if (ska == NULL) {
+ clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
+ wake_up_all(&skc->skc_waitq);
+ SRETURN(-ENOMEM);
+ }
- /* Link the new empty slab in to the end of skc_partial_list. */
- spin_lock(&skc->skc_lock);
- skc->skc_slab_total++;
- skc->skc_obj_total += sks->sks_objs;
- list_add_tail(&sks->sks_list, &skc->skc_partial_list);
- spin_unlock(&skc->skc_lock);
-out:
- local_irq_disable();
+ atomic_inc(&skc->skc_ref);
+ ska->ska_cache = skc;
+ ska->ska_flags = flags & ~__GFP_FS;
+ taskq_init_ent(&ska->ska_tqe);
+ taskq_dispatch_ent(spl_kmem_cache_taskq,
+ spl_cache_grow_work, ska, 0, &ska->ska_tqe);
+ }
- RETURN(sks);
+ /*
+ * The goal here is to only detect the rare case where a virtual slab
+ * allocation has deadlocked. We must be careful to minimize the use
+ * of emergency objects which are more expensive to track. Therefore,
+ * we set a very long timeout for the asynchronous allocation and if
+ * the timeout is reached the cache is flagged as deadlocked. From
+ * this point only new emergency objects will be allocated until the
+ * asynchronous allocation completes and clears the deadlocked flag.
+ */
+ if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
+ rc = spl_emergency_alloc(skc, flags, obj);
+ } else {
+ remaining = wait_event_timeout(skc->skc_waitq,
+ spl_cache_grow_wait(skc), HZ);
+
+ if (!remaining && test_bit(KMC_BIT_VMEM, &skc->skc_flags)) {
+ spin_lock(&skc->skc_lock);
+ if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
+ set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
+ skc->skc_obj_deadlock++;
+ }
+ spin_unlock(&skc->skc_lock);
+ }
+
+ rc = -ENOMEM;
+ }
+
+ SRETURN(rc);
}
/*
- * Refill a per-cpu magazine with objects from the slabs for this
- * cache. Ideally the magazine can be repopulated using existing
- * objects which have been released, however if we are unable to
- * locate enough free objects new slabs of objects will be created.
+ * Refill a per-cpu magazine with objects from the slabs for this cache.
+ * Ideally the magazine can be repopulated using existing objects which have
+ * been released, however if we are unable to locate enough free objects new
+ * slabs of objects will be created. On success NULL is returned, otherwise
+ * the address of a single emergency object is returned for use by the caller.
*/
-static int
+static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
spl_kmem_slab_t *sks;
- int rc = 0, refill;
- ENTRY;
+ int count = 0, rc, refill;
+ void *obj = NULL;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
if (list_empty(&skc->skc_partial_list)) {
spin_unlock(&skc->skc_lock);
- sks = spl_cache_grow(skc, flags);
- if (!sks)
- GOTO(out, rc);
+ local_irq_enable();
+ rc = spl_cache_grow(skc, flags, &obj);
+ local_irq_disable();
+
+ /* Emergency object for immediate use by caller */
+ if (rc == 0 && obj != NULL)
+ SRETURN(obj);
+
+ if (rc)
+ SGOTO(out, rc);
/* Rescheduled to different CPU skm is not local */
if (skm != skc->skc_mag[smp_processor_id()])
- GOTO(out, rc);
+ SGOTO(out, rc);
/* Potentially rescheduled to the same CPU but
- * allocations may have occured from this CPU while
+ * allocations may have occurred from this CPU while
* we were sleeping so recalculate max refill. */
refill = MIN(refill, skm->skm_size - skm->skm_avail);
/* Consume as many objects as needed to refill the requested
* cache. We must also be careful not to overfill it. */
- while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++rc) {
+ while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++count) {
ASSERT(skm->skm_avail < skm->skm_size);
- ASSERT(rc < skm->skm_size);
+ ASSERT(count < skm->skm_size);
skm->skm_objs[skm->skm_avail++]=spl_cache_obj(skc,sks);
}
spin_unlock(&skc->skc_lock);
out:
- /* Returns the number of entries added to cache */
- RETURN(rc);
+ SRETURN(NULL);
}
/*
{
spl_kmem_slab_t *sks = NULL;
spl_kmem_obj_t *sko = NULL;
- ENTRY;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(spin_is_locked(&skc->skc_lock));
- sko = obj + P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align);
+ sko = spl_sko_from_obj(skc, obj);
ASSERT(sko->sko_magic == SKO_MAGIC);
-
sks = sko->sko_slab;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_cache == skc);
list_add(&sks->sks_list, &skc->skc_partial_list);
}
- /* Move emply slabs to the end of the partial list so
+ /* Move empty slabs to the end of the partial list so
* they can be easily found and freed during reclamation. */
if (sks->sks_ref == 0) {
list_del(&sks->sks_list);
skc->skc_slab_alloc--;
}
- EXIT;
-}
-
-/*
- * Release a batch of objects from a per-cpu magazine back to their
- * respective slabs. This occurs when we exceed the magazine size,
- * are under memory pressure, when the cache is idle, or during
- * cache cleanup. The flush argument contains the number of entries
- * to remove from the magazine.
- */
-static int
-spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
-{
- int i, count = MIN(flush, skm->skm_avail);
- ENTRY;
-
- ASSERT(skc->skc_magic == SKC_MAGIC);
- ASSERT(skm->skm_magic == SKM_MAGIC);
-
- /*
- * XXX: Currently we simply return objects from the magazine to
- * the slabs in fifo order. The ideal thing to do from a memory
- * fragmentation standpoint is to cheaply determine the set of
- * objects in the magazine which will result in the largest
- * number of free slabs if released from the magazine.
- */
- spin_lock(&skc->skc_lock);
- for (i = 0; i < count; i++)
- spl_cache_shrink(skc, skm->skm_objs[i]);
-
- skm->skm_avail -= count;
- memmove(skm->skm_objs, &(skm->skm_objs[count]),
- sizeof(void *) * skm->skm_avail);
-
- spin_unlock(&skc->skc_lock);
-
- RETURN(count);
+ SEXIT;
}
/*
spl_kmem_magazine_t *skm;
unsigned long irq_flags;
void *obj = NULL;
- ENTRY;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
restart:
/* Safe to update per-cpu structure without lock, but
- * in the restart case we must be careful to reaquire
+ * in the restart case we must be careful to reacquire
* the local magazine since this may have changed
* when we need to grow the cache. */
skm = skc->skc_mag[smp_processor_id()];
obj = skm->skm_objs[--skm->skm_avail];
skm->skm_age = jiffies;
} else {
- /* Per-CPU cache empty, directly allocate from
- * the slab and refill the per-CPU cache. */
- (void)spl_cache_refill(skc, skm, flags);
- GOTO(restart, obj = NULL);
+ obj = spl_cache_refill(skc, skm, flags);
+ if (obj == NULL)
+ SGOTO(restart, obj = NULL);
}
local_irq_restore(irq_flags);
ASSERT(obj);
- ASSERT(((unsigned long)(obj) % skc->skc_obj_align) == 0);
+ ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
/* Pre-emptively migrate object to CPU L1 cache */
prefetchw(obj);
atomic_dec(&skc->skc_ref);
- RETURN(obj);
+ SRETURN(obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
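
/*
 * Consumer-side view (illustrative sketch, not part of this file): a typical
 * cache user creates a named cache, allocates and frees objects through it,
 * and destroys it on teardown.  The structure and cache names below are
 * hypothetical; the entry points assumed are the standard SPL
 * spl_kmem_cache_create()/destroy() interfaces defined elsewhere in this
 * file, plus the alloc/free routines exported above and below.
 */
typedef struct my_node {
	int	mn_id;
	char	mn_name[32];
} my_node_t;

static spl_kmem_cache_t *my_node_cache;

static int
my_node_cache_example(void)
{
	my_node_t *mn;

	my_node_cache = spl_kmem_cache_create("my_node_cache",
	    sizeof (my_node_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	if (my_node_cache == NULL)
		return (-ENOMEM);

	mn = spl_kmem_cache_alloc(my_node_cache, KM_SLEEP);
	if (mn != NULL) {
		mn->mn_id = 1;
		spl_kmem_cache_free(my_node_cache, mn);
	}

	spl_kmem_cache_destroy(my_node_cache);
	return (0);
}
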
{
spl_kmem_magazine_t *skm;
unsigned long flags;
- ENTRY;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
atomic_inc(&skc->skc_ref);
+
+ /*
+ * Only virtual slabs may have emergency objects and these objects
+ * are guaranteed to have physical addresses. They must be removed
+ * from the tree of emergency objects and then freed.
+ */
+ if ((skc->skc_flags & KMC_VMEM) && !kmem_virt(obj))
+ SGOTO(out, spl_emergency_free(skc, obj));
+
local_irq_save(flags);
/* Safe to update per-cpu structure without lock, but
/* Per-CPU cache full, flush it to make space */
if (unlikely(skm->skm_avail >= skm->skm_size))
- (void)spl_cache_flush(skc, skm, skm->skm_refill);
+ spl_cache_flush(skc, skm, skm->skm_refill);
/* Available space in cache, use it */
skm->skm_objs[skm->skm_avail++] = obj;
local_irq_restore(flags);
+out:
atomic_dec(&skc->skc_ref);
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_free);
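
/*
 * Illustrative sketch of the emergency-object free path taken above when
 * kmem_virt(obj) is false.  The real spl_emergency_free() is defined earlier
 * in this file; the structure, field, and helper names used here
 * (spl_kmem_emergency_t, ske_node, ske_obj, skc_emergency_tree,
 * spl_emergency_search) are assumptions made purely for illustration.
 */
static int
spl_emergency_free_sketch(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_emergency_t *ske;

	spin_lock(&skc->skc_lock);
	/* Emergency objects are tracked per cache; find and unlink this one */
	ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
	if (ske != NULL)
		rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
	spin_unlock(&skc->skc_lock);

	if (ske == NULL)
		return (-ENOENT);

	/* Emergency objects were kmalloc'd, so release them directly */
	kfree(ske->ske_obj);
	kfree(ske);

	return (0);
}
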
/*
- * The generic shrinker function for all caches. Under linux a shrinker
- * may not be tightly coupled with a slab cache. In fact linux always
- * systematically trys calling all registered shrinker callbacks which
+ * The generic shrinker function for all caches. Under Linux a shrinker
+ * may not be tightly coupled with a slab cache. In fact Linux always
+ * systematically tries calling all registered shrinker callbacks which
* report that they contain unused objects. Because of this we only
* register one shrinker function in the shim layer for all slab caches.
* We always attempt to shrink all caches when this generic shrinker
* is called. The shrinker should return the number of free objects
* in the cache when called with nr_to_scan == 0 but not attempt to
* free any objects. When nr_to_scan > 0 it is a request that nr_to_scan
- * objects should be freed, because Solaris semantics are to free
- * all available objects we may free more objects than requested.
+ * objects should be freed, which differs from Solaris semantics.
+ * Solaris semantics are to free all available objects which may (and
+ * probably will) be more objects than the requested nr_to_scan.
*/
static int
-spl_kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
+__spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
+ struct shrink_control *sc)
{
spl_kmem_cache_t *skc;
int unused = 0;
down_read(&spl_kmem_cache_sem);
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
- if (nr_to_scan)
- spl_kmem_cache_reap_now(skc);
+ if (sc->nr_to_scan)
+ spl_kmem_cache_reap_now(skc,
+ MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
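+ /*
+ * Worked example of the scaling above (illustrative): with
+ * sc->nr_to_scan == 128 and skc_slab_objs == 32 objects per slab,
+ * fls64(32) == 6, so this cache is asked to reap
+ * MAX(128 >> 6, 1) == 2 slabs rather than 128 individual objects.
+ */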
/*
* Presume everything alloc'ed in reclaimable, this ensures
return (unused * sysctl_vfs_cache_pressure) / 100;
}
+SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
+
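/*
 * Roughly what the compatibility wrapper above may expand to on kernels
 * that pass a struct shrink_control (an illustrative sketch only; the real
 * macro lives in the SPL compat headers and also covers older callback
 * signatures which take nr_to_scan and gfp_mask directly):
 */
static int
spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
    struct shrink_control *sc)
{
	return (__spl_kmem_cache_generic_shrinker(shrink, sc));
}

/*
 * Likewise, the shrinker object registered from spl_kmem_init() would then
 * plausibly be declared along these lines, with KMC_DEFAULT_SEEKS as the
 * seek cost:
 */
static struct shrinker spl_kmem_cache_shrinker = {
	.shrink	= spl_kmem_cache_generic_shrinker,
	.seeks	= KMC_DEFAULT_SEEKS,
};
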
/*
* Call the registered reclaim function for a cache. Depending on how
* many and which objects are released it may simply repopulate the
* effort and we do not want to thrash creating and destroying slabs.
*/
void
-spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
+spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
{
- ENTRY;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
/* Prevent concurrent cache reaping when contended */
if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
- EXIT;
+ SEXIT;
return;
}
atomic_inc(&skc->skc_ref);
- if (skc->skc_reclaim)
- skc->skc_reclaim(skc->skc_private);
+ /*
+ * When a reclaim function is available it may be invoked repeatedly
+ * until at least a single slab can be freed. This ensures that we
+ * do free memory back to the system. This helps minimize the chance
+ * of an OOM event when the bulk of memory is used by the slab.
+ *
+ * When free slabs are already available, the reclaim callback will be
+ * skipped. Additionally, if no forward progress is detected despite
+ * invoking the reclaim function, the cache will be skipped to avoid
+ * deadlock.
+ *
+ * Longer term this would be the correct place to add the code which
+ * repacks the slabs in order to minimize fragmentation.
+ */
+ if (skc->skc_reclaim) {
+ uint64_t objects = UINT64_MAX;
+ int do_reclaim;
- spl_slab_reclaim(skc, skc->skc_reap, 0);
+ do {
+ spin_lock(&skc->skc_lock);
+ do_reclaim =
+ (skc->skc_slab_total > 0) &&
+ ((skc->skc_slab_total - skc->skc_slab_alloc) == 0) &&
+ (skc->skc_obj_alloc < objects);
+
+ objects = skc->skc_obj_alloc;
+ spin_unlock(&skc->skc_lock);
+
+ if (do_reclaim)
+ skc->skc_reclaim(skc->skc_private);
+
+ } while (do_reclaim);
+ }
+
+ /* Reclaim from the magazines, then the slabs, ignoring age and delay. */
+ if (spl_kmem_cache_expire & KMC_EXPIRE_MEM) {
+ spl_kmem_magazine_t *skm;
+ int i;
+
+ for_each_online_cpu(i) {
+ skm = skc->skc_mag[i];
+ spl_cache_flush(skc, skm, skm->skm_avail);
+ }
+ }
+
+ spl_slab_reclaim(skc, count, 1);
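+ /*
+ * Pair the flag clear below with a memory barrier and an explicit wake
+ * up so that any thread blocked in wait_on_bit() within spl_cache_grow()
+ * is guaranteed to observe KMC_BIT_REAPING being cleared.
+ */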
clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
+
atomic_dec(&skc->skc_ref);
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
void
spl_kmem_reap(void)
{
- spl_kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
+ struct shrink_control sc;
+
+ sc.nr_to_scan = KMC_REAP_CHUNK;
+ sc.gfp_mask = GFP_KERNEL;
+
+ __spl_kmem_cache_generic_shrinker(NULL, &sc);
}
EXPORT_SYMBOL(spl_kmem_reap);
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
int i;
- ENTRY;
+ SENTRY;
spin_lock_init(lock);
INIT_LIST_HEAD(list);
for (i = 0; i < size; i++)
INIT_HLIST_HEAD(&kmem_table[i]);
- RETURN(0);
+ SRETURN(0);
}
static void
unsigned long flags;
kmem_debug_t *kd;
char str[17];
- ENTRY;
+ SENTRY;
spin_lock_irqsave(lock, flags);
if (!list_empty(list))
kd->kd_func, kd->kd_line);
spin_unlock_irqrestore(lock, flags);
- EXIT;
+ SEXIT;
}
#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
#define spl_kmem_init_tracking(list, lock, size)
if (!populated_zone(zone))
continue;
- minfree += zone->pages_min;
- desfree += zone->pages_low;
- lotsfree += zone->pages_high;
+ minfree += min_wmark_pages(zone);
+ desfree += low_wmark_pages(zone);
+ lotsfree += high_wmark_pages(zone);
}
/* Solaris default values */
*/
spl_kmem_init_globals();
+#ifndef HAVE_SHRINK_DCACHE_MEMORY
+ /* When shrink_dcache_memory_fn == NULL, this support is disabled */
+ shrink_dcache_memory_fn = (shrink_dcache_memory_t)
+ spl_kallsyms_lookup_name("shrink_dcache_memory");
+#endif /* HAVE_SHRINK_DCACHE_MEMORY */
+
+#ifndef HAVE_SHRINK_ICACHE_MEMORY
+ /* When shrink_icache_memory_fn == NULL, this support is disabled */
+ shrink_icache_memory_fn = (shrink_icache_memory_t)
+ spl_kallsyms_lookup_name("shrink_icache_memory");
+#endif /* HAVE_SHRINK_ICACHE_MEMORY */
+
return 0;
}
spl_kmem_init(void)
{
int rc = 0;
- ENTRY;
+ SENTRY;
init_rwsem(&spl_kmem_cache_sem);
INIT_LIST_HEAD(&spl_kmem_cache_list);
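+ /* Taskq used to perform the asynchronous slab allocations dispatched
+ * by spl_cache_grow(). */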
+ spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
+ 1, maxclsyspri, 1, 32, TASKQ_PREPOPULATE);
-#ifdef HAVE_SET_SHRINKER
- spl_kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
- spl_kmem_cache_generic_shrinker);
- if (spl_kmem_cache_shrinker == NULL)
- RETURN(rc = -ENOMEM);
-#else
- register_shrinker(&spl_kmem_cache_shrinker);
-#endif
+ spl_register_shrinker(&spl_kmem_cache_shrinker);
#ifdef DEBUG_KMEM
- atomic64_set(&kmem_alloc_used, 0);
- atomic64_set(&vmem_alloc_used, 0);
+ kmem_alloc_used_set(0);
+ vmem_alloc_used_set(0);
spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
#endif
- RETURN(rc);
+ SRETURN(rc);
}
void
* allocation size and the first few bytes of what's located
* at that address to aid in debugging. Performance is not
* a serious concern here since it is module unload time. */
- if (atomic64_read(&kmem_alloc_used) != 0)
- CWARN("kmem leaked %ld/%ld bytes\n",
- atomic64_read(&kmem_alloc_used), kmem_alloc_max);
+ if (kmem_alloc_used_read() != 0)
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ "kmem leaked %ld/%ld bytes\n",
+ kmem_alloc_used_read(), kmem_alloc_max);
- if (atomic64_read(&vmem_alloc_used) != 0)
- CWARN("vmem leaked %ld/%ld bytes\n",
- atomic64_read(&vmem_alloc_used), vmem_alloc_max);
+ if (vmem_alloc_used_read() != 0)
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ "vmem leaked %ld/%ld bytes\n",
+ vmem_alloc_used_read(), vmem_alloc_max);
spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
#endif /* DEBUG_KMEM */
- ENTRY;
+ SENTRY;
-#ifdef HAVE_SET_SHRINKER
- remove_shrinker(spl_kmem_cache_shrinker);
-#else
- unregister_shrinker(&spl_kmem_cache_shrinker);
-#endif
+ spl_unregister_shrinker(&spl_kmem_cache_shrinker);
+ taskq_destroy(spl_kmem_cache_taskq);
- EXIT;
+ SEXIT;
}