\*****************************************************************************/
#include <sys/kmem.h>
+#include <spl-debug.h>
-#ifdef DEBUG_SUBSYSTEM
-# undef DEBUG_SUBSYSTEM
+#ifdef SS_DEBUG_SUBSYS
+#undef SS_DEBUG_SUBSYS
#endif
-#define DEBUG_SUBSYSTEM S_KMEM
+#define SS_DEBUG_SUBSYS SS_KMEM
/*
* The minimum amount of memory measured in pages to be free at all
* times on the system. This is similar to Linux's zone->pages_min
- * multipled by the number of zones and is sized based on that.
+ * multiplied by the number of zones and is sized based on that.
*/
pgcnt_t minfree = 0;
EXPORT_SYMBOL(minfree);
/*
* The desired amount of memory measured in pages to be free at all
* times on the system. This is similar to Linux's zone->pages_low
- * multipled by the number of zones and is sized based on that.
+ * multiplied by the number of zones and is sized based on that.
* Assuming all zones are being used roughly equally, when we drop
- * below this threshold async page reclamation is triggered.
+ * below this threshold asynchronous page reclamation is triggered.
*/
pgcnt_t desfree = 0;
EXPORT_SYMBOL(desfree);
/*
* When above this amount of memory measures in pages the system is
* determined to have enough free memory. This is similar to Linux's
- * zone->pages_high multipled by the number of zones and is sized based
+ * zone->pages_high multiplied by the number of zones and is sized based
* on that. Assuming all zones are being used roughly equally, when
- * async page reclamation reaches this threshold it stops.
+ * asynchronous page reclamation reaches this threshold it stops.
*/
pgcnt_t lotsfree = 0;
EXPORT_SYMBOL(lotsfree);
#endif /* NEED_GET_ZONE_COUNTS */
EXPORT_SYMBOL(spl_global_page_state);
+#if !defined(HAVE_INVALIDATE_INODES) && !defined(HAVE_INVALIDATE_INODES_CHECK)
+invalidate_inodes_t invalidate_inodes_fn = SYMBOL_POISON;
+EXPORT_SYMBOL(invalidate_inodes_fn);
+#endif /* !HAVE_INVALIDATE_INODES && !HAVE_INVALIDATE_INODES_CHECK */
+
+#ifndef HAVE_SHRINK_DCACHE_MEMORY
+shrink_dcache_memory_t shrink_dcache_memory_fn = SYMBOL_POISON;
+EXPORT_SYMBOL(shrink_dcache_memory_fn);
+#endif /* HAVE_SHRINK_DCACHE_MEMORY */
+
+#ifndef HAVE_SHRINK_ICACHE_MEMORY
+shrink_icache_memory_t shrink_icache_memory_fn = SYMBOL_POISON;
+EXPORT_SYMBOL(shrink_icache_memory_fn);
+#endif /* HAVE_SHRINK_ICACHE_MEMORY */
+
pgcnt_t
spl_kmem_availrmem(void)
{
}
EXPORT_SYMBOL(vmem_size);
+int
+kmem_debugging(void)
+{
+ return 0;
+}
+EXPORT_SYMBOL(kmem_debugging);
+
+#ifndef HAVE_KVASPRINTF
+/* Simplified asprintf. */
+char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
+{
+ unsigned int len;
+ char *p;
+ va_list aq;
+
+ va_copy(aq, ap);
+ len = vsnprintf(NULL, 0, fmt, aq);
+ va_end(aq);
+
+ p = kmalloc(len+1, gfp);
+ if (!p)
+ return NULL;
+
+ vsnprintf(p, len+1, fmt, ap);
+
+ return p;
+}
+EXPORT_SYMBOL(kvasprintf);
+#endif /* HAVE_KVASPRINTF */
+
+char *
+kmem_vasprintf(const char *fmt, va_list ap)
+{
+ va_list aq;
+ char *ptr;
+
+ do {
+ va_copy(aq, ap);
+ ptr = kvasprintf(GFP_KERNEL, fmt, aq);
+ va_end(aq);
+ } while (ptr == NULL);
+
+ return ptr;
+}
+EXPORT_SYMBOL(kmem_vasprintf);
+
+char *
+kmem_asprintf(const char *fmt, ...)
+{
+ va_list ap;
+ char *ptr;
+
+ do {
+ va_start(ap, fmt);
+ ptr = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+ } while (ptr == NULL);
+
+ return ptr;
+}
+EXPORT_SYMBOL(kmem_asprintf);
+
+static char *
+__strdup(const char *str, int flags)
+{
+ char *ptr;
+ int n;
+
+ n = strlen(str);
+ ptr = kmalloc_nofail(n + 1, flags);
+ if (ptr)
+ memcpy(ptr, str, n + 1);
+
+ return ptr;
+}
+
+char *
+strdup(const char *str)
+{
+ return __strdup(str, KM_SLEEP);
+}
+EXPORT_SYMBOL(strdup);
+
+void
+strfree(char *str)
+{
+ kfree(str);
+}
+EXPORT_SYMBOL(strfree);
+
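+/*
+ * Usage sketch for the string helpers above (illustrative only, not
+ * part of the kernel API). It assumes a caller in a context where
+ * KM_SLEEP allocations are safe; the format string and the "msg" and
+ * "copy" names are hypothetical.
+ *
+ *   char *msg, *copy;
+ *
+ *   msg = kmem_asprintf("device-%d", 1);
+ *   copy = strdup(msg);
+ *   ...use msg and copy...
+ *   strfree(copy);
+ *   strfree(msg);
+ */
+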
/*
* Memory allocation interfaces and debugging for basic kmem_*
* and vmem_* style memory allocation. When DEBUG_KMEM is enabled
unsigned long long kmem_alloc_max = 0;
atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long vmem_alloc_max = 0;
-# else
+# else /* HAVE_ATOMIC64_T */
atomic_t kmem_alloc_used = ATOMIC_INIT(0);
unsigned long long kmem_alloc_max = 0;
atomic_t vmem_alloc_used = ATOMIC_INIT(0);
unsigned long long vmem_alloc_max = 0;
-# endif /* _LP64 */
+# endif /* HAVE_ATOMIC64_T */
EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_lock);
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);
-# endif
-#endif
-
-/*
- * Slab allocation interfaces
- *
- * While the Linux slab implementation was inspired by the Solaris
- * implemenation I cannot use it to emulate the Solaris APIs. I
- * require two features which are not provided by the Linux slab.
- *
- * 1) Constructors AND destructors. Recent versions of the Linux
- * kernel have removed support for destructors. This is a deal
- * breaker for the SPL which contains particularly expensive
- * initializers for mutex's, condition variables, etc. We also
- * require a minimal level of cleanup for these data types unlike
- * many Linux data type which do need to be explicitly destroyed.
- *
- * 2) Virtual address space backed slab. Callers of the Solaris slab
- * expect it to work well for both small are very large allocations.
- * Because of memory fragmentation the Linux slab which is backed
- * by kmalloc'ed memory performs very badly when confronted with
- * large numbers of large allocations. Basing the slab on the
- * virtual address space removes the need for contigeous pages
- * and greatly improve performance for large allocations.
- *
- * For these reasons, the SPL has its own slab implementation with
- * the needed features. It is not as highly optimized as either the
- * Solaris or Linux slabs, but it should get me most of what is
- * needed until it can be optimized or obsoleted by another approach.
- *
- * One serious concern I do have about this method is the relatively
- * small virtual address space on 32bit arches. This will seriously
- * constrain the size of the slab caches and their performance.
- *
- * XXX: Improve the partial slab list by carefully maintaining a
- * strict ordering of fullest to emptiest slabs based on
- * the slab reference count. This gaurentees the when freeing
- * slabs back to the system we need only linearly traverse the
- * last N slabs in the list to discover all the freeable slabs.
- *
- * XXX: NUMA awareness for optionally allocating memory close to a
- * particular core. This can be adventageous if you know the slab
- * object will be short lived and primarily accessed from one core.
- *
- * XXX: Slab coloring may also yield performance improvements and would
- * be desirable to implement.
- */
-
-struct list_head spl_kmem_cache_list; /* List of caches */
-struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
-
-static int spl_cache_flush(spl_kmem_cache_t *skc,
- spl_kmem_magazine_t *skm, int flush);
-
-#ifdef HAVE_SET_SHRINKER
-static struct shrinker *spl_kmem_cache_shrinker;
-#else
-static int spl_kmem_cache_generic_shrinker(int nr_to_scan,
- unsigned int gfp_mask);
-static struct shrinker spl_kmem_cache_shrinker = {
- .shrink = spl_kmem_cache_generic_shrinker,
- .seeks = KMC_DEFAULT_SEEKS,
-};
-#endif
-
-#ifdef DEBUG_KMEM
-# ifdef DEBUG_KMEM_TRACKING
static kmem_debug_t *
-kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits,
- void *addr)
+kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, const void *addr)
{
struct hlist_head *head;
struct hlist_node *node;
struct kmem_debug *p;
unsigned long flags;
- ENTRY;
+ SENTRY;
spin_lock_irqsave(lock, flags);
spin_unlock_irqrestore(lock, flags);
- RETURN(NULL);
+ SRETURN(NULL);
}
void *
void *ptr = NULL;
kmem_debug_t *dptr;
unsigned long irq_flags;
- ENTRY;
+ SENTRY;
+ /* Function may be called with KM_NOSLEEP so failure is possible */
dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
flags & ~__GFP_ZERO);
- if (dptr == NULL) {
- CWARN("kmem_alloc(%ld, 0x%x) debug failed\n",
- sizeof(kmem_debug_t), flags);
+ if (unlikely(dptr == NULL)) {
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
+ "kmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ sizeof(kmem_debug_t), flags, func, line,
+ kmem_alloc_used_read(), kmem_alloc_max);
} else {
- /* Marked unlikely because we should never be doing this,
- * we tolerate to up 2 pages but a single page is best. */
+ /*
+ * Marked unlikely because we should never be doing this;
+ * we tolerate up to 2 pages but a single page is best.
+ */
if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
- CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
- (unsigned long long) size, flags,
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "large "
+ "kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
spl_debug_dumpstack(NULL);
}
- /* We use kstrdup() below because the string pointed to by
+ /*
+ * We use __strdup() below because the string pointed to by
* __FUNCTION__ might not be available by the time we want
- * to print it since the module might have been unloaded. */
- dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
+ * to print it since the module might have been unloaded.
+ * This can only fail in the KM_NOSLEEP case.
+ */
+ dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
if (unlikely(dptr->kd_func == NULL)) {
kfree(dptr);
- CWARN("kstrdup() failed in kmem_alloc(%llu, 0x%x) "
- "(%lld/%llu)\n", (unsigned long long) size, flags,
- kmem_alloc_used_read(), kmem_alloc_max);
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ "debug __strdup() at %s:%d failed (%lld/%llu)\n",
+ func, line, kmem_alloc_used_read(), kmem_alloc_max);
goto out;
}
if (unlikely(ptr == NULL)) {
kfree(dptr->kd_func);
kfree(dptr);
- CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
- (unsigned long long) size, flags,
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "kmem_alloc"
+ "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
goto out;
}
list_add_tail(&dptr->kd_list, &kmem_list);
spin_unlock_irqrestore(&kmem_lock, irq_flags);
- CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p "
- "(%lld/%llu)\n", (unsigned long long) size, flags,
- ptr, kmem_alloc_used_read(),
- kmem_alloc_max);
+ SDEBUG_LIMIT(SD_INFO,
+ "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line, ptr,
+ kmem_alloc_used_read(), kmem_alloc_max);
}
out:
- RETURN(ptr);
+ SRETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_track);
void
-kmem_free_track(void *ptr, size_t size)
+kmem_free_track(const void *ptr, size_t size)
{
kmem_debug_t *dptr;
- ENTRY;
+ SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
- ASSERT(dptr); /* Must exist in hash due to kmem_alloc() */
+ /* Must exist in hash due to kmem_alloc() */
+ ASSERT(dptr);
/* Size must match */
ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
(unsigned long long) size, dptr->kd_func, dptr->kd_line);
kmem_alloc_used_sub(size);
- CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
+ SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
(unsigned long long) size, kmem_alloc_used_read(),
kmem_alloc_max);
memset(ptr, 0x5a, size);
kfree(ptr);
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(kmem_free_track);
void *ptr = NULL;
kmem_debug_t *dptr;
unsigned long irq_flags;
- ENTRY;
+ SENTRY;
ASSERT(flags & KM_SLEEP);
+ /* Function may be called with KM_NOSLEEP so failure is possible */
dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
flags & ~__GFP_ZERO);
- if (dptr == NULL) {
- CWARN("vmem_alloc(%ld, 0x%x) debug failed\n",
- sizeof(kmem_debug_t), flags);
+ if (unlikely(dptr == NULL)) {
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
+ "vmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ sizeof(kmem_debug_t), flags, func, line,
+ vmem_alloc_used_read(), vmem_alloc_max);
} else {
- /* We use kstrdup() below because the string pointed to by
+ /*
+ * We use __strdup() below because the string pointed to by
* __FUNCTION__ might not be available by the time we want
- * to print it, since the module might have been unloaded. */
- dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
+ * to print it, since the module might have been unloaded.
+ * This can never fail because we have already asserted
+ * that flags is KM_SLEEP.
+ */
+ dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
if (unlikely(dptr->kd_func == NULL)) {
kfree(dptr);
- CWARN("kstrdup() failed in vmem_alloc(%llu, 0x%x) "
- "(%lld/%llu)\n", (unsigned long long) size, flags,
- vmem_alloc_used_read(), vmem_alloc_max);
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ "debug __strdup() at %s:%d failed (%lld/%llu)\n",
+ func, line, vmem_alloc_used_read(), vmem_alloc_max);
goto out;
}
- ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO,
- PAGE_KERNEL);
+ /* Use the correct allocator */
+ if (flags & __GFP_ZERO) {
+ ptr = vzalloc_nofail(size, flags & ~__GFP_ZERO);
+ } else {
+ ptr = vmalloc_nofail(size, flags);
+ }
if (unlikely(ptr == NULL)) {
kfree(dptr->kd_func);
kfree(dptr);
- CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
- (unsigned long long) size, flags,
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "vmem_alloc"
+ "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
vmem_alloc_used_read(), vmem_alloc_max);
goto out;
}
- if (flags & __GFP_ZERO)
- memset(ptr, 0, size);
-
vmem_alloc_used_add(size);
if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
vmem_alloc_max = vmem_alloc_used_read();
list_add_tail(&dptr->kd_list, &vmem_list);
spin_unlock_irqrestore(&vmem_lock, irq_flags);
- CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p "
- "(%lld/%llu)\n", (unsigned long long) size, flags,
- ptr, vmem_alloc_used_read(),
- vmem_alloc_max);
+ SDEBUG_LIMIT(SD_INFO,
+ "vmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
+ ptr, vmem_alloc_used_read(), vmem_alloc_max);
}
out:
- RETURN(ptr);
+ SRETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_track);
void
-vmem_free_track(void *ptr, size_t size)
+vmem_free_track(const void *ptr, size_t size)
{
kmem_debug_t *dptr;
- ENTRY;
+ SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
- ASSERT(dptr); /* Must exist in hash due to vmem_alloc() */
+
+ /* Must exist in hash due to vmem_alloc() */
+ ASSERT(dptr);
/* Size must match */
ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
(unsigned long long) size, dptr->kd_func, dptr->kd_line);
vmem_alloc_used_sub(size);
- CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
+ SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
(unsigned long long) size, vmem_alloc_used_read(),
vmem_alloc_max);
memset(ptr, 0x5a, size);
vfree(ptr);
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(vmem_free_track);
int node_alloc, int node)
{
void *ptr;
- ENTRY;
+ SENTRY;
- /* Marked unlikely because we should never be doing this,
- * we tolerate to up 2 pages but a single page is best. */
+ /*
+ * Marked unlikely because we should never be doing this;
+ * we tolerate up to 2 pages but a single page is best.
+ */
if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
- CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
- (unsigned long long) size, flags,
+ SDEBUG(SD_CONSOLE | SD_WARNING,
+ "large kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
- spl_debug_dumpstack(NULL);
+ dump_stack();
}
/* Use the correct allocator */
ptr = kmalloc_nofail(size, flags);
}
- if (ptr == NULL) {
- CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
- (unsigned long long) size, flags,
+ if (unlikely(ptr == NULL)) {
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ "kmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
} else {
kmem_alloc_used_add(size);
if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
kmem_alloc_max = kmem_alloc_used_read();
- CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p "
- "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
- kmem_alloc_used_read(), kmem_alloc_max);
+ SDEBUG_LIMIT(SD_INFO,
+ "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line, ptr,
+ kmem_alloc_used_read(), kmem_alloc_max);
}
- RETURN(ptr);
+
+ SRETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_debug);
void
-kmem_free_debug(void *ptr, size_t size)
+kmem_free_debug(const void *ptr, size_t size)
{
- ENTRY;
+ SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
kmem_alloc_used_sub(size);
- CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
+ SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
(unsigned long long) size, kmem_alloc_used_read(),
kmem_alloc_max);
-
- memset(ptr, 0x5a, size);
kfree(ptr);
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(kmem_free_debug);
vmem_alloc_debug(size_t size, int flags, const char *func, int line)
{
void *ptr;
- ENTRY;
+ SENTRY;
ASSERT(flags & KM_SLEEP);
- ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO,
- PAGE_KERNEL);
- if (ptr == NULL) {
- CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
- (unsigned long long) size, flags,
- vmem_alloc_used_read(), vmem_alloc_max);
+ /* Use the correct allocator */
+ if (flags & __GFP_ZERO) {
+ ptr = vzalloc_nofail(size, flags & ~__GFP_ZERO);
} else {
- if (flags & __GFP_ZERO)
- memset(ptr, 0, size);
+ ptr = vmalloc_nofail(size, flags);
+ }
+ if (unlikely(ptr == NULL)) {
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ "vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ (unsigned long long) size, flags, func, line,
+ vmem_alloc_used_read(), vmem_alloc_max);
+ } else {
vmem_alloc_used_add(size);
if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
vmem_alloc_max = vmem_alloc_used_read();
- CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p "
+ SDEBUG_LIMIT(SD_INFO, "vmem_alloc(%llu, 0x%x) = %p "
"(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
vmem_alloc_used_read(), vmem_alloc_max);
}
- RETURN(ptr);
+ SRETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_debug);
void
-vmem_free_debug(void *ptr, size_t size)
+vmem_free_debug(const void *ptr, size_t size)
{
- ENTRY;
+ SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
vmem_alloc_used_sub(size);
- CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
+ SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
(unsigned long long) size, vmem_alloc_used_read(),
vmem_alloc_max);
-
- memset(ptr, 0x5a, size);
vfree(ptr);
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(vmem_free_debug);
# endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */
+/*
+ * Slab allocation interfaces
+ *
+ * While the Linux slab implementation was inspired by the Solaris
+ * implementation I cannot use it to emulate the Solaris APIs. I
+ * require two features which are not provided by the Linux slab.
+ *
+ * 1) Constructors AND destructors. Recent versions of the Linux
+ * kernel have removed support for destructors. This is a deal
+ * breaker for the SPL which contains particularly expensive
+ * initializers for mutexes, condition variables, etc. We also
+ * require a minimal level of cleanup for these data types unlike
+ * many Linux data types which do not need to be explicitly destroyed.
+ *
+ * 2) Virtual address space backed slab. Callers of the Solaris slab
+ * expect it to work well for both small and very large allocations.
+ * Because of memory fragmentation the Linux slab which is backed
+ * by kmalloc'ed memory performs very badly when confronted with
+ * large numbers of large allocations. Basing the slab on the
+ * virtual address space removes the need for contiguous pages
+ * and greatly improves performance for large allocations.
+ *
+ * For these reasons, the SPL has its own slab implementation with
+ * the needed features. It is not as highly optimized as either the
+ * Solaris or Linux slabs, but it should get me most of what is
+ * needed until it can be optimized or obsoleted by another approach.
+ *
+ * One serious concern I do have about this method is the relatively
+ * small virtual address space on 32bit arches. This will seriously
+ * constrain the size of the slab caches and their performance.
+ *
+ * XXX: Improve the partial slab list by carefully maintaining a
+ * strict ordering of fullest to emptiest slabs based on
+ * the slab reference count. This guarantees that when freeing
+ * slabs back to the system we need only linearly traverse the
+ * last N slabs in the list to discover all the freeable slabs.
+ *
+ * XXX: NUMA awareness for optionally allocating memory close to a
+ * particular core. This can be advantageous if you know the slab
+ * object will be short lived and primarily accessed from one core.
+ *
+ * XXX: Slab coloring may also yield performance improvements and would
+ * be desirable to implement.
+ */
+
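+/*
+ * A hedged usage sketch of the cache API implemented below
+ * (illustrative only). The "my_*" identifiers are hypothetical and
+ * the spl_kmem_cache_create() argument list is assumed to follow the
+ * Solaris kmem_cache_create() convention: name, object size, align,
+ * constructor, destructor, reclaim callback, private data, vmem
+ * source, and flags.
+ *
+ *   skc = spl_kmem_cache_create("my_cache", sizeof (my_obj_t), 0,
+ *       my_ctor, my_dtor, NULL, NULL, NULL, 0);
+ *   obj = spl_kmem_cache_alloc(skc, KM_SLEEP);
+ *   ...use obj...
+ *   spl_kmem_cache_free(skc, obj);
+ *   spl_kmem_cache_destroy(skc);
+ */
+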
+struct list_head spl_kmem_cache_list; /* List of caches */
+struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
+
+static int spl_cache_flush(spl_kmem_cache_t *skc,
+ spl_kmem_magazine_t *skm, int flush);
+
+SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker);
+SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
+ spl_kmem_cache_generic_shrinker, KMC_DEFAULT_SEEKS);
+
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
ASSERT(ISP2(size));
- if (skc->skc_flags & KMC_KMEM)
+ if (skc->skc_flags & KMC_KMEM) {
ptr = (void *)__get_free_pages(flags, get_order(size));
- else
- ptr = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
+ } else {
+ /*
+ * As part of vmalloc() an __pte_alloc_kernel() allocation
+ * may occur. This internal allocation does not honor the
+ * gfp flags passed to vmalloc(). This means even when
+ * vmalloc(GFP_NOFS) is called it is possible synchronous
+ * reclaim will occur. This reclaim can trigger file IO
+ * which can result in a deadlock. This issue can be avoided
+ * by explicitly setting PF_MEMALLOC on the process to
+ * subvert synchronous reclaim. The following bug has
+ * been filed at kernel.org to track the issue.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=30702
+ *
+ * NOTE: Only set PF_MEMALLOC if it's not already set, and
+ * then only clear it when we were the one who set it.
+ */
+ if (!(flags & __GFP_FS) && !(current->flags & PF_MEMALLOC)) {
+ current->flags |= PF_MEMALLOC;
+ ptr = __vmalloc(size, flags|__GFP_HIGHMEM, PAGE_KERNEL);
+ current->flags &= ~PF_MEMALLOC;
+ } else {
+ ptr = __vmalloc(size, flags|__GFP_HIGHMEM, PAGE_KERNEL);
+ }
+ }
/* Resulting allocated memory will be page aligned */
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
ASSERT(ISP2(size));
+ /*
+ * The Linux direct reclaim path uses this out of band value to
+ * determine if forward progress is being made. Normally this is
+ * incremented by kmem_freepages() which is part of the various
+ * Linux slab implementations. However, since we are using none
+ * of that infrastructure we are responsible for incrementing it.
+ */
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
+
if (skc->skc_flags & KMC_KMEM)
free_pages((unsigned long)ptr, get_order(size));
else
* For small objects we use kmem_alloc() because as long as you are
* only requesting a small number of pages (ideally just one) its cheap.
* However, when you start requesting multiple pages with kmem_alloc()
- * it gets increasingly expensive since it requires contigeous pages.
+ * it gets increasingly expensive since it requires contiguous pages.
* For this reason we shift to vmem_alloc() for slabs of large objects
- * which removes the need for contigeous pages. We do not use
+ * which removes the need for contiguous pages. We do not use
* vmem_alloc() in all cases because there is significant locking
* overhead in __get_vm_area_node(). This function takes a single
- * global lock when aquiring an available virtual address range which
+ * global lock when acquiring an available virtual address range which
* serializes all vmem_alloc()'s for all slab caches. Using slightly
* different allocation functions for small and large objects should
* give us the best of both worlds.
base = kv_alloc(skc, skc->skc_slab_size, flags);
if (base == NULL)
- RETURN(NULL);
+ SRETURN(NULL);
sks = (spl_kmem_slab_t *)base;
sks->sks_magic = SKS_MAGIC;
sks->sks_ref = 0;
obj_size = spl_obj_size(skc);
- if (skc->skc_flags * KMC_OFFSLAB)
+ if (skc->skc_flags & KMC_OFFSLAB)
offslab_size = spl_offslab_size(skc);
for (i = 0; i < sks->sks_objs; i++) {
if (skc->skc_flags & KMC_OFFSLAB) {
obj = kv_alloc(skc, offslab_size, flags);
if (!obj)
- GOTO(out, rc = -ENOMEM);
+ SGOTO(out, rc = -ENOMEM);
} else {
obj = base + spl_sks_size(skc) + (i * obj_size);
}
sks = NULL;
}
- RETURN(sks);
+ SRETURN(sks);
}
/*
struct list_head *sks_list, struct list_head *sko_list)
{
spl_kmem_cache_t *skc;
- ENTRY;
+ SENTRY;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_ref == 0);
list_add(&sks->sks_list, sks_list);
list_splice_init(&sks->sks_free_list, sko_list);
- EXIT;
+ SEXIT;
}
/*
LIST_HEAD(sko_list);
uint32_t size = 0;
int i = 0;
- ENTRY;
+ SENTRY;
/*
* Move empty slabs and objects which have not been touched in
* All empty slabs are at the end of skc->skc_partial_list,
* therefore once a non-empty slab is found we can stop
* scanning. Additionally, stop when reaching the target
- * reclaim 'count' if a non-zero threshhold is given.
+ * reclaim 'count' if a non-zero threshold is given.
*/
- if ((sks->sks_ref > 0) || (count && i > count))
+ if ((sks->sks_ref > 0) || (count && i >= count))
break;
if (time_after(jiffies,sks->sks_age+skc->skc_delay*HZ)||flag) {
cond_resched();
}
- EXIT;
+ SEXIT;
}
/*
/*
* Called regularly to keep a downward pressure on the size of idle
* magazines and to release free slabs from the cache. This function
- * never calls the registered reclaim function, that only occures
+ * never calls the registered reclaim function, that only occurs
* under memory pressure or with a direct call to spl_kmem_reap().
*/
static void
for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
*objs = (*size - sks_size) / obj_size;
if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB)
- RETURN(0);
+ SRETURN(0);
}
/*
*size = max_size;
*objs = (*size - sks_size) / obj_size;
if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN)
- RETURN(0);
+ SRETURN(0);
}
- RETURN(-ENOSPC);
+ SRETURN(-ENOSPC);
}
/*
{
uint32_t obj_size = spl_obj_size(skc);
int size;
- ENTRY;
+ SENTRY;
/* Per-magazine sizes below assume a 4Kib page size */
if (obj_size > (PAGE_SIZE * 256))
else
size = 256;
- RETURN(size);
+ SRETURN(size);
}
/*
- * Allocate a per-cpu magazine to assoicate with a specific core.
+ * Allocate a per-cpu magazine to associate with a specific core.
*/
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
spl_kmem_magazine_t *skm;
int size = sizeof(spl_kmem_magazine_t) +
sizeof(void *) * skc->skc_mag_size;
- ENTRY;
+ SENTRY;
skm = kmem_alloc_node(size, KM_SLEEP, node);
if (skm) {
skm->skm_age = jiffies;
}
- RETURN(skm);
+ SRETURN(skm);
}
/*
- * Free a per-cpu magazine assoicated with a specific core.
+ * Free a per-cpu magazine associated with a specific core.
*/
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
int size = sizeof(spl_kmem_magazine_t) +
sizeof(void *) * skm->skm_size;
- ENTRY;
+ SENTRY;
ASSERT(skm->skm_magic == SKM_MAGIC);
ASSERT(skm->skm_avail == 0);
kmem_free(skm, size);
- EXIT;
+ SEXIT;
}
/*
spl_magazine_create(spl_kmem_cache_t *skc)
{
int i;
- ENTRY;
+ SENTRY;
skc->skc_mag_size = spl_magazine_size(skc);
skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
for (i--; i >= 0; i--)
spl_magazine_free(skc->skc_mag[i]);
- RETURN(-ENOMEM);
+ SRETURN(-ENOMEM);
}
}
schedule_delayed_work_on(i, &skc->skc_mag[i]->skm_work,
skc->skc_delay / 3 * HZ);
- RETURN(0);
+ SRETURN(0);
}
/*
{
spl_kmem_magazine_t *skm;
int i;
- ENTRY;
+ SENTRY;
for_each_online_cpu(i) {
skm = skc->skc_mag[i];
spl_magazine_free(skm);
}
- EXIT;
+ SEXIT;
}
/*
{
spl_kmem_cache_t *skc;
int rc, kmem_flags = KM_SLEEP;
- ENTRY;
+ SENTRY;
ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
if (current_thread_info()->preempt_count || irqs_disabled())
kmem_flags = KM_NOSLEEP;
- /* Allocate memry for a new cache an initialize it. Unfortunately,
+ /* Allocate memory for a new cache and initialize it. Unfortunately,
* this usually ends up being a large allocation of ~32k because
* we need to allocate enough memory for the worst case number of
* cpus in the magazine, skc_mag[NR_CPUS]. Because of this we
skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc),
kmem_flags | KM_NODEBUG);
if (skc == NULL)
- RETURN(NULL);
+ SRETURN(NULL);
skc->skc_magic = SKC_MAGIC;
skc->skc_name_size = strlen(name) + 1;
skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags);
if (skc->skc_name == NULL) {
kmem_free(skc, sizeof(*skc));
- RETURN(NULL);
+ SRETURN(NULL);
}
strncpy(skc->skc_name, name, skc->skc_name_size);
rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size);
if (rc)
- GOTO(out, rc);
+ SGOTO(out, rc);
rc = spl_magazine_create(skc);
if (rc)
- GOTO(out, rc);
+ SGOTO(out, rc);
spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc);
schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
up_write(&spl_kmem_cache_sem);
- RETURN(skc);
+ SRETURN(skc);
out:
kmem_free(skc->skc_name, skc->skc_name_size);
kmem_free(skc, sizeof(*skc));
- RETURN(NULL);
+ SRETURN(NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
/*
- * Destroy a cache and all objects assoicated with the cache.
+ * Register a move callback for cache defragmentation.
+ * XXX: Unimplemented but harmless to stub out for now.
+ */
+void
+spl_kmem_cache_set_move(kmem_cache_t *skc,
+ kmem_cbrc_t (move)(void *, void *, size_t, void *))
+{
+ ASSERT(move != NULL);
+}
+EXPORT_SYMBOL(spl_kmem_cache_set_move);
+
+/*
+ * Destroy a cache and all objects associated with the cache.
*/
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
DECLARE_WAIT_QUEUE_HEAD(wq);
int i;
- ENTRY;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
up_write(&spl_kmem_cache_sem);
/* Cancel any and wait for any pending delayed work */
- ASSERT(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
- cancel_delayed_work(&skc->skc_work);
+ VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
+ cancel_delayed_work_sync(&skc->skc_work);
for_each_online_cpu(i)
- cancel_delayed_work(&skc->skc_mag[i]->skm_work);
+ cancel_delayed_work_sync(&skc->skc_mag[i]->skm_work);
flush_scheduled_work();
kmem_free(skc, sizeof(*skc));
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
}
/*
- * No available objects on any slabsi, create a new slab. Since this
- * is an expensive operation we do it without holding the spinlock and
- * only briefly aquire it when we link in the fully allocated and
+ * No available objects on any slabs; create a new slab. Since this
+ * is an expensive operation we do it without holding the spin lock and
+ * only briefly acquire it when we link in the fully allocated and
* constructed slab.
*/
static spl_kmem_slab_t *
spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_slab_t *sks;
- ENTRY;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
local_irq_enable();
*/
if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
schedule();
- GOTO(out, sks= NULL);
+ SGOTO(out, sks = NULL);
}
/* Allocate a new slab for the cache */
sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | KM_NODEBUG);
if (sks == NULL)
- GOTO(out, sks = NULL);
+ SGOTO(out, sks = NULL);
/* Link the new empty slab in to the end of skc_partial_list. */
spin_lock(&skc->skc_lock);
out:
local_irq_disable();
- RETURN(sks);
+ SRETURN(sks);
}
/*
{
spl_kmem_slab_t *sks;
int rc = 0, refill;
- ENTRY;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
sks = spl_cache_grow(skc, flags);
if (!sks)
- GOTO(out, rc);
+ SGOTO(out, rc);
/* Rescheduled to different CPU skm is not local */
if (skm != skc->skc_mag[smp_processor_id()])
- GOTO(out, rc);
+ SGOTO(out, rc);
/* Potentially rescheduled to the same CPU but
- * allocations may have occured from this CPU while
+ * allocations may have occurred from this CPU while
* we were sleeping so recalculate max refill. */
refill = MIN(refill, skm->skm_size - skm->skm_avail);
spin_unlock(&skc->skc_lock);
out:
/* Returns the number of entries added to cache */
- RETURN(rc);
+ SRETURN(rc);
}
/*
{
spl_kmem_slab_t *sks = NULL;
spl_kmem_obj_t *sko = NULL;
- ENTRY;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(spin_is_locked(&skc->skc_lock));
list_add(&sks->sks_list, &skc->skc_partial_list);
}
- /* Move emply slabs to the end of the partial list so
+ /* Move empty slabs to the end of the partial list so
* they can be easily found and freed during reclamation. */
if (sks->sks_ref == 0) {
list_del(&sks->sks_list);
skc->skc_slab_alloc--;
}
- EXIT;
+ SEXIT;
}
/*
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
int i, count = MIN(flush, skm->skm_avail);
- ENTRY;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
spin_unlock(&skc->skc_lock);
- RETURN(count);
+ SRETURN(count);
}
/*
spl_kmem_magazine_t *skm;
unsigned long irq_flags;
void *obj = NULL;
- ENTRY;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
restart:
/* Safe to update per-cpu structure without lock, but
- * in the restart case we must be careful to reaquire
+ * in the restart case we must be careful to reacquire
* the local magazine since this may have changed
* when we need to grow the cache. */
skm = skc->skc_mag[smp_processor_id()];
/* Per-CPU cache empty, directly allocate from
* the slab and refill the per-CPU cache. */
(void)spl_cache_refill(skc, skm, flags);
- GOTO(restart, obj = NULL);
+ SGOTO(restart, obj = NULL);
}
local_irq_restore(irq_flags);
prefetchw(obj);
atomic_dec(&skc->skc_ref);
- RETURN(obj);
+ SRETURN(obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
{
spl_kmem_magazine_t *skm;
unsigned long flags;
- ENTRY;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
local_irq_restore(flags);
atomic_dec(&skc->skc_ref);
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_free);
/*
- * The generic shrinker function for all caches. Under linux a shrinker
- * may not be tightly coupled with a slab cache. In fact linux always
- * systematically trys calling all registered shrinker callbacks which
+ * The generic shrinker function for all caches. Under Linux a shrinker
+ * may not be tightly coupled with a slab cache. In fact Linux always
+ * systematically tries calling all registered shrinker callbacks which
* report that they contain unused objects. Because of this we only
* register one shrinker function in the shim layer for all slab caches.
* We always attempt to shrink all caches when this generic shrinker
* is called. The shrinker should return the number of free objects
* in the cache when called with nr_to_scan == 0 but not attempt to
* free any objects. When nr_to_scan > 0 it is a request that nr_to_scan
- * objects should be freed, because Solaris semantics are to free
- * all available objects we may free more objects than requested.
+ * objects should be freed, which differs from Solaris semantics.
+ * Solaris semantics are to free all available objects which may (and
+ * probably will) be more objects than the requested nr_to_scan.
*/
static int
-spl_kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
+__spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
+ struct shrink_control *sc)
{
spl_kmem_cache_t *skc;
int unused = 0;
down_read(&spl_kmem_cache_sem);
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
- if (nr_to_scan)
- spl_kmem_cache_reap_now(skc);
+ if (sc->nr_to_scan)
+ spl_kmem_cache_reap_now(skc,
+ MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
/*
* Presume everything alloc'ed in reclaimable, this ensures
return (unused * sysctl_vfs_cache_pressure) / 100;
}
+SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
+
/*
* Call the registered reclaim function for a cache. Depending on how
* many and which objects are released it may simply repopulate the
* effort and we do not want to thrash creating and destroying slabs.
*/
void
-spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
+spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
{
- ENTRY;
+ SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
/* Prevent concurrent cache reaping when contended */
if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
- EXIT;
+ SEXIT;
return;
}
atomic_inc(&skc->skc_ref);
- if (skc->skc_reclaim)
- skc->skc_reclaim(skc->skc_private);
+ /*
+ * When a reclaim function is available it may be invoked repeatedly
+ * until at least a single slab can be freed. This ensures that we
+ * do free memory back to the system. This helps minimize the chance
+ * of an OOM event when the bulk of memory is used by the slab.
+ *
+ * When free slabs are already available the reclaim callback will be
+ * skipped. Additionally, if no forward progress is detected despite
+ * a reclaim function, the cache will be skipped to avoid deadlock.
+ *
+ * Longer term this would be the correct place to add the code which
+ * repacks the slabs in order to minimize fragmentation.
+ */
+ if (skc->skc_reclaim) {
+ uint64_t objects = UINT64_MAX;
+ int do_reclaim;
- spl_slab_reclaim(skc, skc->skc_reap, 0);
+ do {
+ spin_lock(&skc->skc_lock);
+ do_reclaim =
+ (skc->skc_slab_total > 0) &&
+ ((skc->skc_slab_total - skc->skc_slab_alloc) == 0) &&
+ (skc->skc_obj_alloc < objects);
+
+ objects = skc->skc_obj_alloc;
+ spin_unlock(&skc->skc_lock);
+
+ if (do_reclaim)
+ skc->skc_reclaim(skc->skc_private);
+
+ } while (do_reclaim);
+ }
+
+ /* Reclaim from the cache, ignoring its age and delay. */
+ spl_slab_reclaim(skc, count, 1);
clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
atomic_dec(&skc->skc_ref);
- EXIT;
+ SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
void
spl_kmem_reap(void)
{
- spl_kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
+ struct shrink_control sc;
+
+ sc.nr_to_scan = KMC_REAP_CHUNK;
+ sc.gfp_mask = GFP_KERNEL;
+
+ __spl_kmem_cache_generic_shrinker(NULL, &sc);
}
EXPORT_SYMBOL(spl_kmem_reap);
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
int i;
- ENTRY;
+ SENTRY;
spin_lock_init(lock);
INIT_LIST_HEAD(list);
for (i = 0; i < size; i++)
INIT_HLIST_HEAD(&kmem_table[i]);
- RETURN(0);
+ SRETURN(0);
}
static void
unsigned long flags;
kmem_debug_t *kd;
char str[17];
- ENTRY;
+ SENTRY;
spin_lock_irqsave(lock, flags);
if (!list_empty(list))
kd->kd_func, kd->kd_line);
spin_unlock_irqrestore(lock, flags);
- EXIT;
+ SEXIT;
}
#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
#define spl_kmem_init_tracking(list, lock, size)
*/
spl_kmem_init_globals();
+#if !defined(HAVE_INVALIDATE_INODES) && !defined(HAVE_INVALIDATE_INODES_CHECK)
+ invalidate_inodes_fn = (invalidate_inodes_t)
+ spl_kallsyms_lookup_name("invalidate_inodes");
+ if (!invalidate_inodes_fn) {
+ printk(KERN_ERR "Error: Unknown symbol invalidate_inodes\n");
+ return -EFAULT;
+ }
+#endif /* !HAVE_INVALIDATE_INODES && !HAVE_INVALIDATE_INODES_CHECK */
+
+#ifndef HAVE_SHRINK_DCACHE_MEMORY
+ /* When shrink_dcache_memory_fn == NULL support is disabled */
+ shrink_dcache_memory_fn = (shrink_dcache_memory_t)
+ spl_kallsyms_lookup_name("shrink_dcache_memory");
+#endif /* HAVE_SHRINK_DCACHE_MEMORY */
+
+#ifndef HAVE_SHRINK_ICACHE_MEMORY
+ /* When shrink_icache_memory_fn == NULL support is disabled */
+ shrink_icache_memory_fn = (shrink_icache_memory_t)
+ spl_kallsyms_lookup_name("shrink_icache_memory");
+#endif /* HAVE_SHRINK_ICACHE_MEMORY */
+
return 0;
}
spl_kmem_init(void)
{
int rc = 0;
- ENTRY;
+ SENTRY;
init_rwsem(&spl_kmem_cache_sem);
INIT_LIST_HEAD(&spl_kmem_cache_list);
-#ifdef HAVE_SET_SHRINKER
- spl_kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
- spl_kmem_cache_generic_shrinker);
- if (spl_kmem_cache_shrinker == NULL)
- RETURN(rc = -ENOMEM);
-#else
- register_shrinker(&spl_kmem_cache_shrinker);
-#endif
+ spl_register_shrinker(&spl_kmem_cache_shrinker);
#ifdef DEBUG_KMEM
kmem_alloc_used_set(0);
spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
#endif
- RETURN(rc);
+ SRETURN(rc);
}
void
* at that address to aid in debugging. Performance is not
* a serious concern here since it is module unload time. */
if (kmem_alloc_used_read() != 0)
- CWARN("kmem leaked %ld/%ld bytes\n",
- kmem_alloc_used_read(), kmem_alloc_max);
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ "kmem leaked %ld/%ld bytes\n",
+ kmem_alloc_used_read(), kmem_alloc_max);
if (vmem_alloc_used_read() != 0)
- CWARN("vmem leaked %ld/%ld bytes\n",
- vmem_alloc_used_read(), vmem_alloc_max);
+ SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ "vmem leaked %ld/%ld bytes\n",
+ vmem_alloc_used_read(), vmem_alloc_max);
spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
#endif /* DEBUG_KMEM */
- ENTRY;
+ SENTRY;
-#ifdef HAVE_SET_SHRINKER
- remove_shrinker(spl_kmem_cache_shrinker);
-#else
- unregister_shrinker(&spl_kmem_cache_shrinker);
-#endif
+ spl_unregister_shrinker(&spl_kmem_cache_shrinker);
- EXIT;
+ SEXIT;
}