Name: spl
Branch: 1.0
Version: 0.6.0
-Release: rc2
+Release: rc3
Release-Tags: relext
SPL_AC_3ARGS_FILE_FSYNC
SPL_AC_EXPORTED_RWSEM_IS_LOCKED
SPL_AC_KERNEL_INVALIDATE_INODES
+ SPL_AC_SHRINK_DCACHE_MEMORY
+ SPL_AC_SHRINK_ICACHE_MEMORY
])
AC_DEFUN([SPL_AC_MODULE_SYMVERS], [
[invalidate_inodes() is available])],
[])
])
+
+dnl #
+dnl # 2.6.xx API compat,
+dnl # There currently exists no exposed API to partially shrink the dcache.
+dnl # The expected mechanism to shrink the cache is a registered shrinker
+dnl # which is called during memory pressure.
+dnl #
+dnl # When shrink_dcache_memory() is not exported by the kernel the module
+dnl # falls back to resolving it at load time via kallsyms instead (so an
+dnl # absent export is handled, not fatal to the build).
+dnl #
+AC_DEFUN([SPL_AC_SHRINK_DCACHE_MEMORY], [
+ SPL_CHECK_SYMBOL_EXPORT(
+ [shrink_dcache_memory],
+ [fs/dcache.c],
+ [AC_DEFINE(HAVE_SHRINK_DCACHE_MEMORY, 1,
+ [shrink_dcache_memory() is available])],
+ [])
+])
+
+dnl #
+dnl # 2.6.xx API compat,
+dnl # There currently exists no exposed API to partially shrink the icache.
+dnl # The expected mechanism to shrink the cache is a registered shrinker
+dnl # which is called during memory pressure.
+dnl #
+dnl # When shrink_icache_memory() is not exported by the kernel the module
+dnl # falls back to resolving it at load time via kallsyms instead (so an
+dnl # absent export is handled, not fatal to the build).
+dnl #
+AC_DEFUN([SPL_AC_SHRINK_ICACHE_MEMORY], [
+ SPL_CHECK_SYMBOL_EXPORT(
+ [shrink_icache_memory],
+ [fs/inode.c],
+ [AC_DEFINE(HAVE_SHRINK_ICACHE_MEMORY, 1,
+ [shrink_icache_memory() is available])],
+ [])
+])
fi
+
+ { $as_echo "$as_me:$LINENO: checking whether symbol shrink_dcache_memory is exported" >&5
+$as_echo_n "checking whether symbol shrink_dcache_memory is exported... " >&6; }
+ grep -q -E '[[:space:]]shrink_dcache_memory[[:space:]]' \
+ $LINUX_OBJ/Module*.symvers 2>/dev/null
+ rc=$?
+ if test $rc -ne 0; then
+ export=0
+ for file in fs/dcache.c; do
+ grep -q -E "EXPORT_SYMBOL.*(shrink_dcache_memory)" \
+ "$LINUX_OBJ/$file" 2>/dev/null
+ rc=$?
+ if test $rc -eq 0; then
+ export=1
+ break;
+ fi
+ done
+ if test $export -eq 0; then
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+
+ else
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_SHRINK_DCACHE_MEMORY 1
+_ACEOF
+
+ fi
+ else
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_SHRINK_DCACHE_MEMORY 1
+_ACEOF
+
+ fi
+
+
+
+ { $as_echo "$as_me:$LINENO: checking whether symbol shrink_icache_memory is exported" >&5
+$as_echo_n "checking whether symbol shrink_icache_memory is exported... " >&6; }
+ grep -q -E '[[:space:]]shrink_icache_memory[[:space:]]' \
+ $LINUX_OBJ/Module*.symvers 2>/dev/null
+ rc=$?
+ if test $rc -ne 0; then
+ export=0
+ for file in fs/inode.c; do
+ grep -q -E "EXPORT_SYMBOL.*(shrink_icache_memory)" \
+ "$LINUX_OBJ/$file" 2>/dev/null
+ rc=$?
+ if test $rc -eq 0; then
+ export=1
+ break;
+ fi
+ done
+ if test $export -eq 0; then
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+
+ else
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_SHRINK_ICACHE_MEMORY 1
+_ACEOF
+
+ fi
+ else
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_SHRINK_ICACHE_MEMORY 1
+_ACEOF
+
+ fi
+
+
;;
user)
+ { $as_echo "$as_me:$LINENO: checking whether symbol shrink_dcache_memory is exported" >&5
+$as_echo_n "checking whether symbol shrink_dcache_memory is exported... " >&6; }
+ grep -q -E '[[:space:]]shrink_dcache_memory[[:space:]]' \
+ $LINUX_OBJ/Module*.symvers 2>/dev/null
+ rc=$?
+ if test $rc -ne 0; then
+ export=0
+ for file in fs/dcache.c; do
+ grep -q -E "EXPORT_SYMBOL.*(shrink_dcache_memory)" \
+ "$LINUX_OBJ/$file" 2>/dev/null
+ rc=$?
+ if test $rc -eq 0; then
+ export=1
+ break;
+ fi
+ done
+ if test $export -eq 0; then
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+
+ else
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_SHRINK_DCACHE_MEMORY 1
+_ACEOF
+
+ fi
+ else
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_SHRINK_DCACHE_MEMORY 1
+_ACEOF
+
+ fi
+
+
+
+ { $as_echo "$as_me:$LINENO: checking whether symbol shrink_icache_memory is exported" >&5
+$as_echo_n "checking whether symbol shrink_icache_memory is exported... " >&6; }
+ grep -q -E '[[:space:]]shrink_icache_memory[[:space:]]' \
+ $LINUX_OBJ/Module*.symvers 2>/dev/null
+ rc=$?
+ if test $rc -ne 0; then
+ export=0
+ for file in fs/inode.c; do
+ grep -q -E "EXPORT_SYMBOL.*(shrink_icache_memory)" \
+ "$LINUX_OBJ/$file" 2>/dev/null
+ rc=$?
+ if test $rc -eq 0; then
+ export=1
+ break;
+ fi
+ done
+ if test $export -eq 0; then
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+
+ else
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_SHRINK_ICACHE_MEMORY 1
+_ACEOF
+
+ fi
+ else
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_SHRINK_ICACHE_MEMORY 1
+_ACEOF
+
+ fi
+
+
+
if test "x$AWK" != xgawk; then
#define invalidate_inodes(sb) invalidate_inodes_fn(sb)
#endif /* HAVE_INVALIDATE_INODES */
+/*
+ * 2.6.xx API compat,
+ * There currently exists no exposed API to partially shrink the dcache.
+ * The expected mechanism to shrink the cache is a registered shrinker
+ * which is called during memory pressure.
+ *
+ * When HAVE_SHRINK_DCACHE_MEMORY is unset the symbol is not exported by
+ * this kernel; shrink_dcache_memory_fn is resolved via kallsyms at module
+ * load time (it is SYMBOL_POISON until then) and the macro below routes
+ * all calls through that pointer.  In the 3-argument shrinker-callback
+ * case the NULL first argument is the unused struct shrinker *.
+ */
+#ifndef HAVE_SHRINK_DCACHE_MEMORY
+# ifdef HAVE_3ARGS_SHRINKER_CALLBACK
+typedef int (*shrink_dcache_memory_t)(struct shrinker *, int, gfp_t);
+extern shrink_dcache_memory_t shrink_dcache_memory_fn;
+# define shrink_dcache_memory(nr, gfp) shrink_dcache_memory_fn(NULL, nr, gfp)
+# else
+typedef int (*shrink_dcache_memory_t)(int, gfp_t);
+extern shrink_dcache_memory_t shrink_dcache_memory_fn;
+# define shrink_dcache_memory(nr, gfp) shrink_dcache_memory_fn(nr, gfp)
+# endif /* HAVE_3ARGS_SHRINKER_CALLBACK */
+#endif /* HAVE_SHRINK_DCACHE_MEMORY */
+
+/*
+ * 2.6.xx API compat,
+ * There currently exists no exposed API to partially shrink the icache.
+ * The expected mechanism to shrink the cache is a registered shrinker
+ * which is called during memory pressure.
+ *
+ * When HAVE_SHRINK_ICACHE_MEMORY is unset the symbol is not exported by
+ * this kernel; shrink_icache_memory_fn is resolved via kallsyms at module
+ * load time (it is SYMBOL_POISON until then) and the macro below routes
+ * all calls through that pointer.  In the 3-argument shrinker-callback
+ * case the NULL first argument is the unused struct shrinker *.
+ */
+#ifndef HAVE_SHRINK_ICACHE_MEMORY
+# ifdef HAVE_3ARGS_SHRINKER_CALLBACK
+typedef int (*shrink_icache_memory_t)(struct shrinker *, int, gfp_t);
+extern shrink_icache_memory_t shrink_icache_memory_fn;
+# define shrink_icache_memory(nr, gfp) shrink_icache_memory_fn(NULL, nr, gfp)
+# else
+typedef int (*shrink_icache_memory_t)(int, gfp_t);
+extern shrink_icache_memory_t shrink_icache_memory_fn;
+# define shrink_icache_memory(nr, gfp) shrink_icache_memory_fn(nr, gfp)
+# endif /* HAVE_3ARGS_SHRINKER_CALLBACK */
+#endif /* HAVE_SHRINK_ICACHE_MEMORY */
+
+#ifdef HAVE_SET_SHRINKER
+/*
+ * Old shrinker API: shrinkers are registered with set_shrinker(), which
+ * allocates and returns the struct shrinker.  Wrap the seeks/callback
+ * pair plus the returned handle in spl_shrinker_t so both kernel API
+ * generations can be driven through the same spl_*_shrinker() helpers
+ * and macros below.
+ */
+typedef struct spl_shrinker {
+ struct shrinker *shrinker;
+ shrinker_t fn;
+ int seeks;
+} spl_shrinker_t;
+
+/*
+ * NOTE(review): set_shrinker() can fail and return NULL; the code this
+ * replaces treated that as -ENOMEM, but with a void return the caller
+ * can no longer detect the failure (spl_unregister_shrinker() would then
+ * pass NULL to remove_shrinker()) — confirm this is acceptable.
+ */
+static inline void
+spl_register_shrinker(spl_shrinker_t *ss)
+{
+ ss->shrinker = set_shrinker(ss->seeks, ss->fn);
+}
+
+static inline void
+spl_unregister_shrinker(spl_shrinker_t *ss)
+{
+ remove_shrinker(ss->shrinker);
+}
+
+/* Declare a static shrinker; x = callback, y = seeks cost. */
+# define SPL_SHRINKER_DECLARE(s, x, y) \
+ static spl_shrinker_t s = { .shrinker = NULL, .fn = x, .seeks = y }
+/* Old API callbacks take (nr_to_scan, gfp_mask) only; 'x' is unused. */
+# define SPL_SHRINKER_CALLBACK_PROTO(fn, x, y, z) \
+ static int fn(int y, unsigned int z)
+/* Invoke the callback directly, as the kernel would under pressure. */
+# define spl_exec_shrinker(ss, nr, gfp) \
+ ((spl_shrinker_t *)ss)->fn(nr, gfp)
+
+#else /* HAVE_SET_SHRINKER */
+
+/* New shrinker API: caller owns the struct shrinker; register/unregister. */
+# define spl_register_shrinker(x) register_shrinker(x)
+# define spl_unregister_shrinker(x) unregister_shrinker(x)
+# define SPL_SHRINKER_DECLARE(s, x, y) \
+ static struct shrinker s = { .shrink = x, .seeks = y }
+
+# ifdef HAVE_3ARGS_SHRINKER_CALLBACK
+/* 3-arg callbacks also receive the struct shrinker *; NULL when driven
+ * manually via spl_exec_shrinker(). */
+# define SPL_SHRINKER_CALLBACK_PROTO(fn, x, y, z) \
+ static int fn(struct shrinker *x, int y, unsigned int z)
+# define spl_exec_shrinker(ss, nr, gfp) \
+ ((struct shrinker *)ss)->shrink(NULL, nr, gfp)
+# else /* HAVE_3ARGS_SHRINKER_CALLBACK */
+# define SPL_SHRINKER_CALLBACK_PROTO(fn, x, y, z) \
+ static int fn(int y, unsigned int z)
+# define spl_exec_shrinker(ss, nr, gfp) \
+ ((struct shrinker *)ss)->shrink(nr, gfp)
+# endif /* HAVE_3ARGS_SHRINKER_CALLBACK */
+#endif /* HAVE_SET_SHRINKER */
+
#endif /* SPL_MM_COMPAT_H */
#ifndef _SPL_DNLC_H
#define _SPL_DNLC_H
-#define dnlc_reduce_cache(percent) ((void)0)
+/*
+ * Reduce the dcache and icache then reap the free'd slabs. Note the
+ * interface takes a reclaim percentage but we don't have easy access to
+ * the total number of entries to calculate the reclaim count. However,
+ * in practice this doesn't need to be even close to correct. We simply
+ * need to reclaim some useful fraction of the cache. The caller can
+ * determine if more needs to be done.
+ *
+ * reduce_percent: percentage to reclaim, smuggled through a void *
+ * (cast back via uintptr_t) to match the Solaris dnlc callback shape.
+ * NOTE(review): nr = percent * 10000 assumes percent stays small enough
+ * not to overflow int — presumably <= 100; confirm against callers.
+ */
+static inline void
+dnlc_reduce_cache(void *reduce_percent)
+{
+ int nr = (uintptr_t)reduce_percent * 10000;
+
+ shrink_dcache_memory(nr, GFP_KERNEL);
+ shrink_icache_memory(nr, GFP_KERNEL);
+ kmem_reap();
+}
#endif /* SPL_DNLC_H */
KMC_BIT_OFFSLAB = 7, /* Objects not on slab */
KMC_BIT_REAPING = 16, /* Reaping in progress */
KMC_BIT_DESTROY = 17, /* Destroy in progress */
+ KMC_BIT_TOTAL = 18, /* Proc handler helper bit */
+ KMC_BIT_ALLOC = 19, /* Proc handler helper bit */
+ KMC_BIT_MAX = 20, /* Proc handler helper bit */
};
/* kmem move callback return values */
#define KMC_OFFSLAB (1 << KMC_BIT_OFFSLAB)
#define KMC_REAPING (1 << KMC_BIT_REAPING)
#define KMC_DESTROY (1 << KMC_BIT_DESTROY)
+#define KMC_TOTAL (1 << KMC_BIT_TOTAL)
+#define KMC_ALLOC (1 << KMC_BIT_ALLOC)
+#define KMC_MAX (1 << KMC_BIT_MAX)
#define KMC_REAP_CHUNK INT_MAX
#define KMC_DEFAULT_SEEKS 1
#define SPL_KMEM_CACHE_DELAY 15 /* Minimum slab release age */
#define SPL_KMEM_CACHE_REAP 0 /* Default reap everything */
-#define SPL_KMEM_CACHE_OBJ_PER_SLAB 32 /* Target objects per slab */
+#define SPL_KMEM_CACHE_OBJ_PER_SLAB 16 /* Target objects per slab */
#define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN 8 /* Minimum objects per slab */
#define SPL_KMEM_CACHE_ALIGN 8 /* Default object alignment */
uint64_t va_nblocks; /* space used */
uint32_t va_blksize; /* block size */
uint32_t va_seq; /* sequence */
+ struct dentry *va_dentry; /* dentry to wire */
} vattr_t;
typedef struct vnode {
EXPORT_SYMBOL(invalidate_inodes_fn);
#endif /* HAVE_INVALIDATE_INODES */
+#ifndef HAVE_SHRINK_DCACHE_MEMORY
+/*
+ * shrink_dcache_memory() is not exported by this kernel: the function
+ * pointer is resolved at module load via spl_kallsyms_lookup_name() and
+ * stays SYMBOL_POISON until then so any premature call faults loudly.
+ */
+shrink_dcache_memory_t shrink_dcache_memory_fn = SYMBOL_POISON;
+EXPORT_SYMBOL(shrink_dcache_memory_fn);
+#endif /* HAVE_SHRINK_DCACHE_MEMORY */
+
+#ifndef HAVE_SHRINK_ICACHE_MEMORY
+/* Same poison-until-resolved scheme for shrink_icache_memory(). */
+shrink_icache_memory_t shrink_icache_memory_fn = SYMBOL_POISON;
+EXPORT_SYMBOL(shrink_icache_memory_fn);
+#endif /* HAVE_SHRINK_ICACHE_MEMORY */
+
pgcnt_t
spl_kmem_availrmem(void)
{
static int spl_cache_flush(spl_kmem_cache_t *skc,
spl_kmem_magazine_t *skm, int flush);
-#ifdef HAVE_SET_SHRINKER
-static struct shrinker *spl_kmem_cache_shrinker;
-#else
-# ifdef HAVE_3ARGS_SHRINKER_CALLBACK
-static int spl_kmem_cache_generic_shrinker(struct shrinker *shrinker_cb,
- int nr_to_scan, unsigned int gfp_mask);
-# else
-static int spl_kmem_cache_generic_shrinker(
- int nr_to_scan, unsigned int gfp_mask);
-# endif /* HAVE_3ARGS_SHRINKER_CALLBACK */
-static struct shrinker spl_kmem_cache_shrinker = {
- .shrink = spl_kmem_cache_generic_shrinker,
- .seeks = KMC_DEFAULT_SEEKS,
-};
-#endif /* HAVE_SET_SHRINKER */
+SPL_SHRINKER_CALLBACK_PROTO(spl_kmem_cache_generic_shrinker,
+ shrinker_cb, nr_to_scan, gfp_mask);
+SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
+ spl_kmem_cache_generic_shrinker, KMC_DEFAULT_SEEKS);
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
* objects should be freed, because Solaris semantics are to free
* all available objects we may free more objects than requested.
*/
-#ifdef HAVE_3ARGS_SHRINKER_CALLBACK
-static int
-spl_kmem_cache_generic_shrinker(struct shrinker *shrinker_cb,
- int nr_to_scan, unsigned int gfp_mask)
-#else
-static int
-spl_kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
-#endif /* HAVE_3ARGS_SHRINKER_CALLBACK */
+SPL_SHRINKER_CALLBACK_PROTO(spl_kmem_cache_generic_shrinker,
+ shrinker_cb, nr_to_scan, gfp_mask)
{
spl_kmem_cache_t *skc;
int unused = 0;
void
spl_kmem_reap(void)
{
-#ifdef HAVE_3ARGS_SHRINKER_CALLBACK
- spl_kmem_cache_generic_shrinker(NULL, KMC_REAP_CHUNK, GFP_KERNEL);
-#else
- spl_kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
-#endif /* HAVE_3ARGS_SHRINKER_CALLBACK */
+ spl_exec_shrinker(&spl_kmem_cache_shrinker, KMC_REAP_CHUNK, GFP_KERNEL);
}
EXPORT_SYMBOL(spl_kmem_reap);
}
#endif /* HAVE_INVALIDATE_INODES */
+#ifndef HAVE_SHRINK_DCACHE_MEMORY
+ shrink_dcache_memory_fn = (shrink_dcache_memory_t)
+ spl_kallsyms_lookup_name("shrink_dcache_memory");
+ if (!shrink_dcache_memory_fn) {
+ printk(KERN_ERR "Error: Unknown symbol shrink_dcache_memory\n");
+ return -EFAULT;
+ }
+#endif /* HAVE_SHRINK_DCACHE_MEMORY */
+
+#ifndef HAVE_SHRINK_ICACHE_MEMORY
+ shrink_icache_memory_fn = (shrink_icache_memory_t)
+ spl_kallsyms_lookup_name("shrink_icache_memory");
+ if (!shrink_icache_memory_fn) {
+ printk(KERN_ERR "Error: Unknown symbol shrink_icache_memory\n");
+ return -EFAULT;
+ }
+#endif /* HAVE_SHRINK_ICACHE_MEMORY */
+
return 0;
}
init_rwsem(&spl_kmem_cache_sem);
INIT_LIST_HEAD(&spl_kmem_cache_list);
-#ifdef HAVE_SET_SHRINKER
- spl_kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
- spl_kmem_cache_generic_shrinker);
- if (spl_kmem_cache_shrinker == NULL)
- SRETURN(rc = -ENOMEM);
-#else
- register_shrinker(&spl_kmem_cache_shrinker);
-#endif
+ spl_register_shrinker(&spl_kmem_cache_shrinker);
#ifdef DEBUG_KMEM
kmem_alloc_used_set(0);
#endif /* DEBUG_KMEM */
SENTRY;
-#ifdef HAVE_SET_SHRINKER
- remove_shrinker(spl_kmem_cache_shrinker);
-#else
- unregister_shrinker(&spl_kmem_cache_shrinker);
-#endif
+ spl_unregister_shrinker(&spl_kmem_cache_shrinker);
SEXIT;
}
#define CTL_KMEM_KMEMMAX CTL_UNNUMBERED /* Max alloc'd by kmem bytes */
#define CTL_KMEM_VMEMUSED CTL_UNNUMBERED /* Alloc'd vmem bytes */
#define CTL_KMEM_VMEMMAX CTL_UNNUMBERED /* Max alloc'd by vmem bytes */
-#define CTL_KMEM_ALLOC_FAILED CTL_UNNUMBERED /* Cache allocations failed */
+#define CTL_KMEM_SLAB_KMEMTOTAL CTL_UNNUMBERED /* Total kmem slab size */
+#define CTL_KMEM_SLAB_KMEMALLOC CTL_UNNUMBERED /* Alloc'd kmem slab size */
+#define CTL_KMEM_SLAB_KMEMMAX CTL_UNNUMBERED /* Max kmem slab size */
+#define CTL_KMEM_SLAB_VMEMTOTAL CTL_UNNUMBERED /* Total vmem slab size */
+#define CTL_KMEM_SLAB_VMEMALLOC CTL_UNNUMBERED /* Alloc'd vmem slab size */
+#define CTL_KMEM_SLAB_VMEMMAX CTL_UNNUMBERED /* Max vmem slab size */
#endif
#else /* HAVE_CTL_UNNUMBERED */
CTL_KMEM_KMEMMAX, /* Max alloc'd by kmem bytes */
CTL_KMEM_VMEMUSED, /* Alloc'd vmem bytes */
CTL_KMEM_VMEMMAX, /* Max alloc'd by vmem bytes */
+ CTL_KMEM_SLAB_KMEMTOTAL, /* Total kmem slab size */
+ CTL_KMEM_SLAB_KMEMALLOC, /* Alloc'd kmem slab size */
+ CTL_KMEM_SLAB_KMEMMAX, /* Max kmem slab size */
+ CTL_KMEM_SLAB_VMEMTOTAL, /* Total vmem slab size */
+ CTL_KMEM_SLAB_VMEMALLOC, /* Alloc'd vmem slab size */
+ CTL_KMEM_SLAB_VMEMMAX, /* Max vmem slab size */
#endif
};
#endif /* HAVE_CTL_UNNUMBERED */
SRETURN(rc);
}
+
+/*
+ * /proc handler reporting an aggregate slab-cache statistic.  The sysctl
+ * entry's table->data is not a data pointer but a bitmask: one type bit
+ * (KMC_KMEM or KMC_VMEM) selecting which caches to include, plus one
+ * field bit (KMC_TOTAL, KMC_ALLOC or KMC_MAX) selecting which size to
+ * sum.  Writes are accepted but ignored (input is merely consumed).
+ */
+SPL_PROC_HANDLER(proc_doslab)
+{
+ int rc = 0;
+ unsigned long min = 0, max = ~0, val = 0, mask;
+ struct ctl_table dummy = *table;
+ spl_kmem_cache_t *skc;
+ SENTRY;
+
+ /* Redirect the copied table at our local accumulator. */
+ dummy.data = &val;
+ /* NOTE(review): appears vestigial — output actually goes through
+ * spl_proc_doulongvec_minmax() below; confirm before removing. */
+ dummy.proc_handler = &proc_dointvec;
+ dummy.extra1 = &min;
+ dummy.extra2 = &max;
+
+ if (write) {
+ /* Read-only value: swallow the write without applying it. */
+ *ppos += *lenp;
+ } else {
+ down_read(&spl_kmem_cache_sem);
+ mask = (unsigned long)table->data;
+
+ list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
+
+ /* Only use slabs of the correct kmem/vmem type */
+ /* NOTE(review): this also matches on the KMC_TOTAL/
+ * ALLOC/MAX helper bits in mask; it filters correctly
+ * only because those bits are never set in skc_flags
+ * — confirm that invariant holds. */
+ if (!(skc->skc_flags & mask))
+ continue;
+
+ /* Sum the specified field for selected slabs */
+ switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
+ case KMC_TOTAL:
+ val += skc->skc_slab_size * skc->skc_slab_total;
+ break;
+ case KMC_ALLOC:
+ val += skc->skc_obj_size * skc->skc_obj_alloc;
+ break;
+ case KMC_MAX:
+ val += skc->skc_obj_size * skc->skc_obj_max;
+ break;
+ }
+ }
+
+ up_read(&spl_kmem_cache_sem);
+ rc = spl_proc_doulongvec_minmax(&dummy, write, filp,
+ buffer, lenp, ppos);
+ }
+
+ SRETURN(rc);
+}
#endif /* DEBUG_KMEM */
SPL_PROC_HANDLER(proc_dohostid)
static void
slab_seq_show_headers(struct seq_file *f)
{
- seq_printf(f, "%-36s %-6s - %s %s %s - %s %s %s - "
- "%s %s %s - %s %s %s\n", "name", "flags",
- "obj_size", "slab_objs", "slab_size",
- "slab_fail", "slab_create", "slab_destroy",
- "slab_total", "slab_alloc", "slab_max",
- "obj_total", "obj_alloc", "obj_max");
+ seq_printf(f,
+ "--------------------- cache ----------"
+ "--------------------------------------------- "
+ "----- slab ------ "
+ "---- object -----\n");
+ seq_printf(f,
+ "name "
+ " flags size alloc slabsize objsize "
+ "total alloc max "
+ "total alloc max\n");
}
static int
ASSERT(skc->skc_magic == SKC_MAGIC);
spin_lock(&skc->skc_lock);
- seq_printf(f, "%-36s ", skc->skc_name);
- seq_printf(f, "0x%04lx - %u %u %u - %lu %lu %lu - "
- "%lu %lu %lu - %lu %lu %lu\n",
- (long unsigned)skc->skc_flags,
- (unsigned)skc->skc_obj_size,
- (unsigned)skc->skc_slab_objs,
- (unsigned)skc->skc_slab_size,
- (long unsigned)skc->skc_slab_fail,
- (long unsigned)skc->skc_slab_create,
- (long unsigned)skc->skc_slab_destroy,
- (long unsigned)skc->skc_slab_total,
- (long unsigned)skc->skc_slab_alloc,
- (long unsigned)skc->skc_slab_max,
- (long unsigned)skc->skc_obj_total,
- (long unsigned)skc->skc_obj_alloc,
- (long unsigned)skc->skc_obj_max);
+ seq_printf(f, "%-36s ", skc->skc_name);
+ seq_printf(f, "0x%05lx %9lu %9lu %8u %8u "
+ "%5lu %5lu %5lu %5lu %5lu %5lu\n",
+ (long unsigned)skc->skc_flags,
+ (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
+ (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
+ (unsigned)skc->skc_slab_size,
+ (unsigned)skc->skc_obj_size,
+ (long unsigned)skc->skc_slab_total,
+ (long unsigned)skc->skc_slab_alloc,
+ (long unsigned)skc->skc_slab_max,
+ (long unsigned)skc->skc_obj_total,
+ (long unsigned)skc->skc_obj_alloc,
+ (long unsigned)skc->skc_obj_max);
spin_unlock(&skc->skc_lock);
.mode = 0444,
.proc_handler = &proc_doulongvec_minmax,
},
+ {
+ CTL_NAME (CTL_KMEM_SLAB_KMEMTOTAL)
+ .procname = "slab_kmem_total",
+ .data = (void *)(KMC_KMEM | KMC_TOTAL),
+ .maxlen = sizeof(unsigned long),
+ .extra1 = &table_min,
+ .extra2 = &table_max,
+ .mode = 0444,
+ .proc_handler = &proc_doslab,
+ },
+ {
+ CTL_NAME (CTL_KMEM_SLAB_KMEMALLOC)
+ .procname = "slab_kmem_alloc",
+ .data = (void *)(KMC_KMEM | KMC_ALLOC),
+ .maxlen = sizeof(unsigned long),
+ .extra1 = &table_min,
+ .extra2 = &table_max,
+ .mode = 0444,
+ .proc_handler = &proc_doslab,
+ },
+ {
+ CTL_NAME (CTL_KMEM_SLAB_KMEMMAX)
+ .procname = "slab_kmem_max",
+ .data = (void *)(KMC_KMEM | KMC_MAX),
+ .maxlen = sizeof(unsigned long),
+ .extra1 = &table_min,
+ .extra2 = &table_max,
+ .mode = 0444,
+ .proc_handler = &proc_doslab,
+ },
+ {
+ CTL_NAME (CTL_KMEM_SLAB_VMEMTOTAL)
+ .procname = "slab_vmem_total",
+ .data = (void *)(KMC_VMEM | KMC_TOTAL),
+ .maxlen = sizeof(unsigned long),
+ .extra1 = &table_min,
+ .extra2 = &table_max,
+ .mode = 0444,
+ .proc_handler = &proc_doslab,
+ },
+ {
+ CTL_NAME (CTL_KMEM_SLAB_VMEMALLOC)
+ .procname = "slab_vmem_alloc",
+ .data = (void *)(KMC_VMEM | KMC_ALLOC),
+ .maxlen = sizeof(unsigned long),
+ .extra1 = &table_min,
+ .extra2 = &table_max,
+ .mode = 0444,
+ .proc_handler = &proc_doslab,
+ },
+ {
+ CTL_NAME (CTL_KMEM_SLAB_VMEMMAX)
+ .procname = "slab_vmem_max",
+ .data = (void *)(KMC_VMEM | KMC_MAX),
+ .maxlen = sizeof(unsigned long),
+ .extra1 = &table_min,
+ .extra2 = &table_max,
+ .mode = 0444,
+ .proc_handler = &proc_doslab,
+ },
{0},
};
#endif /* DEBUG_KMEM */
/* set_shrinker() available */
#undef HAVE_SET_SHRINKER
+/* shrink_dcache_memory() is available */
+#undef HAVE_SHRINK_DCACHE_MEMORY
+
+/* shrink_icache_memory() is available */
+#undef HAVE_SHRINK_ICACHE_MEMORY
+
/* Define to 1 if you have the <stdint.h> header file. */
#undef HAVE_STDINT_H