KMC_BIT_QCACHE = 4, /* XXX: Unsupported */
KMC_BIT_KMEM = 5, /* Use kmem cache */
KMC_BIT_VMEM = 6, /* Use vmem cache */
- KMC_BIT_SLAB = 7, /* Use Linux slab cache */
- KMC_BIT_OFFSLAB = 8, /* Objects not on slab */
+ KMC_BIT_KVMEM = 7, /* Use Linux kvmalloc allocator */
+ KMC_BIT_SLAB = 8, /* Use Linux slab cache */
+ KMC_BIT_OFFSLAB = 9, /* Objects not on slab */
KMC_BIT_DEADLOCKED = 14, /* Deadlock detected */
KMC_BIT_GROWING = 15, /* Growing in progress */
KMC_BIT_REAPING = 16, /* Reaping in progress */
#define KMC_QCACHE (1 << KMC_BIT_QCACHE)
#define KMC_KMEM (1 << KMC_BIT_KMEM)
#define KMC_VMEM (1 << KMC_BIT_VMEM)
+#define KMC_KVMEM (1 << KMC_BIT_KVMEM)
#define KMC_SLAB (1 << KMC_BIT_SLAB)
#define KMC_OFFSLAB (1 << KMC_BIT_OFFSLAB)
#define KMC_DEADLOCKED (1 << KMC_BIT_DEADLOCKED)
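/*
 * Design note (editorial, not from the patch): inserting KMC_BIT_KVMEM
 * at bit 7 renumbers KMC_BIT_SLAB and KMC_BIT_OFFSLAB. This is assumed
 * safe because the KMC_* flags are internal to the SPL and are not
 * part of a persistent or user-visible ABI.
 */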
#define KMC_NODEBUG UMC_NODEBUG
#define KMC_KMEM 0x0
#define KMC_VMEM 0x0
+#define KMC_KVMEM 0x0
#define kmem_alloc(_s, _f) umem_alloc(_s, _f)
#define kmem_zalloc(_s, _f) umem_zalloc(_s, _f)
#define kmem_free(_b, _s) umem_free(_b, _s)
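/*
 * Illustrative sketch, not part of the patch: the new KMC_KVMEM backend
 * is assumed to route through a helper like spl_kvmalloc(), which defers
 * to the kernel's kvmalloc()/kvfree() pair. kvmalloc() first attempts a
 * physically contiguous kmalloc() and transparently falls back to
 * vmalloc() when the request is large or memory is fragmented; kvfree()
 * releases memory from either path. A minimal version might look like
 * this (the _sketch names and the GFP_KERNEL guard are assumptions, not
 * the patch's exact implementation):
 */
#include <linux/mm.h>
#include <linux/slab.h>

static void *
spl_kvmalloc_sketch(size_t size, gfp_t lflags)
{
	/*
	 * kvmalloc() only falls back to vmalloc() for GFP_KERNEL
	 * compatible requests; anything stricter stays on kmalloc().
	 */
	if ((lflags & GFP_KERNEL) == GFP_KERNEL)
		return (kvmalloc(size, lflags));

	return (kmalloc(size, lflags));
}

static void
spl_kvfree_sketch(void *ptr)
{
	/* kvfree() handles both kmalloc()- and vmalloc()-backed memory. */
	kvfree(ptr);
}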
if (skc->skc_flags & KMC_KMEM) {
ASSERT(ISP2(size));
ptr = (void *)__get_free_pages(lflags, get_order(size));
+ } else if (skc->skc_flags & KMC_KVMEM) {
+ ptr = spl_kvmalloc(size, lflags);
} else {
/*
* GFP_KERNEL allocations can safely use kvmalloc which may
* flags
* KMC_KMEM Force SPL kmem backed cache
* KMC_VMEM Force SPL vmem backed cache
+ * KMC_KVMEM Force SPL kvmem backed cache
* KMC_SLAB Force Linux slab backed cache
* KMC_OFFSLAB Locate objects off the slab
* KMC_NOTOUCH Disable cache object aging (unsupported)
 * Unless a specific cache type has been requested (kmem, vmem, kvmem,
 * or linuxslab) then select a cache type based on the object size
 * and default tunables.
*/
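/*
 * Usage sketch (hypothetical consumer, not part of the patch): any
 * cache can opt into the new backend by passing KMC_KVMEM in the flags
 * argument, just as the zlib workspace cache does at the end of this
 * change. The cache name and object size below are illustrative only.
 */
static kmem_cache_t *example_cache;

static int
example_cache_init(void)
{
	/* Large objects that would previously default to KMC_VMEM. */
	example_cache = kmem_cache_create("example_kvmem_cache",
	    128 * 1024, 0, NULL, NULL, NULL, NULL, NULL, KMC_KVMEM);

	return (example_cache == NULL ? 1 : 0);
}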
- if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB))) {
-
+ if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_KVMEM | KMC_SLAB))) {
if (spl_kmem_cache_slab_limit &&
size <= (size_t)spl_kmem_cache_slab_limit) {
/*
} else {
/*
* All other objects are considered large and are
- * placed on vmem backed slabs.
+ * placed on kvmem backed slabs.
*/
- skc->skc_flags |= KMC_VMEM;
+ skc->skc_flags |= KMC_KVMEM;
}
}
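/*
 * Worked example of the selection above (the limit value is an
 * assumption based on common defaults, not taken from this patch):
 * with spl_kmem_cache_slab_limit at its usual 16384, a 512-byte object
 * cache is placed on the Linux slab (KMC_SLAB), while a 128 KiB zlib
 * workspace exceeds the limit and now defaults to KMC_KVMEM instead of
 * KMC_VMEM.
 */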
/*
* Given the type of slab allocate the required resources.
*/
- if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
+ if (skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_KVMEM)) {
rc = spl_slab_size(skc,
&skc->skc_slab_objs, &skc->skc_slab_size);
if (rc)
taskqid_t id;
ASSERT(skc->skc_magic == SKC_MAGIC);
- ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB));
+ ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_KVMEM | KMC_SLAB));
down_write(&spl_kmem_cache_sem);
list_del_init(&skc->skc_list);
*/
wait_event(wq, atomic_read(&skc->skc_ref) == 0);
- if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
+ if (skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_KVMEM)) {
spl_magazine_destroy(skc);
spl_slab_reclaim(skc);
} else {
 * However, this can't be applied to KMC_VMEM or KMC_KVMEM due to a
 * bug where __vmalloc() doesn't honor gfp flags in page table
 * allocation.
*/
- if (!(skc->skc_flags & KMC_VMEM)) {
+ if (!(skc->skc_flags & (KMC_VMEM | KMC_KVMEM))) {
rc = __spl_cache_grow(skc, flags | KM_NOSLEEP);
if (rc == 0)
return (0);
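/*
 * Design note on the check above: spl_kvmalloc() may transparently
 * fall back to vmalloc(), so KMC_KVMEM caches inherit the same
 * restriction as KMC_VMEM and skip the synchronous KM_NOSLEEP grow
 * attempt, deferring the slab allocation to a context that is allowed
 * to sleep.
 */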
.mode = 0444,
.proc_handler = &proc_doslab,
},
+ {
+ .procname = "slab_kvmem_total",
+ .data = (void *)(KMC_KVMEM | KMC_TOTAL),
+ .maxlen = sizeof (unsigned long),
+ .extra1 = &table_min,
+ .extra2 = &table_max,
+ .mode = 0444,
+ .proc_handler = &proc_doslab,
+ },
+ {
+ .procname = "slab_kvmem_alloc",
+ .data = (void *)(KMC_KVMEM | KMC_ALLOC),
+ .maxlen = sizeof (unsigned long),
+ .extra1 = &table_min,
+ .extra2 = &table_max,
+ .mode = 0444,
+ .proc_handler = &proc_doslab,
+ },
+ {
+ .procname = "slab_kvmem_max",
+ .data = (void *)(KMC_KVMEM | KMC_MAX),
+ .maxlen = sizeof (unsigned long),
+ .extra1 = &table_min,
+ .extra2 = &table_max,
+ .mode = 0444,
+ .proc_handler = &proc_doslab,
+ },
{},
};
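/*
 * Usage note: assuming the SPL sysctl tree keeps its usual location,
 * the three new counters surface as read-only files such as
 * /proc/sys/kernel/spl/kmem/slab_kvmem_total, mirroring the existing
 * slab_kmem_* and slab_vmem_* entries handled by proc_doslab().
 */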
zlib_workspace_cache = kmem_cache_create(
"spl_zlib_workspace_cache",
size, 0, NULL, NULL, NULL, NULL, NULL,
- KMC_VMEM);
+ KMC_KVMEM);
if (!zlib_workspace_cache)
return (1);
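/*
 * Usage sketch (hypothetical caller, not part of the patch): objects
 * from the kvmem backed workspace cache are obtained and returned
 * exactly as before; the change of backing allocator is transparent
 * to consumers.
 */
static void
zlib_workspace_example(void)
{
	void *ws = kmem_cache_alloc(zlib_workspace_cache, KM_SLEEP);

	/* ... use the workspace for compression/decompression ... */

	kmem_cache_free(zlib_workspace_cache, ws);
}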