 #define VM_ALLOC		0x00000002	/* vmalloc() */
 #define VM_MAP			0x00000004	/* vmap()ed pages */
 #define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
-#define VM_VPAGES		0x00000010	/* buffer for pages was vmalloc'ed */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040	/* don't add guard page */
 #define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
 			__free_kmem_pages(page, 0);
 		}
-		if (area->flags & VM_VPAGES)
-			vfree(area->pages);
-		else
-			kfree(area->pages);
+		kvfree(area->pages);
 	}
 	kfree(area);
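
The open-coded branch on VM_VPAGES is redundant because kvfree() already picks the correct release path from the pointer itself. Roughly, the helper in mm/util.c amounts to the following sketch, shown only to illustrate why the flag can go away:

	void kvfree(const void *addr)
	{
		/* vmalloc-range pointer: release with vfree(); otherwise it came from kmalloc */
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}
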
 	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
 				PAGE_KERNEL, node, area->caller);
-		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size, nested_gfp, node);
 	}
 	if (v->flags & VM_USERMAP)
 		seq_puts(m, " user");
-	if (v->flags & VM_VPAGES)
+	if (is_vmalloc_addr(v->pages))
 		seq_puts(m, " vpages");
 	show_numa_info(m, v);
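
For the " vpages" annotation in /proc/vmallocinfo, the origin of the pages[] array is now inferred from its address rather than from a flag. is_vmalloc_addr() is essentially a range check against the vmalloc area; a simplified sketch of the CONFIG_MMU case (the real helper in include/linux/mm.h also covers !CONFIG_MMU by returning false):

	static inline bool is_vmalloc_addr(const void *x)
	{
		unsigned long addr = (unsigned long)x;

		/* true only for addresses inside the vmalloc mapping range */
		return addr >= VMALLOC_START && addr < VMALLOC_END;
	}
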