]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - include/linux/vmalloc.h
mm: vmalloc: add flag preventing guard hole allocation
[mirror_ubuntu-jammy-kernel.git] / include / linux / vmalloc.h
CommitLineData
1da177e4
LT
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <asm/page.h>		/* pgprot_t */

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_VPAGES		0x00000010	/* buffer for pages was vmalloc'ed */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

1da177e4 30struct vm_struct {
2b4ac44e 31 struct vm_struct *next;
1da177e4
LT
32 void *addr;
33 unsigned long size;
34 unsigned long flags;
35 struct page **pages;
36 unsigned int nr_pages;
ffa71f33 37 phys_addr_t phys_addr;
5e6cafc8 38 const void *caller;
1da177e4
LT
39};
40
13ba3fcb
AK
41struct vmap_area {
42 unsigned long va_start;
43 unsigned long va_end;
44 unsigned long flags;
45 struct rb_node rb_node; /* address sorted rbtree */
46 struct list_head list; /* address sorted list */
47 struct list_head purge_list; /* "lazy purge" list */
48 struct vm_struct *vm;
49 struct rcu_head rcu_head;
50};
51
1da177e4
LT
52/*
53 * Highlevel APIs for driver use
54 */
db64fe02
NP
55extern void vm_unmap_ram(const void *mem, unsigned int count);
56extern void *vm_map_ram(struct page **pages, unsigned int count,
57 int node, pgprot_t prot);
58extern void vm_unmap_aliases(void);
59
60#ifdef CONFIG_MMU
61extern void __init vmalloc_init(void);
62#else
63static inline void vmalloc_init(void)
64{
65}
66#endif
67
1da177e4 68extern void *vmalloc(unsigned long size);
e1ca7788 69extern void *vzalloc(unsigned long size);
83342314 70extern void *vmalloc_user(unsigned long size);
930fc45a 71extern void *vmalloc_node(unsigned long size, int node);
e1ca7788 72extern void *vzalloc_node(unsigned long size, int node);
1da177e4
LT
73extern void *vmalloc_exec(unsigned long size);
74extern void *vmalloc_32(unsigned long size);
83342314 75extern void *vmalloc_32_user(unsigned long size);
dd0fc66f 76extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
d0a21265
DR
77extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
78 unsigned long start, unsigned long end, gfp_t gfp_mask,
5e6cafc8 79 pgprot_t prot, int node, const void *caller);
b3bdda02 80extern void vfree(const void *addr);
1da177e4
LT
81
82extern void *vmap(struct page **pages, unsigned int count,
83 unsigned long flags, pgprot_t prot);
b3bdda02 84extern void vunmap(const void *addr);
83342314 85
e69e9d4a
HD
86extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
87 unsigned long uaddr, void *kaddr,
88 unsigned long size);
89
83342314
NP
90extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
91 unsigned long pgoff);
1eeb66a1 92void vmalloc_sync_all(void);
1da177e4
LT
93
94/*
95 * Lowlevel-APIs (not for driver use!)
96 */
9585116b
JF
97
98static inline size_t get_vm_area_size(const struct vm_struct *area)
99{
71394fe5
AR
100 if (!(area->flags & VM_NO_GUARD))
101 /* return actual size without guard page */
102 return area->size - PAGE_SIZE;
103 else
104 return area->size;
105
9585116b
JF
106}
107
1da177e4 108extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
23016969 109extern struct vm_struct *get_vm_area_caller(unsigned long size,
5e6cafc8 110 unsigned long flags, const void *caller);
1da177e4
LT
111extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
112 unsigned long start, unsigned long end);
c2968612
BH
113extern struct vm_struct *__get_vm_area_caller(unsigned long size,
114 unsigned long flags,
115 unsigned long start, unsigned long end,
5e6cafc8 116 const void *caller);
b3bdda02 117extern struct vm_struct *remove_vm_area(const void *addr);
e9da6e99 118extern struct vm_struct *find_vm_area(const void *addr);
c19c03fc 119
1da177e4 120extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
f6f8ed47 121 struct page **pages);
b554cb42 122#ifdef CONFIG_MMU
8fc48985
TH
123extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
124 pgprot_t prot, struct page **pages);
125extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
c19c03fc 126extern void unmap_kernel_range(unsigned long addr, unsigned long size);
b554cb42
GY
127#else
128static inline int
129map_kernel_range_noflush(unsigned long start, unsigned long size,
130 pgprot_t prot, struct page **pages)
131{
132 return size >> PAGE_SHIFT;
133}
134static inline void
135unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
136{
137}
138static inline void
139unmap_kernel_range(unsigned long addr, unsigned long size)
140{
141}
142#endif
1da177e4 143
5f4352fb 144/* Allocate/destroy a 'vmalloc' VM area. */
cd12909c 145extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
5f4352fb
JF
146extern void free_vm_area(struct vm_struct *area);
147
69beeb1d
KM
148/* for /dev/kmem */
149extern long vread(char *buf, char *addr, unsigned long count);
150extern long vwrite(char *buf, char *addr, unsigned long count);
151
1da177e4
LT
152/*
153 * Internals. Dont't use..
154 */
f1c4069e 155extern struct list_head vmap_area_list;
be9b7335 156extern __init void vm_area_add_early(struct vm_struct *vm);
c0c0a293 157extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
1da177e4 158
#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
/* !CONFIG_MMU: congruent percpu areas cannot be set up; always fail. */
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

db3808c1
JK
182struct vmalloc_info {
183 unsigned long used;
184 unsigned long largest_chunk;
185};
186
187#ifdef CONFIG_MMU
188#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
189extern void get_vmalloc_info(struct vmalloc_info *vmi);
190#else
191
192#define VMALLOC_TOTAL 0UL
193#define get_vmalloc_info(vmi) \
194do { \
195 (vmi)->used = 0; \
196 (vmi)->largest_chunk = 0; \
197} while (0)
198#endif
199
1da177e4 200#endif /* _LINUX_VMALLOC_H */