/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */

/*
 * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
 *
 * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
 * shadow memory has been mapped. It's used to handle allocation errors so that
 * we don't try to poison shadow on free if it was never allocated.
 *
 * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
 * determine which allocations need the module shadow freed.
 */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two variables can be packed, because
	 * a vmap_area object can be either:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) or "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
};

/*
 *	Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

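/*
 * Illustrative sketch, not part of the upstream header: vm_map_ram() gives a
 * transient, virtually contiguous mapping of an existing page array. "pages"
 * and "nr" are hypothetical names used only for this example; NUMA_NO_NODE
 * comes from <linux/numa.h>.
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vm_unmap_ram(va, nr);
 */
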
#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller);

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

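/*
 * Illustrative sketch, not part of the upstream header: typical use of the
 * vmalloc() family for a large, virtually contiguous buffer. "nr" and
 * "struct foo" are hypothetical; array_size() comes from <linux/overflow.h>.
 *
 *	struct foo *buf = vzalloc(array_size(nr, sizeof(*buf)));
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 *
 * vfree() may sleep; from atomic context use vfree_atomic() instead.
 */
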
extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

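/*
 * Illustrative sketch, not part of the upstream header: vmap() stitches an
 * array of already-allocated pages into one contiguous kernel virtual range.
 * "pages" and "nr" are hypothetical names used only for this example.
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);	(the pages themselves are not freed)
 */
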
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);

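/*
 * Illustrative sketch, not part of the upstream header: a driver ->mmap()
 * handler exposing a buffer previously allocated with vmalloc_user() (which
 * marks it VM_USERMAP). "foo_priv" and "priv->buf" are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return remap_vmalloc_range(vma, priv->buf, 0);
 *	}
 */
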
/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). The
 * compiler is relied upon to optimize the calls out if
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

/*
 *	Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

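/*
 * Illustrative sketch, not part of the upstream header: find_vm_area() looks
 * up the vm_struct backing a vmalloc address, e.g. to inspect its size or
 * flags. "ptr" is a hypothetical vmalloc'ed pointer.
 *
 *	struct vm_struct *area = find_vm_area(ptr);
 *
 *	if (area && (area->flags & VM_ALLOC))
 *		pr_debug("%px backs %lu bytes\n", ptr, area->size);
 */
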
#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
#define map_kernel_range map_kernel_range_noflush
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
#define unmap_kernel_range unmap_kernel_range_noflush
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
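
/*
 * Illustrative sketch, not part of the upstream header: set_vm_flush_reset_perms()
 * is typically called right after allocating memory whose permissions will be
 * changed (e.g. made executable), so that a later vfree() resets the direct
 * map and flushes the TLB. set_memory_x() is declared in <asm/set_memory.h>;
 * "size" is a hypothetical, page-aligned allocation size.
 *
 *	void *p = __vmalloc(size, GFP_KERNEL);
 *
 *	if (p) {
 *		set_vm_flush_reset_perms(p);
 *		set_memory_x((unsigned long)p, size >> PAGE_SHIFT);
 *	}
 */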

/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 *	Internals.  Don't use..
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

#ifdef CONFIG_MMU
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */