]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _LINUX_VMALLOC_H |
2 | #define _LINUX_VMALLOC_H | |
3 | ||
4 | #include <linux/spinlock.h> | |
db64fe02 | 5 | #include <linux/init.h> |
13ba3fcb | 6 | #include <linux/list.h> |
80c4bd7a | 7 | #include <linux/llist.h> |
1da177e4 | 8 | #include <asm/page.h> /* pgprot_t */ |
1f5307b1 | 9 | #include <asm/pgtable.h> /* PAGE_KERNEL */ |
13ba3fcb | 10 | #include <linux/rbtree.h> |
1da177e4 | 11 | |
605d9288 | 12 | struct vm_area_struct; /* vma defining user mapping in mm_types.h */ |
4da56b99 | 13 | struct notifier_block; /* in notifier.h */ |
83342314 | 14 | |
/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
1da177e4 | 33 | struct vm_struct { |
2b4ac44e | 34 | struct vm_struct *next; |
1da177e4 LT |
35 | void *addr; |
36 | unsigned long size; | |
37 | unsigned long flags; | |
38 | struct page **pages; | |
39 | unsigned int nr_pages; | |
ffa71f33 | 40 | phys_addr_t phys_addr; |
5e6cafc8 | 41 | const void *caller; |
1da177e4 LT |
42 | }; |
43 | ||
13ba3fcb AK |
44 | struct vmap_area { |
45 | unsigned long va_start; | |
46 | unsigned long va_end; | |
47 | unsigned long flags; | |
48 | struct rb_node rb_node; /* address sorted rbtree */ | |
49 | struct list_head list; /* address sorted list */ | |
80c4bd7a | 50 | struct llist_node purge_list; /* "lazy purge" list */ |
13ba3fcb AK |
51 | struct vm_struct *vm; |
52 | struct rcu_head rcu_head; | |
53 | }; | |
54 | ||
1da177e4 LT |
55 | /* |
56 | * Highlevel APIs for driver use | |
57 | */ | |
db64fe02 NP |
58 | extern void vm_unmap_ram(const void *mem, unsigned int count); |
59 | extern void *vm_map_ram(struct page **pages, unsigned int count, | |
60 | int node, pgprot_t prot); | |
61 | extern void vm_unmap_aliases(void); | |
62 | ||
63 | #ifdef CONFIG_MMU | |
64 | extern void __init vmalloc_init(void); | |
65 | #else | |
66 | static inline void vmalloc_init(void) | |
67 | { | |
68 | } | |
69 | #endif | |
70 | ||
1da177e4 | 71 | extern void *vmalloc(unsigned long size); |
e1ca7788 | 72 | extern void *vzalloc(unsigned long size); |
83342314 | 73 | extern void *vmalloc_user(unsigned long size); |
930fc45a | 74 | extern void *vmalloc_node(unsigned long size, int node); |
e1ca7788 | 75 | extern void *vzalloc_node(unsigned long size, int node); |
1da177e4 LT |
76 | extern void *vmalloc_exec(unsigned long size); |
77 | extern void *vmalloc_32(unsigned long size); | |
83342314 | 78 | extern void *vmalloc_32_user(unsigned long size); |
dd0fc66f | 79 | extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); |
d0a21265 DR |
80 | extern void *__vmalloc_node_range(unsigned long size, unsigned long align, |
81 | unsigned long start, unsigned long end, gfp_t gfp_mask, | |
cb9e3c29 AR |
82 | pgprot_t prot, unsigned long vm_flags, int node, |
83 | const void *caller); | |
1f5307b1 | 84 | #ifndef CONFIG_MMU |
a7c3e901 | 85 | extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags); |
1f5307b1 MH |
86 | #else |
87 | extern void *__vmalloc_node(unsigned long size, unsigned long align, | |
88 | gfp_t gfp_mask, pgprot_t prot, | |
89 | int node, const void *caller); | |
90 | ||
91 | /* | |
92 | * We really want to have this inlined due to caller tracking. This | |
93 | * function is used by the highlevel vmalloc apis and so we want to track | |
94 | * their callers and inlining will achieve that. | |
95 | */ | |
96 | static inline void *__vmalloc_node_flags(unsigned long size, | |
97 | int node, gfp_t flags) | |
98 | { | |
99 | return __vmalloc_node(size, 1, flags, PAGE_KERNEL, | |
100 | node, __builtin_return_address(0)); | |
101 | } | |
102 | #endif | |
cb9e3c29 | 103 | |
b3bdda02 | 104 | extern void vfree(const void *addr); |
bf22e37a | 105 | extern void vfree_atomic(const void *addr); |
1da177e4 LT |
106 | |
107 | extern void *vmap(struct page **pages, unsigned int count, | |
108 | unsigned long flags, pgprot_t prot); | |
b3bdda02 | 109 | extern void vunmap(const void *addr); |
83342314 | 110 | |
e69e9d4a HD |
111 | extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, |
112 | unsigned long uaddr, void *kaddr, | |
113 | unsigned long size); | |
114 | ||
83342314 NP |
115 | extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, |
116 | unsigned long pgoff); | |
1eeb66a1 | 117 | void vmalloc_sync_all(void); |
1da177e4 LT |
118 | |
119 | /* | |
120 | * Lowlevel-APIs (not for driver use!) | |
121 | */ | |
9585116b JF |
122 | |
123 | static inline size_t get_vm_area_size(const struct vm_struct *area) | |
124 | { | |
71394fe5 AR |
125 | if (!(area->flags & VM_NO_GUARD)) |
126 | /* return actual size without guard page */ | |
127 | return area->size - PAGE_SIZE; | |
128 | else | |
129 | return area->size; | |
130 | ||
9585116b JF |
131 | } |
132 | ||
1da177e4 | 133 | extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); |
23016969 | 134 | extern struct vm_struct *get_vm_area_caller(unsigned long size, |
5e6cafc8 | 135 | unsigned long flags, const void *caller); |
1da177e4 LT |
136 | extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, |
137 | unsigned long start, unsigned long end); | |
c2968612 BH |
138 | extern struct vm_struct *__get_vm_area_caller(unsigned long size, |
139 | unsigned long flags, | |
140 | unsigned long start, unsigned long end, | |
5e6cafc8 | 141 | const void *caller); |
b3bdda02 | 142 | extern struct vm_struct *remove_vm_area(const void *addr); |
e9da6e99 | 143 | extern struct vm_struct *find_vm_area(const void *addr); |
c19c03fc | 144 | |
1da177e4 | 145 | extern int map_vm_area(struct vm_struct *area, pgprot_t prot, |
f6f8ed47 | 146 | struct page **pages); |
b554cb42 | 147 | #ifdef CONFIG_MMU |
8fc48985 TH |
148 | extern int map_kernel_range_noflush(unsigned long start, unsigned long size, |
149 | pgprot_t prot, struct page **pages); | |
150 | extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); | |
c19c03fc | 151 | extern void unmap_kernel_range(unsigned long addr, unsigned long size); |
b554cb42 GY |
152 | #else |
153 | static inline int | |
154 | map_kernel_range_noflush(unsigned long start, unsigned long size, | |
155 | pgprot_t prot, struct page **pages) | |
156 | { | |
157 | return size >> PAGE_SHIFT; | |
158 | } | |
159 | static inline void | |
160 | unmap_kernel_range_noflush(unsigned long addr, unsigned long size) | |
161 | { | |
162 | } | |
163 | static inline void | |
164 | unmap_kernel_range(unsigned long addr, unsigned long size) | |
165 | { | |
166 | } | |
167 | #endif | |
1da177e4 | 168 | |
5f4352fb | 169 | /* Allocate/destroy a 'vmalloc' VM area. */ |
cd12909c | 170 | extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes); |
5f4352fb JF |
171 | extern void free_vm_area(struct vm_struct *area); |
172 | ||
69beeb1d KM |
173 | /* for /dev/kmem */ |
174 | extern long vread(char *buf, char *addr, unsigned long count); | |
175 | extern long vwrite(char *buf, char *addr, unsigned long count); | |
176 | ||
1da177e4 LT |
177 | /* |
178 | * Internals. Dont't use.. | |
179 | */ | |
f1c4069e | 180 | extern struct list_head vmap_area_list; |
be9b7335 | 181 | extern __init void vm_area_add_early(struct vm_struct *vm); |
c0c0a293 | 182 | extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); |
1da177e4 | 183 | |
#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
/* no-MMU stubs: percpu vm areas cannot be allocated */
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif
db3808c1 JK |
207 | #ifdef CONFIG_MMU |
208 | #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) | |
db3808c1 | 209 | #else |
db3808c1 | 210 | #define VMALLOC_TOTAL 0UL |
db3808c1 JK |
211 | #endif |
212 | ||
4da56b99 CW |
213 | int register_vmap_purge_notifier(struct notifier_block *nb); |
214 | int unregister_vmap_purge_notifier(struct notifier_block *nb); | |
215 | ||
1da177e4 | 216 | #endif /* _LINUX_VMALLOC_H */ |