/*
 * linux/mm/vmalloc.c
 *
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/vmalloc.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
                            int node, void *caller);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
        pte_t *pte;

        pte = pte_offset_kernel(pmd, addr);
        do {
                pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
                WARN_ON(!pte_none(ptent) && !pte_present(ptent));
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
                                    unsigned long end)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                vunmap_pte_range(pmd, addr, next);
        } while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
                                    unsigned long end)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                vunmap_pmd_range(pud, addr, next);
        } while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;
        unsigned long end = addr + size;

        BUG_ON(addr >= end);
        pgd = pgd_offset_k(addr);
        flush_cache_vunmap(addr, end);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                vunmap_pud_range(pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
        flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
        unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
                          unsigned long end, pgprot_t prot, struct page ***pages)
{
        pte_t *pte;

        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
                struct page *page = **pages;
                WARN_ON(!pte_none(*pte));
                if (!page)
                        return -ENOMEM;
                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
                (*pages)++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
                                 unsigned long end, pgprot_t prot, struct page ***pages)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_alloc(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);
                if (vmap_pte_range(pmd, addr, next, prot, pages))
                        return -ENOMEM;
        } while (pmd++, addr = next, addr != end);
        return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
                                 unsigned long end, pgprot_t prot, struct page ***pages)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_alloc(&init_mm, pgd, addr);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);
                if (vmap_pmd_range(pud, addr, next, prot, pages))
                        return -ENOMEM;
        } while (pud++, addr = next, addr != end);
        return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
        pgd_t *pgd;
        unsigned long next;
        unsigned long addr = (unsigned long) area->addr;
        unsigned long end = addr + area->size - PAGE_SIZE;
        int err;

        BUG_ON(addr >= end);
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                err = vmap_pud_range(pgd, addr, next, prot, pages);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
        flush_cache_vmap((unsigned long) area->addr, end);
        return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
        unsigned long addr = (unsigned long) vmalloc_addr;
        struct page *page = NULL;
        pgd_t *pgd = pgd_offset_k(addr);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        /*
         * XXX we might need to change this if we add VIRTUAL_BUG_ON for
         * architectures that do not vmalloc module space
         */
        VIRTUAL_BUG_ON(!is_vmalloc_addr(vmalloc_addr) &&
                        !is_module_address(addr));

        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd)) {
                                ptep = pte_offset_map(pmd, addr);
                                pte = *ptep;
                                if (pte_present(pte))
                                        page = pte_page(pte);
                                pte_unmap(ptep);
                        }
                }
        }
        return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
        return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
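
/*
 * Illustrative sketch (not part of the original file): walking the
 * pages backing a vmalloc'ed buffer with vmalloc_to_page().  The
 * buffer and its size are hypothetical.
 *
 *        void *buf = vmalloc(8 * PAGE_SIZE);
 *        unsigned long off;
 *
 *        if (buf) {
 *                for (off = 0; off < 8 * PAGE_SIZE; off += PAGE_SIZE) {
 *                        struct page *page = vmalloc_to_page(buf + off);
 *                        ...
 *                }
 *                vfree(buf);
 *        }
 */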

static struct vm_struct *
__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
                   unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
        struct vm_struct **p, *tmp, *area;
        unsigned long align = 1;
        unsigned long addr;

        BUG_ON(in_interrupt());
        if (flags & VM_IOREMAP) {
                int bit = fls(size);

                if (bit > IOREMAP_MAX_ORDER)
                        bit = IOREMAP_MAX_ORDER;
                else if (bit < PAGE_SHIFT)
                        bit = PAGE_SHIFT;

                align = 1ul << bit;
        }
        addr = ALIGN(start, align);
        size = PAGE_ALIGN(size);
        if (unlikely(!size))
                return NULL;

        area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);

        if (unlikely(!area))
                return NULL;

        /*
         * We always allocate a guard page.
         */
        size += PAGE_SIZE;

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
                if ((unsigned long)tmp->addr < addr) {
                        if ((unsigned long)tmp->addr + tmp->size >= addr)
                                addr = ALIGN(tmp->size +
                                             (unsigned long)tmp->addr, align);
                        continue;
                }
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long)tmp->addr)
                        goto found;
                addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
                if (addr > end - size)
                        goto out;
        }
        if ((size + addr) < addr)
                goto out;
        if (addr > end - size)
                goto out;

found:
        area->next = *p;
        *p = area;

        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
        area->caller = caller;
        write_unlock(&vmlist_lock);

        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        if (printk_ratelimit())
                printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
        return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                unsigned long start, unsigned long end)
{
        return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
                                  __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
                                  -1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
                                     void *caller)
{
        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
                                  -1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
                                   int node, gfp_t gfp_mask)
{
        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
                                  gfp_mask, __builtin_return_address(0));
}
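
/*
 * Illustrative sketch (not part of the original file): an
 * ioremap-style user reserves a virtual range here and installs its
 * own mappings into it; the names below are hypothetical.
 *
 *        struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *
 *        if (!area)
 *                return NULL;
 *        ... map the physical range at area->addr ...
 *        ... and later tear it down with vunmap(area->addr) ...
 */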

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(const void *addr)
{
        struct vm_struct *tmp;

        for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
                if (tmp->addr == addr)
                        break;
        }

        return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(const void *addr)
{
        struct vm_struct **p, *tmp;

        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
                if (tmp->addr == addr)
                        goto found;
        }
        return NULL;

found:
        unmap_vm_area(tmp);
        *p = tmp->next;

        /*
         * Remove the guard page.
         */
        tmp->size -= PAGE_SIZE;
        return tmp;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
        struct vm_struct *v;
        write_lock(&vmlist_lock);
        v = __remove_vm_area(addr);
        write_unlock(&vmlist_lock);
        return v;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
        struct vm_struct *area;

        if (!addr)
                return;

        if ((PAGE_SIZE-1) & (unsigned long)addr) {
                WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                return;
        }

        area = remove_vm_area(addr);
        if (unlikely(!area)) {
                WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
                return;
        }

        debug_check_no_locks_freed(addr, area->size);
        debug_check_no_obj_freed(addr, area->size);

        if (deallocate_pages) {
                int i;

                for (i = 0; i < area->nr_pages; i++) {
                        struct page *page = area->pages[i];

                        BUG_ON(!page);
                        __free_page(page);
                }

                if (area->flags & VM_VPAGES)
                        vfree(area->pages);
                else
                        kfree(area->pages);
        }

        kfree(area);
        return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot)
{
        struct vm_struct *area;

        if (count > num_physpages)
                return NULL;

        area = get_vm_area_caller((count << PAGE_SHIFT), flags,
                                  __builtin_return_address(0));
        if (!area)
                return NULL;

        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
        }

        return area->addr;
}
EXPORT_SYMBOL(vmap);
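
/*
 * Illustrative sketch (not part of the original file): gluing two
 * separately allocated pages into one contiguous kernel mapping.
 * Note that vunmap() removes only the mapping; the pages survive.
 * Error handling and __free_page() calls are omitted.
 *
 *        struct page *pages[2];
 *        void *va;
 *
 *        pages[0] = alloc_page(GFP_KERNEL);
 *        pages[1] = alloc_page(GFP_KERNEL);
 *        va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *        if (va) {
 *                memset(va, 0, 2 * PAGE_SIZE);
 *                vunmap(va);
 *        }
 */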

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                 pgprot_t prot, int node, void *caller)
{
        struct page **pages;
        unsigned int nr_pages, array_size, i;

        nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));

        area->nr_pages = nr_pages;
        /* Please note that the recursion is strictly bounded. */
        if (array_size > PAGE_SIZE) {
                pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
                                       PAGE_KERNEL, node, caller);
                area->flags |= VM_VPAGES;
        } else {
                pages = kmalloc_node(array_size,
                                     (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
                                     node);
        }
        area->pages = pages;
        area->caller = caller;
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }

        for (i = 0; i < area->nr_pages; i++) {
                struct page *page;

                if (node < 0)
                        page = alloc_page(gfp_mask);
                else
                        page = alloc_pages_node(node, gfp_mask, 0);

                if (unlikely(!page)) {
                        /* Successfully allocated i pages, free them in __vunmap() */
                        area->nr_pages = i;
                        goto fail;
                }
                area->pages[i] = page;
        }

        if (map_vm_area(area, prot, &pages))
                goto fail;
        return area->addr;

fail:
        vfree(area->addr);
        return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
        return __vmalloc_area_node(area, gfp_mask, prot, -1,
                                   __builtin_return_address(0));
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
                            int node, void *caller)
{
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;

        area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
                                  node, gfp_mask, caller);

        if (!area)
                return NULL;

        return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
        return __vmalloc_node(size, gfp_mask, prot, -1,
                              __builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
                              -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
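
/*
 * Illustrative sketch (not part of the original file): the usual
 * allocate/use/free pairing.  The size is hypothetical; remember
 * that vfree() must not be called from interrupt context.
 *
 *        void *buf = vmalloc(128 * 1024);
 *
 *        if (!buf)
 *                return -ENOMEM;
 *        ...
 *        vfree(buf);
 */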

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
        struct vm_struct *area;
        void *ret;

        ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
        if (ret) {
                write_lock(&vmlist_lock);
                area = __find_vm_area(ret);
                area->flags |= VM_USERMAP;
                write_unlock(&vmlist_lock);
        }
        return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
                              node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
        struct vm_struct *area;
        void *ret;

        ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
        if (ret) {
                write_lock(&vmlist_lock);
                area = __find_vm_area(ret);
                area->flags |= VM_USERMAP;
                write_unlock(&vmlist_lock);
        }
        return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * vread()/vwrite() copy a range of kernel virtual memory to or from
 * a buffer, walking vmlist and touching only vmalloc'ed areas; gaps
 * between areas are zero-filled on read and skipped on write.
 */
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
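
/*
 * Illustrative sketch (not part of the original file): a /dev/kmem
 * style reader pulling bytes out of the vmalloc region with vread();
 * the destination buffer and start address are hypothetical.
 *
 *        char tmp[256];
 *        long copied = vread(tmp, (char *)VMALLOC_START, sizeof(tmp));
 */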

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criterion isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                        unsigned long pgoff)
{
        struct vm_struct *area;
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
        int ret;

        if ((PAGE_SIZE-1) & (unsigned long)addr)
                return -EINVAL;

        read_lock(&vmlist_lock);
        area = __find_vm_area(addr);
        if (!area)
                goto out_einval_locked;

        if (!(area->flags & VM_USERMAP))
                goto out_einval_locked;

        if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
                goto out_einval_locked;
        read_unlock(&vmlist_lock);

        addr += pgoff << PAGE_SHIFT;
        do {
                struct page *page = vmalloc_to_page(addr);
                ret = vm_insert_page(vma, uaddr, page);
                if (ret)
                        return ret;

                uaddr += PAGE_SIZE;
                addr += PAGE_SIZE;
                usize -= PAGE_SIZE;
        } while (usize > 0);

        /* Prevent "things" like memory migration? VM_flags need a cleanup... */
        vma->vm_flags |= VM_RESERVED;

        return ret;

out_einval_locked:
        read_unlock(&vmlist_lock);
        return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
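
/*
 * Illustrative sketch (not part of the original file): a driver mmap
 * handler backing the whole vma with a vmalloc_user() buffer; my_buf
 * and my_mmap are hypothetical.  vmalloc_user() sets VM_USERMAP,
 * which the check above requires.
 *
 *        static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *        {
 *                return remap_vmalloc_range(vma, my_buf, 0);
 *        }
 */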

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
        /* apply_to_page_range() does all the hard work. */
        return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created. If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
        struct vm_struct *area;

        area = get_vm_area_caller(size, VM_IOREMAP,
                                  __builtin_return_address(0));
        if (area == NULL)
                return NULL;

        /*
         * This ensures that page tables are constructed for this region
         * of kernel virtual address space and mapped into init_mm.
         */
        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
                                area->size, f, NULL)) {
                free_vm_area(area);
                return NULL;
        }

        /* Make sure the pagetables are constructed in process kernel
           mappings */
        vmalloc_sync_all();

        return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
        struct vm_struct *ret;
        ret = remove_vm_area(area->addr);
        BUG_ON(ret != area);
        kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
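
/*
 * Illustrative sketch (not part of the original file): reserving a
 * page-table-backed range whose ptes are filled in later, e.g. by a
 * hypervisor installing a mapping; the size is hypothetical.
 *
 *        struct vm_struct *area = alloc_vm_area(PAGE_SIZE);
 *
 *        if (!area)
 *                return -ENOMEM;
 *        ... hand area->addr over so the mapping gets installed ...
 *        free_vm_area(area);
 */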


#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
        loff_t n = *pos;
        struct vm_struct *v;

        read_lock(&vmlist_lock);
        v = vmlist;
        while (n > 0 && v) {
                n--;
                v = v->next;
        }
        if (!n)
                return v;

        return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct vm_struct *v = p;

        ++*pos;
        return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
        read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
        if (NUMA_BUILD) {
                unsigned int nr, *counters = m->private;

                if (!counters)
                        return;

                memset(counters, 0, nr_node_ids * sizeof(unsigned int));

                for (nr = 0; nr < v->nr_pages; nr++)
                        counters[page_to_nid(v->pages[nr])]++;

                for_each_node_state(nr, N_HIGH_MEMORY)
                        if (counters[nr])
                                seq_printf(m, " N%u=%u", nr, counters[nr]);
        }
}

static int s_show(struct seq_file *m, void *p)
{
        struct vm_struct *v = p;

        seq_printf(m, "0x%p-0x%p %7ld",
                   v->addr, v->addr + v->size, v->size);

        if (v->caller) {
                char buff[2 * KSYM_NAME_LEN];

                seq_putc(m, ' ');
                sprint_symbol(buff, (unsigned long)v->caller);
                seq_puts(m, buff);
        }

        if (v->nr_pages)
                seq_printf(m, " pages=%d", v->nr_pages);

        if (v->phys_addr)
                seq_printf(m, " phys=%lx", v->phys_addr);

        if (v->flags & VM_IOREMAP)
                seq_printf(m, " ioremap");

        if (v->flags & VM_ALLOC)
                seq_printf(m, " vmalloc");

        if (v->flags & VM_MAP)
                seq_printf(m, " vmap");

        if (v->flags & VM_USERMAP)
                seq_printf(m, " user");

        if (v->flags & VM_VPAGES)
                seq_printf(m, " vpages");

        show_numa_info(m, v);
        seq_putc(m, '\n');
        return 0;
}

const struct seq_operations vmalloc_op = {
        .start = s_start,
        .next = s_next,
        .stop = s_stop,
        .show = s_show,
};
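
/*
 * Illustrative sketch (not part of the original file): vmalloc_op is
 * hooked up from the /proc code, roughly like this (the open helper
 * is hypothetical, and a NUMA build also sizes m->private for the
 * per-node counters used by show_numa_info()):
 *
 *        static int vmalloc_open(struct inode *inode, struct file *file)
 *        {
 *                return seq_open(file, &vmalloc_op);
 *        }
 */
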
#endif