/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
        pte_t *pte;

        pte = pte_offset_kernel(pmd, addr);
        do {
                pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
                WARN_ON(!pte_none(ptent) && !pte_present(ptent));
        } while (pte++, addr += PAGE_SIZE, addr != end);
}
static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
                                    unsigned long end)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                vunmap_pte_range(pmd, addr, next);
        } while (pmd++, addr = next, addr != end);
}
static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
                                    unsigned long end)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                vunmap_pmd_range(pud, addr, next);
        } while (pud++, addr = next, addr != end);
}
void unmap_vm_area(struct vm_struct *area)
{
        pgd_t *pgd;
        unsigned long next;
        unsigned long addr = (unsigned long) area->addr;
        unsigned long end = addr + area->size;

        pgd = pgd_offset_k(addr);
        flush_cache_vunmap(addr, end);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                vunmap_pud_range(pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
        flush_tlb_kernel_range((unsigned long) area->addr, end);
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
                        unsigned long end, pgprot_t prot, struct page ***pages)
{
        pte_t *pte;

        pte = pte_alloc_kernel(&init_mm, pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
                struct page *page = **pages;
                WARN_ON(!pte_none(*pte));
                if (!page)
                        return -ENOMEM;
                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
                (*pages)++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;
}
static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
                        unsigned long end, pgprot_t prot, struct page ***pages)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_alloc(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);
                if (vmap_pte_range(pmd, addr, next, prot, pages))
                        return -ENOMEM;
        } while (pmd++, addr = next, addr != end);
        return 0;
}
static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
                        unsigned long end, pgprot_t prot, struct page ***pages)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_alloc(&init_mm, pgd, addr);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);
                if (vmap_pmd_range(pud, addr, next, prot, pages))
                        return -ENOMEM;
        } while (pud++, addr = next, addr != end);
        return 0;
}
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
        pgd_t *pgd;
        unsigned long next;
        unsigned long addr = (unsigned long) area->addr;
        /* area->size includes the guard page; do not map it */
        unsigned long end = addr + area->size - PAGE_SIZE;
        int err;

        pgd = pgd_offset_k(addr);
        spin_lock(&init_mm.page_table_lock);
        do {
                next = pgd_addr_end(addr, end);
                err = vmap_pud_range(pgd, addr, next, prot, pages);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
        spin_unlock(&init_mm.page_table_lock);
        flush_cache_vmap((unsigned long) area->addr, end);
        return err;
}
struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
                                unsigned long start, unsigned long end, int node)
{
        struct vm_struct **p, *tmp, *area;
        unsigned long align = 1;
        unsigned long addr;

        if (flags & VM_IOREMAP) {
                int bit = fls(size);

                if (bit > IOREMAP_MAX_ORDER)
                        bit = IOREMAP_MAX_ORDER;
                else if (bit < PAGE_SHIFT)
                        bit = PAGE_SHIFT;

                align = 1ul << bit;
        }
        addr = ALIGN(start, align);
        size = PAGE_ALIGN(size);

        area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
        if (unlikely(!area))
                return NULL;

        if (unlikely(!size)) {
                kfree(area);
                return NULL;
        }

        /*
         * We always allocate a guard page.
         */
        size += PAGE_SIZE;

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
                if ((unsigned long)tmp->addr < addr) {
                        if((unsigned long)tmp->addr + tmp->size >= addr)
                                addr = ALIGN(tmp->size +
                                             (unsigned long)tmp->addr, align);
                        continue;
                }
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long)tmp->addr)
                        goto found;
                addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
                if (addr > end - size)
                        goto out;
        }

found:
        area->next = *p;
        *p = area;

        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
        write_unlock(&vmlist_lock);

        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        if (printk_ratelimit())
                printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
        return NULL;
}
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                unsigned long start, unsigned long end)
{
        return __get_vm_area_node(size, flags, start, end, -1);
}
/**
 * get_vm_area  -  reserve a contiguous kernel virtual area
 *
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
        return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
{
        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
}
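
/*
 * Illustrative sketch, not part of the original file: this is roughly how an
 * ioremap()-style caller pairs get_vm_area() with remove_vm_area() to reserve
 * and later release a chunk of kernel virtual address space.  The helper
 * names and the 64K size are made up for the example; a real caller would
 * also install and tear down the actual page table entries for the range.
 */
static void *example_reserve_va(void)
{
        struct vm_struct *area = get_vm_area(64 * 1024, VM_IOREMAP);

        return area ? area->addr : NULL;        /* a guard page is added internally */
}

static void example_release_va(void *addr)
{
        struct vm_struct *area = remove_vm_area(addr);  /* also unmaps the range */

        kfree(area);                            /* the descriptor came from kmalloc */
}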
/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
        struct vm_struct **p, *tmp;

        for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
                if (tmp->addr == addr)
                        goto found;
        }
        return NULL;

found:
        unmap_vm_area(tmp);
        *p = tmp->next;

        /*
         * Remove the guard page.
         */
        tmp->size -= PAGE_SIZE;
        return tmp;
}
/**
 * remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
        struct vm_struct *v;

        write_lock(&vmlist_lock);
        v = __remove_vm_area(addr);
        write_unlock(&vmlist_lock);
        return v;
}
void __vunmap(void *addr, int deallocate_pages)
{
        struct vm_struct *area;

        if (!addr)
                return;

        if ((PAGE_SIZE-1) & (unsigned long)addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                WARN_ON(1);
                return;
        }

        area = remove_vm_area(addr);
        if (unlikely(!area)) {
                printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
                WARN_ON(1);
                return;
        }

        if (deallocate_pages) {
                int i;

                for (i = 0; i < area->nr_pages; i++) {
                        if (unlikely(!area->pages[i]))
                                BUG();
                        __free_page(area->pages[i]);
                }

                /* the pages[] array itself came from vmalloc or kmalloc */
                if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
                        vfree(area->pages);
                else
                        kfree(area->pages);
        }

        kfree(area);
        return;
}
/**
 * vfree  -  release memory allocated by vmalloc()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc().  If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
/**
 * vunmap  -  release virtual mapping obtained by vmap()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
/**
 * vmap  -  map an array of pages into virtually contiguous space
 *
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
                unsigned long flags, pgprot_t prot)
{
        struct vm_struct *area;

        if (count > num_physpages)
                return NULL;

        area = get_vm_area((count << PAGE_SHIFT), flags);
        if (!area)
                return NULL;
        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
        }

        return area->addr;
}
EXPORT_SYMBOL(vmap);
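
/*
 * Illustrative sketch, not part of the original file: gather a few physically
 * scattered pages and view them through one contiguous kernel virtual range
 * with vmap(), then drop the mapping again with vunmap().  The helper name
 * and the fixed count of 4 pages are made up for the example; VM_MAP is the
 * conventional flag for such mappings.
 */
static void example_vmap_demo(void)
{
        struct page *pages[4];
        void *virt;
        int i;

        for (i = 0; i < 4; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto out;
        }

        virt = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
        if (virt) {
                memset(virt, 0, 4 * PAGE_SIZE); /* use the contiguous view */
                vunmap(virt);                   /* removes only the mapping */
        }
out:
        while (i--)
                __free_page(pages[i]);          /* the pages themselves are still ours to free */
}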
void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                pgprot_t prot, int node)
{
        struct page **pages;
        unsigned int nr_pages, array_size, i;

        nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));

        area->nr_pages = nr_pages;
        /* Please note that the recursion is strictly bounded. */
        if (array_size > PAGE_SIZE)
                pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
        else
                pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
        area->pages = pages;
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }
        memset(area->pages, 0, array_size);

        for (i = 0; i < area->nr_pages; i++) {
                if (node < 0)
                        area->pages[i] = alloc_page(gfp_mask);
                else
                        area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
                if (unlikely(!area->pages[i])) {
                        /* Successfully allocated i pages, free them in __vunmap() */
                        area->nr_pages = i;
                        goto fail;
                }
        }

        if (map_vm_area(area, prot, &pages))
                goto fail;
        return area->addr;

fail:
        vfree(area->addr);
        return NULL;
}
void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
        return __vmalloc_area_node(area, gfp_mask, prot, -1);
}
/**
 * __vmalloc_node  -  allocate virtually contiguous memory
 *
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
                        int node)
{
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;

        area = get_vm_area_node(size, VM_ALLOC, node);
        if (!area)
                return NULL;

        return __vmalloc_area_node(area, gfp_mask, prot, node);
}
EXPORT_SYMBOL(__vmalloc_node);
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
        return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);
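
/*
 * Illustrative sketch, not part of the original file: __vmalloc() exposes the
 * gfp mask and page protection directly.  The helper below, whose name is
 * made up for the example, behaves like vmalloc() but drops __GFP_HIGHMEM so
 * the buffer is backed purely by lowmem pages.
 */
static void *example_lowmem_vmalloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}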
/**
 * vmalloc  -  allocate virtually contiguous memory
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
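
/*
 * Illustrative sketch, not part of the original file: typical vmalloc() use
 * for a buffer that only needs to be virtually contiguous, released again
 * with vfree() from process context.  The table size is made up for the
 * example.
 */
static int *example_alloc_table(void)
{
        int *table = vmalloc(1024 * 1024 * sizeof(int));

        if (table)
                memset(table, 0, 1024 * 1024 * sizeof(int));
        return table;                   /* release later with vfree(table) */
}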
/**
 * vmalloc_node  -  allocate memory on a specific node
 *
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);
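
/*
 * Illustrative sketch, not part of the original file: one scratch buffer per
 * online NUMA node, so CPUs on each node work on node-local pages.  The array
 * name and the 64K size are made up for the example; for_each_online_node()
 * and MAX_NUMNODES come from <linux/nodemask.h>.
 */
static void *example_scratch[MAX_NUMNODES];

static void example_alloc_per_node(void)
{
        int nid;

        for_each_online_node(nid)
                example_scratch[nid] = vmalloc_node(64 * 1024, nid);
}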
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec  -  allocate virtually contiguous, executable memory
 *
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
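
/*
 * Illustrative sketch, not part of the original file: a loader-style caller
 * copies code into memory obtained with vmalloc_exec() so it can later be
 * executed.  The helper name is made up for the example, and a real caller
 * must still flush the instruction cache before jumping into the code.
 */
static void *example_load_code(const void *image, unsigned long len)
{
        void *text = vmalloc_exec(len);

        if (text)
                memcpy(text, image, len);
        return text;
}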
/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
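
/*
 * Illustrative sketch, not part of the original file: a driver whose device
 * can only address 32-bit physical memory can use vmalloc_32() for a large
 * software buffer; in this version that simply means GFP_KERNEL (lowmem)
 * pages.  The 2MB size is made up for the example.
 */
static void *example_dma32_buffer(void)
{
        return vmalloc_32(2 * 1024 * 1024);
}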
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
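
/*
 * Illustrative sketch, not part of the original file: vread() is what
 * /proc/kcore-style readers use to copy out of the vmalloc region without
 * touching guard pages; bytes that fall into the gap before an area are
 * returned as zeroes.  The wrapper name is made up for the example, and a
 * real caller would copy the result on to user space afterwards.
 */
static long example_peek_vmalloc(char *kbuf, char *vmalloc_addr, unsigned long len)
{
        return vread(kbuf, vmalloc_addr, len);  /* returns bytes placed in kbuf */
}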