// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>

#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"
#include "pgalloc-track.h"
bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;
	int cleared;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		cleared = p4d_clear_huge(p4d);
		if (cleared || p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (cleared)
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}
/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @start: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @start. The VM area specified by @start and
 * @size should have been allocated using get_vm_area() and its friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is responsible
 * for calling flush_cache_vunmap() on to-be-mapped areas before calling this
 * function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}
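
/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * how a caller is expected to bracket the "noflush" helper with the cache
 * and TLB maintenance it deliberately skips:
 *
 *	flush_cache_vunmap(addr, addr + size);
 *	unmap_kernel_range_noflush(addr, size);
 *	flush_tlb_kernel_range(addr, addr + size);
 *
 * This is exactly the sequence wrapped up by unmap_kernel_range() further
 * down in this file.
 */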
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr. The VM area specified by @addr and @size
 * should have been allocated using get_vm_area() and its friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is responsible for
 * calling flush_cache_vmap() on to-be-mapped areas before calling this
 * function.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	unsigned long end = addr + size;
	unsigned long next;
	pgd_t *pgd;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}
EXPORT_SYMBOL(map_kernel_range_noflush);

int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages)
{
	int ret;

	ret = map_kernel_range_noflush(start, size, prot, pages);
	flush_cache_vmap(start, start + size);
	return ret;
}
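
/*
 * Editor's note: an illustrative sketch (not part of the original file).
 * A caller that already holds a VM area from get_vm_area() typically maps
 * its pages with the flushing wrapper; "area", "nr_pages" and "pages" are
 * assumed to be the caller's own:
 *
 *	unsigned long addr = (unsigned long)area->addr;
 *	unsigned long size = (unsigned long)nr_pages << PAGE_SHIFT;
 *
 *	if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0)
 *		return -ENOMEM;
 *
 * The _noflush variant installs the same mappings but leaves the
 * flush_cache_vmap() call to the caller.
 */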
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
	 * not [unambiguously] associated with a struct page, so there is
	 * no correct value to return for them.
	 */
	WARN_ON_ONCE(pud_bad(*pud));
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	WARN_ON_ONCE(pmd_bad(*pmd));
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
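
/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * translating a vmalloc()'ed address back to its backing page, e.g. when
 * building a scatterlist; "buf" and "offset" are assumed caller state:
 *
 *	struct page *page = vmalloc_to_page(buf + offset);
 *	unsigned long pfn = vmalloc_to_pfn(buf + offset);
 *
 * Only regular PTE mappings have an unambiguous struct page; see the
 * huge-mapping comment inside vmalloc_to_page() above.
 */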

/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augment red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find a lowest match of free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;
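
/*
 * Editor's illustration (not from the original source): if a node's own
 * free block spans 16 pages, its left sub-tree's largest free block is
 * 8 pages and its right sub-tree's largest is 32 pages, then its
 * subtree_max_size is max3(16, 8, 32) = 32, so a search for any request
 * up to 32 pages may descend into this sub-tree without walking it first.
 */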
/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

/*
 * Gets called when a node is removed or the tree is rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}
/*
 * This function returns the address of the parent node
 * and of its left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further
 * steps regarding inserting of a conflicting overlap range
 * have to be declined and actually considered as a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * which I name "link"; this is where the new va->rb_node will
	 * be attached to.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checks.
		 * Trigger a warning if there are partial (left/right)
		 * or full overlaps.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * That is because we populate the tree from the bottom
		 * up to parent levels once the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (root == &free_vmap_area_root)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from the VA point. The propagation must be done
 * when the VA size is modified by changing its va_start/va_end, or
 * in case of a newly inserted VA in the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1
 * its subtree_max_size is updated only, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and parent
 * nodes are recalculated up to the root.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from bottom towards the root until
	 * the calculated maximum available size of checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}
static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

/*
 * Merge de-allocated chunk of VA memory with previous
 * and next free blocks. If coalescing is not done a new
 * free area is inserted. If VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlapping
 * ranges, followed by a WARN() report. Although this is
 * buggy behaviour, the system can stay alive and keep
 * working.
 */
static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				unlink_va(va, root);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		link_va(va, root, parent, link, head);

	return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = merge_or_add_vmap_area(va, root, head);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}
/*
 * Find the first free block (lowest start address) in the tree
 * that satisfies the request described by the passed parameters.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = free_vmap_area_root.rb_node;

	/* Adjust the search size for alignment overhead. */
	length = size + align - 1;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal or bigger to the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree,
			 * that will satisfy the search criteria. It can happen
			 * only once due to "vstart" restriction.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(unsigned long size)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(size, 1, vstart);
	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}

static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size,
	enum fit_type type)
{
	struct vmap_area *lva = NULL;

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 */
		unlink_va(va, &free_vmap_area_root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For percpu allocator we do not do any pre-allocation
			 * and leave it as it is. The reason is it most likely
			 * never ends up with NE_FIT_TYPE splitting. In case of
			 * percpu allocations offsets and sizes are aligned to
			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
			 * are its main fitting cases.
			 *
			 * There are a few exceptions though, as an example it is
			 * a first allocation (early boot up) when we have "one"
			 * big free space that has to be split.
			 *
			 * Also we can hit this path in case of regular "vmap"
			 * allocations, if "this" current CPU was not preloaded.
			 * See the comment in alloc_vmap_area() why. If so, then
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purpose. That is rare and most time does not
			 * occur.
			 *
			 * What happens if an allocation gets failed. Basically,
			 * an "overflow" path is triggered to purge lazily freed
			 * areas to free some memory, then, the "retry" path is
			 * triggered to repeat one more time. See more details
			 * in alloc_vmap_area() function.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node,
				&free_vmap_area_root, &free_vmap_area_list);
	}

	return 0;
}

/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise, vend is returned to indicate failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	struct vmap_area *va;
	enum fit_type type;
	int ret;

	va = find_vmap_lowest_match(size, align, vstart);
	if (unlikely(!va))
		return vend;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Classify what we have found. */
	type = classify_va_fit_type(va, nva_start_addr, size);
	if (WARN_ON_ONCE(type == NOTHING_FIT))
		return vend;

	/* Update the free vmap_area. */
	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
	if (ret)
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(size);
#endif

	return nva_start_addr;
}
/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va, *pva;
	unsigned long addr;
	int purged = 0;
	int ret;

	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();
	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;

	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);

retry:
	/*
	 * Preload this CPU with one extra vmap_area object. It is used
	 * when fit type of free area is NE_FIT_TYPE. Please note, it
	 * does not guarantee that an allocation occurs on a CPU that
	 * is preloaded, instead we minimize the case when it is not.
	 * It can happen because of cpu migration, because there is a
	 * race until the below spinlock is taken.
	 *
	 * The preload is done in non-atomic context, thus it allows us
	 * to use more permissive allocation masks to be more stable under
	 * low memory condition and high memory pressure. In rare case,
	 * if not preloaded, GFP_NOWAIT is used.
	 *
	 * Set "pva" to NULL here, because of "retry" path.
	 */
	pva = NULL;

	if (!this_cpu_read(ne_fit_preload_node))
		/*
		 * Even if it fails we do not really care about that.
		 * Just proceed as it is. If needed "overflow" path
		 * will refill the cache we allocate from.
		 */
		pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(&free_vmap_area_lock);

	if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva))
		kmem_cache_free(vmap_area_cachep, pva);

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	addr = __alloc_vmap_area(size, align, vstart, vend);
	spin_unlock(&free_vmap_area_lock);

	if (unlikely(addr == vend))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->vm = NULL;

	spin_lock(&vmap_area_lock);
	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	ret = kasan_populate_vmalloc(addr, size);
	if (ret) {
		free_vmap_area(va);
		return ERR_PTR(ret);
	}

	return va;

overflow:
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	if (gfpflags_allow_blocking(gfp_mask)) {
		unsigned long freed = 0;
		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
		if (freed > 0) {
			purged = 0;
			goto retry;
		}
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}

int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
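
/*
 * Editor's worked example (not from the original source): with 16 online
 * CPUs and 4 KiB pages, fls(16) = 5, so lazy_max_pages() returns
 * 5 * (32 MiB / 4 KiB) = 40960 pages, i.e. roughly 160 MiB of lazily
 * freed virtual address space is gathered before a purge and TLB flush.
 */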
static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);

/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	unsigned long resched_threshold;
	struct list_head local_pure_list;
	struct vmap_area *va, *n_va;

	lockdep_assert_held(&vmap_purge_lock);

	spin_lock(&purge_vmap_area_lock);
	purge_vmap_area_root = RB_ROOT;
	list_replace_init(&purge_vmap_area_list, &local_pure_list);
	spin_unlock(&purge_vmap_area_lock);

	if (unlikely(list_empty(&local_pure_list)))
		return false;

	start = min(start,
		list_first_entry(&local_pure_list,
			struct vmap_area, list)->va_start);

	end = max(end,
		list_last_entry(&local_pure_list,
			struct vmap_area, list)->va_end);

	flush_tlb_kernel_range(start, end);
	resched_threshold = lazy_max_pages() << 1;

	spin_lock(&free_vmap_area_lock);
	list_for_each_entry_safe(va, n_va, &local_pure_list, list) {
		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
		unsigned long orig_start = va->va_start;
		unsigned long orig_end = va->va_end;

		/*
		 * Finally insert or merge lazily-freed area. It is
		 * detached and there is no need to "unlink" it from
		 * anything.
		 */
		va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
				&free_vmap_area_list);

		if (!va)
			continue;

		if (is_vmalloc_or_module_addr((void *)orig_start))
			kasan_release_vmalloc(orig_start, orig_end,
					      va->va_start, va->va_end);

		atomic_long_sub(nr, &vmap_lazy_nr);

		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
			cond_resched_lock(&free_vmap_area_lock);
	}
	spin_unlock(&free_vmap_area_lock);
	return true;
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	if (mutex_trylock(&vmap_purge_lock)) {
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);
	}
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	unsigned long nr_lazy;

	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
				PAGE_SHIFT, &vmap_lazy_nr);

	/*
	 * Merge or place it to the purge tree/list.
	 */
	spin_lock(&purge_vmap_area_lock);
	merge_or_add_vmap_area(va,
		&purge_vmap_area_root, &purge_vmap_area_list);
	spin_unlock(&purge_vmap_area_lock);

	/* After this point, we may free va at any time */
	if (unlikely(nr_lazy > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start);
	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
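
/*
 * Editor's worked example (not from the original source): on 64-bit with
 * NR_CPUS = 64 and 4 KiB pages, VMALLOC_PAGES = 128 GiB / 4 KiB, so
 * VMALLOC_PAGES / 64 / 16 = 32768 bits, which the clamp reduces to
 * VMAP_BBMAP_BITS_MAX = 1024, giving a 4 MiB VMAP_BLOCK_SIZE. On 32-bit
 * with NR_CPUS = 4: 32768 / 4 / 16 = 512 bits, i.e. a 2 MiB block.
 */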
struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * XArray of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_XARRAY(vmap_blocks);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}

/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
 * @order:    how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
	if (err) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;

	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}

static void vb_free(unsigned long addr, unsigned long size)
{
	unsigned long offset;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap(addr, addr + size);

	order = get_order(size);
	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
	vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));

	unmap_kernel_range_noflush(addr, size);

	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(addr, addr + size);

	spin_lock(&vb->lock);

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
{
	int cpu;

	if (unlikely(!vmap_initialized))
		return;

	might_sleep();

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
			if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end   = max(e, end);

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	if (!__purge_vmap_area_lazy(start, end) && flush)
		flush_tlb_kernel_range(start, end);
	mutex_unlock(&vmap_purge_lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int flush = 0;

	_vm_unmap_aliases(start, end, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;
	struct vmap_area *va;

	might_sleep();
	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(!PAGE_ALIGNED(addr));

	kasan_poison_vmalloc(mem, size);

	if (likely(count <= VMAP_MAX_ALLOC)) {
		debug_check_no_locks_freed(mem, size);
		vb_free(addr, size);
		return;
	}

	va = find_vmap_area(addr);
	BUG_ON(!va);
	debug_check_no_locks_freed((void *)va->va_start,
				    (va->va_end - va->va_start));
	free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good.  But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine).  You could see failures in
 * the end.  Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}

	kasan_unpoison_vmalloc(mem, size);

	if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
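
/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * the intended short-lived pairing of vm_map_ram()/vm_unmap_ram(); "pages"
 * and "nr" are assumed to be the caller's own:
 *
 *	void *buf = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *	if (!buf)
 *		return -ENOMEM;
 *	... access the pages through the linear mapping at buf ...
 *	vm_unmap_ram(buf, nr);
 */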
static struct vm_struct *vmlist __initdata;

/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}

static void vmap_init_free_space(void)
{
	unsigned long vmap_start = 1;
	const unsigned long vmap_end = ULONG_MAX;
	struct vmap_area *busy, *free;

	/*
	 *     B     F     B     B     B     F
	 * -|-----|.....|-----|-----|-----|.....|-
	 *  |           The KVA space           |
	 *  |<--------------------------------->|
	 */
	list_for_each_entry(busy, &vmap_area_list, list) {
		if (busy->va_start - vmap_start > 0) {
			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
			if (!WARN_ON_ONCE(!free)) {
				free->va_start = vmap_start;
				free->va_end = busy->va_start;

				insert_vmap_area_augment(free, NULL,
					&free_vmap_area_root,
						&free_vmap_area_list);
			}
		}

		vmap_start = busy->va_end;
	}

	if (vmap_end - vmap_start > 0) {
		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (!WARN_ON_ONCE(!free)) {
			free->va_start = vmap_start;
			free->va_end = vmap_end;

			insert_vmap_area_augment(free, NULL,
				&free_vmap_area_root,
					&free_vmap_area_list);
		}
	}
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	/*
	 * Create the cache for vmap_area objects.
	 */
	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (WARN_ON_ONCE(!va))
			continue;

		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	}

	/*
	 * Now we can initialize a free vmap space.
	 */
	vmap_init_free_space();
	vmap_initialized = true;
}

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, end);
}

static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
	struct vmap_area *va, unsigned long flags, const void *caller)
{
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
}

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	setup_vmalloc_vm_locked(vm, va, flags, caller);
	spin_unlock(&vmap_area_lock);
}

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	unsigned long requested_size = size;

	BUG_ON(in_interrupt());
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, get_count_order_long(size),
				       PAGE_SHIFT, IOREMAP_MAX_ORDER);

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	kasan_unpoison_vmalloc((void *)va->va_start, requested_size);

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size:	 size of the area
 * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}
EXPORT_SYMBOL(get_vm_area);
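
/*
 * Editor's note: an illustrative sketch (not part of the original file).
 * get_vm_area() only reserves virtual space; a caller such as an ioremap()
 * implementation still has to install the translation it wants, e.g.
 * ("size", "phys_addr" and "prot" are assumed caller state):
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *	if (ioremap_page_range((unsigned long)area->addr,
 *			       (unsigned long)area->addr + size,
 *			       phys_addr, prot)) {
 *		free_vm_area(area);
 *		return NULL;
 *	}
 */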
2137 struct vm_struct
*get_vm_area_caller(unsigned long size
, unsigned long flags
,
2140 return __get_vm_area_node(size
, 1, flags
, VMALLOC_START
, VMALLOC_END
,
2141 NUMA_NO_NODE
, GFP_KERNEL
, caller
);
2145 * find_vm_area - find a continuous kernel virtual area
2146 * @addr: base address
2148 * Search for the kernel VM area starting at @addr, and return it.
2149 * It is up to the caller to do all required locking to keep the returned
2152 * Return: the area descriptor on success or %NULL on failure.
2154 struct vm_struct
*find_vm_area(const void *addr
)
2156 struct vmap_area
*va
;
2158 va
= find_vmap_area((unsigned long)addr
);
2166 * remove_vm_area - find and remove a continuous kernel virtual area
2167 * @addr: base address
2169 * Search for the kernel VM area starting at @addr, and remove it.
2170 * This function returns the found VM area, but using it is NOT safe
2171 * on SMP machines, except for its size or flags.
2173 * Return: the area descriptor on success or %NULL on failure.
2175 struct vm_struct
*remove_vm_area(const void *addr
)
2177 struct vmap_area
*va
;
2181 spin_lock(&vmap_area_lock
);
2182 va
= __find_vmap_area((unsigned long)addr
);
2184 struct vm_struct
*vm
= va
->vm
;
2187 spin_unlock(&vmap_area_lock
);
2189 kasan_free_shadow(vm
);
2190 free_unmap_vmap_area(va
);
2195 spin_unlock(&vmap_area_lock
);
2199 static inline void set_area_direct_map(const struct vm_struct
*area
,
2200 int (*set_direct_map
)(struct page
*page
))
2204 for (i
= 0; i
< area
->nr_pages
; i
++)
2205 if (page_address(area
->pages
[i
]))
2206 set_direct_map(area
->pages
[i
]);
2209 /* Handle removing and resetting vm mappings related to the vm_struct. */
2210 static void vm_remove_mappings(struct vm_struct
*area
, int deallocate_pages
)
2212 unsigned long start
= ULONG_MAX
, end
= 0;
2213 int flush_reset
= area
->flags
& VM_FLUSH_RESET_PERMS
;
2217 remove_vm_area(area
->addr
);
2219 /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2224 * If not deallocating pages, just do the flush of the VM area and
2227 if (!deallocate_pages
) {
2233 * If execution gets here, flush the vm mapping and reset the direct
2234 * map. Find the start and end range of the direct mappings to make sure
2235 * the vm_unmap_aliases() flush includes the direct map.
2237 for (i
= 0; i
< area
->nr_pages
; i
++) {
2238 unsigned long addr
= (unsigned long)page_address(area
->pages
[i
]);
2240 start
= min(addr
, start
);
2241 end
= max(addr
+ PAGE_SIZE
, end
);
2247 * Set direct map to something invalid so that it won't be cached if
2248 * there are any accesses after the TLB flush, then flush the TLB and
2249 * reset the direct map permissions to the default.
2251 set_area_direct_map(area
, set_direct_map_invalid_noflush
);
2252 _vm_unmap_aliases(start
, end
, flush_dmap
);
2253 set_area_direct_map(area
, set_direct_map_default_noflush
);
2256 static void __vunmap(const void *addr
, int deallocate_pages
)
2258 struct vm_struct
*area
;
2263 if (WARN(!PAGE_ALIGNED(addr
), "Trying to vfree() bad address (%p)\n",
2267 area
= find_vm_area(addr
);
2268 if (unlikely(!area
)) {
2269 WARN(1, KERN_ERR
"Trying to vfree() nonexistent vm area (%p)\n",
2274 debug_check_no_locks_freed(area
->addr
, get_vm_area_size(area
));
2275 debug_check_no_obj_freed(area
->addr
, get_vm_area_size(area
));
2277 kasan_poison_vmalloc(area
->addr
, get_vm_area_size(area
));
2279 vm_remove_mappings(area
, deallocate_pages
);
2281 if (deallocate_pages
) {
2284 for (i
= 0; i
< area
->nr_pages
; i
++) {
2285 struct page
*page
= area
->pages
[i
];
2288 __free_pages(page
, 0);
2290 atomic_long_sub(area
->nr_pages
, &nr_vmalloc_pages
);
2292 kvfree(area
->pages
);

static inline void __vfree_deferred(const void *addr)
{
	/*
	 * Use raw_cpu_ptr() because this can be called from preemptible
	 * context. Preemption is absolutely fine here, because the llist_add()
	 * implementation is lockless, so it works even if we are adding to
	 * another cpu's list. schedule_work() should be fine with this too.
	 */
	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);

	if (llist_add((struct llist_node *)addr, &p->list))
		schedule_work(&p->wq);
}

/**
 * vfree_atomic - release memory allocated by vmalloc()
 * @addr:	  memory base address
 *
 * This one is just like vfree() but can be called in any atomic context
 * except NMIs.
 */
void vfree_atomic(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	__vfree_deferred(addr);
}

static void __vfree(const void *addr)
{
	if (unlikely(in_interrupt()))
		__vfree_deferred(addr);
	else
		__vunmap(addr, 1);
}

/**
 * vfree - Release memory allocated by vmalloc()
 * @addr:  Memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as obtained
 * from one of the vmalloc() family of APIs. This will usually also free the
 * physical memory underlying the virtual allocation, but that memory is
 * reference counted, so it will not be freed until the last user goes away.
 *
 * If @addr is NULL, no operation is performed.
 *
 * Context:
 * May sleep if called *not* from interrupt context.
 * Must not be called in NMI context (strictly speaking, it could be
 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 * conventions for vfree() arch-dependent would be a really bad idea).
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	might_sleep_if(!in_interrupt());

	if (!addr)
		return;

	__vfree(addr);
}
EXPORT_SYMBOL(vfree);
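
/*
 * Example (illustrative sketch; names are hypothetical): a typical caller
 * pairs vmalloc() with vfree() from sleepable context and falls back to
 * vfree_atomic() when the buffer may be released from atomic context.
 */
static __maybe_unused void example_vfree_usage(bool atomic_path)
{
	char *buf = vmalloc(64 * PAGE_SIZE);

	if (!buf)
		return;

	/* ... use buf ... */

	if (atomic_path)
		vfree_atomic(buf);	/* any atomic context except NMI */
	else
		vfree(buf);		/* may sleep outside interrupt context */
}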

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr:   memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual space.
 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
 * (which must be kmalloc or vmalloc memory) and one reference per page in it
 * are transferred from the caller to vmap(), and will be freed / dropped when
 * vfree() is called on the return value.
 *
 * Return: the address of the area or %NULL on failure
 */
void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long size;		/* In bytes */

	might_sleep();

	if (count > totalram_pages())
		return NULL;

	size = (unsigned long)count << PAGE_SHIFT;
	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
			pages) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	if (flags & VM_MAP_PUT_PAGES) {
		area->pages = pages;
		area->nr_pages = count;
	}
	return area->addr;
}
EXPORT_SYMBOL(vmap);
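
/*
 * Example (illustrative sketch; the function name is hypothetical): mapping
 * a caller-owned page array with vmap() and tearing the mapping down again
 * with vunmap(). Without %VM_MAP_PUT_PAGES the pages and the array remain
 * owned by the caller and are not freed here.
 */
static __maybe_unused void example_vmap_usage(struct page **pages,
					      unsigned int nr)
{
	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);

	if (!va)
		return;

	/* ... access the pages through the contiguous mapping at va ... */

	vunmap(va);
}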

#ifdef CONFIG_VMAP_PFN
struct vmap_pfn_data {
	unsigned long	*pfns;
	pgprot_t	prot;
	unsigned int	idx;
};

static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
{
	struct vmap_pfn_data *data = private;

	if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
		return -EINVAL;
	*pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
	return 0;
}

/**
 * vmap_pfn - map an array of PFNs into virtually contiguous space
 * @pfns: array of PFNs
 * @count: number of pages to map
 * @prot: page protection for the mapping
 *
 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
 * the start address of the mapping.
 */
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
{
	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
	struct vm_struct *area;

	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
		free_vm_area(area);
		return NULL;
	}
	return area->addr;
}
EXPORT_SYMBOL_GPL(vmap_pfn);
#endif /* CONFIG_VMAP_PFN */
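
#ifdef CONFIG_VMAP_PFN
/*
 * Example (illustrative sketch; names are hypothetical): vmap_pfn() is meant
 * for PFNs that have no struct page (pfn_valid() must be false for every
 * entry), e.g. device memory discovered by a driver. The mapping can later
 * be torn down with vunmap() on the returned address.
 */
static __maybe_unused void *example_vmap_pfn_usage(unsigned long *pfns,
						   unsigned int nr)
{
	return vmap_pfn(pfns, nr, pgprot_noncached(PAGE_KERNEL));
}
#endif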

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node)
{
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
	unsigned int nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
	unsigned long array_size;
	unsigned int i;
	struct page **pages;

	array_size = (unsigned long)nr_pages * sizeof(struct page *);
	gfp_mask |= __GFP_NOWARN;
	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
		gfp_mask |= __GFP_HIGHMEM;

	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp, node,
					area->caller);
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}

	if (!pages) {
		free_vm_area(area);
		return NULL;
	}

	area->pages = pages;
	area->nr_pages = nr_pages;

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node == NUMA_NO_NODE)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vfree() */
			area->nr_pages = i;
			atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
			goto fail;
		}
		area->pages[i] = page;
		if (gfpflags_allow_blocking(gfp_mask))
			cond_resched();
	}
	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);

	if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area),
			prot, pages) < 0)
		goto fail;

	return area->addr;

fail:
	warn_alloc(gfp_mask, NULL,
		   "vmalloc: allocation failure, allocated %ld of %ld bytes",
		   (area->nr_pages*PAGE_SIZE), area->size);
	__vfree(area->addr);
	return NULL;
}

/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size:		  allocation size
 * @align:		  desired alignment
 * @start:		  vm area range start
 * @end:		  vm area range end
 * @gfp_mask:		  flags for the page level allocator
 * @prot:		  protection mask for the allocated pages
 * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
 * @node:		  node to use for allocation or NUMA_NO_NODE
 * @caller:		  caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 *
 * Return: the address of the area or %NULL on failure
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
		goto fail;

	area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED |
				vm_flags, start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_uninitialized_flag(area);

	kmemleak_vmalloc(area, size, gfp_mask);

	return addr;

fail:
	warn_alloc(gfp_mask, NULL,
			  "vmalloc: allocation failure: %lu bytes", real_size);
	return NULL;
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size:	    allocation size
 * @align:	    desired alignment
 * @gfp_mask:	    flags for the page level allocator
 * @node:	    node to use for allocation or NUMA_NO_NODE
 * @caller:	    caller's return address
 *
 * Allocate enough pages to cover @size from the page level allocator with
 * @gfp_mask flags. Map them into contiguous kernel virtual space.
 *
 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
 * and __GFP_NOFAIL are not supported.
 *
 * Any use of gfp flags outside of GFP_KERNEL should be discussed
 * with the mm people first.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, PAGE_KERNEL, 0, node, caller);
}
/*
 * This is only for performance analysis of vmalloc and stress purposes.
 * It is required by the vmalloc test module, therefore do not use it
 * for anything else.
 */
#ifdef CONFIG_TEST_VMALLOC_MODULE
EXPORT_SYMBOL_GPL(__vmalloc_node);
#endif

void *__vmalloc(unsigned long size, gfp_t gfp_mask)
{
	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);
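
/*
 * Example (illustrative sketch; the function name is hypothetical):
 * __vmalloc() is the entry point when a caller needs a gfp mask other than
 * plain GFP_KERNEL. Spelling out vzalloc() through it looks like this; the
 * reclaim modifiers noted above remain unsupported.
 */
static __maybe_unused void *example___vmalloc_usage(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
}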

/**
 * vmalloc - allocate virtually contiguous memory
 * @size:    allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);

/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size:    allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(vzalloc);
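
/*
 * Example (illustrative sketch; names are hypothetical): vzalloc() replaces
 * the common vmalloc() + memset() pattern. array_size() from
 * <linux/overflow.h> keeps the size computation overflow-safe.
 */
static __maybe_unused unsigned long *example_vzalloc_usage(unsigned int nents)
{
	/* One zeroed slot per entry; released later with vfree(). */
	return vzalloc(array_size(nents, sizeof(unsigned long)));
}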

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_user(unsigned long size)
{
	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
				    VM_USERMAP, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	  allocation size
 * @node:	  numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL, node,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(vzalloc_node);
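
/*
 * Example (illustrative sketch; names are hypothetical): node-aware callers
 * pass the node that will use the memory; NUMA_NO_NODE lets the allocator
 * choose.
 */
static __maybe_unused void *example_vzalloc_node_usage(unsigned long size,
							int node)
{
	/* node may be NUMA_NO_NODE when there is no placement preference. */
	return vzalloc_node(size, node);
}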

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
/*
 * 64b systems should always have either DMA or DMA32 zones. For others
 * GFP_DMA32 should do the right thing and use the normal zone.
 */
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size:	allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:	     allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_32_user(unsigned long size)
{
	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
				    VM_USERMAP, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * Small helper routine: copy contents from @addr into @buf.
 * If a page is not present, the corresponding bytes are zero-filled.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap_atomic() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}

	return copied;
}

static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap_atomic() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}

	return copied;
}

/**
 * vread() - read vmalloc area in a safe way.
 * @buf:	buffer for reading data
 * @addr:	vm address.
 * @count:	number of bytes to be read.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copies data from that area to a given buffer. If the given memory range
 * of [addr...addr+count) includes some valid address, data is copied to
 * the proper area of @buf. If there are memory holes, they'll be zero-filled.
 * IOREMAP areas are treated as memory holes and no copy is done.
 *
 * If [addr...addr+count) doesn't include any intersection with a live
 * vm_struct area, this returns 0. @buf should be a kernel buffer.
 *
 * Note: In usual ops, vread() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 *
 * Return: number of bytes for which addr and buf should be increased
 * (same number as @count) or %0 if [addr...addr+count) doesn't
 * include any intersection with a valid vmalloc area
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!va->vm)
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}

/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf:	buffer for source data
 * @addr:	vm address.
 * @count:	number of bytes to be written.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copies data from a buffer to the given addr. If the specified range of
 * [addr...addr+count) includes some valid address, data is copied from
 * the proper area of @buf. If there are memory holes, no copy is done for
 * them. IOREMAP areas are treated as memory holes and no copy is done.
 *
 * If [addr...addr+count) doesn't include any intersection with a live
 * vm_struct area, this returns 0. @buf should be a kernel buffer.
 *
 * Note: In usual ops, vwrite() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 *
 * Return: number of bytes for which addr and buf should be
 * increased (same number as @count) or %0 if [addr...addr+count)
 * doesn't include any intersection with a valid vmalloc area
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!va->vm)
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);
	if (!copied)
		return 0;
	return buflen;
}

/**
 * remap_vmalloc_range_partial - map vmalloc pages to userspace
 * @vma:		vma to cover
 * @uaddr:		target user address to start at
 * @kaddr:		virtual address of vmalloc kernel memory
 * @pgoff:		offset from @kaddr to start at
 * @size:		size of map area
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that @kaddr is a valid vmalloc'ed area,
 * and that it is big enough to cover the range starting at
 * @uaddr in @vma. Will return failure if those criteria aren't
 * met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long pgoff,
				unsigned long size)
{
	struct vm_struct *area;
	unsigned long off;
	unsigned long end_index;

	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
		return -EINVAL;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
		return -EINVAL;

	if (check_add_overflow(size, off, &end_index) ||
	    end_index > get_vm_area_size(area))
		return -EINVAL;
	kaddr += off;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range_partial);

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma:		vma to cover (map full range of vma)
 * @addr:		vmalloc memory
 * @pgoff:		number of pages into addr before first page to map
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * those criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr, pgoff,
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
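
/*
 * Example (illustrative sketch; names are hypothetical): a driver mmap()
 * handler exposing a buffer that was allocated with vmalloc_user(). Only
 * areas created with %VM_USERMAP (or %VM_DMA_COHERENT) may be remapped
 * this way.
 */
static __maybe_unused int example_mmap_vmalloc_buf(struct vm_area_struct *vma,
						   void *user_buf)
{
	return remap_vmalloc_range(vma, user_buf, vma->vm_pgoff);
}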

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return rb_entry_safe(n, struct vmap_area, rb_node);
}

/**
 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
 * @addr: target address
 *
 * Returns: the vmap_area if it is found. If there is no such area,
 *   the closest vmap_area below @addr (i.e. va->va_start < addr &&
 *   va->va_end < addr) is returned, or NULL if there are no areas
 *   before @addr.
 */
static struct vmap_area *
pvm_find_va_enclose_addr(unsigned long addr)
{
	struct vmap_area *va, *tmp;
	struct rb_node *n;

	n = free_vmap_area_root.rb_node;
	va = NULL;

	while (n) {
		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_start <= addr) {
			va = tmp;
			if (tmp->va_end >= addr)
				break;

			n = n->rb_right;
		} else {
			n = n->rb_left;
		}
	}

	return va;
}

/**
 * pvm_determine_end_from_reverse - find the highest aligned address
 * of a free block below VMALLOC_END
 * @va:
 *   in - the VA we start the search from (reverse order);
 *   out - the VA with the highest aligned end address.
 * @align: alignment for required highest address
 *
 * Returns: determined end address within vmap_area
 */
static unsigned long
pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
{
	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (likely(*va)) {
		list_for_each_entry_from_reverse((*va),
				&free_vmap_area_list, list) {
			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
			if ((*va)->va_start < addr)
				return addr;
		}
	}

	return 0;
}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas. This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes. To avoid interacting with regular vmallocs, these
 * areas are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple. It
 * does everything top-down and scans free blocks from the end looking
 * for a matching base. While scanning, if any of the areas do not fit,
 * the base address is pulled down to fit the area. Scanning is repeated
 * till all the areas fit and then all necessary data structures are
 * inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *va;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, size, end, last_end, orig_start, orig_end;
	bool purged = false;
	enum fit_type type;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = area + 1; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			BUG_ON(start2 < end && start < end2);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&free_vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	va = pvm_find_va_enclose_addr(vmalloc_end);
	base = pvm_determine_end_from_reverse(&va, align) - end;

	while (true) {
		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end)
			goto overflow;

		/*
		 * Fitting base has not been found.
		 */
		if (va == NULL)
			goto overflow;

		/*
		 * If required width exceeds current VA block, move
		 * base downwards and then recheck.
		 */
		if (base + end > va->va_end) {
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If this VA does not fit, move base downwards and recheck.
		 */
		if (base + start < va->va_start) {
			va = node_to_va(rb_prev(&va->rb_node));
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one. If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;

		start = offsets[area];
		end = start + sizes[area];
		va = pvm_find_va_enclose_addr(base + end);
	}

	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		int ret;

		start = base + offsets[area];
		size = sizes[area];

		va = pvm_find_va_enclose_addr(start);
		if (WARN_ON_ONCE(va == NULL))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		type = classify_va_fit_type(va, start, size);
		if (WARN_ON_ONCE(type == NOTHING_FIT))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		ret = adjust_va_to_fit_type(va, start, size, type);
		if (unlikely(ret))
			goto recovery;

		/* Allocated area. */
		va = vas[area];
		va->va_start = start;
		va->va_end = start + size;
	}

	spin_unlock(&free_vmap_area_lock);

	/* populate the kasan shadow space */
	for (area = 0; area < nr_vms; area++) {
		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
			goto err_free_shadow;

		kasan_unpoison_vmalloc((void *)vas[area]->va_start,
				       sizes[area]);
	}

	/* insert all vm's */
	spin_lock(&vmap_area_lock);
	for (area = 0; area < nr_vms; area++) {
		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);

		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);
	}
	spin_unlock(&vmap_area_lock);

	kfree(vas);
	return vms;

recovery:
	/*
	 * Remove previously allocated areas. There is no
	 * need in removing these areas from the busy tree,
	 * because they are inserted only on the final step
	 * and only when pcpu_get_vm_areas() succeeds.
	 */
	while (area--) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
						    &free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
	}

overflow:
	spin_unlock(&free_vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = true;

		/* Before "retry", check if we recover. */
		for (area = 0; area < nr_vms; area++) {
			if (vas[area])
				continue;

			vas[area] = kmem_cache_zalloc(
				vmap_area_cachep, GFP_KERNEL);
			if (!vas[area])
				goto err_free;
		}

		goto retry;
	}

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas[area])
			kmem_cache_free(vmap_area_cachep, vas[area]);

		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;

err_free_shadow:
	spin_lock(&free_vmap_area_lock);
	/*
	 * We release all the vmalloc shadows, even the ones for regions that
	 * hadn't been successfully added. This relies on kasan_release_vmalloc
	 * being able to tolerate this case.
	 */
	for (area = 0; area < nr_vms; area++) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
						    &free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
		kfree(vms[area]);
	}
	spin_unlock(&free_vmap_area_lock);
	kfree(vas);
	kfree(vms);
	return NULL;
}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */
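
#ifdef CONFIG_SMP
/*
 * Example (illustrative sketch; the offsets and sizes are hypothetical):
 * the percpu allocator asks for congruent areas at fixed offsets and later
 * frees them as one batch.
 */
static __maybe_unused void example_pcpu_vm_areas(void)
{
	static const unsigned long offsets[] = { 0, 1UL << 20 };
	static const size_t sizes[] = { 16 * PAGE_SIZE, 16 * PAGE_SIZE };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, ARRAY_SIZE(sizes), PAGE_SIZE);
	if (!vms)
		return;

	/* ... carve per-cpu chunks out of the two congruent areas ... */

	pcpu_free_vm_areas(vms, ARRAY_SIZE(sizes));
}
#endif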

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_purge_lock)
	__acquires(&vmap_area_lock)
{
	mutex_lock(&vmap_purge_lock);
	spin_lock(&vmap_area_lock);

	return seq_list_start(&vmap_area_list, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &vmap_area_list, pos);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
	__releases(&vmap_purge_lock)
{
	spin_unlock(&vmap_area_lock);
	mutex_unlock(&vmap_purge_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static void show_purge_info(struct seq_file *m)
{
	struct vmap_area *va;

	spin_lock(&purge_vmap_area_lock);
	list_for_each_entry(va, &purge_vmap_area_list, list) {
		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);
	}
	spin_unlock(&purge_vmap_area_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va;
	struct vm_struct *v;

	va = list_entry(p, struct vmap_area, list);

	/*
	 * s_show can race with remove_vm_area(): a NULL ->vm means the vmap
	 * area is being torn down or is a vm_map_ram allocation.
	 */
	if (!va->vm) {
		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);

		return 0;
	}

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%pa", &v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (v->flags & VM_DMA_COHERENT)
		seq_puts(m, " dma-coherent");

	if (is_vmalloc_addr(v->pages))
		seq_puts(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');

	/*
	 * As a final step, dump "unpurged" areas.
	 */
	if (list_is_last(&va->list, &vmap_area_list))
		show_purge_info(m);

	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int __init proc_vmalloc_init(void)
{
	if (IS_ENABLED(CONFIG_NUMA))
		proc_create_seq_private("vmallocinfo", 0400, NULL,
				&vmalloc_op,
				nr_node_ids * sizeof(unsigned int), NULL);
	else
		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
	return 0;
}
module_init(proc_vmalloc_init);

#endif