// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "pgalloc-track.h"
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}
/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = pte_mkhuge(entry);
			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}
static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}
static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}
static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}
static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start = addr;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}
int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	return err;
}
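/*
 * Illustrative sketch (not part of this file): this is roughly how a generic
 * ioremap() implementation is expected to drive ioremap_page_range() --
 * reserve a VM_IOREMAP area, map the physical range into it, and unwind on
 * failure. Names and details below are simplified assumptions, not the exact
 * mm/ioremap.c code.
 *
 *	void __iomem *example_ioremap(phys_addr_t phys, size_t size, pgprot_t prot)
 *	{
 *		unsigned long offset = offset_in_page(phys);
 *		struct vm_struct *area;
 *		unsigned long vaddr;
 *
 *		phys -= offset;
 *		size = PAGE_ALIGN(size + offset);
 *
 *		area = get_vm_area_caller(size, VM_IOREMAP,
 *					  __builtin_return_address(0));
 *		if (!area)
 *			return NULL;
 *		vaddr = (unsigned long)area->addr;
 *
 *		if (ioremap_page_range(vaddr, vaddr + size, phys, prot)) {
 *			free_vm_area(area);
 *			return NULL;
 *		}
 *
 *		return (void __iomem *)(vaddr + offset);
 *	}
 */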
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}
static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);
	} while (pmd++, addr = next, addr != end);
}
static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}
static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;
	int cleared;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		cleared = p4d_clear_huge(p4d);
		if (cleared || p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (cleared)
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}
/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vmap() before calling
 * this function, and flush_tlb_kernel_range after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}
/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}
static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}
static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}
static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}
static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}
/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					__pa(page_address(pages[i])), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}
/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
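/*
 * Illustrative sketch (not part of this file): vmalloc memory is only
 * virtually contiguous, so code that needs the backing struct pages or page
 * frame numbers walks it one PAGE_SIZE step at a time. The helper below is a
 * hypothetical example, not a kernel API.
 *
 *	static void example_walk_vmalloc_pages(void *buf, unsigned long npages)
 *	{
 *		unsigned long i;
 *
 *		for (i = 0; i < npages; i++) {
 *			void *addr = buf + i * PAGE_SIZE;
 *			struct page *page = vmalloc_to_page(addr);
 *			unsigned long pfn = vmalloc_to_pfn(addr);
 *
 *			pr_info("va %px -> pfn %lx (page %px)\n", addr, pfn, page);
 *		}
 *	}
 */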
/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0

static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augment red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find a lowest match of free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

/*
 * Gets called when remove the node and rotate.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}
static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}
/*
 * This function returns back addresses of parent node
 * and its left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further
 * steps regarding inserting of conflicting overlap range
 * have to be declined and actually considered as a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with parent rb_node and correct direction, I name
	 * it link, where the new va->rb_node will be attached to.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity check.
		 * Trigger the BUG() if there are sides(left/right)
		 * or full coverage of the range.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}
static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent)) {
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;
	}

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}
static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * to parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}
static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (root == &free_vmap_area_root)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}
#if DEBUG_AUGMENT_PROPAGATE_CHECK
static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif
/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from VA point. The propagation must be done
 * when VA size is modified by changing its va_start/va_end. Or
 * in case of newly inserting of VA to the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1
 * its subtree_max_size is updated only, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and parent
 * node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from bottom towards the root until
	 * the calculated maximum available size of checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}
static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}
static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}
/*
 * Merge de-allocated chunk of VA memory with previous
 * and next free blocks. If coalesce is not done a new
 * free area is inserted. If VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlap
 * ranges, followed by WARN() report. Despite it is a
 * buggy behaviour, a system can be alive and keep
 * ongoing.
 */
static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * |<------VA------>|<-----Next----->|
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * |<-----Prev----->|<------VA------>|
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				unlink_va(va, root);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		link_va(va, root, parent, link, head);

	return va;
}
static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = merge_or_add_vmap_area(va, root, head);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}
static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}
/*
 * Find the first free block(lowest start address) in the tree,
 * that will accomplish the request corresponding to passing
 * parameters.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = free_vmap_area_root.rb_node;

	/* Adjust the search size for alignment overhead. */
	length = size + align - 1;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal or bigger to the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree,
			 * that will satisfy the search criteria. It can happen
			 * only once due to "vstart" restriction.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}
#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(unsigned long size)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(size, 1, vstart);
	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif
enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}
static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size,
	enum fit_type type)
{
	struct vmap_area *lva = NULL;

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 */
		unlink_va(va, &free_vmap_area_root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For percpu allocator we do not do any pre-allocation
			 * and leave it as it is. The reason is it most likely
			 * never ends up with NE_FIT_TYPE splitting. In case of
			 * percpu allocations offsets and sizes are aligned to
			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
			 * are its main fitting cases.
			 *
			 * There are a few exceptions though, as an example it is
			 * a first allocation (early boot up) when we have "one"
			 * big free space that has to be split.
			 *
			 * Also we can hit this path in case of regular "vmap"
			 * allocations, if "this" current CPU was not preloaded.
			 * See the comment in alloc_vmap_area() why. If so, then
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purpose. That is rare and most time does not
			 * occur.
			 *
			 * What happens if an allocation fails? Basically,
			 * an "overflow" path is triggered to purge lazily freed
			 * areas to free some memory, then, the "retry" path is
			 * triggered to repeat one more time. See more details
			 * in alloc_vmap_area() function.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node,
				&free_vmap_area_root, &free_vmap_area_list);
	}

	return 0;
}
/*
 * Returns a start address of the newly allocated area, if success.
 * Otherwise a vend is returned that indicates failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	struct vmap_area *va;
	enum fit_type type;
	int ret;

	va = find_vmap_lowest_match(size, align, vstart);
	if (unlikely(!va))
		return vend;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Classify what we have found. */
	type = classify_va_fit_type(va, nva_start_addr, size);
	if (WARN_ON_ONCE(type == NOTHING_FIT))
		return vend;

	/* Update the free vmap_area. */
	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
	if (ret)
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(size);
#endif

	return nva_start_addr;
}
/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}
static inline void
preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
{
	struct vmap_area *va = NULL;

	/*
	 * Preload this CPU with one extra vmap_area object. It is used
	 * when fit type of free area is NE_FIT_TYPE. It guarantees that
	 * a CPU that does an allocation is preloaded.
	 *
	 * We do it in non-atomic context, thus it allows us to use more
	 * permissive allocation masks to be more stable under low memory
	 * condition and high memory pressure.
	 */
	if (!this_cpu_read(ne_fit_preload_node))
		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(lock);

	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
		kmem_cache_free(vmap_area_cachep, va);
}
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	unsigned long freed;
	unsigned long addr;
	int purged = 0;
	int ret;

	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;

	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);

retry:
	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
	addr = __alloc_vmap_area(size, align, vstart, vend);
	spin_unlock(&free_vmap_area_lock);

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	if (unlikely(addr == vend))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;

	spin_lock(&vmap_area_lock);
	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	ret = kasan_populate_vmalloc(addr, size);
	if (ret) {
		free_vmap_area(va);
		return ERR_PTR(ret);
	}

	return va;

overflow:
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	freed = 0;
	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
	if (freed > 0) {
		purged = 0;
		goto retry;
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}
int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
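/*
 * Illustrative sketch (not part of this file): a subsystem that caches
 * vmalloc-backed objects can register a purge notifier and drop its caches
 * when a vmap allocation runs out of space. The callback below is a
 * hypothetical example; its third argument points to the "freed" counter
 * that alloc_vmap_area() inspects before retrying.
 *
 *	static int example_vmap_purge(struct notifier_block *nb,
 *				      unsigned long action, void *data)
 *	{
 *		unsigned long *freed = data;
 *
 *		*freed += example_shrink_my_vmalloc_caches();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_vmap_nb = {
 *		.notifier_call = example_vmap_purge,
 *	};
 *
 *	register_vmap_purge_notifier(&example_vmap_nb);
 *	...
 *	unregister_vmap_purge_notifier(&example_vmap_nb);
 */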
/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
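/*
 * Worked example (illustrative): with 4K pages, 32MB corresponds to 8192
 * pages. On a machine with 16 online CPUs, fls(16) == 5, so the lazy purge
 * threshold is 5 * 8192 = 40960 pages, i.e. up to ~160MB of vmap space may
 * be gathered before a TLB flush is attempted.
 */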
static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);

/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

#ifdef CONFIG_X86_64
/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
}
#endif /* CONFIG_X86_64 */
/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	unsigned long resched_threshold;
	struct list_head local_pure_list;
	struct vmap_area *va, *n_va;

	lockdep_assert_held(&vmap_purge_lock);

	spin_lock(&purge_vmap_area_lock);
	purge_vmap_area_root = RB_ROOT;
	list_replace_init(&purge_vmap_area_list, &local_pure_list);
	spin_unlock(&purge_vmap_area_lock);

	if (unlikely(list_empty(&local_pure_list)))
		return false;

	start = min(start,
		list_first_entry(&local_pure_list,
			struct vmap_area, list)->va_start);

	end = max(end,
		list_last_entry(&local_pure_list,
			struct vmap_area, list)->va_end);

	flush_tlb_kernel_range(start, end);
	resched_threshold = lazy_max_pages() << 1;

	spin_lock(&free_vmap_area_lock);
	list_for_each_entry_safe(va, n_va, &local_pure_list, list) {
		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
		unsigned long orig_start = va->va_start;
		unsigned long orig_end = va->va_end;

		/*
		 * Finally insert or merge lazily-freed area. It is
		 * detached and there is no need to "unlink" it from
		 * anything.
		 */
		va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
				&free_vmap_area_list);
		if (!va)
			continue;

		if (is_vmalloc_or_module_addr((void *)orig_start))
			kasan_release_vmalloc(orig_start, orig_end,
					      va->va_start, va->va_end);

		atomic_long_sub(nr, &vmap_lazy_nr);

		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
			cond_resched_lock(&free_vmap_area_lock);
	}
	spin_unlock(&free_vmap_area_lock);
	return true;
}
/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	if (mutex_trylock(&vmap_purge_lock)) {
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);
	}
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}
/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	unsigned long nr_lazy;

	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
				PAGE_SHIFT, &vmap_lazy_nr);

	/*
	 * Merge or place it to the purge tree/list.
	 */
	spin_lock(&purge_vmap_area_lock);
	merge_or_add_vmap_area(va,
		&purge_vmap_area_root, &purge_vmap_area_list);
	spin_unlock(&purge_vmap_area_lock);

	/* After this point, we may free va at any time */
	if (unlikely(nr_lazy > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}
/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	vunmap_range_noflush(va->va_start, va->va_end);
	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}
static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}
/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * XArray of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_XARRAY(vmap_blocks);
/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */
static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}
/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
 * @order:    how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
	if (err) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}
static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;

	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}
static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}
static void vb_free(unsigned long addr, unsigned long size)
{
	unsigned long offset;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap(addr, addr + size);

	order = get_order(size);
	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
	vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));

	vunmap_range_noflush(addr, addr + size);

	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(addr, addr + size);

	spin_lock(&vb->lock);

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}
static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
{
	int cpu;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
			if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end   = max(e, end);

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	if (!__purge_vmap_area_lazy(start, end) && flush)
		flush_tlb_kernel_range(start, end);
	mutex_unlock(&vmap_purge_lock);
}
/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int flush = 0;

	_vm_unmap_aliases(start, end, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;
	struct vmap_area *va;

	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(!PAGE_ALIGNED(addr));

	kasan_poison_vmalloc(mem, size);

	if (likely(count <= VMAP_MAX_ALLOC)) {
		debug_check_no_locks_freed(mem, size);
		vb_free(addr, size);
		return;
	}

	va = find_vmap_area(addr);
	debug_check_no_locks_freed((void *)va->va_start,
				    (va->va_end - va->va_start));
	free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);
/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good.  But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine).  You could see failures in
 * the end.  Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}

	kasan_unpoison_vmalloc(mem, size);

	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
				pages, PAGE_SHIFT) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}

	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
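/*
 * Illustrative sketch (not part of this file): a typical short-lived
 * vm_map_ram()/vm_unmap_ram() pairing, mapping a small page array to get a
 * temporarily contiguous kernel view of it. The helper name is hypothetical.
 *
 *	static int example_use_pages(struct page **pages, unsigned int count)
 *	{
 *		void *va = vm_map_ram(pages, count, NUMA_NO_NODE);
 *
 *		if (!va)
 *			return -ENOMEM;
 *
 *		memset(va, 0, (size_t)count << PAGE_SHIFT);	// use the mapping
 *		vm_unmap_ram(va, count);			// same count as map
 *		return 0;
 *	}
 */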
static struct vm_struct *vmlist __initdata;

static inline unsigned int vm_area_page_order(struct vm_struct *vm)
{
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return vm->page_order;
#else
	return 0;
#endif
}

static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
{
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	vm->page_order = order;
#endif
}
/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}
/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}
static void vmap_init_free_space(void)
{
	unsigned long vmap_start = 1;
	const unsigned long vmap_end = ULONG_MAX;
	struct vmap_area *busy, *free;

	/*
	 *     B     F     B     B     B     F
	 * -|-----|.....|-----|-----|-----|.....|-
	 *  |<--------------------------------->|
	 */
	list_for_each_entry(busy, &vmap_area_list, list) {
		if (busy->va_start - vmap_start > 0) {
			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
			if (!WARN_ON_ONCE(!free)) {
				free->va_start = vmap_start;
				free->va_end = busy->va_start;

				insert_vmap_area_augment(free, NULL,
					&free_vmap_area_root,
						&free_vmap_area_list);
			}
		}

		vmap_start = busy->va_end;
	}

	if (vmap_end - vmap_start > 0) {
		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (!WARN_ON_ONCE(!free)) {
			free->va_start = vmap_start;
			free->va_end = vmap_end;

			insert_vmap_area_augment(free, NULL,
				&free_vmap_area_root,
					&free_vmap_area_list);
		}
	}
}
void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	/*
	 * Create the cache for vmap_area objects.
	 */
	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (WARN_ON_ONCE(!va))
			continue;

		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	}

	/*
	 * Now we can initialize a free vmap space.
	 */
	vmap_init_free_space();
	vmap_initialized = true;
}
static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
	struct vmap_area *va, unsigned long flags, const void *caller)
{
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
}

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			     unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	setup_vmalloc_vm_locked(vm, va, flags, caller);
	spin_unlock(&vmap_area_lock);
}

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long shift, unsigned long flags,
		unsigned long start, unsigned long end, int node,
		gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	unsigned long requested_size = size;

	BUG_ON(in_interrupt());
	size = ALIGN(size, 1ul << shift);
	if (unlikely(!size))
		return NULL;

	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, get_count_order_long(size),
				       PAGE_SHIFT, IOREMAP_MAX_ORDER);

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	kasan_unpoison_vmalloc((void *)va->va_start, requested_size);

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size:	 size of the area
 * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
				  VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}
EXPORT_SYMBOL(get_vm_area);

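/*
 * Illustrative sketch, not part of this file: reserving a bare chunk of
 * kernel virtual address space with get_vm_area() and releasing it again
 * with free_vm_area() (defined later in this file).  Nothing is mapped
 * into the area here; a real user would install PTEs before touching it.
 * The helper name and size are assumptions for the example only.
 */
#if 0	/* example only, never compiled */
static int example_reserve_kva(void)
{
	struct vm_struct *area = get_vm_area(SZ_1M, VM_IOREMAP);

	if (!area)
		return -ENOMEM;

	/* area->addr .. area->addr + area->size is now reserved. */

	free_vm_area(area);
	return 0;
}
#endif
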
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				     const void *caller)
{
	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
				  VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 * find_vm_area - find a continuous kernel virtual area
 * @addr:	  base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (!va)
		return NULL;

	return va->vm;
}

/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr:	    base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	might_sleep();

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area((unsigned long)addr);
	if (va && va->vm) {
		struct vm_struct *vm = va->vm;

		va->vm = NULL;
		spin_unlock(&vmap_area_lock);

		kasan_free_shadow(vm);
		free_unmap_vmap_area(va);

		return vm;
	}

	spin_unlock(&vmap_area_lock);
	return NULL;
}

static inline void set_area_direct_map(const struct vm_struct *area,
				       int (*set_direct_map)(struct page *page))
{
	int i;

	/* HUGE_VMALLOC passes small pages to set_direct_map */
	for (i = 0; i < area->nr_pages; i++)
		if (page_address(area->pages[i]))
			set_direct_map(area->pages[i]);
}

/* Handle removing and resetting vm mappings related to the vm_struct. */
static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
{
	unsigned long start = ULONG_MAX, end = 0;
	unsigned int page_order = vm_area_page_order(area);
	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
	int flush_dmap = 0;
	int i;

	remove_vm_area(area->addr);

	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
	if (!flush_reset)
		return;

	/*
	 * If not deallocating pages, just do the flush of the VM area and
	 * return.
	 */
	if (!deallocate_pages) {
		vm_unmap_aliases();
		return;
	}

	/*
	 * If execution gets here, flush the vm mapping and reset the direct
	 * map. Find the start and end range of the direct mappings to make sure
	 * the vm_unmap_aliases() flush includes the direct map.
	 */
	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
		unsigned long addr = (unsigned long)page_address(area->pages[i]);

		if (addr) {
			unsigned long page_size;

			page_size = PAGE_SIZE << page_order;
			start = min(addr, start);
			end = max(addr + page_size, end);
			flush_dmap = 1;
		}
	}

	/*
	 * Set direct map to something invalid so that it won't be cached if
	 * there are any accesses after the TLB flush, then flush the TLB and
	 * reset the direct map permissions to the default.
	 */
	set_area_direct_map(area, set_direct_map_invalid_noflush);
	_vm_unmap_aliases(start, end, flush_dmap);
	set_area_direct_map(area, set_direct_map_default_noflush);
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	area = find_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));

	kasan_poison_vmalloc(area->addr, get_vm_area_size(area));

	vm_remove_mappings(area, deallocate_pages);

	if (deallocate_pages) {
		unsigned int page_order = vm_area_page_order(area);
		int i;

		for (i = 0; i < area->nr_pages; i += 1U << page_order) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_pages(page, page_order);
			cond_resched();
		}
		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);

		kvfree(area->pages);
	}

	kfree(area);
}

static inline void __vfree_deferred(const void *addr)
{
	/*
	 * Use raw_cpu_ptr() because this can be called from preemptible
	 * context. Preemption is absolutely fine here, because the llist_add()
	 * implementation is lockless, so it works even if we are adding to
	 * another cpu's list. schedule_work() should be fine with this too.
	 */
	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);

	if (llist_add((struct llist_node *)addr, &p->list))
		schedule_work(&p->wq);
}

/**
 * vfree_atomic - release memory allocated by vmalloc()
 * @addr:	  memory base address
 *
 * This one is just like vfree() but can be called in any atomic context
 * except NMIs.
 */
void vfree_atomic(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	__vfree_deferred(addr);
}

static void __vfree(const void *addr)
{
	if (unlikely(in_interrupt()))
		__vfree_deferred(addr);
	else
		__vunmap(addr, 1);
}

/**
 * vfree - Release memory allocated by vmalloc()
 * @addr:  Memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as obtained
 * from one of the vmalloc() family of APIs. This will usually also free the
 * physical memory underlying the virtual allocation, but that memory is
 * reference counted, so it will not be freed until the last user goes away.
 *
 * If @addr is NULL, no operation is performed.
 *
 * Context:
 * May sleep if called *not* from interrupt context.
 * Must not be called in NMI context (strictly speaking, it could be
 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 * conventions for vfree() arch-dependent would be a really bad idea).
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	might_sleep_if(!in_interrupt());

	if (!addr)
		return;

	__vfree(addr);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr:   memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual space.
 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
 * (which must be kmalloc or vmalloc memory) and one reference per page in it
 * are transferred from the caller to vmap(), and will be freed / dropped when
 * vfree() is called on the return value.
 *
 * Return: the address of the area or %NULL on failure
 */
void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long addr;
	unsigned long size;		/* In bytes */

	might_sleep();

	if (count > totalram_pages())
		return NULL;

	size = (unsigned long)count << PAGE_SHIFT;
	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
	if (!area)
		return NULL;

	addr = (unsigned long)area->addr;
	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
				pages, PAGE_SHIFT) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	if (flags & VM_MAP_PUT_PAGES) {
		area->pages = pages;
		area->nr_pages = count;
	}
	return area->addr;
}
EXPORT_SYMBOL(vmap);

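/*
 * Illustrative sketch, not part of this file: building a contiguous view of
 * already-allocated pages with vmap() and tearing it down with vunmap().
 * The page allocation strategy and helper name are assumptions for the
 * example only.
 */
#if 0	/* example only, never compiled */
static void *example_vmap_two_pages(void)
{
	struct page *pages[2];
	void *va;

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1])
		goto fail;

	/* VM_MAP marks the area as a regular vmap(); see s_show() below. */
	va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (!va)
		goto fail;
	return va;	/* vunmap(va) later, then free the pages */

fail:
	if (pages[0])
		__free_page(pages[0]);
	if (pages[1])
		__free_page(pages[1]);
	return NULL;
}
#endif
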
#ifdef CONFIG_VMAP_PFN
struct vmap_pfn_data {
	unsigned long	*pfns;
	pgprot_t	prot;
	unsigned int	idx;
};

static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
{
	struct vmap_pfn_data *data = private;

	if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
		return -EINVAL;
	*pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
	return 0;
}

/**
 * vmap_pfn - map an array of PFNs into virtually contiguous space
 * @pfns: array of PFNs
 * @count: number of pages to map
 * @prot: page protection for the mapping
 *
 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
 * the start address of the mapping.
 */
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
{
	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
	struct vm_struct *area;

	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
		free_vm_area(area);
		return NULL;
	}
	return area->addr;
}
EXPORT_SYMBOL_GPL(vmap_pfn);
#endif /* CONFIG_VMAP_PFN */

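/*
 * Illustrative sketch, not part of this file: vmap_pfn() is intended for
 * PFNs that have no struct page (note the pfn_valid() WARN above), such as
 * device memory.  The PFN source and the pgprot choice are assumptions made
 * for the example only.
 */
#if 0	/* example only, never compiled */
static void *example_map_device_pfns(unsigned long *pfns, unsigned int count)
{
	/* pgprot could also be something like pgprot_noncached(PAGE_KERNEL). */
	return vmap_pfn(pfns, count, PAGE_KERNEL);
}
#endif
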
static inline unsigned int
vm_area_alloc_pages(gfp_t gfp, int nid,
		unsigned int order, unsigned int nr_pages, struct page **pages)
{
	unsigned int nr_allocated = 0;

	/*
	 * For order-0 pages we make use of the bulk allocator; if
	 * the page array is partly or not at all populated due
	 * to failures, fall back to a single page allocator that is
	 * more permissive.
	 */
	if (!order && nid != NUMA_NO_NODE) {
		while (nr_allocated < nr_pages) {
			unsigned int nr, nr_pages_request;

			/*
			 * A maximum allowed request is hard-coded and is 100
			 * pages per call. That is done in order to prevent a
			 * long preemption off scenario in the bulk-allocator
			 * so the range is [1:100].
			 */
			nr_pages_request = min(100U, nr_pages - nr_allocated);

			nr = alloc_pages_bulk_array_node(gfp, nid,
				nr_pages_request, pages + nr_allocated);

			nr_allocated += nr;
			cond_resched();

			/*
			 * If no pages or only part of the request were
			 * obtained, fall back to the single page allocator.
			 */
			if (nr != nr_pages_request)
				break;
		}
	} else if (order)
		/*
		 * Compound pages are required for remap_vmalloc_page if
		 * high-order pages are used.
		 */
		gfp |= __GFP_COMP;

	/* High-order pages or fallback path if "bulk" fails. */

	while (nr_allocated < nr_pages) {
		struct page *page;
		int i;

		if (nid == NUMA_NO_NODE)
			page = alloc_pages(gfp, order);
		else
			page = alloc_pages_node(nid, gfp, order);
		if (unlikely(!page))
			break;

		/*
		 * Careful, we allocate and map page-order pages, but
		 * tracking is done per PAGE_SIZE page so as to keep the
		 * vm_struct APIs independent of the physical/mapped size.
		 */
		for (i = 0; i < (1U << order); i++)
			pages[nr_allocated + i] = page + i;

		cond_resched();
		nr_allocated += 1U << order;
	}

	return nr_allocated;
}

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, unsigned int page_shift,
				 int node)
{
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
	unsigned long addr = (unsigned long)area->addr;
	unsigned long size = get_vm_area_size(area);
	unsigned long array_size;
	unsigned int nr_small_pages = size >> PAGE_SHIFT;
	unsigned int page_order;

	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
	gfp_mask |= __GFP_NOWARN;
	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
		gfp_mask |= __GFP_HIGHMEM;

	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
					area->caller);
	} else {
		area->pages = kmalloc_node(array_size, nested_gfp, node);
	}

	if (!area->pages) {
		warn_alloc(gfp_mask, NULL,
			"vmalloc error: size %lu, failed to allocate page array size %lu",
			nr_small_pages * PAGE_SIZE, array_size);
		free_vm_area(area);
		return NULL;
	}

	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
	page_order = vm_area_page_order(area);

	area->nr_pages = vm_area_alloc_pages(gfp_mask, node,
		page_order, nr_small_pages, area->pages);

	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);

	/*
	 * If not enough pages were obtained to accomplish an
	 * allocation request, free them via __vfree() if any.
	 */
	if (area->nr_pages != nr_small_pages) {
		warn_alloc(gfp_mask, NULL,
			"vmalloc error: size %lu, page order %u, failed to allocate pages",
			area->nr_pages * PAGE_SIZE, page_order);
		goto fail;
	}

	if (vmap_pages_range(addr, addr + size, prot, area->pages,
			page_shift) < 0) {
		warn_alloc(gfp_mask, NULL,
			"vmalloc error: size %lu, failed to map pages",
			area->nr_pages * PAGE_SIZE);
		goto fail;
	}

	return area->addr;

fail:
	__vfree(area->addr);
	return NULL;
}

/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size:		  allocation size
 * @align:		  desired alignment
 * @start:		  vm area range start
 * @end:		  vm area range end
 * @gfp_mask:		  flags for the page level allocator
 * @prot:		  protection mask for the allocated pages
 * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
 * @node:		  node to use for allocation or NUMA_NO_NODE
 * @caller:		  caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 *
 * Return: the address of the area or %NULL on failure
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;
	unsigned long real_align = align;
	unsigned int shift = PAGE_SHIFT;

	if (WARN_ON_ONCE(!size))
		return NULL;

	if ((size >> PAGE_SHIFT) > totalram_pages()) {
		warn_alloc(gfp_mask, NULL,
			"vmalloc error: size %lu, exceeds total pages",
			real_size);
		return NULL;
	}

	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
		unsigned long size_per_node;

		/*
		 * Try huge pages. Only try for PAGE_KERNEL allocations,
		 * others like modules don't yet expect huge pages in
		 * their allocations due to apply_to_page_range not
		 * supporting them in those use cases.
		 */
		size_per_node = size;
		if (node == NUMA_NO_NODE)
			size_per_node /= num_online_nodes();
		if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
			shift = PMD_SHIFT;
		else
			shift = arch_vmap_pte_supported_shift(size_per_node);

		align = max(real_align, 1UL << shift);
		size = ALIGN(real_size, 1UL << shift);
	}

again:
	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
				  VM_UNINITIALIZED | vm_flags, start, end, node,
				  gfp_mask, caller);
	if (!area) {
		warn_alloc(gfp_mask, NULL,
			"vmalloc error: size %lu, vm_struct allocation failed",
			real_size);
		goto fail;
	}

	addr = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
	if (!addr)
		goto fail;

	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_uninitialized_flag(area);

	size = PAGE_ALIGN(size);
	kmemleak_vmalloc(area, size, gfp_mask);

	return addr;

fail:
	if (shift > PAGE_SHIFT) {
		shift = PAGE_SHIFT;
		align = real_align;
		size = real_size;
		goto again;
	}

	return NULL;
}

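/*
 * Illustrative sketch, not part of this file: a zeroing, node-local wrapper
 * built directly on __vmalloc_node_range(), equivalent in spirit to
 * vzalloc_node() further below.  The wrapper name is an assumption for the
 * example only.
 */
#if 0	/* example only, never compiled */
static void *example_vzalloc_on_node(unsigned long size, int node)
{
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
				    0, node, __builtin_return_address(0));
}
#endif
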
/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size:	    allocation size
 * @align:	    desired alignment
 * @gfp_mask:	    flags for the page level allocator
 * @node:	    node to use for allocation or NUMA_NO_NODE
 * @caller:	    caller's return address
 *
 * Allocate enough pages to cover @size from the page level allocator with
 * @gfp_mask flags. Map them into contiguous kernel virtual space.
 *
 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
 * and __GFP_NOFAIL are not supported.
 *
 * Any use of gfp flags outside of GFP_KERNEL should be consulted
 * with mm people.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, PAGE_KERNEL, 0, node, caller);
}
/*
 * This is only for performance analysis of vmalloc and stress purposes.
 * It is required by the vmalloc test module, therefore do not use it
 * for anything else.
 */
#ifdef CONFIG_TEST_VMALLOC_MODULE
EXPORT_SYMBOL_GPL(__vmalloc_node);
#endif

void *__vmalloc(unsigned long size, gfp_t gfp_mask)
{
	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size:    allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);

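/*
 * Illustrative sketch, not part of this file: the canonical vmalloc()/vfree()
 * pairing for a large, virtually contiguous buffer.  The structure and
 * helper names are assumptions for the example only.
 */
#if 0	/* example only, never compiled */
struct example_big_table {
	unsigned long nr_entries;
	u64 entries[];
};

static struct example_big_table *example_alloc_table(unsigned long nr)
{
	struct example_big_table *t;

	t = vmalloc(struct_size(t, entries, nr));
	if (!t)
		return NULL;

	t->nr_entries = nr;
	return t;		/* release with vfree(t) when done */
}
#endif
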
/**
 * vmalloc_no_huge - allocate virtually contiguous memory using small pages
 * @size:     allocation size
 *
 * Allocate enough non-huge pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_no_huge(unsigned long size)
{
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
				    NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_no_huge);

/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size:    allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_user(unsigned long size)
{
	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
				    VM_USERMAP, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	  allocation size
 * @node:	  numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL, node,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(vzalloc_node);

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
/*
 * 64b systems should always have either DMA or DMA32 zones. For others
 * GFP_DMA32 should do the right thing and use the normal zone.
 */
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size:	allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:	     allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_32_user(unsigned long size)
{
	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
				    VM_USERMAP, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32_user);

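/*
 * Illustrative sketch, not part of this file: a driver that must keep its
 * buffer 32-bit addressable (for instance for an old DMA engine) can use
 * vmalloc_32(); per-page bus addresses would still come from the DMA API.
 * The buffer size and helper name are assumptions for the example only.
 */
#if 0	/* example only, never compiled */
static void *example_alloc_low_buffer(void)
{
	/* Pages come from GFP_VMALLOC32, so physical addresses fit in 32 bits. */
	return vmalloc_32(SZ_4M);	/* SZ_4M from <linux/sizes.h> */
}
#endif
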
/*
 * Small helper routine: copy contents from addr to buf.
 * If the page is not present, fill with zeroes.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/* We can expect USER0 is not used -- see vread() */
			void *map = kmap_atomic(p);

			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 * vread() - read vmalloc area in a safe way.
 * @buf:	buffer for reading data
 * @addr:	vm address.
 * @count:	number of bytes to be read.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copies data from that area to a given buffer. If the given memory range
 * of [addr...addr+count) includes some valid address, data is copied to
 * proper area of @buf. If there are memory holes, they'll be zero-filled.
 * IOREMAP area is treated as memory hole and no copy is done.
 *
 * If [addr...addr+count) doesn't include any intersection with a live
 * vm_struct area, returns 0. @buf should be a kernel buffer.
 *
 * Note: In usual ops, vread() is never necessary because the caller
 * should know vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access vmalloc area without
 * any information, as /proc/kcore.
 *
 * Return: number of bytes for which addr and buf should be increased
 * (same number as @count) or %0 if [addr...addr+count) doesn't
 * include any intersection with valid vmalloc area
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	va = find_vmap_area_exceed_addr((unsigned long)addr);
	if (!va)
		goto finished;

	/* no intersects with alive vmap_area */
	if ((unsigned long)addr + count <= va->va_start)
		goto finished;

	list_for_each_entry_from(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!va->vm)
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}

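/*
 * Illustrative sketch, not part of this file: how a /proc/kcore-style reader
 * might use vread() to copy out a vmalloc range it knows nothing about.
 * The destination buffer handling and helper name are assumptions for the
 * example only.
 */
#if 0	/* example only, never compiled */
static long example_dump_vmalloc(char *dst, void *start, unsigned long len)
{
	/* Holes and ioremap ranges come back zero-filled, never faulted. */
	return vread(dst, (char *)start, len);
}
#endif
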
/**
 * remap_vmalloc_range_partial - map vmalloc pages to userspace
 * @vma:		vma to cover
 * @uaddr:		target user address to start at
 * @kaddr:		virtual address of vmalloc kernel memory
 * @pgoff:		offset from @kaddr to start at
 * @size:		size of map area
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that @kaddr is a valid vmalloc'ed area,
 * and that it is big enough to cover the range starting at
 * @uaddr in @vma. Will return failure if that criteria isn't
 * met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long pgoff,
				unsigned long size)
{
	struct vm_struct *area;
	unsigned long off;
	unsigned long end_index;

	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
		return -EINVAL;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
		return -EINVAL;

	if (check_add_overflow(size, off, &end_index) ||
	    end_index > get_vm_area_size(area))
		return -EINVAL;
	kaddr += off;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma:		vma to cover (map full range of vma)
 * @addr:		vmalloc memory
 * @pgoff:		number of pages into addr before first page to map
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criteria isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr, pgoff,
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);

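/*
 * Illustrative sketch, not part of this file: the usual pairing of
 * vmalloc_user() with remap_vmalloc_range() in a driver's mmap handler.
 * The buffer pointer, its lifetime handling and the handler name are
 * assumptions for the example only.
 */
#if 0	/* example only, never compiled */
static void *example_shared_buf;	/* from vmalloc_user(), so VM_USERMAP is set */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Maps the whole vma starting at page offset 0 of the buffer. */
	return remap_vmalloc_range(vma, example_shared_buf, 0);
}
#endif
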
void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;

	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return rb_entry_safe(n, struct vmap_area, rb_node);
}

/**
 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
 * @addr: target address
 *
 * Returns: vmap_area if it is found. If there is no such area
 *   the first highest (reverse order) vmap_area is returned
 *   i.e. va->va_start < addr && va->va_end < addr, or NULL
 *   if there are no areas before @addr.
 */
static struct vmap_area *
pvm_find_va_enclose_addr(unsigned long addr)
{
	struct vmap_area *va, *tmp;
	struct rb_node *n;

	n = free_vmap_area_root.rb_node;
	va = NULL;

	while (n) {
		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_start <= addr) {
			va = tmp;
			if (tmp->va_end >= addr)
				break;

			n = n->rb_right;
		} else {
			n = n->rb_left;
		}
	}

	return va;
}

/**
 * pvm_determine_end_from_reverse - find the highest aligned address
 * of free block below VMALLOC_END
 * @va:
 *   in - the VA we start the search (reverse order);
 *   out - the VA with the highest aligned end address.
 * @align: alignment for required highest address
 *
 * Returns: determined end address within vmap_area
 */
static unsigned long
pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
{
	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (likely(*va)) {
		list_for_each_entry_from_reverse((*va),
				&free_vmap_area_list, list) {
			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
			if ((*va)->va_start < addr)
				return addr;
		}
	}

	return 0;
}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas. This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes. To avoid interacting with regular vmallocs, these
 * areas are allocated from top.
 *
 * Despite its complicated look, this allocator is rather simple. It
 * does everything top-down and scans free blocks from the end looking
 * for matching base. While scanning, if any of the areas do not fit the
 * base address is pulled down to fit the area. Scanning is repeated till
 * all the areas fit and then all necessary data structures are inserted
 * and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *va;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, size, end, last_end, orig_start, orig_end;
	bool purged = false;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = area + 1; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			BUG_ON(start2 < end && start < end2);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&free_vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	va = pvm_find_va_enclose_addr(vmalloc_end);
	base = pvm_determine_end_from_reverse(&va, align) - end;

	while (true) {
		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end)
			goto overflow;

		/*
		 * Fitting base has not been found.
		 */
		if (va == NULL)
			goto overflow;

		/*
		 * If required width exceeds current VA block, move
		 * base downwards and then recheck.
		 */
		if (base + end > va->va_end) {
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If this VA does not fit, move base downwards and recheck.
		 */
		if (base + start < va->va_start) {
			va = node_to_va(rb_prev(&va->rb_node));
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one. If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;

		start = offsets[area];
		end = start + sizes[area];
		va = pvm_find_va_enclose_addr(base + end);
	}

	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		enum fit_type type;
		int ret;

		start = base + offsets[area];
		size = sizes[area];

		va = pvm_find_va_enclose_addr(start);
		if (WARN_ON_ONCE(va == NULL))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		type = classify_va_fit_type(va, start, size);
		if (WARN_ON_ONCE(type == NOTHING_FIT))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		ret = adjust_va_to_fit_type(va, start, size, type);
		if (unlikely(ret))
			goto recovery;

		/* Allocated area. */
		va = vas[area];
		va->va_start = start;
		va->va_end = start + size;
	}

	spin_unlock(&free_vmap_area_lock);

	/* populate the kasan shadow space */
	for (area = 0; area < nr_vms; area++) {
		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
			goto err_free_shadow;

		kasan_unpoison_vmalloc((void *)vas[area]->va_start,
				       sizes[area]);
	}

	/* insert all vm's */
	spin_lock(&vmap_area_lock);
	for (area = 0; area < nr_vms; area++) {
		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);

		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);
	}
	spin_unlock(&vmap_area_lock);

	kfree(vas);
	return vms;

recovery:
	/*
	 * Remove previously allocated areas. There is no
	 * need in removing these areas from the busy tree,
	 * because they are inserted only on the final step
	 * and when pcpu_get_vm_areas() is success.
	 */
	while (area--) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
				&free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
	}

overflow:
	spin_unlock(&free_vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = true;

		/* Before "retry", check if we recover. */
		for (area = 0; area < nr_vms; area++) {
			if (vas[area])
				continue;

			vas[area] = kmem_cache_zalloc(
				vmap_area_cachep, GFP_KERNEL);
			if (!vas[area])
				goto err_free;
		}

		goto retry;
	}

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas[area])
			kmem_cache_free(vmap_area_cachep, vas[area]);

		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;

err_free_shadow:
	spin_lock(&free_vmap_area_lock);
	/*
	 * We release all the vmalloc shadows, even the ones for regions that
	 * hadn't been successfully added. This relies on kasan_release_vmalloc
	 * being able to tolerate this case.
	 */
	for (area = 0; area < nr_vms; area++) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
				&free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
		kfree(vms[area]);
	}
	spin_unlock(&free_vmap_area_lock);
	kfree(vas);
	kfree(vms);
	return NULL;
}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */

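/*
 * Illustrative sketch, not part of this file: the percpu allocator asks for
 * two congruent areas and frees them again.  The offsets, sizes and helper
 * name are assumptions for the example only.
 */
#if 0	/* example only, never compiled */
static int example_alloc_congruent_areas(void)
{
	const unsigned long offsets[] = { 0, SZ_8M };	/* SZ_* from <linux/sizes.h> */
	const size_t sizes[] = { SZ_1M, SZ_1M };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, 2, SZ_1M);
	if (!vms)
		return -ENOMEM;

	/* vms[0]->addr and vms[1]->addr keep the same relative offsets. */

	pcpu_free_vm_areas(vms, 2);
	return 0;
}
#endif
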
#ifdef CONFIG_PRINTK
bool vmalloc_dump_obj(void *object)
{
	struct vm_struct *vm;
	void *objp = (void *)PAGE_ALIGN((unsigned long)object);

	vm = find_vm_area(objp);
	if (!vm)
		return false;
	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
		vm->nr_pages, (unsigned long)vm->addr, vm->caller);
	return true;
}
#endif

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_purge_lock)
	__acquires(&vmap_area_lock)
{
	mutex_lock(&vmap_purge_lock);
	spin_lock(&vmap_area_lock);

	return seq_list_start(&vmap_area_list, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &vmap_area_list, pos);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
	__releases(&vmap_purge_lock)
{
	spin_unlock(&vmap_area_lock);
	mutex_unlock(&vmap_purge_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static void show_purge_info(struct seq_file *m)
{
	struct vmap_area *va;

	spin_lock(&purge_vmap_area_lock);
	list_for_each_entry(va, &purge_vmap_area_list, list) {
		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);
	}
	spin_unlock(&purge_vmap_area_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va;
	struct vm_struct *v;

	va = list_entry(p, struct vmap_area, list);

	/*
	 * s_show can race with remove_vm_area(): a NULL ->vm means the
	 * vmap area is being torn down or belongs to a vm_map_ram allocation.
	 */
	if (!va->vm) {
		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);

		goto final;
	}

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%pa", &v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (v->flags & VM_DMA_COHERENT)
		seq_puts(m, " dma-coherent");

	if (is_vmalloc_addr(v->pages))
		seq_puts(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');

	/*
	 * As a final step, dump "unpurged" areas.
	 */
final:
	if (list_is_last(&va->list, &vmap_area_list))
		show_purge_info(m);

	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int __init proc_vmalloc_init(void)
{
	if (IS_ENABLED(CONFIG_NUMA))
		proc_create_seq_private("vmallocinfo", 0400, NULL,
				&vmalloc_op,
				nr_node_ids * sizeof(unsigned int), NULL);
	else
		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
	return 0;
}
module_init(proc_vmalloc_init);

#endif /* CONFIG_PROC_FS */