/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/llist.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);
	while (llnode) {
		void *p = llnode;
		llnode = llist_next(llnode);
		__vunmap(p, 1);
	}
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
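
/*
 * Illustrative usage (a sketch, not part of the original source): a caller
 * that needs the struct page or pfn backing one byte of a vmalloc'ed
 * buffer; "buf" is a hypothetical name:
 *
 *	void *buf = vmalloc(2 * PAGE_SIZE);
 *	struct page *pg = vmalloc_to_page(buf + PAGE_SIZE);
 *	unsigned long pfn = vmalloc_to_pfn(buf);
 */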


/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;
	struct vmap_area *first;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(!is_power_of_2(align));

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	spin_lock(&vmap_area_lock);
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
		addr = ALIGN(first->va_end, align);
		if (addr < vstart)
			goto nocache;
		if (addr + size - 1 < addr)
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
		if (addr + size - 1 < addr)
			goto overflow;

		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, walk areas until a suitable hole is found */
	while (addr + size > first->va_start && addr + size <= vend) {
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
		addr = ALIGN(first->va_end, align);
		if (addr + size - 1 < addr)
			goto overflow;

		if (list_is_last(&first->list, &vmap_area_list))
			goto found;

		first = list_entry(first->list.next,
				struct vmap_area, list);
	}

found:
	if (addr + size > vend)
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;
	spin_unlock(&vmap_area_lock);

	BUG_ON(va->va_start & (align-1));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}
	if (printk_ratelimit())
		printk(KERN_WARNING
			"vmap allocation for size %lu failed: "
			"use vmalloc=<size> to increase size.\n", size);
	kfree(va);
	return ERR_PTR(-EBUSY);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
				/*
				 * We don't try to update cached_hole_size or
				 * cached_align, but it won't go very wrong.
				 */
			}
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
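
/*
 * Worked example (illustrative, assuming 4KB pages): with 4 online CPUs,
 * fls(4) == 3, so the threshold is 3 * (32MB / 4KB) = 24576 pages; up to
 * 96MB of lazily-freed virtual address space may accumulate before a purge
 * and global TLB flush is attempted.
 */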

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
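
/*
 * Worked example (illustrative, assuming a 32-bit build with 4KB pages and
 * NR_CPUS == 4): VMALLOC_PAGES = 128MB / 4KB = 32768, so VMAP_BBMAP_BITS is
 * VMAP_MIN(1024, VMAP_MAX(64, 32768 / 4 / 16)) = 512 pages per block, and
 * VMAP_BLOCK_SIZE works out to 512 * 4KB = 2MB.
 */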

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
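
/*
 * Illustrative arithmetic (a sketch, not part of the original source):
 * with a 2MB VMAP_BLOCK_SIZE and VMALLOC_START itself 2MB-aligned, an
 * address at VMALLOC_START + 5MB falls in block index 5MB / 2MB = 2.
 */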

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_thiscpu(void)
{
	purge_fragmented_blocks(smp_processor_id());
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;
	int purge = 0;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		if (vb->free < 1UL << order)
			goto next;

		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i < 0) {
			if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
				/* fragmented and no outstanding allocations */
				BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
				purge = 1;
			}
			goto next;
		}
		addr = vb->va->va_start + (i << PAGE_SHIFT);
		BUG_ON(addr_to_vb_idx(addr) !=
				addr_to_vb_idx(vb->va->va_start));
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}
		spin_unlock(&vb->lock);
		break;
next:
		spin_unlock(&vb->lock);
	}

	if (purge)
		purge_fragmented_blocks_thiscpu();

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);
	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
						VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
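
/*
 * Illustrative usage (a sketch, not part of the original source): code that
 * is about to change attributes of pages it owns, and must not leave stale
 * kernel aliases behind, calls
 *
 *	vm_unmap_aliases();
 *
 * after which no CPU holds a lazy vmap TLB entry for those pages.
 */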

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
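
/*
 * Illustrative pairing (a sketch, not part of the original source) for a
 * short-lived linear view of a small page array; "pages", "nr" and "src"
 * are hypothetical:
 *
 *	void *p = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *	if (p) {
 *		memcpy(p, src, nr << PAGE_SHIFT);
 *		vm_unmap_ram(p, nr);
 *	}
 */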

static struct vm_struct *vmlist __initdata;

/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-mapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
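
/*
 * Illustrative sequence (a sketch, not part of the original source):
 * reserving KVA and then populating it, the way vmap() does further down;
 * "pages" and "nr" are hypothetical:
 *
 *	struct vm_struct *area = get_vm_area(nr << PAGE_SHIFT, VM_MAP);
 *	struct page **p = pages;
 *	if (area && map_vm_area(area, PAGE_KERNEL, &p))
 *		vunmap(area->addr);	// give the area back on failure
 */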

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	va->flags |= VM_VM_AREA;
	spin_unlock(&vmap_area_lock);
}

static void clear_vm_unlist(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNLIST,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNLIST;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP)
		align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 * find_vm_area - find a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
		spin_unlock(&vmap_area_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in NMI context (strictly speaking, only if we don't
 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 * conventions for vfree() arch-dependent would be a really bad idea)
 *
 * NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
 *
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	if (unlikely(in_interrupt())) {
		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
		llist_add((struct llist_node *)addr, &p->list);
		schedule_work(&p->wq);
	} else
		__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
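
/*
 * Illustrative usage (a sketch, not part of the original source): stitching
 * a scattered page array into one contiguous kernel view; "pages" and "nr"
 * are hypothetical:
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	if (va) {
 *		... use nr << PAGE_SHIFT bytes at va ...
 *		vunmap(va);
 *	}
 */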

static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, const void *caller)
{
	const int order = 0;
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;
		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;

		if (node < 0)
			page = alloc_page(tmp_mask);
		else
			page = alloc_pages_node(node, tmp_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	warn_alloc_failed(gfp_mask, order,
			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
			  (area->nr_pages*PAGE_SIZE), area->size);
	vfree(area->addr);
	return NULL;
}

/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @start: vm area range start
 * @end: vm area range end
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		goto fail;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
				  start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct has VM_UNLIST flag.
	 * It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_unlist(area);

	/*
	 * A ref_count = 3 is needed because the vm_struct and vmap_area
	 * structures allocated in the __get_vm_area_node() function contain
	 * references to the virtual address of the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 3, gfp_mask);

	return addr;

fail:
	warn_alloc_failed(gfp_mask, 0,
			  "vmalloc: allocation failure: %lu bytes\n",
			  real_size);
	return NULL;
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				    GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);
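
/*
 * Illustrative usage (a sketch, not part of the original source): the
 * classic pattern for a large scratch buffer that only needs virtual
 * contiguity; "buf" is hypothetical:
 *
 *	char *buf = vmalloc(1 << 20);	// 1MB of physically scattered pages
 *	if (buf) {
 *		... use the buffer ...
 *		vfree(buf);
 *	}
 */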
1742
e1ca7788
DY
1743/**
1744 * vzalloc - allocate virtually contiguous memory with zero fill
1745 * @size: allocation size
1746 * Allocate enough pages to cover @size from the page level
1747 * allocator and map them into contiguous kernel virtual space.
1748 * The memory allocated is set to zero.
1749 *
1750 * For tight control over page level allocator and protection flags
1751 * use __vmalloc() instead.
1752 */
1753void *vzalloc(unsigned long size)
1754{
00ef2d2f 1755 return __vmalloc_node_flags(size, NUMA_NO_NODE,
e1ca7788
DY
1756 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1757}
1758EXPORT_SYMBOL(vzalloc);
1759
83342314 1760/**
ead04089
REB
1761 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
1762 * @size: allocation size
83342314 1763 *
ead04089
REB
1764 * The resulting memory area is zeroed so it can be mapped to userspace
1765 * without leaking data.
83342314
NP
1766 */
1767void *vmalloc_user(unsigned long size)
1768{
1769 struct vm_struct *area;
1770 void *ret;
1771
2dca6999
DM
1772 ret = __vmalloc_node(size, SHMLBA,
1773 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
00ef2d2f
DR
1774 PAGE_KERNEL, NUMA_NO_NODE,
1775 __builtin_return_address(0));
2b4ac44e 1776 if (ret) {
db64fe02 1777 area = find_vm_area(ret);
2b4ac44e 1778 area->flags |= VM_USERMAP;
2b4ac44e 1779 }
83342314
NP
1780 return ret;
1781}
1782EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
			      node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);
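
/*
 * Usage sketch (illustrative): per-node allocation keeps a buffer's pages
 * near the CPUs that touch it most; @node is a preference passed down to
 * the page allocator, not a strict guarantee. The helper is hypothetical:
 *
 *	static void *alloc_stats_for_node(int nid, unsigned long size)
 *	{
 *		return vzalloc_node(size, nid);
 *	}
 *
 * A caller might loop for_each_online_node(nid) to build one such buffer
 * per node.
 */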

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      NUMA_NO_NODE, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     NUMA_NO_NODE, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * Small helper routine: copy contents from addr to buf.
 * If the page is not present, fill with zeroes.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * Safe access to a _mapped_ area would require a lock, but
		 * taking one here means adding vmalloc()/vfree() overhead
		 * to this rarely used _debug_ interface. Instead, we use
		 * kmap_atomic() and accept a small overhead in this access
		 * function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}
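
/*
 * Worked example (illustrative, assuming 4KiB pages): for an addr ending
 * in 0x234, offset is 0x234 and length starts as 0xdcc, so at most 0xdcc
 * bytes come from the first page before the loop advances, page by page,
 * through the rest of the range.
 */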

static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * Safe access to a _mapped_ area would require a lock, but
		 * taking one here means adding vmalloc()/vfree() overhead
		 * to this rarely used _debug_ interface. Instead, we use
		 * kmap_atomic() and accept a small overhead in this access
		 * function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 * vread() - read vmalloc area in a safe way.
 * @buf: buffer for reading data
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * Returns the number of bytes by which @addr and @buf should be
 * increased (the same number as @count). Returns 0 if [addr...addr+count)
 * doesn't intersect any live vmalloc area.
 *
 * This function checks that @addr is a valid vmalloc'ed area and
 * copies data from that area to the given buffer. If the given memory
 * range of [addr...addr+count) includes some valid address, data is
 * copied to the proper area of @buf. If there are memory holes, they
 * are zero-filled. An IOREMAP area is treated as a memory hole and no
 * copy is done.
 *
 * If [addr...addr+count) doesn't intersect any live vm_struct area,
 * this returns 0. @buf should be a kernel buffer.
 *
 * Note: in usual ops, vread() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any other information, such as /dev/kmem.
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + vm->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + vm->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}
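
/*
 * Usage sketch (illustrative): a /dev/kmem-style reader bounces through a
 * kernel buffer, since vread() itself never touches userspace. Error
 * handling is abbreviated and the function is hypothetical:
 *
 *	static ssize_t kmem_style_read(char __user *ubuf, char *kaddr,
 *				       size_t len)
 *	{
 *		char *kbuf = kzalloc(len, GFP_KERNEL);
 *		ssize_t n;
 *
 *		if (!kbuf)
 *			return -ENOMEM;
 *		n = vread(kbuf, kaddr, len);
 *		if (n && copy_to_user(ubuf, kbuf, n))
 *			n = -EFAULT;
 *		kfree(kbuf);
 *		return n;
 *	}
 */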

/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf: buffer for source data
 * @addr: vm address.
 * @count: number of bytes to be written.
 *
 * Returns the number of bytes by which @addr and @buf should be
 * increased (the same number as @count).
 * Returns 0 if [addr...addr+count) doesn't intersect any valid
 * vmalloc area.
 *
 * This function checks that @addr is a valid vmalloc'ed area and
 * copies data from the given buffer to that area. If the given memory
 * range of [addr...addr+count) includes some valid address, data is
 * copied from the proper area of @buf. If there are memory holes,
 * nothing is copied to them. An IOREMAP area is treated as a memory
 * hole and no copy is done.
 *
 * If [addr...addr+count) doesn't intersect any live vm_struct area,
 * this returns 0. @buf should be a kernel buffer.
 *
 * Note: in usual ops, vwrite() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any other information, such as /dev/kmem.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + vm->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + vm->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);
	if (!copied)
		return 0;
	return buflen;
}

/**
 * remap_vmalloc_range_partial - map vmalloc pages to userspace
 * @vma: vma to cover
 * @uaddr: target user address to start at
 * @kaddr: virtual address of vmalloc kernel memory
 * @size: size of map area
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that @kaddr is a valid vmalloc'ed area,
 * and that it is big enough to cover the range starting at
 * @uaddr in @vma. Will return failure if that criterion isn't
 * met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long size)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (kaddr + size > area->addr + area->size)
		return -EINVAL;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range_partial);

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criterion isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr + (pgoff << PAGE_SHIFT),
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
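
/*
 * Usage sketch (illustrative): the canonical consumer is a driver ->mmap()
 * handler exposing a buffer that was allocated with vmalloc_user() (so the
 * area carries VM_USERMAP). foo_dev and its field are hypothetical:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		return remap_vmalloc_range(vma, fd->shared, 0);
 *	}
 */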

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	pte_t ***p = data;

	if (p) {
		*(*p) = pte;
		(*p)++;
	}
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 * @ptes: returns the PTEs for the address space
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created.
 *
 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 * allocated for the VM area are returned.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, f, ptes ? &ptes : NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
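
/*
 * Usage sketch (illustrative): reserve one page of kernel VA now and point
 * it at a real page later by filling the returned pte directly, in the
 * style of Xen grant-table mappings. Simplified: no TLB maintenance is
 * shown and the helper is hypothetical:
 *
 *	static void *defer_map_one_page(struct page *page)
 *	{
 *		pte_t *pte;
 *		struct vm_struct *area = alloc_vm_area(PAGE_SIZE, &pte);
 *
 *		if (!area)
 *			return NULL;
 *		set_pte_at(&init_mm, (unsigned long)area->addr, pte,
 *			   mk_pte(page, PAGE_KERNEL));
 *		return area->addr;
 *	}
 */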

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
}

/**
 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
 * @end: target address
 * @pnext: out arg for the next vmap_area
 * @pprev: out arg for the previous vmap_area
 *
 * Returns: %true if either or both of next and prev are found,
 *	    %false if no vmap_area exists
 *
 * Find the vmap_areas whose end addresses enclose @end, i.e. if not
 * NULL, (*pnext)->va_end > @end and (*pprev)->va_end <= @end.
 */
static bool pvm_find_next_prev(unsigned long end,
			       struct vmap_area **pnext,
			       struct vmap_area **pprev)
{
	struct rb_node *n = vmap_area_root.rb_node;
	struct vmap_area *va = NULL;

	while (n) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if (end < va->va_end)
			n = n->rb_left;
		else if (end > va->va_end)
			n = n->rb_right;
		else
			break;
	}

	if (!va)
		return false;

	if (va->va_end > end) {
		*pnext = va;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	} else {
		*pprev = va;
		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
	}
	return true;
}

/**
 * pvm_determine_end - find the highest aligned address between two vmap_areas
 * @pnext: in/out arg for the next vmap_area
 * @pprev: in/out arg for the previous vmap_area
 * @align: alignment
 *
 * Returns: determined end address
 *
 * Find the highest aligned address between *@pnext and *@pprev below
 * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned-down
 * address is between the end addresses of the two vmap_areas.
 *
 * Please note that the address returned by this function may fall
 * inside the *@pnext vmap_area. The caller is responsible for checking
 * that.
 */
static unsigned long pvm_determine_end(struct vmap_area **pnext,
				       struct vmap_area **pprev,
				       unsigned long align)
{
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (*pnext)
		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
	else
		addr = vmalloc_end;

	while (*pprev && (*pprev)->va_end > addr) {
		*pnext = *pprev;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	}

	return addr;
}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas. This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes. To avoid interacting with regular vmallocs, these
 * areas are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple. It
 * does everything top-down and scans areas from the end looking for a
 * matching slot. While scanning, if any of the areas overlaps with an
 * existing vmap_area, the base address is pulled down to fit the
 * area. Scanning is repeated till all the areas fit and then all
 * necessary data structures are inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *prev, *next;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, end, last_end;
	bool purged = false;

	/* verify parameters and allocate data structures */
	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = 0; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			if (area2 == area)
				continue;

			BUG_ON(start2 >= start && start2 < end);
			BUG_ON(end2 <= end && end2 > start);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
		base = vmalloc_end - last_end;
		goto found;
	}
	base = pvm_determine_end(&next, &prev, align) - end;

	while (true) {
		BUG_ON(next && next->va_end <= base + end);
		BUG_ON(prev && prev->va_end > base + end);

		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end) {
			spin_unlock(&vmap_area_lock);
			if (!purged) {
				purge_vmap_area_lazy();
				purged = true;
				goto retry;
			}
			goto err_free;
		}

		/*
		 * If next overlaps, move base downwards so that it's
		 * right below next and then recheck.
		 */
		if (next && next->va_start < base + end) {
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If prev overlaps, shift down next and prev and move
		 * base so that it's right below new next and then
		 * recheck.
		 */
		if (prev && prev->va_end > base + start) {
			next = prev;
			prev = node_to_va(rb_prev(&next->rb_node));
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one. If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;
		start = offsets[area];
		end = start + sizes[area];
		pvm_find_next_prev(base + end, &next, &prev);
	}
found:
	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_area *va = vas[area];

		va->va_start = base + offsets[area];
		va->va_end = va->va_start + sizes[area];
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = base + offsets[last_area];

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);

	kfree(vas);
	return vms;

err_free:
	for (area = 0; area < nr_vms; area++) {
		kfree(vas[area]);
		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;
}
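
/*
 * Worked example (illustrative): with nr_vms == 2, offsets == { 0, 8MB }
 * and sizes == { 4MB, 4MB }, the scan above looks for a base such that
 * [base, base + 4MB) and [base + 8MB, base + 12MB) are both free, walking
 * down from VMALLOC_END and pulling base lower whenever either range
 * collides with an existing vmap_area.
 */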

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_area_lock)
{
	loff_t n = *pos;
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = list_entry((&vmap_area_list)->next, typeof(*va), list);
	while (n > 0 && &va->list != &vmap_area_list) {
		n--;
		va = list_entry(va->list.next, typeof(*va), list);
	}
	if (!n && &va->list != &vmap_area_list)
		return va;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vmap_area *va = p, *next;

	++*pos;
	next = list_entry(va->list.next, typeof(*va), list);
	if (&next->list != &vmap_area_list)
		return next;

	return NULL;
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
{
	spin_unlock(&vmap_area_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		/* Pairs with smp_wmb() in clear_vm_unlist() */
		smp_rmb();
		if (v->flags & VM_UNLIST)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va = p;
	struct vm_struct *v;

	if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
		return 0;

	if (!(va->flags & VM_VM_AREA)) {
		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);
		return 0;
	}

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}
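
/*
 * Example /proc/vmallocinfo line produced by s_show() above (all values
 * are illustrative only):
 *
 *	0xffffc90000001000-0xffffc90000004000   12288 vmalloc_user+0x2e/0x70 pages=2 vmalloc user N0=2
 *
 * The size (12288) includes the guard page, which is why pages=2 covers
 * one page less than the address range.
 */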

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	if (IS_ENABLED(CONFIG_NUMA)) {
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
		if (ptr == NULL)
			return -ENOMEM;
	}
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);

void get_vmalloc_info(struct vmalloc_info *vmi)
{
	struct vmap_area *va;
	unsigned long free_area_size;
	unsigned long prev_end;

	vmi->used = 0;
	vmi->largest_chunk = 0;

	prev_end = VMALLOC_START;

	spin_lock(&vmap_area_lock);

	if (list_empty(&vmap_area_list)) {
		vmi->largest_chunk = VMALLOC_TOTAL;
		goto out;
	}
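
	/*
	 * Worked example (illustrative): with areas at VMALLOC_START + 1M
	 * .. +2M and VMALLOC_START + 5M .. +6M, the loop below yields
	 * used == 2M and largest_chunk == max(1M, 3M, VMALLOC_END - prev_end).
	 */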
db3808c1 2716
f98782dd
JK
2717 list_for_each_entry(va, &vmap_area_list, list) {
2718 unsigned long addr = va->va_start;
db3808c1 2719
f98782dd
JK
2720 /*
2721 * Some archs keep another range for modules in vmalloc space
2722 */
2723 if (addr < VMALLOC_START)
2724 continue;
2725 if (addr >= VMALLOC_END)
2726 break;
db3808c1 2727
f98782dd
JK
2728 if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
2729 continue;
db3808c1 2730
f98782dd 2731 vmi->used += (va->va_end - va->va_start);
db3808c1 2732
f98782dd
JK
2733 free_area_size = addr - prev_end;
2734 if (vmi->largest_chunk < free_area_size)
2735 vmi->largest_chunk = free_area_size;
db3808c1 2736
f98782dd 2737 prev_end = va->va_end;
db3808c1 2738 }
f98782dd
JK
2739
2740 if (VMALLOC_END - prev_end > vmi->largest_chunk)
2741 vmi->largest_chunk = VMALLOC_END - prev_end;
2742
2743out:
2744 spin_unlock(&vmap_area_lock);
db3808c1 2745}
a10aa579
CL
2746#endif
2747