/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
				    unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
				    unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long end = addr + size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
	unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
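
/*
 * Illustrative usage sketch, not part of the original file: look up the
 * struct page and pfn backing the first byte of a vmalloc()'ed buffer.
 * The function name is hypothetical.
 */
static __maybe_unused void example_lookup_backing_page(void)
{
	void *buf = vmalloc(4 * PAGE_SIZE);

	if (!buf)
		return;
	/* The helpers above walk init_mm's page tables for us. */
	printk(KERN_DEBUG "first page: %p, first pfn: %lu\n",
	       vmalloc_to_page(buf), vmalloc_to_pfn(buf));
	vfree(buf);
}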

static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					    unsigned long start, unsigned long end,
					    int node, gfp_t gfp_mask)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);

	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask);
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(const void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(const void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc().  If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
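
/*
 * Illustrative usage sketch, not part of the original file: stitch two
 * independently allocated (possibly highmem) pages into one contiguous
 * kernel virtual range.  Note that vunmap() only drops the mapping; the
 * caller still owns and must free the pages.  Names are hypothetical.
 */
static __maybe_unused void example_vmap_pair(void)
{
	struct page *pages[2];
	void *virt;

	pages[0] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	pages[1] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	if (!pages[0] || !pages[1])
		goto out_free;

	virt = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (virt) {
		memset(virt, 0, 2 * PAGE_SIZE);	/* both pages, one pointer */
		vunmap(virt);
	}
out_free:
	if (pages[0])
		__free_page(pages[0]);
	if (pages[1])
		__free_page(pages[1]);
}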

void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
					PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
				int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
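
/*
 * Illustrative usage sketch, not part of the original file: the common
 * pattern for a large scratch buffer that only needs to be virtually,
 * not physically, contiguous.  The function name is hypothetical.
 */
static __maybe_unused int example_scratch(unsigned long len)
{
	char *buf = vmalloc(len);

	if (!buf)
		return -ENOMEM;
	memset(buf, 0, len);
	/* ... use buf; don't hand it to DMA, the backing pages are scattered ... */
	vfree(buf);
	return 0;
}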

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);
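
/*
 * Illustrative sketch, not part of the original file: allocate from the
 * calling CPU's NUMA node to keep accesses local.  Name is hypothetical.
 */
static __maybe_unused void *example_local_alloc(unsigned long size)
{
	return vmalloc_node(size, numa_node_id());
}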

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
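
/*
 * Illustrative sketch, not part of the original file: vread() is how
 * e.g. /proc/kcore peeks at vmalloc space.  It copies only bytes that
 * fall inside registered vm areas, zero-fills the stretch of @buf in
 * front of an area, and returns how far it advanced through @buf.
 * The wrapper name is hypothetical.
 */
static __maybe_unused long example_peek(char *dst, void *vm_addr,
					unsigned long len)
{
	return vread(dst, (char *)vm_addr, len);
}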

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 * @returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma.  Will return failure if
 * those criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
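
/*
 * Illustrative sketch, not part of the original file: a ->mmap handler
 * exporting a buffer to userspace.  example_buf is hypothetical and must
 * have been allocated with vmalloc_user() (or vmalloc_32_user()) so the
 * area carries VM_USERMAP and is zeroed; plain vmalloc() memory would be
 * rejected with -EINVAL.  Assumes <linux/fs.h> for struct file.
 */
static void *example_buf;	/* set up elsewhere: vmalloc_user(BUF_SIZE) */

static __maybe_unused int example_mmap(struct file *file,
					struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}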

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 * @returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
 * are created.  If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
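
/*
 * Illustrative sketch, not part of the original file: reserve a page of
 * kernel address space whose page tables are guaranteed to exist, e.g.
 * so a hypervisor backend can later install the real mapping.  Release
 * with free_vm_area() below.  The function name is hypothetical.
 */
static __maybe_unused struct vm_struct *example_reserve_kva(void)
{
	struct vm_struct *vm = alloc_vm_area(PAGE_SIZE);

	/* On success, vm->addr is valid KVA with constructed pagetables
	   but no actual mappings yet. */
	return vm;
}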

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);