// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/printk.h>

#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

void *high_memory;
EXPORT_SYMBOL(high_memory);
struct page *mem_map;
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);
unsigned long highest_memmap_pfn;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return page_size(page);
}

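/*
 * For example: a pointer obtained from kmalloc(100, GFP_KERNEL) reports the
 * rounded-up slab object size via ksize(); a pointer into an anonymous
 * mmap() chunk reports the size of the enclosing VMA; any other valid
 * 0-order page falls through to page_size() and reports PAGE_SIZE.
 */
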
/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);

LIST_HEAD(vmap_area_list);

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags)
{
	return __vmalloc(size, flags, PAGE_KERNEL);
}

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

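/*
 * On nommu, kernel and "vmalloc" addresses live in one flat address space,
 * so vread()/vwrite() reduce to straight memcpy() calls on the target range.
 */
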
/*
 * vmalloc - allocate virtually contiguous memory
 *
 * @size:	allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

/*
 * vzalloc - allocate virtually contiguous memory with zero fill
 *
 * @size:	allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size:	allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size:	allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:	allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

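/*
 * vmap(), vunmap() and vm_[un]map_ram() BUG() above because, with no MMU,
 * there is no kernel page table in which to create an aliased mapping: an
 * array of discontiguous pages simply cannot be made virtually contiguous.
 */
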
/*
 * Implement a stub for vmalloc_sync_[un]mapping() if the architecture
 * chose not to have one.
 */
void __weak vmalloc_sync_mappings(void)
{
}

void __weak vmalloc_sync_unmappings(void)
{
}

struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
		 unsigned long num)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_map_pages);

int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
		      unsigned long num)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_map_pages_zero);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file. In this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}

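/*
 * Note that on nommu the heap lives inside a fixed window reserved for the
 * process (mm->start_brk up to mm->context.end_brk), so growing the break
 * never allocates anything: it merely advances mm->brk within that window
 * and flushes the icache over the newly exposed range.
 */
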
/*
 * initialise the percpu counter for VM and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(last->vm_end <= last->vm_start);
	BUG_ON(last->vm_top < last->vm_end);

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(region->vm_end <= region->vm_start);
		BUG_ON(region->vm_top < region->vm_end);
		BUG_ON(region->vm_start < last->vm_top);

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		atomic_long_dec(&mmap_pages_allocated);
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			vmr_fput(region);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY)
			free_page_series(region->vm_start, region->vm_top);
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

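/*
 * The three-level comparison above yields a total order: two VMAs with
 * identical [start, end) ranges (which can legitimately occur here) still
 * get distinct, deterministic slots in the rbtree, keyed last of all by
 * their struct addresses.
 */
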
/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	int i;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *curr = current;

	mm->map_count--;
	for (i = 0; i < VMACACHE_SIZE; i++) {
		/* if the vma is cached, invalidate the entire cache */
		if (curr->vmacache.vmas[i] == vma) {
			vmacache_invalidate(mm);
			break;
		}
	}

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		vma_fput(vma);
	put_nommu_region(vma->vm_region);
	vm_area_free(vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = vmacache_find_exact(mm, addr, end);
	if (vma)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED)
		return -EINVAL;

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* files must support mmap */
		if (!file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		if (file->f_op->mmap_capabilities) {
			capabilities = file->f_op->mmap_capabilities(file);
		} else {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = NOMMU_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					NOMMU_MAP_DIRECT |
					NOMMU_MAP_READ |
					NOMMU_MAP_WRITE;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~NOMMU_MAP_DIRECT;
		if (!(file->f_mode & FMODE_CAN_READ))
			capabilities &= ~NOMMU_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file))
				return -EAGAIN;

			if (!(capabilities & NOMMU_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~NOMMU_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & NOMMU_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~NOMMU_MAP_DIRECT;
		}

		if (capabilities & NOMMU_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
			    ) {
				capabilities &= ~NOMMU_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					pr_warn("MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (path_noexec(&file->f_path)) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & NOMMU_MAP_EXEC)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			   (prot & PROT_EXEC) &&
			   !(capabilities & NOMMU_MAP_EXEC)
			   ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~NOMMU_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = NOMMU_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

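/*
 * For instance: MAP_PRIVATE of a regular file is only possible when the
 * backing object can be copied (NOMMU_MAP_COPY), while MAP_SHARED demands
 * that the driver or filesystem can hand out the memory directly
 * (NOMMU_MAP_DIRECT) - otherwise writes through the mapping could never be
 * seen by other users of the file.
 */
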
/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & NOMMU_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

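/*
 * Note the nommu twist above: even a MAP_PRIVATE, read-only file mapping is
 * given VM_MAYSHARE, because the private copy of a file chunk may be
 * transparently shared with other processes mapping the same chunk, to
 * save RAM.
 */
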
/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = call_mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	unsigned long total, point;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & NOMMU_MAP_DIRECT) {
		ret = call_mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	total = 1 << order;
	point = len >> PAGE_SHIFT;

	/* we don't want to allocate a power-of-2 sized page set */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
		total = point;

	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
	if (!base)
		goto enomem;

	atomic_long_add(total, &mmap_pages_allocated);

	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		ret = kernel_read(vma->vm_file, base, len, &fpos);
		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	} else {
		vma_set_anonymous(vma);
	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	pr_err("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0, NULL);
	return -ENOMEM;
}

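/*
 * Worked example of the trim logic above: len = 3 pages gives order = 2, so
 * total = 4 pages and point = 3.  With sysctl_nr_trim_pages >= 1 the request
 * is trimmed back to exactly 3 pages via alloc_pages_exact() rather than
 * wasting the fourth page of a power-of-2 allocation; vm_top then records
 * how far the series actually extends beyond vm_end.
 */
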
/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap(struct file *file,
			unsigned long addr,
			unsigned long len,
			unsigned long prot,
			unsigned long flags,
			vm_flags_t vm_flags,
			unsigned long pgoff,
			unsigned long *populate,
			struct list_head *uf)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, result;
	int ret;

	*populate = 0;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0)
		return ret;

	/* we ignore the address hint */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = vm_area_alloc(current->mm);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping matches between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & NOMMU_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY)
				vma->vm_flags |= VM_MAPPED_COPY;
			else {
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			vmr_fput(region);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & NOMMU_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & NOMMU_MAP_COPY))
					goto error_just_free;

				capabilities &= ~NOMMU_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file &&
	    (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
	     !(flags & MAP_UNINITIALIZED)))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		vmr_fput(region);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		vma_fput(vma);
	vm_area_free(vma);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	pr_warn("Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
			len, current->pid);
	show_free_areas(0, NULL);
	return -ENOMEM;

error_getting_region:
	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
			len, current->pid);
	show_free_areas(0, NULL);
	return -ENOMEM;
}

unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);

	if (file)
		fput(file);
out:
	return retval;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = vm_area_dup(vma);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
					current->pid, current->comm,
					start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start)
				return -EINVAL;
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end)
			return -EINVAL;
		if (offset_in_page(start))
			return -EINVAL;
		if (end != vma->vm_end && offset_in_page(end))
			return -EINVAL;
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0)
				return ret;
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	return 0;
}
EXPORT_SYMBOL(do_munmap);

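/*
 * So, unlike the MMU case, munmap() here cannot punch a hole in a
 * file-backed mapping: such a request must line up with a whole VMA.  Only
 * anonymous VMAs may be split or shrunk, and even then the chunk to free
 * must sit entirely inside a single VMA.
 */
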
int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len, NULL);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}
}

int vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
static unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (offset_in_page(addr))
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}

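/*
 * In other words, remapping never moves anything here: new_len must fit
 * inside the existing backing region, the VMA must be private, and on
 * success only vma->vm_end changes - the address handed back to userspace
 * is always the old start address.
 */
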
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	pfn += vma->vm_pgoff;
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

vm_fault_t filemap_fault(struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	BUG();
}
EXPORT_SYMBOL(filemap_map_pages);

int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags)
{
	struct vm_area_struct *vma;
	int write = gup_flags & FOLL_WRITE;

	if (down_read_killable(&mm->mmap_sem))
		return 0;

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					  (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);

	return len;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @gup_flags:	flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
}

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
		unsigned int gup_flags)
{
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);

	mmput(mm);
	return len;
}
EXPORT_SYMBOL_GPL(access_process_vm);

/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken and then shrink the
 * vm_regions that extend beyond it so that do_mmap_pgoff() doesn't
 * automatically grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	i_mmap_lock_read(inode->i_mapping);

	/* search for VMAs that fall within the dead zone */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			i_mmap_unlock_read(inode->i_mapping);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	i_mmap_unlock_read(inode->i_mapping);
	up_write(&nommu_region_sem);
	return 0;
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int __meminit init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
subsys_initcall(init_user_reserve);

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int __meminit init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
subsys_initcall(init_admin_reserve);