]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - arch/arm/mm/mmap.c
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
[mirror_ubuntu-eoan-kernel.git] / arch / arm / mm / mmap.c
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/arm/mm/mmap.c
3 */
1da177e4
LT
4#include <linux/fs.h>
5#include <linux/mm.h>
6#include <linux/mman.h>
7#include <linux/shm.h>
e8edc6e0 8#include <linux/sched.h>
09d9bae0 9#include <linux/io.h>
df5419a9 10#include <linux/personality.h>
cc92c28b 11#include <linux/random.h>
41dfaa93 12#include <asm/cachetype.h>
1da177e4
LT
13
/*
 * COLOUR_ALIGN(addr, pgoff): round "addr" up to the next SHMLBA
 * boundary, then add the offset-within-SHMLBA that page "pgoff" of the
 * object must occupy.  The result is the lowest address >= addr at
 * which page "pgoff" lands on the same cache colour as it does in any
 * other SHMLBA-aligned mapping of the same object.
 */
#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
/*
 * arch_get_unmapped_area - pick an address for a new user mapping.
 * @filp:  file being mapped (NULL for anonymous mappings)
 * @addr:  caller's address hint (0 for "don't care")
 * @len:   length of the mapping in bytes
 * @pgoff: page offset of the mapping within the file
 * @flags: mmap flags (MAP_FIXED, MAP_SHARED, ...)
 *
 * Returns a usable address, or -EINVAL / -ENOMEM on failure.
 * On VIPT-aliasing caches, file-backed and MAP_SHARED mappings are
 * colour-aligned with COLOUR_ALIGN() so that every mapping of a given
 * object page uses the same cache colour.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case: the caller's address is used
	 * verbatim, but a shared mapping at an address with the wrong
	 * cache colour for 'pgoff' must be rejected outright.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* Honour the hint if the (aligned) range is free and fits. */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	/*
	 * Resume the linear search from the cached position unless the
	 * largest hole seen so far is known to be too small, in which
	 * case restart from TASK_UNMAPPED_BASE.
	 */
	if (len > mm->cached_hole_size) {
	        start_addr = addr = mm->free_area_cache;
	} else {
	        start_addr = addr = TASK_UNMAPPED_BASE;
	        mm->cached_hole_size = 0;
	}
	/* 8 bits of randomness in 20 address space bits */
	if ((current->flags & PF_RANDOMIZE) &&
	    !(current->personality & ADDR_NO_RANDOMIZE))
		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	/* Walk the VMA list looking for the first gap of at least 'len'. */
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* Track the largest hole skipped, for the early-out above. */
		if (addr + mm->cached_hole_size < vma->vm_start)
		        mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
113
51635ad2
LB
114
115/*
116 * You really shouldn't be using read() or write() on /dev/mem. This
117 * might go away in the future.
118 */
119int valid_phys_addr_range(unsigned long addr, size_t size)
120{
9ae3ae0b
AR
121 if (addr < PHYS_OFFSET)
122 return 0;
6806bfe1 123 if (addr + size > __pa(high_memory - 1) + 1)
51635ad2
LB
124 return 0;
125
126 return 1;
127}
128
129/*
130 * We don't use supersection mappings for mmap() on /dev/mem, which
131 * means that we can't map the memory area above the 4G barrier into
132 * userspace.
133 */
134int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
135{
136 return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
137}
087aaffc
NP
138
139#ifdef CONFIG_STRICT_DEVMEM
140
141#include <linux/ioport.h>
142
143/*
144 * devmem_is_allowed() checks to see if /dev/mem access to a certain
145 * address is valid. The argument is a physical page number.
146 * We mimic x86 here by disallowing access to system RAM as well as
147 * device-exclusive MMIO regions. This effectively disable read()/write()
148 * on /dev/mem.
149 */
150int devmem_is_allowed(unsigned long pfn)
151{
152 if (iomem_is_exclusive(pfn << PAGE_SHIFT))
153 return 0;
154 if (!page_is_ram(pfn))
155 return 1;
156 return 0;
157}
158
159#endif