/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>
#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
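/*
 * Worked example, illustrative only (not compiled): assumes SHMLBA is
 * 16KiB and PAGE_SHIFT is 12, as on aliasing VIPT ARM caches. The
 * macro rounds the hint up to a colour boundary, then adds the
 * requested page's offset within a colour.
 */
#if 0
static void colour_align_example(void)
{
	unsigned long addr = 0x40001000UL;	/* caller's hint */
	unsigned long pgoff = 3;		/* mapping starts at page 3 */
	unsigned long aligned = COLOUR_ALIGN(addr, pgoff);

	/* aligned == 0x40004000 + 0x3000 == 0x40007000, so page 0 of
	 * the object corresponds to 0x40004000, a SHMLBA multiple */
	BUG_ON((aligned - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1));
}
#endif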
/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)
static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}
static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
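/*
 * Illustrative sketch (not compiled): with the common 8MiB stack
 * rlimit the gap is clamped up to MIN_GAP, so the top-down mmap base
 * ends up 128MiB (less the random offset) below TASK_SIZE.
 */
#if 0
static void mmap_base_example(void)
{
	unsigned long gap = 8UL * 1024 * 1024;	/* typical RLIMIT_STACK */

	if (gap < MIN_GAP)
		gap = MIN_GAP;			/* raised to 128MiB */

	BUG_ON(mmap_base(0) != PAGE_ALIGN(TASK_SIZE - gap));
}
#endif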
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);
	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
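/*
 * Illustrative sketch (not compiled): the align_mask/align_offset pair
 * asks vm_unmapped_area() for an address whose low colour bits match
 * the object's, i.e. (addr & align_mask) == (align_offset & align_mask).
 * Values assume SHMLBA == 16KiB and PAGE_SHIFT == 12.
 */
#if 0
static void colour_mask_example(void)
{
	unsigned long align_mask = PAGE_MASK & (SHMLBA - 1);	/* 0x3000 */
	unsigned long align_offset = 5UL << PAGE_SHIFT;		/* pgoff 5 */

	/* any address a returned for this request satisfies
	 * (a & 0x3000) == (0x5000 & 0x3000) == 0x1000 */
	BUG_ON((align_offset & align_mask) != 0x1000);
}
#endif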
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);
	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
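/*
 * Illustrative sketch (not compiled): vm_unmapped_area() reports
 * failure as a negative errno stored in an unsigned long, which can
 * never be page aligned; that is what "addr & ~PAGE_MASK" detects
 * above before the bottom-up retry.
 */
#if 0
static void error_encoding_example(void)
{
	unsigned long addr = (unsigned long)-ENOMEM;

	BUG_ON(!(addr & ~PAGE_MASK));	/* failure value is detectable */
}
#endif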
unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
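/*
 * Illustrative sketch (not compiled): assuming mmap_rnd_bits == 8 and
 * PAGE_SHIFT == 12, the random factor covers 256 page-aligned values,
 * i.e. at most 0xff000 (1MiB - 4KiB) of slide below the fixed base.
 */
#if 0
static void mmap_rnd_range_example(void)
{
	unsigned long max_rnd = ((1UL << 8) - 1) << 12;

	BUG_ON(max_rnd != 0xff000UL);
}
#endif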
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
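/*
 * Illustrative sketch (not compiled): the two layouts search from
 * opposite ends of the address space. Assumes the usual ARM setup
 * where TASK_UNMAPPED_BASE sits near TASK_SIZE / 3.
 */
#if 0
static void layout_example(void)
{
	unsigned long legacy_base = TASK_UNMAPPED_BASE;	/* bottom-up, grows upwards */
	unsigned long topdown_base = mmap_base(0);	/* below stack gap, grows downwards */

	BUG_ON(legacy_base >= topdown_base);
}
#endif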
/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}
/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif