/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 * written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>

/*
 * Alignment mask used to "colour" shared mappings so that equal file
 * offsets map to equal cache colours (see the MAP_SHARED aliasing check
 * and COLOUR_ALIGN below).  PAGE_SIZE - 1 suffices for sane caches.
 */
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)
25 | static int mmap_is_legacy(void) | |
26 | { | |
27 | if (current->personality & ADDR_COMPAT_LAYOUT) | |
28 | return 1; | |
29 | ||
30 | if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) | |
31 | return 1; | |
32 | ||
33 | return sysctl_legacy_va_layout; | |
34 | } | |
35 | ||
36 | static unsigned long mmap_base(unsigned long rnd) | |
37 | { | |
38 | unsigned long gap = rlimit(RLIMIT_STACK); | |
39 | ||
40 | if (gap < MIN_GAP) | |
41 | gap = MIN_GAP; | |
42 | else if (gap > MAX_GAP) | |
43 | gap = MAX_GAP; | |
44 | ||
45 | return PAGE_ALIGN(TASK_SIZE - gap - rnd); | |
46 | } | |
47 | ||
/*
 * Round @addr up to the next shm-colour boundary, then add the colour
 * of file offset @pgoff so that equal offsets share a cache colour.
 */
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

/* Search direction for arch_get_unmapped_area_common(). */
enum mmap_allocation_direction {UP, DOWN};
/*
 * Common worker for arch_get_unmapped_area{,_topdown}().
 *
 * Find a free virtual range of @len bytes for a new mapping, honouring
 * MIPS cache colouring: file-backed or shared mappings are aligned via
 * COLOUR_ALIGN/shm_align_mask so aliasing constraints are respected.
 *
 * @addr0: caller-supplied address hint (0 for "don't care")
 * @dir:   UP for the classic bottom-up search, DOWN for top-down layout
 *
 * Returns the chosen address, or a negative errno on failure.
 */
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	/* File-backed or shared mappings need cache-colour alignment. */
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		/* Honour the hint only if the range is free and in bounds. */
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		/* Page-aligned result means success; errors are not aligned. */
		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	/* Bottom-up search (also the top-down fallback path). */
	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}
d0be89f6 JP |
126 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, |
127 | unsigned long len, unsigned long pgoff, unsigned long flags) | |
128 | { | |
16650107 | 129 | return arch_get_unmapped_area_common(filp, |
d0be89f6 JP |
130 | addr0, len, pgoff, flags, UP); |
131 | } | |
132 | ||
133 | /* | |
134 | * There is no need to export this but sched.h declares the function as | |
135 | * extern so making it static here results in an error. | |
136 | */ | |
137 | unsigned long arch_get_unmapped_area_topdown(struct file *filp, | |
138 | unsigned long addr0, unsigned long len, unsigned long pgoff, | |
139 | unsigned long flags) | |
140 | { | |
16650107 | 141 | return arch_get_unmapped_area_common(filp, |
d0be89f6 JP |
142 | addr0, len, pgoff, flags, DOWN); |
143 | } | |
144 | ||
2b68f6ca | 145 | unsigned long arch_mmap_rnd(void) |
1f0569df KC |
146 | { |
147 | unsigned long rnd; | |
148 | ||
5ef11c35 | 149 | rnd = get_random_long(); |
1f0569df KC |
150 | rnd <<= PAGE_SHIFT; |
151 | if (TASK_IS_32BIT_ADDR) | |
152 | rnd &= 0xfffffful; | |
153 | else | |
154 | rnd &= 0xffffffful; | |
155 | ||
156 | return rnd; | |
157 | } | |
158 | ||
6f6c3c33 RB |
159 | void arch_pick_mmap_layout(struct mm_struct *mm) |
160 | { | |
161 | unsigned long random_factor = 0UL; | |
162 | ||
1f0569df | 163 | if (current->flags & PF_RANDOMIZE) |
2b68f6ca | 164 | random_factor = arch_mmap_rnd(); |
6f6c3c33 | 165 | |
d0be89f6 JP |
166 | if (mmap_is_legacy()) { |
167 | mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; | |
168 | mm->get_unmapped_area = arch_get_unmapped_area; | |
d0be89f6 JP |
169 | } else { |
170 | mm->mmap_base = mmap_base(random_factor); | |
171 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | |
d0be89f6 | 172 | } |
6f6c3c33 RB |
173 | } |
174 | ||
175 | static inline unsigned long brk_rnd(void) | |
176 | { | |
5ef11c35 | 177 | unsigned long rnd = get_random_long(); |
6f6c3c33 RB |
178 | |
179 | rnd = rnd << PAGE_SHIFT; | |
180 | /* 8MB for 32bit, 256MB for 64bit */ | |
181 | if (TASK_IS_32BIT_ADDR) | |
182 | rnd = rnd & 0x7ffffful; | |
183 | else | |
184 | rnd = rnd & 0xffffffful; | |
185 | ||
186 | return rnd; | |
187 | } | |
188 | ||
189 | unsigned long arch_randomize_brk(struct mm_struct *mm) | |
190 | { | |
191 | unsigned long base = mm->brk; | |
192 | unsigned long ret; | |
193 | ||
194 | ret = PAGE_ALIGN(base + brk_rnd()); | |
195 | ||
196 | if (ret < mm->brk) | |
197 | return mm->brk; | |
198 | ||
199 | return ret; | |
200 | } | |
/*
 * Return nonzero iff @kaddr translates to a page frame the kernel
 * considers valid (pfn_valid()).
 */
int __virt_addr_valid(const volatile void *kaddr)
{
	unsigned long pfn = PFN_DOWN(virt_to_phys(kaddr));

	return pfn_valid(pfn);
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);