/*
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>

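/*
 * Page-granular masks: mmap_rnd_mask bounds the mmap randomization,
 * mmap_align_mask the start alignment of colored mappings. On 64-bit
 * kernels both are chosen per machine type by setup_mmap_rnd() below;
 * on 31-bit kernels they stay zero.
 */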
unsigned long mmap_rnd_mask;
unsigned long mmap_align_mask;

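/*
 * Maximum number of bytes the stack start may be shifted by
 * randomization: STACK_RND_MASK pages, or zero if randomization is
 * disabled for this task (PF_RANDOMIZE cleared or the
 * ADDR_NO_RANDOMIZE personality bit set).
 */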
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole; never reserve more than 5/6 of
 * STACK_TOP for the stack gap.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

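/*
 * Use the legacy bottom-up layout if the task asked for it via the
 * ADDR_COMPAT_LAYOUT personality, if the stack may grow without bound,
 * or if the legacy_va_layout sysctl is set.
 */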
static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

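/*
 * Random offset (in bytes) added to a mapping base: 11 bits of page
 * randomness for 31-bit tasks, the machine-dependent mmap_rnd_mask
 * for 64-bit tasks, nothing if randomization is disabled.
 */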
static unsigned long mmap_rnd(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (is_32bit_task())
		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
	else
		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
}

static unsigned long mmap_base_legacy(void)
{
	return TASK_UNMAPPED_BASE + mmap_rnd();
}

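/*
 * Base for the top-down layout: STACK_TOP minus the maximum stack
 * randomization, the mmap randomization and the stack gap. The gap is
 * RLIMIT_STACK clamped to [MIN_GAP, MAX_GAP]; e.g. with the common
 * 8 MB stack limit it is raised to the 32 MB MIN_GAP.
 */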
static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}

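/*
 * Bottom-up search: honor MAP_FIXED and a usable hint address, then
 * let vm_unmapped_area() scan upwards from mmap_base. 64-bit file and
 * shared mappings are color-aligned via mmap_align_mask, presumably
 * to improve cache and TLB behavior on machines that set it.
 */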
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int do_color_align;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = !is_32bit_task();

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

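/*
 * Top-down counterpart: same MAP_FIXED, hint and coloring rules, but
 * vm_unmapped_area() scans downwards from mm->mmap_base. If that
 * fails, retry bottom-up over the whole address space (see below).
 */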
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int do_color_align;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = !is_32bit_task();

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

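/*
 * Load base for ET_DYN (position independent) executables: two thirds
 * of STACK_TOP, rounded down to the mapping alignment, plus the usual
 * mmap randomization.
 */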
unsigned long randomize_et_dyn(void)
{
	unsigned long base;

	base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
	return base + mmap_rnd();
}

#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy();
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

#else

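/*
 * arch_mmap_check() hook (wired up in asm/mman.h): if a MAP_FIXED
 * request, or the sheer mapping size, cannot fit below the current
 * 3-level page table limit, upgrade to 4 levels (TASK_SIZE up to
 * 1UL << 53) before the mapping is placed. Compat tasks and already
 * upgraded address spaces need no action.
 */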
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
		return 0;
	if (!(flags & MAP_FIXED))
		addr = 0;
	if ((addr + len) >= TASK_SIZE)
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}

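/*
 * Wrappers around the generic allocators above: if the search fails
 * with -ENOMEM while the address space is still limited to 3-level
 * page tables, upgrade to 4 levels and retry once.
 */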
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy();
		mm->get_unmapped_area = s390_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
	}
}

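/*
 * Pick the randomization and alignment masks by machine type: the
 * listed pre-z13 models get 11 bits of page randomness and no extra
 * alignment; z13 and unrecognized (newer) machines get up to ~1 GB of
 * randomization in 512 KB steps, with mappings aligned to 512 KB
 * (0x7f + 1 pages), presumably for cache performance.
 */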
static int __init setup_mmap_rnd(void)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:
	case 0x2064:
	case 0x2066:
	case 0x2084:
	case 0x2086:
	case 0x2094:
	case 0x2096:
	case 0x2097:
	case 0x2098:
	case 0x2817:
	case 0x2818:
	case 0x2827:
	case 0x2828:
		mmap_rnd_mask = 0x7ffUL;
		mmap_align_mask = 0UL;
		break;
	case 0x2964:	/* z13 */
	default:
		mmap_rnd_mask = 0x3ff80UL;
		mmap_align_mask = 0x7fUL;
		break;
	}
	return 0;
}
early_initcall(setup_mmap_rnd);

#endif