/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>

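/*
 * Maximum stack randomization in bytes (STACK_RND_MASK pages), or 0 if
 * randomization is disabled for this task.
 */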
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

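/*
 * Use the legacy (bottom-up) layout if the ADDR_COMPAT_LAYOUT personality
 * bit is set, if the legacy_va_layout sysctl is enabled, or if the stack
 * rlimit is unlimited: with an unbounded stack no sensible gap can be
 * reserved below STACK_TOP for a top-down mmap base.
 */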
static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

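/* Page-aligned random offset of up to MMAP_RND_MASK pages for the mmap base. */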
unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

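/*
 * Base of the top-down mmap area: just below STACK_TOP, leaving room for
 * the maximum stack randomization, the mmap randomization and a stack gap
 * clamped between MIN_GAP and MAX_GAP.
 */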
static inline unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - rnd - gap;
}

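/*
 * Bottom-up search for a free address range. On s390 the usable address
 * space is bounded by the ASCE limit of the current page table; if the
 * chosen range ends above mm->context.asce_limit, crst_table_upgrade()
 * adds a higher region-table level so that the mapping fits.
 */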
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int rc;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (addr & ~PAGE_MASK)
		return addr;

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit) {
		rc = crst_table_upgrade(mm);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

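/*
 * Top-down counterpart: search below mm->mmap_base first and fall back to
 * a bottom-up search if that fails.
 */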
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int rc;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (addr & ~PAGE_MASK)
			return addr;
	}

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit) {
		rc = crst_table_upgrade(mm);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}