/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 * written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
6f6c3c33 RB |
19 | |
/*
 * Alignment mask used to "colour" shared mappings so that they do not
 * violate virtual-cache aliasing constraints.  PAGE_SIZE - 1 means plain
 * page alignment, i.e. no extra colouring is required ("sane caches").
 * NOTE(review): presumably platform setup code widens this for aliasing
 * caches — not visible in this file; confirm against cache init code.
 */
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)
d0be89f6 JP |
26 | |
27 | static int mmap_is_legacy(void) | |
28 | { | |
29 | if (current->personality & ADDR_COMPAT_LAYOUT) | |
30 | return 1; | |
31 | ||
32 | if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) | |
33 | return 1; | |
34 | ||
35 | return sysctl_legacy_va_layout; | |
36 | } | |
37 | ||
38 | static unsigned long mmap_base(unsigned long rnd) | |
39 | { | |
40 | unsigned long gap = rlimit(RLIMIT_STACK); | |
41 | ||
42 | if (gap < MIN_GAP) | |
43 | gap = MIN_GAP; | |
44 | else if (gap > MAX_GAP) | |
45 | gap = MAX_GAP; | |
46 | ||
47 | return PAGE_ALIGN(TASK_SIZE - gap - rnd); | |
48 | } | |
49 | ||
/*
 * COLOUR_ALIGN(addr, pgoff): round @addr up to the next cache-colour
 * boundary (shm_align_mask + 1), then add the sub-boundary offset that
 * @pgoff implies, so the mapping gets the same cache colour as the file
 * offset it maps — required when shm_align_mask is wider than a page.
 */
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

/* Search direction for arch_get_unmapped_area_common(). */
enum mmap_allocation_direction {UP, DOWN};
55 | ||
/*
 * Common worker for arch_get_unmapped_area{,_topdown}(): find a free
 * virtual range of @len bytes for a new mapping, honouring MAP_FIXED,
 * any address hint in @addr0, and cache-colour alignment for shared or
 * file-backed mappings.  @dir selects bottom-up (UP) or top-down (DOWN)
 * search.  Returns the chosen address, or a negative errno.
 */
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	/*
	 * File-backed and shared mappings must be cache-coloured so that
	 * aliases of the same data land on the same cache lines.
	 */
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		/* Honour the hint only if the aligned range is free. */
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		/* Page-aligned result means success; errnos set low bits. */
		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	/* Bottom-up search (also the top-down fallback path). */
	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}
127 | ||
d0be89f6 JP |
128 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, |
129 | unsigned long len, unsigned long pgoff, unsigned long flags) | |
130 | { | |
16650107 | 131 | return arch_get_unmapped_area_common(filp, |
d0be89f6 JP |
132 | addr0, len, pgoff, flags, UP); |
133 | } | |
134 | ||
135 | /* | |
136 | * There is no need to export this but sched.h declares the function as | |
137 | * extern so making it static here results in an error. | |
138 | */ | |
139 | unsigned long arch_get_unmapped_area_topdown(struct file *filp, | |
140 | unsigned long addr0, unsigned long len, unsigned long pgoff, | |
141 | unsigned long flags) | |
142 | { | |
16650107 | 143 | return arch_get_unmapped_area_common(filp, |
d0be89f6 JP |
144 | addr0, len, pgoff, flags, DOWN); |
145 | } | |
146 | ||
/*
 * Generate the per-process mmap ASLR offset: a random page-aligned value
 * of mmap_rnd_bits bits (mmap_rnd_compat_bits for 32-bit tasks when
 * CONFIG_COMPAT is enabled).
 */
unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_COMPAT
	if (TASK_IS_32BIT_ADDR)
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_COMPAT */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	/* Shift after masking so the entropy lands in page-number bits. */
	return rnd << PAGE_SHIFT;
}
160 | ||
6f6c3c33 RB |
161 | void arch_pick_mmap_layout(struct mm_struct *mm) |
162 | { | |
163 | unsigned long random_factor = 0UL; | |
164 | ||
1f0569df | 165 | if (current->flags & PF_RANDOMIZE) |
2b68f6ca | 166 | random_factor = arch_mmap_rnd(); |
6f6c3c33 | 167 | |
d0be89f6 JP |
168 | if (mmap_is_legacy()) { |
169 | mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; | |
170 | mm->get_unmapped_area = arch_get_unmapped_area; | |
d0be89f6 JP |
171 | } else { |
172 | mm->mmap_base = mmap_base(random_factor); | |
173 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | |
d0be89f6 | 174 | } |
6f6c3c33 RB |
175 | } |
176 | ||
177 | static inline unsigned long brk_rnd(void) | |
178 | { | |
5ef11c35 | 179 | unsigned long rnd = get_random_long(); |
6f6c3c33 RB |
180 | |
181 | rnd = rnd << PAGE_SHIFT; | |
182 | /* 8MB for 32bit, 256MB for 64bit */ | |
183 | if (TASK_IS_32BIT_ADDR) | |
184 | rnd = rnd & 0x7ffffful; | |
185 | else | |
186 | rnd = rnd & 0xffffffful; | |
187 | ||
188 | return rnd; | |
189 | } | |
190 | ||
191 | unsigned long arch_randomize_brk(struct mm_struct *mm) | |
192 | { | |
193 | unsigned long base = mm->brk; | |
194 | unsigned long ret; | |
195 | ||
196 | ret = PAGE_ALIGN(base + brk_rnd()); | |
197 | ||
198 | if (ret < mm->brk) | |
199 | return mm->brk; | |
200 | ||
201 | return ret; | |
202 | } | |
196897a2 SR |
203 | |
/*
 * Report whether @kaddr is a valid directly-mapped kernel virtual
 * address, i.e. whether its physical page frame actually exists.
 */
int __virt_addr_valid(const volatile void *kaddr)
{
	unsigned long pfn = PFN_DOWN(virt_to_phys(kaddr));

	return pfn_valid(pfn);
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);