// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly to the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}
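
/*
 * Note: get_align_bits() masks va_align.bits with get_align_mask(), so it
 * returns 0 whenever alignment is disabled; callers may therefore add or
 * OR it in unconditionally.
 */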
unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();
	addr = (addr + align_mask) & ~align_mask;
	return addr | get_align_bits();
}
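
/*
 * Worked example for align_vdso_addr(), with hypothetical values: if
 * get_align_mask() returned 0x7000 and va_align.bits were 0x5000, then
 * addr == 0x2000 would be rounded up to 0x8000 and ORed with 0x5000,
 * giving 0xd000 -- bits [12:15) hold the per-boot random value rather
 * than zero.
 */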

static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
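
/*
 * The option is parsed from the kernel command line, e.g.
 * "align_va_addr=on", "align_va_addr=off", "align_va_addr=32" or
 * "align_va_addr=64"; the leading '=' is skipped above.
 */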

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;

	return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
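
/*
 * Compute the [begin, end) search window for a bottom-up allocation:
 * a 1GB window below 2GB for 64-bit MAP_32BIT requests, otherwise the
 * regular mmap base up to the 32-bit or 64-bit task size.
 */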
static void find_start_end(unsigned long addr, unsigned long flags,
		unsigned long *begin, unsigned long *end)
{
	if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
		/* This is usually used to map code in the small model,
		   so it needs to be in the first 31 bits. Limit it to
		   that. This means we need to move the unmapped base
		   down for this case. This can give conflicts with the
		   heap, but we assume that glibc malloc knows how to
		   fall back to mmap. Give it 1GB of playground for
		   now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			*begin = randomize_page(*begin, 0x02000000);
		}
		return;
	}

	*begin = get_mmap_base(1);
	if (in_32bit_syscall())
		*end = task_size_32bit();
	else
		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
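
/*
 * Bottom-up search, used for the legacy mmap layout: honour MAP_FIXED
 * and a usable address hint, otherwise let vm_unmapped_area() find the
 * lowest fitting gap between the limits from find_start_end().
 */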
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(addr, flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}
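
/*
 * Top-down search, used for the default mmap layout: place the mapping
 * just below the mmap base, optionally opening up the address space
 * above DEFAULT_MAP_WINDOW, and fall back to the bottom-up variant if
 * the top-down search fails.
 */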
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!in_32bit_syscall() && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr &= PAGE_MASK;
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}
get_unmapped_area:

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 *
	 * !in_32bit_syscall() check to avoid high addresses for x32
	 * (and make it no op on native i386).
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}