#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
        /* handle 32- and 64-bit case with a single conditional */
        if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
                return 0;

        if (!(current->flags & PF_RANDOMIZE))
                return 0;

        return va_align.mask;
}
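/*
 * In the conditional above, mmap_is_ia32() is 1 for a 32-bit task and 0 for
 * a 64-bit one, so (2 - mmap_is_ia32()) evaluates to 1 or 2 and tests
 * ALIGN_VA_32 or ALIGN_VA_64 in va_align.flags, assuming those flags are
 * defined as bit 0 and bit 1 respectively.
 */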

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
        return va_align.bits & get_align_mask();
}

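/*
 * A hypothetical example of the helpers above: suppose get_align_mask()
 * returns 0x7000 (aliasing bits [12:15)) and the per-boot get_align_bits()
 * value is 0x3000. align_vdso_addr() below then rounds 0x7f1234565000 up to
 * 0x7f1234568000, where the masked bits are clear, and ORs in the random
 * bits, returning 0x7f123456b000.
 */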
unsigned long align_vdso_addr(unsigned long addr)
{
        unsigned long align_mask = get_align_mask();
        addr = (addr + align_mask) & ~align_mask;
        return addr | get_align_bits();
}

static int __init control_va_addr_alignment(char *str)
{
        /* guard against enabling this on other CPU families */
        if (va_align.flags < 0)
                return 1;

        if (*str == 0)
                return 1;

        if (*str == '=')
                str++;

        if (!strcmp(str, "32"))
                va_align.flags = ALIGN_VA_32;
        else if (!strcmp(str, "64"))
                va_align.flags = ALIGN_VA_64;
        else if (!strcmp(str, "off"))
                va_align.flags = 0;
        else if (!strcmp(str, "on"))
                va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
        else
                return 0;

        return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
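/*
 * The "align_va_addr" kernel parameter handled above accepts "32", "64",
 * "on" (both) or "off", per the strcmp() cases; any other value leaves
 * va_align.flags untouched.
 */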

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, off)
{
        long error;
        error = -EINVAL;
        if (off & ~PAGE_MASK)
                goto out;

        error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
        return error;
}
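/*
 * Example of the offset check above: with 4 KiB pages (PAGE_SHIFT == 12),
 * off = 0x3000 passes and becomes page offset 3, while off = 0x3001 has
 * bits set under ~PAGE_MASK and the call fails with -EINVAL.
 */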

static void find_start_end(unsigned long flags, unsigned long *begin,
                           unsigned long *end)
{
        if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
                /* This is usually needed to map code in the small
                   code model, so it needs to be in the first 31 bits.
                   Limit it to that. This means we need to move the
                   unmapped base down for this case. This can give
                   conflicts with the heap, but we assume that glibc
                   malloc knows how to fall back to mmap. Give it 1GB
                   of playground for now. -AK */
                *begin = 0x40000000;
                *end = 0x80000000;
                if (current->flags & PF_RANDOMIZE) {
                        *begin = randomize_page(*begin, 0x02000000);
                }
        } else {
                *begin = current->mm->mmap_legacy_base;
                *end = TASK_SIZE;
        }
}
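/*
 * For MAP_32BIT above, the search window is the 1GB range
 * [0x40000000, 0x80000000); with PF_RANDOMIZE the start of that window is
 * moved by up to 32MB (0x02000000) via randomize_page().
 */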

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
        unsigned long begin, end;

        if (flags & MAP_FIXED)
                return addr;

        find_start_end(flags, &begin, &end);

        if (len > end)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = begin;
        info.high_limit = end;
        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
        return vm_unmapped_area(&info);
}
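/*
 * Note on the info setup above: vm_unmapped_area() picks an address whose
 * bits under info.align_mask match info.align_offset. Seeding align_offset
 * with pgoff << PAGE_SHIFT keeps a file mapping's address consistent with
 * its file offset under that mask, and adding get_align_bits() folds in the
 * per-boot random bits described earlier.
 */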

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED)
                return addr;

        /* for MAP_32BIT mappings we force the legacy mmap base */
        if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
                goto bottomup;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                                (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
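        /*
         * vm_unmapped_area() returns either a page-aligned address or a
         * negative error code whose low bits are set, which is what the
         * (addr & ~PAGE_MASK) test below distinguishes.
         */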
        addr = vm_unmapped_area(&info);
        if (!(addr & ~PAGE_MASK))
                return addr;
        VM_BUG_ON(addr != -ENOMEM);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}