arch/x86/kernel/sys_x86_64.c

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/compat.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();

	addr = (addr + align_mask) & ~align_mask;
	return addr | get_align_bits();
}
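
/*
 * Worked example (hypothetical mask value, not the real va_align setup):
 * if get_align_mask() returned 0x7000, i.e. bits [12:15) take part in the
 * alignment, then for addr == 0x7f0000003000:
 *
 *   (addr + 0x7000) & ~0x7000  ->  0x7f0000008000  (rounded up, bits clear)
 *   result | get_align_bits()  ->  bits [12:15) now carry the per-boot
 *                                  random value instead of zero.
 */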

static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
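
/*
 * Usage note: the early parameter registered above is given on the kernel
 * command line as "align_va_addr=on", "align_va_addr=off",
 * "align_va_addr=32" or "align_va_addr=64", matching the strcmp() chain in
 * control_va_addr_alignment().
 */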
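/*
 * x86-64 mmap(2) entry point: the byte offset must be page-aligned
 * (otherwise -EINVAL is returned) and is converted to a page offset
 * before being handed to the generic sys_mmap_pgoff() helper.
 */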
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}
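/*
 * Choose the search window for a bottom-up allocation: MAP_32BIT requests
 * from native 64-bit callers are confined to the 1GB-2GB range (with the
 * start optionally randomized), everything else runs from the process's
 * mmap base up to the 32-bit or 64-bit task size.
 */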
static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!in_compat_syscall() && (flags & MAP_32BIT)) {
		/* This is usually used to map code in the small code
		   model, so it needs to be in the first 31 bits. Limit
		   it to that. This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			*begin = randomize_page(*begin, 0x02000000);
		}
		return;
	}

	*begin = get_mmap_base(1);
	*end = in_compat_syscall() ? tasksize_32bit() : tasksize_64bit();
}
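/*
 * Bottom-up unmapped-area search. MAP_FIXED requests and acceptable address
 * hints are honoured directly; otherwise vm_unmapped_area() scans upward
 * through the window chosen by find_start_end(). For file-backed mappings
 * the AMD F15h anti-aliasing mask and per-boot random bits from above are
 * folded into the search parameters.
 */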
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}
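/*
 * Top-down variant used with the default mmap layout: vm_unmapped_area()
 * searches downward from the base returned by get_mmap_base(0). MAP_32BIT
 * requests and any failure of the top-down search fall back to the
 * bottom-up routine above.
 */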
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!in_compat_syscall() && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}