arch/x86/kernel/sys_x86_64.c

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
        /* handle 32- and 64-bit case with a single conditional */
        if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
                return 0;

        if (!(current->flags & PF_RANDOMIZE))
                return 0;

        return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * being zeroed. This random value is computed once per boot; this form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
        return va_align.bits & get_align_mask();
}

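/*
 * Give the vDSO the same treatment: round the address up to the alignment
 * boundary and OR in the per-boot random bits.
 */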
unsigned long align_vdso_addr(unsigned long addr)
{
        unsigned long align_mask = get_align_mask();
        addr = (addr + align_mask) & ~align_mask;
        return addr | get_align_bits();
}

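/*
 * Parse the "align_va_addr=" kernel parameter, which selects whether the
 * extra anti-aliasing alignment applies to 32-bit tasks, 64-bit tasks,
 * both ("on"), or neither ("off"); e.g. booting with "align_va_addr=off"
 * disables it entirely (illustrative usage).
 */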
static int __init control_va_addr_alignment(char *str)
{
        /* guard against enabling this on other CPU families */
        if (va_align.flags < 0)
                return 1;

        if (*str == 0)
                return 1;

        if (*str == '=')
                str++;

        if (!strcmp(str, "32"))
                va_align.flags = ALIGN_VA_32;
        else if (!strcmp(str, "64"))
                va_align.flags = ALIGN_VA_64;
        else if (!strcmp(str, "off"))
                va_align.flags = 0;
        else if (!strcmp(str, "on"))
                va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
        else
                return 0;

        return 1;
}
__setup("align_va_addr", control_va_addr_alignment);

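/*
 * The 64-bit mmap() syscall entry: the byte offset must be page-aligned
 * and is converted to a page offset before being handed to the generic
 * sys_mmap_pgoff().
 */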
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, off)
{
        long error;
        error = -EINVAL;
        if (off & ~PAGE_MASK)
                goto out;

        error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
        return error;
}

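/*
 * Choose the address range searched by the bottom-up allocator: MAP_32BIT
 * requests from 64-bit tasks get a 1GB window below 2GB (its start
 * randomized when ASLR is enabled); everything else gets the legacy mmap
 * base up to TASK_SIZE.
 */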
static void find_start_end(unsigned long flags, unsigned long *begin,
                           unsigned long *end)
{
        if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
                /* This is usually needed to map code in the small code
                   model, so it needs to be in the first 31 bits. Limit
                   it to that. This means we need to move the
                   unmapped base down for this case. This can give
                   conflicts with the heap, but we assume that glibc
                   malloc knows how to fall back to mmap. Give it 1GB
                   of playground for now. -AK */
                *begin = 0x40000000;
                *end = 0x80000000;
                if (current->flags & PF_RANDOMIZE) {
                        *begin = randomize_page(*begin, 0x02000000);
                }
        } else {
                *begin = current->mm->mmap_legacy_base;
                *end = TASK_SIZE;
        }
}

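/*
 * Bottom-up search for a free mapping slot. MAP_FIXED requests are returned
 * unchanged, an explicit hint is honored if it fits, and otherwise the range
 * selected by find_start_end() is searched, with the I$ anti-aliasing
 * alignment applied to file-backed mappings.
 */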
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
        unsigned long begin, end;

        if (flags & MAP_FIXED)
                return addr;

        find_start_end(flags, &begin, &end);

        if (len > end)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = begin;
        info.high_limit = end;
        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
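        /* only file-backed mappings get the extra cache-aliasing alignment */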
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
        return vm_unmapped_area(&info);
}

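/*
 * Top-down counterpart used with the default mmap layout: search downward
 * from mm->mmap_base and fall back to the bottom-up path when that fails
 * or when a MAP_32BIT window is requested.
 */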
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED)
                return addr;

        /* for MAP_32BIT mappings we force the legacy mmap base */
        if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
                goto bottomup;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
        addr = vm_unmapped_area(&info);
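        /* a page-aligned result is a valid address; anything else is -ENOMEM */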
        if (!(addr & ~PAGE_MASK))
                return addr;
        VM_BUG_ON(addr != -ENOMEM);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}