/*
 * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
 *
 * Based on the original implementation which is:
 *     Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *     Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * Parts of the original code have been moved to arch/x86/vdso/vma.c
 *
 * This file implements vsyscall emulation.  vsyscalls are a legacy ABI:
 * Userspace can request certain kernel services by calling fixed
 * addresses.  This concept is problematic:
 *
 * - It interferes with ASLR.
 * - It's awkward to write code that lives in kernel addresses but is
 *   callable by userspace at fixed addresses.
 * - The whole concept is impossible for 32-bit compat userspace.
 * - UML cannot easily virtualize a vsyscall.
 *
 * As of mid-2014, I believe that there is no new userspace code that
 * will use a vsyscall if the vDSO is present.  I hope that there will
 * soon be no new userspace code that will ever use a vsyscall.
 *
 * The code in this file emulates vsyscalls when notified of a page
 * fault to a vsyscall address.
 */

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"

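/*
 * The vsyscall page can be in one of three modes.  NATIVE maps it
 * executable, so calls into it run the real trampolines.  EMULATE maps
 * it non-executable, so every call faults and is emulated by
 * emulate_vsyscall() below.  NONE leaves the page unmapped, so any
 * access raises SIGSEGV.
 */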
static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
        NATIVE;
#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
        NONE;
#else
        EMULATE;
#endif

static int __init vsyscall_setup(char *str)
{
        if (str) {
                if (!strcmp("emulate", str))
                        vsyscall_mode = EMULATE;
                else if (!strcmp("native", str))
                        vsyscall_mode = NATIVE;
                else if (!strcmp("none", str))
                        vsyscall_mode = NONE;
                else
                        return -EINVAL;

                return 0;
        }

        return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);
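
/*
 * Example: booting with "vsyscall=none" on the kernel command line
 * disables the page entirely, overriding whichever compile-time
 * default was selected above.
 */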

static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
                              const char *message)
{
        if (!show_unhandled_signals)
                return;

        printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
                           level, current->comm, task_pid_nr(current),
                           message, regs->ip, regs->cs,
                           regs->sp, regs->ax, regs->si, regs->di);
}

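/*
 * The three vsyscall entries live 1024 bytes apart within the single
 * vsyscall page, so bits 10-11 of a faulting address select the
 * vsyscall number: 0 = gettimeofday, 1 = time, 2 = getcpu.
 */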
static int addr_to_vsyscall_nr(unsigned long addr)
{
        int nr;

        if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
                return -EINVAL;

        nr = (addr & 0xC00UL) >> 10;
        if (nr >= 3)
                return -EINVAL;

        return nr;
}

static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
        /*
         * XXX: if access_ok, get_user, and put_user handled
         * sig_on_uaccess_err, this could go away.
         */

        if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
                siginfo_t info;
                struct thread_struct *thread = &current->thread;

                thread->error_code = 6; /* user fault, no page, write */
                thread->cr2 = ptr;
                thread->trap_nr = X86_TRAP_PF;

                memset(&info, 0, sizeof(info));
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = SEGV_MAPERR;
                info.si_addr = (void __user *)ptr;

                force_sig_info(SIGSEGV, &info, current);
                return false;
        } else {
                return true;
        }
}

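/*
 * Called from the page fault handler when userspace touches a vsyscall
 * address.  Validates the call, runs it through seccomp, performs the
 * real syscall, and then emulates the RET back to the caller.  Returns
 * true if the fault was handled here; false means fall back to normal
 * fault handling.
 */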
bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
        struct task_struct *tsk;
        unsigned long caller;
        int vsyscall_nr, syscall_nr, tmp;
        int prev_sig_on_uaccess_err;
        long ret;

        /*
         * No point in checking CS -- the only way to get here is a user mode
         * trap to a high address, which means that we're in 64-bit user code.
         */

        WARN_ON_ONCE(address != regs->ip);

        if (vsyscall_mode == NONE) {
                warn_bad_vsyscall(KERN_INFO, regs,
                                  "vsyscall attempted with vsyscall=none");
                return false;
        }

        vsyscall_nr = addr_to_vsyscall_nr(address);

        trace_emulate_vsyscall(vsyscall_nr);

        if (vsyscall_nr < 0) {
                warn_bad_vsyscall(KERN_WARNING, regs,
                                  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
                goto sigsegv;
        }

        if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
                warn_bad_vsyscall(KERN_WARNING, regs,
                                  "vsyscall with bad stack (exploit attempt?)");
                goto sigsegv;
        }

        tsk = current;

        /*
         * Check for access_ok violations and find the syscall nr.
         *
         * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
         * 64-bit, so we don't need to special-case it here.  For all the
         * vsyscalls, NULL means "don't write anything" not "write it at
         * address 0".
         */
        switch (vsyscall_nr) {
        case 0:
                if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
                    !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
                        ret = -EFAULT;
                        goto check_fault;
                }

                syscall_nr = __NR_gettimeofday;
                break;

        case 1:
                if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
                        ret = -EFAULT;
                        goto check_fault;
                }

                syscall_nr = __NR_time;
                break;

        case 2:
                if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
                    !write_ok_or_segv(regs->si, sizeof(unsigned))) {
                        ret = -EFAULT;
                        goto check_fault;
                }

                syscall_nr = __NR_getcpu;
                break;
        }

        /*
         * Handle seccomp.  regs->ip must be the original value.
         * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
         *
         * We could optimize the seccomp disabled case, but performance
         * here doesn't matter.
         */
        regs->orig_ax = syscall_nr;
        regs->ax = -ENOSYS;
        tmp = secure_computing(NULL);
        if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
                warn_bad_vsyscall(KERN_DEBUG, regs,
                                  "seccomp tried to change syscall nr or ip");
                do_exit(SIGSYS);
        }
        regs->orig_ax = -1;
        if (tmp)
                goto do_ret; /* skip requested */

        /*
         * With a real vsyscall, page faults cause SIGSEGV.  We want to
         * preserve that behavior to make writing exploits harder.
         */
        prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
        current->thread.sig_on_uaccess_err = 1;

        ret = -EFAULT;
        switch (vsyscall_nr) {
        case 0:
                ret = sys_gettimeofday(
                        (struct timeval __user *)regs->di,
                        (struct timezone __user *)regs->si);
                break;

        case 1:
                ret = sys_time((time_t __user *)regs->di);
                break;

        case 2:
                ret = sys_getcpu((unsigned __user *)regs->di,
                                 (unsigned __user *)regs->si,
                                 NULL);
                break;
        }

        current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;

check_fault:
        if (ret == -EFAULT) {
                /* Bad news -- userspace fed a bad pointer to a vsyscall. */
                warn_bad_vsyscall(KERN_INFO, regs,
                                  "vsyscall fault (exploit attempt?)");

                /*
                 * If we failed to generate a signal for any reason,
                 * generate one here.  (This should be impossible.)
                 */
                if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
                                 !sigismember(&tsk->pending.signal, SIGSEGV)))
                        goto sigsegv;

                return true; /* Don't emulate the ret. */
        }

        regs->ax = ret;

do_ret:
        /* Emulate a ret instruction. */
        regs->ip = caller;
        regs->sp += 8;
        return true;

sigsegv:
        force_sig(SIGSEGV, current);
        return true;
}
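
/*
 * For reference, a legacy binary invokes a vsyscall by calling a fixed
 * address.  An illustrative (not recommended) equivalent in C, using
 * the well-known address of the time vsyscall:
 *
 *	time_t t = ((time_t (*)(time_t *))0xffffffffff600400UL)(NULL);
 *
 * Under EMULATE that call faults, lands in emulate_vsyscall() above,
 * and returns via the emulated ret.  New code should use the vDSO
 * instead.
 */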

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64-bit vsyscall page now.  32-bit has a real VMA now and
 * does not need special handling anymore.
 */
static const char *gate_vma_name(struct vm_area_struct *vma)
{
        return "[vsyscall]";
}
static const struct vm_operations_struct gate_vma_ops = {
        .name = gate_vma_name,
};
static struct vm_area_struct gate_vma = {
        .vm_start = VSYSCALL_ADDR,
        .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
        .vm_page_prot = PAGE_READONLY_EXEC,
        .vm_flags = VM_READ | VM_EXEC,
        .vm_ops = &gate_vma_ops,
};

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_COMPAT
        if (!mm || mm->context.ia32_compat)
                return NULL;
#endif
        if (vsyscall_mode == NONE)
                return NULL;
        return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(mm);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context.  It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
        return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
}

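/*
 * Install the vsyscall page in the fixmap at boot.  NATIVE uses the
 * executable PAGE_KERNEL_VSYSCALL protection; EMULATE reuses the
 * non-executable PAGE_KERNEL_VVAR protection so that every call into
 * the page traps into emulate_vsyscall().
 */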
void __init map_vsyscall(void)
{
        extern char __vsyscall_page;
        unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);

        if (vsyscall_mode != NONE)
                __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
                             vsyscall_mode == NATIVE
                             ? PAGE_KERNEL_VSYSCALL
                             : PAGE_KERNEL_VVAR);

        BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
                     (unsigned long)VSYSCALL_ADDR);
}