// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
 *
 * Based on the original implementation which is:
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *  Parts of the original code have been moved to arch/x86/vdso/vma.c
 *
 * This file implements vsyscall emulation.  vsyscalls are a legacy ABI:
 * Userspace can request certain kernel services by calling fixed
 * addresses.  This concept is problematic:
 *
 * - It interferes with ASLR.
 * - It's awkward to write code that lives in kernel addresses but is
 *   callable by userspace at fixed addresses.
 * - The whole concept is impossible for 32-bit compat userspace.
 * - UML cannot easily virtualize a vsyscall.
 *
 * As of mid-2014, I believe that there is no new userspace code that
 * will use a vsyscall if the vDSO is present.  I hope that there will
 * soon be no new userspace code that will ever use a vsyscall.
 *
 * The code in this file emulates vsyscalls when notified of a page
 * fault to a vsyscall address.
 */
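
/*
 * Illustrative (hypothetical) userspace sketch: a legacy binary invoked
 * these services by calling the fixed addresses directly rather than
 * going through the vDSO.  Assuming the usual VSYSCALL_ADDR of
 * 0xffffffffff600000, such a call looked roughly like:
 *
 *	struct timeval tv;
 *	typedef int (*vgtod_t)(struct timeval *, struct timezone *);
 *	vgtod_t vgtod = (vgtod_t)0xffffffffff600000UL;
 *	int err = vgtod(&tv, NULL);
 *
 * In EMULATE mode the call above faults, and emulate_vsyscall() below
 * performs the real system call and then emulates the "ret" back to
 * the caller.
 */
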
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"
static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
	NATIVE;
#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
	NONE;
#else
	EMULATE;
#endif
static int __init vsyscall_setup(char *str)
{
	if (str) {
		if (!strcmp("emulate", str))
			vsyscall_mode = EMULATE;
		else if (!strcmp("native", str))
			vsyscall_mode = NATIVE;
		else if (!strcmp("none", str))
			vsyscall_mode = NONE;
		else
			return -EINVAL;

		return 0;
	}

	return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);
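
/*
 * For example, booting with "vsyscall=native" selects NATIVE mode (the
 * page is mapped executable by map_vsyscall() below), while
 * "vsyscall=none" leaves the page unmapped, so any use of it faults and
 * is rejected in emulate_vsyscall() below.
 */
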
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	if (!show_unhandled_signals)
		return;

	printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
			   level, current->comm, task_pid_nr(current),
			   message, regs->ip, regs->cs,
			   regs->sp, regs->ax, regs->si, regs->di);
}
static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
		return -EINVAL;

	nr = (addr & 0xC00UL) >> 10;
	if (nr >= 3)
		return -EINVAL;

	return nr;
}
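
/*
 * Worked example of the mapping above, assuming the usual VSYSCALL_ADDR
 * of 0xffffffffff600000:
 *
 *	0xffffffffff600000 -> 0 (gettimeofday)
 *	0xffffffffff600400 -> 1 (time)
 *	0xffffffffff600800 -> 2 (getcpu)
 *
 * Anything else in the page, including 0xffffffffff600c00, yields -EINVAL.
 */
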
static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
	/*
	 * XXX: if access_ok, get_user, and put_user handled
	 * sig_on_uaccess_err, this could go away.
	 */

	if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
		siginfo_t info;
		struct thread_struct *thread = &current->thread;

		thread->error_code	= 6;	/* user fault, no page, write */
		thread->cr2		= ptr;
		thread->trap_nr		= X86_TRAP_PF;

		memset(&info, 0, sizeof(info));
		info.si_signo		= SIGSEGV;
		info.si_errno		= 0;
		info.si_code		= SEGV_MAPERR;
		info.si_addr		= (void __user *)ptr;

		force_sig_info(SIGSEGV, &info, current);
		return false;
	} else {
		return true;
	}
}
bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
	struct task_struct *tsk;
	unsigned long caller;
	int vsyscall_nr, syscall_nr, tmp;
	int prev_sig_on_uaccess_err;
	long ret;

	/*
	 * No point in checking CS -- the only way to get here is a user mode
	 * trap to a high address, which means that we're in 64-bit user code.
	 */

	WARN_ON_ONCE(address != regs->ip);

	if (vsyscall_mode == NONE) {
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall attempted with vsyscall=none");
		return false;
	}

	vsyscall_nr = addr_to_vsyscall_nr(address);

	trace_emulate_vsyscall(vsyscall_nr);
	if (vsyscall_nr < 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
		goto sigsegv;
	}

	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "vsyscall with bad stack (exploit attempt?)");
		goto sigsegv;
	}

	tsk = current;
	/*
	 * Check for access_ok violations and find the syscall nr.
	 *
	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
	 * 64-bit, so we don't need to special-case it here.  For all the
	 * vsyscalls, NULL means "don't write anything" not "write it at
	 * address 0".
	 */
	switch (vsyscall_nr) {
	case 0:
		if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
		    !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_gettimeofday;
		break;

	case 1:
		if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_time;
		break;

	case 2:
		if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
		    !write_ok_or_segv(regs->si, sizeof(unsigned))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_getcpu;
		break;
	}
	/*
	 * Handle seccomp.  regs->ip must be the original value.
	 * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
	 *
	 * We could optimize the seccomp disabled case, but performance
	 * here doesn't matter.
	 */
	regs->orig_ax = syscall_nr;
	regs->ax = -ENOSYS;
	tmp = secure_computing(NULL);
	if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
		warn_bad_vsyscall(KERN_DEBUG, regs,
				  "seccomp tried to change syscall nr or ip");
		do_exit(SIGSYS);
	}
	regs->orig_ax = -1;
	if (tmp)
		goto do_ret;  /* skip requested */
	/*
	 * With a real vsyscall, page faults cause SIGSEGV.  We want to
	 * preserve that behavior to make writing exploits harder.
	 */
	prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
	current->thread.sig_on_uaccess_err = 1;

	ret = -EFAULT;
	switch (vsyscall_nr) {
	case 0:
		ret = sys_gettimeofday(
			(struct timeval __user *)regs->di,
			(struct timezone __user *)regs->si);
		break;
	case 1:
		ret = sys_time((time_t __user *)regs->di);
		break;

	case 2:
		ret = sys_getcpu((unsigned __user *)regs->di,
				 (unsigned __user *)regs->si,
				 NULL);
		break;
	}

	current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;
check_fault:
	if (ret == -EFAULT) {
		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall fault (exploit attempt?)");

		/*
		 * If we failed to generate a signal for any reason,
		 * generate one here.  (This should be impossible.)
		 */
		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
				 !sigismember(&tsk->pending.signal, SIGSEGV)))
			goto sigsegv;

		return true;  /* Don't emulate the ret. */
	}

	regs->ax = ret;

do_ret:
	/* Emulate a ret instruction. */
	regs->ip = caller;
	regs->sp += 8;
	return true;

sigsegv:
	force_sig(SIGSEGV, current);
	return true;
}
/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static const char *gate_vma_name(struct vm_area_struct *vma)
{
	return "[vsyscall]";
}
static const struct vm_operations_struct gate_vma_ops = {
	.name = gate_vma_name,
};
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_ADDR,
	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC,
	.vm_ops		= &gate_vma_ops,
};
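
/*
 * With gate_vma in place, code that walks a task's VMAs (e.g. the
 * /proc/<pid>/maps output and ptrace access to the page) sees a
 * synthetic "[vsyscall]" mapping, named via gate_vma_name() above.
 */
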
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_COMPAT
	if (!mm || mm->context.ia32_compat)
		return NULL;
#endif
	if (vsyscall_mode == NONE)
		return NULL;
	return &gate_vma;
}
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(mm);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}
/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
}
void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);

	if (vsyscall_mode != NONE)
		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
			     vsyscall_mode == NATIVE
			     ? PAGE_KERNEL_VSYSCALL
			     : PAGE_KERNEL_VVAR);

	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
		     (unsigned long)VSYSCALL_ADDR);
}