/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>

#include <asm/traps.h>
#include <asm/vdso.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
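
/*
 * Defining CREATE_TRACE_POINTS before including the trace header
 * instantiates the sys_enter/sys_exit tracepoints in this translation
 * unit; every other file that includes it sees only the declarations.
 */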

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif
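
/*
 * Context tracking is what lets RCU and the NO_HZ_FULL tick code know
 * that this CPU has crossed the user/kernel boundary; when it is
 * compiled out, the transition needs no bookkeeping and the hook above
 * collapses to nothing.
 */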

static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}
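
/*
 * The registers above follow each ABI's syscall argument convention:
 * 64-bit passes arguments in rdi, rsi, rdx, r10, r8, r9, while the
 * i386 ABI uses ebx, ecx, edx, esi, edi, ebp.  audit_syscall_entry()
 * takes only the syscall nr plus the first four arguments.
 */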

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	bool emulated = false;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));
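
	/*
	 * Snapshot the work flags once up front: ACCESS_ONCE keeps the
	 * compiler from re-reading ti->flags, so every check below sees
	 * the same value even if another context changes the flags
	 * concurrently.
	 */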
	work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (unlikely(work & _TIF_SYSCALL_EMU))
		emulated = true;

	if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		return -1L;

	if (emulated)
		return -1L;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);
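
	/*
	 * GCC's "x ?: y" extension: a nonzero ret (-1, i.e. skip the
	 * syscall) takes precedence; otherwise run the possibly
	 * ptrace-modified syscall nr from regs->orig_ax.
	 */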
	return ret ?: regs->orig_ax;
}

#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)
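
/*
 * Any of these flags forces a trip through exit_to_usermode_loop()
 * before returning to user mode: pending signals, resume callbacks,
 * uprobe fixups, a pending reschedule, or user-return notifiers (used
 * by KVM to restore certain MSRs before returning to userspace).
 */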

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
	 * _TIF_UPROBE, or _TIF_NEED_RESCHED set.  Several of these flags
	 * can be set at any time on preemptable kernels if we have IRQs on,
	 * so we need to loop.  Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
		local_irq_disable();

	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	current->thread.status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();
}

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
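
/*
 * Unlike the EXIT_TO_USERMODE_LOOP_FLAGS work, these items run exactly
 * once per syscall exit, with IRQs still on: ptrace and audit exit
 * reporting, single-step, and the sys_exit tracepoint.
 */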

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
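	/*
	 * I.e. step is true only when _TIF_SINGLESTEP is set while
	 * _TIF_SYSCALL_EMU is clear: masking with both bits and comparing
	 * against _TIF_SINGLESTEP alone distinguishes the two cases.
	 */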
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work.  If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned long nr = regs->orig_ax;

	enter_from_user_mode();
	local_irq_enable();

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table.  The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
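	/*
	 * For example, an x32 task invokes write() with nr
	 * 1 | __X32_SYSCALL_BIT (0x40000000); masking with __SYSCALL_MASK
	 * strips that bit for the table lookup, while orig_ax keeps it so
	 * in_x32_syscall() can still detect the x32 variant.
	 */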
	if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
		regs->ax = sys_call_table[nr & __SYSCALL_MASK](
			regs->di, regs->si, regs->dx,
			regs->r10, regs->r8, regs->r9);
	}

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	current->thread.status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it.  This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}
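
/*
 * Sketch of the legacy path handled here: a 32-bit task loads the nr
 * and arguments into registers and traps, e.g.
 *
 *	movl	$4, %eax	# __NR_write in the i386 ABI
 *	movl	$1, %ebx	# fd
 *	int	$0x80
 *
 * with the remaining args in ecx, edx, esi, edi, ebp, matching the
 * compat register mapping used in syscall_trace_enter() above.
 */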

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered using int80.
	 */
	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

	/* Fetch EBP from where the vDSO stashed it. */
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);
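
	/*
	 * Everything below decides how the asm should return: 1 selects
	 * the fast SYSRETL/SYSEXIT path, 0 selects IRET.  The checks are
	 * deliberately conservative; any doubt (unexpected cs/ss/ip, or
	 * RF/TF set) falls back to IRET, which can restore any state.
	 */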

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF |
				X86_EFLAGS_VM)) == 0;
#endif
}
#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */