/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

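/*
 * Audit records the syscall nr plus the first four arguments, taken from
 * the registers the respective ABI uses for them: rdi/rsi/rdx/r10 for
 * 64-bit, ebx/ecx/edx/esi for ia32.
 */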
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	bool emulated = false;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (unlikely(work & _TIF_SYSCALL_EMU))
		emulated = true;

	if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		return -1L;

	if (emulated)
		return -1L;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

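		/* __secure_computing() returns 0 to run the syscall or -1 to skip it. */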
		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

	return ret ?: regs->orig_ax;
}

#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
	 * can be set at any time on preemptable kernels if we have IRQs on,
	 * so we need to loop. Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
		local_irq_disable();

	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT. Make sure we clear it before
	 * returning to user mode. We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls. The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	current->thread.status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();
}

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work. If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned long nr = regs->orig_ax;

	enter_from_user_mode();
	local_irq_enable();

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table. The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
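	/*
	 * With CONFIG_X86_X32_ABI, __SYSCALL_MASK clears __X32_SYSCALL_BIT
	 * (bit 30) so that x32 calls index the shared table; otherwise it
	 * is ~0 and the masking below is a no-op.
	 */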
	if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
		regs->ax = sys_call_table[nr & __SYSCALL_MASK](
			regs->di, regs->si, regs->dx,
			regs->r10, regs->r8, regs->r9);
	}

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. Does
 * all entry and exit work and returns with IRQs off. This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	current->thread.status |= TS_COMPAT;
#endif
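	/*
	 * Under CONFIG_IA32_EMULATION, TS_COMPAT is what makes
	 * in_ia32_syscall() return true for this task, so the ptrace/
	 * seccomp/audit work below sees an ia32 syscall.
	 */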

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it. This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero. Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention. Adjust regs so it looks like we entered using int80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction
	 * (int $0x80 is two bytes: 0xcd 0x80). Fix it up.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

	/* Fetch EBP from where the vDSO stashed it. */
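	/*
	 * (EBP carries the sixth syscall argument in the int $0x80 ABI;
	 * the vDSO saved the caller's EBP on the user stack before
	 * clobbering it, so read the value back from there.)
	 */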
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
	) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif
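
#if 0
/*
 * Illustrative sketch only, not part of the kernel build: the int $0x80
 * ABI that do_int80_syscall_32() services, as seen from userspace.
 * Assumes an ia32 environment (build with gcc -m32), where __NR_write
 * is 4. eax holds the syscall nr; ebx/ecx/edx/esi/edi/ebp hold the
 * arguments, exactly the registers do_syscall_32_irqs_on() unpacks.
 */
int main(void)
{
	const char msg[] = "hi\n";
	long ret;

	/* write(1, msg, 3) via int $0x80 */
	asm volatile ("int $0x80"
		      : "=a" (ret)
		      : "a" (4), "b" (1), "c" (msg), "d" (3)
		      : "memory");

	return ret != 3;
}
#endif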