/* arch/x86/kernel/ptrace_32.c */
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>

/*
 * Does not yet catch signals sent when the child dies
 * (in exit.c or in signal.c).
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), NT(14), IOPL(12-13), IF(9).
 * Also masks reserved bits (31-22, 15, 5, 3, 1).
 */
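/*
 * For reference, the remaining writable bits are CF(0), PF(2), AF(4), ZF(6),
 * SF(7), TF(8), DF(10), OF(11), RF(16) and AC(18):
 * 0x00000dd5 (bits 0,2,4,6,7,8,10,11) | 0x00050000 (bits 16,18) == 0x00050dd5.
 */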
#define FLAG_MASK 0x00050dd5

/* Sets the trap flag. */
#define TRAP_FLAG 0x100

/*
 * Offset of eflags on child stack..
 */
#define EFL_OFFSET offsetof(struct pt_regs, eflags)

static inline struct pt_regs *get_child_regs(struct task_struct *task)
{
        void *stack_top = (void *)task->thread.esp0;
        return stack_top - sizeof(struct pt_regs);
}

/*
 * This routine gets a word off of the process's privileged stack.
 * The offset is the byte offset into the pt_regs structure on the stack.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
        unsigned char *stack;

        stack = (unsigned char *)task->thread.esp0 - sizeof(struct pt_regs);
        stack += offset;
        return (*((int *)stack));
}

/*
 * This routine puts a word on the process's privileged stack.
 * The offset is the byte offset into the pt_regs structure on the stack.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
                                 unsigned long data)
{
        unsigned char *stack;

        stack = (unsigned char *)task->thread.esp0 - sizeof(struct pt_regs);
        stack += offset;
        *(unsigned long *)stack = data;
        return 0;
}
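
/*
 * A small usage sketch: offsets passed to get_stack_long()/put_stack_long()
 * are byte offsets into struct pt_regs, e.g.
 *
 *      long eflags = get_stack_long(child, EFL_OFFSET);
 *
 * reads the saved eflags word of a stopped child, as putreg() does below.
 */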

static int putreg(struct task_struct *child,
        unsigned long regno, unsigned long value)
{
        switch (regno >> 2) {
        case GS:
                if (value && (value & 3) != 3)
                        return -EIO;
                child->thread.gs = value;
                return 0;
        case DS:
        case ES:
        case FS:
                if (value && (value & 3) != 3)
                        return -EIO;
                value &= 0xffff;
                break;
        case SS:
        case CS:
                if ((value & 3) != 3)
                        return -EIO;
                value &= 0xffff;
                break;
        case EFL:
                value &= FLAG_MASK;
                value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
                break;
        }
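        /*
         * gs is handled above via thread.gs and is not part of the pt_regs
         * frame here, so user-area register offsets past FS sit one word
         * ahead of the corresponding pt_regs slots; shift regno to
         * compensate before indexing the stack.
         */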
        if (regno > FS*4)
                regno -= 1*4;
        put_stack_long(child, regno, value);
        return 0;
}

static unsigned long getreg(struct task_struct *child,
        unsigned long regno)
{
        unsigned long retval = ~0UL;

        switch (regno >> 2) {
        case GS:
                retval = child->thread.gs;
                break;
        case DS:
        case ES:
        case FS:
        case SS:
        case CS:
                retval = 0xffff;
                /* fall through */
        default:
                if (regno > FS*4)
                        regno -= 1*4;
                retval &= get_stack_long(child, regno);
        }
        return retval;
}

#define LDT_SEGMENT 4
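/*
 * Bit 2 of a segment selector is the table indicator: when it is set, the
 * selector refers to the LDT rather than the GDT.
 */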

static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
        unsigned long addr, seg;

        addr = regs->eip;
        seg = regs->xcs & 0xffff;
        if (regs->eflags & VM_MASK) {
                addr = (addr & 0xffff) + (seg << 4);
                return addr;
        }

        /*
         * We'll assume that the code segments in the GDT
         * are all zero-based. That is largely true: the
         * TLS segments are used for data, and the PNPBIOS
         * and APM bios ones we just ignore here.
         */
        if (seg & LDT_SEGMENT) {
                u32 *desc;
                unsigned long base;

                seg &= ~7UL;

                mutex_lock(&child->mm->context.lock);
                if (unlikely((seg >> 3) >= child->mm->context.size))
                        addr = -1L; /* bogus selector, access would fault */
                else {
                        desc = child->mm->context.ldt + seg;
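                        /*
                         * Descriptor layout: base bits 15:0 are in desc[0]
                         * bits 31:16, base bits 23:16 in desc[1] bits 7:0,
                         * and base bits 31:24 in desc[1] bits 31:24; bit 22
                         * of desc[1] is the D/B (default operand size) flag.
                         */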
                        base = ((desc[0] >> 16) |
                                ((desc[1] & 0xff) << 16) |
                                (desc[1] & 0xff000000));

                        /* 16-bit code segment? */
                        if (!((desc[1] >> 22) & 1))
                                addr &= 0xffff;
                        addr += base;
                }
                mutex_unlock(&child->mm->context.lock);
        }
        return addr;
}

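/*
 * Peek at the instruction the child is about to execute: if that instruction
 * itself reloads EFLAGS (popf or iret), TF may end up set by the instruction
 * rather than by the debugger, so set_singlestep() below avoids taking
 * credit for it.
 */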
static inline int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
{
        int i, copied;
        unsigned char opcode[15];
        unsigned long addr = convert_eip_to_linear(child, regs);

        copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
        for (i = 0; i < copied; i++) {
                switch (opcode[i]) {
                /* popf and iret */
                case 0x9d: case 0xcf:
                        return 1;
                /* opcode and address size prefixes */
                case 0x66: case 0x67:
                        continue;
                /* irrelevant prefixes (segment overrides and repeats) */
                case 0x26: case 0x2e:
                case 0x36: case 0x3e:
                case 0x64: case 0x65:
                case 0xf0: case 0xf2: case 0xf3:
                        continue;

                /*
                 * pushf: NOTE! We should probably not let
                 * the user see the TF bit being set. But
                 * it's more pain than it's worth to avoid
                 * it, and a debugger could emulate this
                 * all in user space if it _really_ cares.
                 */
                case 0x9c:
                default:
                        return 0;
                }
        }
        return 0;
}

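/*
 * Single-step bookkeeping: TIF_SINGLESTEP makes the return-to-user paths set
 * TF, while PT_DTRACE records that it was the debugger (not the traced
 * program) that turned TF on, so clear_singlestep() knows whether to clear
 * it again.
 */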
static void set_singlestep(struct task_struct *child)
{
        struct pt_regs *regs = get_child_regs(child);

        /*
         * Always set TIF_SINGLESTEP - this guarantees that
         * we single-step system calls etc.. This will also
         * cause us to set TF when returning to user mode.
         */
        set_tsk_thread_flag(child, TIF_SINGLESTEP);

        /*
         * If TF was already set, don't do anything else
         */
        if (regs->eflags & TRAP_FLAG)
                return;

        /* Set TF on the kernel stack.. */
        regs->eflags |= TRAP_FLAG;

        /*
         * ..but if TF is changed by the instruction we will trace,
         * don't mark it as being "us" that set it, so that we
         * won't clear it by hand later.
         */
        if (is_setting_trap_flag(child, regs))
                return;

        child->ptrace |= PT_DTRACE;
}

static void clear_singlestep(struct task_struct *child)
{
        /* Always clear TIF_SINGLESTEP... */
        clear_tsk_thread_flag(child, TIF_SINGLESTEP);

        /* But touch TF only if it was set by us.. */
        if (child->ptrace & PT_DTRACE) {
                struct pt_regs *regs = get_child_regs(child);
                regs->eflags &= ~TRAP_FLAG;
                child->ptrace &= ~PT_DTRACE;
        }
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
        clear_singlestep(child);
        clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
}

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
        struct user *dummy = NULL;
        int i, ret;
        unsigned long __user *datap = (unsigned long __user *)data;

        switch (request) {
        /* when I and D space are separate, these will need to be fixed. */
        case PTRACE_PEEKTEXT: /* read word at location addr. */
        case PTRACE_PEEKDATA:
                ret = generic_ptrace_peekdata(child, addr, data);
                break;

        /* read the word at location addr in the USER area. */
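        /*
         * The USER area starts with the saved register frame (FRAME_SIZE
         * longs, fetched via getreg()); the u_debugreg[0..7] slots further
         * in are backed by child->thread.debugreg[].
         */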
        case PTRACE_PEEKUSR: {
                unsigned long tmp;

                ret = -EIO;
                if ((addr & 3) || addr < 0 ||
                    addr > sizeof(struct user) - 3)
                        break;

                tmp = 0;  /* Default return condition */
                if (addr < FRAME_SIZE*sizeof(long))
                        tmp = getreg(child, addr);
                if (addr >= (long) &dummy->u_debugreg[0] &&
                    addr <= (long) &dummy->u_debugreg[7]) {
                        addr -= (long) &dummy->u_debugreg[0];
                        addr = addr >> 2;
                        tmp = child->thread.debugreg[addr];
                }
                ret = put_user(tmp, datap);
                break;
        }

        /* when I and D space are separate, this will have to be fixed. */
        case PTRACE_POKETEXT: /* write the word at location addr. */
        case PTRACE_POKEDATA:
                ret = generic_ptrace_pokedata(child, addr, data);
                break;

        case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
                ret = -EIO;
                if ((addr & 3) || addr < 0 ||
                    addr > sizeof(struct user) - 3)
                        break;

                if (addr < FRAME_SIZE*sizeof(long)) {
                        ret = putreg(child, addr, data);
                        break;
                }
                /* We need to be very careful here.  We implicitly
                   want to modify a portion of the task_struct, and we
                   have to be selective about what portions we allow someone
                   to modify. */

                ret = -EIO;
                if (addr >= (long) &dummy->u_debugreg[0] &&
                    addr <= (long) &dummy->u_debugreg[7]) {

                        if (addr == (long) &dummy->u_debugreg[4]) break;
                        if (addr == (long) &dummy->u_debugreg[5]) break;
                        if (addr < (long) &dummy->u_debugreg[4] &&
                            ((unsigned long) data) >= TASK_SIZE-3) break;

                        /* Sanity-check data. Take one half-byte at once with
                         * check = (val >> (16 + 4*i)) & 0xf. It contains the
                         * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
                         * 2 and 3 are LENi. Given a list of invalid values,
                         * we do mask |= 1 << invalid_value, so that
                         * (mask >> check) & 1 is a correct test for invalid
                         * values.
                         *
                         * R/Wi contains the type of the breakpoint /
                         * watchpoint, LENi contains the length of the watched
                         * data in the watchpoint case.
                         *
                         * The invalid values are:
                         * - LENi == binary 10 (undefined), so mask |= 0x0f00.
                         * - R/Wi == binary 10 (break on I/O reads or writes),
                         *   so mask |= 0x4444.
                         * - R/Wi == binary 00 && LENi != binary 00, so we
                         *   have mask |= 0x1110.
                         *
                         * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
                         *
                         * See the Intel Manual "System Programming Guide",
                         * 15.2.4
                         *
                         * Note that LENi == binary 10 is defined on x86_64 in
                         * long mode (i.e. even for 32-bit userspace software,
                         * but 64-bit kernel), so the x86_64 mask value is
                         * 0x5454. See the AMD manual no. 24593 (AMD64 System
                         * Programming). */
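                        /*
                         * Worked example: a 4-byte write watchpoint has
                         * R/Wi == binary 01 and LENi == binary 11, so
                         * check == 0xd and (0x5f54 >> 0xd) & 1 == 0 (valid);
                         * an I/O breakpoint (R/Wi == binary 10,
                         * LENi == binary 00) gives check == 0x2 and
                         * (0x5f54 >> 0x2) & 1 == 1 (rejected).
                         */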

                        if (addr == (long) &dummy->u_debugreg[7]) {
                                data &= ~DR_CONTROL_RESERVED;
                                for (i = 0; i < 4; i++)
                                        if ((0x5f54 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
                                                goto out_tsk;
                                if (data)
                                        set_tsk_thread_flag(child, TIF_DEBUG);
                                else
                                        clear_tsk_thread_flag(child, TIF_DEBUG);
                        }
                        addr -= (long) &dummy->u_debugreg;
                        addr = addr >> 2;
                        child->thread.debugreg[addr] = data;
                        ret = 0;
                }
                break;

        case PTRACE_SYSEMU: /* continue and stop at next syscall, which will not be executed */
        case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
        case PTRACE_CONT: /* restart after signal. */
                ret = -EIO;
                if (!valid_signal(data))
                        break;
                if (request == PTRACE_SYSEMU) {
                        set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
                        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
                } else if (request == PTRACE_SYSCALL) {
                        set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
                        clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
                } else {
                        clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
                        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
                }
                child->exit_code = data;
                /* make sure the single step bit is not set. */
                clear_singlestep(child);
                wake_up_process(child);
                ret = 0;
                break;

        /*
         * make the child exit. Best I can do is send it a sigkill.
         * perhaps it should be put in the status that it wants to
         * exit.
         */
        case PTRACE_KILL:
                ret = 0;
                if (child->exit_state == EXIT_ZOMBIE) /* already dead */
                        break;
                child->exit_code = SIGKILL;
                /* make sure the single step bit is not set. */
                clear_singlestep(child);
                wake_up_process(child);
                break;

        case PTRACE_SYSEMU_SINGLESTEP: /* Same as SYSEMU, but singlestep if not syscall */
        case PTRACE_SINGLESTEP: /* set the trap flag. */
                ret = -EIO;
                if (!valid_signal(data))
                        break;

                if (request == PTRACE_SYSEMU_SINGLESTEP)
                        set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
                else
                        clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);

                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
                set_singlestep(child);
                child->exit_code = data;
                /* give it a chance to run. */
                wake_up_process(child);
                ret = 0;
                break;

        case PTRACE_GETREGS: { /* Get all gp regs from the child. */
                if (!access_ok(VERIFY_WRITE, datap, FRAME_SIZE*sizeof(long))) {
                        ret = -EIO;
                        break;
                }
                for (i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long)) {
                        __put_user(getreg(child, i), datap);
                        datap++;
                }
                ret = 0;
                break;
        }

        case PTRACE_SETREGS: { /* Set all gp regs in the child. */
                unsigned long tmp;
                if (!access_ok(VERIFY_READ, datap, FRAME_SIZE*sizeof(long))) {
                        ret = -EIO;
                        break;
                }
                for (i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long)) {
                        __get_user(tmp, datap);
                        putreg(child, i, tmp);
                        datap++;
                }
                ret = 0;
                break;
        }

        case PTRACE_GETFPREGS: { /* Get the child FPU state. */
                if (!access_ok(VERIFY_WRITE, datap,
                               sizeof(struct user_i387_struct))) {
                        ret = -EIO;
                        break;
                }
                ret = 0;
                if (!tsk_used_math(child))
                        init_fpu(child);
                get_fpregs((struct user_i387_struct __user *)data, child);
                break;
        }

        case PTRACE_SETFPREGS: { /* Set the child FPU state. */
                if (!access_ok(VERIFY_READ, datap,
                               sizeof(struct user_i387_struct))) {
                        ret = -EIO;
                        break;
                }
                set_stopped_child_used_math(child);
                set_fpregs(child, (struct user_i387_struct __user *)data);
                ret = 0;
                break;
        }

        case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */
                if (!access_ok(VERIFY_WRITE, datap,
                               sizeof(struct user_fxsr_struct))) {
                        ret = -EIO;
                        break;
                }
                if (!tsk_used_math(child))
                        init_fpu(child);
                ret = get_fpxregs((struct user_fxsr_struct __user *)data, child);
                break;
        }

        case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */
                if (!access_ok(VERIFY_READ, datap,
                               sizeof(struct user_fxsr_struct))) {
                        ret = -EIO;
                        break;
                }
                set_stopped_child_used_math(child);
                ret = set_fpxregs(child, (struct user_fxsr_struct __user *)data);
                break;
        }

        case PTRACE_GET_THREAD_AREA:
                if (addr < 0)
                        return -EIO;
                ret = do_get_thread_area(child, addr,
                                         (struct user_desc __user *) data);
                break;

        case PTRACE_SET_THREAD_AREA:
                if (addr < 0)
                        return -EIO;
                ret = do_set_thread_area(child, addr,
                                         (struct user_desc __user *) data, 0);
                break;

        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }
out_tsk:
        return ret;
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
{
        struct siginfo info;

        tsk->thread.trap_no = 1;
        tsk->thread.error_code = error_code;

        memset(&info, 0, sizeof(info));
        info.si_signo = SIGTRAP;
        info.si_code = TRAP_BRKPT;

        /* User-mode eip? */
        info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;

        /* Send us the fake SIGTRAP */
        force_sig_info(SIGTRAP, &info, tsk);
}

/* notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
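/*
 * entryexit is zero when this is called on syscall entry and non-zero on
 * syscall exit; the return value is non-zero only for PTRACE_SYSEMU on
 * syscall entry, where the traced syscall itself is not meant to be executed.
 */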
__attribute__((regparm(3)))
int do_syscall_trace(struct pt_regs *regs, int entryexit)
{
        int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
        /*
         * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for
         * syscall interception
         */
        int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
        int ret = 0;

        /* do the secure computing check first */
        if (!entryexit)
                secure_computing(regs->orig_eax);

        if (unlikely(current->audit_context)) {
                if (entryexit)
                        audit_syscall_exit(AUDITSC_RESULT(regs->eax),
                                           regs->eax);
                /* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
                 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
                 * not used, entry.S will call us only on syscall exit, not
                 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
                 * calling send_sigtrap() on syscall entry.
                 *
                 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
                 * is_singlestep is false, despite its name, so we will still
                 * do the correct thing.
                 */
                else if (is_singlestep)
                        goto out;
        }

        if (!(current->ptrace & PT_PTRACED))
                goto out;

        /* If a process stops on the 1st tracepoint with SYSCALL_TRACE
         * and then is resumed with SYSEMU_SINGLESTEP, it will come in
         * here. We have to check this and return */
        if (is_sysemu && entryexit)
                return 0;

        /* Fake a debug trap */
        if (is_singlestep)
                send_sigtrap(current, regs, 0);

        if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
                goto out;

        /* the 0x80 provides a way for the tracing parent to distinguish
           between a syscall stop and SIGTRAP delivery */
        /* Note that the debugger could change the result of test_thread_flag!*/
        ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

        /*
         * this isn't the same as continuing with a signal, but it will do
         * for normal use. strace only continues with a signal if the
         * stopping signal is not SIGTRAP. -brl
         */
        if (current->exit_code) {
                send_sig(current->exit_code, current, 1);
                current->exit_code = 0;
        }
        ret = is_sysemu;
out:
        if (unlikely(current->audit_context) && !entryexit)
                audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_eax,
                                    regs->ebx, regs->ecx, regs->edx, regs->esi);
        if (ret == 0)
                return 0;

        regs->orig_eax = -1; /* force skip of syscall restarting */
        if (unlikely(current->audit_context))
                audit_syscall_exit(AUDITSC_RESULT(regs->eax), regs->eax);
        return 1;
}