]> git.proxmox.com Git - mirror_ubuntu-kernels.git/blob - arch/x86/kernel/ptrace_64.c
375fadc23a25317fa26ad937f1935813ef3d850d
[mirror_ubuntu-kernels.git] / arch / x86 / kernel / ptrace_64.c
1 /* By Ross Biro 1/23/92 */
2 /*
3 * Pentium III FXSR, SSE support
4 * Gareth Hughes <gareth@valinux.com>, May 2000
5 *
6 * x86-64 port 2000-2002 Andi Kleen
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/mm.h>
12 #include <linux/smp.h>
13 #include <linux/errno.h>
14 #include <linux/ptrace.h>
15 #include <linux/user.h>
16 #include <linux/security.h>
17 #include <linux/audit.h>
18 #include <linux/seccomp.h>
19 #include <linux/signal.h>
20
21 #include <asm/uaccess.h>
22 #include <asm/pgtable.h>
23 #include <asm/system.h>
24 #include <asm/processor.h>
25 #include <asm/prctl.h>
26 #include <asm/i387.h>
27 #include <asm/debugreg.h>
28 #include <asm/ldt.h>
29 #include <asm/desc.h>
30 #include <asm/proto.h>
31 #include <asm/ia32.h>
32
/*
 * This does not yet catch signals sent when the child dies;
 * that needs to be handled in exit.c or in signal.c.
 */
37
38 /*
39 * Determines which flags the user has access to [1 = access, 0 = no access].
40 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
41 * Also masks reserved bits (63-22, 15, 5, 3, 1).
42 */
43 #define FLAG_MASK 0x54dd5UL
44
/* sets the trap flag. */
46 #define TRAP_FLAG 0x100UL
47
48 /*
49 * eflags and offset of eflags on child stack..
50 */
51 #define EFLAGS offsetof(struct pt_regs, eflags)
52 #define EFL_OFFSET ((int)(EFLAGS-sizeof(struct pt_regs)))
53
54 /*
55 * this routine will get a word off of the processes privileged stack.
56 * the offset is how far from the base addr as stored in the TSS.
57 * this routine assumes that all the privileged stacks are in our
58 * data space.
59 */
60 static inline unsigned long get_stack_long(struct task_struct *task, int offset)
61 {
62 unsigned char *stack;
63
64 stack = (unsigned char *)task->thread.rsp0;
65 stack += offset;
66 return (*((unsigned long *)stack));
67 }
68
69 /*
70 * this routine will put a word on the processes privileged stack.
71 * the offset is how far from the base addr as stored in the TSS.
72 * this routine assumes that all the privileged stacks are in our
73 * data space.
74 */
75 static inline long put_stack_long(struct task_struct *task, int offset,
76 unsigned long data)
77 {
78 unsigned char * stack;
79
80 stack = (unsigned char *) task->thread.rsp0;
81 stack += offset;
82 *(unsigned long *) stack = data;
83 return 0;
84 }
85
86 #define LDT_SEGMENT 4
87
/*
 * Translate the child's %rip into a linear address, accounting for a
 * non-zero code-segment base when %cs selects an LDT descriptor
 * (e.g. 16/32-bit compatibility-mode code).
 *
 * Returns the linear address, or -1L when the selector is beyond the
 * LDT limit (the access would fault anyway).
 */
unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->rip;
	seg = regs->cs & 0xffff;

	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based. That is largely true: the
	 * TLS segments are used for data, and the PNPBIOS
	 * and APM bios ones we just ignore here.
	 */
	if (seg & LDT_SEGMENT) {
		u32 *desc;
		unsigned long base;

		/* strip RPL/TI bits: seg becomes a byte offset into the LDT */
		seg &= ~7UL;

		/* hold the LDT stable while we read the descriptor */
		mutex_lock(&child->mm->context.lock);
		if (unlikely((seg >> 3) >= child->mm->context.size))
			addr = -1L; /* bogus selector, access would fault */
		else {
			/*
			 * NOTE(review): relies on context.ldt having byte
			 * granularity (void *) so that adding the byte
			 * offset `seg` lands on an 8-byte descriptor —
			 * confirm against mm_context_t.
			 */
			desc = child->mm->context.ldt + seg;
			/* assemble the 32-bit segment base from desc words */
			base = ((desc[0] >> 16) |
				((desc[1] & 0xff) << 16) |
				(desc[1] & 0xff000000));

			/* 16-bit code segment? (D/B bit in desc[1] clear) */
			if (!((desc[1] >> 22) & 1))
				addr &= 0xffff;
			addr += base;
		}
		mutex_unlock(&child->mm->context.lock);
	}

	return addr;
}
126
/*
 * Decode up to 15 bytes (the maximum x86 instruction length) at the
 * child's current instruction to decide whether the instruction about
 * to be single-stepped will itself rewrite TF (popf or iret).  In that
 * case the caller must not claim ownership of the TF bit it sets, or
 * it would later clear a TF value the program legitimately installed.
 *
 * Returns 1 when the instruction (after skipping prefixes) is
 * popf/iret, 0 otherwise or when the bytes cannot be read.
 */
static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[15];
	unsigned long addr = convert_rip_to_linear(child, regs);

	/* best effort: a short/failed read just means "not popf/iret" */
	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf and iret */
		case 0x9d: case 0xcf:
			return 1;

			/* CHECKME: 64 65 */

		/* opcode and address size prefixes */
		case 0x66: case 0x67:
			continue;
		/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf2: case 0xf3:
			continue;

		case 0x40 ... 0x4f:
			if (regs->cs != __USER_CS)
				/* 32-bit mode: register increment */
				return 0;
			/* 64-bit mode: REX prefix */
			continue;

			/* CHECKME: f2, f3 */

		/*
		 * pushf: NOTE! We should probably not let
		 * the user see the TF bit being set. But
		 * it's more pain than it's worth to avoid
		 * it, and a debugger could emulate this
		 * all in user space if it _really_ cares.
		 */
		case 0x9c:	/* pushf: deliberately falls into default */
		default:
			return 0;
		}
	}
	return 0;
}
175
176 static void set_singlestep(struct task_struct *child)
177 {
178 struct pt_regs *regs = task_pt_regs(child);
179
180 /*
181 * Always set TIF_SINGLESTEP - this guarantees that
182 * we single-step system calls etc.. This will also
183 * cause us to set TF when returning to user mode.
184 */
185 set_tsk_thread_flag(child, TIF_SINGLESTEP);
186
187 /*
188 * If TF was already set, don't do anything else
189 */
190 if (regs->eflags & TRAP_FLAG)
191 return;
192
193 /* Set TF on the kernel stack.. */
194 regs->eflags |= TRAP_FLAG;
195
196 /*
197 * ..but if TF is changed by the instruction we will trace,
198 * don't mark it as being "us" that set it, so that we
199 * won't clear it by hand later.
200 */
201 if (is_setting_trap_flag(child, regs))
202 return;
203
204 child->ptrace |= PT_DTRACE;
205 }
206
207 static void clear_singlestep(struct task_struct *child)
208 {
209 /* Always clear TIF_SINGLESTEP... */
210 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
211
212 /* But touch TF only if it was set by us.. */
213 if (child->ptrace & PT_DTRACE) {
214 struct pt_regs *regs = task_pt_regs(child);
215 regs->eflags &= ~TRAP_FLAG;
216 child->ptrace &= ~PT_DTRACE;
217 }
218 }
219
220 /*
221 * Called by kernel/ptrace.c when detaching..
222 *
223 * Make sure the single step bit is not set.
224 */
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	clear_singlestep(child);
}
229
230 static int putreg(struct task_struct *child,
231 unsigned long regno, unsigned long value)
232 {
233 unsigned long tmp;
234
235 switch (regno) {
236 case offsetof(struct user_regs_struct,fs):
237 if (value && (value & 3) != 3)
238 return -EIO;
239 child->thread.fsindex = value & 0xffff;
240 return 0;
241 case offsetof(struct user_regs_struct,gs):
242 if (value && (value & 3) != 3)
243 return -EIO;
244 child->thread.gsindex = value & 0xffff;
245 return 0;
246 case offsetof(struct user_regs_struct,ds):
247 if (value && (value & 3) != 3)
248 return -EIO;
249 child->thread.ds = value & 0xffff;
250 return 0;
251 case offsetof(struct user_regs_struct,es):
252 if (value && (value & 3) != 3)
253 return -EIO;
254 child->thread.es = value & 0xffff;
255 return 0;
256 case offsetof(struct user_regs_struct,ss):
257 if ((value & 3) != 3)
258 return -EIO;
259 value &= 0xffff;
260 return 0;
261 case offsetof(struct user_regs_struct,fs_base):
262 if (value >= TASK_SIZE_OF(child))
263 return -EIO;
264 /*
265 * When changing the segment base, use do_arch_prctl
266 * to set either thread.fs or thread.fsindex and the
267 * corresponding GDT slot.
268 */
269 if (child->thread.fs != value)
270 return do_arch_prctl(child, ARCH_SET_FS, value);
271 return 0;
272 case offsetof(struct user_regs_struct,gs_base):
273 /*
274 * Exactly the same here as the %fs handling above.
275 */
276 if (value >= TASK_SIZE_OF(child))
277 return -EIO;
278 if (child->thread.gs != value)
279 return do_arch_prctl(child, ARCH_SET_GS, value);
280 return 0;
281 case offsetof(struct user_regs_struct, eflags):
282 value &= FLAG_MASK;
283 tmp = get_stack_long(child, EFL_OFFSET);
284 tmp &= ~FLAG_MASK;
285 value |= tmp;
286 break;
287 case offsetof(struct user_regs_struct,cs):
288 if ((value & 3) != 3)
289 return -EIO;
290 value &= 0xffff;
291 break;
292 }
293 put_stack_long(child, regno - sizeof(struct pt_regs), value);
294 return 0;
295 }
296
297 static unsigned long getreg(struct task_struct *child, unsigned long regno)
298 {
299 unsigned long val;
300 switch (regno) {
301 case offsetof(struct user_regs_struct, fs):
302 return child->thread.fsindex;
303 case offsetof(struct user_regs_struct, gs):
304 return child->thread.gsindex;
305 case offsetof(struct user_regs_struct, ds):
306 return child->thread.ds;
307 case offsetof(struct user_regs_struct, es):
308 return child->thread.es;
309 case offsetof(struct user_regs_struct, fs_base):
310 /*
311 * do_arch_prctl may have used a GDT slot instead of
312 * the MSR. To userland, it appears the same either
313 * way, except the %fs segment selector might not be 0.
314 */
315 if (child->thread.fs != 0)
316 return child->thread.fs;
317 if (child->thread.fsindex != FS_TLS_SEL)
318 return 0;
319 return get_desc_base(&child->thread.tls_array[FS_TLS]);
320 case offsetof(struct user_regs_struct, gs_base):
321 /*
322 * Exactly the same here as the %fs handling above.
323 */
324 if (child->thread.gs != 0)
325 return child->thread.gs;
326 if (child->thread.gsindex != GS_TLS_SEL)
327 return 0;
328 return get_desc_base(&child->thread.tls_array[GS_TLS]);
329 default:
330 regno = regno - sizeof(struct pt_regs);
331 val = get_stack_long(child, regno);
332 if (test_tsk_thread_flag(child, TIF_IA32))
333 val &= 0xffffffff;
334 return val;
335 }
336
337 }
338
/*
 * x86-64 arch-specific ptrace dispatcher.  Handles memory and USER-area
 * peeking/poking, register and FPU state transfer, debug registers,
 * thread areas and continuation requests; anything else is deferred to
 * the generic ptrace_request().
 *
 * @child:   the traced task (attached and stopped by the caller)
 * @request: the PTRACE_* operation
 * @addr:    request-specific address/offset argument
 * @data:    request-specific data argument (often a user pointer)
 *
 * Returns 0 (or a fetched value via *data) on success, negative errno
 * on failure.
 */
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	long i, ret;
	unsigned ui;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		/* offset must be 8-byte aligned and inside struct user */
		if ((addr & 7) ||
		    addr > sizeof(struct user) - 7)
			break;

		switch (addr) {
		/* GP registers are read via getreg() */
		case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
			tmp = getreg(child, addr);
			break;
		/* debug registers come from the thread structure */
		case offsetof(struct user, u_debugreg[0]):
			tmp = child->thread.debugreg0;
			break;
		case offsetof(struct user, u_debugreg[1]):
			tmp = child->thread.debugreg1;
			break;
		case offsetof(struct user, u_debugreg[2]):
			tmp = child->thread.debugreg2;
			break;
		case offsetof(struct user, u_debugreg[3]):
			tmp = child->thread.debugreg3;
			break;
		case offsetof(struct user, u_debugreg[6]):
			tmp = child->thread.debugreg6;
			break;
		case offsetof(struct user, u_debugreg[7]):
			tmp = child->thread.debugreg7;
			break;
		default:
			/* other USER-area fields read as zero */
			tmp = 0;
			break;
		}
		ret = put_user(tmp,(unsigned long __user *) data);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
	{
		/*
		 * Largest possible breakpoint span minus one: 4-byte
		 * watchpoints for ia32 tasks, 8-byte for 64-bit ones.
		 * Used below to keep breakpoints out of the vsyscall page.
		 */
		int dsize = test_tsk_thread_flag(child, TIF_IA32) ? 3 : 7;
		ret = -EIO;
		if ((addr & 7) ||
		    addr > sizeof(struct user) - 7)
			break;

		switch (addr) {
		case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
			ret = putreg(child, addr, data);
			break;
		/* Disallows to set a breakpoint into the vsyscall */
		case offsetof(struct user, u_debugreg[0]):
			if (data >= TASK_SIZE_OF(child) - dsize) break;
			child->thread.debugreg0 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[1]):
			if (data >= TASK_SIZE_OF(child) - dsize) break;
			child->thread.debugreg1 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[2]):
			if (data >= TASK_SIZE_OF(child) - dsize) break;
			child->thread.debugreg2 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[3]):
			if (data >= TASK_SIZE_OF(child) - dsize) break;
			child->thread.debugreg3 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[6]):
			/* DR6 only has meaningful bits in the low word */
			if (data >> 32)
				break;
			child->thread.debugreg6 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[7]):
			/* See arch/i386/kernel/ptrace.c for an explanation of
			 * this awkward check.*/
			data &= ~DR_CONTROL_RESERVED;
			/*
			 * Reject any breakpoint type/len nibble that the
			 * 0x5554 bitmap marks as invalid; i == 4 below
			 * means all four breakpoints passed.
			 */
			for(i=0; i<4; i++)
				if ((0x5554 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
					break;
			if (i == 4) {
				child->thread.debugreg7 = data;
				if (data)
					set_tsk_thread_flag(child, TIF_DEBUG);
				else
					clear_tsk_thread_flag(child, TIF_DEBUG);
				ret = 0;
			}
			break;
		}
		break;
	}
	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
	case PTRACE_CONT: /* restart after signal. */

		ret = -EIO;
		if (!valid_signal(data))
			break;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
		/* data is the signal (possibly 0) to resume with */
		child->exit_code = data;
		/* make sure the single step bit is not set. */
		clear_singlestep(child);
		wake_up_process(child);
		ret = 0;
		break;

#ifdef CONFIG_IA32_EMULATION
	/* This makes only sense with 32bit programs. Allow a
	   64bit debugger to fully examine them too. Better
	   don't use it against 64bit processes, use
	   PTRACE_ARCH_PRCTL instead. */
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *) data);

		break;
	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *) data, 0);
		break;
#endif
	/* normal 64bit interface to access TLS data.
	   Works just like arch_prctl, except that the arguments
	   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;

	/*
	 * make the child exit. Best I can do is send it a sigkill.
	 * perhaps it should be put in the status that it wants to
	 * exit.
	 */
	case PTRACE_KILL:
		ret = 0;
		if (child->exit_state == EXIT_ZOMBIE) /* already dead */
			break;
		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
		child->exit_code = SIGKILL;
		/* make sure the single step bit is not set. */
		clear_singlestep(child);
		wake_up_process(child);
		break;

	case PTRACE_SINGLESTEP: /* set the trap flag. */
		ret = -EIO;
		if (!valid_signal(data))
			break;
		clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
		set_singlestep(child);
		child->exit_code = data;
		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		break;

	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
		if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
			       sizeof(struct user_regs_struct))) {
			ret = -EIO;
			break;
		}
		ret = 0;
		/* any __put_user failure leaves ret nonzero (-EFAULT) */
		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
			ret |= __put_user(getreg(child, ui),(unsigned long __user *) data);
			data += sizeof(long);
		}
		break;
	}

	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
		unsigned long tmp;
		if (!access_ok(VERIFY_READ, (unsigned __user *)data,
			       sizeof(struct user_regs_struct))) {
			ret = -EIO;
			break;
		}
		ret = 0;
		/* stop at the first register that fails validation */
		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
			ret = __get_user(tmp, (unsigned long __user *) data);
			if (ret)
				break;
			ret = putreg(child, ui, tmp);
			if (ret)
				break;
			data += sizeof(long);
		}
		break;
	}

	case PTRACE_GETFPREGS: { /* Get the child extended FPU state. */
		if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		ret = get_fpregs((struct user_i387_struct __user *)data, child);
		break;
	}

	case PTRACE_SETFPREGS: { /* Set the child extended FPU state. */
		if (!access_ok(VERIFY_READ, (unsigned __user *)data,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		set_stopped_child_used_math(child);
		ret = set_fpregs(child, (struct user_i387_struct __user *)data);
		break;
	}

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}
587
/*
 * Stop the current (traced) task and notify its tracer of a syscall
 * boundary or single-step.  Reports SIGTRAP, with bit 0x80 added when
 * PT_TRACESYSGOOD is set so debuggers can tell syscall stops from
 * real traps.  Any signal the tracer asked to deliver on resume
 * (left in exit_code) is re-sent to ourselves afterwards.
 */
static void syscall_trace(struct pt_regs *regs)
{

#if 0	/* debugging aid, normally compiled out */
	printk("trace %s rip %lx rsp %lx rax %d origrax %d caller %lx tiflags %x ptrace %x\n",
	       current->comm,
	       regs->rip, regs->rsp, regs->rax, regs->orig_rax, __builtin_return_address(0),
	       current_thread_info()->flags, current->ptrace);
#endif

	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
610
/*
 * Called from the syscall entry path when entry-side TIF work is
 * pending: runs the seccomp check, notifies a ptracer, and emits the
 * audit syscall-entry record.
 */
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
	/* do the secure computing check first */
	secure_computing(regs->orig_rax);

	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    && (current->ptrace & PT_PTRACED))
		syscall_trace(regs);

	if (unlikely(current->audit_context)) {
		if (test_thread_flag(TIF_IA32)) {
			/*
			 * IA32 syscall arguments live in ebx/ecx/edx/esi,
			 * i.e. the saved rbx/rcx/rdx/rsi low halves.
			 */
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_rax,
					    regs->rbx, regs->rcx,
					    regs->rdx, regs->rsi);
		} else {
			/* native x86-64 convention: rdi/rsi/rdx/r10 */
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_rax,
					    regs->rdi, regs->rsi,
					    regs->rdx, regs->r10);
		}
	}
}
634
635 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
636 {
637 if (unlikely(current->audit_context))
638 audit_syscall_exit(AUDITSC_RESULT(regs->rax), regs->rax);
639
640 if ((test_thread_flag(TIF_SYSCALL_TRACE)
641 || test_thread_flag(TIF_SINGLESTEP))
642 && (current->ptrace & PT_PTRACED))
643 syscall_trace(regs);
644 }