/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>


/*
 * Initialize a new task whose father had been ptraced.
 *
 * Called from copy_process().
 */
void ptrace_fork(struct task_struct *child, unsigned long clone_flags)
{
        arch_ptrace_fork(child, clone_flags);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
        BUG_ON(!list_empty(&child->ptrace_entry));
        list_add(&child->ptrace_entry, &new_parent->ptraced);
        child->parent = new_parent;
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
        spin_lock(&child->sighand->siglock);
        if (task_is_traced(child)) {
                if (child->signal->flags & SIGNAL_STOP_STOPPED) {
                        __set_task_state(child, TASK_STOPPED);
                } else {
                        signal_wake_up(child, 1);
                }
        }
        spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(struct task_struct *child)
{
        BUG_ON(!child->ptrace);

        child->ptrace = 0;
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);

        arch_ptrace_untrace(child);
        if (task_is_traced(child))
                ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks. After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if ((child->ptrace & PT_PTRACED) && child->parent == current) {
                ret = 0;
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                spin_lock_irq(&child->sighand->siglock);
                if (task_is_stopped(child))
                        child->state = TASK_TRACED;
                else if (!task_is_traced(child) && !kill)
                        ret = -ESRCH;
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        if (!ret && !kill)
                ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

        /* All systems go.. */
        return ret;
}

int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
         * and for allowing access to sensitive information in /proc.
         *
         * ptrace_attach denies several cases that /proc allows
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */
        int dumpable = 0;
        /* Don't let security modules deny introspection */
        if (task == current)
                return 0;
        if (((current->uid != task->euid) ||
             (current->uid != task->suid) ||
             (current->uid != task->uid) ||
             (current->gid != task->egid) ||
             (current->gid != task->sgid) ||
             (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
                return -EPERM;
        smp_rmb();
        if (task->mm)
                dumpable = get_dumpable(task->mm);
        if (!dumpable && !capable(CAP_SYS_PTRACE))
                return -EPERM;

        return security_ptrace_may_access(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        int err;
        task_lock(task);
        err = __ptrace_may_access(task, mode);
        task_unlock(task);
        return (!err ? true : false);
}

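/*
 * Illustrative sketch, not from this file: a hypothetical read-only
 * handler exposing another task's state (e.g. a /proc-style file) could
 * gate access on ptrace_may_access(), roughly as below.  The function
 * name example_show_task_state() is an assumption made purely for
 * illustration.
 */
static int example_show_task_state(struct task_struct *task)
{
        /* Refuse to expose state the caller could not ptrace-read. */
        if (!ptrace_may_access(task, PTRACE_MODE_READ))
                return -EACCES;
        /* ... emit the task's state to the caller here ... */
        return 0;
}
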
int ptrace_attach(struct task_struct *task)
{
        int retval;
        unsigned long flags;

        audit_ptrace(task);

        retval = -EPERM;
        if (same_thread_group(task, current))
                goto out;

repeat:
        /*
         * Nasty, nasty.
         *
         * We want to hold both the task-lock and the
         * tasklist_lock for writing at the same time.
         * But that's against the rules (tasklist_lock
         * is taken for reading by interrupts on other
         * cpu's that may have task_lock).
         */
        task_lock(task);
        if (!write_trylock_irqsave(&tasklist_lock, flags)) {
                task_unlock(task);
                do {
                        cpu_relax();
                } while (!write_can_lock(&tasklist_lock));
                goto repeat;
        }

        if (!task->mm)
                goto bad;
        /* the same process cannot be attached many times */
        if (task->ptrace & PT_PTRACED)
                goto bad;
        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
        if (retval)
                goto bad;

        /* Go */
        task->ptrace |= PT_PTRACED;
        if (capable(CAP_SYS_PTRACE))
                task->ptrace |= PT_PTRACE_CAP;

        __ptrace_link(task, current);

        send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
bad:
        write_unlock_irqrestore(&tasklist_lock, flags);
        task_unlock(task);
out:
        return retval;
}

static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
{
        child->exit_code = data;
        /* .. re-parent .. */
        __ptrace_unlink(child);
        /* .. and wake it up. */
        if (child->exit_state != EXIT_ZOMBIE)
                wake_up_process(child);
}

int ptrace_detach(struct task_struct *child, unsigned int data)
{
        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

        write_lock_irq(&tasklist_lock);
        /* protect against de_thread()->release_task() */
        if (child->ptrace)
                __ptrace_detach(child, data);
        write_unlock_irq(&tasklist_lock);

        return 0;
}

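/*
 * Illustrative userspace sketch (assumed, not part of this kernel file):
 * the tracer-side sequence that exercises ptrace_attach() and
 * ptrace_detach() above -- attach, wait for the SIGSTOP-induced trace
 * stop, poke around, then detach.  The helper name trace_briefly() is an
 * assumption for illustration only.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <stdio.h>

/* Attach to @pid, wait until it enters the trace stop, then detach. */
static int trace_briefly(pid_t pid)
{
        int status;

        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
                perror("PTRACE_ATTACH");
                return -1;
        }
        if (waitpid(pid, &status, 0) == -1) {   /* tracee stops via SIGSTOP */
                perror("waitpid");
                return -1;
        }
        /* Registers and memory can be inspected here while it is stopped. */
        return ptrace(PTRACE_DETACH, pid, NULL, NULL);  /* no signal injected */
}
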
/*
 * Read @len bytes starting at @src in @tsk's address space into the
 * tracer's buffer @dst, in chunks of up to 128 bytes.  Returns the number
 * of bytes copied, or a negative error code.
 */
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = access_process_vm(tsk, src, buf, this_len, 0);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

/*
 * Write @len bytes from the tracer's buffer @src into @tsk's address
 * space at @dst, in chunks of up to 128 bytes.  Returns the number of
 * bytes copied, or a negative error code.
 */
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = access_process_vm(tsk, dst, buf, this_len, 1);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

/*
 * Translate the tracer's PTRACE_O_* option bits in @data into the
 * corresponding PT_* flags on @child; unknown bits yield -EINVAL.
 */
static int ptrace_setoptions(struct task_struct *child, long data)
{
        child->ptrace &= ~PT_TRACE_MASK;

        if (data & PTRACE_O_TRACESYSGOOD)
                child->ptrace |= PT_TRACESYSGOOD;

        if (data & PTRACE_O_TRACEFORK)
                child->ptrace |= PT_TRACE_FORK;

        if (data & PTRACE_O_TRACEVFORK)
                child->ptrace |= PT_TRACE_VFORK;

        if (data & PTRACE_O_TRACECLONE)
                child->ptrace |= PT_TRACE_CLONE;

        if (data & PTRACE_O_TRACEEXEC)
                child->ptrace |= PT_TRACE_EXEC;

        if (data & PTRACE_O_TRACEVFORKDONE)
                child->ptrace |= PT_TRACE_VFORK_DONE;

        if (data & PTRACE_O_TRACEEXIT)
                child->ptrace |= PT_TRACE_EXIT;

        return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}

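/*
 * Illustrative userspace sketch (assumed, not part of this kernel file):
 * how a tracer requests the option bits that ptrace_setoptions()
 * translates into PT_* flags.  The helper name enable_trace_options() is
 * an assumption for illustration only.
 */
#include <sys/types.h>
#include <sys/ptrace.h>

/* @pid must already be attached and sitting in a ptrace stop. */
static long enable_trace_options(pid_t pid)
{
        long opts = PTRACE_O_TRACESYSGOOD |     /* mark syscall stops with 0x80 */
                    PTRACE_O_TRACEFORK |        /* report fork() events */
                    PTRACE_O_TRACEEXIT;         /* report a stop just before exit */

        return ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)opts);
}
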
/* Copy the siginfo of the signal stop @child is in, if any, to the caller. */
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
        int error = -ESRCH;

        read_lock(&tasklist_lock);
        if (likely(child->sighand != NULL)) {
                error = -EINVAL;
                spin_lock_irq(&child->sighand->siglock);
                if (likely(child->last_siginfo != NULL)) {
                        *info = *child->last_siginfo;
                        error = 0;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
        return error;
}

/* Replace the siginfo @child will see when it resumes from its signal stop. */
static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
        int error = -ESRCH;

        read_lock(&tasklist_lock);
        if (likely(child->sighand != NULL)) {
                error = -EINVAL;
                spin_lock_irq(&child->sighand->siglock);
                if (likely(child->last_siginfo != NULL)) {
                        *child->last_siginfo = *info;
                        error = 0;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
        return error;
}

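/*
 * Illustrative userspace sketch (assumed, not part of this kernel file):
 * reading the siginfo behind a signal stop via PTRACE_GETSIGINFO, the
 * request served by ptrace_getsiginfo() above.  The helper name
 * show_stop_signal() is an assumption for illustration only.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <stdio.h>

/* @pid must be in a ptrace stop for this to succeed. */
static void show_stop_signal(pid_t pid)
{
        siginfo_t si;

        if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) == -1) {
                perror("PTRACE_GETSIGINFO");
                return;
        }
        printf("tracee stopped by signal %d (si_code %d)\n",
               si.si_signo, si.si_code);
}
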

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)          ((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)          0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)         ((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)         0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)   ((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)   0
#endif

/*
 * Common handler for the resuming requests (PTRACE_CONT, PTRACE_SYSCALL
 * and the single-step/sysemu variants): program the per-request thread
 * flags and stepping state, then wake @child with @data as the signal to
 * deliver.
 */
static int ptrace_resume(struct task_struct *child, long request, long data)
{
        if (!valid_signal(data))
                return -EIO;

        if (request == PTRACE_SYSCALL)
                set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
        if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
                set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

        if (is_singleblock(request)) {
                if (unlikely(!arch_has_block_step()))
                        return -EIO;
                user_enable_block_step(child);
        } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
                if (unlikely(!arch_has_single_step()))
                        return -EIO;
                user_enable_single_step(child);
        } else
                user_disable_single_step(child);

        child->exit_code = data;
        wake_up_process(child);

        return 0;
}

/*
 * Shared dispatcher for the architecture-independent ptrace requests;
 * arch_ptrace() implementations fall back to this for anything they do
 * not handle themselves.
 */
int ptrace_request(struct task_struct *child, long request,
                   long addr, long data)
{
        int ret = -EIO;
        siginfo_t siginfo;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                return generic_ptrace_peekdata(child, addr, data);
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, (unsigned long __user *) data);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user((siginfo_t __user *) data,
                                                   &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                if (copy_from_user(&siginfo, (siginfo_t __user *) data,
                                   sizeof siginfo))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        case PTRACE_DETACH:     /* detach a process that was attached. */
                ret = ptrace_detach(child, data);
                break;

#ifdef PTRACE_SINGLESTEP
        case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
        case PTRACE_SYSEMU:
        case PTRACE_SYSEMU_SINGLESTEP:
#endif
        case PTRACE_SYSCALL:
        case PTRACE_CONT:
                return ptrace_resume(child, request, data);

        case PTRACE_KILL:
                if (child->exit_state)  /* already dead */
                        return 0;
                return ptrace_resume(child, request, SIGKILL);

        default:
                break;
        }

        return ret;
}

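/*
 * Illustrative userspace sketch (assumed, not part of this kernel file):
 * a minimal syscall-tracing loop driving the PTRACE_SYSCALL path that
 * ptrace_request() hands to ptrace_resume().  Stops other than the
 * syscall SIGTRAP are re-injected into the tracee.  The helper name
 * syscall_loop() is an assumption for illustration only.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

/* Resume @pid with PTRACE_SYSCALL until it exits or is killed. */
static void syscall_loop(pid_t pid)
{
        int status, sig = 0;

        for (;;) {
                if (ptrace(PTRACE_SYSCALL, pid, NULL, (void *)(long)sig) == -1)
                        break;
                if (waitpid(pid, &status, 0) == -1 || !WIFSTOPPED(status))
                        break;                  /* exited or was killed */
                /* SIGTRAP marks syscall entry/exit; pass anything else on. */
                sig = (WSTOPSIG(status) == SIGTRAP) ? 0 : WSTOPSIG(status);
        }
}
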
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
int ptrace_traceme(void)
{
        int ret = -EPERM;

        /*
         * Are we already being traced?
         */
repeat:
        task_lock(current);
        if (!(current->ptrace & PT_PTRACED)) {
                /*
                 * See ptrace_attach() comments about the locking here.
                 */
                unsigned long flags;
                if (!write_trylock_irqsave(&tasklist_lock, flags)) {
                        task_unlock(current);
                        do {
                                cpu_relax();
                        } while (!write_can_lock(&tasklist_lock));
                        goto repeat;
                }

                ret = security_ptrace_traceme(current->parent);

                /*
                 * Set the ptrace bit in the process ptrace flags.
                 * Then link us on our parent's ptraced list.
                 */
                if (!ret) {
                        current->ptrace |= PT_PTRACED;
                        __ptrace_link(current, current->real_parent);
                }

                write_unlock_irqrestore(&tasklist_lock, flags);
        }
        task_unlock(current);
        return ret;
}

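/*
 * Illustrative userspace sketch (assumed, not part of this kernel file):
 * the classic debugger launch pattern that ends up in ptrace_traceme().
 * The child asks to be traced by its parent and execs; the successful
 * exec delivers a SIGTRAP stop that the parent collects.  The helper
 * name spawn_traced() is an assumption for illustration only.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

/* Start argv[0] under the calling process as its tracer. */
static pid_t spawn_traced(char *const argv[])
{
        pid_t pid = fork();

        if (pid == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);  /* parent becomes tracer */
                execvp(argv[0], argv);                  /* stops with SIGTRAP on success */
                _exit(127);                             /* only reached if exec failed */
        }
        if (pid > 0) {
                int status;

                waitpid(pid, &status, 0);               /* collect the exec stop */
        }
        return pid;
}
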
/**
 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 * @pid:       process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations.  It checks
 * permissions and then grabs a task struct for use of the actual
 * ptrace implementation.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
        struct task_struct *child;

        read_lock(&tasklist_lock);
        child = find_task_by_vpid(pid);
        if (child)
                get_task_struct(child);

        read_unlock(&tasklist_lock);
        if (!child)
                return ERR_PTR(-ESRCH);
        return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)       do { } while (0)
#endif

asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
        struct task_struct *child;
        long ret;

        /*
         * This lock_kernel fixes a subtle race with suid exec
         */
        lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                if (!ret)
                        arch_ptrace_attach(current);
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);
        if (ret < 0)
                goto out_put_task_struct;

out_put_task_struct:
        put_task_struct(child);
out:
        unlock_kernel();
        return ret;
}

/* Read one word of @tsk's memory at @addr and store it at user address @data. */
int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
{
        unsigned long tmp;
        int copied;

        copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
        if (copied != sizeof(tmp))
                return -EIO;
        return put_user(tmp, (unsigned long __user *)data);
}

/* Write the word @data into @tsk's memory at @addr. */
int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
{
        int copied;

        copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
        return (copied == sizeof(data)) ? 0 : -EIO;
}

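/*
 * Illustrative userspace sketch (assumed, not part of this kernel file):
 * the word-at-a-time access that generic_ptrace_peekdata() implements,
 * used here to copy a small buffer out of a stopped tracee.  The helper
 * name peek_buf() is an assumption for illustration only.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <string.h>
#include <errno.h>

/* Copy @len bytes at @addr in the stopped tracee @pid into @buf. */
static int peek_buf(pid_t pid, unsigned long addr, void *buf, size_t len)
{
        size_t i;
        long word;

        for (i = 0; i < len; i += sizeof(long)) {
                errno = 0;
                word = ptrace(PTRACE_PEEKDATA, pid, (void *)(addr + i), NULL);
                if (word == -1 && errno)        /* -1 alone may be valid data */
                        return -1;
                memcpy((char *)buf + i, &word,
                       len - i < sizeof(long) ? len - i : sizeof(long));
        }
        return 0;
}
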
#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
                          compat_ulong_t addr, compat_ulong_t data)
{
        compat_ulong_t __user *datap = compat_ptr(data);
        compat_ulong_t word;
        siginfo_t siginfo;
        int ret;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                ret = access_process_vm(child, addr, &word, sizeof(word), 0);
                if (ret != sizeof(word))
                        ret = -EIO;
                else
                        ret = put_user(word, datap);
                break;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                ret = access_process_vm(child, addr, &data, sizeof(data), 1);
                ret = (ret != sizeof(data) ? -EIO : 0);
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user((compat_ulong_t) child->ptrace_message, datap);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user32(
                                (struct compat_siginfo __user *) datap,
                                &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                memset(&siginfo, 0, sizeof siginfo);
                if (copy_siginfo_from_user32(
                            &siginfo, (struct compat_siginfo __user *) datap))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        default:
                ret = ptrace_request(child, request, addr, data);
        }

        return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
                                  compat_long_t addr, compat_long_t data)
{
        struct task_struct *child;
        long ret;

        /*
         * This lock_kernel fixes a subtle race with suid exec
         */
        lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (!ret)
                ret = compat_arch_ptrace(child, request, addr, data);

out_put_task_struct:
        put_task_struct(child);
out:
        unlock_kernel();
        return ret;
}
#endif  /* CONFIG_COMPAT */