// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>
#include <linux/minmax.h>

#include <asm/syscall.h>        /* for syscall_get_* */

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
                     void *buf, int len, unsigned int gup_flags)
{
        struct mm_struct *mm;
        int ret;

        mm = get_task_mm(tsk);
        if (!mm)
                return 0;

        if (!tsk->ptrace ||
            (current != tsk->parent) ||
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
             !ptracer_capable(tsk, mm->user_ns))) {
                mmput(mm);
                return 0;
        }

        ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
        mmput(mm);

        return ret;
}

void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
                   const struct cred *ptracer_cred)
{
        BUG_ON(!list_empty(&child->ptrace_entry));
        list_add(&child->ptrace_entry, &new_parent->ptraced);
        child->parent = new_parent;
        child->ptracer_cred = get_cred(ptracer_cred);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
        __ptrace_link(child, new_parent, current_cred());
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through a TRACED -> RUNNING -> STOPPED transition, which is
 * similar to but in the opposite direction of what happens while attaching
 * to a stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer, and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
        const struct cred *old_cred;
        BUG_ON(!child->ptrace);

        clear_task_syscall_work(child, SYSCALL_TRACE);
#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
        clear_task_syscall_work(child, SYSCALL_EMU);
#endif

        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);
        old_cred = child->ptracer_cred;
        child->ptracer_cred = NULL;
        put_cred(old_cred);

        spin_lock(&child->sighand->siglock);
        child->ptrace = 0;
        /*
         * Clear all pending traps and TRAPPING.  TRAPPING should be
         * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
         */
        task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
        task_clear_jobctl_trapping(child);

        /*
         * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
         * @child isn't dead.
         */
        if (!(child->flags & PF_EXITING) &&
            (child->signal->flags & SIGNAL_STOP_STOPPED ||
             child->signal->group_stop_count)) {
                child->jobctl |= JOBCTL_STOP_PENDING;

                /*
                 * This is only possible if this thread was cloned by the
                 * traced task running in the stopped group, set the signal
                 * for the future reports.
                 * FIXME: we should change ptrace_init_task() to handle this
                 * case.
                 */
                if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
                        child->jobctl |= SIGSTOP;
        }

        /*
         * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
         * @child in the butt.  Note that @resume should be used iff @child
         * is in TASK_TRACED; otherwise, we might unduly disrupt
         * TASK_KILLABLE sleeps.
         */
        if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
                ptrace_signal_wake_up(child, true);

        spin_unlock(&child->sighand->siglock);
}

static bool looks_like_a_spurious_pid(struct task_struct *task)
{
        if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
                return false;

        if (task_pid_vnr(task) == task->ptrace_message)
                return false;

        /*
         * The tracee changed its pid but the PTRACE_EVENT_EXEC event
         * was not wait()'ed; most probably the debugger targets the old
         * leader which was destroyed in de_thread().
         */
        return true;
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
        bool ret = false;

        /* Lockless, nobody but us can set this flag */
        if (task->jobctl & JOBCTL_LISTENING)
                return ret;

        spin_lock_irq(&task->sighand->siglock);
        if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
            !__fatal_signal_pending(task)) {
                WRITE_ONCE(task->__state, __TASK_TRACED);
                ret = true;
        }
        spin_unlock_irq(&task->sighand->siglock);

        return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
        if (READ_ONCE(task->__state) != __TASK_TRACED)
                return;

        WARN_ON(!task->ptrace || task->parent != current);

        /*
         * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
         * Recheck state under the lock to close this race.
         */
        spin_lock_irq(&task->sighand->siglock);
        if (READ_ONCE(task->__state) == __TASK_TRACED) {
                if (__fatal_signal_pending(task))
                        wake_up_state(task, __TASK_TRACED);
                else
                        WRITE_ONCE(task->__state, TASK_TRACED);
        }
        spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks.  After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if (child->ptrace && child->parent == current) {
                WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                if (ignore_state || ptrace_freeze_traced(child))
                        ret = 0;
        }
        read_unlock(&tasklist_lock);

        if (!ret && !ignore_state) {
                if (!wait_task_inactive(child, __TASK_TRACED)) {
                        /*
                         * This can only happen if may_ptrace_stop() fails and
                         * ptrace_stop() changes ->state back to TASK_RUNNING,
                         * so we should not worry about leaking __TASK_TRACED.
                         */
                        WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
                        ret = -ESRCH;
                }
        }

        return ret;
}

static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
        if (mode & PTRACE_MODE_NOAUDIT)
                return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
        return ns_capable(ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        const struct cred *cred = current_cred(), *tcred;
        struct mm_struct *mm;
        kuid_t caller_uid;
        kgid_t caller_gid;

        if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
                WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
                return -EPERM;
        }

        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
         * and for allowing access to sensitive information in /proc.
         *
         * ptrace_attach denies several cases that /proc allows
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */

        /* Don't let security modules deny introspection */
        if (same_thread_group(task, current))
                return 0;
        rcu_read_lock();
        if (mode & PTRACE_MODE_FSCREDS) {
                caller_uid = cred->fsuid;
                caller_gid = cred->fsgid;
        } else {
                /*
                 * Using the euid would make more sense here, but something
                 * in userland might rely on the old behavior, and this
                 * shouldn't be a security problem since
                 * PTRACE_MODE_REALCREDS implies that the caller explicitly
                 * used a syscall that requests access to another process
                 * (and not a filesystem syscall to procfs).
                 */
                caller_uid = cred->uid;
                caller_gid = cred->gid;
        }
        tcred = __task_cred(task);
        if (uid_eq(caller_uid, tcred->euid) &&
            uid_eq(caller_uid, tcred->suid) &&
            uid_eq(caller_uid, tcred->uid)  &&
            gid_eq(caller_gid, tcred->egid) &&
            gid_eq(caller_gid, tcred->sgid) &&
            gid_eq(caller_gid, tcred->gid))
                goto ok;
        if (ptrace_has_cap(tcred->user_ns, mode))
                goto ok;
        rcu_read_unlock();
        return -EPERM;
ok:
        rcu_read_unlock();
        /*
         * If a task drops privileges and becomes nondumpable (through a
         * syscall like setresuid()) while we are trying to access it, we must
         * ensure that the dumpability is read after the credentials;
         * otherwise, we may be able to attach to a task that we shouldn't be
         * able to attach to (as if the task had dropped privileges without
         * becoming nondumpable).
         * Pairs with a write barrier in commit_creds().
         */
        smp_rmb();
        mm = task->mm;
        if (mm &&
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
             !ptrace_has_cap(mm->user_ns, mode)))
                return -EPERM;

        return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        int err;

        task_lock(task);
        err = __ptrace_may_access(task, mode);
        task_unlock(task);
        return !err;
}

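/*
 * Illustrative in-kernel usage sketch (not from this file): callers such
 * as procfs gate read access to sensitive per-task data along these
 * lines:
 *
 *      if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 *              return -EACCES;
 *
 * The FSCREDS modes suit filesystem-style checks; the REALCREDS modes
 * suit syscalls that explicitly target another process.
 */
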
static int ptrace_attach(struct task_struct *task, long request,
                         unsigned long addr,
                         unsigned long flags)
{
        bool seize = (request == PTRACE_SEIZE);
        int retval;

        retval = -EIO;
        if (seize) {
                if (addr != 0)
                        goto out;
                if (flags & ~(unsigned long)PTRACE_O_MASK)
                        goto out;
                flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
        } else {
                flags = PT_PTRACED;
        }

        audit_ptrace(task);

        retval = -EPERM;
        if (unlikely(task->flags & PF_KTHREAD))
                goto out;
        if (same_thread_group(task, current))
                goto out;

        /*
         * Protect exec's credential calculations against our interference;
         * SUID, SGID and LSM creds get determined differently
         * underneath us.
         */
        retval = -ERESTARTNOINTR;
        if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
                goto out;

        task_lock(task);
        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
        task_unlock(task);
        if (retval)
                goto unlock_creds;

        write_lock_irq(&tasklist_lock);
        retval = -EPERM;
        if (unlikely(task->exit_state))
                goto unlock_tasklist;
        if (task->ptrace)
                goto unlock_tasklist;

        task->ptrace = flags;

        ptrace_link(task, current);

        /* SEIZE doesn't trap tracee on attach */
        if (!seize)
                send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);

        spin_lock(&task->sighand->siglock);

        /*
         * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
         * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
         * will be cleared if the child completes the transition or any
         * event which clears the group stop states happens.  We'll wait
         * for the transition to complete before returning from this
         * function.
         *
         * This hides STOPPED -> RUNNING -> TRACED transition from the
         * attaching thread but a different thread in the same group can
         * still observe the transient RUNNING state.  IOW, if another
         * thread's WNOHANG wait(2) on the stopped tracee races against
         * ATTACH, the wait(2) may fail due to the transient RUNNING.
         *
         * The following task_is_stopped() test is safe as both transitions
         * in and out of STOPPED are protected by siglock.
         */
        if (task_is_stopped(task) &&
            task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
                signal_wake_up_state(task, __TASK_STOPPED);

        spin_unlock(&task->sighand->siglock);

        retval = 0;
unlock_tasklist:
        write_unlock_irq(&tasklist_lock);
unlock_creds:
        mutex_unlock(&task->signal->cred_guard_mutex);
out:
        if (!retval) {
                /*
                 * We do not bother to change retval or clear JOBCTL_TRAPPING
                 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
                 * not return to user-mode, it will exit and clear this bit in
                 * __ptrace_unlink() if it wasn't already cleared by the tracee;
                 * and until then nobody can ptrace this task.
                 */
                wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
                proc_ptrace_connector(task, PTRACE_ATTACH);
        }

        return retval;
}

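/*
 * Illustrative userspace sketch (not part of the kernel source): a
 * minimal tracer attaching to an existing process.  PTRACE_ATTACH sends
 * SIGSTOP as a side effect, so the tracer waits for the resulting stop
 * before issuing further requests; PTRACE_SEIZE attaches without
 * stopping the tracee.
 *
 *      #include <sys/ptrace.h>
 *      #include <sys/wait.h>
 *
 *      int attach_and_wait(pid_t pid)
 *      {
 *              if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
 *                      return -1;
 *              if (waitpid(pid, NULL, 0) == -1)        // wait for the stop
 *                      return -1;
 *              return 0;                               // tracee is stopped
 *      }
 */
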
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
        int ret = -EPERM;

        write_lock_irq(&tasklist_lock);
        /* Are we already being traced? */
        if (!current->ptrace) {
                ret = security_ptrace_traceme(current->parent);
                /*
                 * Check PF_EXITING to ensure ->real_parent has not passed
                 * exit_ptrace(). Otherwise we don't report the error but
                 * pretend ->real_parent untraces us right after return.
                 */
                if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                        current->ptrace = PT_PTRACED;
                        ptrace_link(current, current->real_parent);
                }
        }
        write_unlock_irq(&tasklist_lock);

        return ret;
}

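/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * classic PTRACE_TRACEME pattern.  The child asks to be traced by its
 * parent, then stops itself so the parent can set options and resume it;
 * a subsequent execve() also traps to the parent.
 *
 *      #include <sys/ptrace.h>
 *      #include <signal.h>
 *      #include <unistd.h>
 *
 *      pid_t pid = fork();
 *      if (pid == 0) {
 *              ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *              raise(SIGSTOP);                 // let the parent take over
 *              execlp("ls", "ls", (char *)NULL);
 *              _exit(127);
 *      }
 */
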
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
        int ret;

        spin_lock(&sigh->siglock);
        ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
              (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
        spin_unlock(&sigh->siglock);

        return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in which case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
        bool dead;

        __ptrace_unlink(p);

        if (p->exit_state != EXIT_ZOMBIE)
                return false;

        dead = !thread_group_leader(p);

        if (!dead && thread_group_empty(p)) {
                if (!same_thread_group(p->real_parent, tracer))
                        dead = do_notify_parent(p, p->exit_signal);
                else if (ignoring_children(tracer->sighand)) {
                        __wake_up_parent(p, tracer);
                        dead = true;
                }
        }
        /* Mark it as in the process of being reaped. */
        if (dead)
                p->exit_state = EXIT_DEAD;
        return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);

        write_lock_irq(&tasklist_lock);
        /*
         * We rely on ptrace_freeze_traced(). It can't be killed and
         * untraced by another thread, it can't be a zombie.
         */
        WARN_ON(!child->ptrace || child->exit_state);
        /*
         * tasklist_lock avoids the race with wait_task_stopped(), see
         * the comment in ptrace_resume().
         */
        child->exit_code = data;
        __ptrace_detach(current, child);
        write_unlock_irq(&tasklist_lock);

        proc_ptrace_connector(child, PTRACE_DETACH);

        return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
        struct task_struct *p, *n;

        list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
                if (unlikely(p->ptrace & PT_EXITKILL))
                        send_sig_info(SIGKILL, SEND_SIG_PRIV, p);

                if (__ptrace_detach(tracer, p))
                        list_add(&p->ptrace_entry, dead);
        }
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

                if (retval <= 0) {
                        if (copied)
                                break;
                        return retval;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = ptrace_access_vm(tsk, dst, buf, this_len,
                                          FOLL_FORCE | FOLL_WRITE);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

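/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * userspace counterpart of these block copies reads tracee memory one
 * word at a time with PTRACE_PEEKDATA.  errno must be cleared first
 * because -1 is also a valid word value.
 *
 *      #include <sys/ptrace.h>
 *      #include <errno.h>
 *
 *      int peek_words(pid_t pid, unsigned long addr, long *buf, int nwords)
 *      {
 *              for (int i = 0; i < nwords; i++) {
 *                      errno = 0;
 *                      buf[i] = ptrace(PTRACE_PEEKDATA, pid,
 *                                      addr + i * sizeof(long), NULL);
 *                      if (buf[i] == -1 && errno)
 *                              return -1;
 *              }
 *              return 0;
 *      }
 */
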
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
        unsigned flags;

        if (data & ~(unsigned long)PTRACE_O_MASK)
                return -EINVAL;

        if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
                if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
                    !IS_ENABLED(CONFIG_SECCOMP))
                        return -EINVAL;

                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;

                if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
                    current->ptrace & PT_SUSPEND_SECCOMP)
                        return -EPERM;
        }

        /* Avoid intermediate state when all opts are cleared */
        flags = child->ptrace;
        flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
        flags |= (data << PT_OPT_FLAG_SHIFT);
        child->ptrace = flags;

        return 0;
}

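/*
 * Illustrative userspace sketch (not part of the kernel source): a
 * tracer typically sets its options in one call once the tracee has
 * stopped.  PTRACE_O_TRACESYSGOOD makes syscall stops report
 * SIGTRAP | 0x80, and PTRACE_O_EXITKILL kills the tracee if the tracer
 * exits:
 *
 *      ptrace(PTRACE_SETOPTIONS, pid, 0,
 *             PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXEC | PTRACE_O_EXITKILL);
 */
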
static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        copy_siginfo(info, child->last_siginfo);
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        copy_siginfo(child->last_siginfo, info);
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}

static int ptrace_peek_siginfo(struct task_struct *child,
                                unsigned long addr,
                                unsigned long data)
{
        struct ptrace_peeksiginfo_args arg;
        struct sigpending *pending;
        struct sigqueue *q;
        int ret, i;

        ret = copy_from_user(&arg, (void __user *) addr,
                                sizeof(struct ptrace_peeksiginfo_args));
        if (ret)
                return -EFAULT;

        if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
                return -EINVAL; /* unknown flags */

        if (arg.nr < 0)
                return -EINVAL;

        /* Ensure arg.off fits in an unsigned long */
        if (arg.off > ULONG_MAX)
                return 0;

        if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
                pending = &child->signal->shared_pending;
        else
                pending = &child->pending;

        for (i = 0; i < arg.nr; ) {
                kernel_siginfo_t info;
                unsigned long off = arg.off + i;
                bool found = false;

                spin_lock_irq(&child->sighand->siglock);
                list_for_each_entry(q, &pending->list, list) {
                        if (!off--) {
                                found = true;
                                copy_siginfo(&info, &q->info);
                                break;
                        }
                }
                spin_unlock_irq(&child->sighand->siglock);

                if (!found) /* beyond the end of the list */
                        break;

#ifdef CONFIG_COMPAT
                if (unlikely(in_compat_syscall())) {
                        compat_siginfo_t __user *uinfo = compat_ptr(data);

                        if (copy_siginfo_to_user32(uinfo, &info)) {
                                ret = -EFAULT;
                                break;
                        }

                } else
#endif
                {
                        siginfo_t __user *uinfo = (siginfo_t __user *) data;

                        if (copy_siginfo_to_user(uinfo, &info)) {
                                ret = -EFAULT;
                                break;
                        }
                }

                data += sizeof(siginfo_t);
                i++;

                if (signal_pending(current))
                        break;

                cond_resched();
        }

        if (i > 0)
                return i;

        return ret;
}

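/*
 * Illustrative userspace sketch (not part of the kernel source): peeking
 * the first few pending signals of a stopped tracee without dequeueing
 * them.  The return value is the number of siginfo structures copied out.
 *
 *      #include <sys/ptrace.h>
 *      #include <signal.h>
 *
 *      struct ptrace_peeksiginfo_args args = { // from the UAPI <linux/ptrace.h>
 *              .off = 0,       // start at the head of the queue
 *              .flags = 0,     // or PTRACE_PEEKSIGINFO_SHARED for the shared queue
 *              .nr = 4,        // at most four entries
 *      };
 *      siginfo_t info[4];
 *      long n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, info);
 */
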
#ifdef CONFIG_RSEQ
static long ptrace_get_rseq_configuration(struct task_struct *task,
                                          unsigned long size, void __user *data)
{
        struct ptrace_rseq_configuration conf = {
                .rseq_abi_pointer = (u64)(uintptr_t)task->rseq,
                .rseq_abi_size = sizeof(*task->rseq),
                .signature = task->rseq_sig,
                .flags = 0,
        };

        size = min_t(unsigned long, size, sizeof(conf));
        if (copy_to_user(data, &conf, size))
                return -EFAULT;
        return sizeof(conf);
}
#endif

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)          ((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)          0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)         ((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)         0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)   ((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)   0
#endif

static int ptrace_resume(struct task_struct *child, long request,
                         unsigned long data)
{
        bool need_siglock;

        if (!valid_signal(data))
                return -EIO;

        if (request == PTRACE_SYSCALL)
                set_task_syscall_work(child, SYSCALL_TRACE);
        else
                clear_task_syscall_work(child, SYSCALL_TRACE);

#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
        if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
                set_task_syscall_work(child, SYSCALL_EMU);
        else
                clear_task_syscall_work(child, SYSCALL_EMU);
#endif

        if (is_singleblock(request)) {
                if (unlikely(!arch_has_block_step()))
                        return -EIO;
                user_enable_block_step(child);
        } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
                if (unlikely(!arch_has_single_step()))
                        return -EIO;
                user_enable_single_step(child);
        } else {
                user_disable_single_step(child);
        }

        /*
         * Change ->exit_code and ->state under siglock to avoid the race
         * with wait_task_stopped() in between; a non-zero ->exit_code will
         * wrongly look like another report from tracee.
         *
         * Note that we need siglock even if ->exit_code == data and/or this
         * status was not reported yet, the new status must not be cleared by
         * wait_task_stopped() after resume.
         *
         * If data == 0 we do not care if wait_task_stopped() reports the old
         * status and clears the code too; this can't race with the tracee, it
         * takes siglock after resume.
         */
        need_siglock = data && !thread_group_empty(current);
        if (need_siglock)
                spin_lock_irq(&child->sighand->siglock);
        child->exit_code = data;
        wake_up_state(child, __TASK_TRACED);
        if (need_siglock)
                spin_unlock_irq(&child->sighand->siglock);

        return 0;
}

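/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * classic PTRACE_SYSCALL loop, which alternates between syscall-entry
 * and syscall-exit stops.  With PTRACE_O_TRACESYSGOOD set, syscall stops
 * are reported as SIGTRAP | 0x80 in the wait status.
 *
 *      for (;;) {
 *              if (ptrace(PTRACE_SYSCALL, pid, 0, 0) == -1)
 *                      break;
 *              if (waitpid(pid, &status, 0) == -1)
 *                      break;
 *              if (WIFEXITED(status))
 *                      break;
 *              if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
 *                      ;       // inspect registers at entry/exit here
 *      }
 */
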
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
        const struct user_regset *regset;
        int n;

        for (n = 0; n < view->n; ++n) {
                regset = view->regsets + n;
                if (regset->core_note_type == type)
                        return regset;
        }

        return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
                         struct iovec *kiov)
{
        const struct user_regset_view *view = task_user_regset_view(task);
        const struct user_regset *regset = find_regset(view, type);
        int regset_no;

        if (!regset || (kiov->iov_len % regset->size) != 0)
                return -EINVAL;

        regset_no = regset - view->regsets;
        kiov->iov_len = min(kiov->iov_len,
                            (__kernel_size_t) (regset->n * regset->size));

        if (req == PTRACE_GETREGSET)
                return copy_regset_to_user(task, view, regset_no, 0,
                                           kiov->iov_len, kiov->iov_base);
        else
                return copy_regset_from_user(task, view, regset_no, 0,
                                             kiov->iov_len, kiov->iov_base);
}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);

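/*
 * Illustrative userspace sketch (not part of the kernel source):
 * fetching the general-purpose registers through PTRACE_GETREGSET.
 * NT_PRSTATUS selects the GP regset; the kernel clamps iov_len and
 * writes back the number of bytes actually filled.
 *
 *      #include <sys/ptrace.h>
 *      #include <sys/uio.h>
 *      #include <sys/user.h>
 *      #include <elf.h>
 *
 *      struct user_regs_struct gpr;
 *      struct iovec iov = { .iov_base = &gpr, .iov_len = sizeof(gpr) };
 *      if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *              ;       // iov.iov_len now holds the bytes filled
 */
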
static unsigned long
ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
                              struct ptrace_syscall_info *info)
{
        unsigned long args[ARRAY_SIZE(info->entry.args)];
        int i;

        info->op = PTRACE_SYSCALL_INFO_ENTRY;
        info->entry.nr = syscall_get_nr(child, regs);
        syscall_get_arguments(child, regs, args);
        for (i = 0; i < ARRAY_SIZE(args); i++)
                info->entry.args[i] = args[i];

        /* args is the last field in struct ptrace_syscall_info.entry */
        return offsetofend(struct ptrace_syscall_info, entry.args);
}

static unsigned long
ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
                                struct ptrace_syscall_info *info)
{
        /*
         * As struct ptrace_syscall_info.entry is currently a subset
         * of struct ptrace_syscall_info.seccomp, it makes sense to
         * initialize that subset using ptrace_get_syscall_info_entry().
         * This can be reconsidered in the future if these structures
         * diverge significantly enough.
         */
        ptrace_get_syscall_info_entry(child, regs, info);
        info->op = PTRACE_SYSCALL_INFO_SECCOMP;
        info->seccomp.ret_data = child->ptrace_message;

        /* ret_data is the last field in struct ptrace_syscall_info.seccomp */
        return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
}

static unsigned long
ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
                             struct ptrace_syscall_info *info)
{
        info->op = PTRACE_SYSCALL_INFO_EXIT;
        info->exit.rval = syscall_get_error(child, regs);
        info->exit.is_error = !!info->exit.rval;
        if (!info->exit.is_error)
                info->exit.rval = syscall_get_return_value(child, regs);

        /* is_error is the last field in struct ptrace_syscall_info.exit */
        return offsetofend(struct ptrace_syscall_info, exit.is_error);
}

static int
ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
                        void __user *datavp)
{
        struct pt_regs *regs = task_pt_regs(child);
        struct ptrace_syscall_info info = {
                .op = PTRACE_SYSCALL_INFO_NONE,
                .arch = syscall_get_arch(child),
                .instruction_pointer = instruction_pointer(regs),
                .stack_pointer = user_stack_pointer(regs),
        };
        unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
        unsigned long write_size;

        /*
         * This does not need lock_task_sighand() to access
         * child->last_siginfo because ptrace_freeze_traced()
         * called earlier by ptrace_check_attach() ensures that
         * the tracee cannot go away and clear its last_siginfo.
         */
        switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
        case SIGTRAP | 0x80:
                switch (child->ptrace_message) {
                case PTRACE_EVENTMSG_SYSCALL_ENTRY:
                        actual_size = ptrace_get_syscall_info_entry(child, regs,
                                                                    &info);
                        break;
                case PTRACE_EVENTMSG_SYSCALL_EXIT:
                        actual_size = ptrace_get_syscall_info_exit(child, regs,
                                                                   &info);
                        break;
                }
                break;
        case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
                actual_size = ptrace_get_syscall_info_seccomp(child, regs,
                                                              &info);
                break;
        }

        write_size = min(actual_size, user_size);
        return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */

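/*
 * Illustrative userspace sketch (not part of the kernel source):
 * querying why the tracee stopped.  The call returns the number of
 * bytes the kernel could have written, so a caller can detect
 * truncation by comparing it with the size it passed in.
 *
 *      struct ptrace_syscall_info si;  // from the UAPI <linux/ptrace.h>
 *      long needed = ptrace(PTRACE_GET_SYSCALL_INFO, pid, sizeof(si), &si);
 *      if (needed > 0 && si.op == PTRACE_SYSCALL_INFO_ENTRY)
 *              ;       // si.entry.nr holds the syscall number
 */
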
int ptrace_request(struct task_struct *child, long request,
                   unsigned long addr, unsigned long data)
{
        bool seized = child->ptrace & PT_SEIZED;
        int ret = -EIO;
        kernel_siginfo_t siginfo, *si;
        void __user *datavp = (void __user *) data;
        unsigned long __user *datalp = datavp;
        unsigned long flags;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                return generic_ptrace_peekdata(child, addr, data);
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, datalp);
                break;

        case PTRACE_PEEKSIGINFO:
                ret = ptrace_peek_siginfo(child, addr, data);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user(datavp, &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                ret = copy_siginfo_from_user(&siginfo, datavp);
                if (!ret)
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        case PTRACE_GETSIGMASK: {
                sigset_t *mask;

                if (addr != sizeof(sigset_t)) {
                        ret = -EINVAL;
                        break;
                }

                if (test_tsk_restore_sigmask(child))
                        mask = &child->saved_sigmask;
                else
                        mask = &child->blocked;

                if (copy_to_user(datavp, mask, sizeof(sigset_t)))
                        ret = -EFAULT;
                else
                        ret = 0;

                break;
        }

        case PTRACE_SETSIGMASK: {
                sigset_t new_set;

                if (addr != sizeof(sigset_t)) {
                        ret = -EINVAL;
                        break;
                }

                if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
                        ret = -EFAULT;
                        break;
                }

                sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

                /*
                 * Every thread does recalc_sigpending() after resume, so
                 * retarget_shared_pending() and recalc_sigpending() are not
                 * called here.
                 */
                spin_lock_irq(&child->sighand->siglock);
                child->blocked = new_set;
                spin_unlock_irq(&child->sighand->siglock);

                clear_tsk_restore_sigmask(child);

                ret = 0;
                break;
        }

        case PTRACE_INTERRUPT:
                /*
                 * Stop tracee without any side-effect on signal or job
                 * control.  At least one trap is guaranteed to happen
                 * after this request.  If @child is already trapped, the
                 * current trap is not disturbed and another trap will
                 * happen after the current trap is ended with PTRACE_CONT.
                 *
                 * The actual trap might not be PTRACE_EVENT_STOP trap but
                 * the pending condition is cleared regardless.
                 */
                if (unlikely(!seized || !lock_task_sighand(child, &flags)))
                        break;

                /*
                 * INTERRUPT doesn't disturb existing trap sans one
                 * exception.  If ptracer issued LISTEN for the current
                 * STOP, this INTERRUPT should clear LISTEN and re-trap
                 * tracee into STOP.
                 */
                if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
                        ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

                unlock_task_sighand(child, &flags);
                ret = 0;
                break;

        case PTRACE_LISTEN:
                /*
                 * Listen for events.  Tracee must be in STOP.  It's not
                 * resumed per-se but is not considered to be in TRACED by
                 * wait(2) or ptrace(2).  If an async event (e.g. group
                 * stop state change) happens, tracee will enter STOP trap
                 * again.  Alternatively, ptracer can issue INTERRUPT to
                 * finish listening and re-trap tracee into STOP.
                 */
                if (unlikely(!seized || !lock_task_sighand(child, &flags)))
                        break;

                si = child->last_siginfo;
                if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
                        child->jobctl |= JOBCTL_LISTENING;
                        /*
                         * If NOTIFY is set, it means event happened between
                         * start of this trap and now.  Trigger re-trap.
                         */
                        if (child->jobctl & JOBCTL_TRAP_NOTIFY)
                                ptrace_signal_wake_up(child, true);
                        ret = 0;
                }
                unlock_task_sighand(child, &flags);
                break;

        case PTRACE_DETACH:      /* detach a process that was attached. */
                ret = ptrace_detach(child, data);
                break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
        case PTRACE_GETFDPIC: {
                struct mm_struct *mm = get_task_mm(child);
                unsigned long tmp = 0;

                ret = -ESRCH;
                if (!mm)
                        break;

                switch (addr) {
                case PTRACE_GETFDPIC_EXEC:
                        tmp = mm->context.exec_fdpic_loadmap;
                        break;
                case PTRACE_GETFDPIC_INTERP:
                        tmp = mm->context.interp_fdpic_loadmap;
                        break;
                default:
                        break;
                }
                mmput(mm);

                ret = put_user(tmp, datalp);
                break;
        }
#endif

#ifdef PTRACE_SINGLESTEP
        case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
        case PTRACE_SYSEMU:
        case PTRACE_SYSEMU_SINGLESTEP:
#endif
        case PTRACE_SYSCALL:
        case PTRACE_CONT:
                return ptrace_resume(child, request, data);

        case PTRACE_KILL:
                if (child->exit_state) /* already dead */
                        return 0;
                return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET: {
                struct iovec kiov;
                struct iovec __user *uiov = datavp;

                if (!access_ok(uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(kiov.iov_base, &uiov->iov_base) ||
                    __get_user(kiov.iov_len, &uiov->iov_len))
                        return -EFAULT;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }

        case PTRACE_GET_SYSCALL_INFO:
                ret = ptrace_get_syscall_info(child, addr, datavp);
                break;
#endif

        case PTRACE_SECCOMP_GET_FILTER:
                ret = seccomp_get_filter(child, addr, datavp);
                break;

        case PTRACE_SECCOMP_GET_METADATA:
                ret = seccomp_get_metadata(child, addr, datavp);
                break;

#ifdef CONFIG_RSEQ
        case PTRACE_GET_RSEQ_CONFIGURATION:
                ret = ptrace_get_rseq_configuration(child, addr, datavp);
                break;
#endif

        default:
                break;
        }

        return ret;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)       do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                unsigned long, data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                if (!ret)
                        arch_ptrace_attach(current);
                goto out;
        }

        child = find_get_task_by_vpid(pid);
        if (!child) {
                ret = -ESRCH;
                goto out;
        }

        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, addr, data);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                  request == PTRACE_INTERRUPT);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);
        if (ret || request != PTRACE_DETACH)
                ptrace_unfreeze_traced(child);

 out_put_task_struct:
        put_task_struct(child);
 out:
        return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        unsigned long tmp;
        int copied;

        copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
        if (copied != sizeof(tmp))
                return -EIO;
        return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        int copied;

        copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
                        FOLL_FORCE | FOLL_WRITE);
        return (copied == sizeof(data)) ? 0 : -EIO;
}

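/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * userspace counterpart of the helper above writes exactly one word:
 *
 *      if (ptrace(PTRACE_POKEDATA, pid, addr, new_word) == -1)
 *              perror("PTRACE_POKEDATA");
 */
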
#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
                          compat_ulong_t addr, compat_ulong_t data)
{
        compat_ulong_t __user *datap = compat_ptr(data);
        compat_ulong_t word;
        kernel_siginfo_t siginfo;
        int ret;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                ret = ptrace_access_vm(child, addr, &word, sizeof(word),
                                FOLL_FORCE);
                if (ret != sizeof(word))
                        ret = -EIO;
                else
                        ret = put_user(word, datap);
                break;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                ret = ptrace_access_vm(child, addr, &data, sizeof(data),
                                FOLL_FORCE | FOLL_WRITE);
                ret = (ret != sizeof(data) ? -EIO : 0);
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user((compat_ulong_t) child->ptrace_message, datap);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user32(
                                (struct compat_siginfo __user *) datap,
                                &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                ret = copy_siginfo_from_user32(
                        &siginfo, (struct compat_siginfo __user *) datap);
                if (!ret)
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct compat_iovec __user *uiov =
                        (struct compat_iovec __user *) datap;
                compat_uptr_t ptr;
                compat_size_t len;

                if (!access_ok(uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(ptr, &uiov->iov_base) ||
                    __get_user(len, &uiov->iov_len))
                        return -EFAULT;

                kiov.iov_base = compat_ptr(ptr);
                kiov.iov_len = len;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif

        default:
                ret = ptrace_request(child, request, addr, data);
        }

        return ret;
}

, compat_long_t
, request
, compat_long_t
, pid
,
1425 compat_long_t
, addr
, compat_long_t
, data
)
1427 struct task_struct
*child
;
1430 if (request
== PTRACE_TRACEME
) {
1431 ret
= ptrace_traceme();
1435 child
= find_get_task_by_vpid(pid
);
1441 if (request
== PTRACE_ATTACH
|| request
== PTRACE_SEIZE
) {
1442 ret
= ptrace_attach(child
, request
, addr
, data
);
1444 * Some architectures need to do book-keeping after
1448 arch_ptrace_attach(child
);
1449 goto out_put_task_struct
;
1452 ret
= ptrace_check_attach(child
, request
== PTRACE_KILL
||
1453 request
== PTRACE_INTERRUPT
);
1455 ret
= compat_arch_ptrace(child
, request
, addr
, data
);
1456 if (ret
|| request
!= PTRACE_DETACH
)
1457 ptrace_unfreeze_traced(child
);
1460 out_put_task_struct
:
1461 put_task_struct(child
);
1465 #endif /* CONFIG_COMPAT */