/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>

/*
 * Access another process' address space via ptrace.
 * The source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}

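/*
 * Link @child onto @new_parent's ptraced list and record the tracer's
 * credentials for later permission checks. Called with tasklist_lock
 * write-held.
 */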
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
		   const struct cred *ptracer_cred)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	child->ptracer_cred = get_cred(ptracer_cred);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	rcu_read_lock();
	__ptrace_link(child, new_parent, __task_cred(new_parent));
	rcu_read_unlock();
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting. For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop. If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task. However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING. TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group; set the signal
		 * for future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt. Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

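/*
 * Undo ptrace_freeze_traced(): restore the killable TASK_TRACED state,
 * or wake the tracee up if a fatal signal arrived while it was frozen.
 */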
static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	/*
	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
	 * Recheck state under the lock to close this race.
	 */
	spin_lock_irq(&task->sighand->siglock);
	if (task->state == __TASK_TRACED) {
		if (__fatal_signal_pending(task))
			wake_up_state(task, __TASK_TRACED);
		else
			task->state = TASK_TRACED;
	}
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations. If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing. If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks. After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}

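/*
 * Check whether the caller has CAP_SYS_PTRACE in @ns, suppressing the
 * audit record when PTRACE_MODE_NOAUDIT is set.
 */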
static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
int ___ptrace_may_access(struct task_struct *cur, struct task_struct *task,
			 unsigned int mode)
{
	const struct cred *cred = __task_cred(cur), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, cur))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	if (!(mode & PTRACE_MODE_NOACCESS_CHK))
		return security_ptrace_access_check(task, mode);

	return 0;
}
EXPORT_SYMBOL_GPL(___ptrace_may_access);

static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	return ___ptrace_may_access(current, task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

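/*
 * Attach to @task for PTRACE_ATTACH or PTRACE_SEIZE: validate the SEIZE
 * arguments, perform the access checks under cred_guard_mutex, link the
 * tracee to the tracer and, for plain ATTACH, send it a SIGSTOP.
 */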
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	task->ptrace = flags;

	ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens. We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state. IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}

/**
 * ptrace_traceme -- helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping. Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now; in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

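/*
 * Copy @len bytes from @src in @tsk's address space into the @dst user
 * buffer, working through a 128-byte bounce buffer. Returns the number
 * of bytes copied, -EIO if nothing could be read, or -EFAULT on a bad
 * user buffer.
 */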
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

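/*
 * Mirror image of ptrace_readdata(): copy @len bytes from the @src user
 * buffer into @dst in @tsk's address space, again in 128-byte chunks.
 */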
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
					  FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

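/*
 * Implement PTRACE_SETOPTIONS: validate @data against PTRACE_O_MASK
 * (PTRACE_O_SUSPEND_SECCOMP additionally requires CAP_SYS_ADMIN and a
 * tracer that is not itself confined by seccomp) and install the
 * options into child->ptrace.
 */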
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}

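/*
 * Fetch or overwrite the siginfo for the signal that stopped @child.
 * Both helpers fail with -EINVAL if the tracee is not stopped at a
 * signal delivery point (i.e. ->last_siginfo is NULL).
 */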
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

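/*
 * Implement PTRACE_PEEKSIGINFO: copy up to arg.nr siginfo_t entries from
 * the tracee's pending-signal queue (the shared queue if
 * PTRACE_PEEKSIGINFO_SHARED is set) to user space, starting at queue
 * offset arg.off. Returns the number of entries copied, or a negative
 * errno if nothing was copied.
 */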
static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

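/*
 * Common backend for the resuming requests (CONT, SYSCALL, SYSEMU,
 * single-step and block-step): set or clear the relevant TIF flags,
 * record @data as the signal to deliver and wake the tracee out of
 * __TASK_TRACED.
 */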
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

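/*
 * Look up the regset in @view whose core note type matches @type, e.g.
 * NT_PRSTATUS for the general-purpose registers.
 */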
static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

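/*
 * Common backend for PTRACE_GETREGSET/PTRACE_SETREGSET. @kiov->iov_len
 * must be a multiple of the regset's unit size and is clamped to the
 * regset's total size before the copy.
 */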
static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code. We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif

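/*
 * Handle the architecture-independent ptrace requests. arch_ptrace()
 * implementations are expected to fall back to this for any request
 * they do not handle themselves.
 */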
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK:
		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control. At least one trap is guaranteed to happen
		 * after this request. If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception. If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events. Tracee must be in STOP. It's not
		 * resumed per se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2). If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again. Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now. Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}

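/*
 * Look the tracee up by PID in the caller's PID namespace and take a
 * reference on it; returns ERR_PTR(-ESRCH) if no such task exists.
 */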
static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

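/*
 * Default PTRACE_PEEKTEXT/PEEKDATA and PTRACE_POKETEXT/POKEDATA helpers:
 * transfer one word at @addr in the tracee, failing with -EIO on a
 * partial access.
 */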
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
				  FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT

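/*
 * 32-bit compat counterpart of ptrace_request(): handles the requests
 * whose argument layout differs under compat (word-sized peeks and
 * pokes, siginfo and iovec translation) and defers everything else to
 * ptrace_request().
 */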
int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				       FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				       FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */