2 * Based on arch/arm/kernel/ptrace.c
5 * edited by Linus Torvalds
6 * ARM modifications Copyright (C) 2000 Russell King
7 * Copyright (C) 2012 ARM Ltd.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include <linux/audit.h>
23 #include <linux/compat.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
27 #include <linux/smp.h>
28 #include <linux/ptrace.h>
29 #include <linux/user.h>
30 #include <linux/seccomp.h>
31 #include <linux/security.h>
32 #include <linux/init.h>
33 #include <linux/signal.h>
34 #include <linux/uaccess.h>
35 #include <linux/perf_event.h>
36 #include <linux/hw_breakpoint.h>
37 #include <linux/regset.h>
38 #include <linux/tracehook.h>
39 #include <linux/elf.h>
41 #include <asm/compat.h>
42 #include <asm/debug-monitors.h>
43 #include <asm/pgtable.h>
44 #include <asm/syscall.h>
45 #include <asm/traps.h>
46 #include <asm/system_misc.h>
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/syscalls.h>
/* Maps a register's name to its byte offset within struct pt_regs. */
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}
61 static const struct pt_regs_offset regoffset_table
[] = {
93 {.name
= "lr", .offset
= offsetof(struct pt_regs
, regs
[30])},
96 REG_OFFSET_NAME(pstate
),
101 * regs_query_register_offset() - query register offset from its name
102 * @name: the name of a register
104 * regs_query_register_offset() returns the offset of a register in struct
105 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
107 int regs_query_register_offset(const char *name
)
109 const struct pt_regs_offset
*roff
;
111 for (roff
= regoffset_table
; roff
->name
!= NULL
; roff
++)
112 if (!strcmp(roff
->name
, name
))
118 * regs_within_kernel_stack() - check the address in the stack
119 * @regs: pt_regs which contains kernel stack pointer.
120 * @addr: address which is checked.
122 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
123 * If @addr is within the kernel stack, it returns true. If not, returns false.
125 static bool regs_within_kernel_stack(struct pt_regs
*regs
, unsigned long addr
)
127 return ((addr
& ~(THREAD_SIZE
- 1)) ==
128 (kernel_stack_pointer(regs
) & ~(THREAD_SIZE
- 1))) ||
129 on_irq_stack(addr
, raw_smp_processor_id());
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific worts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}
170 #ifdef CONFIG_HAVE_HW_BREAKPOINT
172 * Handle hitting a HW-breakpoint.
174 static void ptrace_hbptriggered(struct perf_event
*bp
,
175 struct perf_sample_data
*data
,
176 struct pt_regs
*regs
)
178 struct arch_hw_breakpoint
*bkpt
= counter_arch_bp(bp
);
182 .si_code
= TRAP_HWBKPT
,
183 .si_addr
= (void __user
*)(bkpt
->trigger
),
189 if (!is_compat_task())
192 for (i
= 0; i
< ARM_MAX_BRP
; ++i
) {
193 if (current
->thread
.debug
.hbp_break
[i
] == bp
) {
194 info
.si_errno
= (i
<< 1) + 1;
199 for (i
= 0; i
< ARM_MAX_WRP
; ++i
) {
200 if (current
->thread
.debug
.hbp_watch
[i
] == bp
) {
201 info
.si_errno
= -((i
<< 1) + 1);
208 force_sig_info(SIGTRAP
, &info
, current
);
212 * Unregister breakpoints from this task and reset the pointers in
215 void flush_ptrace_hw_breakpoint(struct task_struct
*tsk
)
218 struct thread_struct
*t
= &tsk
->thread
;
220 for (i
= 0; i
< ARM_MAX_BRP
; i
++) {
221 if (t
->debug
.hbp_break
[i
]) {
222 unregister_hw_breakpoint(t
->debug
.hbp_break
[i
]);
223 t
->debug
.hbp_break
[i
] = NULL
;
227 for (i
= 0; i
< ARM_MAX_WRP
; i
++) {
228 if (t
->debug
.hbp_watch
[i
]) {
229 unregister_hw_breakpoint(t
->debug
.hbp_watch
[i
]);
230 t
->debug
.hbp_watch
[i
] = NULL
;
235 void ptrace_hw_copy_thread(struct task_struct
*tsk
)
237 memset(&tsk
->thread
.debug
, 0, sizeof(struct debug_info
));
240 static struct perf_event
*ptrace_hbp_get_event(unsigned int note_type
,
241 struct task_struct
*tsk
,
244 struct perf_event
*bp
= ERR_PTR(-EINVAL
);
247 case NT_ARM_HW_BREAK
:
248 if (idx
< ARM_MAX_BRP
)
249 bp
= tsk
->thread
.debug
.hbp_break
[idx
];
251 case NT_ARM_HW_WATCH
:
252 if (idx
< ARM_MAX_WRP
)
253 bp
= tsk
->thread
.debug
.hbp_watch
[idx
];
260 static int ptrace_hbp_set_event(unsigned int note_type
,
261 struct task_struct
*tsk
,
263 struct perf_event
*bp
)
268 case NT_ARM_HW_BREAK
:
269 if (idx
< ARM_MAX_BRP
) {
270 tsk
->thread
.debug
.hbp_break
[idx
] = bp
;
274 case NT_ARM_HW_WATCH
:
275 if (idx
< ARM_MAX_WRP
) {
276 tsk
->thread
.debug
.hbp_watch
[idx
] = bp
;
285 static struct perf_event
*ptrace_hbp_create(unsigned int note_type
,
286 struct task_struct
*tsk
,
289 struct perf_event
*bp
;
290 struct perf_event_attr attr
;
294 case NT_ARM_HW_BREAK
:
295 type
= HW_BREAKPOINT_X
;
297 case NT_ARM_HW_WATCH
:
298 type
= HW_BREAKPOINT_RW
;
301 return ERR_PTR(-EINVAL
);
304 ptrace_breakpoint_init(&attr
);
307 * Initialise fields to sane defaults
308 * (i.e. values that will pass validation).
311 attr
.bp_len
= HW_BREAKPOINT_LEN_4
;
315 bp
= register_user_hw_breakpoint(&attr
, ptrace_hbptriggered
, NULL
, tsk
);
319 err
= ptrace_hbp_set_event(note_type
, tsk
, idx
, bp
);
326 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type
,
327 struct arch_hw_breakpoint_ctrl ctrl
,
328 struct perf_event_attr
*attr
)
330 int err
, len
, type
, offset
, disabled
= !ctrl
.enabled
;
332 attr
->disabled
= disabled
;
336 err
= arch_bp_generic_fields(ctrl
, &len
, &type
, &offset
);
341 case NT_ARM_HW_BREAK
:
342 if ((type
& HW_BREAKPOINT_X
) != type
)
345 case NT_ARM_HW_WATCH
:
346 if ((type
& HW_BREAKPOINT_RW
) != type
)
354 attr
->bp_type
= type
;
355 attr
->bp_addr
+= offset
;
360 static int ptrace_hbp_get_resource_info(unsigned int note_type
, u32
*info
)
366 case NT_ARM_HW_BREAK
:
367 num
= hw_breakpoint_slots(TYPE_INST
);
369 case NT_ARM_HW_WATCH
:
370 num
= hw_breakpoint_slots(TYPE_DATA
);
376 reg
|= debug_monitors_arch();
384 static int ptrace_hbp_get_ctrl(unsigned int note_type
,
385 struct task_struct
*tsk
,
389 struct perf_event
*bp
= ptrace_hbp_get_event(note_type
, tsk
, idx
);
394 *ctrl
= bp
? encode_ctrl_reg(counter_arch_bp(bp
)->ctrl
) : 0;
398 static int ptrace_hbp_get_addr(unsigned int note_type
,
399 struct task_struct
*tsk
,
403 struct perf_event
*bp
= ptrace_hbp_get_event(note_type
, tsk
, idx
);
408 *addr
= bp
? counter_arch_bp(bp
)->address
: 0;
/* Fetch the event for slot @idx, creating it on first use. */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}
424 static int ptrace_hbp_set_ctrl(unsigned int note_type
,
425 struct task_struct
*tsk
,
430 struct perf_event
*bp
;
431 struct perf_event_attr attr
;
432 struct arch_hw_breakpoint_ctrl ctrl
;
434 bp
= ptrace_hbp_get_initialised_bp(note_type
, tsk
, idx
);
441 decode_ctrl_reg(uctrl
, &ctrl
);
442 err
= ptrace_hbp_fill_attr_ctrl(note_type
, ctrl
, &attr
);
446 return modify_user_hw_breakpoint(bp
, &attr
);
449 static int ptrace_hbp_set_addr(unsigned int note_type
,
450 struct task_struct
*tsk
,
455 struct perf_event
*bp
;
456 struct perf_event_attr attr
;
458 bp
= ptrace_hbp_get_initialised_bp(note_type
, tsk
, idx
);
466 err
= modify_user_hw_breakpoint(bp
, &attr
);
470 #define PTRACE_HBP_ADDR_SZ sizeof(u64)
471 #define PTRACE_HBP_CTRL_SZ sizeof(u32)
472 #define PTRACE_HBP_PAD_SZ sizeof(u32)
474 static int hw_break_get(struct task_struct
*target
,
475 const struct user_regset
*regset
,
476 unsigned int pos
, unsigned int count
,
477 void *kbuf
, void __user
*ubuf
)
479 unsigned int note_type
= regset
->core_note_type
;
480 int ret
, idx
= 0, offset
, limit
;
485 ret
= ptrace_hbp_get_resource_info(note_type
, &info
);
489 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &info
, 0,
495 offset
= offsetof(struct user_hwdebug_state
, pad
);
496 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
, offset
,
497 offset
+ PTRACE_HBP_PAD_SZ
);
501 /* (address, ctrl) registers */
502 offset
= offsetof(struct user_hwdebug_state
, dbg_regs
);
503 limit
= regset
->n
* regset
->size
;
504 while (count
&& offset
< limit
) {
505 ret
= ptrace_hbp_get_addr(note_type
, target
, idx
, &addr
);
508 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &addr
,
509 offset
, offset
+ PTRACE_HBP_ADDR_SZ
);
512 offset
+= PTRACE_HBP_ADDR_SZ
;
514 ret
= ptrace_hbp_get_ctrl(note_type
, target
, idx
, &ctrl
);
517 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &ctrl
,
518 offset
, offset
+ PTRACE_HBP_CTRL_SZ
);
521 offset
+= PTRACE_HBP_CTRL_SZ
;
523 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
525 offset
+ PTRACE_HBP_PAD_SZ
);
528 offset
+= PTRACE_HBP_PAD_SZ
;
535 static int hw_break_set(struct task_struct
*target
,
536 const struct user_regset
*regset
,
537 unsigned int pos
, unsigned int count
,
538 const void *kbuf
, const void __user
*ubuf
)
540 unsigned int note_type
= regset
->core_note_type
;
541 int ret
, idx
= 0, offset
, limit
;
545 /* Resource info and pad */
546 offset
= offsetof(struct user_hwdebug_state
, dbg_regs
);
547 ret
= user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
, 0, offset
);
551 /* (address, ctrl) registers */
552 limit
= regset
->n
* regset
->size
;
553 while (count
&& offset
< limit
) {
554 if (count
< PTRACE_HBP_ADDR_SZ
)
556 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &addr
,
557 offset
, offset
+ PTRACE_HBP_ADDR_SZ
);
560 ret
= ptrace_hbp_set_addr(note_type
, target
, idx
, addr
);
563 offset
+= PTRACE_HBP_ADDR_SZ
;
567 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &ctrl
,
568 offset
, offset
+ PTRACE_HBP_CTRL_SZ
);
571 ret
= ptrace_hbp_set_ctrl(note_type
, target
, idx
, ctrl
);
574 offset
+= PTRACE_HBP_CTRL_SZ
;
576 ret
= user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
,
578 offset
+ PTRACE_HBP_PAD_SZ
);
581 offset
+= PTRACE_HBP_PAD_SZ
;
587 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
589 static int gpr_get(struct task_struct
*target
,
590 const struct user_regset
*regset
,
591 unsigned int pos
, unsigned int count
,
592 void *kbuf
, void __user
*ubuf
)
594 struct user_pt_regs
*uregs
= &task_pt_regs(target
)->user_regs
;
595 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0, -1);
598 static int gpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
599 unsigned int pos
, unsigned int count
,
600 const void *kbuf
, const void __user
*ubuf
)
603 struct user_pt_regs newregs
= task_pt_regs(target
)->user_regs
;
605 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &newregs
, 0, -1);
609 if (!valid_user_regs(&newregs
, target
))
612 task_pt_regs(target
)->user_regs
= newregs
;
617 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
619 static int fpr_get(struct task_struct
*target
, const struct user_regset
*regset
,
620 unsigned int pos
, unsigned int count
,
621 void *kbuf
, void __user
*ubuf
)
623 struct user_fpsimd_state
*uregs
;
624 uregs
= &target
->thread
.fpsimd_state
.user_fpsimd
;
625 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0, -1);
628 static int fpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
629 unsigned int pos
, unsigned int count
,
630 const void *kbuf
, const void __user
*ubuf
)
633 struct user_fpsimd_state newstate
=
634 target
->thread
.fpsimd_state
.user_fpsimd
;
636 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &newstate
, 0, -1);
640 target
->thread
.fpsimd_state
.user_fpsimd
= newstate
;
641 fpsimd_flush_task_state(target
);
645 static int tls_get(struct task_struct
*target
, const struct user_regset
*regset
,
646 unsigned int pos
, unsigned int count
,
647 void *kbuf
, void __user
*ubuf
)
649 unsigned long *tls
= &target
->thread
.tp_value
;
650 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, tls
, 0, -1);
653 static int tls_set(struct task_struct
*target
, const struct user_regset
*regset
,
654 unsigned int pos
, unsigned int count
,
655 const void *kbuf
, const void __user
*ubuf
)
658 unsigned long tls
= target
->thread
.tp_value
;
660 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &tls
, 0, -1);
664 target
->thread
.tp_value
= tls
;
668 static int system_call_get(struct task_struct
*target
,
669 const struct user_regset
*regset
,
670 unsigned int pos
, unsigned int count
,
671 void *kbuf
, void __user
*ubuf
)
673 int syscallno
= task_pt_regs(target
)->syscallno
;
675 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
679 static int system_call_set(struct task_struct
*target
,
680 const struct user_regset
*regset
,
681 unsigned int pos
, unsigned int count
,
682 const void *kbuf
, const void __user
*ubuf
)
684 int syscallno
= task_pt_regs(target
)->syscallno
;
687 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &syscallno
, 0, -1);
691 task_pt_regs(target
)->syscallno
= syscallno
;
/* Index space for the native (AArch64) regset table. */
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
};
706 static const struct user_regset aarch64_regsets
[] = {
708 .core_note_type
= NT_PRSTATUS
,
709 .n
= sizeof(struct user_pt_regs
) / sizeof(u64
),
711 .align
= sizeof(u64
),
716 .core_note_type
= NT_PRFPREG
,
717 .n
= sizeof(struct user_fpsimd_state
) / sizeof(u32
),
719 * We pretend we have 32-bit registers because the fpsr and
720 * fpcr are 32-bits wide.
723 .align
= sizeof(u32
),
728 .core_note_type
= NT_ARM_TLS
,
730 .size
= sizeof(void *),
731 .align
= sizeof(void *),
735 #ifdef CONFIG_HAVE_HW_BREAKPOINT
736 [REGSET_HW_BREAK
] = {
737 .core_note_type
= NT_ARM_HW_BREAK
,
738 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
740 .align
= sizeof(u32
),
744 [REGSET_HW_WATCH
] = {
745 .core_note_type
= NT_ARM_HW_WATCH
,
746 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
748 .align
= sizeof(u32
),
753 [REGSET_SYSTEM_CALL
] = {
754 .core_note_type
= NT_ARM_SYSTEM_CALL
,
757 .align
= sizeof(int),
758 .get
= system_call_get
,
759 .set
= system_call_set
,
763 static const struct user_regset_view user_aarch64_view
= {
764 .name
= "aarch64", .e_machine
= EM_AARCH64
,
765 .regsets
= aarch64_regsets
, .n
= ARRAY_SIZE(aarch64_regsets
)
769 #include <linux/compat.h>
776 static int compat_gpr_get(struct task_struct
*target
,
777 const struct user_regset
*regset
,
778 unsigned int pos
, unsigned int count
,
779 void *kbuf
, void __user
*ubuf
)
782 unsigned int i
, start
, num_regs
;
784 /* Calculate the number of AArch32 registers contained in count */
785 num_regs
= count
/ regset
->size
;
787 /* Convert pos into an register number */
788 start
= pos
/ regset
->size
;
790 if (start
+ num_regs
> regset
->n
)
793 for (i
= 0; i
< num_regs
; ++i
) {
794 unsigned int idx
= start
+ i
;
799 reg
= task_pt_regs(target
)->pc
;
802 reg
= task_pt_regs(target
)->pstate
;
805 reg
= task_pt_regs(target
)->orig_x0
;
808 reg
= task_pt_regs(target
)->regs
[idx
];
812 memcpy(kbuf
, ®
, sizeof(reg
));
815 ret
= copy_to_user(ubuf
, ®
, sizeof(reg
));
828 static int compat_gpr_set(struct task_struct
*target
,
829 const struct user_regset
*regset
,
830 unsigned int pos
, unsigned int count
,
831 const void *kbuf
, const void __user
*ubuf
)
833 struct pt_regs newregs
;
835 unsigned int i
, start
, num_regs
;
837 /* Calculate the number of AArch32 registers contained in count */
838 num_regs
= count
/ regset
->size
;
840 /* Convert pos into an register number */
841 start
= pos
/ regset
->size
;
843 if (start
+ num_regs
> regset
->n
)
846 newregs
= *task_pt_regs(target
);
848 for (i
= 0; i
< num_regs
; ++i
) {
849 unsigned int idx
= start
+ i
;
853 memcpy(®
, kbuf
, sizeof(reg
));
856 ret
= copy_from_user(®
, ubuf
, sizeof(reg
));
870 newregs
.pstate
= reg
;
873 newregs
.orig_x0
= reg
;
876 newregs
.regs
[idx
] = reg
;
881 if (valid_user_regs(&newregs
.user_regs
, target
))
882 *task_pt_regs(target
) = newregs
;
889 static int compat_vfp_get(struct task_struct
*target
,
890 const struct user_regset
*regset
,
891 unsigned int pos
, unsigned int count
,
892 void *kbuf
, void __user
*ubuf
)
894 struct user_fpsimd_state
*uregs
;
895 compat_ulong_t fpscr
;
898 uregs
= &target
->thread
.fpsimd_state
.user_fpsimd
;
901 * The VFP registers are packed into the fpsimd_state, so they all sit
902 * nicely together for us. We just need to create the fpscr separately.
904 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0,
905 VFP_STATE_SIZE
- sizeof(compat_ulong_t
));
908 fpscr
= (uregs
->fpsr
& VFP_FPSCR_STAT_MASK
) |
909 (uregs
->fpcr
& VFP_FPSCR_CTRL_MASK
);
910 ret
= put_user(fpscr
, (compat_ulong_t
*)ubuf
);
916 static int compat_vfp_set(struct task_struct
*target
,
917 const struct user_regset
*regset
,
918 unsigned int pos
, unsigned int count
,
919 const void *kbuf
, const void __user
*ubuf
)
921 struct user_fpsimd_state
*uregs
;
922 compat_ulong_t fpscr
;
925 if (pos
+ count
> VFP_STATE_SIZE
)
928 uregs
= &target
->thread
.fpsimd_state
.user_fpsimd
;
930 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0,
931 VFP_STATE_SIZE
- sizeof(compat_ulong_t
));
934 ret
= get_user(fpscr
, (compat_ulong_t
*)ubuf
);
935 uregs
->fpsr
= fpscr
& VFP_FPSCR_STAT_MASK
;
936 uregs
->fpcr
= fpscr
& VFP_FPSCR_CTRL_MASK
;
939 fpsimd_flush_task_state(target
);
943 static int compat_tls_get(struct task_struct
*target
,
944 const struct user_regset
*regset
, unsigned int pos
,
945 unsigned int count
, void *kbuf
, void __user
*ubuf
)
947 compat_ulong_t tls
= (compat_ulong_t
)target
->thread
.tp_value
;
948 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &tls
, 0, -1);
951 static int compat_tls_set(struct task_struct
*target
,
952 const struct user_regset
*regset
, unsigned int pos
,
953 unsigned int count
, const void *kbuf
,
954 const void __user
*ubuf
)
957 compat_ulong_t tls
= target
->thread
.tp_value
;
959 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &tls
, 0, -1);
963 target
->thread
.tp_value
= tls
;
967 static const struct user_regset aarch32_regsets
[] = {
968 [REGSET_COMPAT_GPR
] = {
969 .core_note_type
= NT_PRSTATUS
,
970 .n
= COMPAT_ELF_NGREG
,
971 .size
= sizeof(compat_elf_greg_t
),
972 .align
= sizeof(compat_elf_greg_t
),
973 .get
= compat_gpr_get
,
974 .set
= compat_gpr_set
976 [REGSET_COMPAT_VFP
] = {
977 .core_note_type
= NT_ARM_VFP
,
978 .n
= VFP_STATE_SIZE
/ sizeof(compat_ulong_t
),
979 .size
= sizeof(compat_ulong_t
),
980 .align
= sizeof(compat_ulong_t
),
981 .get
= compat_vfp_get
,
982 .set
= compat_vfp_set
986 static const struct user_regset_view user_aarch32_view
= {
987 .name
= "aarch32", .e_machine
= EM_ARM
,
988 .regsets
= aarch32_regsets
, .n
= ARRAY_SIZE(aarch32_regsets
)
991 static const struct user_regset aarch32_ptrace_regsets
[] = {
993 .core_note_type
= NT_PRSTATUS
,
994 .n
= COMPAT_ELF_NGREG
,
995 .size
= sizeof(compat_elf_greg_t
),
996 .align
= sizeof(compat_elf_greg_t
),
997 .get
= compat_gpr_get
,
998 .set
= compat_gpr_set
1001 .core_note_type
= NT_ARM_VFP
,
1002 .n
= VFP_STATE_SIZE
/ sizeof(compat_ulong_t
),
1003 .size
= sizeof(compat_ulong_t
),
1004 .align
= sizeof(compat_ulong_t
),
1005 .get
= compat_vfp_get
,
1006 .set
= compat_vfp_set
1009 .core_note_type
= NT_ARM_TLS
,
1011 .size
= sizeof(compat_ulong_t
),
1012 .align
= sizeof(compat_ulong_t
),
1013 .get
= compat_tls_get
,
1014 .set
= compat_tls_set
,
1016 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1017 [REGSET_HW_BREAK
] = {
1018 .core_note_type
= NT_ARM_HW_BREAK
,
1019 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
1020 .size
= sizeof(u32
),
1021 .align
= sizeof(u32
),
1022 .get
= hw_break_get
,
1023 .set
= hw_break_set
,
1025 [REGSET_HW_WATCH
] = {
1026 .core_note_type
= NT_ARM_HW_WATCH
,
1027 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
1028 .size
= sizeof(u32
),
1029 .align
= sizeof(u32
),
1030 .get
= hw_break_get
,
1031 .set
= hw_break_set
,
1034 [REGSET_SYSTEM_CALL
] = {
1035 .core_note_type
= NT_ARM_SYSTEM_CALL
,
1037 .size
= sizeof(int),
1038 .align
= sizeof(int),
1039 .get
= system_call_get
,
1040 .set
= system_call_set
,
1044 static const struct user_regset_view user_aarch32_ptrace_view
= {
1045 .name
= "aarch32", .e_machine
= EM_ARM
,
1046 .regsets
= aarch32_ptrace_regsets
, .n
= ARRAY_SIZE(aarch32_ptrace_regsets
)
1049 static int compat_ptrace_read_user(struct task_struct
*tsk
, compat_ulong_t off
,
1050 compat_ulong_t __user
*ret
)
1057 if (off
== COMPAT_PT_TEXT_ADDR
)
1058 tmp
= tsk
->mm
->start_code
;
1059 else if (off
== COMPAT_PT_DATA_ADDR
)
1060 tmp
= tsk
->mm
->start_data
;
1061 else if (off
== COMPAT_PT_TEXT_END_ADDR
)
1062 tmp
= tsk
->mm
->end_code
;
1063 else if (off
< sizeof(compat_elf_gregset_t
))
1064 return copy_regset_to_user(tsk
, &user_aarch32_view
,
1065 REGSET_COMPAT_GPR
, off
,
1066 sizeof(compat_ulong_t
), ret
);
1067 else if (off
>= COMPAT_USER_SZ
)
1072 return put_user(tmp
, ret
);
1075 static int compat_ptrace_write_user(struct task_struct
*tsk
, compat_ulong_t off
,
1079 mm_segment_t old_fs
= get_fs();
1081 if (off
& 3 || off
>= COMPAT_USER_SZ
)
1084 if (off
>= sizeof(compat_elf_gregset_t
))
1088 ret
= copy_regset_from_user(tsk
, &user_aarch32_view
,
1089 REGSET_COMPAT_GPR
, off
,
1090 sizeof(compat_ulong_t
),
1097 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1100 * Convert a virtual register number into an index for a thread_info
1101 * breakpoint array. Breakpoints are identified using positive numbers
1102 * whilst watchpoints are negative. The registers are laid out as pairs
1103 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
1104 * Register 0 is reserved for describing resource information.
1106 static int compat_ptrace_hbp_num_to_idx(compat_long_t num
)
1108 return (abs(num
) - 1) >> 1;
1111 static int compat_ptrace_hbp_get_resource_info(u32
*kdata
)
1113 u8 num_brps
, num_wrps
, debug_arch
, wp_len
;
1116 num_brps
= hw_breakpoint_slots(TYPE_INST
);
1117 num_wrps
= hw_breakpoint_slots(TYPE_DATA
);
1119 debug_arch
= debug_monitors_arch();
1133 static int compat_ptrace_hbp_get(unsigned int note_type
,
1134 struct task_struct
*tsk
,
1141 int err
, idx
= compat_ptrace_hbp_num_to_idx(num
);;
1144 err
= ptrace_hbp_get_addr(note_type
, tsk
, idx
, &addr
);
1147 err
= ptrace_hbp_get_ctrl(note_type
, tsk
, idx
, &ctrl
);
1154 static int compat_ptrace_hbp_set(unsigned int note_type
,
1155 struct task_struct
*tsk
,
1162 int err
, idx
= compat_ptrace_hbp_num_to_idx(num
);
1166 err
= ptrace_hbp_set_addr(note_type
, tsk
, idx
, addr
);
1169 err
= ptrace_hbp_set_ctrl(note_type
, tsk
, idx
, ctrl
);
1175 static int compat_ptrace_gethbpregs(struct task_struct
*tsk
, compat_long_t num
,
1176 compat_ulong_t __user
*data
)
1180 mm_segment_t old_fs
= get_fs();
1185 ret
= compat_ptrace_hbp_get(NT_ARM_HW_WATCH
, tsk
, num
, &kdata
);
1187 } else if (num
== 0) {
1188 ret
= compat_ptrace_hbp_get_resource_info(&kdata
);
1191 ret
= compat_ptrace_hbp_get(NT_ARM_HW_BREAK
, tsk
, num
, &kdata
);
1196 ret
= put_user(kdata
, data
);
1201 static int compat_ptrace_sethbpregs(struct task_struct
*tsk
, compat_long_t num
,
1202 compat_ulong_t __user
*data
)
1206 mm_segment_t old_fs
= get_fs();
1211 ret
= get_user(kdata
, data
);
1217 ret
= compat_ptrace_hbp_set(NT_ARM_HW_WATCH
, tsk
, num
, &kdata
);
1219 ret
= compat_ptrace_hbp_set(NT_ARM_HW_BREAK
, tsk
, num
, &kdata
);
1224 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1226 long compat_arch_ptrace(struct task_struct
*child
, compat_long_t request
,
1227 compat_ulong_t caddr
, compat_ulong_t cdata
)
1229 unsigned long addr
= caddr
;
1230 unsigned long data
= cdata
;
1231 void __user
*datap
= compat_ptr(data
);
1235 case PTRACE_PEEKUSR
:
1236 ret
= compat_ptrace_read_user(child
, addr
, datap
);
1239 case PTRACE_POKEUSR
:
1240 ret
= compat_ptrace_write_user(child
, addr
, data
);
1243 case COMPAT_PTRACE_GETREGS
:
1244 ret
= copy_regset_to_user(child
,
1247 0, sizeof(compat_elf_gregset_t
),
1251 case COMPAT_PTRACE_SETREGS
:
1252 ret
= copy_regset_from_user(child
,
1255 0, sizeof(compat_elf_gregset_t
),
1259 case COMPAT_PTRACE_GET_THREAD_AREA
:
1260 ret
= put_user((compat_ulong_t
)child
->thread
.tp_value
,
1261 (compat_ulong_t __user
*)datap
);
1264 case COMPAT_PTRACE_SET_SYSCALL
:
1265 task_pt_regs(child
)->syscallno
= data
;
1269 case COMPAT_PTRACE_GETVFPREGS
:
1270 ret
= copy_regset_to_user(child
,
1277 case COMPAT_PTRACE_SETVFPREGS
:
1278 ret
= copy_regset_from_user(child
,
1285 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1286 case COMPAT_PTRACE_GETHBPREGS
:
1287 ret
= compat_ptrace_gethbpregs(child
, addr
, datap
);
1290 case COMPAT_PTRACE_SETHBPREGS
:
1291 ret
= compat_ptrace_sethbpregs(child
, addr
, datap
);
1296 ret
= compat_ptrace_request(child
, request
, addr
,
1303 #endif /* CONFIG_COMPAT */
1305 const struct user_regset_view
*task_user_regset_view(struct task_struct
*task
)
1307 #ifdef CONFIG_COMPAT
1309 * Core dumping of 32-bit tasks or compat ptrace requests must use the
1310 * user_aarch32_view compatible with arm32. Native ptrace requests on
1311 * 32-bit children use an extended user_aarch32_ptrace_view to allow
1312 * access to the TLS register.
1314 if (is_compat_task())
1315 return &user_aarch32_view
;
1316 else if (is_compat_thread(task_thread_info(task
)))
1317 return &user_aarch32_ptrace_view
;
1319 return &user_aarch64_view
;
/* Native ptrace: everything is handled by the generic request code. */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
/* Direction marker written into the scratch register during tracing. */
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};
1333 static void tracehook_report_syscall(struct pt_regs
*regs
,
1334 enum ptrace_syscall_dir dir
)
1337 unsigned long saved_reg
;
1340 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
1341 * used to denote syscall entry/exit:
1343 regno
= (is_compat_task() ? 12 : 7);
1344 saved_reg
= regs
->regs
[regno
];
1345 regs
->regs
[regno
] = dir
;
1347 if (dir
== PTRACE_SYSCALL_EXIT
)
1348 tracehook_report_syscall_exit(regs
, 0);
1349 else if (tracehook_report_syscall_entry(regs
))
1350 regs
->syscallno
= ~0UL;
1352 regs
->regs
[regno
] = saved_reg
;
1355 asmlinkage
int syscall_trace_enter(struct pt_regs
*regs
)
1357 if (test_thread_flag(TIF_SYSCALL_TRACE
))
1358 tracehook_report_syscall(regs
, PTRACE_SYSCALL_ENTER
);
1360 /* Do the secure computing after ptrace; failures should be fast. */
1361 if (secure_computing(NULL
) == -1)
1364 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT
))
1365 trace_sys_enter(regs
, regs
->syscallno
);
1367 audit_syscall_entry(regs
->syscallno
, regs
->orig_x0
, regs
->regs
[1],
1368 regs
->regs
[2], regs
->regs
[3]);
1370 return regs
->syscallno
;
1373 asmlinkage
void syscall_trace_exit(struct pt_regs
*regs
)
1375 audit_syscall_exit(regs
);
1377 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT
))
1378 trace_sys_exit(regs
, regs_return_value(regs
));
1380 if (test_thread_flag(TIF_SYSCALL_TRACE
))
1381 tracehook_report_syscall(regs
, PTRACE_SYSCALL_EXIT
);
/*
 * Bits which are always architecturally RES0 per ARM DDI 0487A.h
 * Userspace cannot use these until they have an architectural meaning.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
	 GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20))
1395 static int valid_compat_regs(struct user_pt_regs
*regs
)
1397 regs
->pstate
&= ~SPSR_EL1_AARCH32_RES0_BITS
;
1399 if (!system_supports_mixed_endian_el0()) {
1400 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN
))
1401 regs
->pstate
|= COMPAT_PSR_E_BIT
;
1403 regs
->pstate
&= ~COMPAT_PSR_E_BIT
;
1406 if (user_mode(regs
) && (regs
->pstate
& PSR_MODE32_BIT
) &&
1407 (regs
->pstate
& COMPAT_PSR_A_BIT
) == 0 &&
1408 (regs
->pstate
& COMPAT_PSR_I_BIT
) == 0 &&
1409 (regs
->pstate
& COMPAT_PSR_F_BIT
) == 0) {
1414 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
1417 regs
->pstate
&= COMPAT_PSR_N_BIT
| COMPAT_PSR_Z_BIT
|
1418 COMPAT_PSR_C_BIT
| COMPAT_PSR_V_BIT
|
1419 COMPAT_PSR_Q_BIT
| COMPAT_PSR_IT_MASK
|
1420 COMPAT_PSR_GE_MASK
| COMPAT_PSR_E_BIT
|
1422 regs
->pstate
|= PSR_MODE32_BIT
;
1427 static int valid_native_regs(struct user_pt_regs
*regs
)
1429 regs
->pstate
&= ~SPSR_EL1_AARCH64_RES0_BITS
;
1431 if (user_mode(regs
) && !(regs
->pstate
& PSR_MODE32_BIT
) &&
1432 (regs
->pstate
& PSR_D_BIT
) == 0 &&
1433 (regs
->pstate
& PSR_A_BIT
) == 0 &&
1434 (regs
->pstate
& PSR_I_BIT
) == 0 &&
1435 (regs
->pstate
& PSR_F_BIT
) == 0) {
1439 /* Force PSR to a valid 64-bit EL0t */
1440 regs
->pstate
&= PSR_N_BIT
| PSR_Z_BIT
| PSR_C_BIT
| PSR_V_BIT
;
1446 * Are the current registers suitable for user mode? (used to maintain
1447 * security in signal handlers)
1449 int valid_user_regs(struct user_pt_regs
*regs
, struct task_struct
*task
)
1451 if (!test_tsk_thread_flag(task
, TIF_SINGLESTEP
))
1452 regs
->pstate
&= ~DBG_SPSR_SS
;
1454 if (is_compat_thread(task_thread_info(task
)))
1455 return valid_compat_regs(regs
);
1457 return valid_native_regs(regs
);