/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
22 #include <linux/audit.h>
23 #include <linux/compat.h>
24 #include <linux/kernel.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/task_stack.h>
28 #include <linux/nospec.h>
29 #include <linux/smp.h>
30 #include <linux/ptrace.h>
31 #include <linux/user.h>
32 #include <linux/seccomp.h>
33 #include <linux/security.h>
34 #include <linux/init.h>
35 #include <linux/signal.h>
36 #include <linux/string.h>
37 #include <linux/uaccess.h>
38 #include <linux/perf_event.h>
39 #include <linux/hw_breakpoint.h>
40 #include <linux/regset.h>
41 #include <linux/tracehook.h>
42 #include <linux/elf.h>
44 #include <asm/compat.h>
45 #include <asm/cpufeature.h>
46 #include <asm/debug-monitors.h>
47 #include <asm/pgtable.h>
48 #include <asm/stacktrace.h>
49 #include <asm/syscall.h>
50 #include <asm/traps.h>
51 #include <asm/system_misc.h>
53 #define CREATE_TRACE_POINTS
54 #include <trace/events/syscalls.h>
56 struct pt_regs_offset
{
61 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
62 #define REG_OFFSET_END {.name = NULL, .offset = 0}
63 #define GPR_OFFSET_NAME(r) \
64 {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}
66 static const struct pt_regs_offset regoffset_table
[] = {
98 {.name
= "lr", .offset
= offsetof(struct pt_regs
, regs
[30])},
101 REG_OFFSET_NAME(pstate
),
106 * regs_query_register_offset() - query register offset from its name
107 * @name: the name of a register
109 * regs_query_register_offset() returns the offset of a register in struct
110 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
112 int regs_query_register_offset(const char *name
)
114 const struct pt_regs_offset
*roff
;
116 for (roff
= regoffset_table
; roff
->name
!= NULL
; roff
++)
117 if (!strcmp(roff
->name
, name
))
123 * regs_within_kernel_stack() - check the address in the stack
124 * @regs: pt_regs which contains kernel stack pointer.
125 * @addr: address which is checked.
127 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
128 * If @addr is within the kernel stack, it returns true. If not, returns false.
130 static bool regs_within_kernel_stack(struct pt_regs
*regs
, unsigned long addr
)
132 return ((addr
& ~(THREAD_SIZE
- 1)) ==
133 (kernel_stack_pointer(regs
) & ~(THREAD_SIZE
- 1))) ||
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific worts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}
175 #ifdef CONFIG_HAVE_HW_BREAKPOINT
177 * Handle hitting a HW-breakpoint.
179 static void ptrace_hbptriggered(struct perf_event
*bp
,
180 struct perf_sample_data
*data
,
181 struct pt_regs
*regs
)
183 struct arch_hw_breakpoint
*bkpt
= counter_arch_bp(bp
);
187 .si_code
= TRAP_HWBKPT
,
188 .si_addr
= (void __user
*)(bkpt
->trigger
),
194 if (!is_compat_task())
197 for (i
= 0; i
< ARM_MAX_BRP
; ++i
) {
198 if (current
->thread
.debug
.hbp_break
[i
] == bp
) {
199 info
.si_errno
= (i
<< 1) + 1;
204 for (i
= 0; i
< ARM_MAX_WRP
; ++i
) {
205 if (current
->thread
.debug
.hbp_watch
[i
] == bp
) {
206 info
.si_errno
= -((i
<< 1) + 1);
213 force_sig_info(SIGTRAP
, &info
, current
);
217 * Unregister breakpoints from this task and reset the pointers in
220 void flush_ptrace_hw_breakpoint(struct task_struct
*tsk
)
223 struct thread_struct
*t
= &tsk
->thread
;
225 for (i
= 0; i
< ARM_MAX_BRP
; i
++) {
226 if (t
->debug
.hbp_break
[i
]) {
227 unregister_hw_breakpoint(t
->debug
.hbp_break
[i
]);
228 t
->debug
.hbp_break
[i
] = NULL
;
232 for (i
= 0; i
< ARM_MAX_WRP
; i
++) {
233 if (t
->debug
.hbp_watch
[i
]) {
234 unregister_hw_breakpoint(t
->debug
.hbp_watch
[i
]);
235 t
->debug
.hbp_watch
[i
] = NULL
;
240 void ptrace_hw_copy_thread(struct task_struct
*tsk
)
242 memset(&tsk
->thread
.debug
, 0, sizeof(struct debug_info
));
245 static struct perf_event
*ptrace_hbp_get_event(unsigned int note_type
,
246 struct task_struct
*tsk
,
249 struct perf_event
*bp
= ERR_PTR(-EINVAL
);
252 case NT_ARM_HW_BREAK
:
253 if (idx
>= ARM_MAX_BRP
)
255 idx
= array_index_nospec(idx
, ARM_MAX_BRP
);
256 bp
= tsk
->thread
.debug
.hbp_break
[idx
];
258 case NT_ARM_HW_WATCH
:
259 if (idx
>= ARM_MAX_WRP
)
261 idx
= array_index_nospec(idx
, ARM_MAX_WRP
);
262 bp
= tsk
->thread
.debug
.hbp_watch
[idx
];
270 static int ptrace_hbp_set_event(unsigned int note_type
,
271 struct task_struct
*tsk
,
273 struct perf_event
*bp
)
278 case NT_ARM_HW_BREAK
:
279 if (idx
>= ARM_MAX_BRP
)
281 idx
= array_index_nospec(idx
, ARM_MAX_BRP
);
282 tsk
->thread
.debug
.hbp_break
[idx
] = bp
;
285 case NT_ARM_HW_WATCH
:
286 if (idx
>= ARM_MAX_WRP
)
288 idx
= array_index_nospec(idx
, ARM_MAX_WRP
);
289 tsk
->thread
.debug
.hbp_watch
[idx
] = bp
;
298 static struct perf_event
*ptrace_hbp_create(unsigned int note_type
,
299 struct task_struct
*tsk
,
302 struct perf_event
*bp
;
303 struct perf_event_attr attr
;
307 case NT_ARM_HW_BREAK
:
308 type
= HW_BREAKPOINT_X
;
310 case NT_ARM_HW_WATCH
:
311 type
= HW_BREAKPOINT_RW
;
314 return ERR_PTR(-EINVAL
);
317 ptrace_breakpoint_init(&attr
);
320 * Initialise fields to sane defaults
321 * (i.e. values that will pass validation).
324 attr
.bp_len
= HW_BREAKPOINT_LEN_4
;
328 bp
= register_user_hw_breakpoint(&attr
, ptrace_hbptriggered
, NULL
, tsk
);
332 err
= ptrace_hbp_set_event(note_type
, tsk
, idx
, bp
);
339 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type
,
340 struct arch_hw_breakpoint_ctrl ctrl
,
341 struct perf_event_attr
*attr
)
343 int err
, len
, type
, offset
, disabled
= !ctrl
.enabled
;
345 attr
->disabled
= disabled
;
349 err
= arch_bp_generic_fields(ctrl
, &len
, &type
, &offset
);
354 case NT_ARM_HW_BREAK
:
355 if ((type
& HW_BREAKPOINT_X
) != type
)
358 case NT_ARM_HW_WATCH
:
359 if ((type
& HW_BREAKPOINT_RW
) != type
)
367 attr
->bp_type
= type
;
368 attr
->bp_addr
+= offset
;
373 static int ptrace_hbp_get_resource_info(unsigned int note_type
, u32
*info
)
379 case NT_ARM_HW_BREAK
:
380 num
= hw_breakpoint_slots(TYPE_INST
);
382 case NT_ARM_HW_WATCH
:
383 num
= hw_breakpoint_slots(TYPE_DATA
);
389 reg
|= debug_monitors_arch();
397 static int ptrace_hbp_get_ctrl(unsigned int note_type
,
398 struct task_struct
*tsk
,
402 struct perf_event
*bp
= ptrace_hbp_get_event(note_type
, tsk
, idx
);
407 *ctrl
= bp
? encode_ctrl_reg(counter_arch_bp(bp
)->ctrl
) : 0;
411 static int ptrace_hbp_get_addr(unsigned int note_type
,
412 struct task_struct
*tsk
,
416 struct perf_event
*bp
= ptrace_hbp_get_event(note_type
, tsk
, idx
);
421 *addr
= bp
? counter_arch_bp(bp
)->address
: 0;
/* Get the event for slot @idx, creating it on first use. */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}
437 static int ptrace_hbp_set_ctrl(unsigned int note_type
,
438 struct task_struct
*tsk
,
443 struct perf_event
*bp
;
444 struct perf_event_attr attr
;
445 struct arch_hw_breakpoint_ctrl ctrl
;
447 bp
= ptrace_hbp_get_initialised_bp(note_type
, tsk
, idx
);
454 decode_ctrl_reg(uctrl
, &ctrl
);
455 err
= ptrace_hbp_fill_attr_ctrl(note_type
, ctrl
, &attr
);
459 return modify_user_hw_breakpoint(bp
, &attr
);
462 static int ptrace_hbp_set_addr(unsigned int note_type
,
463 struct task_struct
*tsk
,
468 struct perf_event
*bp
;
469 struct perf_event_attr attr
;
471 bp
= ptrace_hbp_get_initialised_bp(note_type
, tsk
, idx
);
479 err
= modify_user_hw_breakpoint(bp
, &attr
);
483 #define PTRACE_HBP_ADDR_SZ sizeof(u64)
484 #define PTRACE_HBP_CTRL_SZ sizeof(u32)
485 #define PTRACE_HBP_PAD_SZ sizeof(u32)
487 static int hw_break_get(struct task_struct
*target
,
488 const struct user_regset
*regset
,
489 unsigned int pos
, unsigned int count
,
490 void *kbuf
, void __user
*ubuf
)
492 unsigned int note_type
= regset
->core_note_type
;
493 int ret
, idx
= 0, offset
, limit
;
498 ret
= ptrace_hbp_get_resource_info(note_type
, &info
);
502 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &info
, 0,
508 offset
= offsetof(struct user_hwdebug_state
, pad
);
509 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
, offset
,
510 offset
+ PTRACE_HBP_PAD_SZ
);
514 /* (address, ctrl) registers */
515 offset
= offsetof(struct user_hwdebug_state
, dbg_regs
);
516 limit
= regset
->n
* regset
->size
;
517 while (count
&& offset
< limit
) {
518 ret
= ptrace_hbp_get_addr(note_type
, target
, idx
, &addr
);
521 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &addr
,
522 offset
, offset
+ PTRACE_HBP_ADDR_SZ
);
525 offset
+= PTRACE_HBP_ADDR_SZ
;
527 ret
= ptrace_hbp_get_ctrl(note_type
, target
, idx
, &ctrl
);
530 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &ctrl
,
531 offset
, offset
+ PTRACE_HBP_CTRL_SZ
);
534 offset
+= PTRACE_HBP_CTRL_SZ
;
536 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
538 offset
+ PTRACE_HBP_PAD_SZ
);
541 offset
+= PTRACE_HBP_PAD_SZ
;
548 static int hw_break_set(struct task_struct
*target
,
549 const struct user_regset
*regset
,
550 unsigned int pos
, unsigned int count
,
551 const void *kbuf
, const void __user
*ubuf
)
553 unsigned int note_type
= regset
->core_note_type
;
554 int ret
, idx
= 0, offset
, limit
;
558 /* Resource info and pad */
559 offset
= offsetof(struct user_hwdebug_state
, dbg_regs
);
560 ret
= user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
, 0, offset
);
564 /* (address, ctrl) registers */
565 limit
= regset
->n
* regset
->size
;
566 while (count
&& offset
< limit
) {
567 if (count
< PTRACE_HBP_ADDR_SZ
)
569 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &addr
,
570 offset
, offset
+ PTRACE_HBP_ADDR_SZ
);
573 ret
= ptrace_hbp_set_addr(note_type
, target
, idx
, addr
);
576 offset
+= PTRACE_HBP_ADDR_SZ
;
580 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &ctrl
,
581 offset
, offset
+ PTRACE_HBP_CTRL_SZ
);
584 ret
= ptrace_hbp_set_ctrl(note_type
, target
, idx
, ctrl
);
587 offset
+= PTRACE_HBP_CTRL_SZ
;
589 ret
= user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
,
591 offset
+ PTRACE_HBP_PAD_SZ
);
594 offset
+= PTRACE_HBP_PAD_SZ
;
600 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
602 static int gpr_get(struct task_struct
*target
,
603 const struct user_regset
*regset
,
604 unsigned int pos
, unsigned int count
,
605 void *kbuf
, void __user
*ubuf
)
607 struct user_pt_regs
*uregs
= &task_pt_regs(target
)->user_regs
;
608 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0, -1);
611 static int gpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
612 unsigned int pos
, unsigned int count
,
613 const void *kbuf
, const void __user
*ubuf
)
616 struct user_pt_regs newregs
= task_pt_regs(target
)->user_regs
;
618 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &newregs
, 0, -1);
622 if (!valid_user_regs(&newregs
, target
))
625 task_pt_regs(target
)->user_regs
= newregs
;
630 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
632 static int __fpr_get(struct task_struct
*target
,
633 const struct user_regset
*regset
,
634 unsigned int pos
, unsigned int count
,
635 void *kbuf
, void __user
*ubuf
, unsigned int start_pos
)
637 struct user_fpsimd_state
*uregs
;
639 sve_sync_to_fpsimd(target
);
641 uregs
= &target
->thread
.fpsimd_state
.user_fpsimd
;
643 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, uregs
,
644 start_pos
, start_pos
+ sizeof(*uregs
));
647 static int fpr_get(struct task_struct
*target
, const struct user_regset
*regset
,
648 unsigned int pos
, unsigned int count
,
649 void *kbuf
, void __user
*ubuf
)
651 if (target
== current
)
652 fpsimd_preserve_current_state();
654 return __fpr_get(target
, regset
, pos
, count
, kbuf
, ubuf
, 0);
657 static int __fpr_set(struct task_struct
*target
,
658 const struct user_regset
*regset
,
659 unsigned int pos
, unsigned int count
,
660 const void *kbuf
, const void __user
*ubuf
,
661 unsigned int start_pos
)
664 struct user_fpsimd_state newstate
;
667 * Ensure target->thread.fpsimd_state is up to date, so that a
668 * short copyin can't resurrect stale data.
670 sve_sync_to_fpsimd(target
);
672 newstate
= target
->thread
.fpsimd_state
.user_fpsimd
;
674 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &newstate
,
675 start_pos
, start_pos
+ sizeof(newstate
));
679 target
->thread
.fpsimd_state
.user_fpsimd
= newstate
;
684 static int fpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
685 unsigned int pos
, unsigned int count
,
686 const void *kbuf
, const void __user
*ubuf
)
690 ret
= __fpr_set(target
, regset
, pos
, count
, kbuf
, ubuf
, 0);
694 sve_sync_from_fpsimd_zeropad(target
);
695 fpsimd_flush_task_state(target
);
700 static int tls_get(struct task_struct
*target
, const struct user_regset
*regset
,
701 unsigned int pos
, unsigned int count
,
702 void *kbuf
, void __user
*ubuf
)
704 unsigned long *tls
= &target
->thread
.tp_value
;
706 if (target
== current
)
707 tls_preserve_current_state();
709 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, tls
, 0, -1);
712 static int tls_set(struct task_struct
*target
, const struct user_regset
*regset
,
713 unsigned int pos
, unsigned int count
,
714 const void *kbuf
, const void __user
*ubuf
)
717 unsigned long tls
= target
->thread
.tp_value
;
719 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &tls
, 0, -1);
723 target
->thread
.tp_value
= tls
;
727 static int system_call_get(struct task_struct
*target
,
728 const struct user_regset
*regset
,
729 unsigned int pos
, unsigned int count
,
730 void *kbuf
, void __user
*ubuf
)
732 int syscallno
= task_pt_regs(target
)->syscallno
;
734 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
738 static int system_call_set(struct task_struct
*target
,
739 const struct user_regset
*regset
,
740 unsigned int pos
, unsigned int count
,
741 const void *kbuf
, const void __user
*ubuf
)
743 int syscallno
= task_pt_regs(target
)->syscallno
;
746 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &syscallno
, 0, -1);
750 task_pt_regs(target
)->syscallno
= syscallno
;
754 #ifdef CONFIG_ARM64_SVE
756 static void sve_init_header_from_task(struct user_sve_header
*header
,
757 struct task_struct
*target
)
761 memset(header
, 0, sizeof(*header
));
763 header
->flags
= test_tsk_thread_flag(target
, TIF_SVE
) ?
764 SVE_PT_REGS_SVE
: SVE_PT_REGS_FPSIMD
;
765 if (test_tsk_thread_flag(target
, TIF_SVE_VL_INHERIT
))
766 header
->flags
|= SVE_PT_VL_INHERIT
;
768 header
->vl
= target
->thread
.sve_vl
;
769 vq
= sve_vq_from_vl(header
->vl
);
771 header
->max_vl
= sve_max_vl
;
772 if (WARN_ON(!sve_vl_valid(sve_max_vl
)))
773 header
->max_vl
= header
->vl
;
775 header
->size
= SVE_PT_SIZE(vq
, header
->flags
);
776 header
->max_size
= SVE_PT_SIZE(sve_vq_from_vl(header
->max_vl
),
780 static unsigned int sve_size_from_header(struct user_sve_header
const *header
)
782 return ALIGN(header
->size
, SVE_VQ_BYTES
);
785 static unsigned int sve_get_size(struct task_struct
*target
,
786 const struct user_regset
*regset
)
788 struct user_sve_header header
;
790 if (!system_supports_sve())
793 sve_init_header_from_task(&header
, target
);
794 return sve_size_from_header(&header
);
797 static int sve_get(struct task_struct
*target
,
798 const struct user_regset
*regset
,
799 unsigned int pos
, unsigned int count
,
800 void *kbuf
, void __user
*ubuf
)
803 struct user_sve_header header
;
805 unsigned long start
, end
;
807 if (!system_supports_sve())
811 sve_init_header_from_task(&header
, target
);
812 vq
= sve_vq_from_vl(header
.vl
);
814 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &header
,
819 if (target
== current
)
820 fpsimd_preserve_current_state();
822 /* Registers: FPSIMD-only case */
824 BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET
!= sizeof(header
));
825 if ((header
.flags
& SVE_PT_REGS_MASK
) == SVE_PT_REGS_FPSIMD
)
826 return __fpr_get(target
, regset
, pos
, count
, kbuf
, ubuf
,
827 SVE_PT_FPSIMD_OFFSET
);
829 /* Otherwise: full SVE case */
831 BUILD_BUG_ON(SVE_PT_SVE_OFFSET
!= sizeof(header
));
832 start
= SVE_PT_SVE_OFFSET
;
833 end
= SVE_PT_SVE_FFR_OFFSET(vq
) + SVE_PT_SVE_FFR_SIZE(vq
);
834 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
835 target
->thread
.sve_state
,
841 end
= SVE_PT_SVE_FPSR_OFFSET(vq
);
842 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
848 * Copy fpsr, and fpcr which must follow contiguously in
849 * struct fpsimd_state:
852 end
= SVE_PT_SVE_FPCR_OFFSET(vq
) + SVE_PT_SVE_FPCR_SIZE
;
853 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
854 &target
->thread
.fpsimd_state
.fpsr
,
860 end
= sve_size_from_header(&header
);
861 return user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
865 static int sve_set(struct task_struct
*target
,
866 const struct user_regset
*regset
,
867 unsigned int pos
, unsigned int count
,
868 const void *kbuf
, const void __user
*ubuf
)
871 struct user_sve_header header
;
873 unsigned long start
, end
;
875 if (!system_supports_sve())
879 if (count
< sizeof(header
))
881 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &header
,
887 * Apart from PT_SVE_REGS_MASK, all PT_SVE_* flags are consumed by
888 * sve_set_vector_length(), which will also validate them for us:
890 ret
= sve_set_vector_length(target
, header
.vl
,
891 ((unsigned long)header
.flags
& ~SVE_PT_REGS_MASK
) << 16);
895 /* Actual VL set may be less than the user asked for: */
896 vq
= sve_vq_from_vl(target
->thread
.sve_vl
);
898 /* Registers: FPSIMD-only case */
900 BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET
!= sizeof(header
));
901 if ((header
.flags
& SVE_PT_REGS_MASK
) == SVE_PT_REGS_FPSIMD
) {
902 ret
= __fpr_set(target
, regset
, pos
, count
, kbuf
, ubuf
,
903 SVE_PT_FPSIMD_OFFSET
);
904 clear_tsk_thread_flag(target
, TIF_SVE
);
908 /* Otherwise: full SVE case */
911 * If setting a different VL from the requested VL and there is
912 * register data, the data layout will be wrong: don't even
913 * try to set the registers in this case.
915 if (count
&& vq
!= sve_vq_from_vl(header
.vl
)) {
923 * Ensure target->thread.sve_state is up to date with target's
924 * FPSIMD regs, so that a short copyin leaves trailing registers
927 fpsimd_sync_to_sve(target
);
928 set_tsk_thread_flag(target
, TIF_SVE
);
930 BUILD_BUG_ON(SVE_PT_SVE_OFFSET
!= sizeof(header
));
931 start
= SVE_PT_SVE_OFFSET
;
932 end
= SVE_PT_SVE_FFR_OFFSET(vq
) + SVE_PT_SVE_FFR_SIZE(vq
);
933 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
934 target
->thread
.sve_state
,
940 end
= SVE_PT_SVE_FPSR_OFFSET(vq
);
941 ret
= user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
,
947 * Copy fpsr, and fpcr which must follow contiguously in
948 * struct fpsimd_state:
951 end
= SVE_PT_SVE_FPCR_OFFSET(vq
) + SVE_PT_SVE_FPCR_SIZE
;
952 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
953 &target
->thread
.fpsimd_state
.fpsr
,
957 fpsimd_flush_task_state(target
);
961 #endif /* CONFIG_ARM64_SVE */
/* Indices into aarch64_regsets[]. */
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
};
977 static const struct user_regset aarch64_regsets
[] = {
979 .core_note_type
= NT_PRSTATUS
,
980 .n
= sizeof(struct user_pt_regs
) / sizeof(u64
),
982 .align
= sizeof(u64
),
987 .core_note_type
= NT_PRFPREG
,
988 .n
= sizeof(struct user_fpsimd_state
) / sizeof(u32
),
990 * We pretend we have 32-bit registers because the fpsr and
991 * fpcr are 32-bits wide.
994 .align
= sizeof(u32
),
999 .core_note_type
= NT_ARM_TLS
,
1001 .size
= sizeof(void *),
1002 .align
= sizeof(void *),
1006 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1007 [REGSET_HW_BREAK
] = {
1008 .core_note_type
= NT_ARM_HW_BREAK
,
1009 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
1010 .size
= sizeof(u32
),
1011 .align
= sizeof(u32
),
1012 .get
= hw_break_get
,
1013 .set
= hw_break_set
,
1015 [REGSET_HW_WATCH
] = {
1016 .core_note_type
= NT_ARM_HW_WATCH
,
1017 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
1018 .size
= sizeof(u32
),
1019 .align
= sizeof(u32
),
1020 .get
= hw_break_get
,
1021 .set
= hw_break_set
,
1024 [REGSET_SYSTEM_CALL
] = {
1025 .core_note_type
= NT_ARM_SYSTEM_CALL
,
1027 .size
= sizeof(int),
1028 .align
= sizeof(int),
1029 .get
= system_call_get
,
1030 .set
= system_call_set
,
1032 #ifdef CONFIG_ARM64_SVE
1033 [REGSET_SVE
] = { /* Scalable Vector Extension */
1034 .core_note_type
= NT_ARM_SVE
,
1035 .n
= DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX
, SVE_PT_REGS_SVE
),
1037 .size
= SVE_VQ_BYTES
,
1038 .align
= SVE_VQ_BYTES
,
1041 .get_size
= sve_get_size
,
1046 static const struct user_regset_view user_aarch64_view
= {
1047 .name
= "aarch64", .e_machine
= EM_AARCH64
,
1048 .regsets
= aarch64_regsets
, .n
= ARRAY_SIZE(aarch64_regsets
)
1051 #ifdef CONFIG_COMPAT
1052 #include <linux/compat.h>
/* Indices into aarch32_regsets[] (the arm32-compatible coredump view). */
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};
1059 static int compat_gpr_get(struct task_struct
*target
,
1060 const struct user_regset
*regset
,
1061 unsigned int pos
, unsigned int count
,
1062 void *kbuf
, void __user
*ubuf
)
1065 unsigned int i
, start
, num_regs
;
1067 /* Calculate the number of AArch32 registers contained in count */
1068 num_regs
= count
/ regset
->size
;
1070 /* Convert pos into an register number */
1071 start
= pos
/ regset
->size
;
1073 if (start
+ num_regs
> regset
->n
)
1076 for (i
= 0; i
< num_regs
; ++i
) {
1077 unsigned int idx
= start
+ i
;
1082 reg
= task_pt_regs(target
)->pc
;
1085 reg
= task_pt_regs(target
)->pstate
;
1088 reg
= task_pt_regs(target
)->orig_x0
;
1091 reg
= task_pt_regs(target
)->regs
[idx
];
1095 memcpy(kbuf
, ®
, sizeof(reg
));
1096 kbuf
+= sizeof(reg
);
1098 ret
= copy_to_user(ubuf
, ®
, sizeof(reg
));
1104 ubuf
+= sizeof(reg
);
1111 static int compat_gpr_set(struct task_struct
*target
,
1112 const struct user_regset
*regset
,
1113 unsigned int pos
, unsigned int count
,
1114 const void *kbuf
, const void __user
*ubuf
)
1116 struct pt_regs newregs
;
1118 unsigned int i
, start
, num_regs
;
1120 /* Calculate the number of AArch32 registers contained in count */
1121 num_regs
= count
/ regset
->size
;
1123 /* Convert pos into an register number */
1124 start
= pos
/ regset
->size
;
1126 if (start
+ num_regs
> regset
->n
)
1129 newregs
= *task_pt_regs(target
);
1131 for (i
= 0; i
< num_regs
; ++i
) {
1132 unsigned int idx
= start
+ i
;
1136 memcpy(®
, kbuf
, sizeof(reg
));
1137 kbuf
+= sizeof(reg
);
1139 ret
= copy_from_user(®
, ubuf
, sizeof(reg
));
1145 ubuf
+= sizeof(reg
);
1153 newregs
.pstate
= reg
;
1156 newregs
.orig_x0
= reg
;
1159 newregs
.regs
[idx
] = reg
;
1164 if (valid_user_regs(&newregs
.user_regs
, target
))
1165 *task_pt_regs(target
) = newregs
;
1172 static int compat_vfp_get(struct task_struct
*target
,
1173 const struct user_regset
*regset
,
1174 unsigned int pos
, unsigned int count
,
1175 void *kbuf
, void __user
*ubuf
)
1177 struct user_fpsimd_state
*uregs
;
1178 compat_ulong_t fpscr
;
1179 int ret
, vregs_end_pos
;
1181 uregs
= &target
->thread
.fpsimd_state
.user_fpsimd
;
1183 if (target
== current
)
1184 fpsimd_preserve_current_state();
1187 * The VFP registers are packed into the fpsimd_state, so they all sit
1188 * nicely together for us. We just need to create the fpscr separately.
1190 vregs_end_pos
= VFP_STATE_SIZE
- sizeof(compat_ulong_t
);
1191 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, uregs
,
1194 if (count
&& !ret
) {
1195 fpscr
= (uregs
->fpsr
& VFP_FPSCR_STAT_MASK
) |
1196 (uregs
->fpcr
& VFP_FPSCR_CTRL_MASK
);
1198 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &fpscr
,
1199 vregs_end_pos
, VFP_STATE_SIZE
);
1205 static int compat_vfp_set(struct task_struct
*target
,
1206 const struct user_regset
*regset
,
1207 unsigned int pos
, unsigned int count
,
1208 const void *kbuf
, const void __user
*ubuf
)
1210 struct user_fpsimd_state
*uregs
;
1211 compat_ulong_t fpscr
;
1212 int ret
, vregs_end_pos
;
1214 uregs
= &target
->thread
.fpsimd_state
.user_fpsimd
;
1216 vregs_end_pos
= VFP_STATE_SIZE
- sizeof(compat_ulong_t
);
1217 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0,
1220 if (count
&& !ret
) {
1221 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &fpscr
,
1222 vregs_end_pos
, VFP_STATE_SIZE
);
1224 uregs
->fpsr
= fpscr
& VFP_FPSCR_STAT_MASK
;
1225 uregs
->fpcr
= fpscr
& VFP_FPSCR_CTRL_MASK
;
1229 fpsimd_flush_task_state(target
);
1233 static int compat_tls_get(struct task_struct
*target
,
1234 const struct user_regset
*regset
, unsigned int pos
,
1235 unsigned int count
, void *kbuf
, void __user
*ubuf
)
1237 compat_ulong_t tls
= (compat_ulong_t
)target
->thread
.tp_value
;
1238 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &tls
, 0, -1);
1241 static int compat_tls_set(struct task_struct
*target
,
1242 const struct user_regset
*regset
, unsigned int pos
,
1243 unsigned int count
, const void *kbuf
,
1244 const void __user
*ubuf
)
1247 compat_ulong_t tls
= target
->thread
.tp_value
;
1249 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &tls
, 0, -1);
1253 target
->thread
.tp_value
= tls
;
1257 static const struct user_regset aarch32_regsets
[] = {
1258 [REGSET_COMPAT_GPR
] = {
1259 .core_note_type
= NT_PRSTATUS
,
1260 .n
= COMPAT_ELF_NGREG
,
1261 .size
= sizeof(compat_elf_greg_t
),
1262 .align
= sizeof(compat_elf_greg_t
),
1263 .get
= compat_gpr_get
,
1264 .set
= compat_gpr_set
1266 [REGSET_COMPAT_VFP
] = {
1267 .core_note_type
= NT_ARM_VFP
,
1268 .n
= VFP_STATE_SIZE
/ sizeof(compat_ulong_t
),
1269 .size
= sizeof(compat_ulong_t
),
1270 .align
= sizeof(compat_ulong_t
),
1271 .get
= compat_vfp_get
,
1272 .set
= compat_vfp_set
1276 static const struct user_regset_view user_aarch32_view
= {
1277 .name
= "aarch32", .e_machine
= EM_ARM
,
1278 .regsets
= aarch32_regsets
, .n
= ARRAY_SIZE(aarch32_regsets
)
1281 static const struct user_regset aarch32_ptrace_regsets
[] = {
1283 .core_note_type
= NT_PRSTATUS
,
1284 .n
= COMPAT_ELF_NGREG
,
1285 .size
= sizeof(compat_elf_greg_t
),
1286 .align
= sizeof(compat_elf_greg_t
),
1287 .get
= compat_gpr_get
,
1288 .set
= compat_gpr_set
1291 .core_note_type
= NT_ARM_VFP
,
1292 .n
= VFP_STATE_SIZE
/ sizeof(compat_ulong_t
),
1293 .size
= sizeof(compat_ulong_t
),
1294 .align
= sizeof(compat_ulong_t
),
1295 .get
= compat_vfp_get
,
1296 .set
= compat_vfp_set
1299 .core_note_type
= NT_ARM_TLS
,
1301 .size
= sizeof(compat_ulong_t
),
1302 .align
= sizeof(compat_ulong_t
),
1303 .get
= compat_tls_get
,
1304 .set
= compat_tls_set
,
1306 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1307 [REGSET_HW_BREAK
] = {
1308 .core_note_type
= NT_ARM_HW_BREAK
,
1309 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
1310 .size
= sizeof(u32
),
1311 .align
= sizeof(u32
),
1312 .get
= hw_break_get
,
1313 .set
= hw_break_set
,
1315 [REGSET_HW_WATCH
] = {
1316 .core_note_type
= NT_ARM_HW_WATCH
,
1317 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
1318 .size
= sizeof(u32
),
1319 .align
= sizeof(u32
),
1320 .get
= hw_break_get
,
1321 .set
= hw_break_set
,
1324 [REGSET_SYSTEM_CALL
] = {
1325 .core_note_type
= NT_ARM_SYSTEM_CALL
,
1327 .size
= sizeof(int),
1328 .align
= sizeof(int),
1329 .get
= system_call_get
,
1330 .set
= system_call_set
,
1334 static const struct user_regset_view user_aarch32_ptrace_view
= {
1335 .name
= "aarch32", .e_machine
= EM_ARM
,
1336 .regsets
= aarch32_ptrace_regsets
, .n
= ARRAY_SIZE(aarch32_ptrace_regsets
)
1339 static int compat_ptrace_read_user(struct task_struct
*tsk
, compat_ulong_t off
,
1340 compat_ulong_t __user
*ret
)
1347 if (off
== COMPAT_PT_TEXT_ADDR
)
1348 tmp
= tsk
->mm
->start_code
;
1349 else if (off
== COMPAT_PT_DATA_ADDR
)
1350 tmp
= tsk
->mm
->start_data
;
1351 else if (off
== COMPAT_PT_TEXT_END_ADDR
)
1352 tmp
= tsk
->mm
->end_code
;
1353 else if (off
< sizeof(compat_elf_gregset_t
))
1354 return copy_regset_to_user(tsk
, &user_aarch32_view
,
1355 REGSET_COMPAT_GPR
, off
,
1356 sizeof(compat_ulong_t
), ret
);
1357 else if (off
>= COMPAT_USER_SZ
)
1362 return put_user(tmp
, ret
);
1365 static int compat_ptrace_write_user(struct task_struct
*tsk
, compat_ulong_t off
,
1369 mm_segment_t old_fs
= get_fs();
1371 if (off
& 3 || off
>= COMPAT_USER_SZ
)
1374 if (off
>= sizeof(compat_elf_gregset_t
))
1378 ret
= copy_regset_from_user(tsk
, &user_aarch32_view
,
1379 REGSET_COMPAT_GPR
, off
,
1380 sizeof(compat_ulong_t
),
1387 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1390 * Convert a virtual register number into an index for a thread_info
1391 * breakpoint array. Breakpoints are identified using positive numbers
1392 * whilst watchpoints are negative. The registers are laid out as pairs
1393 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
1394 * Register 0 is reserved for describing resource information.
1396 static int compat_ptrace_hbp_num_to_idx(compat_long_t num
)
1398 return (abs(num
) - 1) >> 1;
1401 static int compat_ptrace_hbp_get_resource_info(u32
*kdata
)
1403 u8 num_brps
, num_wrps
, debug_arch
, wp_len
;
1406 num_brps
= hw_breakpoint_slots(TYPE_INST
);
1407 num_wrps
= hw_breakpoint_slots(TYPE_DATA
);
1409 debug_arch
= debug_monitors_arch();
1423 static int compat_ptrace_hbp_get(unsigned int note_type
,
1424 struct task_struct
*tsk
,
1431 int err
, idx
= compat_ptrace_hbp_num_to_idx(num
);;
1434 err
= ptrace_hbp_get_addr(note_type
, tsk
, idx
, &addr
);
1437 err
= ptrace_hbp_get_ctrl(note_type
, tsk
, idx
, &ctrl
);
1444 static int compat_ptrace_hbp_set(unsigned int note_type
,
1445 struct task_struct
*tsk
,
1452 int err
, idx
= compat_ptrace_hbp_num_to_idx(num
);
1456 err
= ptrace_hbp_set_addr(note_type
, tsk
, idx
, addr
);
1459 err
= ptrace_hbp_set_ctrl(note_type
, tsk
, idx
, ctrl
);
1465 static int compat_ptrace_gethbpregs(struct task_struct
*tsk
, compat_long_t num
,
1466 compat_ulong_t __user
*data
)
1473 ret
= compat_ptrace_hbp_get(NT_ARM_HW_WATCH
, tsk
, num
, &kdata
);
1475 } else if (num
== 0) {
1476 ret
= compat_ptrace_hbp_get_resource_info(&kdata
);
1479 ret
= compat_ptrace_hbp_get(NT_ARM_HW_BREAK
, tsk
, num
, &kdata
);
1483 ret
= put_user(kdata
, data
);
1488 static int compat_ptrace_sethbpregs(struct task_struct
*tsk
, compat_long_t num
,
1489 compat_ulong_t __user
*data
)
1497 ret
= get_user(kdata
, data
);
1502 ret
= compat_ptrace_hbp_set(NT_ARM_HW_WATCH
, tsk
, num
, &kdata
);
1504 ret
= compat_ptrace_hbp_set(NT_ARM_HW_BREAK
, tsk
, num
, &kdata
);
1508 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1510 long compat_arch_ptrace(struct task_struct
*child
, compat_long_t request
,
1511 compat_ulong_t caddr
, compat_ulong_t cdata
)
1513 unsigned long addr
= caddr
;
1514 unsigned long data
= cdata
;
1515 void __user
*datap
= compat_ptr(data
);
1519 case PTRACE_PEEKUSR
:
1520 ret
= compat_ptrace_read_user(child
, addr
, datap
);
1523 case PTRACE_POKEUSR
:
1524 ret
= compat_ptrace_write_user(child
, addr
, data
);
1527 case COMPAT_PTRACE_GETREGS
:
1528 ret
= copy_regset_to_user(child
,
1531 0, sizeof(compat_elf_gregset_t
),
1535 case COMPAT_PTRACE_SETREGS
:
1536 ret
= copy_regset_from_user(child
,
1539 0, sizeof(compat_elf_gregset_t
),
1543 case COMPAT_PTRACE_GET_THREAD_AREA
:
1544 ret
= put_user((compat_ulong_t
)child
->thread
.tp_value
,
1545 (compat_ulong_t __user
*)datap
);
1548 case COMPAT_PTRACE_SET_SYSCALL
:
1549 task_pt_regs(child
)->syscallno
= data
;
1553 case COMPAT_PTRACE_GETVFPREGS
:
1554 ret
= copy_regset_to_user(child
,
1561 case COMPAT_PTRACE_SETVFPREGS
:
1562 ret
= copy_regset_from_user(child
,
1569 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1570 case COMPAT_PTRACE_GETHBPREGS
:
1571 ret
= compat_ptrace_gethbpregs(child
, addr
, datap
);
1574 case COMPAT_PTRACE_SETHBPREGS
:
1575 ret
= compat_ptrace_sethbpregs(child
, addr
, datap
);
1580 ret
= compat_ptrace_request(child
, request
, addr
,
1587 #endif /* CONFIG_COMPAT */
1589 const struct user_regset_view
*task_user_regset_view(struct task_struct
*task
)
1591 #ifdef CONFIG_COMPAT
1593 * Core dumping of 32-bit tasks or compat ptrace requests must use the
1594 * user_aarch32_view compatible with arm32. Native ptrace requests on
1595 * 32-bit children use an extended user_aarch32_ptrace_view to allow
1596 * access to the TLS register.
1598 if (is_compat_task())
1599 return &user_aarch32_view
;
1600 else if (is_compat_thread(task_thread_info(task
)))
1601 return &user_aarch32_ptrace_view
;
1603 return &user_aarch64_view
;
/* Native (AArch64) ptrace: no arch-specific requests beyond the core set. */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};
1617 static void tracehook_report_syscall(struct pt_regs
*regs
,
1618 enum ptrace_syscall_dir dir
)
1621 unsigned long saved_reg
;
1624 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
1625 * used to denote syscall entry/exit:
1627 regno
= (is_compat_task() ? 12 : 7);
1628 saved_reg
= regs
->regs
[regno
];
1629 regs
->regs
[regno
] = dir
;
1631 if (dir
== PTRACE_SYSCALL_EXIT
)
1632 tracehook_report_syscall_exit(regs
, 0);
1633 else if (tracehook_report_syscall_entry(regs
))
1634 forget_syscall(regs
);
1636 regs
->regs
[regno
] = saved_reg
;
/*
 * Called from the syscall entry path, before the syscall is dispatched.
 *
 * Runs, in order: ptrace reporting, seccomp filtering, the sys_enter
 * tracepoint, and audit logging.  Returns the (possibly tracer-modified)
 * syscall number to execute; -1 from seccomp aborts the syscall.
 */
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing(NULL) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	/* On arm64 the pre-trace first argument is kept in orig_x0. */
	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}
/*
 * Called from the syscall exit path, after the syscall has run.
 *
 * Mirrors syscall_trace_enter(): audit first, then the sys_exit
 * tracepoint, then ptrace exit reporting (which may stop the task).
 */
asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}
/*
 * Bits which are always architecturally RES0 per ARM DDI 0487A.h
 * Userspace cannot use these until they have an architectural meaning.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
/* RES0 mask for SPSR_EL1 when the saved state is AArch64 (EL0t). */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
	 GENMASK_ULL(5, 5))
/* RES0 mask for SPSR_EL1 when the saved state is AArch32. */
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20))
/*
 * Sanitize a user-supplied pstate for a compat (AArch32) task.
 *
 * Clears architecturally-RES0 bits, pins the endianness bit on
 * CPUs that do not support mixed-endian EL0, and verifies the state
 * describes unprivileged 32-bit execution with A/I/F unmasked.
 * Returns 1 if the (sanitized) regs were already valid; otherwise
 * forces pstate to a safe 32-bit EL0t value and returns 0.
 */
static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	/* Endianness is fixed per-kernel unless mixed-endian EL0 exists. */
	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= COMPAT_PSR_E_BIT;
		else
			regs->pstate &= ~COMPAT_PSR_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
			COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
			COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
			COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
			COMPAT_PSR_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}
/*
 * Sanitize a user-supplied pstate for a native (AArch64) task.
 *
 * Clears architecturally-RES0 bits, then verifies the state describes
 * unprivileged 64-bit execution with D/A/I/F unmasked.  Returns 1 if
 * the (sanitized) regs were already valid; otherwise forces pstate to
 * a safe 64-bit EL0t value (only NZCV preserved) and returns 0.
 */
static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}
/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 *
 * Clears the single-step pending bit unless the task is actually being
 * single-stepped, then dispatches to the compat or native validator
 * depending on the task's execution state.  Returns 1 when the regs
 * were already valid, 0 when they had to be forced to a safe state.
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}