/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
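
/*
 * Example (illustrative): regs_query_register_offset("x0") returns
 * offsetof(struct pt_regs, regs[0]), and "lr" resolves to the same
 * offset as "x30".
 */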

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s). If @addr is within the kernel stack, it returns true.
 * If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
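
/*
 * Note: the entry is read relative to the stack pointer saved in @regs,
 * so regs_get_kernel_stack_nth(regs, 0) returns the word that was at
 * the top of the stack when @regs was captured.
 */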

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info = {
		.si_signo = SIGTRAP,
		.si_errno = 0,
		.si_code  = TRAP_HWBKPT,
		.si_addr  = (void __user *)(bkpt->trigger),
	};

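	/*
	 * For compat tasks, si_errno encodes which slot fired so that a
	 * 32-bit debugger can recover the (virtual) register number:
	 * breakpoint i reports (i << 1) + 1 and watchpoint i reports
	 * -((i << 1) + 1); see compat_ptrace_hbp_num_to_idx() for the
	 * inverse mapping.
	 */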
#ifdef CONFIG_COMPAT
	int i;

	if (!is_compat_task())
		goto send_sig;

	for (i = 0; i < ARM_MAX_BRP; ++i) {
		if (current->thread.debug.hbp_break[i] == bp) {
			info.si_errno = (i << 1) + 1;
			break;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; ++i) {
		if (current->thread.debug.hbp_watch[i] == bp) {
			info.si_errno = -((i << 1) + 1);
			break;
		}
	}

send_sig:
#endif
	force_sig_info(SIGTRAP, &info, current);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

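/*
 * Look up the perf event backing breakpoint/watchpoint slot @idx of
 * @tsk. Returns NULL if the slot is valid but no event has been
 * installed yet, and ERR_PTR(-EINVAL) for a bad @note_type or @idx.
 */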
static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP) {
			tsk->thread.debug.hbp_break[idx] = bp;
			err = 0;
		}
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP) {
			tsk->thread.debug.hbp_watch[idx] = bp;
			err = 0;
		}
		break;
	}

	return err;
}

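/*
 * Lazily create a disabled breakpoint for slot @idx: debuggers usually
 * program the address and control registers in separate steps, so the
 * event starts out with placeholder values that are refined later via
 * ptrace_hbp_set_addr()/ptrace_hbp_set_ctrl().
 */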
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len = len;
	attr->bp_type = type;
	attr->bp_addr += offset;

	return 0;
}

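/*
 * Layout of the resource word reported as the first element of
 * struct user_hwdebug_state: bits [15:8] hold the debug architecture
 * version, bits [7:0] the number of available slots.
 */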
static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

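/*
 * The NT_ARM_HW_BREAK and NT_ARM_HW_WATCH regsets mirror
 * struct user_hwdebug_state: a (dbg_info, pad) header followed by one
 * (addr, ctrl, pad) triple per slot; the loops below walk that layout
 * one field at a time.
 */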
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

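/*
 * Core regsets: the accessors below operate directly on the saved
 * pt_regs of @target; any state written back goes through
 * valid_user_regs() before it can take effect.
 */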
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf, unsigned int start_pos)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				   start_pos, start_pos + sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.fpsimd_state.user_fpsimd = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;

	if (target == current)
		tls_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

#ifdef CONFIG_ARM64_SVE

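/*
 * The NT_ARM_SVE regset starts with a struct user_sve_header describing
 * the vector length and payload format; the register data that follows
 * is either FPSIMD-only or full SVE, as flagged by SVE_PT_REGS_MASK in
 * header.flags.
 */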
static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = target->thread.sve_vl;
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl;
	if (WARN_ON(!sve_vl_valid(sve_max_vl)))
		header->max_vl = header->vl;

	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static unsigned int sve_get_size(struct task_struct *target,
				 const struct user_regset *regset)
{
	struct user_sve_header header;

	if (!system_supports_sve())
		return 0;

	sve_init_header_from_task(&header, target);
	return sve_size_from_header(&header);
}

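/*
 * A debugger would typically read this regset in two steps (illustrative):
 * ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov) with iov sized for the
 * header alone, then again with iov_len = header.size for the payload.
 */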
static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	sve_init_header_from_task(&header, target);
	vq = sve_vq_from_vl(header.vl);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
				  0, sizeof(header));
	if (ret)
		return ret;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
		return __fpr_get(target, regset, pos, count, kbuf, ubuf,
				 SVE_PT_FPSIMD_OFFSET);

	/* Otherwise: full SVE case */

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.sve_state,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       start, end);
	if (ret)
		return ret;

	/*
	 * Copy fpsr and fpcr, which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpsimd_state.fpsr,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = sve_size_from_header(&header);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					start, end);
}

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * sve_set_vector_length(), which will also validate them for us:
	 */
	ret = sve_set_vector_length(target, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(target->thread.sve_vl);

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/* Otherwise: full SVE case */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr and fpcr, which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SVE */

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.get = sve_get,
		.set = sve_set,
		.get_size = sve_get_size,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

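/*
 * AArch32 GPR view: indices 0-14 map straight onto x0-x14; the
 * remaining slots are synthesised, with 15 = pc, 16 = cpsr (pstate)
 * and 17 = orig_x0 (r0 as it was at syscall entry).
 */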
static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				  0, vregs_end_pos);

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
					  vregs_end_pos, VFP_STATE_SIZE);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

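/*
 * Emulate the arch/arm PTRACE_PEEKUSR ABI: a few magic offsets report
 * the text/data boundaries, offsets within compat_elf_gregset_t read
 * the GPRs, and anything else below COMPAT_USER_SZ reads as zero.
 */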
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_struct
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);

	debug_arch = debug_monitors_arch();
	wp_len = 8;
	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip (r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		forget_syscall(regs);

	regs->regs[regno] = saved_reg;
}

asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing(NULL) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}

/*
 * Bits which are always architecturally RES0 per ARM DDI 0487A.h
 * Userspace cannot use these until they have an architectural meaning.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
	 GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20, 20))

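/*
 * The valid_*_regs() helpers below sanitise a user-supplied PSTATE:
 * RES0 bits are silently cleared, and if the result is still not a
 * safe user-mode value it is forced to a valid EL0t state and 0 is
 * returned so that callers can fail the write with -EINVAL.
 */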
static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= COMPAT_PSR_E_BIT;
		else
			regs->pstate &= ~COMPAT_PSR_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
			COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
			COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
			COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
			COMPAT_PSR_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}