arch/arm64/kernel/ptrace.c (mirror_ubuntu-bionic-kernel.git, at commit "arm64: fix possible spectre-v1 write in ptrace_hbp_set_event()")
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
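
/*
 * For illustration: a fetch helper (e.g. for kprobe events) might pair
 * this lookup with regs_get_register() from <asm/ptrace.h> to read a
 * register by name, along the lines of:
 *
 *	int off = regs_query_register_offset("x0");
 *	u64 val = (off >= 0) ? regs_get_register(regs, off) : 0;
 */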

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
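
/*
 * For illustration: kprobe event argument fetching ($stackN) can use
 * this to read, say, the value two slots above the stack pointer at
 * probe time:
 *
 *	unsigned long arg = regs_get_kernel_stack_nth(regs, 2);
 *
 * which returns 0 rather than faulting if the slot is off-stack.
 */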

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info = {
		.si_signo	= SIGTRAP,
		.si_errno	= 0,
		.si_code	= TRAP_HWBKPT,
		.si_addr	= (void __user *)(bkpt->trigger),
	};

#ifdef CONFIG_COMPAT
	int i;

	if (!is_compat_task())
		goto send_sig;

	for (i = 0; i < ARM_MAX_BRP; ++i) {
		if (current->thread.debug.hbp_break[i] == bp) {
			info.si_errno = (i << 1) + 1;
			break;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; ++i) {
		if (current->thread.debug.hbp_watch[i] == bp) {
			info.si_errno = -((i << 1) + 1);
			break;
		}
	}

send_sig:
#endif
	force_sig_info(SIGTRAP, &info, current);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

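/*
 * The bounds check followed by array_index_nospec() in the helpers
 * below clamps the user-controlled index even under speculative
 * execution, so a mispredicted "idx < ARM_MAX_*" branch cannot be
 * exploited to read or write out of bounds (Spectre variant 1).
 */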
static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
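
/*
 * The resulting info word packs the debug architecture version into
 * bits [15:8] and the number of slots into bits [7:0]; a debugger
 * would unpack it along the lines of:
 *
 *	u8 num_slots  = info & 0xff;
 *	u8 debug_arch = (info >> 8) & 0xff;
 */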

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

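/*
 * hw_break_get() and hw_break_set() below marshal a struct
 * user_hwdebug_state (uapi <asm/ptrace.h>), laid out roughly as:
 *
 *	__u32 dbg_info;		// resource info word
 *	__u32 pad;
 *	struct {
 *		__u64 addr;
 *		__u32 ctrl;
 *		__u32 pad;
 *	} dbg_regs[16];
 *
 * which is where the ADDR/CTRL/PAD size macros above come from.
 */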
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf, unsigned int start_pos)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				   start_pos, start_pos + sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.fpsimd_state.user_fpsimd = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;

	if (target == current)
		tls_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = target->thread.sve_vl;
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl;
	if (WARN_ON(!sve_vl_valid(sve_max_vl)))
		header->max_vl = header->vl;

	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static unsigned int sve_get_size(struct task_struct *target,
				 const struct user_regset *regset)
{
	struct user_sve_header header;

	if (!system_supports_sve())
		return 0;

	sve_init_header_from_task(&header, target);
	return sve_size_from_header(&header);
}

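/*
 * Rough layout of an NT_ARM_SVE dump as produced by sve_get() below:
 * a struct user_sve_header, then either the FPSIMD registers alone
 * (SVE_PT_REGS_FPSIMD) or the full Z/P/FFR register data followed by
 * padding and fpsr/fpcr (SVE_PT_REGS_SVE), rounded up to a multiple
 * of SVE_VQ_BYTES.
 */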
static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	sve_init_header_from_task(&header, target);
	vq = sve_vq_from_vl(header.vl);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
				  0, sizeof(header));
	if (ret)
		return ret;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
		return __fpr_get(target, regset, pos, count, kbuf, ubuf,
				 SVE_PT_FPSIMD_OFFSET);

	/* Otherwise: full SVE case */

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.sve_state,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       start, end);
	if (ret)
		return ret;

	/*
	 * Copy fpsr and fpcr, which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpsimd_state.fpsr,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = sve_size_from_header(&header);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					start, end);
}

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * sve_set_vector_length(), which will also validate them for us:
	 */
	ret = sve_set_vector_length(target, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(target->thread.sve_vl);

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/* Otherwise: full SVE case */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr and fpcr, which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SVE */

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.get = sve_get,
		.set = sve_set,
		.get_size = sve_get_size,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

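/*
 * In the AArch32 view, register indices 0-14 map directly onto
 * regs[0]-regs[14], while 15, 16 and 17 are remapped to pc, pstate
 * (cpsr) and orig_x0 respectively; see the switch statements below.
 */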
static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				  0, vregs_end_pos);

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
					  vregs_end_pos, VFP_STATE_SIZE);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
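
/*
 * For example: register numbers 1 and 2 are the address and control
 * words of breakpoint slot 0 (idx 0), numbers 3 and 4 map to slot 1,
 * and numbers -1 and -2 are the address and control words of
 * watchpoint slot 0 (odd magnitude selects the address).
 */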

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr,
					    data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		forget_syscall(regs);

	regs->regs[regno] = saved_reg;
}
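
/*
 * A (hypothetical) user-space tracer can tell the two stops apart by
 * reading the scratch register via PTRACE_GETREGSET, e.g. for an
 * AArch64 tracee:
 *
 *	struct user_pt_regs uregs;
 *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *	int is_exit = (uregs.regs[7] == 1);	// PTRACE_SYSCALL_EXIT
 */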

asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing(NULL) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}

/*
 * Bits which are always architecturally RES0 per ARM DDI 0487A.h.
 * Userspace cannot use these until they have an architectural meaning.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
	 GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= COMPAT_PSR_E_BIT;
		else
			regs->pstate &= ~COMPAT_PSR_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
			COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
			COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
			COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
			COMPAT_PSR_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}