/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
        const char *name;
        int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
        {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
        GPR_OFFSET_NAME(0),
        GPR_OFFSET_NAME(1),
        GPR_OFFSET_NAME(2),
        GPR_OFFSET_NAME(3),
        GPR_OFFSET_NAME(4),
        GPR_OFFSET_NAME(5),
        GPR_OFFSET_NAME(6),
        GPR_OFFSET_NAME(7),
        GPR_OFFSET_NAME(8),
        GPR_OFFSET_NAME(9),
        GPR_OFFSET_NAME(10),
        GPR_OFFSET_NAME(11),
        GPR_OFFSET_NAME(12),
        GPR_OFFSET_NAME(13),
        GPR_OFFSET_NAME(14),
        GPR_OFFSET_NAME(15),
        GPR_OFFSET_NAME(16),
        GPR_OFFSET_NAME(17),
        GPR_OFFSET_NAME(18),
        GPR_OFFSET_NAME(19),
        GPR_OFFSET_NAME(20),
        GPR_OFFSET_NAME(21),
        GPR_OFFSET_NAME(22),
        GPR_OFFSET_NAME(23),
        GPR_OFFSET_NAME(24),
        GPR_OFFSET_NAME(25),
        GPR_OFFSET_NAME(26),
        GPR_OFFSET_NAME(27),
        GPR_OFFSET_NAME(28),
        GPR_OFFSET_NAME(29),
        GPR_OFFSET_NAME(30),
        {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
        REG_OFFSET_NAME(sp),
        REG_OFFSET_NAME(pc),
        REG_OFFSET_NAME(pstate),
        REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:       the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
        const struct pt_regs_offset *roff;

        for (roff = regoffset_table; roff->name != NULL; roff++)
                if (!strcmp(roff->name, name))
                        return roff->offset;
        return -EINVAL;
}
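
/*
 * Illustrative sketch (not part of this file): callers such as the kprobe
 * event parser resolve a register by name and then read it through the
 * regs-and-stack access API, e.g.:
 *
 *      int off = regs_query_register_offset("x0");
 *      if (off >= 0)
 *              val = regs_get_register(regs, off);
 *
 * regs_get_register() is assumed here to be provided by <asm/ptrace.h>.
 */
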
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:       pt_regs which contains kernel stack pointer.
 * @addr:       address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel stack
 * page(s). If @addr is within the kernel stack, it returns true. If not,
 * returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
        return ((addr & ~(THREAD_SIZE - 1)) ==
                (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
                on_irq_stack(addr, raw_smp_processor_id());
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:       pt_regs which contains kernel stack pointer.
 * @n:          stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * which is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
        unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

        addr += n;
        if (regs_within_kernel_stack(regs, (unsigned long)addr))
                return *addr;
        else
                return 0;
}

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
        /*
         * This would be better off in core code, but PTRACE_DETACH has
         * grown its fair share of arch-specific warts and changing it
         * is likely to cause regressions on obscure architectures.
         */
        user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
                                struct perf_sample_data *data,
                                struct pt_regs *regs)
{
        struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
        siginfo_t info = {
                .si_signo = SIGTRAP,
                .si_errno = 0,
                .si_code  = TRAP_HWBKPT,
                .si_addr  = (void __user *)(bkpt->trigger),
        };

#ifdef CONFIG_COMPAT
        int i;

        if (!is_compat_task())
                goto send_sig;

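        /*
         * For a 32-bit debugger, encode which register fired in si_errno:
         * breakpoints are reported as positive ptrace register numbers,
         * watchpoints as negative ones (see compat_ptrace_hbp_num_to_idx()).
         */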
        for (i = 0; i < ARM_MAX_BRP; ++i) {
                if (current->thread.debug.hbp_break[i] == bp) {
                        info.si_errno = (i << 1) + 1;
                        break;
                }
        }

        for (i = 0; i < ARM_MAX_WRP; ++i) {
                if (current->thread.debug.hbp_watch[i] == bp) {
                        info.si_errno = -((i << 1) + 1);
                        break;
                }
        }

send_sig:
#endif
        force_sig_info(SIGTRAP, &info, current);
}

211
212/*
213 * Unregister breakpoints from this task and reset the pointers in
214 * the thread_struct.
215 */
216void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
217{
218 int i;
219 struct thread_struct *t = &tsk->thread;
220
221 for (i = 0; i < ARM_MAX_BRP; i++) {
222 if (t->debug.hbp_break[i]) {
223 unregister_hw_breakpoint(t->debug.hbp_break[i]);
224 t->debug.hbp_break[i] = NULL;
225 }
226 }
227
228 for (i = 0; i < ARM_MAX_WRP; i++) {
229 if (t->debug.hbp_watch[i]) {
230 unregister_hw_breakpoint(t->debug.hbp_watch[i]);
231 t->debug.hbp_watch[i] = NULL;
232 }
233 }
234}
235
236void ptrace_hw_copy_thread(struct task_struct *tsk)
237{
238 memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
239}
240
static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
                                               struct task_struct *tsk,
                                               unsigned long idx)
{
        struct perf_event *bp = ERR_PTR(-EINVAL);

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if (idx < ARM_MAX_BRP)
                        bp = tsk->thread.debug.hbp_break[idx];
                break;
        case NT_ARM_HW_WATCH:
                if (idx < ARM_MAX_WRP)
                        bp = tsk->thread.debug.hbp_watch[idx];
                break;
        }

        return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
                                struct task_struct *tsk,
                                unsigned long idx,
                                struct perf_event *bp)
{
        int err = -EINVAL;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if (idx < ARM_MAX_BRP) {
                        tsk->thread.debug.hbp_break[idx] = bp;
                        err = 0;
                }
                break;
        case NT_ARM_HW_WATCH:
                if (idx < ARM_MAX_WRP) {
                        tsk->thread.debug.hbp_watch[idx] = bp;
                        err = 0;
                }
                break;
        }

        return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
                                            struct task_struct *tsk,
                                            unsigned long idx)
{
        struct perf_event *bp;
        struct perf_event_attr attr;
        int err, type;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                type = HW_BREAKPOINT_X;
                break;
        case NT_ARM_HW_WATCH:
                type = HW_BREAKPOINT_RW;
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        ptrace_breakpoint_init(&attr);

        /*
         * Initialise fields to sane defaults
         * (i.e. values that will pass validation).
         */
        attr.bp_addr  = 0;
        attr.bp_len   = HW_BREAKPOINT_LEN_4;
        attr.bp_type  = type;
        attr.disabled = 1;

        bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
        if (IS_ERR(bp))
                return bp;

        err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
        if (err)
                return ERR_PTR(err);

        return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
                                     struct arch_hw_breakpoint_ctrl ctrl,
                                     struct perf_event_attr *attr)
{
        int err, len, type, offset, disabled = !ctrl.enabled;

        attr->disabled = disabled;
        if (disabled)
                return 0;

        err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
        if (err)
                return err;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if ((type & HW_BREAKPOINT_X) != type)
                        return -EINVAL;
                break;
        case NT_ARM_HW_WATCH:
                if ((type & HW_BREAKPOINT_RW) != type)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        attr->bp_len   = len;
        attr->bp_type  = type;
        attr->bp_addr += offset;

        return 0;
}

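/*
 * The resource info word reported for register 0 of each hardware debug
 * regset packs the debug architecture version into bits [15:8] and the
 * number of available breakpoint/watchpoint slots into bits [7:0].
 */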
static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
        u8 num;
        u32 reg = 0;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                num = hw_breakpoint_slots(TYPE_INST);
                break;
        case NT_ARM_HW_WATCH:
                num = hw_breakpoint_slots(TYPE_DATA);
                break;
        default:
                return -EINVAL;
        }

        reg |= debug_monitors_arch();
        reg <<= 8;
        reg |= num;

        *info = reg;
        return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u32 *ctrl)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (IS_ERR(bp))
                return PTR_ERR(bp);

        *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
        return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u64 *addr)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (IS_ERR(bp))
                return PTR_ERR(bp);

        *addr = bp ? counter_arch_bp(bp)->address : 0;
        return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
                                                        struct task_struct *tsk,
                                                        unsigned long idx)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (!bp)
                bp = ptrace_hbp_create(note_type, tsk, idx);

        return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u32 uctrl)
{
        int err;
        struct perf_event *bp;
        struct perf_event_attr attr;
        struct arch_hw_breakpoint_ctrl ctrl;

        bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
        if (IS_ERR(bp)) {
                err = PTR_ERR(bp);
                return err;
        }

        attr = bp->attr;
        decode_ctrl_reg(uctrl, &ctrl);
        err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
        if (err)
                return err;

        return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u64 addr)
{
        int err;
        struct perf_event *bp;
        struct perf_event_attr attr;

        bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
        if (IS_ERR(bp)) {
                err = PTR_ERR(bp);
                return err;
        }

        attr = bp->attr;
        attr.bp_addr = addr;
        err = modify_user_hw_breakpoint(bp, &attr);
        return err;
}

#define PTRACE_HBP_ADDR_SZ      sizeof(u64)
#define PTRACE_HBP_CTRL_SZ      sizeof(u32)
#define PTRACE_HBP_PAD_SZ       sizeof(u32)

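/*
 * The NT_ARM_HW_BREAK/NT_ARM_HW_WATCH regsets follow the layout of
 * struct user_hwdebug_state: a resource info word plus padding, followed
 * by one (address, ctrl, pad) triple per breakpoint or watchpoint slot.
 */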
static int hw_break_get(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        void *kbuf, void __user *ubuf)
{
        unsigned int note_type = regset->core_note_type;
        int ret, idx = 0, offset, limit;
        u32 info, ctrl;
        u64 addr;

        /* Resource info */
        ret = ptrace_hbp_get_resource_info(note_type, &info);
        if (ret)
                return ret;

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
                                  sizeof(info));
        if (ret)
                return ret;

        /* Pad */
        offset = offsetof(struct user_hwdebug_state, pad);
        ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
                                       offset + PTRACE_HBP_PAD_SZ);
        if (ret)
                return ret;

        /* (address, ctrl) registers */
        offset = offsetof(struct user_hwdebug_state, dbg_regs);
        limit = regset->n * regset->size;
        while (count && offset < limit) {
                ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
                if (ret)
                        return ret;
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
                                          offset, offset + PTRACE_HBP_ADDR_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_ADDR_SZ;

                ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
                if (ret)
                        return ret;
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
                                          offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_CTRL_SZ;

                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                               offset,
                                               offset + PTRACE_HBP_PAD_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_PAD_SZ;
                idx++;
        }

        return 0;
}

static int hw_break_set(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        const void *kbuf, const void __user *ubuf)
{
        unsigned int note_type = regset->core_note_type;
        int ret, idx = 0, offset, limit;
        u32 ctrl;
        u64 addr;

        /* Resource info and pad */
        offset = offsetof(struct user_hwdebug_state, dbg_regs);
        ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
        if (ret)
                return ret;

        /* (address, ctrl) registers */
        limit = regset->n * regset->size;
        while (count && offset < limit) {
                if (count < PTRACE_HBP_ADDR_SZ)
                        return -EINVAL;
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
                                         offset, offset + PTRACE_HBP_ADDR_SZ);
                if (ret)
                        return ret;
                ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_ADDR_SZ;

                if (!count)
                        break;
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
                                         offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
                        return ret;
                ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_CTRL_SZ;

                ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
                                                offset,
                                                offset + PTRACE_HBP_PAD_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_PAD_SZ;
                idx++;
        }

        return 0;
}
#endif  /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;
        struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
        if (ret)
                return ret;

        if (!valid_user_regs(&newregs, target))
                return -EINVAL;

        task_pt_regs(target)->user_regs = newregs;
        return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        struct user_fpsimd_state *uregs;
        uregs = &target->thread.fpsimd_state.user_fpsimd;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;
        struct user_fpsimd_state newstate =
                target->thread.fpsimd_state.user_fpsimd;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
        if (ret)
                return ret;

        target->thread.fpsimd_state.user_fpsimd = newstate;
        fpsimd_flush_task_state(target);
        return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        unsigned long *tls = &target->thread.tp_value;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;
        unsigned long tls = target->thread.tp_value;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
        if (ret)
                return ret;

        target->thread.tp_value = tls;
        return ret;
}

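/*
 * The NT_ARM_SYSTEM_CALL regset exposes pt_regs->syscallno so that a
 * tracer can inspect or rewrite the number of the system call about to
 * be executed (the native counterpart of COMPAT_PTRACE_SET_SYSCALL).
 */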
static int system_call_get(struct task_struct *target,
                           const struct user_regset *regset,
                           unsigned int pos, unsigned int count,
                           void *kbuf, void __user *ubuf)
{
        int syscallno = task_pt_regs(target)->syscallno;

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
                           const struct user_regset *regset,
                           unsigned int pos, unsigned int count,
                           const void *kbuf, const void __user *ubuf)
{
        int syscallno = task_pt_regs(target)->syscallno;
        int ret;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
        if (ret)
                return ret;

        task_pt_regs(target)->syscallno = syscallno;
        return ret;
}

enum aarch64_regset {
        REGSET_GPR,
        REGSET_FPR,
        REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        REGSET_HW_BREAK,
        REGSET_HW_WATCH,
#endif
        REGSET_SYSTEM_CALL,
};

static const struct user_regset aarch64_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(struct user_pt_regs) / sizeof(u64),
                .size = sizeof(u64),
                .align = sizeof(u64),
                .get = gpr_get,
                .set = gpr_set
        },
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
                /*
                 * We pretend we have 32-bit registers because the fpsr and
                 * fpcr are 32 bits wide.
                 */
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = fpr_get,
                .set = fpr_set
        },
        [REGSET_TLS] = {
                .core_note_type = NT_ARM_TLS,
                .n = 1,
                .size = sizeof(void *),
                .align = sizeof(void *),
                .get = tls_get,
                .set = tls_set,
        },
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        [REGSET_HW_BREAK] = {
                .core_note_type = NT_ARM_HW_BREAK,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = hw_break_get,
                .set = hw_break_set,
        },
        [REGSET_HW_WATCH] = {
                .core_note_type = NT_ARM_HW_WATCH,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = hw_break_get,
                .set = hw_break_set,
        },
#endif
        [REGSET_SYSTEM_CALL] = {
                .core_note_type = NT_ARM_SYSTEM_CALL,
                .n = 1,
                .size = sizeof(int),
                .align = sizeof(int),
                .get = system_call_get,
                .set = system_call_set,
        },
};

static const struct user_regset_view user_aarch64_view = {
        .name = "aarch64", .e_machine = EM_AARCH64,
        .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

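/*
 * Illustrative sketch (not part of this file): a tracer accesses one of the
 * regsets above with PTRACE_GETREGSET/PTRACE_SETREGSET and the matching NT_*
 * type, e.g. to read the syscall number of a stopped task:
 *
 *      int scno;
 *      struct iovec iov = { .iov_base = &scno, .iov_len = sizeof(scno) };
 *      ptrace(PTRACE_GETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
 */
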
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
        REGSET_COMPAT_GPR,
        REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          void *kbuf, void __user *ubuf)
{
        int ret = 0;
        unsigned int i, start, num_regs;

        /* Calculate the number of AArch32 registers contained in count */
        num_regs = count / regset->size;

        /* Convert pos into a register number */
        start = pos / regset->size;

        if (start + num_regs > regset->n)
                return -EIO;

        for (i = 0; i < num_regs; ++i) {
                unsigned int idx = start + i;
                compat_ulong_t reg;

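                /*
                 * AArch32 r0-r14 map directly onto x0-x14; ptrace register
                 * numbers 15, 16 and 17 correspond to the PC, CPSR (pstate)
                 * and orig_r0 (orig_x0) respectively.
                 */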
                switch (idx) {
                case 15:
                        reg = task_pt_regs(target)->pc;
                        break;
                case 16:
                        reg = task_pt_regs(target)->pstate;
                        break;
                case 17:
                        reg = task_pt_regs(target)->orig_x0;
                        break;
                default:
                        reg = task_pt_regs(target)->regs[idx];
                }

                if (kbuf) {
                        memcpy(kbuf, &reg, sizeof(reg));
                        kbuf += sizeof(reg);
                } else {
                        ret = copy_to_user(ubuf, &reg, sizeof(reg));
                        if (ret) {
                                ret = -EFAULT;
                                break;
                        }

                        ubuf += sizeof(reg);
                }
        }

        return ret;
}

static int compat_gpr_set(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          const void *kbuf, const void __user *ubuf)
{
        struct pt_regs newregs;
        int ret = 0;
        unsigned int i, start, num_regs;

        /* Calculate the number of AArch32 registers contained in count */
        num_regs = count / regset->size;

        /* Convert pos into a register number */
        start = pos / regset->size;

        if (start + num_regs > regset->n)
                return -EIO;

        newregs = *task_pt_regs(target);

        for (i = 0; i < num_regs; ++i) {
                unsigned int idx = start + i;
                compat_ulong_t reg;

                if (kbuf) {
                        memcpy(&reg, kbuf, sizeof(reg));
                        kbuf += sizeof(reg);
                } else {
                        ret = copy_from_user(&reg, ubuf, sizeof(reg));
                        if (ret) {
                                ret = -EFAULT;
                                break;
                        }

                        ubuf += sizeof(reg);
                }

                switch (idx) {
                case 15:
                        newregs.pc = reg;
                        break;
                case 16:
                        newregs.pstate = reg;
                        break;
                case 17:
                        newregs.orig_x0 = reg;
                        break;
                default:
                        newregs.regs[idx] = reg;
                }
        }

        if (valid_user_regs(&newregs.user_regs, target))
                *task_pt_regs(target) = newregs;
        else
                ret = -EINVAL;

        return ret;
}

static int compat_vfp_get(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          void *kbuf, void __user *ubuf)
{
        struct user_fpsimd_state *uregs;
        compat_ulong_t fpscr;
        int ret;

        uregs = &target->thread.fpsimd_state.user_fpsimd;

        /*
         * The VFP registers are packed into the fpsimd_state, so they all sit
         * nicely together for us. We just need to create the fpscr separately.
         */
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
                                  VFP_STATE_SIZE - sizeof(compat_ulong_t));

        if (count && !ret) {
                fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
                        (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
                ret = put_user(fpscr, (compat_ulong_t *)ubuf);
        }

        return ret;
}

static int compat_vfp_set(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          const void *kbuf, const void __user *ubuf)
{
        struct user_fpsimd_state *uregs;
        compat_ulong_t fpscr;
        int ret;

        if (pos + count > VFP_STATE_SIZE)
                return -EIO;

        uregs = &target->thread.fpsimd_state.user_fpsimd;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
                                 VFP_STATE_SIZE - sizeof(compat_ulong_t));

        if (count && !ret) {
                ret = get_user(fpscr, (compat_ulong_t *)ubuf);
                uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
                uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
        }

        fpsimd_flush_task_state(target);
        return ret;
}

static int compat_tls_get(struct task_struct *target,
                          const struct user_regset *regset, unsigned int pos,
                          unsigned int count, void *kbuf, void __user *ubuf)
{
        compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
                          const struct user_regset *regset, unsigned int pos,
                          unsigned int count, const void *kbuf,
                          const void __user *ubuf)
{
        int ret;
        compat_ulong_t tls = target->thread.tp_value;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
        if (ret)
                return ret;

        target->thread.tp_value = tls;
        return ret;
}

static const struct user_regset aarch32_regsets[] = {
        [REGSET_COMPAT_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = COMPAT_ELF_NGREG,
                .size = sizeof(compat_elf_greg_t),
                .align = sizeof(compat_elf_greg_t),
                .get = compat_gpr_get,
                .set = compat_gpr_set
        },
        [REGSET_COMPAT_VFP] = {
                .core_note_type = NT_ARM_VFP,
                .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
                .size = sizeof(compat_ulong_t),
                .align = sizeof(compat_ulong_t),
                .get = compat_vfp_get,
                .set = compat_vfp_set
        },
};

static const struct user_regset_view user_aarch32_view = {
        .name = "aarch32", .e_machine = EM_ARM,
        .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = COMPAT_ELF_NGREG,
                .size = sizeof(compat_elf_greg_t),
                .align = sizeof(compat_elf_greg_t),
                .get = compat_gpr_get,
                .set = compat_gpr_set
        },
        [REGSET_FPR] = {
                .core_note_type = NT_ARM_VFP,
                .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
                .size = sizeof(compat_ulong_t),
                .align = sizeof(compat_ulong_t),
                .get = compat_vfp_get,
                .set = compat_vfp_set
        },
        [REGSET_TLS] = {
                .core_note_type = NT_ARM_TLS,
                .n = 1,
                .size = sizeof(compat_ulong_t),
                .align = sizeof(compat_ulong_t),
                .get = compat_tls_get,
                .set = compat_tls_set,
        },
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        [REGSET_HW_BREAK] = {
                .core_note_type = NT_ARM_HW_BREAK,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = hw_break_get,
                .set = hw_break_set,
        },
        [REGSET_HW_WATCH] = {
                .core_note_type = NT_ARM_HW_WATCH,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = hw_break_get,
                .set = hw_break_set,
        },
#endif
        [REGSET_SYSTEM_CALL] = {
                .core_note_type = NT_ARM_SYSTEM_CALL,
                .n = 1,
                .size = sizeof(int),
                .align = sizeof(int),
                .get = system_call_get,
                .set = system_call_set,
        },
};

static const struct user_regset_view user_aarch32_ptrace_view = {
        .name = "aarch32", .e_machine = EM_ARM,
        .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
                                   compat_ulong_t __user *ret)
{
        compat_ulong_t tmp;

        if (off & 3)
                return -EIO;

        if (off == COMPAT_PT_TEXT_ADDR)
                tmp = tsk->mm->start_code;
        else if (off == COMPAT_PT_DATA_ADDR)
                tmp = tsk->mm->start_data;
        else if (off == COMPAT_PT_TEXT_END_ADDR)
                tmp = tsk->mm->end_code;
        else if (off < sizeof(compat_elf_gregset_t))
                return copy_regset_to_user(tsk, &user_aarch32_view,
                                           REGSET_COMPAT_GPR, off,
                                           sizeof(compat_ulong_t), ret);
        else if (off >= COMPAT_USER_SZ)
                return -EIO;
        else
                tmp = 0;

        return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
                                    compat_ulong_t val)
{
        int ret;
        mm_segment_t old_fs = get_fs();

        if (off & 3 || off >= COMPAT_USER_SZ)
                return -EIO;

        if (off >= sizeof(compat_elf_gregset_t))
                return 0;

        set_fs(KERNEL_DS);
        ret = copy_regset_from_user(tsk, &user_aarch32_view,
                                    REGSET_COMPAT_GPR, off,
                                    sizeof(compat_ulong_t),
                                    &val);
        set_fs(old_fs);

        return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_struct
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
        return (abs(num) - 1) >> 1;
}

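/*
 * Layout of the compat resource info word (register 0): debug architecture
 * in bits [31:24], maximum watchpoint length in bits [23:16], number of
 * watchpoints in bits [15:8] and number of breakpoints in bits [7:0].
 */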
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
        u8 num_brps, num_wrps, debug_arch, wp_len;
        u32 reg = 0;

        num_brps = hw_breakpoint_slots(TYPE_INST);
        num_wrps = hw_breakpoint_slots(TYPE_DATA);

        debug_arch = debug_monitors_arch();
        wp_len = 8;
        reg |= debug_arch;
        reg <<= 8;
        reg |= wp_len;
        reg <<= 8;
        reg |= num_wrps;
        reg <<= 8;
        reg |= num_brps;

        *kdata = reg;
        return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
                                 struct task_struct *tsk,
                                 compat_long_t num,
                                 u32 *kdata)
{
        u64 addr = 0;
        u32 ctrl = 0;

        int err, idx = compat_ptrace_hbp_num_to_idx(num);

        if (num & 1) {
                err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
                *kdata = (u32)addr;
        } else {
                err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
                *kdata = ctrl;
        }

        return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
                                 struct task_struct *tsk,
                                 compat_long_t num,
                                 u32 *kdata)
{
        u64 addr;
        u32 ctrl;

        int err, idx = compat_ptrace_hbp_num_to_idx(num);

        if (num & 1) {
                addr = *kdata;
                err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
        } else {
                ctrl = *kdata;
                err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
        }

        return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
                                    compat_ulong_t __user *data)
{
        int ret;
        u32 kdata;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        /* Watchpoint */
        if (num < 0) {
                ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
        /* Resource info */
        } else if (num == 0) {
                ret = compat_ptrace_hbp_get_resource_info(&kdata);
        /* Breakpoint */
        } else {
                ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
        }
        set_fs(old_fs);

        if (!ret)
                ret = put_user(kdata, data);

        return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
                                    compat_ulong_t __user *data)
{
        int ret;
        u32 kdata = 0;
        mm_segment_t old_fs = get_fs();

        if (num == 0)
                return 0;

        ret = get_user(kdata, data);
        if (ret)
                return ret;

        set_fs(KERNEL_DS);
        if (num < 0)
                ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
        else
                ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
        set_fs(old_fs);

        return ret;
}
#endif  /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        compat_ulong_t caddr, compat_ulong_t cdata)
{
        unsigned long addr = caddr;
        unsigned long data = cdata;
        void __user *datap = compat_ptr(data);
        int ret;

        switch (request) {
        case PTRACE_PEEKUSR:
                ret = compat_ptrace_read_user(child, addr, datap);
                break;

        case PTRACE_POKEUSR:
                ret = compat_ptrace_write_user(child, addr, data);
                break;

        case COMPAT_PTRACE_GETREGS:
                ret = copy_regset_to_user(child,
                                          &user_aarch32_view,
                                          REGSET_COMPAT_GPR,
                                          0, sizeof(compat_elf_gregset_t),
                                          datap);
                break;

        case COMPAT_PTRACE_SETREGS:
                ret = copy_regset_from_user(child,
                                            &user_aarch32_view,
                                            REGSET_COMPAT_GPR,
                                            0, sizeof(compat_elf_gregset_t),
                                            datap);
                break;

        case COMPAT_PTRACE_GET_THREAD_AREA:
                ret = put_user((compat_ulong_t)child->thread.tp_value,
                               (compat_ulong_t __user *)datap);
                break;

        case COMPAT_PTRACE_SET_SYSCALL:
                task_pt_regs(child)->syscallno = data;
                ret = 0;
                break;

        case COMPAT_PTRACE_GETVFPREGS:
                ret = copy_regset_to_user(child,
                                          &user_aarch32_view,
                                          REGSET_COMPAT_VFP,
                                          0, VFP_STATE_SIZE,
                                          datap);
                break;

        case COMPAT_PTRACE_SETVFPREGS:
                ret = copy_regset_from_user(child,
                                            &user_aarch32_view,
                                            REGSET_COMPAT_VFP,
                                            0, VFP_STATE_SIZE,
                                            datap);
                break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        case COMPAT_PTRACE_GETHBPREGS:
                ret = compat_ptrace_gethbpregs(child, addr, datap);
                break;

        case COMPAT_PTRACE_SETHBPREGS:
                ret = compat_ptrace_sethbpregs(child, addr, datap);
                break;
#endif

        default:
                ret = compat_ptrace_request(child, request, addr,
                                            data);
                break;
        }

        return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
        /*
         * Core dumping of 32-bit tasks or compat ptrace requests must use the
         * user_aarch32_view compatible with arm32. Native ptrace requests on
         * 32-bit children use an extended user_aarch32_ptrace_view to allow
         * access to the TLS register.
         */
        if (is_compat_task())
                return &user_aarch32_view;
        else if (is_compat_thread(task_thread_info(task)))
                return &user_aarch32_ptrace_view;
#endif
        return &user_aarch64_view;
}

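/*
 * All native ptrace requests are handled by the generic ptrace_request()
 * core; arm64 adds no architecture-specific requests of its own.
 */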
long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
{
        return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
        PTRACE_SYSCALL_ENTER = 0,
        PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
                                     enum ptrace_syscall_dir dir)
{
        int regno;
        unsigned long saved_reg;

        /*
         * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
         * used to denote syscall entry/exit:
         */
        regno = (is_compat_task() ? 12 : 7);
        saved_reg = regs->regs[regno];
        regs->regs[regno] = dir;

        if (dir == PTRACE_SYSCALL_EXIT)
                tracehook_report_syscall_exit(regs, 0);
        else if (tracehook_report_syscall_entry(regs))
                regs->syscallno = ~0UL;

        regs->regs[regno] = saved_reg;
}

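/*
 * Illustrative sketch (not part of this file): a tracer using PTRACE_SYSCALL
 * can tell entry stops from exit stops by reading the scratch register
 * described above, e.g. for a 64-bit tracee:
 *
 *      struct user_pt_regs regs;
 *      struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *      ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *      bool entry = (regs.regs[7] == 0);
 *
 * where 0 corresponds to PTRACE_SYSCALL_ENTER above.
 */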
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

        /* Do the secure computing after ptrace; failures should be fast. */
        if (secure_computing(NULL) == -1)
                return -1;

        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, regs->syscallno);

        audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
                            regs->regs[2], regs->regs[3]);

        return regs->syscallno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
        audit_syscall_exit(regs);

        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_exit(regs, regs_return_value(regs));

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}

/*
 * Bits which are always architecturally RES0 per ARM DDI 0487A.h
 * Userspace cannot use these until they have an architectural meaning.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
        (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
         GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
        (GENMASK_ULL(63, 32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
        regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

        if (!system_supports_mixed_endian_el0()) {
                if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                        regs->pstate |= COMPAT_PSR_E_BIT;
                else
                        regs->pstate &= ~COMPAT_PSR_E_BIT;
        }

        if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
            (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
            (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
            (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
                return 1;
        }

        /*
         * Force PSR to a valid 32-bit EL0t, preserving the same bits as
         * arch/arm.
         */
        regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
                        COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
                        COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
                        COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
                        COMPAT_PSR_T_BIT;
        regs->pstate |= PSR_MODE32_BIT;

        return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
        regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

        if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
            (regs->pstate & PSR_D_BIT) == 0 &&
            (regs->pstate & PSR_A_BIT) == 0 &&
            (regs->pstate & PSR_I_BIT) == 0 &&
            (regs->pstate & PSR_F_BIT) == 0) {
                return 1;
        }

        /* Force PSR to a valid 64-bit EL0t */
        regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

        return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
        if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
                regs->pstate &= ~DBG_SPSR_SS;

        if (is_compat_thread(task_thread_info(task)))
                return valid_compat_regs(regs);
        else
                return valid_native_regs(regs);
}