/* arch/mips/kernel/ptrace.c */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/syscall.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

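/*
 * Defining CREATE_TRACE_POINTS before including trace/events/syscalls.h
 * instantiates the sys_enter/sys_exit tracepoints in this file; they are
 * fired from syscall_trace_enter()/syscall_trace_leave() below.
 */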
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
        /* Don't load the watchpoint registers for the ex-child. */
        clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

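/*
 * PTRACE_GETREGS and PTRACE_SETREGS exchange a block of 38 64-bit slots
 * (see ptrace_getregs()/ptrace_setregs() below): slots 0-31 hold the GPRs,
 * followed by lo, hi, cp0_epc, cp0_badvaddr, cp0_status and cp0_cause at
 * the EF_* offsets relative to EF_R0.  An illustrative (untested)
 * tracer-side sketch:
 *
 *	__s64 regs[38];
 *	ptrace(PTRACE_GETREGS, pid, NULL, regs);
 *	printf("epc = %llx\n", (long long)regs[EF_CP0_EPC - EF_R0]);
 */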
/*
 * Read a general register set.  We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, __s64 __user *data)
{
        struct pt_regs *regs;
        int i;

        if (!access_ok(VERIFY_WRITE, data, 38 * 8))
                return -EIO;

        regs = task_pt_regs(child);

        for (i = 0; i < 32; i++)
                __put_user((long)regs->regs[i], data + i);
        __put_user((long)regs->lo, data + EF_LO - EF_R0);
        __put_user((long)regs->hi, data + EF_HI - EF_R0);
        __put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
        __put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
        __put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
        __put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);

        return 0;
}

/*
 * Write a general register set.  As for PTRACE_GETREGS, we always use
 * the 64-bit format.  On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, __s64 __user *data)
{
        struct pt_regs *regs;
        int i;

        if (!access_ok(VERIFY_READ, data, 38 * 8))
                return -EIO;

        regs = task_pt_regs(child);

        for (i = 0; i < 32; i++)
                __get_user(regs->regs[i], data + i);
        __get_user(regs->lo, data + EF_LO - EF_R0);
        __get_user(regs->hi, data + EF_HI - EF_R0);
        __get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);

        /* badvaddr, status, and cause may not be written. */

        return 0;
}

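/*
 * PTRACE_GETFPREGS fills a 33 * 8 byte block: 64-bit slots 0-31 hold the
 * FP registers (or ~0 if the task has not used the FPU yet), followed by
 * fcr31 and the FP implementation register (FIR), each stored as a 32-bit
 * word at indices 64 and 65 of the __u32 view.  PTRACE_SETFPREGS accepts
 * the same layout but ignores FIR.
 */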
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
        int i;
        unsigned int tmp;

        if (!access_ok(VERIFY_WRITE, data, 33 * 8))
                return -EIO;

        if (tsk_used_math(child)) {
                union fpureg *fregs = get_fpu_regs(child);
                for (i = 0; i < 32; i++)
                        __put_user(get_fpr64(&fregs[i], 0),
                                   i + (__u64 __user *)data);
        } else {
                for (i = 0; i < 32; i++)
                        __put_user((__u64) -1, i + (__u64 __user *) data);
        }

        __put_user(child->thread.fpu.fcr31, data + 64);

        preempt_disable();
        if (cpu_has_fpu) {
                unsigned int flags;

                if (cpu_has_mipsmt) {
                        unsigned int vpflags = dvpe();
                        flags = read_c0_status();
                        __enable_fpu(FPU_AS_IS);
                        __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
                        write_c0_status(flags);
                        evpe(vpflags);
                } else {
                        flags = read_c0_status();
                        __enable_fpu(FPU_AS_IS);
                        __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
                        write_c0_status(flags);
                }
        } else {
                tmp = 0;
        }
        preempt_enable();
        __put_user(tmp, data + 65);

        return 0;
}

int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
        union fpureg *fregs;
        u64 fpr_val;
        int i;

        if (!access_ok(VERIFY_READ, data, 33 * 8))
                return -EIO;

        fregs = get_fpu_regs(child);

        for (i = 0; i < 32; i++) {
                __get_user(fpr_val, i + (__u64 __user *)data);
                set_fpr64(&fregs[i], 0, fpr_val);
        }

        __get_user(child->thread.fpu.fcr31, data + 64);

        /* FIR may not be written. */

        return 0;
}

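/*
 * PTRACE_GET_WATCH_REGS reports the hardware watch registers via
 * struct pt_watch_regs: the style (mips32 or mips64, matching the kernel),
 * how many watch register pairs the CPU implements, and for each pair the
 * current watchlo value, the low 12 bits of watchhi and the mask of
 * watchhi bits the CPU allows to be set.  Unimplemented slots (up to 8)
 * are reported as zero.
 */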
int ptrace_get_watch_regs(struct task_struct *child,
                          struct pt_watch_regs __user *addr)
{
        enum pt_watch_style style;
        int i;

        if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
                return -EIO;
        if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
                return -EIO;

#ifdef CONFIG_32BIT
        style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
        style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

        __put_user(style, &addr->style);
        __put_user(current_cpu_data.watch_reg_use_cnt,
                   &addr->WATCH_STYLE.num_valid);
        for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
                __put_user(child->thread.watch.mips3264.watchlo[i],
                           &addr->WATCH_STYLE.watchlo[i]);
                __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
                           &addr->WATCH_STYLE.watchhi[i]);
                __put_user(current_cpu_data.watch_reg_masks[i],
                           &addr->WATCH_STYLE.watch_masks[i]);
        }
        for (; i < 8; i++) {
                __put_user(0, &addr->WATCH_STYLE.watchlo[i]);
                __put_user(0, &addr->WATCH_STYLE.watchhi[i]);
                __put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
        }

        return 0;
}

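/*
 * PTRACE_SET_WATCH_REGS validates the requested values before installing
 * them: every watchlo must be a user-space address (checked against
 * __UA_LIMIT, or against the 31-bit range for 32-bit-address tasks on a
 * 64-bit kernel) and only the mask bits 0xff8 of watchhi may be set.
 * If any register has one of the I/R/W enable bits in watchlo (bits 2:0)
 * set, TIF_LOAD_WATCH is set so the registers are loaded on context
 * switch; otherwise it is cleared.
 */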
int ptrace_set_watch_regs(struct task_struct *child,
                          struct pt_watch_regs __user *addr)
{
        int i;
        int watch_active = 0;
        unsigned long lt[NUM_WATCH_REGS];
        u16 ht[NUM_WATCH_REGS];

        if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
                return -EIO;
        if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
                return -EIO;
        /* Check the values. */
        for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
                __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
                if (lt[i] & __UA_LIMIT)
                        return -EINVAL;
#else
                if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
                        if (lt[i] & 0xffffffff80000000UL)
                                return -EINVAL;
                } else {
                        if (lt[i] & __UA_LIMIT)
                                return -EINVAL;
                }
#endif
                __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
                if (ht[i] & ~0xff8)
                        return -EINVAL;
        }
        /* Install them. */
        for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
                if (lt[i] & 7)
                        watch_active = 1;
                child->thread.watch.mips3264.watchlo[i] = lt[i];
                /* Set the G bit. */
                child->thread.watch.mips3264.watchhi[i] = ht[i];
        }

        if (watch_active)
                set_tsk_thread_flag(child, TIF_LOAD_WATCH);
        else
                clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

        return 0;
}

/* regset get/set implementations */

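/*
 * These back the generic PTRACE_GETREGSET/PTRACE_SETREGSET interface and
 * core dumps: gpr_get()/gpr_set() copy the whole of the child's saved
 * struct pt_regs in and out, and fpr_get()/fpr_set() do the same for
 * thread.fpu in elf_fpregset_t format.
 */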
static int gpr_get(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        struct pt_regs *regs = task_pt_regs(target);

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   regs, 0, sizeof(*regs));
}

static int gpr_set(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        struct pt_regs newregs;
        int ret;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &newregs,
                                 0, sizeof(newregs));
        if (ret)
                return ret;

        *task_pt_regs(target) = newregs;

        return 0;
}

static int fpr_get(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fpu,
                                   0, sizeof(elf_fpregset_t));
        /* XXX fcr31 */
}

static int fpr_set(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fpu,
                                  0, sizeof(elf_fpregset_t));
        /* XXX fcr31 */
}

enum mips_regset {
        REGSET_GPR,
        REGSET_FPR,
};

static const struct user_regset mips_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = ELF_NGREG,
                .size = sizeof(unsigned int),
                .align = sizeof(unsigned int),
                .get = gpr_get,
                .set = gpr_set,
        },
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG,
                .n = ELF_NFPREG,
                .size = sizeof(elf_fpreg_t),
                .align = sizeof(elf_fpreg_t),
                .get = fpr_get,
                .set = fpr_set,
        },
};

static const struct user_regset_view user_mips_view = {
        .name = "mips",
        .e_machine = ELF_ARCH,
        .ei_osabi = ELF_OSABI,
        .regsets = mips_regsets,
        .n = ARRAY_SIZE(mips_regsets),
};

static const struct user_regset mips64_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = ELF_NGREG,
                .size = sizeof(unsigned long),
                .align = sizeof(unsigned long),
                .get = gpr_get,
                .set = gpr_set,
        },
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG,
                .n = ELF_NFPREG,
                .size = sizeof(elf_fpreg_t),
                .align = sizeof(elf_fpreg_t),
                .get = fpr_get,
                .set = fpr_set,
        },
};

static const struct user_regset_view user_mips64_view = {
        .name = "mips",
        .e_machine = ELF_ARCH,
        .ei_osabi = ELF_OSABI,
        .regsets = mips64_regsets,
        .n = ARRAY_SIZE(mips64_regsets),
};

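/*
 * Select the regset view that matches the tracee's register model: 32-bit
 * kernels only have the 32-bit view; on 64-bit kernels the 32-bit view is
 * used when CONFIG_MIPS32_O32 is enabled and TIF_32BIT_REGS is set,
 * otherwise the 64-bit view.
 */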
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
        return &user_mips_view;
#endif

#ifdef CONFIG_MIPS32_O32
        if (test_thread_flag(TIF_32BIT_REGS))
                return &user_mips_view;
#endif

        return &user_mips64_view;
}

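/*
 * arch_ptrace() handles the MIPS-specific requests.  For PTRACE_PEEKUSR
 * and PTRACE_POKEUSR, addr selects a register by the indices from
 * asm/ptrace.h: 0-31 are the GPRs, FPR_BASE..FPR_BASE+31 the FP registers,
 * then PC, CAUSE, BADVADDR, MMHI, MMLO, ACX (SmartMIPS only), FPC_CSR,
 * FPC_EIR and DSP_BASE..DSP_BASE+5 plus DSP_CONTROL.  CAUSE, BADVADDR and
 * FPC_EIR are read-only.  An illustrative (untested) tracer-side read of
 * the program counter:
 *
 *	errno = 0;
 *	long pc = ptrace(PTRACE_PEEKUSR, pid, (void *)PC, NULL);
 *	if (pc == -1 && errno)
 *		perror("PTRACE_PEEKUSR");
 */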
long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
{
        int ret;
        void __user *addrp = (void __user *) addr;
        void __user *datavp = (void __user *) data;
        unsigned long __user *datalp = (void __user *) data;

        switch (request) {
        /* when I and D space are separate, these will need to be fixed. */
        case PTRACE_PEEKTEXT: /* read word at location addr. */
        case PTRACE_PEEKDATA:
                ret = generic_ptrace_peekdata(child, addr, data);
                break;

        /* Read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                struct pt_regs *regs;
                union fpureg *fregs;
                unsigned long tmp = 0;

                regs = task_pt_regs(child);
                ret = 0;  /* Default return value. */

                switch (addr) {
                case 0 ... 31:
                        tmp = regs->regs[addr];
                        break;
                case FPR_BASE ... FPR_BASE + 31:
                        if (!tsk_used_math(child)) {
                                /* FP not yet used */
                                tmp = -1;
                                break;
                        }
                        fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
                        if (test_thread_flag(TIF_32BIT_FPREGS)) {
                                /*
                                 * The odd registers are actually the high
                                 * order bits of the values stored in the even
                                 * registers - unless we're using r2k_switch.S.
                                 */
                                tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
                                                addr & 1);
                                break;
                        }
#endif
                        tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
                        break;
                case PC:
                        tmp = regs->cp0_epc;
                        break;
                case CAUSE:
                        tmp = regs->cp0_cause;
                        break;
                case BADVADDR:
                        tmp = regs->cp0_badvaddr;
                        break;
                case MMHI:
                        tmp = regs->hi;
                        break;
                case MMLO:
                        tmp = regs->lo;
                        break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
                case ACX:
                        tmp = regs->acx;
                        break;
#endif
                case FPC_CSR:
                        tmp = child->thread.fpu.fcr31;
                        break;
                case FPC_EIR: { /* implementation / version register */
                        unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
                        unsigned long irqflags;
                        unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */

                        preempt_disable();
                        if (!cpu_has_fpu) {
                                preempt_enable();
                                break;
                        }

#ifdef CONFIG_MIPS_MT_SMTC
                        /* Read-modify-write of Status must be atomic */
                        local_irq_save(irqflags);
                        mtflags = dmt();
#endif /* CONFIG_MIPS_MT_SMTC */
                        if (cpu_has_mipsmt) {
                                unsigned int vpflags = dvpe();
                                flags = read_c0_status();
                                __enable_fpu(FPU_AS_IS);
                                __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
                                write_c0_status(flags);
                                evpe(vpflags);
                        } else {
                                flags = read_c0_status();
                                __enable_fpu(FPU_AS_IS);
                                __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
                                write_c0_status(flags);
                        }
#ifdef CONFIG_MIPS_MT_SMTC
                        emt(mtflags);
                        local_irq_restore(irqflags);
#endif /* CONFIG_MIPS_MT_SMTC */
                        preempt_enable();
                        break;
                }
                case DSP_BASE ... DSP_BASE + 5: {
                        dspreg_t *dregs;

                        if (!cpu_has_dsp) {
                                tmp = 0;
                                ret = -EIO;
                                goto out;
                        }
                        dregs = __get_dsp_regs(child);
                        tmp = (unsigned long) (dregs[addr - DSP_BASE]);
                        break;
                }
                case DSP_CONTROL:
                        if (!cpu_has_dsp) {
                                tmp = 0;
                                ret = -EIO;
                                goto out;
                        }
                        tmp = child->thread.dsp.dspcontrol;
                        break;
                default:
                        tmp = 0;
                        ret = -EIO;
                        goto out;
                }
                ret = put_user(tmp, datalp);
                break;
        }

        /* when I and D space are separate, this will have to be fixed. */
        case PTRACE_POKETEXT: /* write the word at location addr. */
        case PTRACE_POKEDATA:
                ret = generic_ptrace_pokedata(child, addr, data);
                break;

        case PTRACE_POKEUSR: {
                struct pt_regs *regs;
                ret = 0;
                regs = task_pt_regs(child);

                switch (addr) {
                case 0 ... 31:
                        regs->regs[addr] = data;
                        break;
                case FPR_BASE ... FPR_BASE + 31: {
                        union fpureg *fregs = get_fpu_regs(child);

                        if (!tsk_used_math(child)) {
                                /* FP not yet used */
                                memset(&child->thread.fpu, ~0,
                                       sizeof(child->thread.fpu));
                                child->thread.fpu.fcr31 = 0;
                        }
#ifdef CONFIG_32BIT
                        if (test_thread_flag(TIF_32BIT_FPREGS)) {
                                /*
                                 * The odd registers are actually the high
                                 * order bits of the values stored in the even
                                 * registers - unless we're using r2k_switch.S.
                                 */
                                set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
                                          addr & 1, data);
                                break;
                        }
#endif
                        set_fpr64(&fregs[addr - FPR_BASE], 0, data);
                        break;
                }
                case PC:
                        regs->cp0_epc = data;
                        break;
                case MMHI:
                        regs->hi = data;
                        break;
                case MMLO:
                        regs->lo = data;
                        break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
                case ACX:
                        regs->acx = data;
                        break;
#endif
                case FPC_CSR:
                        child->thread.fpu.fcr31 = data;
                        break;
                case DSP_BASE ... DSP_BASE + 5: {
                        dspreg_t *dregs;

                        if (!cpu_has_dsp) {
                                ret = -EIO;
                                break;
                        }

                        dregs = __get_dsp_regs(child);
                        dregs[addr - DSP_BASE] = data;
                        break;
                }
                case DSP_CONTROL:
                        if (!cpu_has_dsp) {
                                ret = -EIO;
                                break;
                        }
                        child->thread.dsp.dspcontrol = data;
                        break;
                default:
                        /* The rest are not allowed. */
                        ret = -EIO;
                        break;
                }
                break;
        }

        case PTRACE_GETREGS:
                ret = ptrace_getregs(child, datavp);
                break;

        case PTRACE_SETREGS:
                ret = ptrace_setregs(child, datavp);
                break;

        case PTRACE_GETFPREGS:
                ret = ptrace_getfpregs(child, datavp);
                break;

        case PTRACE_SETFPREGS:
                ret = ptrace_setfpregs(child, datavp);
                break;

        case PTRACE_GET_THREAD_AREA:
                ret = put_user(task_thread_info(child)->tp_value, datalp);
                break;

        case PTRACE_GET_WATCH_REGS:
                ret = ptrace_get_watch_regs(child, addrp);
                break;

        case PTRACE_SET_WATCH_REGS:
                ret = ptrace_set_watch_regs(child, addrp);
                break;

        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }
out:
        return ret;
}

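/*
 * The low-level syscall entry code is expected to skip the system call
 * when syscall_trace_enter() returns a negative value; in this file that
 * only happens for a secure_computing() (seccomp) denial.
 */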
/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
        long ret = 0;
        user_exit();

        if (secure_computing(syscall) == -1)
                return -1;

        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
                ret = -1;

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->regs[2]);

        audit_syscall_entry(syscall_get_arch(current, regs),
                            syscall,
                            regs->regs[4], regs->regs[5],
                            regs->regs[6], regs->regs[7]);
        return syscall;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
        /*
         * We may come here right after calling schedule_user()
         * or do_notify_resume(), in which case we can be in RCU
         * user mode.
         */
        user_exit();

        audit_syscall_exit(regs);

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->regs[2]);

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, 0);

        user_enter();
}