/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * ABIs.
 */
17 #include <linux/compiler.h>
18 #include <linux/context_tracking.h>
19 #include <linux/elf.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
23 #include <linux/errno.h>
24 #include <linux/ptrace.h>
25 #include <linux/regset.h>
26 #include <linux/smp.h>
27 #include <linux/user.h>
28 #include <linux/security.h>
29 #include <linux/tracehook.h>
30 #include <linux/audit.h>
31 #include <linux/seccomp.h>
32 #include <linux/ftrace.h>
34 #include <asm/byteorder.h>
38 #include <asm/mipsregs.h>
39 #include <asm/mipsmtregs.h>
40 #include <asm/pgtable.h>
42 #include <asm/syscall.h>
43 #include <asm/uaccess.h>
44 #include <asm/bootinfo.h>
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/syscalls.h>
51 * Called by kernel/ptrace.c when detaching..
53 * Make sure single step bits etc are not set.
55 void ptrace_disable(struct task_struct
*child
)
57 /* Don't load the watchpoint registers for the ex-child. */
58 clear_tsk_thread_flag(child
, TIF_LOAD_WATCH
);
62 * Read a general register set. We always use the 64-bit format, even
63 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
64 * Registers are sign extended to fill the available space.
66 int ptrace_getregs(struct task_struct
*child
, __s64 __user
*data
)
71 if (!access_ok(VERIFY_WRITE
, data
, 38 * 8))
74 regs
= task_pt_regs(child
);
76 for (i
= 0; i
< 32; i
++)
77 __put_user((long)regs
->regs
[i
], data
+ i
);
78 __put_user((long)regs
->lo
, data
+ EF_LO
- EF_R0
);
79 __put_user((long)regs
->hi
, data
+ EF_HI
- EF_R0
);
80 __put_user((long)regs
->cp0_epc
, data
+ EF_CP0_EPC
- EF_R0
);
81 __put_user((long)regs
->cp0_badvaddr
, data
+ EF_CP0_BADVADDR
- EF_R0
);
82 __put_user((long)regs
->cp0_status
, data
+ EF_CP0_STATUS
- EF_R0
);
83 __put_user((long)regs
->cp0_cause
, data
+ EF_CP0_CAUSE
- EF_R0
);
89 * Write a general register set. As for PTRACE_GETREGS, we always use
90 * the 64-bit format. On a 32-bit kernel only the lower order half
91 * (according to endianness) will be used.
93 int ptrace_setregs(struct task_struct
*child
, __s64 __user
*data
)
98 if (!access_ok(VERIFY_READ
, data
, 38 * 8))
101 regs
= task_pt_regs(child
);
103 for (i
= 0; i
< 32; i
++)
104 __get_user(regs
->regs
[i
], data
+ i
);
105 __get_user(regs
->lo
, data
+ EF_LO
- EF_R0
);
106 __get_user(regs
->hi
, data
+ EF_HI
- EF_R0
);
107 __get_user(regs
->cp0_epc
, data
+ EF_CP0_EPC
- EF_R0
);
109 /* badvaddr, status, and cause may not be written. */
114 int ptrace_getfpregs(struct task_struct
*child
, __u32 __user
*data
)
119 if (!access_ok(VERIFY_WRITE
, data
, 33 * 8))
122 if (tsk_used_math(child
)) {
123 fpureg_t
*fregs
= get_fpu_regs(child
);
124 for (i
= 0; i
< 32; i
++)
125 __put_user(fregs
[i
], i
+ (__u64 __user
*) data
);
127 for (i
= 0; i
< 32; i
++)
128 __put_user((__u64
) -1, i
+ (__u64 __user
*) data
);
131 __put_user(child
->thread
.fpu
.fcr31
, data
+ 64);
137 if (cpu_has_mipsmt
) {
138 unsigned int vpflags
= dvpe();
139 flags
= read_c0_status();
141 __asm__
__volatile__("cfc1\t%0,$0" : "=r" (tmp
));
142 write_c0_status(flags
);
145 flags
= read_c0_status();
147 __asm__
__volatile__("cfc1\t%0,$0" : "=r" (tmp
));
148 write_c0_status(flags
);
154 __put_user(tmp
, data
+ 65);
159 int ptrace_setfpregs(struct task_struct
*child
, __u32 __user
*data
)
164 if (!access_ok(VERIFY_READ
, data
, 33 * 8))
167 fregs
= get_fpu_regs(child
);
169 for (i
= 0; i
< 32; i
++)
170 __get_user(fregs
[i
], i
+ (__u64 __user
*) data
);
172 __get_user(child
->thread
.fpu
.fcr31
, data
+ 64);
174 /* FIR may not be written. */
179 int ptrace_get_watch_regs(struct task_struct
*child
,
180 struct pt_watch_regs __user
*addr
)
182 enum pt_watch_style style
;
185 if (!cpu_has_watch
|| current_cpu_data
.watch_reg_use_cnt
== 0)
187 if (!access_ok(VERIFY_WRITE
, addr
, sizeof(struct pt_watch_regs
)))
191 style
= pt_watch_style_mips32
;
192 #define WATCH_STYLE mips32
194 style
= pt_watch_style_mips64
;
195 #define WATCH_STYLE mips64
198 __put_user(style
, &addr
->style
);
199 __put_user(current_cpu_data
.watch_reg_use_cnt
,
200 &addr
->WATCH_STYLE
.num_valid
);
201 for (i
= 0; i
< current_cpu_data
.watch_reg_use_cnt
; i
++) {
202 __put_user(child
->thread
.watch
.mips3264
.watchlo
[i
],
203 &addr
->WATCH_STYLE
.watchlo
[i
]);
204 __put_user(child
->thread
.watch
.mips3264
.watchhi
[i
] & 0xfff,
205 &addr
->WATCH_STYLE
.watchhi
[i
]);
206 __put_user(current_cpu_data
.watch_reg_masks
[i
],
207 &addr
->WATCH_STYLE
.watch_masks
[i
]);
210 __put_user(0, &addr
->WATCH_STYLE
.watchlo
[i
]);
211 __put_user(0, &addr
->WATCH_STYLE
.watchhi
[i
]);
212 __put_user(0, &addr
->WATCH_STYLE
.watch_masks
[i
]);
218 int ptrace_set_watch_regs(struct task_struct
*child
,
219 struct pt_watch_regs __user
*addr
)
222 int watch_active
= 0;
223 unsigned long lt
[NUM_WATCH_REGS
];
224 u16 ht
[NUM_WATCH_REGS
];
226 if (!cpu_has_watch
|| current_cpu_data
.watch_reg_use_cnt
== 0)
228 if (!access_ok(VERIFY_READ
, addr
, sizeof(struct pt_watch_regs
)))
230 /* Check the values. */
231 for (i
= 0; i
< current_cpu_data
.watch_reg_use_cnt
; i
++) {
232 __get_user(lt
[i
], &addr
->WATCH_STYLE
.watchlo
[i
]);
234 if (lt
[i
] & __UA_LIMIT
)
237 if (test_tsk_thread_flag(child
, TIF_32BIT_ADDR
)) {
238 if (lt
[i
] & 0xffffffff80000000UL
)
241 if (lt
[i
] & __UA_LIMIT
)
245 __get_user(ht
[i
], &addr
->WATCH_STYLE
.watchhi
[i
]);
250 for (i
= 0; i
< current_cpu_data
.watch_reg_use_cnt
; i
++) {
253 child
->thread
.watch
.mips3264
.watchlo
[i
] = lt
[i
];
255 child
->thread
.watch
.mips3264
.watchhi
[i
] = ht
[i
];
259 set_tsk_thread_flag(child
, TIF_LOAD_WATCH
);
261 clear_tsk_thread_flag(child
, TIF_LOAD_WATCH
);
266 /* regset get/set implementations */
268 static int gpr_get(struct task_struct
*target
,
269 const struct user_regset
*regset
,
270 unsigned int pos
, unsigned int count
,
271 void *kbuf
, void __user
*ubuf
)
273 struct pt_regs
*regs
= task_pt_regs(target
);
275 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
276 regs
, 0, sizeof(*regs
));
279 static int gpr_set(struct task_struct
*target
,
280 const struct user_regset
*regset
,
281 unsigned int pos
, unsigned int count
,
282 const void *kbuf
, const void __user
*ubuf
)
284 struct pt_regs newregs
;
287 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
293 *task_pt_regs(target
) = newregs
;
298 static int fpr_get(struct task_struct
*target
,
299 const struct user_regset
*regset
,
300 unsigned int pos
, unsigned int count
,
301 void *kbuf
, void __user
*ubuf
)
303 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
305 0, sizeof(elf_fpregset_t
));
309 static int fpr_set(struct task_struct
*target
,
310 const struct user_regset
*regset
,
311 unsigned int pos
, unsigned int count
,
312 const void *kbuf
, const void __user
*ubuf
)
314 return user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
316 0, sizeof(elf_fpregset_t
));
325 static const struct user_regset mips_regsets
[] = {
327 .core_note_type
= NT_PRSTATUS
,
329 .size
= sizeof(unsigned int),
330 .align
= sizeof(unsigned int),
335 .core_note_type
= NT_PRFPREG
,
337 .size
= sizeof(elf_fpreg_t
),
338 .align
= sizeof(elf_fpreg_t
),
344 static const struct user_regset_view user_mips_view
= {
346 .e_machine
= ELF_ARCH
,
347 .ei_osabi
= ELF_OSABI
,
348 .regsets
= mips_regsets
,
349 .n
= ARRAY_SIZE(mips_regsets
),
352 static const struct user_regset mips64_regsets
[] = {
354 .core_note_type
= NT_PRSTATUS
,
356 .size
= sizeof(unsigned long),
357 .align
= sizeof(unsigned long),
362 .core_note_type
= NT_PRFPREG
,
364 .size
= sizeof(elf_fpreg_t
),
365 .align
= sizeof(elf_fpreg_t
),
371 static const struct user_regset_view user_mips64_view
= {
373 .e_machine
= ELF_ARCH
,
374 .ei_osabi
= ELF_OSABI
,
375 .regsets
= mips64_regsets
,
376 .n
= ARRAY_SIZE(mips_regsets
),
379 const struct user_regset_view
*task_user_regset_view(struct task_struct
*task
)
382 return &user_mips_view
;
385 #ifdef CONFIG_MIPS32_O32
386 if (test_thread_flag(TIF_32BIT_REGS
))
387 return &user_mips_view
;
390 return &user_mips64_view
;
393 long arch_ptrace(struct task_struct
*child
, long request
,
394 unsigned long addr
, unsigned long data
)
397 void __user
*addrp
= (void __user
*) addr
;
398 void __user
*datavp
= (void __user
*) data
;
399 unsigned long __user
*datalp
= (void __user
*) data
;
402 /* when I and D space are separate, these will need to be fixed. */
403 case PTRACE_PEEKTEXT
: /* read word at location addr. */
404 case PTRACE_PEEKDATA
:
405 ret
= generic_ptrace_peekdata(child
, addr
, data
);
408 /* Read the word at location addr in the USER area. */
409 case PTRACE_PEEKUSR
: {
410 struct pt_regs
*regs
;
411 unsigned long tmp
= 0;
413 regs
= task_pt_regs(child
);
414 ret
= 0; /* Default return value. */
418 tmp
= regs
->regs
[addr
];
420 case FPR_BASE
... FPR_BASE
+ 31:
421 if (tsk_used_math(child
)) {
422 fpureg_t
*fregs
= get_fpu_regs(child
);
426 * The odd registers are actually the high
427 * order bits of the values stored in the even
428 * registers - unless we're using r2k_switch.S.
431 tmp
= (unsigned long) (fregs
[((addr
& ~1) - 32)] >> 32);
433 tmp
= (unsigned long) (fregs
[(addr
- 32)] & 0xffffffff);
436 tmp
= fregs
[addr
- FPR_BASE
];
439 tmp
= -1; /* FP not yet used */
446 tmp
= regs
->cp0_cause
;
449 tmp
= regs
->cp0_badvaddr
;
457 #ifdef CONFIG_CPU_HAS_SMARTMIPS
463 tmp
= child
->thread
.fpu
.fcr31
;
465 case FPC_EIR
: { /* implementation / version register */
467 #ifdef CONFIG_MIPS_MT_SMTC
468 unsigned long irqflags
;
469 unsigned int mtflags
;
470 #endif /* CONFIG_MIPS_MT_SMTC */
478 #ifdef CONFIG_MIPS_MT_SMTC
479 /* Read-modify-write of Status must be atomic */
480 local_irq_save(irqflags
);
482 #endif /* CONFIG_MIPS_MT_SMTC */
483 if (cpu_has_mipsmt
) {
484 unsigned int vpflags
= dvpe();
485 flags
= read_c0_status();
487 __asm__
__volatile__("cfc1\t%0,$0": "=r" (tmp
));
488 write_c0_status(flags
);
491 flags
= read_c0_status();
493 __asm__
__volatile__("cfc1\t%0,$0": "=r" (tmp
));
494 write_c0_status(flags
);
496 #ifdef CONFIG_MIPS_MT_SMTC
498 local_irq_restore(irqflags
);
499 #endif /* CONFIG_MIPS_MT_SMTC */
503 case DSP_BASE
... DSP_BASE
+ 5: {
511 dregs
= __get_dsp_regs(child
);
512 tmp
= (unsigned long) (dregs
[addr
- DSP_BASE
]);
521 tmp
= child
->thread
.dsp
.dspcontrol
;
528 ret
= put_user(tmp
, datalp
);
532 /* when I and D space are separate, this will have to be fixed. */
533 case PTRACE_POKETEXT
: /* write the word at location addr. */
534 case PTRACE_POKEDATA
:
535 ret
= generic_ptrace_pokedata(child
, addr
, data
);
538 case PTRACE_POKEUSR
: {
539 struct pt_regs
*regs
;
541 regs
= task_pt_regs(child
);
545 regs
->regs
[addr
] = data
;
547 case FPR_BASE
... FPR_BASE
+ 31: {
548 fpureg_t
*fregs
= get_fpu_regs(child
);
550 if (!tsk_used_math(child
)) {
551 /* FP not yet used */
552 memset(&child
->thread
.fpu
, ~0,
553 sizeof(child
->thread
.fpu
));
554 child
->thread
.fpu
.fcr31
= 0;
558 * The odd registers are actually the high order bits
559 * of the values stored in the even registers - unless
560 * we're using r2k_switch.S.
563 fregs
[(addr
& ~1) - FPR_BASE
] &= 0xffffffff;
564 fregs
[(addr
& ~1) - FPR_BASE
] |= ((unsigned long long) data
) << 32;
566 fregs
[addr
- FPR_BASE
] &= ~0xffffffffLL
;
567 fregs
[addr
- FPR_BASE
] |= data
;
571 fregs
[addr
- FPR_BASE
] = data
;
576 regs
->cp0_epc
= data
;
584 #ifdef CONFIG_CPU_HAS_SMARTMIPS
590 child
->thread
.fpu
.fcr31
= data
;
592 case DSP_BASE
... DSP_BASE
+ 5: {
600 dregs
= __get_dsp_regs(child
);
601 dregs
[addr
- DSP_BASE
] = data
;
609 child
->thread
.dsp
.dspcontrol
= data
;
612 /* The rest are not allowed. */
620 ret
= ptrace_getregs(child
, datavp
);
624 ret
= ptrace_setregs(child
, datavp
);
627 case PTRACE_GETFPREGS
:
628 ret
= ptrace_getfpregs(child
, datavp
);
631 case PTRACE_SETFPREGS
:
632 ret
= ptrace_setfpregs(child
, datavp
);
635 case PTRACE_GET_THREAD_AREA
:
636 ret
= put_user(task_thread_info(child
)->tp_value
, datalp
);
639 case PTRACE_GET_WATCH_REGS
:
640 ret
= ptrace_get_watch_regs(child
, addrp
);
643 case PTRACE_SET_WATCH_REGS
:
644 ret
= ptrace_set_watch_regs(child
, addrp
);
648 ret
= ptrace_request(child
, request
, addr
, data
);
656 * Notification of system call entry/exit
657 * - triggered by current->work.syscall_trace
659 asmlinkage
void syscall_trace_enter(struct pt_regs
*regs
)
664 /* do the secure computing check first */
665 secure_computing_strict(regs
->regs
[2]);
667 if (test_thread_flag(TIF_SYSCALL_TRACE
) &&
668 tracehook_report_syscall_entry(regs
))
671 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT
)))
672 trace_sys_enter(regs
, regs
->regs
[2]);
674 audit_syscall_entry(syscall_get_arch(),
676 regs
->regs
[4], regs
->regs
[5],
677 regs
->regs
[6], regs
->regs
[7]);
681 * Notification of system call entry/exit
682 * - triggered by current->work.syscall_trace
684 asmlinkage
void syscall_trace_leave(struct pt_regs
*regs
)
687 * We may come here right after calling schedule_user()
688 * or do_notify_resume(), in which case we can be in RCU
693 audit_syscall_exit(regs
);
695 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT
)))
696 trace_sys_exit(regs
, regs
->regs
[2]);
698 if (test_thread_flag(TIF_SYSCALL_TRACE
))
699 tracehook_report_syscall_exit(regs
, 0);