3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 * Derived from "arch/m68k/kernel/ptrace.c"
6 * Copyright (C) 1994 by Hamish Macdonald
7 * Taken from linux/kernel/ptrace.c and modified for M680x0.
8 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
11 * and Paul Mackerras (paulus@samba.org).
13 * This file is subject to the terms and conditions of the GNU General
14 * Public License. See the file README.legal in the main directory of
15 * this archive for more details.
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
21 #include <linux/smp.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
24 #include <linux/regset.h>
25 #include <linux/tracehook.h>
26 #include <linux/elf.h>
27 #include <linux/user.h>
28 #include <linux/security.h>
29 #include <linux/signal.h>
30 #include <linux/seccomp.h>
31 #include <linux/audit.h>
33 #include <linux/module.h>
35 #include <linux/hw_breakpoint.h>
36 #include <linux/perf_event.h>
38 #include <asm/uaccess.h>
40 #include <asm/pgtable.h>
41 #include <asm/system.h>
44 * The parameter save area on the stack is used to store arguments being passed
45 * to callee function and is located at fixed offset from stack pointer.
48 #define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
49 #else /* CONFIG_PPC32 */
50 #define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
53 struct pt_regs_offset
{
58 #define STR(s) #s /* convert to string */
59 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
60 #define GPR_OFFSET_NAME(num) \
61 {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
62 #define REG_OFFSET_END {.name = NULL, .offset = 0}
64 static const struct pt_regs_offset regoffset_table
[] = {
100 REG_OFFSET_NAME(link
),
101 REG_OFFSET_NAME(xer
),
102 REG_OFFSET_NAME(ccr
),
104 REG_OFFSET_NAME(softe
),
108 REG_OFFSET_NAME(trap
),
109 REG_OFFSET_NAME(dar
),
110 REG_OFFSET_NAME(dsisr
),
115 * regs_query_register_offset() - query register offset from its name
116 * @name: the name of a register
118 * regs_query_register_offset() returns the offset of a register in struct
119 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
121 int regs_query_register_offset(const char *name
)
123 const struct pt_regs_offset
*roff
;
124 for (roff
= regoffset_table
; roff
->name
!= NULL
; roff
++)
125 if (!strcmp(roff
->name
, name
))
131 * regs_query_register_name() - query register name from its offset
132 * @offset: the offset of a register in struct pt_regs.
134 * regs_query_register_name() returns the name of a register from its
135 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
137 const char *regs_query_register_name(unsigned int offset
)
139 const struct pt_regs_offset
*roff
;
140 for (roff
= regoffset_table
; roff
->name
!= NULL
; roff
++)
141 if (roff
->offset
== offset
)
147 * does not yet catch signals sent when the child dies.
148 * in exit.c or in signal.c.
152 * Set of msr bits that gdb can change on behalf of a process.
154 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
155 #define MSR_DEBUGCHANGE 0
157 #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
161 * Max register writeable via put_reg
164 #define PT_MAX_PUT_REG PT_MQ
166 #define PT_MAX_PUT_REG PT_CCR
169 static unsigned long get_user_msr(struct task_struct
*task
)
171 return task
->thread
.regs
->msr
| task
->thread
.fpexc_mode
;
174 static int set_user_msr(struct task_struct
*task
, unsigned long msr
)
176 task
->thread
.regs
->msr
&= ~MSR_DEBUGCHANGE
;
177 task
->thread
.regs
->msr
|= msr
& MSR_DEBUGCHANGE
;
182 * We prevent mucking around with the reserved area of trap
183 * which are used internally by the kernel.
185 static int set_user_trap(struct task_struct
*task
, unsigned long trap
)
187 task
->thread
.regs
->trap
= trap
& 0xfff0;
192 * Get contents of register REGNO in task TASK.
194 unsigned long ptrace_get_reg(struct task_struct
*task
, int regno
)
196 if (task
->thread
.regs
== NULL
)
200 return get_user_msr(task
);
202 if (regno
< (sizeof(struct pt_regs
) / sizeof(unsigned long)))
203 return ((unsigned long *)task
->thread
.regs
)[regno
];
209 * Write contents of register REGNO in task TASK.
211 int ptrace_put_reg(struct task_struct
*task
, int regno
, unsigned long data
)
213 if (task
->thread
.regs
== NULL
)
217 return set_user_msr(task
, data
);
218 if (regno
== PT_TRAP
)
219 return set_user_trap(task
, data
);
221 if (regno
<= PT_MAX_PUT_REG
) {
222 ((unsigned long *)task
->thread
.regs
)[regno
] = data
;
228 static int gpr_get(struct task_struct
*target
, const struct user_regset
*regset
,
229 unsigned int pos
, unsigned int count
,
230 void *kbuf
, void __user
*ubuf
)
234 if (target
->thread
.regs
== NULL
)
237 if (!FULL_REGS(target
->thread
.regs
)) {
238 /* We have a partial register set. Fill 14-31 with bogus values */
239 for (i
= 14; i
< 32; i
++)
240 target
->thread
.regs
->gpr
[i
] = NV_REG_POISON
;
243 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
245 0, offsetof(struct pt_regs
, msr
));
247 unsigned long msr
= get_user_msr(target
);
248 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &msr
,
249 offsetof(struct pt_regs
, msr
),
250 offsetof(struct pt_regs
, msr
) +
254 BUILD_BUG_ON(offsetof(struct pt_regs
, orig_gpr3
) !=
255 offsetof(struct pt_regs
, msr
) + sizeof(long));
258 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
259 &target
->thread
.regs
->orig_gpr3
,
260 offsetof(struct pt_regs
, orig_gpr3
),
261 sizeof(struct pt_regs
));
263 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
264 sizeof(struct pt_regs
), -1);
269 static int gpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
270 unsigned int pos
, unsigned int count
,
271 const void *kbuf
, const void __user
*ubuf
)
276 if (target
->thread
.regs
== NULL
)
279 CHECK_FULL_REGS(target
->thread
.regs
);
281 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
283 0, PT_MSR
* sizeof(reg
));
285 if (!ret
&& count
> 0) {
286 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, ®
,
287 PT_MSR
* sizeof(reg
),
288 (PT_MSR
+ 1) * sizeof(reg
));
290 ret
= set_user_msr(target
, reg
);
293 BUILD_BUG_ON(offsetof(struct pt_regs
, orig_gpr3
) !=
294 offsetof(struct pt_regs
, msr
) + sizeof(long));
297 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
298 &target
->thread
.regs
->orig_gpr3
,
299 PT_ORIG_R3
* sizeof(reg
),
300 (PT_MAX_PUT_REG
+ 1) * sizeof(reg
));
302 if (PT_MAX_PUT_REG
+ 1 < PT_TRAP
&& !ret
)
303 ret
= user_regset_copyin_ignore(
304 &pos
, &count
, &kbuf
, &ubuf
,
305 (PT_MAX_PUT_REG
+ 1) * sizeof(reg
),
306 PT_TRAP
* sizeof(reg
));
308 if (!ret
&& count
> 0) {
309 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, ®
,
310 PT_TRAP
* sizeof(reg
),
311 (PT_TRAP
+ 1) * sizeof(reg
));
313 ret
= set_user_trap(target
, reg
);
317 ret
= user_regset_copyin_ignore(
318 &pos
, &count
, &kbuf
, &ubuf
,
319 (PT_TRAP
+ 1) * sizeof(reg
), -1);
324 static int fpr_get(struct task_struct
*target
, const struct user_regset
*regset
,
325 unsigned int pos
, unsigned int count
,
326 void *kbuf
, void __user
*ubuf
)
332 flush_fp_to_thread(target
);
335 /* copy to local buffer then write that out */
336 for (i
= 0; i
< 32 ; i
++)
337 buf
[i
] = target
->thread
.TS_FPR(i
);
338 memcpy(&buf
[32], &target
->thread
.fpscr
, sizeof(double));
339 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, buf
, 0, -1);
342 BUILD_BUG_ON(offsetof(struct thread_struct
, fpscr
) !=
343 offsetof(struct thread_struct
, TS_FPR(32)));
345 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
346 &target
->thread
.fpr
, 0, -1);
350 static int fpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
351 unsigned int pos
, unsigned int count
,
352 const void *kbuf
, const void __user
*ubuf
)
358 flush_fp_to_thread(target
);
361 /* copy to local buffer then write that out */
362 i
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, buf
, 0, -1);
365 for (i
= 0; i
< 32 ; i
++)
366 target
->thread
.TS_FPR(i
) = buf
[i
];
367 memcpy(&target
->thread
.fpscr
, &buf
[32], sizeof(double));
370 BUILD_BUG_ON(offsetof(struct thread_struct
, fpscr
) !=
371 offsetof(struct thread_struct
, TS_FPR(32)));
373 return user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
374 &target
->thread
.fpr
, 0, -1);
378 #ifdef CONFIG_ALTIVEC
380 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
381 * The transfer totals 34 quadword. Quadwords 0-31 contain the
382 * corresponding vector registers. Quadword 32 contains the vscr as the
383 * last word (offset 12) within that quadword. Quadword 33 contains the
384 * vrsave as the first word (offset 0) within the quadword.
386 * This definition of the VMX state is compatible with the current PPC32
387 * ptrace interface. This allows signal handling and ptrace to use the
388 * same structures. This also simplifies the implementation of a bi-arch
389 * (combined (32- and 64-bit) gdb.
392 static int vr_active(struct task_struct
*target
,
393 const struct user_regset
*regset
)
395 flush_altivec_to_thread(target
);
396 return target
->thread
.used_vr
? regset
->n
: 0;
399 static int vr_get(struct task_struct
*target
, const struct user_regset
*regset
,
400 unsigned int pos
, unsigned int count
,
401 void *kbuf
, void __user
*ubuf
)
405 flush_altivec_to_thread(target
);
407 BUILD_BUG_ON(offsetof(struct thread_struct
, vscr
) !=
408 offsetof(struct thread_struct
, vr
[32]));
410 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
411 &target
->thread
.vr
, 0,
412 33 * sizeof(vector128
));
415 * Copy out only the low-order word of vrsave.
421 memset(&vrsave
, 0, sizeof(vrsave
));
422 vrsave
.word
= target
->thread
.vrsave
;
423 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &vrsave
,
424 33 * sizeof(vector128
), -1);
430 static int vr_set(struct task_struct
*target
, const struct user_regset
*regset
,
431 unsigned int pos
, unsigned int count
,
432 const void *kbuf
, const void __user
*ubuf
)
436 flush_altivec_to_thread(target
);
438 BUILD_BUG_ON(offsetof(struct thread_struct
, vscr
) !=
439 offsetof(struct thread_struct
, vr
[32]));
441 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
442 &target
->thread
.vr
, 0, 33 * sizeof(vector128
));
443 if (!ret
&& count
> 0) {
445 * We use only the first word of vrsave.
451 memset(&vrsave
, 0, sizeof(vrsave
));
452 vrsave
.word
= target
->thread
.vrsave
;
453 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &vrsave
,
454 33 * sizeof(vector128
), -1);
456 target
->thread
.vrsave
= vrsave
.word
;
461 #endif /* CONFIG_ALTIVEC */
465 * Currently to set and and get all the vsx state, you need to call
466 * the fp and VMX calls as well. This only get/sets the lower 32
467 * 128bit VSX registers.
470 static int vsr_active(struct task_struct
*target
,
471 const struct user_regset
*regset
)
473 flush_vsx_to_thread(target
);
474 return target
->thread
.used_vsr
? regset
->n
: 0;
477 static int vsr_get(struct task_struct
*target
, const struct user_regset
*regset
,
478 unsigned int pos
, unsigned int count
,
479 void *kbuf
, void __user
*ubuf
)
484 flush_vsx_to_thread(target
);
486 for (i
= 0; i
< 32 ; i
++)
487 buf
[i
] = target
->thread
.fpr
[i
][TS_VSRLOWOFFSET
];
488 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
489 buf
, 0, 32 * sizeof(double));
494 static int vsr_set(struct task_struct
*target
, const struct user_regset
*regset
,
495 unsigned int pos
, unsigned int count
,
496 const void *kbuf
, const void __user
*ubuf
)
501 flush_vsx_to_thread(target
);
503 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
504 buf
, 0, 32 * sizeof(double));
505 for (i
= 0; i
< 32 ; i
++)
506 target
->thread
.fpr
[i
][TS_VSRLOWOFFSET
] = buf
[i
];
511 #endif /* CONFIG_VSX */
516 * For get_evrregs/set_evrregs functions 'data' has the following layout:
525 static int evr_active(struct task_struct
*target
,
526 const struct user_regset
*regset
)
528 flush_spe_to_thread(target
);
529 return target
->thread
.used_spe
? regset
->n
: 0;
532 static int evr_get(struct task_struct
*target
, const struct user_regset
*regset
,
533 unsigned int pos
, unsigned int count
,
534 void *kbuf
, void __user
*ubuf
)
538 flush_spe_to_thread(target
);
540 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
542 0, sizeof(target
->thread
.evr
));
544 BUILD_BUG_ON(offsetof(struct thread_struct
, acc
) + sizeof(u64
) !=
545 offsetof(struct thread_struct
, spefscr
));
548 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
550 sizeof(target
->thread
.evr
), -1);
555 static int evr_set(struct task_struct
*target
, const struct user_regset
*regset
,
556 unsigned int pos
, unsigned int count
,
557 const void *kbuf
, const void __user
*ubuf
)
561 flush_spe_to_thread(target
);
563 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
565 0, sizeof(target
->thread
.evr
));
567 BUILD_BUG_ON(offsetof(struct thread_struct
, acc
) + sizeof(u64
) !=
568 offsetof(struct thread_struct
, spefscr
));
571 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
573 sizeof(target
->thread
.evr
), -1);
577 #endif /* CONFIG_SPE */
581 * These are our native regset flavors.
583 enum powerpc_regset
{
586 #ifdef CONFIG_ALTIVEC
597 static const struct user_regset native_regsets
[] = {
599 .core_note_type
= NT_PRSTATUS
, .n
= ELF_NGREG
,
600 .size
= sizeof(long), .align
= sizeof(long),
601 .get
= gpr_get
, .set
= gpr_set
604 .core_note_type
= NT_PRFPREG
, .n
= ELF_NFPREG
,
605 .size
= sizeof(double), .align
= sizeof(double),
606 .get
= fpr_get
, .set
= fpr_set
608 #ifdef CONFIG_ALTIVEC
610 .core_note_type
= NT_PPC_VMX
, .n
= 34,
611 .size
= sizeof(vector128
), .align
= sizeof(vector128
),
612 .active
= vr_active
, .get
= vr_get
, .set
= vr_set
617 .core_note_type
= NT_PPC_VSX
, .n
= 32,
618 .size
= sizeof(double), .align
= sizeof(double),
619 .active
= vsr_active
, .get
= vsr_get
, .set
= vsr_set
625 .size
= sizeof(u32
), .align
= sizeof(u32
),
626 .active
= evr_active
, .get
= evr_get
, .set
= evr_set
631 static const struct user_regset_view user_ppc_native_view
= {
632 .name
= UTS_MACHINE
, .e_machine
= ELF_ARCH
, .ei_osabi
= ELF_OSABI
,
633 .regsets
= native_regsets
, .n
= ARRAY_SIZE(native_regsets
)
637 #include <linux/compat.h>
639 static int gpr32_get(struct task_struct
*target
,
640 const struct user_regset
*regset
,
641 unsigned int pos
, unsigned int count
,
642 void *kbuf
, void __user
*ubuf
)
644 const unsigned long *regs
= &target
->thread
.regs
->gpr
[0];
645 compat_ulong_t
*k
= kbuf
;
646 compat_ulong_t __user
*u
= ubuf
;
650 if (target
->thread
.regs
== NULL
)
653 if (!FULL_REGS(target
->thread
.regs
)) {
654 /* We have a partial register set. Fill 14-31 with bogus values */
655 for (i
= 14; i
< 32; i
++)
656 target
->thread
.regs
->gpr
[i
] = NV_REG_POISON
;
660 count
/= sizeof(reg
);
663 for (; count
> 0 && pos
< PT_MSR
; --count
)
666 for (; count
> 0 && pos
< PT_MSR
; --count
)
667 if (__put_user((compat_ulong_t
) regs
[pos
++], u
++))
670 if (count
> 0 && pos
== PT_MSR
) {
671 reg
= get_user_msr(target
);
674 else if (__put_user(reg
, u
++))
681 for (; count
> 0 && pos
< PT_REGS_COUNT
; --count
)
684 for (; count
> 0 && pos
< PT_REGS_COUNT
; --count
)
685 if (__put_user((compat_ulong_t
) regs
[pos
++], u
++))
691 count
*= sizeof(reg
);
692 return user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
693 PT_REGS_COUNT
* sizeof(reg
), -1);
696 static int gpr32_set(struct task_struct
*target
,
697 const struct user_regset
*regset
,
698 unsigned int pos
, unsigned int count
,
699 const void *kbuf
, const void __user
*ubuf
)
701 unsigned long *regs
= &target
->thread
.regs
->gpr
[0];
702 const compat_ulong_t
*k
= kbuf
;
703 const compat_ulong_t __user
*u
= ubuf
;
706 if (target
->thread
.regs
== NULL
)
709 CHECK_FULL_REGS(target
->thread
.regs
);
712 count
/= sizeof(reg
);
715 for (; count
> 0 && pos
< PT_MSR
; --count
)
718 for (; count
> 0 && pos
< PT_MSR
; --count
) {
719 if (__get_user(reg
, u
++))
725 if (count
> 0 && pos
== PT_MSR
) {
728 else if (__get_user(reg
, u
++))
730 set_user_msr(target
, reg
);
736 for (; count
> 0 && pos
<= PT_MAX_PUT_REG
; --count
)
738 for (; count
> 0 && pos
< PT_TRAP
; --count
, ++pos
)
741 for (; count
> 0 && pos
<= PT_MAX_PUT_REG
; --count
) {
742 if (__get_user(reg
, u
++))
746 for (; count
> 0 && pos
< PT_TRAP
; --count
, ++pos
)
747 if (__get_user(reg
, u
++))
751 if (count
> 0 && pos
== PT_TRAP
) {
754 else if (__get_user(reg
, u
++))
756 set_user_trap(target
, reg
);
764 count
*= sizeof(reg
);
765 return user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
,
766 (PT_TRAP
+ 1) * sizeof(reg
), -1);
770 * These are the regset flavors matching the CONFIG_PPC32 native set.
772 static const struct user_regset compat_regsets
[] = {
774 .core_note_type
= NT_PRSTATUS
, .n
= ELF_NGREG
,
775 .size
= sizeof(compat_long_t
), .align
= sizeof(compat_long_t
),
776 .get
= gpr32_get
, .set
= gpr32_set
779 .core_note_type
= NT_PRFPREG
, .n
= ELF_NFPREG
,
780 .size
= sizeof(double), .align
= sizeof(double),
781 .get
= fpr_get
, .set
= fpr_set
783 #ifdef CONFIG_ALTIVEC
785 .core_note_type
= NT_PPC_VMX
, .n
= 34,
786 .size
= sizeof(vector128
), .align
= sizeof(vector128
),
787 .active
= vr_active
, .get
= vr_get
, .set
= vr_set
792 .core_note_type
= NT_PPC_SPE
, .n
= 35,
793 .size
= sizeof(u32
), .align
= sizeof(u32
),
794 .active
= evr_active
, .get
= evr_get
, .set
= evr_set
799 static const struct user_regset_view user_ppc_compat_view
= {
800 .name
= "ppc", .e_machine
= EM_PPC
, .ei_osabi
= ELF_OSABI
,
801 .regsets
= compat_regsets
, .n
= ARRAY_SIZE(compat_regsets
)
803 #endif /* CONFIG_PPC64 */
805 const struct user_regset_view
*task_user_regset_view(struct task_struct
*task
)
808 if (test_tsk_thread_flag(task
, TIF_32BIT
))
809 return &user_ppc_compat_view
;
811 return &user_ppc_native_view
;
815 void user_enable_single_step(struct task_struct
*task
)
817 struct pt_regs
*regs
= task
->thread
.regs
;
820 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
821 task
->thread
.dbcr0
&= ~DBCR0_BT
;
822 task
->thread
.dbcr0
|= DBCR0_IDM
| DBCR0_IC
;
825 regs
->msr
&= ~MSR_BE
;
829 set_tsk_thread_flag(task
, TIF_SINGLESTEP
);
832 void user_enable_block_step(struct task_struct
*task
)
834 struct pt_regs
*regs
= task
->thread
.regs
;
837 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
838 task
->thread
.dbcr0
&= ~DBCR0_IC
;
839 task
->thread
.dbcr0
= DBCR0_IDM
| DBCR0_BT
;
842 regs
->msr
&= ~MSR_SE
;
846 set_tsk_thread_flag(task
, TIF_SINGLESTEP
);
849 void user_disable_single_step(struct task_struct
*task
)
851 struct pt_regs
*regs
= task
->thread
.regs
;
854 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
856 * The logic to disable single stepping should be as
857 * simple as turning off the Instruction Complete flag.
858 * And, after doing so, if all debug flags are off, turn
859 * off DBCR0(IDM) and MSR(DE) .... Torez
861 task
->thread
.dbcr0
&= ~DBCR0_IC
;
863 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
865 if (!DBCR_ACTIVE_EVENTS(task
->thread
.dbcr0
,
866 task
->thread
.dbcr1
)) {
868 * All debug events were off.....
870 task
->thread
.dbcr0
&= ~DBCR0_IDM
;
871 regs
->msr
&= ~MSR_DE
;
874 regs
->msr
&= ~(MSR_SE
| MSR_BE
);
877 clear_tsk_thread_flag(task
, TIF_SINGLESTEP
);
880 #ifdef CONFIG_HAVE_HW_BREAKPOINT
881 void ptrace_triggered(struct perf_event
*bp
, int nmi
,
882 struct perf_sample_data
*data
, struct pt_regs
*regs
)
884 struct perf_event_attr attr
;
887 * Disable the breakpoint request here since ptrace has defined a
888 * one-shot behaviour for breakpoint exceptions in PPC64.
889 * The SIGTRAP signal is generated automatically for us in do_dabr().
890 * We don't have to do anything about that here
893 attr
.disabled
= true;
894 modify_user_hw_breakpoint(bp
, &attr
);
896 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
898 int ptrace_set_debugreg(struct task_struct
*task
, unsigned long addr
,
901 #ifdef CONFIG_HAVE_HW_BREAKPOINT
903 struct thread_struct
*thread
= &(task
->thread
);
904 struct perf_event
*bp
;
905 struct perf_event_attr attr
;
906 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
908 /* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
909 * For embedded processors we support one DAC and no IAC's at the
915 /* The bottom 3 bits in dabr are flags */
916 if ((data
& ~0x7UL
) >= TASK_SIZE
)
919 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
920 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
921 * It was assumed, on previous implementations, that 3 bits were
922 * passed together with the data address, fitting the design of the
923 * DABR register, as follows:
927 * bit 2: Breakpoint translation
929 * Thus, we use them here as so.
932 /* Ensure breakpoint translation bit is set */
933 if (data
&& !(data
& DABR_TRANSLATION
))
935 #ifdef CONFIG_HAVE_HW_BREAKPOINT
936 if (ptrace_get_breakpoints(task
) < 0)
939 bp
= thread
->ptrace_bps
[0];
940 if ((!data
) || !(data
& (DABR_DATA_WRITE
| DABR_DATA_READ
))) {
942 unregister_hw_breakpoint(bp
);
943 thread
->ptrace_bps
[0] = NULL
;
945 ptrace_put_breakpoints(task
);
950 attr
.bp_addr
= data
& ~HW_BREAKPOINT_ALIGN
;
951 arch_bp_generic_fields(data
&
952 (DABR_DATA_WRITE
| DABR_DATA_READ
),
954 ret
= modify_user_hw_breakpoint(bp
, &attr
);
956 ptrace_put_breakpoints(task
);
959 thread
->ptrace_bps
[0] = bp
;
960 ptrace_put_breakpoints(task
);
965 /* Create a new breakpoint request if one doesn't exist already */
966 hw_breakpoint_init(&attr
);
967 attr
.bp_addr
= data
& ~HW_BREAKPOINT_ALIGN
;
968 arch_bp_generic_fields(data
& (DABR_DATA_WRITE
| DABR_DATA_READ
),
971 thread
->ptrace_bps
[0] = bp
= register_user_hw_breakpoint(&attr
,
972 ptrace_triggered
, task
);
974 thread
->ptrace_bps
[0] = NULL
;
975 ptrace_put_breakpoints(task
);
979 ptrace_put_breakpoints(task
);
981 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
983 /* Move contents to the DABR register */
984 task
->thread
.dabr
= data
;
985 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
986 /* As described above, it was assumed 3 bits were passed with the data
987 * address, but we will assume only the mode bits will be passed
988 * as to not cause alignment restrictions for DAC-based processors.
991 /* DAC's hold the whole address without any mode flags */
992 task
->thread
.dac1
= data
& ~0x3UL
;
994 if (task
->thread
.dac1
== 0) {
995 dbcr_dac(task
) &= ~(DBCR_DAC1R
| DBCR_DAC1W
);
996 if (!DBCR_ACTIVE_EVENTS(task
->thread
.dbcr0
,
997 task
->thread
.dbcr1
)) {
998 task
->thread
.regs
->msr
&= ~MSR_DE
;
999 task
->thread
.dbcr0
&= ~DBCR0_IDM
;
1004 /* Read or Write bits must be set */
1006 if (!(data
& 0x3UL
))
1009 /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
1011 task
->thread
.dbcr0
|= DBCR0_IDM
;
1013 /* Check for write and read flags and set DBCR0
1015 dbcr_dac(task
) &= ~(DBCR_DAC1R
|DBCR_DAC1W
);
1017 dbcr_dac(task
) |= DBCR_DAC1R
;
1019 dbcr_dac(task
) |= DBCR_DAC1W
;
1020 task
->thread
.regs
->msr
|= MSR_DE
;
1021 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1026 * Called by kernel/ptrace.c when detaching..
1028 * Make sure single step bits etc are not set.
/*
 * Called by kernel/ptrace.c when detaching: make sure no stepping state
 * is left armed on the child.
 */
void ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}
1036 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1037 static long set_intruction_bp(struct task_struct
*child
,
1038 struct ppc_hw_breakpoint
*bp_info
)
1041 int slot1_in_use
= ((child
->thread
.dbcr0
& DBCR0_IAC1
) != 0);
1042 int slot2_in_use
= ((child
->thread
.dbcr0
& DBCR0_IAC2
) != 0);
1043 int slot3_in_use
= ((child
->thread
.dbcr0
& DBCR0_IAC3
) != 0);
1044 int slot4_in_use
= ((child
->thread
.dbcr0
& DBCR0_IAC4
) != 0);
1046 if (dbcr_iac_range(child
) & DBCR_IAC12MODE
)
1048 if (dbcr_iac_range(child
) & DBCR_IAC34MODE
)
1051 if (bp_info
->addr
>= TASK_SIZE
)
1054 if (bp_info
->addr_mode
!= PPC_BREAKPOINT_MODE_EXACT
) {
1056 /* Make sure range is valid. */
1057 if (bp_info
->addr2
>= TASK_SIZE
)
1060 /* We need a pair of IAC regsisters */
1061 if ((!slot1_in_use
) && (!slot2_in_use
)) {
1063 child
->thread
.iac1
= bp_info
->addr
;
1064 child
->thread
.iac2
= bp_info
->addr2
;
1065 child
->thread
.dbcr0
|= DBCR0_IAC1
;
1066 if (bp_info
->addr_mode
==
1067 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE
)
1068 dbcr_iac_range(child
) |= DBCR_IAC12X
;
1070 dbcr_iac_range(child
) |= DBCR_IAC12I
;
1071 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1072 } else if ((!slot3_in_use
) && (!slot4_in_use
)) {
1074 child
->thread
.iac3
= bp_info
->addr
;
1075 child
->thread
.iac4
= bp_info
->addr2
;
1076 child
->thread
.dbcr0
|= DBCR0_IAC3
;
1077 if (bp_info
->addr_mode
==
1078 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE
)
1079 dbcr_iac_range(child
) |= DBCR_IAC34X
;
1081 dbcr_iac_range(child
) |= DBCR_IAC34I
;
1086 /* We only need one. If possible leave a pair free in
1087 * case a range is needed later
1089 if (!slot1_in_use
) {
1091 * Don't use iac1 if iac1-iac2 are free and either
1092 * iac3 or iac4 (but not both) are free
1094 if (slot2_in_use
|| (slot3_in_use
== slot4_in_use
)) {
1096 child
->thread
.iac1
= bp_info
->addr
;
1097 child
->thread
.dbcr0
|= DBCR0_IAC1
;
1101 if (!slot2_in_use
) {
1103 child
->thread
.iac2
= bp_info
->addr
;
1104 child
->thread
.dbcr0
|= DBCR0_IAC2
;
1105 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1106 } else if (!slot3_in_use
) {
1108 child
->thread
.iac3
= bp_info
->addr
;
1109 child
->thread
.dbcr0
|= DBCR0_IAC3
;
1110 } else if (!slot4_in_use
) {
1112 child
->thread
.iac4
= bp_info
->addr
;
1113 child
->thread
.dbcr0
|= DBCR0_IAC4
;
1119 child
->thread
.dbcr0
|= DBCR0_IDM
;
1120 child
->thread
.regs
->msr
|= MSR_DE
;
1125 static int del_instruction_bp(struct task_struct
*child
, int slot
)
1129 if ((child
->thread
.dbcr0
& DBCR0_IAC1
) == 0)
1132 if (dbcr_iac_range(child
) & DBCR_IAC12MODE
) {
1133 /* address range - clear slots 1 & 2 */
1134 child
->thread
.iac2
= 0;
1135 dbcr_iac_range(child
) &= ~DBCR_IAC12MODE
;
1137 child
->thread
.iac1
= 0;
1138 child
->thread
.dbcr0
&= ~DBCR0_IAC1
;
1141 if ((child
->thread
.dbcr0
& DBCR0_IAC2
) == 0)
1144 if (dbcr_iac_range(child
) & DBCR_IAC12MODE
)
1145 /* used in a range */
1147 child
->thread
.iac2
= 0;
1148 child
->thread
.dbcr0
&= ~DBCR0_IAC2
;
1150 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1152 if ((child
->thread
.dbcr0
& DBCR0_IAC3
) == 0)
1155 if (dbcr_iac_range(child
) & DBCR_IAC34MODE
) {
1156 /* address range - clear slots 3 & 4 */
1157 child
->thread
.iac4
= 0;
1158 dbcr_iac_range(child
) &= ~DBCR_IAC34MODE
;
1160 child
->thread
.iac3
= 0;
1161 child
->thread
.dbcr0
&= ~DBCR0_IAC3
;
1164 if ((child
->thread
.dbcr0
& DBCR0_IAC4
) == 0)
1167 if (dbcr_iac_range(child
) & DBCR_IAC34MODE
)
1168 /* Used in a range */
1170 child
->thread
.iac4
= 0;
1171 child
->thread
.dbcr0
&= ~DBCR0_IAC4
;
1180 static int set_dac(struct task_struct
*child
, struct ppc_hw_breakpoint
*bp_info
)
1183 (bp_info
->condition_mode
>> PPC_BREAKPOINT_CONDITION_BE_SHIFT
)
1185 int condition_mode
=
1186 bp_info
->condition_mode
& PPC_BREAKPOINT_CONDITION_MODE
;
1189 if (byte_enable
&& (condition_mode
== 0))
1192 if (bp_info
->addr
>= TASK_SIZE
)
1195 if ((dbcr_dac(child
) & (DBCR_DAC1R
| DBCR_DAC1W
)) == 0) {
1197 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_READ
)
1198 dbcr_dac(child
) |= DBCR_DAC1R
;
1199 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_WRITE
)
1200 dbcr_dac(child
) |= DBCR_DAC1W
;
1201 child
->thread
.dac1
= (unsigned long)bp_info
->addr
;
1202 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1204 child
->thread
.dvc1
=
1205 (unsigned long)bp_info
->condition_value
;
1206 child
->thread
.dbcr2
|=
1207 ((byte_enable
<< DBCR2_DVC1BE_SHIFT
) |
1208 (condition_mode
<< DBCR2_DVC1M_SHIFT
));
1211 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1212 } else if (child
->thread
.dbcr2
& DBCR2_DAC12MODE
) {
1213 /* Both dac1 and dac2 are part of a range */
1216 } else if ((dbcr_dac(child
) & (DBCR_DAC2R
| DBCR_DAC2W
)) == 0) {
1218 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_READ
)
1219 dbcr_dac(child
) |= DBCR_DAC2R
;
1220 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_WRITE
)
1221 dbcr_dac(child
) |= DBCR_DAC2W
;
1222 child
->thread
.dac2
= (unsigned long)bp_info
->addr
;
1223 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1225 child
->thread
.dvc2
=
1226 (unsigned long)bp_info
->condition_value
;
1227 child
->thread
.dbcr2
|=
1228 ((byte_enable
<< DBCR2_DVC2BE_SHIFT
) |
1229 (condition_mode
<< DBCR2_DVC2M_SHIFT
));
1234 child
->thread
.dbcr0
|= DBCR0_IDM
;
1235 child
->thread
.regs
->msr
|= MSR_DE
;
1240 static int del_dac(struct task_struct
*child
, int slot
)
1243 if ((dbcr_dac(child
) & (DBCR_DAC1R
| DBCR_DAC1W
)) == 0)
1246 child
->thread
.dac1
= 0;
1247 dbcr_dac(child
) &= ~(DBCR_DAC1R
| DBCR_DAC1W
);
1248 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1249 if (child
->thread
.dbcr2
& DBCR2_DAC12MODE
) {
1250 child
->thread
.dac2
= 0;
1251 child
->thread
.dbcr2
&= ~DBCR2_DAC12MODE
;
1253 child
->thread
.dbcr2
&= ~(DBCR2_DVC1M
| DBCR2_DVC1BE
);
1255 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1256 child
->thread
.dvc1
= 0;
1258 } else if (slot
== 2) {
1259 if ((dbcr_dac(child
) & (DBCR_DAC2R
| DBCR_DAC2W
)) == 0)
1262 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1263 if (child
->thread
.dbcr2
& DBCR2_DAC12MODE
)
1264 /* Part of a range */
1266 child
->thread
.dbcr2
&= ~(DBCR2_DVC2M
| DBCR2_DVC2BE
);
1268 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1269 child
->thread
.dvc2
= 0;
1271 child
->thread
.dac2
= 0;
1272 dbcr_dac(child
) &= ~(DBCR_DAC2R
| DBCR_DAC2W
);
1278 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1280 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1281 static int set_dac_range(struct task_struct
*child
,
1282 struct ppc_hw_breakpoint
*bp_info
)
1284 int mode
= bp_info
->addr_mode
& PPC_BREAKPOINT_MODE_MASK
;
1286 /* We don't allow range watchpoints to be used with DVC */
1287 if (bp_info
->condition_mode
)
1291 * Best effort to verify the address range. The user/supervisor bits
1292 * prevent trapping in kernel space, but let's fail on an obvious bad
1293 * range. The simple test on the mask is not fool-proof, and any
1294 * exclusive range will spill over into kernel space.
1296 if (bp_info
->addr
>= TASK_SIZE
)
1298 if (mode
== PPC_BREAKPOINT_MODE_MASK
) {
1300 * dac2 is a bitmask. Don't allow a mask that makes a
1301 * kernel space address from a valid dac1 value
1303 if (~((unsigned long)bp_info
->addr2
) >= TASK_SIZE
)
1307 * For range breakpoints, addr2 must also be a valid address
1309 if (bp_info
->addr2
>= TASK_SIZE
)
1313 if (child
->thread
.dbcr0
&
1314 (DBCR0_DAC1R
| DBCR0_DAC1W
| DBCR0_DAC2R
| DBCR0_DAC2W
))
1317 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_READ
)
1318 child
->thread
.dbcr0
|= (DBCR0_DAC1R
| DBCR0_IDM
);
1319 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_WRITE
)
1320 child
->thread
.dbcr0
|= (DBCR0_DAC1W
| DBCR0_IDM
);
1321 child
->thread
.dac1
= bp_info
->addr
;
1322 child
->thread
.dac2
= bp_info
->addr2
;
1323 if (mode
== PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE
)
1324 child
->thread
.dbcr2
|= DBCR2_DAC12M
;
1325 else if (mode
== PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE
)
1326 child
->thread
.dbcr2
|= DBCR2_DAC12MX
;
1327 else /* PPC_BREAKPOINT_MODE_MASK */
1328 child
->thread
.dbcr2
|= DBCR2_DAC12MM
;
1329 child
->thread
.regs
->msr
|= MSR_DE
;
1333 #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
1335 static long ppc_set_hwdebug(struct task_struct
*child
,
1336 struct ppc_hw_breakpoint
*bp_info
)
1338 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
1342 if (bp_info
->version
!= 1)
1344 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1346 * Check for invalid flags and combinations
1348 if ((bp_info
->trigger_type
== 0) ||
1349 (bp_info
->trigger_type
& ~(PPC_BREAKPOINT_TRIGGER_EXECUTE
|
1350 PPC_BREAKPOINT_TRIGGER_RW
)) ||
1351 (bp_info
->addr_mode
& ~PPC_BREAKPOINT_MODE_MASK
) ||
1352 (bp_info
->condition_mode
&
1353 ~(PPC_BREAKPOINT_CONDITION_MODE
|
1354 PPC_BREAKPOINT_CONDITION_BE_ALL
)))
1356 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
1357 if (bp_info
->condition_mode
!= PPC_BREAKPOINT_CONDITION_NONE
)
1361 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_EXECUTE
) {
1362 if ((bp_info
->trigger_type
!= PPC_BREAKPOINT_TRIGGER_EXECUTE
) ||
1363 (bp_info
->condition_mode
!= PPC_BREAKPOINT_CONDITION_NONE
))
1365 return set_intruction_bp(child
, bp_info
);
1367 if (bp_info
->addr_mode
== PPC_BREAKPOINT_MODE_EXACT
)
1368 return set_dac(child
, bp_info
);
1370 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1371 return set_dac_range(child
, bp_info
);
1375 #else /* !CONFIG_PPC_ADV_DEBUG_DVCS */
1377 * We only support one data breakpoint
1379 if ((bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_RW
) == 0 ||
1380 (bp_info
->trigger_type
& ~PPC_BREAKPOINT_TRIGGER_RW
) != 0 ||
1381 bp_info
->addr_mode
!= PPC_BREAKPOINT_MODE_EXACT
||
1382 bp_info
->condition_mode
!= PPC_BREAKPOINT_CONDITION_NONE
)
1385 if (child
->thread
.dabr
)
1388 if ((unsigned long)bp_info
->addr
>= TASK_SIZE
)
1391 dabr
= (unsigned long)bp_info
->addr
& ~7UL;
1392 dabr
|= DABR_TRANSLATION
;
1393 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_READ
)
1394 dabr
|= DABR_DATA_READ
;
1395 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_WRITE
)
1396 dabr
|= DABR_DATA_WRITE
;
1398 child
->thread
.dabr
= dabr
;
1401 #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
1404 static long ppc_del_hwdebug(struct task_struct
*child
, long addr
, long data
)
1406 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1410 rc
= del_instruction_bp(child
, (int)data
);
1412 rc
= del_dac(child
, (int)data
- 4);
1415 if (!DBCR_ACTIVE_EVENTS(child
->thread
.dbcr0
,
1416 child
->thread
.dbcr1
)) {
1417 child
->thread
.dbcr0
&= ~DBCR0_IDM
;
1418 child
->thread
.regs
->msr
&= ~MSR_DE
;
1425 if (child
->thread
.dabr
== 0)
1428 child
->thread
.dabr
= 0;
1435 * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
1436 * we mark them as obsolete now, they will be removed in a future version
1438 static long arch_ptrace_old(struct task_struct
*child
, long request
,
1439 unsigned long addr
, unsigned long data
)
1441 void __user
*datavp
= (void __user
*) data
;
1444 case PPC_PTRACE_GETREGS
: /* Get GPRs 0 - 31. */
1445 return copy_regset_to_user(child
, &user_ppc_native_view
,
1446 REGSET_GPR
, 0, 32 * sizeof(long),
1449 case PPC_PTRACE_SETREGS
: /* Set GPRs 0 - 31. */
1450 return copy_regset_from_user(child
, &user_ppc_native_view
,
1451 REGSET_GPR
, 0, 32 * sizeof(long),
1454 case PPC_PTRACE_GETFPREGS
: /* Get FPRs 0 - 31. */
1455 return copy_regset_to_user(child
, &user_ppc_native_view
,
1456 REGSET_FPR
, 0, 32 * sizeof(double),
1459 case PPC_PTRACE_SETFPREGS
: /* Set FPRs 0 - 31. */
1460 return copy_regset_from_user(child
, &user_ppc_native_view
,
1461 REGSET_FPR
, 0, 32 * sizeof(double),
1468 long arch_ptrace(struct task_struct
*child
, long request
,
1469 unsigned long addr
, unsigned long data
)
1472 void __user
*datavp
= (void __user
*) data
;
1473 unsigned long __user
*datalp
= datavp
;
1476 /* read the word at location addr in the USER area. */
1477 case PTRACE_PEEKUSR
: {
1478 unsigned long index
, tmp
;
1481 /* convert to index and check */
1484 if ((addr
& 3) || (index
> PT_FPSCR
)
1485 || (child
->thread
.regs
== NULL
))
1488 if ((addr
& 7) || (index
> PT_FPSCR
))
1492 CHECK_FULL_REGS(child
->thread
.regs
);
1493 if (index
< PT_FPR0
) {
1494 tmp
= ptrace_get_reg(child
, (int) index
);
1496 flush_fp_to_thread(child
);
1497 tmp
= ((unsigned long *)child
->thread
.fpr
)
1498 [TS_FPRWIDTH
* (index
- PT_FPR0
)];
1500 ret
= put_user(tmp
, datalp
);
1504 /* write the word at location addr in the USER area */
1505 case PTRACE_POKEUSR
: {
1506 unsigned long index
;
1509 /* convert to index and check */
1512 if ((addr
& 3) || (index
> PT_FPSCR
)
1513 || (child
->thread
.regs
== NULL
))
1516 if ((addr
& 7) || (index
> PT_FPSCR
))
1520 CHECK_FULL_REGS(child
->thread
.regs
);
1521 if (index
< PT_FPR0
) {
1522 ret
= ptrace_put_reg(child
, index
, data
);
1524 flush_fp_to_thread(child
);
1525 ((unsigned long *)child
->thread
.fpr
)
1526 [TS_FPRWIDTH
* (index
- PT_FPR0
)] = data
;
1532 case PPC_PTRACE_GETHWDBGINFO
: {
1533 struct ppc_debug_info dbginfo
;
1535 dbginfo
.version
= 1;
1536 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1537 dbginfo
.num_instruction_bps
= CONFIG_PPC_ADV_DEBUG_IACS
;
1538 dbginfo
.num_data_bps
= CONFIG_PPC_ADV_DEBUG_DACS
;
1539 dbginfo
.num_condition_regs
= CONFIG_PPC_ADV_DEBUG_DVCS
;
1540 dbginfo
.data_bp_alignment
= 4;
1541 dbginfo
.sizeof_condition
= 4;
1542 dbginfo
.features
= PPC_DEBUG_FEATURE_INSN_BP_RANGE
|
1543 PPC_DEBUG_FEATURE_INSN_BP_MASK
;
1544 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1546 PPC_DEBUG_FEATURE_DATA_BP_RANGE
|
1547 PPC_DEBUG_FEATURE_DATA_BP_MASK
;
1549 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
1550 dbginfo
.num_instruction_bps
= 0;
1551 dbginfo
.num_data_bps
= 1;
1552 dbginfo
.num_condition_regs
= 0;
1554 dbginfo
.data_bp_alignment
= 8;
1556 dbginfo
.data_bp_alignment
= 4;
1558 dbginfo
.sizeof_condition
= 0;
1559 dbginfo
.features
= 0;
1560 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1562 if (!access_ok(VERIFY_WRITE
, datavp
,
1563 sizeof(struct ppc_debug_info
)))
1565 ret
= __copy_to_user(datavp
, &dbginfo
,
1566 sizeof(struct ppc_debug_info
)) ?
1571 case PPC_PTRACE_SETHWDEBUG
: {
1572 struct ppc_hw_breakpoint bp_info
;
1574 if (!access_ok(VERIFY_READ
, datavp
,
1575 sizeof(struct ppc_hw_breakpoint
)))
1577 ret
= __copy_from_user(&bp_info
, datavp
,
1578 sizeof(struct ppc_hw_breakpoint
)) ?
1581 ret
= ppc_set_hwdebug(child
, &bp_info
);
1585 case PPC_PTRACE_DELHWDEBUG
: {
1586 ret
= ppc_del_hwdebug(child
, addr
, data
);
1590 case PTRACE_GET_DEBUGREG
: {
1592 /* We only support one DABR and no IABRS at the moment */
1595 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1596 ret
= put_user(child
->thread
.dac1
, datalp
);
1598 ret
= put_user(child
->thread
.dabr
, datalp
);
1603 case PTRACE_SET_DEBUGREG
:
1604 ret
= ptrace_set_debugreg(child
, addr
, data
);
1608 case PTRACE_GETREGS64
:
1610 case PTRACE_GETREGS
: /* Get all pt_regs from the child. */
1611 return copy_regset_to_user(child
, &user_ppc_native_view
,
1613 0, sizeof(struct pt_regs
),
1617 case PTRACE_SETREGS64
:
1619 case PTRACE_SETREGS
: /* Set all gp regs in the child. */
1620 return copy_regset_from_user(child
, &user_ppc_native_view
,
1622 0, sizeof(struct pt_regs
),
1625 case PTRACE_GETFPREGS
: /* Get the child FPU state (FPR0...31 + FPSCR) */
1626 return copy_regset_to_user(child
, &user_ppc_native_view
,
1628 0, sizeof(elf_fpregset_t
),
1631 case PTRACE_SETFPREGS
: /* Set the child FPU state (FPR0...31 + FPSCR) */
1632 return copy_regset_from_user(child
, &user_ppc_native_view
,
1634 0, sizeof(elf_fpregset_t
),
1637 #ifdef CONFIG_ALTIVEC
1638 case PTRACE_GETVRREGS
:
1639 return copy_regset_to_user(child
, &user_ppc_native_view
,
1641 0, (33 * sizeof(vector128
) +
1645 case PTRACE_SETVRREGS
:
1646 return copy_regset_from_user(child
, &user_ppc_native_view
,
1648 0, (33 * sizeof(vector128
) +
1653 case PTRACE_GETVSRREGS
:
1654 return copy_regset_to_user(child
, &user_ppc_native_view
,
1656 0, 32 * sizeof(double),
1659 case PTRACE_SETVSRREGS
:
1660 return copy_regset_from_user(child
, &user_ppc_native_view
,
1662 0, 32 * sizeof(double),
1666 case PTRACE_GETEVRREGS
:
1667 /* Get the child spe register state. */
1668 return copy_regset_to_user(child
, &user_ppc_native_view
,
1669 REGSET_SPE
, 0, 35 * sizeof(u32
),
1672 case PTRACE_SETEVRREGS
:
1673 /* Set the child spe register state. */
1674 return copy_regset_from_user(child
, &user_ppc_native_view
,
1675 REGSET_SPE
, 0, 35 * sizeof(u32
),
1679 /* Old reverse args ptrace callss */
1680 case PPC_PTRACE_GETREGS
: /* Get GPRs 0 - 31. */
1681 case PPC_PTRACE_SETREGS
: /* Set GPRs 0 - 31. */
1682 case PPC_PTRACE_GETFPREGS
: /* Get FPRs 0 - 31. */
1683 case PPC_PTRACE_SETFPREGS
: /* Get FPRs 0 - 31. */
1684 ret
= arch_ptrace_old(child
, request
, addr
, data
);
1688 ret
= ptrace_request(child
, request
, addr
, data
);
1695 * We must return the syscall number to actually look up in the table.
1696 * This can be -1L to skip running any syscall at all.
1698 long do_syscall_trace_enter(struct pt_regs
*regs
)
1702 secure_computing(regs
->gpr
[0]);
1704 if (test_thread_flag(TIF_SYSCALL_TRACE
) &&
1705 tracehook_report_syscall_entry(regs
))
1707 * Tracing decided this syscall should not happen.
1708 * We'll return a bogus call number to get an ENOSYS
1709 * error, but leave the original number in regs->gpr[0].
1713 if (unlikely(current
->audit_context
)) {
1715 if (!is_32bit_task())
1716 audit_syscall_entry(AUDIT_ARCH_PPC64
,
1718 regs
->gpr
[3], regs
->gpr
[4],
1719 regs
->gpr
[5], regs
->gpr
[6]);
1722 audit_syscall_entry(AUDIT_ARCH_PPC
,
1724 regs
->gpr
[3] & 0xffffffff,
1725 regs
->gpr
[4] & 0xffffffff,
1726 regs
->gpr
[5] & 0xffffffff,
1727 regs
->gpr
[6] & 0xffffffff);
1730 return ret
?: regs
->gpr
[0];
1733 void do_syscall_trace_leave(struct pt_regs
*regs
)
1737 if (unlikely(current
->audit_context
))
1738 audit_syscall_exit((regs
->ccr
&0x10000000)?AUDITSC_FAILURE
:AUDITSC_SUCCESS
,
1741 step
= test_thread_flag(TIF_SINGLESTEP
);
1742 if (step
|| test_thread_flag(TIF_SYSCALL_TRACE
))
1743 tracehook_report_syscall_exit(regs
, step
);