/*
 *  linux/arch/m32r/kernel/ptrace.c
 *
 *  Copyright (C) 2002  Hirokazu Takata, Takeo Takahashi
 *  Copyright (C) 2004  Hirokazu Takata, Kei Sakamoto
 *
 *  Original x86 implementation:
 *	By Ross Biro 1/23/92
 *	edited by Linus Torvalds
 *
 *  Some code taken from sh version:
 *    Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
 *  Some code taken from arm version:
 *    Copyright (C) 2000 Russell King
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/string.h>
#include <linux/signal.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>

1da177e4 LT |
35 | /* |
36 | * This routine will get a word off of the process kernel stack. | |
37 | */ | |
38 | static inline unsigned long int | |
39 | get_stack_long(struct task_struct *task, int offset) | |
40 | { | |
41 | unsigned long *stack; | |
42 | ||
6c3559fc | 43 | stack = (unsigned long *)task_pt_regs(task); |
1da177e4 LT |
44 | |
45 | return stack[offset]; | |
46 | } | |
47 | ||
48 | /* | |
49 | * This routine will put a word on the process kernel stack. | |
50 | */ | |
51 | static inline int | |
52 | put_stack_long(struct task_struct *task, int offset, unsigned long data) | |
53 | { | |
54 | unsigned long *stack; | |
55 | ||
6c3559fc | 56 | stack = (unsigned long *)task_pt_regs(task); |
1da177e4 LT |
57 | stack[offset] = data; |
58 | ||
59 | return 0; | |
60 | } | |
61 | ||
62 | static int reg_offset[] = { | |
63 | PT_R0, PT_R1, PT_R2, PT_R3, PT_R4, PT_R5, PT_R6, PT_R7, | |
64 | PT_R8, PT_R9, PT_R10, PT_R11, PT_R12, PT_FP, PT_LR, PT_SPU, | |
65 | }; | |
66 | ||
67 | /* | |
68 | * Read the word at offset "off" into the "struct user". We | |
69 | * actually access the pt_regs stored on the kernel stack. | |
70 | */ | |
71 | static int ptrace_read_user(struct task_struct *tsk, unsigned long off, | |
72 | unsigned long __user *data) | |
73 | { | |
74 | unsigned long tmp; | |
75 | #ifndef NO_FPU | |
76 | struct user * dummy = NULL; | |
77 | #endif | |
78 | ||
d5a6d173 | 79 | if ((off & 3) || off > sizeof(struct user) - 3) |
1da177e4 LT |
80 | return -EIO; |
81 | ||
82 | off >>= 2; | |
83 | switch (off) { | |
84 | case PT_EVB: | |
85 | __asm__ __volatile__ ( | |
86 | "mvfc %0, cr5 \n\t" | |
87 | : "=r" (tmp) | |
88 | ); | |
89 | break; | |
90 | case PT_CBR: { | |
91 | unsigned long psw; | |
92 | psw = get_stack_long(tsk, PT_PSW); | |
93 | tmp = ((psw >> 8) & 1); | |
94 | } | |
95 | break; | |
96 | case PT_PSW: { | |
97 | unsigned long psw, bbpsw; | |
98 | psw = get_stack_long(tsk, PT_PSW); | |
99 | bbpsw = get_stack_long(tsk, PT_BBPSW); | |
100 | tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8); | |
101 | } | |
102 | break; | |
103 | case PT_PC: | |
104 | tmp = get_stack_long(tsk, PT_BPC); | |
105 | break; | |
106 | case PT_BPC: | |
107 | off = PT_BBPC; | |
108 | /* fall through */ | |
109 | default: | |
110 | if (off < (sizeof(struct pt_regs) >> 2)) | |
111 | tmp = get_stack_long(tsk, off); | |
112 | #ifndef NO_FPU | |
113 | else if (off >= (long)(&dummy->fpu >> 2) && | |
114 | off < (long)(&dummy->u_fpvalid >> 2)) { | |
115 | if (!tsk_used_math(tsk)) { | |
116 | if (off == (long)(&dummy->fpu.fpscr >> 2)) | |
117 | tmp = FPSCR_INIT; | |
118 | else | |
119 | tmp = 0; | |
120 | } else | |
121 | tmp = ((long *)(&tsk->thread.fpu >> 2)) | |
122 | [off - (long)&dummy->fpu]; | |
123 | } else if (off == (long)(&dummy->u_fpvalid >> 2)) | |
124 | tmp = !!tsk_used_math(tsk); | |
125 | #endif /* not NO_FPU */ | |
126 | else | |
127 | tmp = 0; | |
128 | } | |
129 | ||
130 | return put_user(tmp, data); | |
131 | } | |
132 | ||
133 | static int ptrace_write_user(struct task_struct *tsk, unsigned long off, | |
134 | unsigned long data) | |
135 | { | |
136 | int ret = -EIO; | |
137 | #ifndef NO_FPU | |
138 | struct user * dummy = NULL; | |
139 | #endif | |
140 | ||
d5a6d173 | 141 | if ((off & 3) || off > sizeof(struct user) - 3) |
1da177e4 LT |
142 | return -EIO; |
143 | ||
144 | off >>= 2; | |
145 | switch (off) { | |
146 | case PT_EVB: | |
147 | case PT_BPC: | |
148 | case PT_SPI: | |
149 | /* We don't allow to modify evb. */ | |
150 | ret = 0; | |
151 | break; | |
152 | case PT_PSW: | |
153 | case PT_CBR: { | |
154 | /* We allow to modify only cbr in psw */ | |
155 | unsigned long psw; | |
156 | psw = get_stack_long(tsk, PT_PSW); | |
157 | psw = (psw & ~0x100) | ((data & 1) << 8); | |
158 | ret = put_stack_long(tsk, PT_PSW, psw); | |
159 | } | |
160 | break; | |
161 | case PT_PC: | |
162 | off = PT_BPC; | |
163 | data &= ~1; | |
164 | /* fall through */ | |
165 | default: | |
166 | if (off < (sizeof(struct pt_regs) >> 2)) | |
167 | ret = put_stack_long(tsk, off, data); | |
168 | #ifndef NO_FPU | |
169 | else if (off >= (long)(&dummy->fpu >> 2) && | |
170 | off < (long)(&dummy->u_fpvalid >> 2)) { | |
171 | set_stopped_child_used_math(tsk); | |
172 | ((long *)&tsk->thread.fpu) | |
173 | [off - (long)&dummy->fpu] = data; | |
174 | ret = 0; | |
175 | } else if (off == (long)(&dummy->u_fpvalid >> 2)) { | |
176 | conditional_stopped_child_used_math(data, tsk); | |
177 | ret = 0; | |
178 | } | |
179 | #endif /* not NO_FPU */ | |
180 | break; | |
181 | } | |
182 | ||
183 | return ret; | |
184 | } | |
185 | ||
186 | /* | |
187 | * Get all user integer registers. | |
188 | */ | |
189 | static int ptrace_getregs(struct task_struct *tsk, void __user *uregs) | |
190 | { | |
6c3559fc | 191 | struct pt_regs *regs = task_pt_regs(tsk); |
1da177e4 LT |
192 | |
193 | return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0; | |
194 | } | |
195 | ||
196 | /* | |
197 | * Set all user integer registers. | |
198 | */ | |
199 | static int ptrace_setregs(struct task_struct *tsk, void __user *uregs) | |
200 | { | |
201 | struct pt_regs newregs; | |
202 | int ret; | |
203 | ||
204 | ret = -EFAULT; | |
205 | if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) { | |
6c3559fc | 206 | struct pt_regs *regs = task_pt_regs(tsk); |
1da177e4 LT |
207 | *regs = newregs; |
208 | ret = 0; | |
209 | } | |
210 | ||
211 | return ret; | |
212 | } | |
213 | ||
214 | ||
215 | static inline int | |
216 | check_condition_bit(struct task_struct *child) | |
217 | { | |
218 | return (int)((get_stack_long(child, PT_PSW) >> 8) & 1); | |
219 | } | |
220 | ||
221 | static int | |
222 | check_condition_src(unsigned long op, unsigned long regno1, | |
223 | unsigned long regno2, struct task_struct *child) | |
224 | { | |
225 | unsigned long reg1, reg2; | |
226 | ||
227 | reg2 = get_stack_long(child, reg_offset[regno2]); | |
228 | ||
229 | switch (op) { | |
230 | case 0x0: /* BEQ */ | |
231 | reg1 = get_stack_long(child, reg_offset[regno1]); | |
232 | return reg1 == reg2; | |
233 | case 0x1: /* BNE */ | |
234 | reg1 = get_stack_long(child, reg_offset[regno1]); | |
235 | return reg1 != reg2; | |
236 | case 0x8: /* BEQZ */ | |
237 | return reg2 == 0; | |
238 | case 0x9: /* BNEZ */ | |
239 | return reg2 != 0; | |
240 | case 0xa: /* BLTZ */ | |
241 | return (int)reg2 < 0; | |
242 | case 0xb: /* BGEZ */ | |
243 | return (int)reg2 >= 0; | |
244 | case 0xc: /* BLEZ */ | |
245 | return (int)reg2 <= 0; | |
246 | case 0xd: /* BGTZ */ | |
247 | return (int)reg2 > 0; | |
248 | default: | |
249 | /* never reached */ | |
250 | return 0; | |
251 | } | |
252 | } | |
253 | ||
254 | static void | |
255 | compute_next_pc_for_16bit_insn(unsigned long insn, unsigned long pc, | |
256 | unsigned long *next_pc, | |
257 | struct task_struct *child) | |
258 | { | |
259 | unsigned long op, op2, op3; | |
260 | unsigned long disp; | |
261 | unsigned long regno; | |
262 | int parallel = 0; | |
263 | ||
264 | if (insn & 0x00008000) | |
265 | parallel = 1; | |
266 | if (pc & 3) | |
267 | insn &= 0x7fff; /* right slot */ | |
268 | else | |
269 | insn >>= 16; /* left slot */ | |
270 | ||
271 | op = (insn >> 12) & 0xf; | |
272 | op2 = (insn >> 8) & 0xf; | |
273 | op3 = (insn >> 4) & 0xf; | |
274 | ||
275 | if (op == 0x7) { | |
276 | switch (op2) { | |
277 | case 0xd: /* BNC */ | |
278 | case 0x9: /* BNCL */ | |
279 | if (!check_condition_bit(child)) { | |
280 | disp = (long)(insn << 24) >> 22; | |
281 | *next_pc = (pc & ~0x3) + disp; | |
282 | return; | |
283 | } | |
284 | break; | |
285 | case 0x8: /* BCL */ | |
286 | case 0xc: /* BC */ | |
287 | if (check_condition_bit(child)) { | |
288 | disp = (long)(insn << 24) >> 22; | |
289 | *next_pc = (pc & ~0x3) + disp; | |
290 | return; | |
291 | } | |
292 | break; | |
293 | case 0xe: /* BL */ | |
294 | case 0xf: /* BRA */ | |
295 | disp = (long)(insn << 24) >> 22; | |
296 | *next_pc = (pc & ~0x3) + disp; | |
297 | return; | |
298 | break; | |
299 | } | |
300 | } else if (op == 0x1) { | |
301 | switch (op2) { | |
302 | case 0x0: | |
303 | if (op3 == 0xf) { /* TRAP */ | |
304 | #if 1 | |
305 | /* pass through */ | |
306 | #else | |
307 | /* kernel space is not allowed as next_pc */ | |
308 | unsigned long evb; | |
309 | unsigned long trapno; | |
310 | trapno = insn & 0xf; | |
311 | __asm__ __volatile__ ( | |
312 | "mvfc %0, cr5\n" | |
313 | :"=r"(evb) | |
314 | : | |
315 | ); | |
316 | *next_pc = evb + (trapno << 2); | |
317 | return; | |
318 | #endif | |
319 | } else if (op3 == 0xd) { /* RTE */ | |
320 | *next_pc = get_stack_long(child, PT_BPC); | |
321 | return; | |
322 | } | |
323 | break; | |
324 | case 0xc: /* JC */ | |
325 | if (op3 == 0xc && check_condition_bit(child)) { | |
326 | regno = insn & 0xf; | |
327 | *next_pc = get_stack_long(child, | |
328 | reg_offset[regno]); | |
329 | return; | |
330 | } | |
331 | break; | |
332 | case 0xd: /* JNC */ | |
333 | if (op3 == 0xc && !check_condition_bit(child)) { | |
334 | regno = insn & 0xf; | |
335 | *next_pc = get_stack_long(child, | |
336 | reg_offset[regno]); | |
337 | return; | |
338 | } | |
339 | break; | |
340 | case 0xe: /* JL */ | |
341 | case 0xf: /* JMP */ | |
342 | if (op3 == 0xc) { /* JMP */ | |
343 | regno = insn & 0xf; | |
344 | *next_pc = get_stack_long(child, | |
345 | reg_offset[regno]); | |
346 | return; | |
347 | } | |
348 | break; | |
349 | } | |
350 | } | |
351 | if (parallel) | |
352 | *next_pc = pc + 4; | |
353 | else | |
354 | *next_pc = pc + 2; | |
355 | } | |
356 | ||
357 | static void | |
358 | compute_next_pc_for_32bit_insn(unsigned long insn, unsigned long pc, | |
359 | unsigned long *next_pc, | |
360 | struct task_struct *child) | |
361 | { | |
362 | unsigned long op; | |
363 | unsigned long op2; | |
364 | unsigned long disp; | |
365 | unsigned long regno1, regno2; | |
366 | ||
367 | op = (insn >> 28) & 0xf; | |
368 | if (op == 0xf) { /* branch 24-bit relative */ | |
369 | op2 = (insn >> 24) & 0xf; | |
370 | switch (op2) { | |
371 | case 0xd: /* BNC */ | |
372 | case 0x9: /* BNCL */ | |
373 | if (!check_condition_bit(child)) { | |
374 | disp = (long)(insn << 8) >> 6; | |
375 | *next_pc = (pc & ~0x3) + disp; | |
376 | return; | |
377 | } | |
378 | break; | |
379 | case 0x8: /* BCL */ | |
380 | case 0xc: /* BC */ | |
381 | if (check_condition_bit(child)) { | |
382 | disp = (long)(insn << 8) >> 6; | |
383 | *next_pc = (pc & ~0x3) + disp; | |
384 | return; | |
385 | } | |
386 | break; | |
387 | case 0xe: /* BL */ | |
388 | case 0xf: /* BRA */ | |
389 | disp = (long)(insn << 8) >> 6; | |
390 | *next_pc = (pc & ~0x3) + disp; | |
391 | return; | |
392 | } | |
393 | } else if (op == 0xb) { /* branch 16-bit relative */ | |
394 | op2 = (insn >> 20) & 0xf; | |
395 | switch (op2) { | |
396 | case 0x0: /* BEQ */ | |
397 | case 0x1: /* BNE */ | |
398 | case 0x8: /* BEQZ */ | |
399 | case 0x9: /* BNEZ */ | |
400 | case 0xa: /* BLTZ */ | |
401 | case 0xb: /* BGEZ */ | |
402 | case 0xc: /* BLEZ */ | |
403 | case 0xd: /* BGTZ */ | |
404 | regno1 = ((insn >> 24) & 0xf); | |
405 | regno2 = ((insn >> 16) & 0xf); | |
406 | if (check_condition_src(op2, regno1, regno2, child)) { | |
407 | disp = (long)(insn << 16) >> 14; | |
408 | *next_pc = (pc & ~0x3) + disp; | |
409 | return; | |
410 | } | |
411 | break; | |
412 | } | |
413 | } | |
414 | *next_pc = pc + 4; | |
415 | } | |
416 | ||
417 | static inline void | |
418 | compute_next_pc(unsigned long insn, unsigned long pc, | |
419 | unsigned long *next_pc, struct task_struct *child) | |
420 | { | |
421 | if (insn & 0x80000000) | |
422 | compute_next_pc_for_32bit_insn(insn, pc, next_pc, child); | |
423 | else | |
424 | compute_next_pc_for_16bit_insn(insn, pc, next_pc, child); | |
425 | } | |
426 | ||
427 | static int | |
428 | register_debug_trap(struct task_struct *child, unsigned long next_pc, | |
429 | unsigned long next_insn, unsigned long *code) | |
430 | { | |
431 | struct debug_trap *p = &child->thread.debug_trap; | |
432 | unsigned long addr = next_pc & ~3; | |
433 | ||
434 | if (p->nr_trap == MAX_TRAPS) { | |
435 | printk("kernel BUG at %s %d: p->nr_trap = %d\n", | |
436 | __FILE__, __LINE__, p->nr_trap); | |
437 | return -1; | |
438 | } | |
439 | p->addr[p->nr_trap] = addr; | |
440 | p->insn[p->nr_trap] = next_insn; | |
441 | p->nr_trap++; | |
442 | if (next_pc & 3) { | |
443 | *code = (next_insn & 0xffff0000) | 0x10f1; | |
444 | /* xxx --> TRAP1 */ | |
445 | } else { | |
446 | if ((next_insn & 0x80000000) || (next_insn & 0x8000)) { | |
447 | *code = 0x10f17000; | |
448 | /* TRAP1 --> NOP */ | |
449 | } else { | |
450 | *code = (next_insn & 0xffff) | 0x10f10000; | |
451 | /* TRAP1 --> xxx */ | |
452 | } | |
453 | } | |
454 | return 0; | |
455 | } | |
456 | ||
457 | static int | |
458 | unregister_debug_trap(struct task_struct *child, unsigned long addr, | |
459 | unsigned long *code) | |
460 | { | |
461 | struct debug_trap *p = &child->thread.debug_trap; | |
462 | int i; | |
463 | ||
464 | /* Search debug trap entry. */ | |
465 | for (i = 0; i < p->nr_trap; i++) { | |
466 | if (p->addr[i] == addr) | |
467 | break; | |
468 | } | |
469 | if (i >= p->nr_trap) { | |
470 | /* The trap may be requested from debugger. | |
471 | * ptrace should do nothing in this case. | |
472 | */ | |
473 | return 0; | |
474 | } | |
475 | ||
ec9674e7 | 476 | /* Recover original instruction code. */ |
1da177e4 LT |
477 | *code = p->insn[i]; |
478 | ||
479 | /* Shift debug trap entries. */ | |
480 | while (i < p->nr_trap - 1) { | |
481 | p->insn[i] = p->insn[i + 1]; | |
482 | p->addr[i] = p->addr[i + 1]; | |
483 | i++; | |
484 | } | |
485 | p->nr_trap--; | |
486 | return 1; | |
487 | } | |
488 | ||
489 | static void | |
490 | unregister_all_debug_traps(struct task_struct *child) | |
491 | { | |
492 | struct debug_trap *p = &child->thread.debug_trap; | |
493 | int i; | |
494 | ||
495 | for (i = 0; i < p->nr_trap; i++) | |
f307ab6d LS |
496 | access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), |
497 | FOLL_FORCE | FOLL_WRITE); | |
1da177e4 LT |
498 | p->nr_trap = 0; |
499 | } | |
500 | ||
501 | static inline void | |
502 | invalidate_cache(void) | |
503 | { | |
504 | #if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP) | |
505 | ||
506 | _flush_cache_copyback_all(); | |
507 | ||
508 | #else /* ! CONFIG_CHIP_M32700 */ | |
509 | ||
510 | /* Invalidate cache */ | |
511 | __asm__ __volatile__ ( | |
512 | "ldi r0, #-1 \n\t" | |
513 | "ldi r1, #0 \n\t" | |
514 | "stb r1, @r0 ; cache off \n\t" | |
515 | "; \n\t" | |
516 | "ldi r0, #-2 \n\t" | |
517 | "ldi r1, #1 \n\t" | |
518 | "stb r1, @r0 ; cache invalidate \n\t" | |
519 | ".fillinsn \n" | |
520 | "0: \n\t" | |
521 | "ldb r1, @r0 ; invalidate check \n\t" | |
522 | "bnez r1, 0b \n\t" | |
523 | "; \n\t" | |
524 | "ldi r0, #-1 \n\t" | |
525 | "ldi r1, #1 \n\t" | |
526 | "stb r1, @r0 ; cache on \n\t" | |
527 | : : : "r0", "r1", "memory" | |
528 | ); | |
529 | /* FIXME: copying-back d-cache and invalidating i-cache are needed. | |
530 | */ | |
531 | #endif /* CONFIG_CHIP_M32700 */ | |
532 | } | |
533 | ||
534 | /* Embed a debug trap (TRAP1) code */ | |
535 | static int | |
536 | embed_debug_trap(struct task_struct *child, unsigned long next_pc) | |
537 | { | |
538 | unsigned long next_insn, code; | |
539 | unsigned long addr = next_pc & ~3; | |
540 | ||
f307ab6d LS |
541 | if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), |
542 | FOLL_FORCE) | |
1da177e4 LT |
543 | != sizeof(next_insn)) { |
544 | return -1; /* error */ | |
545 | } | |
546 | ||
547 | /* Set a trap code. */ | |
548 | if (register_debug_trap(child, next_pc, next_insn, &code)) { | |
549 | return -1; /* error */ | |
550 | } | |
f307ab6d LS |
551 | if (access_process_vm(child, addr, &code, sizeof(code), |
552 | FOLL_FORCE | FOLL_WRITE) | |
1da177e4 LT |
553 | != sizeof(code)) { |
554 | return -1; /* error */ | |
555 | } | |
556 | return 0; /* success */ | |
557 | } | |
558 | ||
559 | void | |
560 | withdraw_debug_trap(struct pt_regs *regs) | |
561 | { | |
562 | unsigned long addr; | |
563 | unsigned long code; | |
564 | ||
565 | addr = (regs->bpc - 2) & ~3; | |
566 | regs->bpc -= 2; | |
567 | if (unregister_debug_trap(current, addr, &code)) { | |
f307ab6d LS |
568 | access_process_vm(current, addr, &code, sizeof(code), |
569 | FOLL_FORCE | FOLL_WRITE); | |
1da177e4 LT |
570 | invalidate_cache(); |
571 | } | |
572 | } | |
573 | ||
0ac15559 | 574 | void |
1da177e4 LT |
575 | init_debug_traps(struct task_struct *child) |
576 | { | |
577 | struct debug_trap *p = &child->thread.debug_trap; | |
578 | int i; | |
579 | p->nr_trap = 0; | |
580 | for (i = 0; i < MAX_TRAPS; i++) { | |
581 | p->addr[i] = 0; | |
582 | p->insn[i] = 0; | |
583 | } | |
584 | } | |
585 | ||
e34112e3 CH |
586 | void user_enable_single_step(struct task_struct *child) |
587 | { | |
588 | unsigned long next_pc; | |
589 | unsigned long pc, insn; | |
590 | ||
591 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | |
592 | ||
593 | /* Compute next pc. */ | |
594 | pc = get_stack_long(child, PT_BPC); | |
595 | ||
f307ab6d LS |
596 | if (access_process_vm(child, pc&~3, &insn, sizeof(insn), |
597 | FOLL_FORCE) | |
e34112e3 | 598 | != sizeof(insn)) |
a6b20297 | 599 | return; |
e34112e3 CH |
600 | |
601 | compute_next_pc(insn, pc, &next_pc, child); | |
602 | if (next_pc & 0x80000000) | |
a6b20297 | 603 | return; |
e34112e3 CH |
604 | |
605 | if (embed_debug_trap(child, next_pc)) | |
a6b20297 | 606 | return; |
e34112e3 CH |
607 | |
608 | invalidate_cache(); | |
609 | } | |
610 | ||
611 | void user_disable_single_step(struct task_struct *child) | |
612 | { | |
613 | unregister_all_debug_traps(child); | |
614 | invalidate_cache(); | |
615 | } | |
1da177e4 LT |
616 | |
617 | /* | |
618 | * Called by kernel/ptrace.c when detaching.. | |
619 | * | |
620 | * Make sure single step bits etc are not set. | |
621 | */ | |
622 | void ptrace_disable(struct task_struct *child) | |
623 | { | |
624 | /* nothing to do.. */ | |
625 | } | |
626 | ||
0ac15559 | 627 | long |
9b05a69e NK |
628 | arch_ptrace(struct task_struct *child, long request, |
629 | unsigned long addr, unsigned long data) | |
1da177e4 | 630 | { |
1da177e4 | 631 | int ret; |
a68caa03 | 632 | unsigned long __user *datap = (unsigned long __user *) data; |
1da177e4 LT |
633 | |
634 | switch (request) { | |
635 | /* | |
636 | * read word at location "addr" in the child process. | |
637 | */ | |
638 | case PTRACE_PEEKTEXT: | |
639 | case PTRACE_PEEKDATA: | |
76647323 | 640 | ret = generic_ptrace_peekdata(child, addr, data); |
1da177e4 LT |
641 | break; |
642 | ||
643 | /* | |
644 | * read the word at location addr in the USER area. | |
645 | */ | |
646 | case PTRACE_PEEKUSR: | |
a68caa03 | 647 | ret = ptrace_read_user(child, addr, datap); |
1da177e4 LT |
648 | break; |
649 | ||
650 | /* | |
651 | * write the word at location addr. | |
652 | */ | |
653 | case PTRACE_POKETEXT: | |
654 | case PTRACE_POKEDATA: | |
f284ce72 AD |
655 | ret = generic_ptrace_pokedata(child, addr, data); |
656 | if (ret == 0 && request == PTRACE_POKETEXT) | |
657 | invalidate_cache(); | |
1da177e4 LT |
658 | break; |
659 | ||
660 | /* | |
661 | * write the word at location addr in the USER area. | |
662 | */ | |
663 | case PTRACE_POKEUSR: | |
664 | ret = ptrace_write_user(child, addr, data); | |
665 | break; | |
666 | ||
1da177e4 | 667 | case PTRACE_GETREGS: |
a68caa03 | 668 | ret = ptrace_getregs(child, datap); |
1da177e4 LT |
669 | break; |
670 | ||
671 | case PTRACE_SETREGS: | |
a68caa03 | 672 | ret = ptrace_setregs(child, datap); |
1da177e4 LT |
673 | break; |
674 | ||
675 | default: | |
676 | ret = ptrace_request(child, request, addr, data); | |
677 | break; | |
678 | } | |
679 | ||
680 | return ret; | |
681 | } | |
682 | ||
1da177e4 LT |
683 | /* notification of system call entry/exit |
684 | * - triggered by current->work.syscall_trace | |
685 | */ | |
686 | void do_syscall_trace(void) | |
687 | { | |
688 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | |
689 | return; | |
690 | if (!(current->ptrace & PT_PTRACED)) | |
691 | return; | |
692 | /* the 0x80 provides a way for the tracing parent to distinguish | |
693 | between a syscall stop and SIGTRAP delivery */ | |
694 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | |
695 | ? 0x80 : 0)); | |
696 | ||
697 | /* | |
698 | * this isn't the same as continuing with a signal, but it will do | |
699 | * for normal use. strace only continues with a signal if the | |
700 | * stopping signal is not SIGTRAP. -brl | |
701 | */ | |
702 | if (current->exit_code) { | |
703 | send_sig(current->exit_code, current, 1); | |
704 | current->exit_code = 0; | |
705 | } | |
706 | } |