/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
        /* Don't load the watchpoint registers for the ex-child. */
        clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}
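
/*
 * Note: MIPS has no hardware single-step state that needs clearing on
 * detach, so all this has to do is stop loading the child's watch
 * registers on context switch.
 */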

/*
 * Read a general register set. We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, __s64 __user *data)
{
        struct pt_regs *regs;
        int i;

        if (!access_ok(VERIFY_WRITE, data, 38 * 8))
                return -EIO;

        regs = task_pt_regs(child);

        for (i = 0; i < 32; i++)
                __put_user((long)regs->regs[i], data + i);
        __put_user((long)regs->lo, data + EF_LO - EF_R0);
        __put_user((long)regs->hi, data + EF_HI - EF_R0);
        __put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
        __put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
        __put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
        __put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);

        return 0;
}
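
/*
 * Rough userspace sketch (hypothetical tracer, not part of this file):
 * the 38 * 8 byte block written above is read as an array of 64-bit
 * slots whose order matches the EF_* offsets used here, e.g.
 *
 *      __u64 gregs[38];
 *      ptrace(PTRACE_GETREGS, pid, NULL, gregs);
 *      // gregs[0..31]: GPRs, then lo, hi, epc, badvaddr, status, cause
 */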

/*
 * Write a general register set. As for PTRACE_GETREGS, we always use
 * the 64-bit format. On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, __s64 __user *data)
{
        struct pt_regs *regs;
        int i;

        if (!access_ok(VERIFY_READ, data, 38 * 8))
                return -EIO;

        regs = task_pt_regs(child);

        for (i = 0; i < 32; i++)
                __get_user(regs->regs[i], data + i);
        __get_user(regs->lo, data + EF_LO - EF_R0);
        __get_user(regs->hi, data + EF_HI - EF_R0);
        __get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);

        /* badvaddr, status, and cause may not be written. */

        return 0;
}

int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
        int i;
        unsigned int tmp;

        if (!access_ok(VERIFY_WRITE, data, 33 * 8))
                return -EIO;

        if (tsk_used_math(child)) {
                fpureg_t *fregs = get_fpu_regs(child);
                for (i = 0; i < 32; i++)
                        __put_user(fregs[i], i + (__u64 __user *) data);
        } else {
                for (i = 0; i < 32; i++)
                        __put_user((__u64) -1, i + (__u64 __user *) data);
        }

        __put_user(child->thread.fpu.fcr31, data + 64);

        /* Read the FP implementation/revision register (FIR, $0). */
        preempt_disable();
        if (cpu_has_fpu) {
                unsigned int flags;

                if (cpu_has_mipsmt) {
                        /* MT: keep other VPEs quiet while Status is modified. */
                        unsigned int vpflags = dvpe();
                        flags = read_c0_status();
                        __enable_fpu();
                        __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
                        write_c0_status(flags);
                        evpe(vpflags);
                } else {
                        flags = read_c0_status();
                        __enable_fpu();
                        __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
                        write_c0_status(flags);
                }
        } else {
                tmp = 0;
        }
        preempt_enable();
        __put_user(tmp, data + 65);

        return 0;
}
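
/*
 * Layout note: the 33 * 8 byte area filled in above holds the 32 FP
 * registers as 64-bit values (32-bit word indices 0..63), FCR31 at
 * word index 64 and the FIR value at word index 65.
 */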

int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
        fpureg_t *fregs;
        int i;

        if (!access_ok(VERIFY_READ, data, 33 * 8))
                return -EIO;

        fregs = get_fpu_regs(child);

        for (i = 0; i < 32; i++)
                __get_user(fregs[i], i + (__u64 __user *) data);

        __get_user(child->thread.fpu.fcr31, data + 64);

        /* FIR may not be written. */

        return 0;
}

int ptrace_get_watch_regs(struct task_struct *child,
                          struct pt_watch_regs __user *addr)
{
        enum pt_watch_style style;
        int i;

        if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
                return -EIO;
        if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
                return -EIO;

#ifdef CONFIG_32BIT
        style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
        style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

        __put_user(style, &addr->style);
        __put_user(current_cpu_data.watch_reg_use_cnt,
                   &addr->WATCH_STYLE.num_valid);
        for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
                __put_user(child->thread.watch.mips3264.watchlo[i],
                           &addr->WATCH_STYLE.watchlo[i]);
                /* Only the mask and I/R/W bits of watchhi are exposed. */
                __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
                           &addr->WATCH_STYLE.watchhi[i]);
                __put_user(current_cpu_data.watch_reg_masks[i],
                           &addr->WATCH_STYLE.watch_masks[i]);
        }
        for (; i < 8; i++) {
                __put_user(0, &addr->WATCH_STYLE.watchlo[i]);
                __put_user(0, &addr->WATCH_STYLE.watchhi[i]);
                __put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
        }

        return 0;
}
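
/*
 * Note: the WATCH_STYLE macro defined above selects the mips32 or
 * mips64 member of the pt_watch_regs union at compile time; it is
 * reused by ptrace_set_watch_regs() below.
 */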

int ptrace_set_watch_regs(struct task_struct *child,
                          struct pt_watch_regs __user *addr)
{
        int i;
        int watch_active = 0;
        unsigned long lt[NUM_WATCH_REGS];
        u16 ht[NUM_WATCH_REGS];

        if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
                return -EIO;
        if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
                return -EIO;
        /* Check the values. */
        for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
                __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
                if (lt[i] & __UA_LIMIT)
                        return -EINVAL;
#else
                if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
                        if (lt[i] & 0xffffffff80000000UL)
                                return -EINVAL;
                } else {
                        if (lt[i] & __UA_LIMIT)
                                return -EINVAL;
                }
#endif
                __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
                if (ht[i] & ~0xff8)
                        return -EINVAL;
        }
        /* Install them. */
        for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
                if (lt[i] & 7)
                        watch_active = 1;
                child->thread.watch.mips3264.watchlo[i] = lt[i];
                /* The G bit is ORed in when the registers are loaded. */
                child->thread.watch.mips3264.watchhi[i] = ht[i];
        }

        if (watch_active)
                set_tsk_thread_flag(child, TIF_LOAD_WATCH);
        else
                clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

        return 0;
}
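
/*
 * Rough userspace sketch (hypothetical tracer, not part of this file):
 * a doubleword-aligned write watchpoint could be requested with
 * something like
 *
 *      struct pt_watch_regs wr = { 0 };
 *      wr.mips32.watchlo[0] = (addr & ~7UL) | 1;  // W bit; .mips64 on 64-bit
 *      wr.mips32.watchhi[0] = 0;                  // no address mask
 *      ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL);
 *
 * subject to the address-range and 0xff8 mask checks enforced above.
 */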

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
        int ret;

        switch (request) {
        /* when I and D space are separate, these will need to be fixed. */
        case PTRACE_PEEKTEXT: /* read word at location addr. */
        case PTRACE_PEEKDATA:
                ret = generic_ptrace_peekdata(child, addr, data);
                break;

        /* Read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                struct pt_regs *regs;
                unsigned long tmp = 0;

                regs = task_pt_regs(child);
                ret = 0; /* Default return value. */

                switch (addr) {
                case 0 ... 31:
                        tmp = regs->regs[addr];
                        break;
                case FPR_BASE ... FPR_BASE + 31:
                        if (tsk_used_math(child)) {
                                fpureg_t *fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
                                /*
                                 * The odd registers are actually the high
                                 * order bits of the values stored in the even
                                 * registers - unless we're using r2k_switch.S.
                                 */
                                if (addr & 1)
                                        tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
                                else
                                        tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
#endif
#ifdef CONFIG_64BIT
                                tmp = fregs[addr - FPR_BASE];
#endif
                        } else {
                                tmp = -1; /* FP not yet used */
                        }
                        break;
                case PC:
                        tmp = regs->cp0_epc;
                        break;
                case CAUSE:
                        tmp = regs->cp0_cause;
                        break;
                case BADVADDR:
                        tmp = regs->cp0_badvaddr;
                        break;
                case MMHI:
                        tmp = regs->hi;
                        break;
                case MMLO:
                        tmp = regs->lo;
                        break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
                case ACX:
                        tmp = regs->acx;
                        break;
#endif
                case FPC_CSR:
                        tmp = child->thread.fpu.fcr31;
                        break;
                case FPC_EIR: { /* implementation / version register */
                        unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
                        unsigned long irqflags;
                        unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */

                        preempt_disable();
                        if (!cpu_has_fpu) {
                                preempt_enable();
                                break;
                        }

#ifdef CONFIG_MIPS_MT_SMTC
                        /* Read-modify-write of Status must be atomic */
                        local_irq_save(irqflags);
                        mtflags = dmt();
#endif /* CONFIG_MIPS_MT_SMTC */
                        if (cpu_has_mipsmt) {
                                unsigned int vpflags = dvpe();
                                flags = read_c0_status();
                                __enable_fpu();
                                __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
                                write_c0_status(flags);
                                evpe(vpflags);
                        } else {
                                flags = read_c0_status();
                                __enable_fpu();
                                __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
                                write_c0_status(flags);
                        }
#ifdef CONFIG_MIPS_MT_SMTC
                        emt(mtflags);
                        local_irq_restore(irqflags);
#endif /* CONFIG_MIPS_MT_SMTC */
                        preempt_enable();
                        break;
                }
                case DSP_BASE ... DSP_BASE + 5: {
                        dspreg_t *dregs;

                        if (!cpu_has_dsp) {
                                tmp = 0;
                                ret = -EIO;
                                goto out;
                        }
                        dregs = __get_dsp_regs(child);
                        tmp = (unsigned long) (dregs[addr - DSP_BASE]);
                        break;
                }
                case DSP_CONTROL:
                        if (!cpu_has_dsp) {
                                tmp = 0;
                                ret = -EIO;
                                goto out;
                        }
                        tmp = child->thread.dsp.dspcontrol;
                        break;
                default:
                        tmp = 0;
                        ret = -EIO;
                        goto out;
                }
                ret = put_user(tmp, (unsigned long __user *) data);
                break;
        }

        /* when I and D space are separate, this will have to be fixed. */
        case PTRACE_POKETEXT: /* write the word at location addr. */
        case PTRACE_POKEDATA:
                ret = generic_ptrace_pokedata(child, addr, data);
                break;

        case PTRACE_POKEUSR: {
                struct pt_regs *regs;
                ret = 0;
                regs = task_pt_regs(child);

                switch (addr) {
                case 0 ... 31:
                        regs->regs[addr] = data;
                        break;
                case FPR_BASE ... FPR_BASE + 31: {
                        fpureg_t *fregs = get_fpu_regs(child);

                        if (!tsk_used_math(child)) {
                                /* FP not yet used */
                                memset(&child->thread.fpu, ~0,
                                       sizeof(child->thread.fpu));
                                child->thread.fpu.fcr31 = 0;
                        }
#ifdef CONFIG_32BIT
                        /*
                         * The odd registers are actually the high order bits
                         * of the values stored in the even registers - unless
                         * we're using r2k_switch.S.
                         */
                        if (addr & 1) {
                                fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
                                fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
                        } else {
                                fregs[addr - FPR_BASE] &= ~0xffffffffLL;
                                fregs[addr - FPR_BASE] |= data;
                        }
#endif
#ifdef CONFIG_64BIT
                        fregs[addr - FPR_BASE] = data;
#endif
                        break;
                }
                case PC:
                        regs->cp0_epc = data;
                        break;
                case MMHI:
                        regs->hi = data;
                        break;
                case MMLO:
                        regs->lo = data;
                        break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
                case ACX:
                        regs->acx = data;
                        break;
#endif
                case FPC_CSR:
                        child->thread.fpu.fcr31 = data;
                        break;
                case DSP_BASE ... DSP_BASE + 5: {
                        dspreg_t *dregs;

                        if (!cpu_has_dsp) {
                                ret = -EIO;
                                break;
                        }

                        dregs = __get_dsp_regs(child);
                        dregs[addr - DSP_BASE] = data;
                        break;
                }
                case DSP_CONTROL:
                        if (!cpu_has_dsp) {
                                ret = -EIO;
                                break;
                        }
                        child->thread.dsp.dspcontrol = data;
                        break;
                default:
                        /* The rest are not allowed. */
                        ret = -EIO;
                        break;
                }
                break;
        }

        case PTRACE_GETREGS:
                ret = ptrace_getregs(child, (__s64 __user *) data);
                break;

        case PTRACE_SETREGS:
                ret = ptrace_setregs(child, (__s64 __user *) data);
                break;

        case PTRACE_GETFPREGS:
                ret = ptrace_getfpregs(child, (__u32 __user *) data);
                break;

        case PTRACE_SETFPREGS:
                ret = ptrace_setfpregs(child, (__u32 __user *) data);
                break;

        case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
        case PTRACE_CONT: { /* restart after signal. */
                ret = -EIO;
                if (!valid_signal(data))
                        break;
                if (request == PTRACE_SYSCALL) {
                        set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
                } else {
                        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
                }
                child->exit_code = data;
                wake_up_process(child);
                ret = 0;
                break;
        }

        /*
         * make the child exit. Best I can do is send it a sigkill.
         * perhaps it should be put in the status that it wants to
         * exit.
         */
        case PTRACE_KILL:
                ret = 0;
                if (child->exit_state == EXIT_ZOMBIE) /* already dead */
                        break;
                child->exit_code = SIGKILL;
                wake_up_process(child);
                break;

        case PTRACE_GET_THREAD_AREA:
                ret = put_user(task_thread_info(child)->tp_value,
                               (unsigned long __user *) data);
                break;

        case PTRACE_GET_WATCH_REGS:
                ret = ptrace_get_watch_regs(child,
                                        (struct pt_watch_regs __user *) addr);
                break;

        case PTRACE_SET_WATCH_REGS:
                ret = ptrace_set_watch_regs(child,
                                        (struct pt_watch_regs __user *) addr);
                break;

        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }
 out:
        return ret;
}
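
/*
 * Rough userspace sketch (hypothetical tracer, not part of this file):
 * the PEEKUSR/POKEUSR offsets decoded above are the register indices
 * from <asm/ptrace.h> (0-31 for the GPRs, then FPR_BASE, PC, CAUSE,
 * BADVADDR, MMHI, MMLO, FPC_CSR, FPC_EIR, DSP_BASE, DSP_CONTROL), e.g.
 *
 *      long epc = ptrace(PTRACE_PEEKUSER, pid, PC, NULL);
 *
 * (PTRACE_PEEKUSER is the glibc spelling of PTRACE_PEEKUSR.)
 */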

static inline int audit_arch(void)
{
        int arch = EM_MIPS;
#ifdef CONFIG_64BIT
        arch |= __AUDIT_ARCH_64BIT;
#endif
#if defined(__LITTLE_ENDIAN)
        arch |= __AUDIT_ARCH_LE;
#endif
        return arch;
}
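
/*
 * Note: the value built by audit_arch() is the AUDIT_ARCH_MIPS*
 * constant matching this kernel build (EM_MIPS with the 64-bit and/or
 * little-endian flags ORed in), as expected by audit_syscall_entry().
 */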

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
{
        /*
         * Do the secure computing check first.  regs[0] holds the
         * syscall number saved by the syscall entry code.
         */
        if (!entryexit)
                secure_computing(regs->regs[0]);

        if (unlikely(current->audit_context) && entryexit)
                audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
                                   regs->regs[2]);

        if (!(current->ptrace & PT_PTRACED))
                goto out;

        if (!test_thread_flag(TIF_SYSCALL_TRACE))
                goto out;

        /*
         * The 0x80 provides a way for the tracing parent to distinguish
         * between a syscall stop and SIGTRAP delivery.
         */
        ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
                                 0x80 : 0));

        /*
         * this isn't the same as continuing with a signal, but it will do
         * for normal use. strace only continues with a signal if the
         * stopping signal is not SIGTRAP. -brl
         */
        if (current->exit_code) {
                send_sig(current->exit_code, current, 1);
                current->exit_code = 0;
        }

out:
        if (unlikely(current->audit_context) && !entryexit)
                audit_syscall_entry(audit_arch(), regs->regs[0],
                                    regs->regs[4], regs->regs[5],
                                    regs->regs[6], regs->regs[7]);
}
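
/*
 * Note: do_syscall_trace() is called from the syscall entry/exit
 * assembly in arch/mips/kernel/scall*.S when syscall-tracing work is
 * flagged for the current task; `entryexit` is 0 on entry and 1 on
 * exit, which selects the secure_computing() and audit entry/exit
 * halves above.
 */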