arch/x86/kernel/ptrace.c
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
			 (X86_EFLAGS_CF | X86_EFLAGS_PF |		\
			  X86_EFLAGS_AF | X86_EFLAGS_ZF |		\
			  X86_EFLAGS_SF | X86_EFLAGS_TF |		\
			  X86_EFLAGS_DF | X86_EFLAGS_OF |		\
			  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	regno >>= 2;
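	/*
	 * user_regs_struct has a gs slot that 32-bit pt_regs does not,
	 * so indices past FS are shifted down by one (gs itself is
	 * handled separately in get/set_segment_reg).
	 */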
	if (regno > FS)
		--regno;
	return &regs->bx + regno;
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		retval = task->thread.gs;
		if (task == current)
			savesegment(gs, retval);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	if (offset != offsetof(struct user_regs_struct, gs))
		*pt_regs_access(task_pt_regs(task), offset) = value;
	else {
		task->thread.gs = value;
		if (task == current)
			/*
			 * The user-mode %gs is not affected by
			 * kernel entry, so we must update the CPU.
			 */
			loadsegment(gs, value);
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
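	/*
	 * Highest address at which a breakpoint may be placed; this
	 * leaves room for a 4-byte watched access below the top of
	 * the user address range.
	 */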
	return TASK_SIZE - 3;
}

#else /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
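	/*
	 * user_regs_struct and pt_regs share the same leading layout on
	 * 64-bit, so the byte offset indexes pt_regs directly; segment
	 * and base registers are handled by the callers before we get
	 * here.
	 */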
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct,cs):
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct,ss):
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
		return IA32_PAGE_OFFSET - 3;
#endif
	return TASK_SIZE64 - 7;
}

#endif /* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}

static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct,fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR. To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

/*
 * This function is trivial and will be inlined by the compiler.
 * Having it separates the implementation details of debug
 * registers from the interface details of ptrace.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
	switch (n) {
	case 0:	return child->thread.debugreg0;
	case 1:	return child->thread.debugreg1;
	case 2:	return child->thread.debugreg2;
	case 3:	return child->thread.debugreg3;
	case 6:	return child->thread.debugreg6;
	case 7:	return child->thread.debugreg7;
	}
	return 0;
}

static int ptrace_set_debugreg(struct task_struct *child,
			       int n, unsigned long data)
{
	int i;

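	/* Debug registers 4 and 5 are reserved; refuse writes to them. */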
	if (unlikely(n == 4 || n == 5))
		return -EIO;

	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
		return -EIO;

	switch (n) {
	case 0:	child->thread.debugreg0 = data; break;
	case 1:	child->thread.debugreg1 = data; break;
	case 2:	child->thread.debugreg2 = data; break;
	case 3:	child->thread.debugreg3 = data; break;

	case 6:
		if ((data & ~0xffffffffUL) != 0)
			return -EIO;
		child->thread.debugreg6 = data;
		break;

	case 7:
		/*
		 * Sanity-check data. Take one half-byte at once with
		 * check = (val >> (16 + 4*i)) & 0xf. It contains the
		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
		 * 2 and 3 are LENi. Given a list of invalid values,
		 * we do mask |= 1 << invalid_value, so that
		 * (mask >> check) & 1 is a correct test for invalid
		 * values.
		 *
		 * R/Wi contains the type of the breakpoint /
		 * watchpoint, LENi contains the length of the watched
		 * data in the watchpoint case.
		 *
		 * The invalid values are:
		 * - LENi == 0x10 (undefined), so mask |= 0x0f00.	[32-bit]
		 * - R/Wi == 0x10 (break on I/O reads or writes), so
		 *   mask |= 0x4444.
		 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
		 *   0x1110.
		 *
		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
		 *
		 * See the Intel Manual "System Programming Guide",
		 * 15.2.4
		 *
		 * Note that LENi == 0x10 is defined on x86_64 in long
		 * mode (i.e. even for 32-bit userspace software, but
		 * 64-bit kernel), so the x86_64 mask value is 0x5554
		 * (0x4444 | 0x1110).
		 * See the AMD manual no. 24593 (AMD64 System Programming)
		 */
#ifdef CONFIG_X86_32
#define DR7_MASK	0x5f54
#else
#define DR7_MASK	0x5554
#endif
		data &= ~DR_CONTROL_RESERVED;
		for (i = 0; i < 4; i++)
			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
				return -EIO;
		child->thread.debugreg7 = data;
		if (data)
			set_tsk_thread_flag(child, TIF_DEBUG);
		else
			clear_tsk_thread_flag(child, TIF_DEBUG);
		break;
	}

	return 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int i, ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0; /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
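			/*
			 * addr is now a byte offset into u_debugreg[];
			 * convert it to a debug register number.
			 */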
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
		if (!access_ok(VERIFY_WRITE, datap, sizeof(struct user_regs_struct))) {
			ret = -EIO;
			break;
		}
		for (i = 0; i < sizeof(struct user_regs_struct); i += sizeof(long)) {
			__put_user(getreg(child, i), datap);
			datap++;
		}
		ret = 0;
		break;
	}

	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
		unsigned long tmp;
		if (!access_ok(VERIFY_READ, datap, sizeof(struct user_regs_struct))) {
			ret = -EIO;
			break;
		}
		for (i = 0; i < sizeof(struct user_regs_struct); i += sizeof(long)) {
			__get_user(tmp, datap);
			putreg(child, i, tmp);
			datap++;
		}
		ret = 0;
		break;
	}

	case PTRACE_GETFPREGS: { /* Get the child FPU state. */
		if (!access_ok(VERIFY_WRITE, datap,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		ret = 0;
		if (!tsk_used_math(child))
			init_fpu(child);
		get_fpregs((struct user_i387_struct __user *)data, child);
		break;
	}

	case PTRACE_SETFPREGS: { /* Set the child FPU state. */
		if (!access_ok(VERIFY_READ, datap,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		set_stopped_child_used_math(child);
		set_fpregs(child, (struct user_i387_struct __user *)data);
		ret = 0;
		break;
	}

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */
		if (!access_ok(VERIFY_WRITE, datap,
			       sizeof(struct user_fxsr_struct))) {
			ret = -EIO;
			break;
		}
		if (!tsk_used_math(child))
			init_fpu(child);
		ret = get_fpxregs((struct user_fxsr_struct __user *)data, child);
		break;
	}

	case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */
		if (!access_ok(VERIFY_READ, datap,
			       sizeof(struct user_fxsr_struct))) {
			ret = -EIO;
			break;
		}
		set_stopped_child_used_math(child);
		ret = set_fpxregs(child, (struct user_fxsr_struct __user *)data);
		break;
	}
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *) data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/* normal 64bit interface to access TLS data.
	   Works just like arch_prctl, except that the arguments
	   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/fpu32.h>
#include <asm/user32.h>

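/*
 * Map an offset within the 32-bit struct user32 onto the corresponding
 * 64-bit pt_regs field (R32) or segment register accessor (SEG32).
 */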
#define R32(l,q)						\
	case offsetof(struct user32, regs.l):			\
		regs->q = value; break

#define SEG32(rs)						\
	case offsetof(struct user32, regs.rs):			\
		return set_segment_reg(child,			\
				       offsetof(struct user_regs_struct, rs), \
				       value);			\
		break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)						\
	case offsetof(struct user32, regs.l):			\
		*val = regs->q; break

#define SEG32(rs)						\
	case offsetof(struct user32, regs.rs):			\
		*val = get_segment_reg(child,			\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

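/*
 * Translate between the compat 32-bit siginfo layout and the native
 * siginfo_t that sys_ptrace() expects, bouncing through a scratch
 * buffer in compat user space.
 */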
static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
{
	siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t));
	compat_siginfo_t __user *si32 = compat_ptr(data);
	siginfo_t ssi;
	int ret;

	if (request == PTRACE_SETSIGINFO) {
		memset(&ssi, 0, sizeof(siginfo_t));
		ret = copy_siginfo_from_user32(&ssi, si32);
		if (ret)
			return ret;
		if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
			return -EFAULT;
	}
	ret = sys_ptrace(request, pid, addr, (unsigned long)si);
	if (ret)
		return ret;
	if (request == PTRACE_GETSIGINFO) {
		if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
			return -EFAULT;
		ret = copy_siginfo_to_user32(si32, &ssi);
	}
	return ret;
}

asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
{
	struct task_struct *child;
	struct pt_regs *childregs;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_TRACEME:
	case PTRACE_ATTACH:
	case PTRACE_KILL:
	case PTRACE_CONT:
	case PTRACE_SINGLESTEP:
	case PTRACE_SINGLEBLOCK:
	case PTRACE_DETACH:
	case PTRACE_SYSCALL:
	case PTRACE_OLDSETOPTIONS:
	case PTRACE_SETOPTIONS:
	case PTRACE_SET_THREAD_AREA:
	case PTRACE_GET_THREAD_AREA:
		return sys_ptrace(request, pid, addr, data);

	default:
		return -EINVAL;

	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
	case PTRACE_POKEDATA:
	case PTRACE_POKETEXT:
	case PTRACE_POKEUSR:
	case PTRACE_PEEKUSR:
	case PTRACE_GETREGS:
	case PTRACE_SETREGS:
	case PTRACE_SETFPREGS:
	case PTRACE_GETFPREGS:
	case PTRACE_SETFPXREGS:
	case PTRACE_GETFPXREGS:
	case PTRACE_GETEVENTMSG:
		break;

	case PTRACE_SETSIGINFO:
	case PTRACE_GETSIGINFO:
		return ptrace32_siginfo(request, pid, addr, data);
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out;

	childregs = task_pt_regs(child);

	switch (request) {
	case PTRACE_PEEKDATA:
	case PTRACE_PEEKTEXT:
		ret = 0;
		if (access_process_vm(child, addr, &val, sizeof(u32), 0) !=
		    sizeof(u32))
			ret = -EIO;
		else
			ret = put_user(val, (unsigned int __user *)datap);
		break;

	case PTRACE_POKEDATA:
	case PTRACE_POKETEXT:
		ret = 0;
		if (access_process_vm(child, addr, &data, sizeof(u32), 1) !=
		    sizeof(u32))
			ret = -EIO;
		break;

	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
		int i;

		if (!access_ok(VERIFY_WRITE, datap, 16*4)) {
			ret = -EIO;
			break;
		}
		ret = 0;
		for (i = 0; i < sizeof(struct user_regs_struct32); i += sizeof(__u32)) {
			getreg32(child, i, &val);
			ret |= __put_user(val, (u32 __user *)datap);
			datap += sizeof(u32);
		}
		break;
	}

	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
		unsigned long tmp;
		int i;

		if (!access_ok(VERIFY_READ, datap, 16*4)) {
			ret = -EIO;
			break;
		}
		ret = 0;
		for (i = 0; i < sizeof(struct user_regs_struct32); i += sizeof(u32)) {
			ret |= __get_user(tmp, (u32 __user *)datap);
			putreg32(child, i, tmp);
			datap += sizeof(u32);
		}
		break;
	}

	case PTRACE_GETFPREGS:
		ret = -EIO;
		if (!access_ok(VERIFY_READ, compat_ptr(data),
			       sizeof(struct user_i387_struct)))
			break;
		save_i387_ia32(child, datap, childregs, 1);
		ret = 0;
		break;

	case PTRACE_SETFPREGS:
		ret = -EIO;
		if (!access_ok(VERIFY_WRITE, datap,
			       sizeof(struct user_i387_struct)))
			break;
		ret = 0;
		/* don't check EFAULT to be bug-to-bug compatible with i386 */
		restore_i387_ia32(child, datap, 1);
		break;

	case PTRACE_GETFPXREGS: {
		struct user32_fxsr_struct __user *u = datap;

		init_fpu(child);
		ret = -EIO;
		if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
			break;
		ret = -EFAULT;
		if (__copy_to_user(u, &child->thread.i387.fxsave, sizeof(*u)))
			break;
		ret = __put_user(childregs->cs, &u->fcs);
		ret |= __put_user(child->thread.ds, &u->fos);
		break;
	}
	case PTRACE_SETFPXREGS: {
		struct user32_fxsr_struct __user *u = datap;

		unlazy_fpu(child);
		ret = -EIO;
		if (!access_ok(VERIFY_READ, u, sizeof(*u)))
			break;
		/*
		 * no checking to be bug-to-bug compatible with i386,
		 * but silence the warning
		 */
		if (__copy_from_user(&child->thread.i387.fxsave, u, sizeof(*u)))
			;
		set_stopped_child_used_math(child);
		child->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
		ret = 0;
		break;
	}

	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message,
			       (unsigned int __user *)compat_ptr(data));
		break;

	default:
		BUG();
	}

out:
	put_task_struct(child);
	return ret;
}

#endif /* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_32

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = TRAP_BRKPT;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}

/* notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
__attribute__((regparm(3)))
int do_syscall_trace(struct pt_regs *regs, int entryexit)
{
	int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
	/*
	 * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for
	 * syscall interception.
	 */
	int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
	int ret = 0;

	/* do the secure computing check first */
	if (!entryexit)
		secure_computing(regs->orig_ax);

	if (unlikely(current->audit_context)) {
		if (entryexit)
			audit_syscall_exit(AUDITSC_RESULT(regs->ax),
					   regs->ax);
		/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
		 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
		 * not used, entry.S will call us only on syscall exit, not
		 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
		 * calling send_sigtrap() on syscall entry.
		 *
		 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
		 * is_singlestep is false, despite its name, so we will still do
		 * the correct thing.
		 */
		else if (is_singlestep)
			goto out;
	}

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	/* If a process stops on the 1st tracepoint with SYSCALL_TRACE
	 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
	 * here. We have to check this and return. */
	if (is_sysemu && entryexit)
		return 0;

	/* Fake a debug trap */
	if (is_singlestep)
		send_sigtrap(current, regs, 0);

	if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
		goto out;

	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	/* Note that the debugger could change the result of test_thread_flag! */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use. strace only continues with a signal if the
	 * stopping signal is not SIGTRAP. -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
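	/*
	 * A non-zero return tells the syscall entry code to skip the
	 * actual system call (PTRACE_SYSEMU).
	 */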
	ret = is_sysemu;
out:
	if (unlikely(current->audit_context) && !entryexit)
		audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_ax,
				    regs->bx, regs->cx, regs->dx, regs->si);
	if (ret == 0)
		return 0;

	regs->orig_ax = -1; /* force skip of syscall restarting */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
	return 1;
}

#else /* CONFIG_X86_64 */

static void syscall_trace(struct pt_regs *regs)
{

#if 0
	printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
	       current->comm,
	       regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0),
	       current_thread_info()->flags, current->ptrace);
#endif

	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use. strace only continues with a signal if the
	 * stopping signal is not SIGTRAP. -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}

asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    && (current->ptrace & PT_PTRACED))
		syscall_trace(regs);

	if (unlikely(current->audit_context)) {
		if (test_thread_flag(TIF_IA32)) {
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
		} else {
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
		}
	}
}

asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if ((test_thread_flag(TIF_SYSCALL_TRACE)
	     || test_thread_flag(TIF_SINGLESTEP))
	    && (current->ptrace & PT_PTRACED))
		syscall_trace(regs);
}

#endif /* CONFIG_X86_32 */