/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * BTS tracing
 *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
13 #include <linux/smp.h>
14 #include <linux/errno.h>
15 #include <linux/ptrace.h>
16 #include <linux/user.h>
17 #include <linux/security.h>
18 #include <linux/audit.h>
19 #include <linux/seccomp.h>
20 #include <linux/signal.h>
22 #include <asm/uaccess.h>
23 #include <asm/pgtable.h>
24 #include <asm/system.h>
25 #include <asm/processor.h>
27 #include <asm/debugreg.h>
30 #include <asm/prctl.h>
31 #include <asm/proto.h>
/*
 * The maximal size of a BTS buffer per traced task in number of BTS
 * records.
 */
39 #define PTRACE_BTS_BUFFER_MAX 4000
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */
/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
49 #define FLAG_MASK_32 ((unsigned long) \
50 (X86_EFLAGS_CF | X86_EFLAGS_PF | \
51 X86_EFLAGS_AF | X86_EFLAGS_ZF | \
52 X86_EFLAGS_SF | X86_EFLAGS_TF | \
53 X86_EFLAGS_DF | X86_EFLAGS_OF | \
54 X86_EFLAGS_RF | X86_EFLAGS_AC))
57 * Determines whether a value may be installed in a segment register.
59 static inline bool invalid_selector(u16 value
)
61 return unlikely(value
!= 0 && (value
& SEGMENT_RPL_MASK
) != USER_RPL
);
66 #define FLAG_MASK FLAG_MASK_32
68 static long *pt_regs_access(struct pt_regs
*regs
, unsigned long regno
)
70 BUILD_BUG_ON(offsetof(struct pt_regs
, bx
) != 0);
74 return ®s
->bx
+ regno
;
77 static u16
get_segment_reg(struct task_struct
*task
, unsigned long offset
)
80 * Returning the value truncates it to 16 bits.
83 if (offset
!= offsetof(struct user_regs_struct
, gs
))
84 retval
= *pt_regs_access(task_pt_regs(task
), offset
);
86 retval
= task
->thread
.gs
;
88 savesegment(gs
, retval
);
93 static int set_segment_reg(struct task_struct
*task
,
94 unsigned long offset
, u16 value
)
97 * The value argument was already truncated to 16 bits.
99 if (invalid_selector(value
))
102 if (offset
!= offsetof(struct user_regs_struct
, gs
))
103 *pt_regs_access(task_pt_regs(task
), offset
) = value
;
105 task
->thread
.gs
= value
;
108 * The user-mode %gs is not affected by
109 * kernel entry, so we must update the CPU.
111 loadsegment(gs
, value
);
117 static unsigned long debugreg_addr_limit(struct task_struct
*task
)
119 return TASK_SIZE
- 3;
122 #else /* CONFIG_X86_64 */
124 #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT)
126 static unsigned long *pt_regs_access(struct pt_regs
*regs
, unsigned long offset
)
128 BUILD_BUG_ON(offsetof(struct pt_regs
, r15
) != 0);
129 return ®s
->r15
+ (offset
/ sizeof(regs
->r15
));
132 static u16
get_segment_reg(struct task_struct
*task
, unsigned long offset
)
135 * Returning the value truncates it to 16 bits.
140 case offsetof(struct user_regs_struct
, fs
):
141 if (task
== current
) {
142 /* Older gas can't assemble movq %?s,%r?? */
143 asm("movl %%fs,%0" : "=r" (seg
));
146 return task
->thread
.fsindex
;
147 case offsetof(struct user_regs_struct
, gs
):
148 if (task
== current
) {
149 asm("movl %%gs,%0" : "=r" (seg
));
152 return task
->thread
.gsindex
;
153 case offsetof(struct user_regs_struct
, ds
):
154 if (task
== current
) {
155 asm("movl %%ds,%0" : "=r" (seg
));
158 return task
->thread
.ds
;
159 case offsetof(struct user_regs_struct
, es
):
160 if (task
== current
) {
161 asm("movl %%es,%0" : "=r" (seg
));
164 return task
->thread
.es
;
166 case offsetof(struct user_regs_struct
, cs
):
167 case offsetof(struct user_regs_struct
, ss
):
170 return *pt_regs_access(task_pt_regs(task
), offset
);
173 static int set_segment_reg(struct task_struct
*task
,
174 unsigned long offset
, u16 value
)
177 * The value argument was already truncated to 16 bits.
179 if (invalid_selector(value
))
183 case offsetof(struct user_regs_struct
,fs
):
185 * If this is setting fs as for normal 64-bit use but
186 * setting fs_base has implicitly changed it, leave it.
188 if ((value
== FS_TLS_SEL
&& task
->thread
.fsindex
== 0 &&
189 task
->thread
.fs
!= 0) ||
190 (value
== 0 && task
->thread
.fsindex
== FS_TLS_SEL
&&
191 task
->thread
.fs
== 0))
193 task
->thread
.fsindex
= value
;
195 loadsegment(fs
, task
->thread
.fsindex
);
197 case offsetof(struct user_regs_struct
,gs
):
199 * If this is setting gs as for normal 64-bit use but
200 * setting gs_base has implicitly changed it, leave it.
202 if ((value
== GS_TLS_SEL
&& task
->thread
.gsindex
== 0 &&
203 task
->thread
.gs
!= 0) ||
204 (value
== 0 && task
->thread
.gsindex
== GS_TLS_SEL
&&
205 task
->thread
.gs
== 0))
207 task
->thread
.gsindex
= value
;
209 load_gs_index(task
->thread
.gsindex
);
211 case offsetof(struct user_regs_struct
,ds
):
212 task
->thread
.ds
= value
;
214 loadsegment(ds
, task
->thread
.ds
);
216 case offsetof(struct user_regs_struct
,es
):
217 task
->thread
.es
= value
;
219 loadsegment(es
, task
->thread
.es
);
223 * Can't actually change these in 64-bit mode.
225 case offsetof(struct user_regs_struct
,cs
):
226 #ifdef CONFIG_IA32_EMULATION
227 if (test_tsk_thread_flag(task
, TIF_IA32
))
228 task_pt_regs(task
)->cs
= value
;
231 case offsetof(struct user_regs_struct
,ss
):
232 #ifdef CONFIG_IA32_EMULATION
233 if (test_tsk_thread_flag(task
, TIF_IA32
))
234 task_pt_regs(task
)->ss
= value
;
242 static unsigned long debugreg_addr_limit(struct task_struct
*task
)
244 #ifdef CONFIG_IA32_EMULATION
245 if (test_tsk_thread_flag(task
, TIF_IA32
))
246 return IA32_PAGE_OFFSET
- 3;
248 return TASK_SIZE64
- 7;
251 #endif /* CONFIG_X86_32 */
253 static unsigned long get_flags(struct task_struct
*task
)
255 unsigned long retval
= task_pt_regs(task
)->flags
;
258 * If the debugger set TF, hide it from the readout.
260 if (test_tsk_thread_flag(task
, TIF_FORCED_TF
))
261 retval
&= ~X86_EFLAGS_TF
;
266 static int set_flags(struct task_struct
*task
, unsigned long value
)
268 struct pt_regs
*regs
= task_pt_regs(task
);
271 * If the user value contains TF, mark that
272 * it was not "us" (the debugger) that set it.
273 * If not, make sure it stays set if we had.
275 if (value
& X86_EFLAGS_TF
)
276 clear_tsk_thread_flag(task
, TIF_FORCED_TF
);
277 else if (test_tsk_thread_flag(task
, TIF_FORCED_TF
))
278 value
|= X86_EFLAGS_TF
;
280 regs
->flags
= (regs
->flags
& ~FLAG_MASK
) | (value
& FLAG_MASK
);
285 static int putreg(struct task_struct
*child
,
286 unsigned long offset
, unsigned long value
)
289 case offsetof(struct user_regs_struct
, cs
):
290 case offsetof(struct user_regs_struct
, ds
):
291 case offsetof(struct user_regs_struct
, es
):
292 case offsetof(struct user_regs_struct
, fs
):
293 case offsetof(struct user_regs_struct
, gs
):
294 case offsetof(struct user_regs_struct
, ss
):
295 return set_segment_reg(child
, offset
, value
);
297 case offsetof(struct user_regs_struct
, flags
):
298 return set_flags(child
, value
);
301 case offsetof(struct user_regs_struct
,fs_base
):
302 if (value
>= TASK_SIZE_OF(child
))
305 * When changing the segment base, use do_arch_prctl
306 * to set either thread.fs or thread.fsindex and the
307 * corresponding GDT slot.
309 if (child
->thread
.fs
!= value
)
310 return do_arch_prctl(child
, ARCH_SET_FS
, value
);
312 case offsetof(struct user_regs_struct
,gs_base
):
314 * Exactly the same here as the %fs handling above.
316 if (value
>= TASK_SIZE_OF(child
))
318 if (child
->thread
.gs
!= value
)
319 return do_arch_prctl(child
, ARCH_SET_GS
, value
);
324 *pt_regs_access(task_pt_regs(child
), offset
) = value
;
328 static unsigned long getreg(struct task_struct
*task
, unsigned long offset
)
331 case offsetof(struct user_regs_struct
, cs
):
332 case offsetof(struct user_regs_struct
, ds
):
333 case offsetof(struct user_regs_struct
, es
):
334 case offsetof(struct user_regs_struct
, fs
):
335 case offsetof(struct user_regs_struct
, gs
):
336 case offsetof(struct user_regs_struct
, ss
):
337 return get_segment_reg(task
, offset
);
339 case offsetof(struct user_regs_struct
, flags
):
340 return get_flags(task
);
343 case offsetof(struct user_regs_struct
, fs_base
): {
345 * do_arch_prctl may have used a GDT slot instead of
346 * the MSR. To userland, it appears the same either
347 * way, except the %fs segment selector might not be 0.
349 unsigned int seg
= task
->thread
.fsindex
;
350 if (task
->thread
.fs
!= 0)
351 return task
->thread
.fs
;
353 asm("movl %%fs,%0" : "=r" (seg
));
354 if (seg
!= FS_TLS_SEL
)
356 return get_desc_base(&task
->thread
.tls_array
[FS_TLS
]);
358 case offsetof(struct user_regs_struct
, gs_base
): {
360 * Exactly the same here as the %fs handling above.
362 unsigned int seg
= task
->thread
.gsindex
;
363 if (task
->thread
.gs
!= 0)
364 return task
->thread
.gs
;
366 asm("movl %%gs,%0" : "=r" (seg
));
367 if (seg
!= GS_TLS_SEL
)
369 return get_desc_base(&task
->thread
.tls_array
[GS_TLS
]);
374 return *pt_regs_access(task_pt_regs(task
), offset
);
378 * This function is trivial and will be inlined by the compiler.
379 * Having it separates the implementation details of debug
380 * registers from the interface details of ptrace.
382 static unsigned long ptrace_get_debugreg(struct task_struct
*child
, int n
)
385 case 0: return child
->thread
.debugreg0
;
386 case 1: return child
->thread
.debugreg1
;
387 case 2: return child
->thread
.debugreg2
;
388 case 3: return child
->thread
.debugreg3
;
389 case 6: return child
->thread
.debugreg6
;
390 case 7: return child
->thread
.debugreg7
;
395 static int ptrace_set_debugreg(struct task_struct
*child
,
396 int n
, unsigned long data
)
400 if (unlikely(n
== 4 || n
== 5))
403 if (n
< 4 && unlikely(data
>= debugreg_addr_limit(child
)))
407 case 0: child
->thread
.debugreg0
= data
; break;
408 case 1: child
->thread
.debugreg1
= data
; break;
409 case 2: child
->thread
.debugreg2
= data
; break;
410 case 3: child
->thread
.debugreg3
= data
; break;
413 if ((data
& ~0xffffffffUL
) != 0)
415 child
->thread
.debugreg6
= data
;
420 * Sanity-check data. Take one half-byte at once with
421 * check = (val >> (16 + 4*i)) & 0xf. It contains the
422 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
423 * 2 and 3 are LENi. Given a list of invalid values,
424 * we do mask |= 1 << invalid_value, so that
425 * (mask >> check) & 1 is a correct test for invalid
428 * R/Wi contains the type of the breakpoint /
429 * watchpoint, LENi contains the length of the watched
430 * data in the watchpoint case.
432 * The invalid values are:
433 * - LENi == 0x10 (undefined), so mask |= 0x0f00. [32-bit]
434 * - R/Wi == 0x10 (break on I/O reads or writes), so
436 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
439 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
441 * See the Intel Manual "System Programming Guide",
444 * Note that LENi == 0x10 is defined on x86_64 in long
445 * mode (i.e. even for 32-bit userspace software, but
446 * 64-bit kernel), so the x86_64 mask value is 0x5454.
447 * See the AMD manual no. 24593 (AMD64 System Programming)
450 #define DR7_MASK 0x5f54
452 #define DR7_MASK 0x5554
454 data
&= ~DR_CONTROL_RESERVED
;
455 for (i
= 0; i
< 4; i
++)
456 if ((DR7_MASK
>> ((data
>> (16 + 4*i
)) & 0xf)) & 1)
458 child
->thread
.debugreg7
= data
;
460 set_tsk_thread_flag(child
, TIF_DEBUG
);
462 clear_tsk_thread_flag(child
, TIF_DEBUG
);
469 static int ptrace_bts_max_buffer_size(void)
471 return PTRACE_BTS_BUFFER_MAX
;
474 static int ptrace_bts_get_buffer_size(struct task_struct
*child
)
476 if (!child
->thread
.ds_area_msr
)
479 return ds_get_bts_size((void *)child
->thread
.ds_area_msr
);
482 static int ptrace_bts_get_index(struct task_struct
*child
)
484 if (!child
->thread
.ds_area_msr
)
487 return ds_get_bts_index((void *)child
->thread
.ds_area_msr
);
490 static int ptrace_bts_read_record(struct task_struct
*child
,
492 struct bts_struct __user
*out
)
494 struct bts_struct ret
;
497 if (!child
->thread
.ds_area_msr
)
500 retval
= ds_read_bts((void *)child
->thread
.ds_area_msr
,
505 if (copy_to_user(out
, &ret
, sizeof(ret
)))
511 static int ptrace_bts_write_record(struct task_struct
*child
,
512 const struct bts_struct
*in
)
516 if (!child
->thread
.ds_area_msr
)
519 retval
= ds_write_bts((void *)child
->thread
.ds_area_msr
, in
);
526 static int ptrace_bts_config(struct task_struct
*child
,
527 unsigned long options
)
529 unsigned long debugctl_mask
= ds_debugctl_mask();
532 retval
= ptrace_bts_get_buffer_size(child
);
538 if (options
& PTRACE_BTS_O_TRACE_TASK
) {
539 child
->thread
.debugctlmsr
|= debugctl_mask
;
540 set_tsk_thread_flag(child
, TIF_DEBUGCTLMSR
);
542 /* there is no way for us to check whether we 'own'
543 * the respective bits in the DEBUGCTL MSR, we're
545 child
->thread
.debugctlmsr
&= ~debugctl_mask
;
547 if (!child
->thread
.debugctlmsr
)
548 clear_tsk_thread_flag(child
, TIF_DEBUGCTLMSR
);
551 if (options
& PTRACE_BTS_O_TIMESTAMPS
)
552 set_tsk_thread_flag(child
, TIF_BTS_TRACE_TS
);
554 clear_tsk_thread_flag(child
, TIF_BTS_TRACE_TS
);
559 static int ptrace_bts_status(struct task_struct
*child
)
561 unsigned long debugctl_mask
= ds_debugctl_mask();
562 int retval
, status
= 0;
564 retval
= ptrace_bts_get_buffer_size(child
);
570 if (ptrace_bts_get_buffer_size(child
) <= 0)
573 if (test_tsk_thread_flag(child
, TIF_DEBUGCTLMSR
) &&
574 child
->thread
.debugctlmsr
& debugctl_mask
)
575 status
|= PTRACE_BTS_O_TRACE_TASK
;
576 if (test_tsk_thread_flag(child
, TIF_BTS_TRACE_TS
))
577 status
|= PTRACE_BTS_O_TIMESTAMPS
;
582 static int ptrace_bts_allocate_bts(struct task_struct
*child
,
588 if (size_in_records
< 0)
591 if (size_in_records
> ptrace_bts_max_buffer_size())
594 if (size_in_records
== 0) {
595 ptrace_bts_config(child
, /* options = */ 0);
597 retval
= ds_allocate(&ds
, size_in_records
);
602 if (child
->thread
.ds_area_msr
)
603 ds_free((void **)&child
->thread
.ds_area_msr
);
605 child
->thread
.ds_area_msr
= (unsigned long)ds
;
606 if (child
->thread
.ds_area_msr
)
607 set_tsk_thread_flag(child
, TIF_DS_AREA_MSR
);
609 clear_tsk_thread_flag(child
, TIF_DS_AREA_MSR
);
614 void ptrace_bts_take_timestamp(struct task_struct
*tsk
,
615 enum bts_qualifier qualifier
)
617 struct bts_struct rec
= {
618 .qualifier
= qualifier
,
619 .variant
.timestamp
= sched_clock()
622 if (ptrace_bts_get_buffer_size(tsk
) <= 0)
625 ptrace_bts_write_record(tsk
, &rec
);
629 * Called by kernel/ptrace.c when detaching..
631 * Make sure the single step bit is not set.
633 void ptrace_disable(struct task_struct
*child
)
635 user_disable_single_step(child
);
636 #ifdef TIF_SYSCALL_EMU
637 clear_tsk_thread_flag(child
, TIF_SYSCALL_EMU
);
639 ptrace_bts_config(child
, /* options = */ 0);
640 if (child
->thread
.ds_area_msr
) {
641 ds_free((void **)&child
->thread
.ds_area_msr
);
642 clear_tsk_thread_flag(child
, TIF_DS_AREA_MSR
);
646 long arch_ptrace(struct task_struct
*child
, long request
, long addr
, long data
)
649 unsigned long __user
*datap
= (unsigned long __user
*)data
;
652 /* when I and D space are separate, these will need to be fixed. */
653 case PTRACE_PEEKTEXT
: /* read word at location addr. */
654 case PTRACE_PEEKDATA
:
655 ret
= generic_ptrace_peekdata(child
, addr
, data
);
658 /* read the word at location addr in the USER area. */
659 case PTRACE_PEEKUSR
: {
663 if ((addr
& (sizeof(data
) - 1)) || addr
< 0 ||
664 addr
>= sizeof(struct user
))
667 tmp
= 0; /* Default return condition */
668 if (addr
< sizeof(struct user_regs_struct
))
669 tmp
= getreg(child
, addr
);
670 else if (addr
>= offsetof(struct user
, u_debugreg
[0]) &&
671 addr
<= offsetof(struct user
, u_debugreg
[7])) {
672 addr
-= offsetof(struct user
, u_debugreg
[0]);
673 tmp
= ptrace_get_debugreg(child
, addr
/ sizeof(data
));
675 ret
= put_user(tmp
, datap
);
679 /* when I and D space are separate, this will have to be fixed. */
680 case PTRACE_POKETEXT
: /* write the word at location addr. */
681 case PTRACE_POKEDATA
:
682 ret
= generic_ptrace_pokedata(child
, addr
, data
);
685 case PTRACE_POKEUSR
: /* write the word at location addr in the USER area */
687 if ((addr
& (sizeof(data
) - 1)) || addr
< 0 ||
688 addr
>= sizeof(struct user
))
691 if (addr
< sizeof(struct user_regs_struct
))
692 ret
= putreg(child
, addr
, data
);
693 else if (addr
>= offsetof(struct user
, u_debugreg
[0]) &&
694 addr
<= offsetof(struct user
, u_debugreg
[7])) {
695 addr
-= offsetof(struct user
, u_debugreg
[0]);
696 ret
= ptrace_set_debugreg(child
,
697 addr
/ sizeof(data
), data
);
701 case PTRACE_GETREGS
: { /* Get all gp regs from the child. */
702 if (!access_ok(VERIFY_WRITE
, datap
, sizeof(struct user_regs_struct
))) {
706 for (i
= 0; i
< sizeof(struct user_regs_struct
); i
+= sizeof(long)) {
707 __put_user(getreg(child
, i
), datap
);
714 case PTRACE_SETREGS
: { /* Set all gp regs in the child. */
716 if (!access_ok(VERIFY_READ
, datap
, sizeof(struct user_regs_struct
))) {
720 for (i
= 0; i
< sizeof(struct user_regs_struct
); i
+= sizeof(long)) {
721 __get_user(tmp
, datap
);
722 putreg(child
, i
, tmp
);
729 case PTRACE_GETFPREGS
: { /* Get the child FPU state. */
730 if (!access_ok(VERIFY_WRITE
, datap
,
731 sizeof(struct user_i387_struct
))) {
736 if (!tsk_used_math(child
))
738 get_fpregs((struct user_i387_struct __user
*)data
, child
);
742 case PTRACE_SETFPREGS
: { /* Set the child FPU state. */
743 if (!access_ok(VERIFY_READ
, datap
,
744 sizeof(struct user_i387_struct
))) {
748 set_stopped_child_used_math(child
);
749 set_fpregs(child
, (struct user_i387_struct __user
*)data
);
755 case PTRACE_GETFPXREGS
: { /* Get the child extended FPU state. */
756 if (!access_ok(VERIFY_WRITE
, datap
,
757 sizeof(struct user_fxsr_struct
))) {
761 if (!tsk_used_math(child
))
763 ret
= get_fpxregs((struct user_fxsr_struct __user
*)data
, child
);
767 case PTRACE_SETFPXREGS
: { /* Set the child extended FPU state. */
768 if (!access_ok(VERIFY_READ
, datap
,
769 sizeof(struct user_fxsr_struct
))) {
773 set_stopped_child_used_math(child
);
774 ret
= set_fpxregs(child
, (struct user_fxsr_struct __user
*)data
);
779 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
780 case PTRACE_GET_THREAD_AREA
:
783 ret
= do_get_thread_area(child
, addr
,
784 (struct user_desc __user
*) data
);
787 case PTRACE_SET_THREAD_AREA
:
790 ret
= do_set_thread_area(child
, addr
,
791 (struct user_desc __user
*) data
, 0);
796 /* normal 64bit interface to access TLS data.
797 Works just like arch_prctl, except that the arguments
799 case PTRACE_ARCH_PRCTL
:
800 ret
= do_arch_prctl(child
, data
, addr
);
804 case PTRACE_BTS_MAX_BUFFER_SIZE
:
805 ret
= ptrace_bts_max_buffer_size();
808 case PTRACE_BTS_ALLOCATE_BUFFER
:
809 ret
= ptrace_bts_allocate_bts(child
, data
);
812 case PTRACE_BTS_GET_BUFFER_SIZE
:
813 ret
= ptrace_bts_get_buffer_size(child
);
816 case PTRACE_BTS_GET_INDEX
:
817 ret
= ptrace_bts_get_index(child
);
820 case PTRACE_BTS_READ_RECORD
:
821 ret
= ptrace_bts_read_record
823 (struct bts_struct __user
*) addr
);
826 case PTRACE_BTS_CONFIG
:
827 ret
= ptrace_bts_config(child
, data
);
830 case PTRACE_BTS_STATUS
:
831 ret
= ptrace_bts_status(child
);
835 ret
= ptrace_request(child
, request
, addr
, data
);
842 #ifdef CONFIG_IA32_EMULATION
844 #include <linux/compat.h>
845 #include <linux/syscalls.h>
846 #include <asm/ia32.h>
847 #include <asm/fpu32.h>
848 #include <asm/user32.h>
851 case offsetof(struct user32, regs.l): \
852 regs->q = value; break
855 case offsetof(struct user32, regs.rs): \
856 return set_segment_reg(child, \
857 offsetof(struct user_regs_struct, rs), \
861 static int putreg32(struct task_struct
*child
, unsigned regno
, u32 value
)
863 struct pt_regs
*regs
= task_pt_regs(child
);
881 R32(orig_eax
, orig_ax
);
885 case offsetof(struct user32
, regs
.eflags
):
886 return set_flags(child
, value
);
888 case offsetof(struct user32
, u_debugreg
[0]) ...
889 offsetof(struct user32
, u_debugreg
[7]):
890 regno
-= offsetof(struct user32
, u_debugreg
[0]);
891 return ptrace_set_debugreg(child
, regno
/ 4, value
);
894 if (regno
> sizeof(struct user32
) || (regno
& 3))
898 * Other dummy fields in the virtual user structure
910 case offsetof(struct user32, regs.l): \
911 *val = regs->q; break
914 case offsetof(struct user32, regs.rs): \
915 *val = get_segment_reg(child, \
916 offsetof(struct user_regs_struct, rs)); \
919 static int getreg32(struct task_struct
*child
, unsigned regno
, u32
*val
)
921 struct pt_regs
*regs
= task_pt_regs(child
);
939 R32(orig_eax
, orig_ax
);
943 case offsetof(struct user32
, regs
.eflags
):
944 *val
= get_flags(child
);
947 case offsetof(struct user32
, u_debugreg
[0]) ...
948 offsetof(struct user32
, u_debugreg
[7]):
949 regno
-= offsetof(struct user32
, u_debugreg
[0]);
950 *val
= ptrace_get_debugreg(child
, regno
/ 4);
954 if (regno
> sizeof(struct user32
) || (regno
& 3))
958 * Other dummy fields in the virtual user structure
970 static long ptrace32_siginfo(unsigned request
, u32 pid
, u32 addr
, u32 data
)
972 siginfo_t __user
*si
= compat_alloc_user_space(sizeof(siginfo_t
));
973 compat_siginfo_t __user
*si32
= compat_ptr(data
);
977 if (request
== PTRACE_SETSIGINFO
) {
978 memset(&ssi
, 0, sizeof(siginfo_t
));
979 ret
= copy_siginfo_from_user32(&ssi
, si32
);
982 if (copy_to_user(si
, &ssi
, sizeof(siginfo_t
)))
985 ret
= sys_ptrace(request
, pid
, addr
, (unsigned long)si
);
988 if (request
== PTRACE_GETSIGINFO
) {
989 if (copy_from_user(&ssi
, si
, sizeof(siginfo_t
)))
991 ret
= copy_siginfo_to_user32(si32
, &ssi
);
996 asmlinkage
long sys32_ptrace(long request
, u32 pid
, u32 addr
, u32 data
)
998 struct task_struct
*child
;
999 struct pt_regs
*childregs
;
1000 void __user
*datap
= compat_ptr(data
);
1005 case PTRACE_TRACEME
:
1009 case PTRACE_SINGLESTEP
:
1010 case PTRACE_SINGLEBLOCK
:
1012 case PTRACE_SYSCALL
:
1013 case PTRACE_OLDSETOPTIONS
:
1014 case PTRACE_SETOPTIONS
:
1015 case PTRACE_SET_THREAD_AREA
:
1016 case PTRACE_GET_THREAD_AREA
:
1017 case PTRACE_BTS_MAX_BUFFER_SIZE
:
1018 case PTRACE_BTS_ALLOCATE_BUFFER
:
1019 case PTRACE_BTS_GET_BUFFER_SIZE
:
1020 case PTRACE_BTS_GET_INDEX
:
1021 case PTRACE_BTS_READ_RECORD
:
1022 case PTRACE_BTS_CONFIG
:
1023 case PTRACE_BTS_STATUS
:
1024 return sys_ptrace(request
, pid
, addr
, data
);
1029 case PTRACE_PEEKTEXT
:
1030 case PTRACE_PEEKDATA
:
1031 case PTRACE_POKEDATA
:
1032 case PTRACE_POKETEXT
:
1033 case PTRACE_POKEUSR
:
1034 case PTRACE_PEEKUSR
:
1035 case PTRACE_GETREGS
:
1036 case PTRACE_SETREGS
:
1037 case PTRACE_SETFPREGS
:
1038 case PTRACE_GETFPREGS
:
1039 case PTRACE_SETFPXREGS
:
1040 case PTRACE_GETFPXREGS
:
1041 case PTRACE_GETEVENTMSG
:
1044 case PTRACE_SETSIGINFO
:
1045 case PTRACE_GETSIGINFO
:
1046 return ptrace32_siginfo(request
, pid
, addr
, data
);
1049 child
= ptrace_get_task_struct(pid
);
1051 return PTR_ERR(child
);
1053 ret
= ptrace_check_attach(child
, request
== PTRACE_KILL
);
1057 childregs
= task_pt_regs(child
);
1060 case PTRACE_PEEKDATA
:
1061 case PTRACE_PEEKTEXT
:
1063 if (access_process_vm(child
, addr
, &val
, sizeof(u32
), 0) !=
1067 ret
= put_user(val
, (unsigned int __user
*)datap
);
1070 case PTRACE_POKEDATA
:
1071 case PTRACE_POKETEXT
:
1073 if (access_process_vm(child
, addr
, &data
, sizeof(u32
), 1) !=
1078 case PTRACE_PEEKUSR
:
1079 ret
= getreg32(child
, addr
, &val
);
1081 ret
= put_user(val
, (__u32 __user
*)datap
);
1084 case PTRACE_POKEUSR
:
1085 ret
= putreg32(child
, addr
, data
);
1088 case PTRACE_GETREGS
: { /* Get all gp regs from the child. */
1091 if (!access_ok(VERIFY_WRITE
, datap
, 16*4)) {
1096 for (i
= 0; i
< sizeof(struct user_regs_struct32
); i
+= sizeof(__u32
)) {
1097 getreg32(child
, i
, &val
);
1098 ret
|= __put_user(val
, (u32 __user
*)datap
);
1099 datap
+= sizeof(u32
);
1104 case PTRACE_SETREGS
: { /* Set all gp regs in the child. */
1108 if (!access_ok(VERIFY_READ
, datap
, 16*4)) {
1113 for (i
= 0; i
< sizeof(struct user_regs_struct32
); i
+= sizeof(u32
)) {
1114 ret
|= __get_user(tmp
, (u32 __user
*)datap
);
1115 putreg32(child
, i
, tmp
);
1116 datap
+= sizeof(u32
);
1121 case PTRACE_GETFPREGS
:
1123 if (!access_ok(VERIFY_READ
, compat_ptr(data
),
1124 sizeof(struct user_i387_struct
)))
1126 save_i387_ia32(child
, datap
, childregs
, 1);
1130 case PTRACE_SETFPREGS
:
1132 if (!access_ok(VERIFY_WRITE
, datap
,
1133 sizeof(struct user_i387_struct
)))
1136 /* don't check EFAULT to be bug-to-bug compatible to i386 */
1137 restore_i387_ia32(child
, datap
, 1);
1140 case PTRACE_GETFPXREGS
: {
1141 struct user32_fxsr_struct __user
*u
= datap
;
1145 if (!access_ok(VERIFY_WRITE
, u
, sizeof(*u
)))
1148 if (__copy_to_user(u
, &child
->thread
.i387
.fxsave
, sizeof(*u
)))
1150 ret
= __put_user(childregs
->cs
, &u
->fcs
);
1151 ret
|= __put_user(child
->thread
.ds
, &u
->fos
);
1154 case PTRACE_SETFPXREGS
: {
1155 struct user32_fxsr_struct __user
*u
= datap
;
1159 if (!access_ok(VERIFY_READ
, u
, sizeof(*u
)))
1162 * no checking to be bug-to-bug compatible with i386.
1163 * but silence warning
1165 if (__copy_from_user(&child
->thread
.i387
.fxsave
, u
, sizeof(*u
)))
1167 set_stopped_child_used_math(child
);
1168 child
->thread
.i387
.fxsave
.mxcsr
&= mxcsr_feature_mask
;
1173 case PTRACE_GETEVENTMSG
:
1174 ret
= put_user(child
->ptrace_message
,
1175 (unsigned int __user
*)compat_ptr(data
));
1183 put_task_struct(child
);
1187 #endif /* CONFIG_IA32_EMULATION */
1189 #ifdef CONFIG_X86_32
1191 void send_sigtrap(struct task_struct
*tsk
, struct pt_regs
*regs
, int error_code
)
1193 struct siginfo info
;
1195 tsk
->thread
.trap_no
= 1;
1196 tsk
->thread
.error_code
= error_code
;
1198 memset(&info
, 0, sizeof(info
));
1199 info
.si_signo
= SIGTRAP
;
1200 info
.si_code
= TRAP_BRKPT
;
1203 info
.si_addr
= user_mode_vm(regs
) ? (void __user
*) regs
->ip
: NULL
;
1205 /* Send us the fake SIGTRAP */
1206 force_sig_info(SIGTRAP
, &info
, tsk
);
1209 /* notification of system call entry/exit
1210 * - triggered by current->work.syscall_trace
1212 __attribute__((regparm(3)))
1213 int do_syscall_trace(struct pt_regs
*regs
, int entryexit
)
1215 int is_sysemu
= test_thread_flag(TIF_SYSCALL_EMU
);
1217 * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for syscall
1220 int is_singlestep
= !is_sysemu
&& test_thread_flag(TIF_SINGLESTEP
);
1223 /* do the secure computing check first */
1225 secure_computing(regs
->orig_ax
);
1227 if (unlikely(current
->audit_context
)) {
1229 audit_syscall_exit(AUDITSC_RESULT(regs
->ax
),
1231 /* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
1232 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
1233 * not used, entry.S will call us only on syscall exit, not
1234 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
1235 * calling send_sigtrap() on syscall entry.
1237 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
1238 * is_singlestep is false, despite his name, so we will still do
1239 * the correct thing.
1241 else if (is_singlestep
)
1245 if (!(current
->ptrace
& PT_PTRACED
))
1248 /* If a process stops on the 1st tracepoint with SYSCALL_TRACE
1249 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
1250 * here. We have to check this and return */
1251 if (is_sysemu
&& entryexit
)
1254 /* Fake a debug trap */
1256 send_sigtrap(current
, regs
, 0);
1258 if (!test_thread_flag(TIF_SYSCALL_TRACE
) && !is_sysemu
)
1261 /* the 0x80 provides a way for the tracing parent to distinguish
1262 between a syscall stop and SIGTRAP delivery */
1263 /* Note that the debugger could change the result of test_thread_flag!*/
1264 ptrace_notify(SIGTRAP
| ((current
->ptrace
& PT_TRACESYSGOOD
) ? 0x80:0));
1267 * this isn't the same as continuing with a signal, but it will do
1268 * for normal use. strace only continues with a signal if the
1269 * stopping signal is not SIGTRAP. -brl
1271 if (current
->exit_code
) {
1272 send_sig(current
->exit_code
, current
, 1);
1273 current
->exit_code
= 0;
1277 if (unlikely(current
->audit_context
) && !entryexit
)
1278 audit_syscall_entry(AUDIT_ARCH_I386
, regs
->orig_ax
,
1279 regs
->bx
, regs
->cx
, regs
->dx
, regs
->si
);
1283 regs
->orig_ax
= -1; /* force skip of syscall restarting */
1284 if (unlikely(current
->audit_context
))
1285 audit_syscall_exit(AUDITSC_RESULT(regs
->ax
), regs
->ax
);
1289 #else /* CONFIG_X86_64 */
1291 static void syscall_trace(struct pt_regs
*regs
)
1295 printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
1297 regs
->ip
, regs
->sp
, regs
->ax
, regs
->orig_ax
, __builtin_return_address(0),
1298 current_thread_info()->flags
, current
->ptrace
);
1301 ptrace_notify(SIGTRAP
| ((current
->ptrace
& PT_TRACESYSGOOD
)
1304 * this isn't the same as continuing with a signal, but it will do
1305 * for normal use. strace only continues with a signal if the
1306 * stopping signal is not SIGTRAP. -brl
1308 if (current
->exit_code
) {
1309 send_sig(current
->exit_code
, current
, 1);
1310 current
->exit_code
= 0;
1314 asmlinkage
void syscall_trace_enter(struct pt_regs
*regs
)
1316 /* do the secure computing check first */
1317 secure_computing(regs
->orig_ax
);
1319 if (test_thread_flag(TIF_SYSCALL_TRACE
)
1320 && (current
->ptrace
& PT_PTRACED
))
1321 syscall_trace(regs
);
1323 if (unlikely(current
->audit_context
)) {
1324 if (test_thread_flag(TIF_IA32
)) {
1325 audit_syscall_entry(AUDIT_ARCH_I386
,
1328 regs
->dx
, regs
->si
);
1330 audit_syscall_entry(AUDIT_ARCH_X86_64
,
1333 regs
->dx
, regs
->r10
);
1338 asmlinkage
void syscall_trace_leave(struct pt_regs
*regs
)
1340 if (unlikely(current
->audit_context
))
1341 audit_syscall_exit(AUDITSC_RESULT(regs
->ax
), regs
->ax
);
1343 if ((test_thread_flag(TIF_SYSCALL_TRACE
)
1344 || test_thread_flag(TIF_SINGLESTEP
))
1345 && (current
->ptrace
& PT_PTRACED
))
1346 syscall_trace(regs
);
1349 #endif /* CONFIG_X86_32 */