#ifndef ASM_X86__PTRACE_H
#define ASM_X86__PTRACE_H

#include <linux/compiler.h> /* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>

#ifdef __KERNEL__
#include <asm/ds.h> /* the DS BTS struct is used for ptrace too */
#include <asm/segment.h>
#endif

#ifndef __ASSEMBLY__

#ifdef __i386__
/* This struct defines the way the registers are stored on the
   stack during a system call. */

#ifndef __KERNEL__

struct pt_regs {
        long ebx;
        long ecx;
        long edx;
        long esi;
        long edi;
        long ebp;
        long eax;
        int xds;
        int xes;
        int xfs;
        /* int gs; */
        long orig_eax;
        long eip;
        int xcs;
        long eflags;
        long esp;
        int xss;
};

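/*
 * Illustrative sketch, not part of this header: a user-space tracer on
 * i386 can read one saved register of a stopped tracee with
 * PTRACE_PEEKUSER.  The glibc struct user_regs_struct in <sys/user.h>
 * mirrors the layout above (with xgs also present between xfs and
 * orig_eax), so its field offsets can be passed as the ptrace address:
 *
 *	#include <stddef.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/user.h>
 *
 *	long peek_orig_eax(pid_t pid)
 *	{
 *		return ptrace(PTRACE_PEEKUSER, pid,
 *			      (void *)offsetof(struct user_regs_struct, orig_eax),
 *			      NULL);
 *	}
 *
 * pid is assumed to be an already-attached tracee in a ptrace stop.
 */
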
#else /* __KERNEL__ */

struct pt_regs {
        unsigned long bx;
        unsigned long cx;
        unsigned long dx;
        unsigned long si;
        unsigned long di;
        unsigned long bp;
        unsigned long ax;
        unsigned long ds;
        unsigned long es;
        unsigned long fs;
        /* int gs; */
        unsigned long orig_ax;
        unsigned long ip;
        unsigned long cs;
        unsigned long flags;
        unsigned long sp;
        unsigned long ss;
};

#endif /* __KERNEL__ */

#else /* __i386__ */

#ifndef __KERNEL__

struct pt_regs {
        unsigned long r15;
        unsigned long r14;
        unsigned long r13;
        unsigned long r12;
        unsigned long rbp;
        unsigned long rbx;
/* arguments: non-interrupt/non-tracing syscalls only save up to here */
        unsigned long r11;
        unsigned long r10;
        unsigned long r9;
        unsigned long r8;
        unsigned long rax;
        unsigned long rcx;
        unsigned long rdx;
        unsigned long rsi;
        unsigned long rdi;
        unsigned long orig_rax;
/* end of arguments */
/* cpu exception frame or undefined */
        unsigned long rip;
        unsigned long cs;
        unsigned long eflags;
        unsigned long rsp;
        unsigned long ss;
/* top of stack page */
};

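/*
 * Illustrative sketch, not part of this header: on x86-64 a user-space
 * tracer usually fetches the whole register set at once.  The glibc
 * struct user_regs_struct from <sys/user.h> begins with exactly the
 * fields above (r15 ... ss), followed by fs_base/gs_base and the data
 * segment registers.  At a syscall-entry stop, orig_rax is the syscall
 * number and rdi is its first argument:
 *
 *	#include <stdio.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/user.h>
 *
 *	void show_syscall(pid_t pid)
 *	{
 *		struct user_regs_struct regs;
 *
 *		ptrace(PTRACE_GETREGS, pid, NULL, &regs);
 *		printf("syscall %llu(arg0=%#llx, ...)\n",
 *		       regs.orig_rax, regs.rdi);
 *	}
 *
 * pid is assumed to be an already-attached tracee stopped at syscall entry.
 */
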
#else /* __KERNEL__ */

struct pt_regs {
        unsigned long r15;
        unsigned long r14;
        unsigned long r13;
        unsigned long r12;
        unsigned long bp;
        unsigned long bx;
/* arguments: non-interrupt/non-tracing syscalls only save up to here */
        unsigned long r11;
        unsigned long r10;
        unsigned long r9;
        unsigned long r8;
        unsigned long ax;
        unsigned long cx;
        unsigned long dx;
        unsigned long si;
        unsigned long di;
        unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
        unsigned long ip;
        unsigned long cs;
        unsigned long flags;
        unsigned long sp;
        unsigned long ss;
/* top of stack page */
};

#endif /* __KERNEL__ */
#endif /* !__i386__ */

#ifdef __KERNEL__

/* the DS BTS struct is used for ptrace as well */
#include <asm/ds.h>

struct task_struct;

extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);

extern unsigned long profile_pc(struct pt_regs *regs);

extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);

#ifdef CONFIG_X86_32
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
                         int error_code);
#else
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
#endif

extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
        return regs->ax;
}

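/*
 * Usage note, an assumption about typical callers rather than anything
 * defined here: after a system call has returned, the saved ax holds its
 * result, so exit-side tracing can tell success from failure roughly as:
 *
 *	long ret = (long)regs_return_value(regs);
 *
 *	if (ret < 0 && ret >= -MAX_ERRNO)
 *		... the call failed and -ret is the errno value ...
 *
 * (MAX_ERRNO here is the generic constant from <linux/err.h>.)
 */
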
/*
 * user_mode_vm(regs) determines whether a register set came from user mode.
 * This is true if V8086 mode was enabled OR if the register set was from
 * protected mode with RPL-3 CS value. This tricky test checks that with
 * one comparison. Many places in the kernel can bypass this full check
 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
        return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
        return !!(regs->cs & 3);
#endif
}

static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
        return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
                USER_RPL;
#else
        return user_mode(regs);
#endif
}
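
/*
 * Worked example of the single-comparison test above, using the constants
 * it relies on (SEGMENT_RPL_MASK and USER_RPL are both 3; with vm86 support
 * configured, X86_VM_MASK is the EFLAGS VM bit, 0x00020000):
 *
 *   - trap from kernel mode: (cs & 3) == 0 and VM clear -> 0 | 0 = 0, < 3
 *   - trap from user mode:   (cs & 3) == 3 and VM clear -> 3 | 0 = 3, >= 3
 *   - trap from vm86 mode:   cs holds a real-mode value whose low bits are
 *     meaningless, but VM is set -> result >= 0x20000, >= 3
 *
 * so one ">= USER_RPL" comparison covers both the protected-mode and the
 * V8086 case.
 */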
184
185 static inline int v8086_mode(struct pt_regs *regs)
186 {
187 #ifdef CONFIG_X86_32
188 return (regs->flags & X86_VM_MASK);
189 #else
190 return 0; /* No V86 mode support in long mode */
191 #endif
192 }
193
/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps, so the pt_regs frame is built on the stack that was in
 * use and the regs pointer itself serves as the current sp.
 *
 * This is valid only for kernel mode traps.
 */
static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
        return (unsigned long)regs;
#else
        return regs->sp;
#endif
}

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
        return regs->ip;
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
        return regs->bp;
}
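
/*
 * Sketch of how the frame pointer is typically consumed (an illustration,
 * not an interface defined here): with CONFIG_FRAME_POINTER each frame
 * starts with the saved caller bp, followed by the return address, so a
 * backtrace can be produced by chasing bp:
 *
 *	unsigned long bp = frame_pointer(regs);
 *
 *	while (bp) {
 *		unsigned long ret_addr = *((unsigned long *)bp + 1);
 *		... record ret_addr ...
 *		bp = *(unsigned long *)bp;
 *	}
 *
 * Real unwinders additionally check that bp stays within the task's stack.
 */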

/*
 * These are defined as per linux/ptrace.h; see that header.
 */
#define arch_has_single_step()	(1)
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);

extern void user_enable_block_step(struct task_struct *);
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step()	(1)
#else
#define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
#endif
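
/*
 * User-visible counterpart, as a sketch of how these hooks are reached
 * rather than anything declared here: the generic ptrace code uses
 * arch_has_single_step()/arch_has_block_step() to accept PTRACE_SINGLESTEP
 * and PTRACE_SINGLEBLOCK, which end up calling the user_*_step() helpers
 * above.  A debugger drives them roughly like this:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *
 *	int step_once(pid_t pid)
 *	{
 *		int status;
 *
 *		if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) < 0)
 *			return -1;
 *		waitpid(pid, &status, 0);
 *		return status;
 *	}
 *
 * waitpid() reports a SIGTRAP stop once the tracee has executed a single
 * instruction (or reached the next control transfer for PTRACE_SINGLEBLOCK).
 */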

struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
                              struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
                              struct user_desc __user *info, int can_allocate);

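/*
 * Sketch of the user-space side, stated as an assumption about how these
 * helpers are reached rather than something this header declares: they
 * back the get_thread_area()/set_thread_area() syscalls and the
 * PTRACE_GET_THREAD_AREA/PTRACE_SET_THREAD_AREA requests (provided by
 * <asm/ptrace-abi.h>, or by a sufficiently recent <sys/ptrace.h>), where
 * the ptrace addr argument selects the GDT TLS entry and struct user_desc
 * comes from <asm/ldt.h>:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <asm/ldt.h>
 *
 *	long read_tls_entry(pid_t pid, int idx, struct user_desc *desc)
 *	{
 *		return ptrace(PTRACE_GET_THREAD_AREA, pid,
 *			      (void *)(long)idx, desc);
 *	}
 */
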
#define __ARCH_WANT_COMPAT_SYS_PTRACE

#endif /* __KERNEL__ */

#endif /* !__ASSEMBLY__ */

#endif /* ASM_X86__PTRACE_H */