/*
 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 * these modifications are Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/regset.h>
#include <linux/signal.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
#include <asm/dma.h>
#include <asm/fixed_code.h>
#include <asm/cacheflush.h>
#include <asm/mem_map.h>
#include <asm/mmu_context.h>

/*
 * This code does not yet catch signals sent when the child dies;
 * that is handled in exit.c or in signal.c.
 */

/*
 * Get contents of register REGNO in task TASK.
 */
static inline long
get_reg(struct task_struct *task, unsigned long regno,
	unsigned long __user *datap)
{
	long tmp;
	struct pt_regs *regs = task_pt_regs(task);

	if (regno & 3 || regno > PT_LAST_PSEUDO)
		return -EIO;

	switch (regno) {
	case PT_TEXT_ADDR:
		tmp = task->mm->start_code;
		break;
	case PT_TEXT_END_ADDR:
		tmp = task->mm->end_code;
		break;
	case PT_DATA_ADDR:
		tmp = task->mm->start_data;
		break;
	case PT_USP:
		tmp = task->thread.usp;
		break;
	default:
		if (regno < sizeof(*regs)) {
			void *reg_ptr = regs;
			tmp = *(long *)(reg_ptr + regno);
		} else
			return -EIO;
	}

	return put_user(tmp, datap);
}
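
/*
 * Illustrative sketch (not part of the original file): get_reg() is reached
 * through PTRACE_PEEKUSR, where addr is the byte offset of the register
 * within struct pt_regs or one of the PT_* pseudo registers.  The pid
 * variable below is hypothetical; the glibc ptrace() wrapper returns the
 * peeked value directly and reports failure via errno:
 *
 *	errno = 0;
 *	long pc = ptrace(PTRACE_PEEKUSR, pid, (void *)PT_PC, NULL);
 *	if (pc == -1 && errno)
 *		perror("PTRACE_PEEKUSR");
 */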

/*
 * Write contents of register REGNO in task TASK.
 */
static inline int
put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
{
	struct pt_regs *regs = task_pt_regs(task);

	if (regno & 3 || regno > PT_LAST_PSEUDO)
		return -EIO;

	switch (regno) {
	case PT_PC:
		/*
		 * At this point the kernel is most likely in an exception
		 * handler.  The RETX register will be used to populate the
		 * PC of the process.
		 */
		regs->retx = data;
		regs->pc = data;
		break;
	case PT_RETX:
		break;		/* regs->retx = data; break; */
	case PT_USP:
		regs->usp = data;
		task->thread.usp = data;
		break;
	case PT_SYSCFG:	/* don't let userspace screw with this */
		if ((data & ~1) != 0x6)
			pr_warning("ptrace: ignore syscfg write of %#lx\n", data);
		break;		/* regs->syscfg = data; break; */
	default:
		if (regno < sizeof(*regs)) {
			void *reg_offset = regs;
			*(long *)(reg_offset + regno) = data;
		}
		/* Ignore writes to pseudo registers */
	}

	return 0;
}
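
/*
 * Illustrative sketch (not part of the original file): put_reg() is reached
 * through PTRACE_POKEUSR.  Writing PT_PC updates both RETX and the saved PC,
 * so the new value takes effect when the traced process resumes from the
 * exception.  The pid and new_pc below are hypothetical:
 *
 *	if (ptrace(PTRACE_POKEUSR, pid, (void *)PT_PC, (void *)new_pc) < 0)
 *		perror("PTRACE_POKEUSR");
 */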

/*
 * check that an address falls within the bounds of the target process's
 * memory mappings
 */
int
is_user_addr_valid(struct task_struct *child, unsigned long start, unsigned long len)
{
	bool valid;
	struct vm_area_struct *vma;
	struct sram_list_struct *sraml;

	/* overflow */
	if (start + len < start)
		return -EIO;

	down_read(&child->mm->mmap_sem);
	vma = find_vma(child->mm, start);
	valid = vma && start >= vma->vm_start && start + len <= vma->vm_end;
	up_read(&child->mm->mmap_sem);
	if (valid)
		return 0;

	for (sraml = child->mm->context.sram_list; sraml; sraml = sraml->next)
		if (start >= (unsigned long)sraml->addr
		    && start + len < (unsigned long)sraml->addr + sraml->length)
			return 0;

	if (start >= FIXED_CODE_START && start + len < FIXED_CODE_END)
		return 0;

#ifdef CONFIG_APP_STACK_L1
	if (child->mm->context.l1_stack_save)
		if (start >= (unsigned long)l1_stack_base &&
		    start + len < (unsigned long)l1_stack_base + l1_stack_len)
			return 0;
#endif

	return -EIO;
}
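
/*
 * Note: only ranges that fall entirely within a single VMA, a tracked SRAM
 * allocation, the fixed-code region, or (with CONFIG_APP_STACK_L1) the saved
 * L1 stack area pass the checks above; anything else is rejected with -EIO.
 */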

/*
 * retrieve the contents of Blackfin userspace general registers
 */
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	/* This sucks ... */
	regs->usp = target->thread.usp;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs, 0, sizeof(*regs));
	if (ret < 0)
		return ret;

	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					sizeof(*regs), -1);
}

/*
 * update the contents of the Blackfin userspace general registers
 */
static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	/* Don't let people set SYSCFG (it's at the end of pt_regs) */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs, 0, PT_SYSCFG);
	if (ret < 0)
		return ret;

	/* This sucks ... */
	target->thread.usp = regs->usp;
	/* regs->retx = regs->pc; */

	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 PT_SYSCFG, -1);
}

/*
 * Define the register sets available on the Blackfin under Linux
 */
enum bfin_regset {
	REGSET_GENERAL,
};

static const struct user_regset bfin_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct pt_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = genregs_get,
		.set = genregs_set,
	},
};

static const struct user_regset_view user_bfin_native_view = {
	.name = "Blackfin",
	.e_machine = EM_BLACKFIN,
	.regsets = bfin_regsets,
	.n = ARRAY_SIZE(bfin_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_bfin_native_view;
}
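
/*
 * Illustrative sketch (not part of the original file): this regset view backs
 * PTRACE_GETREGS/PTRACE_SETREGS below and the NT_PRSTATUS note in ELF core
 * dumps.  From the tracer's side the whole general register set can be
 * fetched in one call; pid and the user-side pt_regs definition below are
 * hypothetical:
 *
 *	struct pt_regs regs;
 *	if (ptrace(PTRACE_GETREGS, pid, NULL, &regs) < 0)
 *		perror("PTRACE_GETREGS");
 */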

void user_enable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = task_pt_regs(child);
	regs->syscfg |= SYSCFG_SSSTEP;

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = task_pt_regs(child);
	regs->syscfg &= ~SYSCFG_SSSTEP;

	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
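
/*
 * Illustrative sketch (not part of the original file): these hooks back
 * PTRACE_SINGLESTEP, which sets SYSCFG_SSSTEP so the child traps after one
 * instruction.  A hypothetical tracer loop (pid and status are assumed):
 *
 *	if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) < 0)
 *		perror("PTRACE_SINGLESTEP");
 *	waitpid(pid, &status, 0);
 */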

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;
	void *paddr = (void *)addr;

	switch (request) {
		/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKDATA:
		pr_debug("ptrace: PEEKDATA\n");
		/* fall through */
	case PTRACE_PEEKTEXT:	/* read word at location addr. */
	{
		unsigned long tmp = 0;
		int copied = 0, to_copy = sizeof(tmp);

		ret = -EIO;
		pr_debug("ptrace: PEEKTEXT at addr 0x%08lx + %i\n", addr, to_copy);
		if (is_user_addr_valid(child, addr, to_copy) < 0)
			break;
		pr_debug("ptrace: user address is valid\n");

		switch (bfin_mem_access_type(addr, to_copy)) {
		case BFIN_MEM_ACCESS_CORE:
		case BFIN_MEM_ACCESS_CORE_ONLY:
			copied = ptrace_access_vm(child, addr, &tmp,
						  to_copy, FOLL_FORCE);
			if (copied)
				break;

			/* hrm, why didn't that work ... maybe no mapping */
			if (addr >= FIXED_CODE_START &&
			    addr + to_copy <= FIXED_CODE_END) {
				copy_from_user_page(0, 0, 0, &tmp, paddr, to_copy);
				copied = to_copy;
			} else if (addr >= BOOT_ROM_START) {
				memcpy(&tmp, paddr, to_copy);
				copied = to_copy;
			}

			break;
		case BFIN_MEM_ACCESS_DMA:
			if (safe_dma_memcpy(&tmp, paddr, to_copy))
				copied = to_copy;
			break;
		case BFIN_MEM_ACCESS_ITEST:
			if (isram_memcpy(&tmp, paddr, to_copy))
				copied = to_copy;
			break;
		default:
			copied = 0;
			break;
		}

		pr_debug("ptrace: copied size %d [0x%08lx]\n", copied, tmp);
		if (copied == to_copy)
			ret = put_user(tmp, datap);
		break;
	}

		/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKEDATA:
		pr_debug("ptrace: PTRACE_POKEDATA\n");
		/* fall through */
	case PTRACE_POKETEXT:	/* write the word at location addr. */
	{
		int copied = 0, to_copy = sizeof(data);

		ret = -EIO;
		pr_debug("ptrace: POKETEXT at addr 0x%08lx + %i bytes %lx\n",
			 addr, to_copy, data);
		if (is_user_addr_valid(child, addr, to_copy) < 0)
			break;
		pr_debug("ptrace: user address is valid\n");

		switch (bfin_mem_access_type(addr, to_copy)) {
		case BFIN_MEM_ACCESS_CORE:
		case BFIN_MEM_ACCESS_CORE_ONLY:
			copied = ptrace_access_vm(child, addr, &data,
						  to_copy,
						  FOLL_FORCE | FOLL_WRITE);
			break;
		case BFIN_MEM_ACCESS_DMA:
			if (safe_dma_memcpy(paddr, &data, to_copy))
				copied = to_copy;
			break;
		case BFIN_MEM_ACCESS_ITEST:
			if (isram_memcpy(paddr, &data, to_copy))
				copied = to_copy;
			break;
		default:
			copied = 0;
			break;
		}

		pr_debug("ptrace: copied size %d\n", copied);
		if (copied == to_copy)
			ret = 0;
		break;
	}

	case PTRACE_PEEKUSR:
		switch (addr) {
#ifdef CONFIG_BINFMT_ELF_FDPIC	/* backwards compat */
		case PT_FDPIC_EXEC:
			request = PTRACE_GETFDPIC;
			addr = PTRACE_GETFDPIC_EXEC;
			goto case_default;
		case PT_FDPIC_INTERP:
			request = PTRACE_GETFDPIC;
			addr = PTRACE_GETFDPIC_INTERP;
			goto case_default;
#endif
		default:
			ret = get_reg(child, addr, datap);
		}
		pr_debug("ptrace: PEEKUSR reg %li with %#lx = %i\n", addr, data, ret);
		break;

	case PTRACE_POKEUSR:
		ret = put_reg(child, addr, data);
		pr_debug("ptrace: POKEUSR reg %li with %li = %i\n", addr, data, ret);
		break;

	case PTRACE_GETREGS:
		pr_debug("ptrace: PTRACE_GETREGS\n");
		return copy_regset_to_user(child, &user_bfin_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);

	case PTRACE_SETREGS:
		pr_debug("ptrace: PTRACE_SETREGS\n");
		return copy_regset_from_user(child, &user_bfin_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);

	case_default:
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
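
/*
 * Illustrative sketch (not part of the original file): the PEEKTEXT/PEEKDATA
 * paths above read one word from the traced process, trying ptrace_access_vm()
 * first and falling back to direct or DMA copies for on-chip memory.  A
 * hypothetical tracer reads a word like this (the glibc wrapper returns the
 * value and reports failure via errno):
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKTEXT, pid, (void *)addr, NULL);
 *	if (word == -1 && errno)
 *		perror("PTRACE_PEEKTEXT");
 */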

asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	int ret = 0;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ret = tracehook_report_syscall_entry(regs);

	return ret;
}

asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}