]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - arch/s390/kernel/ptrace.c
sched/headers: Prepare for new header dependencies before moving code to <linux/sched...
[mirror_ubuntu-bionic-kernel.git] / arch / s390 / kernel / ptrace.c
1 /*
2 * Ptrace user space interface.
3 *
4 * Copyright IBM Corp. 1999, 2010
5 * Author(s): Denis Joseph Barrow
6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/sched/task_stack.h>
12 #include <linux/mm.h>
13 #include <linux/smp.h>
14 #include <linux/errno.h>
15 #include <linux/ptrace.h>
16 #include <linux/user.h>
17 #include <linux/security.h>
18 #include <linux/audit.h>
19 #include <linux/signal.h>
20 #include <linux/elf.h>
21 #include <linux/regset.h>
22 #include <linux/tracehook.h>
23 #include <linux/seccomp.h>
24 #include <linux/compat.h>
25 #include <trace/syscall.h>
26 #include <asm/segment.h>
27 #include <asm/page.h>
28 #include <asm/pgtable.h>
29 #include <asm/pgalloc.h>
30 #include <linux/uaccess.h>
31 #include <asm/unistd.h>
32 #include <asm/switch_to.h>
33 #include "entry.h"
34
35 #ifdef CONFIG_COMPAT
36 #include "compat_ptrace.h"
37 #endif
38
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/syscalls.h>
41
/*
 * Recompute the PER control registers and the PSW PER bit for @task.
 *
 * Merges the tracer-specified PER (Program Event Recording) set in
 * task->thread.per_user with the kernel's single-step / block-step
 * requests (TIF_SINGLE_STEP, TIF_BLOCK_STEP, TIF_UPROBE_SINGLESTEP)
 * and loads the result into control registers 9-11.  Also toggles the
 * transactional-execution control bits in CR0/CR2 according to
 * task->thread.per_flags.
 *
 * NOTE(review): loads control registers of the running CPU, so this
 * presumably must run on the CPU that will resume @task — confirm
 * against the callers in entry.S / signal code.
 */
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;

	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		unsigned long cr, cr_new;

		__ctl_store(cr, 0, 0);
		/* Set or clear transaction execution TXC bit 8. */
		cr_new = cr | (1UL << 55);
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr_new &= ~(1UL << 55);
		if (cr_new != cr)
			__ctl_load(cr_new, 0, 0);
		/* Set or clear transaction execution TDC bits 62 and 63. */
		__ctl_store(cr, 2, 2);
		cr_new = cr & ~3UL;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr_new |= 1UL; /* TDC=1: abort at TEND */
			else
				cr_new |= 2UL; /* TDC=2: abort at random instruction */
		}
		if (cr_new != cr)
			__ctl_load(cr_new, 2, 2);
	}
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
			new.control |= PER_EVENT_IFETCH;
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control |= PER_EVENT_IFETCH;
		/* Stepping covers the whole address space. */
		new.start = 0;
		new.end = -1UL;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		/* No events requested: disable PER entirely. */
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	/* Avoid the (expensive) control register load if nothing changed. */
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}
101
/*
 * Request instruction-granular single stepping for @task.
 * The flags are picked up by update_cr_regs() when @task resumes.
 */
void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
107
/*
 * Cancel both single stepping and block stepping for @task.
 */
void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
113
/*
 * Request block (branch-granular) stepping for @task.  Both flags are
 * set: update_cr_regs() selects PER_EVENT_BRANCH when TIF_BLOCK_STEP
 * accompanies TIF_SINGLE_STEP.
 */
void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}
119
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	/* Forget the user-specified PER set and the last recorded event. */
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	/* Drop any pending PER trap so it is not delivered after detach. */
	clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
	task->thread.per_flags = 0;
}
133
134 #define __ADDR_MASK 7
135
/*
 * Read one word from the emulated per_info area of the USER struct.
 *
 * @addr is the offset into struct per_struct_kernel; the NULL-based
 * &dummy-> accesses below are a hand-rolled offsetof() to dispatch on
 * the field being read.  While single stepping is active the "active"
 * per set reported to the debugger is faked (ifetch events over the
 * whole address space) rather than the user-specified one.
 * Unknown offsets read as zero.
 */
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			-1UL : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}
176
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 *
 * @addr must already be range/alignment checked by the caller
 * (peek_user); this helper only dispatches on the struct user layout.
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;	/* offsetof-style layout probe only */
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;
		/* fpc is 32 bit; present it in the upper half of the word. */
		tmp <<= BITS_PER_LONG - 32;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves: vxrs stride is 2x. */
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}
262
263 static int
264 peek_user(struct task_struct *child, addr_t addr, addr_t data)
265 {
266 addr_t tmp, mask;
267
268 /*
269 * Stupid gdb peeks/pokes the access registers in 64 bit with
270 * an alignment of 4. Programmers from hell...
271 */
272 mask = __ADDR_MASK;
273 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
274 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
275 mask = 3;
276 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
277 return -EIO;
278
279 tmp = __peek_user(child, addr);
280 return put_user(tmp, (addr_t __user *) data);
281 }
282
/*
 * Write one word into the emulated per_info area of the USER struct.
 * @addr is the offset into struct per_struct_kernel (NULL-based
 * &dummy-> is an offsetof substitute for field dispatch).
 */
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
311
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register needs to get checked for validity.
 *
 * @addr must already be range/alignment checked by the caller
 * (poke_user).  Returns 0 on success or -EINVAL for an invalid PSW
 * mask / fpc value.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;	/* offsetof-style layout probe only */
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			if ((data ^ PSW_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				/* Invalid addressing mode bits */
				return -EINVAL;
		}
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		/* fpc lives in the upper 32 bit; the lower half must be 0. */
		if ((unsigned int) data != 0 ||
		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
			return -EINVAL;
		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves: vxrs stride is 2x. */
			*(addr_t *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = data;
		else
			*(addr_t *)((addr_t)
				child->thread.fpu.fprs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);

	}

	return 0;
}
405
406 static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
407 {
408 addr_t mask;
409
410 /*
411 * Stupid gdb peeks/pokes the access registers in 64 bit with
412 * an alignment of 4. Programmers from hell indeed...
413 */
414 mask = __ADDR_MASK;
415 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
416 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
417 mask = 3;
418 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
419 return -EIO;
420
421 return __poke_user(child, addr, data);
422 }
423
/*
 * s390-specific ptrace requests; everything not handled here is passed
 * on to the generic ptrace_request().
 *
 * Handles PEEKUSR/POKEUSR (single word), PEEKUSR_AREA/POKEUSR_AREA
 * (a ptrace_area descriptor naming a range of words), the last-break
 * address query and the transactional-execution tuning requests.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		/* addr points to a ptrace_area describing the transfer. */
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;	/* offset in USER area */
		data = parea.process_addr;	/* buffer in tracer space */
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		/*
		 * NOTE(review): put_user's return value is ignored, so a
		 * bad @data pointer reports success instead of -EFAULT.
		 * Changing this would alter the user-visible ABI - confirm
		 * before "fixing".
		 */
		put_user(child->thread.last_break,
			 (unsigned long __user *) data);
		return 0;
	case PTRACE_ENABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags &= ~PER_FLAG_NO_TE;
		return 0;
	case PTRACE_DISABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags |= PER_FLAG_NO_TE;
		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
		return 0;
	case PTRACE_TE_ABORT_RAND:
		if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
			return -EIO;
		switch (data) {
		case 0UL:
			/* no random transaction aborts */
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
			break;
		case 1UL:
			/* random aborts at TEND */
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		case 2UL:
			/* random aborts at any instruction */
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	default:
		return ptrace_request(child, request, addr, data);
	}
}
502
503 #ifdef CONFIG_COMPAT
504 /*
505 * Now the fun part starts... a 31 bit program running in the
506 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
507 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
508 * to handle, the difference to the 64 bit versions of the requests
509 * is that the access is done in multiples of 4 byte instead of
510 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
511 * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
512 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
513 * is a 31 bit program too, the content of struct user can be
514 * emulated. A 31 bit program peeking into the struct user of
515 * a 64 bit program is a no-no.
516 */
517
/*
 * Same as peek_user_per but for a 31 bit program.
 *
 * @addr is the offset into struct compat_per_struct_kernel.  Note the
 * (__u32) casts bind to test_thread_flag(), not to the whole ternary;
 * this is harmless since the flag result is only tested for truth.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}
559
/*
 * Same as peek_user but for a 31 bit program.
 *
 * Maps an offset in struct compat_user to the corresponding 32 bit
 * value, faking a 31 bit PSW from the real 64 bit one.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;	/* layout probe only */
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
			tmp |= PSW32_USER_BITS;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/* gpr 0-15 */
			/* 31 bit slot n maps to the low half of 64 bit gpr n. */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves: vxrs stride is 2x. */
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);

	} else
		tmp = 0;

	return tmp;
}
638
639 static int peek_user_compat(struct task_struct *child,
640 addr_t addr, addr_t data)
641 {
642 __u32 tmp;
643
644 if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
645 return -EIO;
646
647 tmp = __peek_user_compat(child, addr);
648 return put_user(tmp, (__u32 __user *) data);
649 }
650
/*
 * Same as poke_user_per but for a 31 bit program.
 *
 * Only cr9 (event mask), starting_addr and ending_addr are writable;
 * writes to any other per_info field are silently ignored.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
670
/*
 * Same as poke_user but for a 31 bit program.
 *
 * Translates a 32 bit write into struct compat_user onto the real
 * 64 bit task state, rebuilding the 64 bit PSW from the 31 bit
 * mask/address.  Returns 0 or -EINVAL for invalid psw/fpc values.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;	/* layout probe only */
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			__u32 mask = PSW32_MASK_USER;

			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp ^ PSW32_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			/* Keep BA, replace the user bits with the new mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			/* gpr 0-15 */
			/* 31 bit slot n maps to the low half of 64 bit gpr n. */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if (test_fp_ctl(tmp))
			return -EINVAL;
		child->thread.fpu.fpc = data;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves: vxrs stride is 2x. */
			*(__u32 *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = tmp;
		else
			*(__u32 *)((addr_t)
				child->thread.fpu.fprs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}
761
/*
 * Compat PTRACE_POKEUSR: reject non-compat tracers and unaligned or
 * out-of-range offsets, then perform the 32 bit user-area write.
 * NOTE(review): bounds against sizeof(struct compat_user) while
 * peek_user_compat uses sizeof(struct user) - asymmetry is in the
 * original; out-of-range peeks just return 0, so it is harmless.
 */
static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}
771
/*
 * Compat (31 bit tracer) counterpart of arch_ptrace.  Word size is
 * 4 bytes here; everything not handled falls through to the generic
 * compat_ptrace_request().
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	compat_ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		/* addr points to a compat_ptrace_area describing the range. */
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;	/* offset in USER area */
		data = parea.process_addr;	/* buffer in tracer space */
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		/* NOTE(review): put_user return ignored, as in arch_ptrace. */
		put_user(child->thread.last_break,
			 (unsigned int __user *) data);
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
822
/*
 * Syscall-entry tracing hook, called from entry.S.
 *
 * Runs the ptrace syscall-entry report, then seccomp, then the
 * tracepoint and audit hooks.  Returns the (possibly tracer-modified)
 * system call number, or -1 to skip the system call entirely.
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long mask = -1UL;

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		clear_pt_regs_flag(regs, PIF_SYSCALL);
		return -1;
	}

	/* Do the secure computing check after ptrace. */
	if (secure_computing(NULL)) {
		/* seccomp failures shouldn't expose any additional code. */
		return -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	/* Compat tasks only pass 32 bit arguments; mask off the rest. */
	if (is_compat_task())
		mask = 0xffffffff;

	audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
			    regs->gprs[3] &mask, regs->gprs[4] &mask,
			    regs->gprs[5] &mask);

	return regs->gprs[2];
}
861
/*
 * Syscall-exit tracing hook, called from entry.S.  Mirrors
 * do_syscall_trace_enter: audit first, then tracepoint, then the
 * ptrace syscall-exit report.  gprs[2] holds the return value.
 */
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
872
873 /*
874 * user_regset definitions.
875 */
876
/*
 * regset .get for the general register set: stream words out of the
 * USER area via __peek_user, into either a kernel or a user buffer.
 * @pos/@count are byte offsets/lengths; both are multiples of the
 * word size for this regset.
 */
static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	/* Access registers may be live in CPU registers; sync them first. */
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}
903
/*
 * regset .set for the general register set: stream words from a
 * kernel or user buffer into the USER area via __poke_user.  Stops at
 * the first error; on success reloads the access registers if the
 * target is the current task.
 */
static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	/* Access registers may be live in CPU registers; sync them first. */
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	/* Make the new acrs values visible in the CPU registers. */
	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
939
/*
 * regset .get for the floating point registers: assemble an
 * _s390_fp_regs snapshot (fpc + fprs, converted from vxrs when the
 * vector facility is in use) and copy it out.
 */
static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	_s390_fp_regs fp_regs;

	/* FPU state may be live in CPU registers; sync it first. */
	if (target == current)
		save_fpu_regs();

	fp_regs.fpc = target->thread.fpu.fpc;
	fpregs_store(&fp_regs, &target->thread.fpu);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fp_regs, 0, -1);
}
955
/*
 * regset .set for the floating point registers.  If the write covers
 * the fpc it is validated with test_fp_ctl first; the fprs are staged
 * in a local array and written back to either the vxrs overlay or the
 * plain fprs depending on the vector facility.
 */
static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;
	freg_t fprs[__NUM_FPRS];

	/* FPU state may be live in CPU registers; sync it first. */
	if (target == current)
		save_fpu_regs();

	/* Stage the current fprs so a partial write keeps the rest. */
	if (MACHINE_HAS_VX)
		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
	else
		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		/* ufpc[1] is pad; it must stay zero. */
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fpu.fpc = ufpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					fprs, offsetof(s390_fp_regs, fprs), -1);
	if (rc)
		return rc;

	if (MACHINE_HAS_VX)
		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
	else
		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));

	return rc;
}
997
/*
 * regset .get for the last-breaking-event address: a single word,
 * copied to whichever of kbuf/ubuf is provided.
 */
static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       void *kbuf, void __user *ubuf)
{
	if (count > 0) {
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = target->thread.last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(target->thread.last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}
1015
/*
 * regset .set for the last-breaking-event address: the value is
 * hardware-maintained, so writes are deliberately accepted and
 * discarded (returning 0 keeps PTRACE_SETREGSET happy).
 */
static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}
1023
/*
 * regset .get for the transaction diagnostic block (256 bytes).
 * Only valid after a transaction-abort interrupt; int_code bit 0x200
 * flags that the tdb was stored, otherwise -ENODATA.
 */
static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	unsigned char *data;

	if (!(regs->int_code & 0x200))
		return -ENODATA;
	data = target->thread.trap_tdb;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
}
1037
/*
 * regset .set for the transaction diagnostic block: read-only
 * hardware data, so writes are deliberately accepted and discarded.
 */
static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}
1045
1046 static int s390_vxrs_low_get(struct task_struct *target,
1047 const struct user_regset *regset,
1048 unsigned int pos, unsigned int count,
1049 void *kbuf, void __user *ubuf)
1050 {
1051 __u64 vxrs[__NUM_VXRS_LOW];
1052 int i;
1053
1054 if (!MACHINE_HAS_VX)
1055 return -ENODEV;
1056 if (target == current)
1057 save_fpu_regs();
1058 for (i = 0; i < __NUM_VXRS_LOW; i++)
1059 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1060 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1061 }
1062
1063 static int s390_vxrs_low_set(struct task_struct *target,
1064 const struct user_regset *regset,
1065 unsigned int pos, unsigned int count,
1066 const void *kbuf, const void __user *ubuf)
1067 {
1068 __u64 vxrs[__NUM_VXRS_LOW];
1069 int i, rc;
1070
1071 if (!MACHINE_HAS_VX)
1072 return -ENODEV;
1073 if (target == current)
1074 save_fpu_regs();
1075
1076 for (i = 0; i < __NUM_VXRS_LOW; i++)
1077 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1078
1079 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1080 if (rc == 0)
1081 for (i = 0; i < __NUM_VXRS_LOW; i++)
1082 *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];
1083
1084 return rc;
1085 }
1086
1087 static int s390_vxrs_high_get(struct task_struct *target,
1088 const struct user_regset *regset,
1089 unsigned int pos, unsigned int count,
1090 void *kbuf, void __user *ubuf)
1091 {
1092 __vector128 vxrs[__NUM_VXRS_HIGH];
1093
1094 if (!MACHINE_HAS_VX)
1095 return -ENODEV;
1096 if (target == current)
1097 save_fpu_regs();
1098 memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));
1099
1100 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1101 }
1102
1103 static int s390_vxrs_high_set(struct task_struct *target,
1104 const struct user_regset *regset,
1105 unsigned int pos, unsigned int count,
1106 const void *kbuf, const void __user *ubuf)
1107 {
1108 int rc;
1109
1110 if (!MACHINE_HAS_VX)
1111 return -ENODEV;
1112 if (target == current)
1113 save_fpu_regs();
1114
1115 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1116 target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
1117 return rc;
1118 }
1119
1120 static int s390_system_call_get(struct task_struct *target,
1121 const struct user_regset *regset,
1122 unsigned int pos, unsigned int count,
1123 void *kbuf, void __user *ubuf)
1124 {
1125 unsigned int *data = &target->thread.system_call;
1126 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1127 data, 0, sizeof(unsigned int));
1128 }
1129
1130 static int s390_system_call_set(struct task_struct *target,
1131 const struct user_regset *regset,
1132 unsigned int pos, unsigned int count,
1133 const void *kbuf, const void __user *ubuf)
1134 {
1135 unsigned int *data = &target->thread.system_call;
1136 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1137 data, 0, sizeof(unsigned int));
1138 }
1139
/*
 * Regsets exported for the native 64-bit ABI.  The note types define
 * the layout of ELF core dump notes and the data exchanged via
 * PTRACE_GETREGSET/PTRACE_SETREGSET.
 */
static const struct user_regset s390_regsets[] = {
	{
		/* PSW, general purpose and access registers. */
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	{
		/* Floating point control and registers. */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		/* Number of the system call being restarted. */
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		/* Address of the last breaking event (read-only). */
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	{
		/* 256 byte transaction diagnostic block (read-only). */
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		/* Low halves of vector registers V0-V15. */
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		/* Full vector registers V16-V31. */
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
};
1198
/* Regset view for native 64-bit tasks. */
static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
1205
1206 #ifdef CONFIG_COMPAT
1207 static int s390_compat_regs_get(struct task_struct *target,
1208 const struct user_regset *regset,
1209 unsigned int pos, unsigned int count,
1210 void *kbuf, void __user *ubuf)
1211 {
1212 if (target == current)
1213 save_access_regs(target->thread.acrs);
1214
1215 if (kbuf) {
1216 compat_ulong_t *k = kbuf;
1217 while (count > 0) {
1218 *k++ = __peek_user_compat(target, pos);
1219 count -= sizeof(*k);
1220 pos += sizeof(*k);
1221 }
1222 } else {
1223 compat_ulong_t __user *u = ubuf;
1224 while (count > 0) {
1225 if (__put_user(__peek_user_compat(target, pos), u++))
1226 return -EFAULT;
1227 count -= sizeof(*u);
1228 pos += sizeof(*u);
1229 }
1230 }
1231 return 0;
1232 }
1233
/*
 * Write the 31-bit compat user area of @target from a kernel or user
 * buffer, one compat_ulong_t at a time via __poke_user_compat().
 */
static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	/* Snapshot the live access registers before poking new values. */
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	/* Reload access registers so updates take effect immediately. */
	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
1269
1270 static int s390_compat_regs_high_get(struct task_struct *target,
1271 const struct user_regset *regset,
1272 unsigned int pos, unsigned int count,
1273 void *kbuf, void __user *ubuf)
1274 {
1275 compat_ulong_t *gprs_high;
1276
1277 gprs_high = (compat_ulong_t *)
1278 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1279 if (kbuf) {
1280 compat_ulong_t *k = kbuf;
1281 while (count > 0) {
1282 *k++ = *gprs_high;
1283 gprs_high += 2;
1284 count -= sizeof(*k);
1285 }
1286 } else {
1287 compat_ulong_t __user *u = ubuf;
1288 while (count > 0) {
1289 if (__put_user(*gprs_high, u++))
1290 return -EFAULT;
1291 gprs_high += 2;
1292 count -= sizeof(*u);
1293 }
1294 }
1295 return 0;
1296 }
1297
1298 static int s390_compat_regs_high_set(struct task_struct *target,
1299 const struct user_regset *regset,
1300 unsigned int pos, unsigned int count,
1301 const void *kbuf, const void __user *ubuf)
1302 {
1303 compat_ulong_t *gprs_high;
1304 int rc = 0;
1305
1306 gprs_high = (compat_ulong_t *)
1307 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1308 if (kbuf) {
1309 const compat_ulong_t *k = kbuf;
1310 while (count > 0) {
1311 *gprs_high = *k++;
1312 *gprs_high += 2;
1313 count -= sizeof(*k);
1314 }
1315 } else {
1316 const compat_ulong_t __user *u = ubuf;
1317 while (count > 0 && !rc) {
1318 unsigned long word;
1319 rc = __get_user(word, u++);
1320 if (rc)
1321 break;
1322 *gprs_high = word;
1323 *gprs_high += 2;
1324 count -= sizeof(*u);
1325 }
1326 }
1327
1328 return rc;
1329 }
1330
1331 static int s390_compat_last_break_get(struct task_struct *target,
1332 const struct user_regset *regset,
1333 unsigned int pos, unsigned int count,
1334 void *kbuf, void __user *ubuf)
1335 {
1336 compat_ulong_t last_break;
1337
1338 if (count > 0) {
1339 last_break = target->thread.last_break;
1340 if (kbuf) {
1341 unsigned long *k = kbuf;
1342 *k = last_break;
1343 } else {
1344 unsigned long __user *u = ubuf;
1345 if (__put_user(last_break, u))
1346 return -EFAULT;
1347 }
1348 }
1349 return 0;
1350 }
1351
/*
 * The last breaking-event address cannot be modified from user space;
 * silently accept writes so a full-regset restore does not fail here.
 */
static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}
1359
/*
 * Regsets exported for 31-bit compat tasks.  Mirrors s390_regsets with
 * compat-sized entries plus NT_S390_HIGH_GPRS for the upper halves of
 * the 64-bit general purpose registers.
 */
static const struct user_regset s390_compat_regsets[] = {
	{
		/* PSW, general purpose and access registers (31-bit). */
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	{
		/* Floating point control and registers. */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		/* Number of the system call being restarted. */
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		/* Address of the last breaking event (read-only). */
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	{
		/* 256 byte transaction diagnostic block (read-only). */
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		/* Low halves of vector registers V0-V15. */
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		/* Full vector registers V16-V31. */
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		/* Upper halves of the 64-bit general purpose registers. */
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
};
1426
/* Regset view for 31-bit compat tasks. */
static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
1433 #endif
1434
/*
 * Return the regset view matching the task's ABI: the 31-bit compat
 * view for TIF_31BIT tasks, the native 64-bit view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
1443
/* Symbolic names of the 16 general purpose registers, indexed by number. */
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
1448
1449 unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1450 {
1451 if (offset >= NUM_GPRS)
1452 return 0;
1453 return regs->gprs[offset];
1454 }
1455
1456 int regs_query_register_offset(const char *name)
1457 {
1458 unsigned long offset;
1459
1460 if (!name || *name != 'r')
1461 return -EINVAL;
1462 if (kstrtoul(name + 1, 10, &offset))
1463 return -EINVAL;
1464 if (offset >= NUM_GPRS)
1465 return -EINVAL;
1466 return offset;
1467 }
1468
1469 const char *regs_query_register_name(unsigned int offset)
1470 {
1471 if (offset >= NUM_GPRS)
1472 return NULL;
1473 return gpr_names[offset];
1474 }
1475
1476 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1477 {
1478 unsigned long ksp = kernel_stack_pointer(regs);
1479
1480 return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
1481 }
1482
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * Returns the @n th entry of the kernel stack specified by @regs, or
 * 0 if the @n th entry is NOT in the kernel stack.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr = kernel_stack_pointer(regs) + n * sizeof(long);

	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}