1 /*
2 * arch/s390/kernel/ptrace.c
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * Based on PowerPC version
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 *
12 * Derived from "arch/m68k/kernel/ptrace.c"
13 * Copyright (C) 1994 by Hamish Macdonald
14 * Taken from linux/kernel/ptrace.c and modified for M680x0.
15 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
16 *
17 * Modified by Cort Dougan (cort@cs.nmt.edu)
18 *
19 *
20 * This file is subject to the terms and conditions of the GNU General
21 * Public License. See the file README.legal in the main directory of
22 * this archive for more details.
23 */
24
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
27 #include <linux/mm.h>
28 #include <linux/smp.h>
29 #include <linux/smp_lock.h>
30 #include <linux/errno.h>
31 #include <linux/ptrace.h>
32 #include <linux/user.h>
33 #include <linux/security.h>
34 #include <linux/audit.h>
35 #include <linux/signal.h>
36 #include <linux/elf.h>
37 #include <linux/regset.h>
38
39 #include <asm/segment.h>
40 #include <asm/page.h>
41 #include <asm/pgtable.h>
42 #include <asm/pgalloc.h>
43 #include <asm/system.h>
44 #include <asm/uaccess.h>
45 #include <asm/unistd.h>
46 #include "entry.h"
47
48 #ifdef CONFIG_COMPAT
49 #include "compat_ptrace.h"
50 #endif
51
52 enum s390_regset {
53 REGSET_GENERAL,
54 REGSET_FP,
55 };
56
57 static void
58 FixPerRegisters(struct task_struct *task)
59 {
60 struct pt_regs *regs;
61 per_struct *per_info;
62
63 regs = task_pt_regs(task);
64 per_info = (per_struct *) &task->thread.per_info;
65 per_info->control_regs.bits.em_instruction_fetch =
66 per_info->single_step | per_info->instruction_fetch;
67
68 if (per_info->single_step) {
69 per_info->control_regs.bits.starting_addr = 0;
70 #ifdef CONFIG_COMPAT
71 if (test_thread_flag(TIF_31BIT))
72 per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
73 else
74 #endif
75 per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
76 } else {
77 per_info->control_regs.bits.starting_addr =
78 per_info->starting_addr;
79 per_info->control_regs.bits.ending_addr =
80 per_info->ending_addr;
81 }
82 /*
83 * if any of the control reg tracing bits are on
84 * we switch on per in the psw
85 */
86 if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
87 regs->psw.mask |= PSW_MASK_PER;
88 else
89 regs->psw.mask &= ~PSW_MASK_PER;
90
91 if (per_info->control_regs.bits.em_storage_alteration)
92 per_info->control_regs.bits.storage_alt_space_ctl = 1;
93 else
94 per_info->control_regs.bits.storage_alt_space_ctl = 0;
95 }
96
97 void user_enable_single_step(struct task_struct *task)
98 {
99 task->thread.per_info.single_step = 1;
100 FixPerRegisters(task);
101 }
102
103 void user_disable_single_step(struct task_struct *task)
104 {
105 task->thread.per_info.single_step = 0;
106 FixPerRegisters(task);
107 }
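
/*
 * Illustrative sketch, not part of the original file: s390 has no
 * single-step bit in the PSW, so PTRACE_SINGLESTEP is emulated with the
 * PER (program event recording) setup done in FixPerRegisters() above.
 * A tracer only sees the generic ptrace interface; generic code then
 * calls user_enable_single_step()/user_disable_single_step().  Userspace
 * side, assuming a tracee 'pid' already stopped under ptrace:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, 0L, 0L);	// arm PER for one instruction
 *	waitpid(pid, &status, 0);		// tracee stops with SIGTRAP
 */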
108
109 /*
110 * Called by kernel/ptrace.c when detaching..
111 *
112 * Make sure single step bits etc are not set.
113 */
114 void
115 ptrace_disable(struct task_struct *child)
116 {
117 /* make sure the single step bit is not set. */
118 user_disable_single_step(child);
119 }
120
121 #ifndef CONFIG_64BIT
122 # define __ADDR_MASK 3
123 #else
124 # define __ADDR_MASK 7
125 #endif
126
127 /*
128 * Read the word at offset addr from the user area of a process. The
129 * trouble here is that the information is littered over different
130 * locations. The process registers are found on the kernel stack,
131 * the floating point stuff and the trace settings are stored in
132 * the task structure. In addition the different structures in
133 * struct user contain pad bytes that should be read as zeroes.
134 * Lovely...
135 */
136 static unsigned long __peek_user(struct task_struct *child, addr_t addr)
137 {
138 struct user *dummy = NULL;
139 addr_t offset, tmp;
140
141 if (addr < (addr_t) &dummy->regs.acrs) {
142 /*
143 * psw and gprs are stored on the stack
144 */
145 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
146 if (addr == (addr_t) &dummy->regs.psw.mask)
147 /* Remove per bit from user psw. */
148 tmp &= ~PSW_MASK_PER;
149
150 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
151 /*
152 * access registers are stored in the thread structure
153 */
154 offset = addr - (addr_t) &dummy->regs.acrs;
155 #ifdef CONFIG_64BIT
156 /*
157 * Very special case: old & broken 64 bit gdb reading
158 * from acrs[15]. Result is a 64 bit value. Read the
159 * 32 bit acrs[15] value and shift it by 32. Sick...
160 */
161 if (addr == (addr_t) &dummy->regs.acrs[15])
162 tmp = ((unsigned long) child->thread.acrs[15]) << 32;
163 else
164 #endif
165 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
166
167 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
168 /*
169 * orig_gpr2 is stored on the kernel stack
170 */
171 tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
172
173 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
174 /*
175 * floating point regs. are stored in the thread structure
176 */
177 offset = addr - (addr_t) &dummy->regs.fp_regs;
178 tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
179 if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
180 tmp &= (unsigned long) FPC_VALID_MASK
181 << (BITS_PER_LONG - 32);
182
183 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
184 /*
185 * per_info is found in the thread structure
186 */
187 offset = addr - (addr_t) &dummy->regs.per_info;
188 tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);
189
190 } else
191 tmp = 0;
192
193 return tmp;
194 }
195
196 static int
197 peek_user(struct task_struct *child, addr_t addr, addr_t data)
198 {
199 struct user *dummy = NULL;
200 addr_t tmp, mask;
201
202 /*
203 * Stupid gdb peeks/pokes the access registers in 64 bit with
204 * an alignment of 4. Programmers from hell...
205 */
206 mask = __ADDR_MASK;
207 #ifdef CONFIG_64BIT
208 if (addr >= (addr_t) &dummy->regs.acrs &&
209 addr < (addr_t) &dummy->regs.orig_gpr2)
210 mask = 3;
211 #endif
212 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
213 return -EIO;
214
215 tmp = __peek_user(child, addr);
216 return put_user(tmp, (addr_t __user *) data);
217 }
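
/*
 * Illustrative sketch, not part of the original file: a typical consumer
 * of peek_user() is a tracer issuing PTRACE_PEEKUSR with an offset into
 * struct user.  The offsetof() expression assumes the tracer's
 * <sys/user.h> mirrors the kernel layout used above; the glibc ptrace()
 * wrapper returns the peeked word and reports failure through errno.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *	#include <stddef.h>
 *	#include <errno.h>
 *
 *	errno = 0;	// PEEKUSR returns the data itself, so check errno
 *	long gpr2 = ptrace(PTRACE_PEEKUSR, pid,
 *			   offsetof(struct user, regs.gprs[2]), 0L);
 *	if (gpr2 == -1 && errno)
 *		perror("PTRACE_PEEKUSR");
 */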
218
219 /*
220 * Write a word to the user area of a process at location addr. This
221 * operation does have an additional problem compared to peek_user.
222 * Stores to the program status word and to the floating point
223 * control register need to be checked for validity.
224 */
225 static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
226 {
227 struct user *dummy = NULL;
228 addr_t offset;
229
230 if (addr < (addr_t) &dummy->regs.acrs) {
231 /*
232 * psw and gprs are stored on the stack
233 */
234 if (addr == (addr_t) &dummy->regs.psw.mask &&
235 #ifdef CONFIG_COMPAT
236 data != PSW_MASK_MERGE(psw_user32_bits, data) &&
237 #endif
238 data != PSW_MASK_MERGE(psw_user_bits, data))
239 /* Invalid psw mask. */
240 return -EINVAL;
241 #ifndef CONFIG_64BIT
242 if (addr == (addr_t) &dummy->regs.psw.addr)
243 /* I'd like to reject addresses without the
244 high order bit but older gdb's rely on it */
245 data |= PSW_ADDR_AMODE;
246 #endif
247 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
248
249 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
250 /*
251 * access registers are stored in the thread structure
252 */
253 offset = addr - (addr_t) &dummy->regs.acrs;
254 #ifdef CONFIG_64BIT
255 /*
256 * Very special case: old & broken 64 bit gdb writing
257 * to acrs[15] with a 64 bit value. Ignore the lower
258 * half of the value and write the upper 32 bit to
259 * acrs[15]. Sick...
260 */
261 if (addr == (addr_t) &dummy->regs.acrs[15])
262 child->thread.acrs[15] = (unsigned int) (data >> 32);
263 else
264 #endif
265 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
266
267 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
268 /*
269 * orig_gpr2 is stored on the kernel stack
270 */
271 task_pt_regs(child)->orig_gpr2 = data;
272
273 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
274 /*
275 * floating point regs. are stored in the thread structure
276 */
277 if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
278 (data & ~((unsigned long) FPC_VALID_MASK
279 << (BITS_PER_LONG - 32))) != 0)
280 return -EINVAL;
281 offset = addr - (addr_t) &dummy->regs.fp_regs;
282 *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
283
284 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
285 /*
286 * per_info is found in the thread structure
287 */
288 offset = addr - (addr_t) &dummy->regs.per_info;
289 *(addr_t *)((addr_t) &child->thread.per_info + offset) = data;
290
291 }
292
293 FixPerRegisters(child);
294 return 0;
295 }
296
297 static int
298 poke_user(struct task_struct *child, addr_t addr, addr_t data)
299 {
300 struct user *dummy = NULL;
301 addr_t mask;
302
303 /*
304 * Stupid gdb peeks/pokes the access registers in 64 bit with
305 * an alignment of 4. Programmers from hell indeed...
306 */
307 mask = __ADDR_MASK;
308 #ifdef CONFIG_64BIT
309 if (addr >= (addr_t) &dummy->regs.acrs &&
310 addr < (addr_t) &dummy->regs.orig_gpr2)
311 mask = 3;
312 #endif
313 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
314 return -EIO;
315
316 return __poke_user(child, addr, data);
317 }
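
/*
 * Illustrative sketch, not part of the original file: PTRACE_POKEUSR goes
 * through the same alignment/offset checks as PEEKUSR plus the validity
 * tests in __poke_user(), e.g. a psw mask or fpc word with reserved bits
 * set is rejected with -EINVAL.  Clearing the tracee's floating point
 * control word (offset again assumed from the tracer's <sys/user.h>):
 *
 *	if (ptrace(PTRACE_POKEUSR, pid,
 *		   offsetof(struct user, regs.fp_regs.fpc), 0L) < 0)
 *		perror("PTRACE_POKEUSR");
 */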
318
319 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
320 {
321 ptrace_area parea;
322 int copied, ret;
323
324 switch (request) {
325 case PTRACE_PEEKTEXT:
326 case PTRACE_PEEKDATA:
327 /* Remove high order bit from address (only for 31 bit). */
328 addr &= PSW_ADDR_INSN;
329 /* read word at location addr. */
330 return generic_ptrace_peekdata(child, addr, data);
331
332 case PTRACE_PEEKUSR:
333 /* read the word at location addr in the USER area. */
334 return peek_user(child, addr, data);
335
336 case PTRACE_POKETEXT:
337 case PTRACE_POKEDATA:
338 /* Remove high order bit from address (only for 31 bit). */
339 addr &= PSW_ADDR_INSN;
340 /* write the word at location addr. */
341 return generic_ptrace_pokedata(child, addr, data);
342
343 case PTRACE_POKEUSR:
344 /* write the word at location addr in the USER area */
345 return poke_user(child, addr, data);
346
347 case PTRACE_PEEKUSR_AREA:
348 case PTRACE_POKEUSR_AREA:
349 if (copy_from_user(&parea, (void __force __user *) addr,
350 sizeof(parea)))
351 return -EFAULT;
352 addr = parea.kernel_addr;
353 data = parea.process_addr;
354 copied = 0;
355 while (copied < parea.len) {
356 if (request == PTRACE_PEEKUSR_AREA)
357 ret = peek_user(child, addr, data);
358 else {
359 addr_t utmp;
360 if (get_user(utmp,
361 (addr_t __force __user *) data))
362 return -EFAULT;
363 ret = poke_user(child, addr, utmp);
364 }
365 if (ret)
366 return ret;
367 addr += sizeof(unsigned long);
368 data += sizeof(unsigned long);
369 copied += sizeof(unsigned long);
370 }
371 return 0;
372 }
373 return ptrace_request(child, request, addr, data);
374 }
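
/*
 * Illustrative sketch, not part of the original file: PTRACE_PEEKUSR_AREA
 * transfers a whole slice of the user area in one call.  The third ptrace
 * argument points at a ptrace_area descriptor (field names as used above,
 * declared in <asm/ptrace.h>): kernel_addr is the offset into struct user,
 * process_addr the buffer in the tracer, len the byte count.
 *
 *	ptrace_area pa;
 *	unsigned long gprs[16];
 *
 *	pa.len          = sizeof(gprs);
 *	pa.kernel_addr  = offsetof(struct user, regs.gprs);
 *	pa.process_addr = (unsigned long) gprs;
 *	if (ptrace(PTRACE_PEEKUSR_AREA, pid, (long) &pa, 0L) < 0)
 *		perror("PTRACE_PEEKUSR_AREA");
 */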
375
376 #ifdef CONFIG_COMPAT
377 /*
378 * Now the fun part starts... a 31 bit program running in the
379 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
380 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
381 * to handle; the difference from the 64 bit versions of the requests
382 * is that the access is done in multiples of 4 bytes instead of
383 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
384 * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
385 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
386 * is a 31 bit program too, the content of struct user can be
387 * emulated. A 31 bit program peeking into the struct user of
388 * a 64 bit program is a no-no.
389 */
390
391 /*
392 * Same as peek_user but for a 31 bit program.
393 */
394 static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
395 {
396 struct user32 *dummy32 = NULL;
397 per_struct32 *dummy_per32 = NULL;
398 addr_t offset;
399 __u32 tmp;
400
401 if (addr < (addr_t) &dummy32->regs.acrs) {
402 /*
403 * psw and gprs are stored on the stack
404 */
405 if (addr == (addr_t) &dummy32->regs.psw.mask) {
406 /* Fake a 31 bit psw mask. */
407 tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
408 tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
409 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
410 /* Fake a 31 bit psw address. */
411 tmp = (__u32) task_pt_regs(child)->psw.addr |
412 PSW32_ADDR_AMODE31;
413 } else {
414 /* gpr 0-15 */
415 tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
416 addr*2 + 4);
417 }
418 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
419 /*
420 * access registers are stored in the thread structure
421 */
422 offset = addr - (addr_t) &dummy32->regs.acrs;
423 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
424
425 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
426 /*
427 * orig_gpr2 is stored on the kernel stack
428 */
429 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
430
431 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
432 /*
433 * floating point regs. are stored in the thread structure
434 */
435 offset = addr - (addr_t) &dummy32->regs.fp_regs;
436 tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
437
438 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
439 /*
440 * per_info is found in the thread structure
441 */
442 offset = addr - (addr_t) &dummy32->regs.per_info;
443 /* This is magic. See per_struct and per_struct32. */
444 if ((offset >= (addr_t) &dummy_per32->control_regs &&
445 offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
446 (offset >= (addr_t) &dummy_per32->starting_addr &&
447 offset <= (addr_t) &dummy_per32->ending_addr) ||
448 offset == (addr_t) &dummy_per32->lowcore.words.address)
449 offset = offset*2 + 4;
450 else
451 offset = offset*2;
452 tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
453
454 } else
455 tmp = 0;
456
457 return tmp;
458 }
459
460 static int peek_user_compat(struct task_struct *child,
461 addr_t addr, addr_t data)
462 {
463 __u32 tmp;
464
465 if (!test_thread_flag(TIF_31BIT) ||
466 (addr & 3) || addr > sizeof(struct user) - 3)
467 return -EIO;
468
469 tmp = __peek_user_compat(child, addr);
470 return put_user(tmp, (__u32 __user *) data);
471 }
472
473 /*
474 * Same as poke_user but for a 31 bit program.
475 */
476 static int __poke_user_compat(struct task_struct *child,
477 addr_t addr, addr_t data)
478 {
479 struct user32 *dummy32 = NULL;
480 per_struct32 *dummy_per32 = NULL;
481 __u32 tmp = (__u32) data;
482 addr_t offset;
483
484 if (addr < (addr_t) &dummy32->regs.acrs) {
485 /*
486 * psw, gprs, acrs and orig_gpr2 are stored on the stack
487 */
488 if (addr == (addr_t) &dummy32->regs.psw.mask) {
489 /* Build a 64 bit psw mask from 31 bit mask. */
490 if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
491 /* Invalid psw mask. */
492 return -EINVAL;
493 task_pt_regs(child)->psw.mask =
494 PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
495 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
496 /* Build a 64 bit psw address from 31 bit address. */
497 task_pt_regs(child)->psw.addr =
498 (__u64) tmp & PSW32_ADDR_INSN;
499 } else {
500 /* gpr 0-15 */
501 *(__u32*)((addr_t) &task_pt_regs(child)->psw
502 + addr*2 + 4) = tmp;
503 }
504 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
505 /*
506 * access registers are stored in the thread structure
507 */
508 offset = addr - (addr_t) &dummy32->regs.acrs;
509 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
510
511 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
512 /*
513 * orig_gpr2 is stored on the kernel stack
514 */
515 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
516
517 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
518 /*
519 * floating point regs. are stored in the thread structure
520 */
521 if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
522 (tmp & ~FPC_VALID_MASK) != 0)
523 /* Invalid floating point control. */
524 return -EINVAL;
525 offset = addr - (addr_t) &dummy32->regs.fp_regs;
526 *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
527
528 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
529 /*
530 * per_info is found in the thread structure.
531 */
532 offset = addr - (addr_t) &dummy32->regs.per_info;
533 /*
534 * This is magic. See per_struct and per_struct32.
535 * By coincidence the offsets in per_struct are exactly
536 * twice the offsets in per_struct32 for all fields.
537 * The 8 byte fields need special handling though,
538 * because the second half (bytes 4-7) is needed and
539 * not the first half.
540 */
541 if ((offset >= (addr_t) &dummy_per32->control_regs &&
542 offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
543 (offset >= (addr_t) &dummy_per32->starting_addr &&
544 offset <= (addr_t) &dummy_per32->ending_addr) ||
545 offset == (addr_t) &dummy_per32->lowcore.words.address)
546 offset = offset*2 + 4;
547 else
548 offset = offset*2;
549 *(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
550
551 }
552
553 FixPerRegisters(child);
554 return 0;
555 }
556
557 static int poke_user_compat(struct task_struct *child,
558 addr_t addr, addr_t data)
559 {
560 if (!test_thread_flag(TIF_31BIT) ||
561 (addr & 3) || addr > sizeof(struct user32) - 3)
562 return -EIO;
563
564 return __poke_user_compat(child, addr, data);
565 }
566
567 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
568 compat_ulong_t caddr, compat_ulong_t cdata)
569 {
570 unsigned long addr = caddr;
571 unsigned long data = cdata;
572 ptrace_area_emu31 parea;
573 int copied, ret;
574
575 switch (request) {
576 case PTRACE_PEEKUSR:
577 /* read the word at location addr in the USER area. */
578 return peek_user_compat(child, addr, data);
579
580 case PTRACE_POKEUSR:
581 /* write the word at location addr in the USER area */
582 return poke_user_compat(child, addr, data);
583
584 case PTRACE_PEEKUSR_AREA:
585 case PTRACE_POKEUSR_AREA:
586 if (copy_from_user(&parea, (void __force __user *) addr,
587 sizeof(parea)))
588 return -EFAULT;
589 addr = parea.kernel_addr;
590 data = parea.process_addr;
591 copied = 0;
592 while (copied < parea.len) {
593 if (request == PTRACE_PEEKUSR_AREA)
594 ret = peek_user_compat(child, addr, data);
595 else {
596 __u32 utmp;
597 if (get_user(utmp,
598 (__u32 __force __user *) data))
599 return -EFAULT;
600 ret = poke_user_compat(child, addr, utmp);
601 }
602 if (ret)
603 return ret;
604 addr += sizeof(unsigned int);
605 data += sizeof(unsigned int);
606 copied += sizeof(unsigned int);
607 }
608 return 0;
609 }
610 return compat_ptrace_request(child, request, addr, data);
611 }
612 #endif
613
614 asmlinkage void
615 syscall_trace(struct pt_regs *regs, int entryexit)
616 {
617 if (unlikely(current->audit_context) && entryexit)
618 audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), regs->gprs[2]);
619
620 if (!test_thread_flag(TIF_SYSCALL_TRACE))
621 goto out;
622 if (!(current->ptrace & PT_PTRACED))
623 goto out;
624 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
625 ? 0x80 : 0));
626
627 /*
628 * If the debugger has set an invalid system call number,
629 * we prepare to skip the system call restart handling.
630 */
631 if (!entryexit && regs->gprs[2] >= NR_syscalls)
632 regs->trap = -1;
633
634 /*
635 * this isn't the same as continuing with a signal, but it will do
636 * for normal use. strace only continues with a signal if the
637 * stopping signal is not SIGTRAP. -brl
638 */
639 if (current->exit_code) {
640 send_sig(current->exit_code, current, 1);
641 current->exit_code = 0;
642 }
643 out:
644 if (unlikely(current->audit_context) && !entryexit)
645 audit_syscall_entry(test_thread_flag(TIF_31BIT)?AUDIT_ARCH_S390:AUDIT_ARCH_S390X,
646 regs->gprs[2], regs->orig_gpr2, regs->gprs[3],
647 regs->gprs[4], regs->gprs[5]);
648 }
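
/*
 * Illustrative sketch, not part of the original file: the "| 0x80" above
 * is what a tracer observes when it has asked for PTRACE_O_TRACESYSGOOD,
 * which lets it tell syscall stops apart from ordinary SIGTRAPs.
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0L, PTRACE_O_TRACESYSGOOD);
 *	ptrace(PTRACE_SYSCALL, pid, 0L, 0L);	// run to next syscall entry/exit
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
 *		;	// stopped at a system call boundary
 */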
649
650 /*
651 * user_regset definitions.
652 */
653
654 static int s390_regs_get(struct task_struct *target,
655 const struct user_regset *regset,
656 unsigned int pos, unsigned int count,
657 void *kbuf, void __user *ubuf)
658 {
659 if (target == current)
660 save_access_regs(target->thread.acrs);
661
662 if (kbuf) {
663 unsigned long *k = kbuf;
664 while (count > 0) {
665 *k++ = __peek_user(target, pos);
666 count -= sizeof(*k);
667 pos += sizeof(*k);
668 }
669 } else {
670 unsigned long __user *u = ubuf;
671 while (count > 0) {
672 if (__put_user(__peek_user(target, pos), u++))
673 return -EFAULT;
674 count -= sizeof(*u);
675 pos += sizeof(*u);
676 }
677 }
678 return 0;
679 }
680
681 static int s390_regs_set(struct task_struct *target,
682 const struct user_regset *regset,
683 unsigned int pos, unsigned int count,
684 const void *kbuf, const void __user *ubuf)
685 {
686 int rc = 0;
687
688 if (target == current)
689 save_access_regs(target->thread.acrs);
690
691 if (kbuf) {
692 const unsigned long *k = kbuf;
693 while (count > 0 && !rc) {
694 rc = __poke_user(target, pos, *k++);
695 count -= sizeof(*k);
696 pos += sizeof(*k);
697 }
698 } else {
699 const unsigned long __user *u = ubuf;
700 while (count > 0 && !rc) {
701 unsigned long word;
702 rc = __get_user(word, u++);
703 if (rc)
704 break;
705 rc = __poke_user(target, pos, word);
706 count -= sizeof(*u);
707 pos += sizeof(*u);
708 }
709 }
710
711 if (rc == 0 && target == current)
712 restore_access_regs(target->thread.acrs);
713
714 return rc;
715 }
716
717 static int s390_fpregs_get(struct task_struct *target,
718 const struct user_regset *regset, unsigned int pos,
719 unsigned int count, void *kbuf, void __user *ubuf)
720 {
721 if (target == current)
722 save_fp_regs(&target->thread.fp_regs);
723
724 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
725 &target->thread.fp_regs, 0, -1);
726 }
727
728 static int s390_fpregs_set(struct task_struct *target,
729 const struct user_regset *regset, unsigned int pos,
730 unsigned int count, const void *kbuf,
731 const void __user *ubuf)
732 {
733 int rc = 0;
734
735 if (target == current)
736 save_fp_regs(&target->thread.fp_regs);
737
738 /* If setting FPC, must validate it first. */
739 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
740 u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
741 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
742 0, offsetof(s390_fp_regs, fprs));
743 if (rc)
744 return rc;
745 if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
746 return -EINVAL;
747 target->thread.fp_regs.fpc = fpc[0];
748 }
749
750 if (rc == 0 && count > 0)
751 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
752 target->thread.fp_regs.fprs,
753 offsetof(s390_fp_regs, fprs), -1);
754
755 if (rc == 0 && target == current)
756 restore_fp_regs(&target->thread.fp_regs);
757
758 return rc;
759 }
760
761 static const struct user_regset s390_regsets[] = {
762 [REGSET_GENERAL] = {
763 .core_note_type = NT_PRSTATUS,
764 .n = sizeof(s390_regs) / sizeof(long),
765 .size = sizeof(long),
766 .align = sizeof(long),
767 .get = s390_regs_get,
768 .set = s390_regs_set,
769 },
770 [REGSET_FP] = {
771 .core_note_type = NT_PRFPREG,
772 .n = sizeof(s390_fp_regs) / sizeof(long),
773 .size = sizeof(long),
774 .align = sizeof(long),
775 .get = s390_fpregs_get,
776 .set = s390_fpregs_set,
777 },
778 };
779
780 static const struct user_regset_view user_s390_view = {
781 .name = UTS_MACHINE,
782 .e_machine = EM_S390,
783 .regsets = s390_regsets,
784 .n = ARRAY_SIZE(s390_regsets)
785 };
786
787 #ifdef CONFIG_COMPAT
788 static int s390_compat_regs_get(struct task_struct *target,
789 const struct user_regset *regset,
790 unsigned int pos, unsigned int count,
791 void *kbuf, void __user *ubuf)
792 {
793 if (target == current)
794 save_access_regs(target->thread.acrs);
795
796 if (kbuf) {
797 compat_ulong_t *k = kbuf;
798 while (count > 0) {
799 *k++ = __peek_user_compat(target, pos);
800 count -= sizeof(*k);
801 pos += sizeof(*k);
802 }
803 } else {
804 compat_ulong_t __user *u = ubuf;
805 while (count > 0) {
806 if (__put_user(__peek_user_compat(target, pos), u++))
807 return -EFAULT;
808 count -= sizeof(*u);
809 pos += sizeof(*u);
810 }
811 }
812 return 0;
813 }
814
815 static int s390_compat_regs_set(struct task_struct *target,
816 const struct user_regset *regset,
817 unsigned int pos, unsigned int count,
818 const void *kbuf, const void __user *ubuf)
819 {
820 int rc = 0;
821
822 if (target == current)
823 save_access_regs(target->thread.acrs);
824
825 if (kbuf) {
826 const compat_ulong_t *k = kbuf;
827 while (count > 0 && !rc) {
828 rc = __poke_user_compat(target, pos, *k++);
829 count -= sizeof(*k);
830 pos += sizeof(*k);
831 }
832 } else {
833 const compat_ulong_t __user *u = ubuf;
834 while (count > 0 && !rc) {
835 compat_ulong_t word;
836 rc = __get_user(word, u++);
837 if (rc)
838 break;
839 rc = __poke_user_compat(target, pos, word);
840 count -= sizeof(*u);
841 pos += sizeof(*u);
842 }
843 }
844
845 if (rc == 0 && target == current)
846 restore_access_regs(target->thread.acrs);
847
848 return rc;
849 }
850
851 static const struct user_regset s390_compat_regsets[] = {
852 [REGSET_GENERAL] = {
853 .core_note_type = NT_PRSTATUS,
854 .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
855 .size = sizeof(compat_long_t),
856 .align = sizeof(compat_long_t),
857 .get = s390_compat_regs_get,
858 .set = s390_compat_regs_set,
859 },
860 [REGSET_FP] = {
861 .core_note_type = NT_PRFPREG,
862 .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
863 .size = sizeof(compat_long_t),
864 .align = sizeof(compat_long_t),
865 .get = s390_fpregs_get,
866 .set = s390_fpregs_set,
867 },
868 };
869
870 static const struct user_regset_view user_s390_compat_view = {
871 .name = "s390",
872 .e_machine = EM_S390,
873 .regsets = s390_compat_regsets,
874 .n = ARRAY_SIZE(s390_compat_regsets)
875 };
876 #endif
877
878 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
879 {
880 #ifdef CONFIG_COMPAT
881 if (test_tsk_thread_flag(task, TIF_31BIT))
882 return &user_s390_compat_view;
883 #endif
884 return &user_s390_view;
885 }
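
/*
 * Illustrative sketch, not part of the original file: generic code reaches
 * the regset callbacks defined above through this view, e.g. the ELF core
 * dump writer uses task_user_regset_view() to fill the NT_PRSTATUS and
 * NT_PRFPREG notes.  A minimal kernel-side consumer could look like this
 * (copy_regset_to_user() is the helper from <linux/regset.h>):
 *
 *	const struct user_regset_view *view = task_user_regset_view(task);
 *	const struct user_regset *rs = &view->regsets[REGSET_GENERAL];
 *
 *	// copy the complete general register set to a user buffer
 *	copy_regset_to_user(task, view, REGSET_GENERAL, 0,
 *			    rs->n * rs->size, ubuf);
 */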