// SPDX-License-Identifier: GPL-2.0
/*
 *  Ptrace user space interface.
 *
 *    Copyright IBM Corp. 1999, 2010
 *    Author(s): Denis Joseph Barrow
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/page.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/facility.h>

#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;
	union ctlreg0 cr0_old, cr0_new;
	union ctlreg2 cr2_old, cr2_new;
	int cr0_changed, cr2_changed;

	__ctl_store(cr0_old.val, 0, 0);
	__ctl_store(cr2_old.val, 2, 2);
	cr0_new = cr0_old;
	cr2_new = cr2_old;
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		/* Set or clear transaction execution TXC bit 8. */
		cr0_new.tcx = 1;
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new.tcx = 0;
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr2_new.tdc = 0;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr2_new.tdc = 1;
			else
				cr2_new.tdc = 2;
		}
	}
	/* Take care of enable/disable of guarded storage. */
	if (MACHINE_HAS_GS) {
		cr2_new.gse = 0;
		if (task->thread.gs_cb)
			cr2_new.gse = 1;
	}
	/* Load control register 0/2 iff changed */
	cr0_changed = cr0_new.val != cr0_old.val;
	cr2_changed = cr2_new.val != cr2_old.val;
	if (cr0_changed)
		__ctl_load(cr0_new.val, 0, 0);
	if (cr2_changed)
		__ctl_load(cr2_new.val, 2, 2);
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
			new.control |= PER_EVENT_IFETCH;
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control |= PER_EVENT_IFETCH;
		new.start = 0;
		new.end = -1UL;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}

void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
	task->thread.per_flags = 0;
}

#define __ADDR_MASK 7

static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			-1UL : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}

/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;
		tmp <<= BITS_PER_LONG - 32;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			tmp = *(addr_t *)
			      ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(addr_t *)
			      ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}

static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}

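/*
 * Usage sketch (user space, not part of this file): a 64 bit tracer
 * reaches peek_user() above through the generic PTRACE_PEEKUSR request,
 * e.g. to read the tracee's PSW mask at offset 0 of struct user. The pid
 * below is a hypothetical, already stopped tracee:
 *
 *	#include <stddef.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *
 *	errno = 0;
 *	long psw_mask = ptrace(PTRACE_PEEKUSR, pid,
 *			       (void *) offsetof(struct user, regs.psw.mask),
 *			       NULL);
 *	if (psw_mask == -1 && errno)
 *		perror("PTRACE_PEEKUSR");
 *
 * This only illustrates the calling convention; offsets outside the
 * documented struct user fields are rejected with -EIO above.
 */
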
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}

/*
 * A tracer may change the system call number by writing to the tracee's
 * gpr 2 while it is stopped in a system call. Rewrite the interruption
 * code so that signal and restart handling see the new svc number, but
 * only if the tracee really stopped on an svc instruction.
 */
static void fixup_int_code(struct task_struct *child, addr_t data)
{
	struct pt_regs *regs = task_pt_regs(child);
	int ilc = regs->int_code >> 16;
	u16 insn;

	if (ilc > 6)
		return;

	if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
			&insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
		return;

	/* double check that tracee stopped on svc instruction */
	if ((insn >> 8) != 0xa)
		return;

	regs->int_code = 0x20000 | (data & 0xffff);
}
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			if ((data ^ PSW_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				/* Invalid addressing mode bits */
				return -EINVAL;
		}

		if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
		    addr == offsetof(struct user, regs.gprs[2]))
			fixup_int_code(child, data);
		*(addr_t *)((addr_t) &regs->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if ((unsigned int) data != 0 ||
		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
			return -EINVAL;
		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			*(addr_t *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = data;
		else
			*(addr_t *)((addr_t)
				child->thread.fpu.fprs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);

	}

	return 0;
}

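/*
 * Usage sketch (user space, not part of this file): writing gpr 2 while
 * the tracee is stopped at a syscall-enter stop changes which system call
 * is executed; __poke_user() above then calls fixup_int_code() so the svc
 * interruption code matches the new number. With a hypothetical stopped
 * tracee pid:
 *
 *	ptrace(PTRACE_POKEUSR, pid,
 *	       (void *) offsetof(struct user, regs.gprs[2]),
 *	       (void *) (long) __NR_getpid);
 *
 * This is only meaningful at the syscall-enter stop reported through
 * PTRACE_SYSCALL; at other stops it simply overwrites the register.
 */
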
static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(child->thread.last_break,
			 (unsigned long __user *) data);
		return 0;
	case PTRACE_ENABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags &= ~PER_FLAG_NO_TE;
		return 0;
	case PTRACE_DISABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags |= PER_FLAG_NO_TE;
		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
		return 0;
	case PTRACE_TE_ABORT_RAND:
		if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
			return -EIO;
		switch (data) {
		case 0UL:
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
			break;
		case 1UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		case 2UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	default:
		return ptrace_request(child, request, addr, data);
	}
}

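/*
 * Usage sketch (user space, not part of this file): PTRACE_PEEKUSR_AREA
 * transfers a whole range of the USER area in one call. The tracer fills
 * in a ptrace_area descriptor (declared in <asm/ptrace.h>) and passes its
 * address as the "addr" argument; the fourth ptrace argument is unused.
 * gprs_buf is a hypothetical buffer in the tracer:
 *
 *	unsigned long gprs_buf[16];
 *	ptrace_area parea = {
 *		.len          = sizeof(gprs_buf),
 *		.kernel_addr  = offsetof(struct user, regs.gprs),
 *		.process_addr = (unsigned long) gprs_buf,
 *	};
 *	if (ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL) == -1)
 *		perror("PTRACE_PEEKUSR_AREA");
 *
 * kernel_addr is the offset into struct user and process_addr the buffer
 * in the tracer; both are advanced word by word by the loop above.
 */
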
#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 bytes instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */

/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}

/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
			tmp |= PSW32_USER_BITS;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/* gpr 0-15 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			tmp = *(__u32 *)
			      ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(__u32 *)
			      ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);

	} else
		tmp = 0;

	return tmp;
}

static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}

/*
 * Same as poke_user_per but for a 31 bit program.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}

/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			__u32 mask = PSW32_MASK_USER;

			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp ^ PSW32_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {

			if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
			    addr == offsetof(struct compat_user, regs.gprs[2]))
				fixup_int_code(child, data);
			/* gpr 0-15 */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if (test_fp_ctl(tmp))
			return -EINVAL;
		child->thread.fpu.fpc = data;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			*(__u32 *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = tmp;
		else
			*(__u32 *)((addr_t)
				child->thread.fpu.fprs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}

static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	compat_ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(child->thread.last_break,
			 (unsigned int __user *) data);
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
#endif

asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long mask = -1UL;
	long ret = -1;

	if (is_compat_task())
		mask = 0xffffffff;

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs)) {
		/*
		 * Tracing decided this syscall should not happen. Skip
		 * the system call and the system call restart handling.
		 */
		goto skip;
	}

#ifdef CONFIG_SECCOMP
	/* Do the secure computing check after ptrace. */
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		struct seccomp_data sd;

		if (is_compat_task()) {
			sd.instruction_pointer = regs->psw.addr & 0x7fffffff;
			sd.arch = AUDIT_ARCH_S390;
		} else {
			sd.instruction_pointer = regs->psw.addr;
			sd.arch = AUDIT_ARCH_S390X;
		}

		sd.nr = regs->int_code & 0xffff;
		sd.args[0] = regs->orig_gpr2 & mask;
		sd.args[1] = regs->gprs[3] & mask;
		sd.args[2] = regs->gprs[4] & mask;
		sd.args[3] = regs->gprs[5] & mask;
		sd.args[4] = regs->gprs[6] & mask;
		sd.args[5] = regs->gprs[7] & mask;

		if (__secure_computing(&sd) == -1)
			goto skip;
	}
#endif /* CONFIG_SECCOMP */

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->int_code & 0xffff);

	audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask,
			    regs->gprs[3] & mask, regs->gprs[4] & mask,
			    regs->gprs[5] & mask);

	if ((signed long)regs->gprs[2] >= NR_syscalls) {
		regs->gprs[2] = -ENOSYS;
		ret = -ENOSYS;
	}
	return regs->gprs[2];
skip:
	clear_pt_regs_flag(regs, PIF_SYSCALL);
	return ret;
}

asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
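
/*
 * Usage sketch (user space, not part of this file): a tracer using
 * PTRACE_SYSCALL sees two stops per system call, one reported from
 * do_syscall_trace_enter() and one from do_syscall_trace_exit() above.
 * Reading gpr 2 at the exit stop yields the return value; pid is a
 * hypothetical tracee:
 *
 *	ptrace(PTRACE_SYSCALL, pid, 0, 0);	// run to syscall entry
 *	waitpid(pid, &status, 0);
 *	ptrace(PTRACE_SYSCALL, pid, 0, 0);	// run to syscall exit
 *	waitpid(pid, &status, 0);
 *	long retval = ptrace(PTRACE_PEEKUSR, pid,
 *			     (void *) offsetof(struct user, regs.gprs[2]), 0);
 *
 * Error handling is omitted; this only illustrates where the two hooks
 * sit in the syscall path.
 */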

/*
 * user_regset definitions.
 */

static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf to)
{
	unsigned pos;
	if (target == current)
		save_access_regs(target->thread.acrs);

	for (pos = 0; pos < sizeof(s390_regs); pos += sizeof(long))
		membuf_store(&to, __peek_user(target, pos));
	return 0;
}

static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	_s390_fp_regs fp_regs;

	if (target == current)
		save_fpu_regs();

	fp_regs.fpc = target->thread.fpu.fpc;
	fpregs_store(&fp_regs, &target->thread.fpu);

	return membuf_write(&to, &fp_regs, sizeof(fp_regs));
}

static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;
	freg_t fprs[__NUM_FPRS];

	if (target == current)
		save_fpu_regs();

	if (MACHINE_HAS_VX)
		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
	else
		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fpu.fpc = ufpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					fprs, offsetof(s390_fp_regs, fprs), -1);
	if (rc)
		return rc;

	if (MACHINE_HAS_VX)
		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
	else
		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));

	return rc;
}

static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       struct membuf to)
{
	return membuf_store(&to, target->thread.last_break);
}

static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);

	if (!(regs->int_code & 0x200))
		return -ENODATA;
	return membuf_write(&to, target->thread.trap_tdb, 256);
}

static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_vxrs_low_get(struct task_struct *target,
			     const struct user_regset *regset,
			     struct membuf to)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
	return membuf_write(&to, vxrs, sizeof(vxrs));
}

static int s390_vxrs_low_set(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     const void *kbuf, const void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i, rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
	if (rc == 0)
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			*((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];

	return rc;
}

static int s390_vxrs_high_get(struct task_struct *target,
			      const struct user_regset *regset,
			      struct membuf to)
{
	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
	return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
			    __NUM_VXRS_HIGH * sizeof(__vector128));
}

static int s390_vxrs_high_set(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      const void *kbuf, const void __user *ubuf)
{
	int rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
	return rc;
}

static int s390_system_call_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	return membuf_store(&to, target->thread.system_call);
}

static int s390_system_call_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	unsigned int *data = &target->thread.system_call;
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(unsigned int));
}

static int s390_gs_cb_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct gs_cb *data = target->thread.gs_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	if (target == current)
		save_gs_cb(data);
	return membuf_write(&to, data, sizeof(struct gs_cb));
}

static int s390_gs_cb_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb gs_cb = { }, *data = NULL;
	int rc;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!target->thread.gs_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}
	if (!target->thread.gs_cb)
		gs_cb.gsd = 25;
	else if (target == current)
		save_gs_cb(&gs_cb);
	else
		gs_cb = *target->thread.gs_cb;
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&gs_cb, 0, sizeof(gs_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}
	preempt_disable();
	if (!target->thread.gs_cb)
		target->thread.gs_cb = data;
	*target->thread.gs_cb = gs_cb;
	if (target == current) {
		__ctl_set_bit(2, 4);
		restore_gs_cb(target->thread.gs_cb);
	}
	preempt_enable();
	return rc;
}

static int s390_gs_bc_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	return membuf_write(&to, data, sizeof(struct gs_cb));
}

static int s390_gs_bc_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		target->thread.gs_bc_cb = data;
	}
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(struct gs_cb));
}

static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
{
	return (cb->rca & 0x1f) == 0 &&
		(cb->roa & 0xfff) == 0 &&
		(cb->rla & 0xfff) == 0xfff &&
		cb->s == 1 &&
		cb->k == 1 &&
		cb->h == 0 &&
		cb->reserved1 == 0 &&
		cb->ps == 1 &&
		cb->qs == 0 &&
		cb->pc == 1 &&
		cb->qc == 0 &&
		cb->reserved2 == 0 &&
		cb->reserved3 == 0 &&
		cb->reserved4 == 0 &&
		cb->reserved5 == 0 &&
		cb->reserved6 == 0 &&
		cb->reserved7 == 0 &&
		cb->reserved8 == 0 &&
		cb->rla >= cb->roa &&
		cb->rca >= cb->roa &&
		cb->rca <= cb->rla+1 &&
		cb->m < 3;
}

static int s390_runtime_instr_get(struct task_struct *target,
				  const struct user_regset *regset,
				  struct membuf to)
{
	struct runtime_instr_cb *data = target->thread.ri_cb;

	if (!test_facility(64))
		return -ENODEV;
	if (!data)
		return -ENODATA;

	return membuf_write(&to, data, sizeof(struct runtime_instr_cb));
}

static int s390_runtime_instr_set(struct task_struct *target,
				  const struct user_regset *regset,
				  unsigned int pos, unsigned int count,
				  const void *kbuf, const void __user *ubuf)
{
	struct runtime_instr_cb ri_cb = { }, *data = NULL;
	int rc;

	if (!test_facility(64))
		return -ENODEV;

	if (!target->thread.ri_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}

	if (target->thread.ri_cb) {
		if (target == current)
			store_runtime_instr_cb(&ri_cb);
		else
			ri_cb = *target->thread.ri_cb;
	}

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&ri_cb, 0, sizeof(struct runtime_instr_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}

	if (!is_ri_cb_valid(&ri_cb)) {
		kfree(data);
		return -EINVAL;
	}
	/*
	 * Override access key in any case, since user space should
	 * not be able to set it, nor should it care about it.
	 */
	ri_cb.key = PAGE_DEFAULT_KEY >> 4;
	preempt_disable();
	if (!target->thread.ri_cb)
		target->thread.ri_cb = data;
	*target->thread.ri_cb = ri_cb;
	if (target == current)
		load_runtime_instr_cb(target->thread.ri_cb);
	preempt_enable();

	return 0;
}

static const struct user_regset s390_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_regs_get,
		.set = s390_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.regset_get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.regset_get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.regset_get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};

static const struct user_regset_view user_s390_view = {
	.name = "s390x",
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};

#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	unsigned n;

	if (target == current)
		save_access_regs(target->thread.acrs);

	for (n = 0; n < sizeof(s390_compat_regs); n += sizeof(compat_ulong_t))
		membuf_store(&to, __peek_user_compat(target, n));
	return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     struct membuf to)
{
	compat_ulong_t *gprs_high;
	int i;

	gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs;
	for (i = 0; i < NUM_GPRS; i++, gprs_high += 2)
		membuf_store(&to, *gprs_high);
	return 0;
}

static int s390_compat_regs_high_set(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     const void *kbuf, const void __user *ubuf)
{
	compat_ulong_t *gprs_high;
	int rc = 0;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0) {
			*gprs_high = *k++;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			*gprs_high = word;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}

	return rc;
}

static int s390_compat_last_break_get(struct task_struct *target,
				      const struct user_regset *regset,
				      struct membuf to)
{
	compat_ulong_t last_break = target->thread.last_break;

	return membuf_store(&to, (unsigned long)last_break);
}

static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static const struct user_regset s390_compat_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.regset_get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.regset_get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.regset_get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};

static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
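
/*
 * Usage sketch (user space, not part of this file): the regsets above are
 * reached through PTRACE_GETREGSET/PTRACE_SETREGSET with the note type as
 * the "addr" argument, e.g. to read the last-breaking-event address of a
 * hypothetical stopped tracee pid:
 *
 *	#include <elf.h>
 *	#include <sys/uio.h>
 *
 *	unsigned long last_break;
 *	struct iovec iov = { .iov_base = &last_break,
 *			     .iov_len  = sizeof(last_break) };
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_S390_LAST_BREAK, &iov) == -1)
 *		perror("PTRACE_GETREGSET");
 *
 * Core dumps use the same note types; which view (s390 or s390x) applies
 * is decided by task_user_regset_view() above.
 */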

static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};

unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return 0;
	return regs->gprs[offset];
}

int regs_query_register_offset(const char *name)
{
	unsigned long offset;

	if (!name || *name != 'r')
		return -EINVAL;
	if (kstrtoul(name + 1, 10, &offset))
		return -EINVAL;
	if (offset >= NUM_GPRS)
		return -EINVAL;
	return offset;
}

const char *regs_query_register_name(unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return NULL;
	return gpr_names[offset];
}

static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	unsigned long ksp = kernel_stack_pointer(regs);

	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}
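
/*
 * Usage sketch (kernel side, illustration only): the helpers above back
 * features such as kprobe event argument fetching. A caller that wants
 * the value of "r2" at a probe hit would do roughly:
 *
 *	int offset = regs_query_register_offset("r2");
 *
 *	if (offset >= 0)
 *		pr_info("r2 = %lx\n", regs_get_register(regs, offset));
 *
 * regs_get_kernel_stack_nth(regs, n) likewise returns the n-th word of
 * the kernel stack, or 0 if that slot lies outside the current stack.
 */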