// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <linux/uaccess.h>
#include <asm/unwind.h>

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)

#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call.  */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
# define GET_BITS(first, last, unat)					\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

# undef GET_BITS
}

/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
# define PUT_BITS(first, last, nat)					\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

# undef PUT_BITS
}

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}

/*
 * This routine is used to read the rnat bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel are different, this is not completely trivial.  In
 * essence, we need to construct the user RNAT based on up to two
 * kernel RNAT values and/or the RNAT value saved in the child's
 * pt_regs.
 *
 *	user rbs
 *
 *	+--------+ <-- lowest address
 *	| slot62 |
 *	+--------+
 *	|  rnat  | 0x....1f8
 *	+--------+
 *	| slot00 |                             \
 *	+--------+                             |
 *	| slot01 |                             > child_regs->ar_rnat
 *	+--------+                             |
 *	| slot02 |                             /     kernel rbs
 *	+--------+                             +--------+
 *	         <-- child_regs->ar_bspstore   | slot61 | <-- krbs
 *	+- - - - +                             +--------+
 *	                                       | slot62 |
 *	+- - - - +                             +--------+
 *	                                       |  rnat  |
 *	+- - - - +                             +--------+
 *	  vrnat                                | slot00 |
 *	+- - - - +                             +--------+
 *	                                       =        =
 *	                                       +--------+
 *	                                       | slot00 | \
 *	                                       +--------+ |
 *	                                       | slot01 | > child_stack->ar_rnat
 *	                                       +--------+ |
 *	                                       | slot02 | /
 *	                                       +--------+
 *	                                              <--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}

/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_RBS_END gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}

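/*
 * Write VAL to the user-level backing store of task CHILD at user-level
 * address ADDR.  This is the counterpart of ia64_peek(): words (and RNaT
 * collection slots) that still live on the kernel backing store are
 * patched there; everything else is written to user memory.
 */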
long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		   != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}

/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW is a pointer to the
 * switch_stack structure.  USER_RBS_START and USER_RBS_END delimit
 * the user-level backing store that is written.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}

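/*
 * The reverse of ia64_sync_user_rbs(): read the user-level backing
 * store back, word for word, into the kernel backing store.
 */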
static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE)
				!= sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

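/*
 * Unwinder callback: unwind to the user frame of the task and apply
 * the given sync function (ia64_sync_user_rbs or ia64_sync_kernel_rbs)
 * to its register backing store.
 */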
static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}

/*
 * When a thread is stopped (ptraced), a debugger might change the thread's
 * user stack (by writing its memory directly), and we must prevent the RSE
 * state stored in the kernel from overriding the user stack (user space's
 * RSE is newer than the kernel's in that case).  To work around the issue,
 * we copy the kernel RSE to the user RSE before the task is stopped, so the
 * user RSE has up-to-date data.  We then copy the user RSE back to the
 * kernel after the task is resumed from the traced stop, and the kernel
 * uses the newer RSE to return to user space.  TIF_RESTORE_RSE is the flag
 * indicating that we need to synchronize the user RSE back to the kernel.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}

/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}

/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}

/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}

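/*
 * Read or write the NaT bits of r1-r31 as a single bitset (bit i
 * corresponds to register ri): the NaT bits of the scratch registers
 * are derived from the unat word saved in PT, while those of the
 * preserved registers r4-r7 are accessed through the unwinder.
 */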
static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}

static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);

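/*
 * Implement PTRACE_GETREGS: copy the complete user-visible register
 * state of CHILD to the user-space pt_all_user_regs structure at PPR.
 */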
static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0)
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}

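/*
 * Implement PTRACE_SETREGS: the converse of ptrace_getregs(); load the
 * complete user-visible register state of CHILD from the user-space
 * pt_all_user_regs structure at PPR.
 */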
static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}

long
arch_ptrace (struct task_struct *child, long request,
	     unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);

	default:
		return ptrace_request(child, request, addr, data);
	}
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		if (tracehook_report_syscall_entry(&regs))
			return -ENOSYS;

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();

	audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3);

	return 0;
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	int step;

	audit_syscall_exit(&regs);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(&regs, step);

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();
}

/* Utrace implementation starts here */
struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};

static const ptrdiff_t pt_offsets[32] =
{
#define R(n) offsetof(struct pt_regs, r##n)
	[0] = -1, R(1), R(2), R(3),
	[4] = -1, [5] = -1, [6] = -1, [7] = -1,
	R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
	R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
	R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),
#undef R
};

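/*
 * Access a general register by its ELF core-dump offset.  Registers
 * that are saved in pt_regs are accessed there directly (via
 * pt_offsets above); the preserved registers r4-r7 (and their NaT
 * bits) go through the unwinder.
 */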
static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt = task_pt_regs(target);
	unsigned reg = addr / sizeof(unsigned long);
	ptrdiff_t d = pt_offsets[reg];

	if (d >= 0) {
		unsigned long *ptr = (void *)pt + d;
		if (write_access)
			*ptr = *data;
		else
			*data = *ptr;
		return 0;
	} else {
		char nat = 0;
		if (write_access) {
			/* read NaT bit first: */
			unsigned long dummy;
			int ret = unw_get_gr(info, reg, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, reg, data, &nat, write_access);
	}
}

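/* Access a branch register (b0-b7) by its ELF core-dump offset.  */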
static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

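/*
 * Access an application register, control register, the NaT bits, or
 * the predicate registers by their ELF core-dump offset.
 */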
static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) while the task is
			 * blocked in a system call, convert the state
			 * so that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:
			ptr = &pt->ar_rnat;
			break;
		case ELF_AR_CCV_OFFSET:
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access) {
				unsigned long tmp = *data;
				/* psr.ri==3 is a reserved value: SDM 2:25 */
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;

	return 0;
}

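/*
 * Access any user-visible register by its ELF core-dump offset,
 * dispatching to the general, branch, or application register
 * accessors above.
 */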
static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
	       unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(31))
		return access_elf_gpreg(target, info, addr, data, write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data, write_access);
	else
		return access_elf_areg(target, info, addr, data, write_access);
}

struct regset_membuf {
	struct membuf to;
	int ret;
};

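/* Unwinder callback for gpregs_get(): dump the NT_PRSTATUS register set.  */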
void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_membuf *dst = arg;
	struct membuf to = dst->to;
	unsigned int n;
	elf_greg_t reg;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * coredump format:
	 *      r0-r31
	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *      predicate registers (p0-p63)
	 *      b0-b7
	 *      ip cfm user-mask
	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* Skip r0 */
	membuf_zero(&to, 8);
	for (n = 8; to.left && n < ELF_AR_END_OFFSET; n += 8) {
		if (access_elf_reg(info->task, info, n, &reg, 0) < 0) {
			dst->ret = -EIO;
			return;
		}
		membuf_store(&to, reg);
	}
}

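/* Unwinder callback for gpregs_set(): load the NT_PRSTATUS register set.  */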
void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;

	if (unw_unwind_to_user(info) < 0)
		return;

	if (!dst->count)
		return;
	/* Skip r0 */
	if (dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_GR_OFFSET(1));
		if (dst->ret)
			return;
	}

	while (dst->count && dst->pos < ELF_AR_END_OFFSET) {
		unsigned int n, from, to;
		elf_greg_t tmp[16];

		from = dst->pos;
		to = from + sizeof(tmp);
		if (to > ELF_AR_END_OFFSET)
			to = ELF_AR_END_OFFSET;
		/* get up to 16 values */
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				from, to);
		if (dst->ret)
			return;
		/* now copy them into registers */
		for (n = 0; from < dst->pos; from += sizeof(elf_greg_t), n++)
			if (access_elf_reg(dst->target, info, from,
					   &tmp[n], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}

#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))

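/* Unwinder callback for fpregs_get(): dump fr0-fr127 in core-dump format.  */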
void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct task_struct *task = info->task;
	struct regset_membuf *dst = arg;
	struct membuf to = dst->to;
	elf_fpreg_t reg;
	unsigned int n;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	membuf_zero(&to, 2 * sizeof(elf_fpreg_t));

	/* fr2-fr31 */
	for (n = 2; to.left && n < 32; n++) {
		if (unw_get_fr(info, n, &reg)) {
			dst->ret = -EIO;
			return;
		}
		membuf_write(&to, &reg, sizeof(reg));
	}

	/* fph */
	if (!to.left)
		return;

	ia64_flush_fph(task);
	if (task->thread.flags & IA64_THREAD_FPH_VALID)
		membuf_write(&to, &task->thread.fph, 96 * sizeof(reg));
	else
		membuf_zero(&to, 96 * sizeof(reg));
}

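/* Unwinder callback for fpregs_set(): update fr2-fr31 and/or f32-f127 (fph).  */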
void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			  dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

		if (start & 0xF) {	/* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
				       &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) {	/* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
				       &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
						&dst->u.set.kbuf,
						&dst->u.set.ubuf,
						&dst->target->thread.fph,
						ELF_FP_OFFSET(32), -1);
	}
}

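/*
 * Run CALL with an unwind frame for TARGET: unwind the currently
 * running task in place, or construct a frame from a blocked task's
 * saved state.
 */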
static void
unwind_and_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target, void *data)
{
	if (target == current)
		unw_init_running(call, data);
	else {
		struct unw_frame_info info;
		memset(&info, 0, sizeof(info));
		unw_init_from_blocked_task(&info, target);
		(*call)(&info, data);
	}
}

static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
				      .pos = pos, .count = count,
				      .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				      .ret = 0 };
	unwind_and_call(call, target, &info);
	return info.ret;
}

static int
gpregs_get(struct task_struct *target,
	   const struct user_regset *regset,
	   struct membuf to)
{
	struct regset_membuf info = {.to = to};
	unwind_and_call(do_gpregs_get, target, &info);
	return info.ret;
}

static int gpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_gpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
	do_sync_rbs(info, ia64_sync_user_rbs);
}

/*
 * This is called to write back the register backing store.
 * ptrace does this before it stops, so that a tracer reading the user
 * memory after the thread stops will get the current register data.
 */
static int
gpregs_writeback(struct task_struct *target,
		 const struct user_regset *regset,
		 int now)
{
	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
		return 0;
	set_notify_resume(target);
	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
		NULL, NULL);
}

static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		const struct user_regset *regset,
		struct membuf to)
{
	struct regset_membuf info = {.to = to};
	unwind_and_call(do_fpregs_get, target, &info);
	return info.ret;
}

static int fpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

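/*
 * Access the traditional ptrace user area at byte offset ADDR (for
 * PTRACE_PEEKUSR/PTRACE_POKEUSR).  Most offsets are translated to ELF
 * core-dump offsets and handled via access_elf_reg(); floating-point
 * register halves and the debug registers are handled directly here.
 */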
static int
access_uarea(struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access)
{
	unsigned int pos = -1; /* an invalid value */
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
		(addr >= PT_R7 + 8 && addr < PT_B1) ||
		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}

	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ... (PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		unsigned reg = pos / sizeof(elf_fpreg_t);
		int which_half = (pos / sizeof(unsigned long)) & 1;

		if (reg < 32) { /* fr2-fr31 */
			struct unw_frame_info info;
			elf_fpreg_t fpreg;

			memset(&info, 0, sizeof(info));
			unw_init_from_blocked_task(&info, child);
			if (unw_unwind_to_user(&info) < 0)
				return 0;

			if (unw_get_fr(&info, reg, &fpreg))
				return -1;
			if (write_access) {
				fpreg.u.bits[which_half] = *data;
				if (unw_set_fr(&info, reg, fpreg))
					return -1;
			} else {
				*data = fpreg.u.bits[which_half];
			}
		} else { /* fph */
			elf_fpreg_t *p = &child->thread.fph[reg - 32];
			unsigned long *bits = &p->u.bits[which_half];

			ia64_sync_fph(child);
			if (write_access)
				*bits = *data;
			else if (child->thread.flags & IA64_THREAD_FPH_VALID)
				*data = *bits;
			else
				*data = 0;
		}
		return 0;
	}

	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		struct unw_frame_info info;

		memset(&info, 0, sizeof(info));
		unw_init_from_blocked_task(&info, child);
		if (unw_unwind_to_user(&info) < 0)
			return 0;

		return access_elf_reg(child, &info, pos, data, write_access);
	}

	/* access debug registers */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
		       sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
		       sizeof(child->thread.ibr));
	}

	ptr += regnum;

	if ((regnum & 1) && write_access) {
		/* don't let the user set kernel-level breakpoints: */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.regset_get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.regset_get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
	return &user_ia64_view;
}

struct syscall_get_set_args {
	unsigned int i;
	unsigned int n;
	unsigned long *args;
	struct pt_regs *regs;
	int rw;
};

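/*
 * Unwinder callback: copy syscall arguments between ARGS->args and the
 * stacked registers of the syscall frame, which live among the dirty
 * registers on the kernel backing store.
 */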
static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
	struct syscall_get_set_args *args = data;
	struct pt_regs *pt = args->regs;
	unsigned long *krbs, cfm, ndirty;
	int i, count;

	if (unw_unwind_to_user(info) < 0)
		return;

	cfm = pt->cr_ifs;
	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	count = 0;
	if (in_syscall(pt))
		count = min_t(int, args->n, cfm & 0x7f);

	for (i = 0; i < count; i++) {
		if (args->rw)
			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
				args->args[i];
		else
			args->args[i] = *ia64_rse_skip_regs(krbs,
				ndirty + i + args->i);
	}

	if (!args->rw) {
		while (i < args->n) {
			args->args[i] = 0;
			i++;
		}
	}
}

void ia64_syscall_get_set_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned long *args, int rw)
{
	struct syscall_get_set_args data = {
		.i = 0,
		.n = 6,
		.args = args,
		.regs = regs,
		.rw = rw,
	};

	if (task == current)
		unw_init_running(syscall_get_set_args_cb, &data);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, task);
		syscall_get_set_args_cb(&ufi, &data);
	}
}