arch/ia64/kernel/ptrace.c
/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"
/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
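/*
 * The CFM is 38 bits wide: sof (7), sol (7) and sor (4) plus the three
 * rename-base fields rrb.gr (7), rrb.fr (7) and rrb.pr (6); PFM_MASK
 * below selects exactly those bits of cr.ifs.
 */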
#define PFM_MASK	MASK(38)

#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call.  */

static inline int
in_syscall (struct pt_regs *pt)
{
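	/*
	 * The saved cr.ifs has its valid bit (bit 63) set only for
	 * interruption-based kernel entry; syscall entry leaves it
	 * clear, so a simple sign test distinguishes the two.
	 */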
	return (long) pt->cr_ifs >= 0;
}

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}

/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}

/*
 * This routine is used to read the RNaT bits that are stored on the
 * kernel backing store.  Since, in general, the alignments of the
 * user and kernel backing stores differ, this is not completely
 * trivial.  In essence, we need to construct the user RNAT based on
 * up to two kernel RNAT values and/or the RNAT value saved in the
 * child's pt_regs.
 *
 *	user rbs
 *
 *	+--------+ <-- lowest address
 *	| slot62 |
 *	+--------+
 *	|  rnat  | 0x....1f8
 *	+--------+
 *	| slot00 |   \
 *	+--------+   |
 *	| slot01 |   > child_regs->ar_rnat
 *	+--------+   |
 *	| slot02 |  /				kernel rbs
 *	+--------+				+--------+
 *		    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 *	+- - - - +				+--------+
 *						| slot62 |
 *	+- - - - +				+--------+
 *						|  rnat  |
 *	+- - - - +				+--------+
 *	  vrnat					| slot00 |
 *	+- - - - +				+--------+
 *						=        =
 *						+--------+
 *						| slot00 |   \
 *						+--------+   |
 *						| slot01 |   > child_stack->ar_rnat
 *						+--------+   |
 *						| slot02 |  /
 *						+--------+
 *							  <--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 get their values from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}

/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
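		/*
		 * cfm & 0x7f is CFM.sof, the size of the current
		 * register frame, which is where the syscall
		 * arguments live.
		 */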
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_RBS_END gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
		   != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
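	/*
	 * ar.rsc.loadrs is kept as a byte count shifted left by 16,
	 * so ">> 19" turns it into a number of 8-byte rbs slots.
	 */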
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}

/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}

static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		      unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val), 0)
		    != sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}

/*
 * When a thread is stopped (ptraced), a debugger might change the
 * thread's user stack (change memory directly), and we must avoid the
 * RSE state stored in the kernel overriding the user stack (the
 * user-space RSE state is newer than the kernel's in that case).  To
 * work around the issue, we copy the kernel RSE state to the user RSE
 * area before the task is stopped, so the user RSE area has
 * up-to-date data.  We then copy the user RSE state back to the
 * kernel after the task is resumed from the traced stop, and the
 * kernel will use the newer state to return to user mode.
 * TIF_RESTORE_RSE is the flag that indicates we need to synchronize
 * the user RSE state back to the kernel.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}

/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}

/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->signal) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->signal) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}

static inline int
thread_matches (struct task_struct *thread, unsigned long addr)
{
	unsigned long thread_rbs_end;
	struct pt_regs *thread_regs;

	if (ptrace_check_attach(thread, 0) < 0)
		/*
		 * If the thread is not in an attachable state, we'll
		 * ignore it.  The net effect is that if ADDR happens
		 * to overlap with the portion of the thread's
		 * register backing store that is currently residing
		 * on the thread's kernel stack, then ptrace() may end
		 * up accessing a stale value.  But if the thread
		 * isn't stopped, that's a problem anyhow, so we're
		 * doing as well as we can...
		 */
		return 0;

	thread_regs = task_pt_regs(thread);
	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
		return 0;

	return 1;	/* looks like we've got a winner */
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
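	/* psr.mfh is set by the CPU whenever the task writes f32-f127 */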
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}

/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |= (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}

static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
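		/*
		 * r4-r7 are preserved registers, so their NaT bits are
		 * maintained by the unwinder rather than in scratch_unat:
		 */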
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}

static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0)
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof(fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof(fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}

static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
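	/* psr.ss raises a Single Step trap after every instruction */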
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
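	/* psr.tb raises a Taken Branch trap on every taken branch */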
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}

long
arch_ptrace (struct task_struct *child, long request, long addr, long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (access_process_vm(child, addr, &data, sizeof(data), 0)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);

	default:
		return ptrace_request(child, request, addr, data);
	}
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		if (tracehook_report_syscall_entry(&regs))
			return -ENOSYS;

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();

	if (unlikely(current->audit_context)) {
		long syscall;
		int arch;

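		/* in the ia64 syscall convention, r15 holds the syscall number */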
		syscall = regs.r15;
		arch = AUDIT_ARCH_IA64;

		audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
	}

	return 0;
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	int step;

	if (unlikely(current->audit_context)) {
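		/* ia64 syscalls return the result in r8 and flag failure in r10 */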
		int success = AUDITSC_RESULT(regs.r10);
		long result = regs.r8;

		if (success != AUDITSC_SUCCESS)
			result = -result;
		audit_syscall_exit(success, result);
	}

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(&regs, step);

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();
}

/* Utrace implementation starts here */
struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};

static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		 unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;
	int ret;
	char nat = 0;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_GR_OFFSET(1):
		ptr = &pt->r1;
		break;
	case ELF_GR_OFFSET(2):
	case ELF_GR_OFFSET(3):
		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
		break;
	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
		if (write_access) {
			/* read NaT bit first: */
			unsigned long dummy;

			ret = unw_get_gr(info, addr/8, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, addr/8, data, &nat, write_access);
	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
		break;
	case ELF_GR_OFFSET(12):
	case ELF_GR_OFFSET(13):
		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
		break;
	case ELF_GR_OFFSET(14):
		ptr = &pt->r14;
		break;
	case ELF_GR_OFFSET(15):
		ptr = &pt->r15;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) while the task is
			 * blocked in a system call, convert the state
			 * so that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:
			ptr = &pt->ar_rnat;
			break;
		case ELF_AR_CCV_OFFSET:
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access) {
				unsigned long tmp = *data;
				/* psr.ri==3 is a reserved value: SDM 2:25 */
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;

	return 0;
}

static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
	       unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
		return access_elf_gpreg(target, info, addr, data, write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data, write_access);
	else
		return access_elf_areg(target, info, addr, data, write_access);
}

void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index, min_copy;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * coredump format:
	 *      r0-r31
	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *      predicate registers (p0-p63)
	 *      b0-b7
	 *      ip cfm user-mask
	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1 - gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* r16-r31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
	}
}

void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1-gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		i = dst->pos;
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* gr16-gr31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		i = dst->pos;
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret)
			return;
		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		i = dst->pos;
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}

#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))

void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	elf_fpreg_t tmp[30];
	int index, min_copy, i;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);

		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
				dst->pos + dst->count);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
				index++)
			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
					 &tmp[index])) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fph */
	if (dst->count > 0) {
		ia64_flush_fph(dst->target);
		if (task->thread.flags & IA64_THREAD_FPH_VALID)
			dst->ret = user_regset_copyout(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				&dst->target->thread.fph,
				ELF_FP_OFFSET(32), -1);
		else
			/* Zero fill instead.  */
			dst->ret = user_regset_copyout_zero(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				ELF_FP_OFFSET(32), -1);
	}
}

void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			  dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

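		/*
		 * Each elf_fpreg_t is 16 bytes; if the copy starts or
		 * ends in the middle of a register, merge the untouched
		 * half from the current register contents before
		 * writing the register back.
		 */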
		if (start & 0xF) {	/* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
				       &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) {	/* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
				       &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
					      &dst->u.set.kbuf,
					      &dst->u.set.ubuf,
					      &dst->target->thread.fph,
					      ELF_FP_OFFSET(32), -1);
	}
}

static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
				      .pos = pos, .count = count,
				      .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				      .ret = 0 };

	if (target == current)
		unw_init_running(call, &info);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, target);
		(*call)(&ufi, &info);
	}

	return info.ret;
}

1882 static int
1883 gpregs_get(struct task_struct *target,
1884 const struct user_regset *regset,
1885 unsigned int pos, unsigned int count,
1886 void *kbuf, void __user *ubuf)
1887 {
1888 return do_regset_call(do_gpregs_get, target, regset, pos, count,
1889 kbuf, ubuf);
1890 }
1891
1892 static int gpregs_set(struct task_struct *target,
1893 const struct user_regset *regset,
1894 unsigned int pos, unsigned int count,
1895 const void *kbuf, const void __user *ubuf)
1896 {
1897 return do_regset_call(do_gpregs_set, target, regset, pos, count,
1898 kbuf, ubuf);
1899 }
1900
1901 static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
1902 {
1903 do_sync_rbs(info, ia64_sync_user_rbs);
1904 }
1905
1906 /*
1907 * This is called to write back the register backing store.
1908 * ptrace does this before it stops, so that a tracer reading the user
1909 * memory after the thread stops will get the current register data.
1910 */
1911 static int
1912 gpregs_writeback(struct task_struct *target,
1913 const struct user_regset *regset,
1914 int now)
1915 {
1916 if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
1917 return 0;
1918 set_notify_resume(target);
1919 return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
1920 NULL, NULL);
1921 }
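
/*
 * The write-back matters to tracers because stacked registers live in
 * user memory (the register backing store), not in pt_regs.  A sketch
 * of how a debugger might read a stopped thread's backing store with
 * plain ptrace(2) calls (user-space, illustrative only; error handling
 * elided, PT_AR_BSP comes from <asm/ptrace_offsets.h>):
 */
#if 0
#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <asm/ptrace_offsets.h>

static void dump_rbs_top(pid_t pid)
{
	unsigned long bsp, slot;

	waitpid(pid, NULL, 0);		/* tracee is now stopped */

	/* ar.bsp marks the top of the user backing store */
	bsp = ptrace(PTRACE_PEEKUSER, pid, PT_AR_BSP, 0);

	/* thanks to the write-back, the register data is ordinary memory */
	slot = ptrace(PTRACE_PEEKDATA, pid, bsp - 8, 0);
	printf("topmost stacked register: %#lx\n", slot);
}
#endif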

static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	/* number of live registers: all 128 once the high partition
	   (f32-f127) has been touched, otherwise just f0-f31 */
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpregs_get, target, regset, pos, count,
			      kbuf, ubuf);
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
			      kbuf, ubuf);
}

static int
access_uarea(struct task_struct *child, unsigned long addr,
	     unsigned long *data, int write_access)
{
	unsigned int pos = -1; /* an invalid value */
	int ret;
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	/* reject accesses to the holes between the register groups */
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
		(addr >= PT_R7 + 8 && addr < PT_B1) ||
		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}

	/* floating-point registers: map the PT_ byte offset to an
	   fpregs regset position */
	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ... (PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = fpregs_set(child, NULL, pos,
					 sizeof(unsigned long), data, NULL);
		else
			ret = fpregs_get(child, NULL, pos,
					 sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	/* everything else: map the PT_ offset to a gpregs regset position */
	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = gpregs_set(child, NULL, pos,
					 sizeof(unsigned long), data, NULL);
		else
			ret = gpregs_get(child, NULL, pos,
					 sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	/* access debug registers */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {	/* ia64 has 8 ibr and 8 dbr registers */
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}
#ifdef CONFIG_PERFMON
	/*
	 * Check if debug registers are used by perfmon. This
	 * test must be done once we know that we can do the
	 * operation, i.e. the arguments are all valid, but
	 * before we start modifying the state.
	 *
	 * Perfmon needs to keep a count of how many processes
	 * are trying to modify the debug registers for system
	 * wide monitoring sessions.
	 *
	 * We also include read access here, because they may
	 * cause the PMU-installed debug register state
	 * (dbr[], ibr[]) to be reset. The two arrays are also
	 * used by perfmon, but we do not use
	 * IA64_THREAD_DBG_VALID. The registers are restored
	 * by the PMU context switch code.
	 */
	if (pfm_use_debug_registers(child))
		return -1;
#endif

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
		       sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
		       sizeof(child->thread.ibr));
	}

	ptr += regnum;

	if ((regnum & 1) && write_access) {
		/* don't let the user set kernel-level breakpoints: */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
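
/*
 * access_uarea() is what ultimately services PTRACE_PEEKUSER and
 * PTRACE_POKEUSER: the "address" is a PT_* byte offset from
 * <asm/ptrace_offsets.h>, routed either through the regsets above or
 * into the thread's debug-register arrays.  An illustrative user-space
 * caller (assumes the tracee is already attached and stopped; error
 * handling elided):
 */
#if 0
#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace_offsets.h>

static void peek_around(pid_t pid)
{
	unsigned long iip, dbr0;

	/* instruction pointer, served through the gpregs regset */
	iip = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);

	/* data debug register 0, served from child->thread.dbr[] */
	dbr0 = ptrace(PTRACE_PEEKUSER, pid, PT_DBR, 0);

	printf("iip=%#lx dbr0=%#lx\n", iip, dbr0);
}
#endif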

static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
	return &user_ia64_view;
}
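
/*
 * The regset view is consumed by the generic ELF core-dump code, and on
 * kernels that support PTRACE_GETREGSET it also backs that request.  A
 * sketch of the latter (user-space, illustrative only; assumes
 * ELF_NGREG == 128 as defined for ia64 and a kernel/libc that know
 * PTRACE_GETREGSET):
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <linux/elf.h>

static long read_gpregs(pid_t pid, unsigned long gregs[128])
{
	struct iovec iov = { .iov_base = gregs, .iov_len = 128 * 8 };

	/* NT_PRSTATUS selects the gpregs regset defined above */
	return ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
}
#endif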

struct syscall_get_set_args {
	unsigned int i;
	unsigned int n;
	unsigned long *args;
	struct pt_regs *regs;
	int rw;
};

static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
	struct syscall_get_set_args *args = data;
	struct pt_regs *pt = args->regs;
	unsigned long *krbs, cfm, ndirty;
	int i, count;

	if (unw_unwind_to_user(info) < 0)
		return;

	cfm = pt->cr_ifs;
	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
	/* number of user registers still sitting on the kernel backing
	   store (pt->loadrs caches the ar.rsc.loadrs field in place) */
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	count = 0;
	if (in_syscall(pt))
		/* cfm & 0x7f is sof, the size of the current register
		   frame, which bounds the number of syscall arguments */
		count = min_t(int, args->n, cfm & 0x7f);

	/* the arguments are the frame's stacked registers, located on
	   the kernel RBS just above the ndirty user registers */
	for (i = 0; i < count; i++) {
		if (args->rw)
			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
				args->args[i];
		else
			args->args[i] = *ia64_rse_skip_regs(krbs,
				ndirty + i + args->i);
	}

	/* on reads, zero-fill whatever lies beyond the frame */
	if (!args->rw) {
		while (i < args->n) {
			args->args[i] = 0;
			i++;
		}
	}
}
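
/*
 * The indexing above leans on ia64_rse_skip_regs() to step over RNAT
 * collection slots: every 64th word of the backing store holds NaT
 * bits rather than a register.  Roughly how that helper works (sketch
 * modeled on <asm/rse.h>; see that header for the authoritative
 * version):
 */
#if 0
static unsigned long *rse_skip_regs(unsigned long *addr, long num_regs)
{
	/* slot number of addr within its 64-slot group, plus the move */
	long delta = (((unsigned long)addr >> 3) & 0x3f) + num_regs;

	if (num_regs < 0)
		delta -= 0x3e;
	/* one extra slot is consumed for every 63 registers crossed */
	return addr + num_regs + (delta / 0x3f);
}
#endif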

void ia64_syscall_get_set_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned int i, unsigned int n,
	unsigned long *args, int rw)
{
	struct syscall_get_set_args data = {
		.i = i,
		.n = n,
		.args = args,
		.regs = regs,
		.rw = rw,
	};

	if (task == current)
		unw_init_running(syscall_get_set_args_cb, &data);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, task);
		syscall_get_set_args_cb(&ufi, &data);
	}
}
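
/*
 * The generic syscall-tracing code reaches the function above through
 * thin wrappers in <asm/syscall.h>; roughly like this (sketch from
 * memory, see that header for the real definitions):
 */
#if 0
static inline void syscall_get_arguments(struct task_struct *task,
					 struct pt_regs *regs,
					 unsigned int i, unsigned int n,
					 unsigned long *args)
{
	BUG_ON(i + n > 6);	/* ia64 syscalls take at most six arguments */

	ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
}
#endif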