git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/powerpc/kernel/ptrace.c
powerpc/ptrace: Enable support for NT_PPC_CVMX
1da177e4 1/*
1da177e4
LT
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Derived from "arch/m68k/kernel/ptrace.c"
6 * Copyright (C) 1994 by Hamish Macdonald
7 * Taken from linux/kernel/ptrace.c and modified for M680x0.
8 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9 *
10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
b123923d 11 * and Paul Mackerras (paulus@samba.org).
1da177e4
LT
12 *
13 * This file is subject to the terms and conditions of the GNU General
14 * Public License. See the file README.legal in the main directory of
15 * this archive for more details.
16 */
17
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/mm.h>
21#include <linux/smp.h>
1da177e4
LT
22#include <linux/errno.h>
23#include <linux/ptrace.h>
f65255e8 24#include <linux/regset.h>
4f72c427 25#include <linux/tracehook.h>
3caf06c6 26#include <linux/elf.h>
1da177e4
LT
27#include <linux/user.h>
28#include <linux/security.h>
7ed20e1a 29#include <linux/signal.h>
ea9c102c
DW
30#include <linux/seccomp.h>
31#include <linux/audit.h>
02424d89 32#include <trace/syscall.h>
5aae8a53
P
33#include <linux/hw_breakpoint.h>
34#include <linux/perf_event.h>
22ecbe8d 35#include <linux/context_tracking.h>
1da177e4
LT
36
37#include <asm/uaccess.h>
38#include <asm/page.h>
39#include <asm/pgtable.h>
ae3a197e 40#include <asm/switch_to.h>
21a62902 41
02424d89
IM
42#define CREATE_TRACE_POINTS
43#include <trace/events/syscalls.h>
44
359e4284
MS
45/*
46 * The parameter save area on the stack is used to store arguments being passed
 47 * to the callee function and is located at a fixed offset from the stack pointer.
48 */
49#ifdef CONFIG_PPC32
50#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
51#else /* CONFIG_PPC32 */
52#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
53#endif
54
55struct pt_regs_offset {
56 const char *name;
57 int offset;
58};
59
60#define STR(s) #s /* convert to string */
61#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
62#define GPR_OFFSET_NAME(num) \
343c3327 63 {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
359e4284
MS
64 {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
65#define REG_OFFSET_END {.name = NULL, .offset = 0}
66
8c13f599
AK
67#define TVSO(f) (offsetof(struct thread_vr_state, f))
68
359e4284
MS
69static const struct pt_regs_offset regoffset_table[] = {
70 GPR_OFFSET_NAME(0),
71 GPR_OFFSET_NAME(1),
72 GPR_OFFSET_NAME(2),
73 GPR_OFFSET_NAME(3),
74 GPR_OFFSET_NAME(4),
75 GPR_OFFSET_NAME(5),
76 GPR_OFFSET_NAME(6),
77 GPR_OFFSET_NAME(7),
78 GPR_OFFSET_NAME(8),
79 GPR_OFFSET_NAME(9),
80 GPR_OFFSET_NAME(10),
81 GPR_OFFSET_NAME(11),
82 GPR_OFFSET_NAME(12),
83 GPR_OFFSET_NAME(13),
84 GPR_OFFSET_NAME(14),
85 GPR_OFFSET_NAME(15),
86 GPR_OFFSET_NAME(16),
87 GPR_OFFSET_NAME(17),
88 GPR_OFFSET_NAME(18),
89 GPR_OFFSET_NAME(19),
90 GPR_OFFSET_NAME(20),
91 GPR_OFFSET_NAME(21),
92 GPR_OFFSET_NAME(22),
93 GPR_OFFSET_NAME(23),
94 GPR_OFFSET_NAME(24),
95 GPR_OFFSET_NAME(25),
96 GPR_OFFSET_NAME(26),
97 GPR_OFFSET_NAME(27),
98 GPR_OFFSET_NAME(28),
99 GPR_OFFSET_NAME(29),
100 GPR_OFFSET_NAME(30),
101 GPR_OFFSET_NAME(31),
102 REG_OFFSET_NAME(nip),
103 REG_OFFSET_NAME(msr),
104 REG_OFFSET_NAME(ctr),
105 REG_OFFSET_NAME(link),
106 REG_OFFSET_NAME(xer),
107 REG_OFFSET_NAME(ccr),
108#ifdef CONFIG_PPC64
109 REG_OFFSET_NAME(softe),
110#else
111 REG_OFFSET_NAME(mq),
112#endif
113 REG_OFFSET_NAME(trap),
114 REG_OFFSET_NAME(dar),
115 REG_OFFSET_NAME(dsisr),
116 REG_OFFSET_END,
117};
118
119/**
120 * regs_query_register_offset() - query register offset from its name
121 * @name: the name of a register
122 *
123 * regs_query_register_offset() returns the offset of a register in struct
 124 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
125 */
126int regs_query_register_offset(const char *name)
127{
128 const struct pt_regs_offset *roff;
129 for (roff = regoffset_table; roff->name != NULL; roff++)
130 if (!strcmp(roff->name, name))
131 return roff->offset;
132 return -EINVAL;
133}
134
135/**
136 * regs_query_register_name() - query register name from its offset
137 * @offset: the offset of a register in struct pt_regs.
138 *
139 * regs_query_register_name() returns the name of a register from its
 140 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
141 */
142const char *regs_query_register_name(unsigned int offset)
143{
144 const struct pt_regs_offset *roff;
145 for (roff = regoffset_table; roff->name != NULL; roff++)
146 if (roff->offset == offset)
147 return roff->name;
148 return NULL;
149}
150
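/*
 * Illustrative sketch, not part of the original file: how an in-kernel
 * consumer such as a kprobe event handler might use the lookup helpers
 * above.  Only regs_query_register_offset() from this file is assumed;
 * the function name is made up for the example.
 */
static unsigned long example_read_reg_by_name(struct pt_regs *regs,
                                              const char *name)
{
        int offset = regs_query_register_offset(name); /* e.g. "gpr3", "nip" */

        if (offset < 0)
                return 0;       /* unknown register name */
        /* Every named register lives at a fixed offset inside pt_regs. */
        return *(unsigned long *)((unsigned long)regs + offset);
}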
abd06505
BH
151/*
 152 * This does not yet catch signals sent when the child dies;
 153 * that is handled in exit.c or in signal.c.
154 */
155
156/*
157 * Set of msr bits that gdb can change on behalf of a process.
158 */
172ae2e7 159#ifdef CONFIG_PPC_ADV_DEBUG_REGS
abd06505 160#define MSR_DEBUGCHANGE 0
1da177e4 161#else
abd06505 162#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
1da177e4 163#endif
acd89828 164
1da177e4 165/*
abd06505 166 * Max register writeable via put_reg
1da177e4 167 */
abd06505
BH
168#ifdef CONFIG_PPC32
169#define PT_MAX_PUT_REG PT_MQ
170#else
171#define PT_MAX_PUT_REG PT_CCR
172#endif
1da177e4 173
26f77130
RM
174static unsigned long get_user_msr(struct task_struct *task)
175{
176 return task->thread.regs->msr | task->thread.fpexc_mode;
177}
178
179static int set_user_msr(struct task_struct *task, unsigned long msr)
180{
181 task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
182 task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
183 return 0;
184}
185
25847fb1
AK
186#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
187static unsigned long get_user_ckpt_msr(struct task_struct *task)
188{
189 return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
190}
191
192static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
193{
194 task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
195 task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
196 return 0;
197}
198
199static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
200{
201 task->thread.ckpt_regs.trap = trap & 0xfff0;
202 return 0;
203}
204#endif
205
1715a826 206#ifdef CONFIG_PPC64
ee4a3916 207static int get_user_dscr(struct task_struct *task, unsigned long *data)
1715a826 208{
ee4a3916
AK
209 *data = task->thread.dscr;
210 return 0;
1715a826
AK
211}
212
213static int set_user_dscr(struct task_struct *task, unsigned long dscr)
214{
215 task->thread.dscr = dscr;
216 task->thread.dscr_inherit = 1;
217 return 0;
218}
219#else
ee4a3916 220static int get_user_dscr(struct task_struct *task, unsigned long *data)
1715a826
AK
221{
222 return -EIO;
223}
224
225static int set_user_dscr(struct task_struct *task, unsigned long dscr)
226{
227 return -EIO;
228}
229#endif
230
26f77130
RM
231/*
232 * We prevent mucking around with the reserved area of trap
 233 * which is used internally by the kernel.
234 */
235static int set_user_trap(struct task_struct *task, unsigned long trap)
236{
237 task->thread.regs->trap = trap & 0xfff0;
238 return 0;
239}
240
865418d8
BH
241/*
242 * Get contents of register REGNO in task TASK.
243 */
ee4a3916 244int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
865418d8 245{
ee4a3916 246 if ((task->thread.regs == NULL) || !data)
865418d8
BH
247 return -EIO;
248
ee4a3916
AK
249 if (regno == PT_MSR) {
250 *data = get_user_msr(task);
251 return 0;
252 }
865418d8 253
1715a826 254 if (regno == PT_DSCR)
ee4a3916 255 return get_user_dscr(task, data);
1715a826 256
ee4a3916
AK
257 if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
258 *data = ((unsigned long *)task->thread.regs)[regno];
259 return 0;
260 }
865418d8
BH
261
262 return -EIO;
263}
264
265/*
266 * Write contents of register REGNO in task TASK.
267 */
268int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
269{
270 if (task->thread.regs == NULL)
271 return -EIO;
272
26f77130
RM
273 if (regno == PT_MSR)
274 return set_user_msr(task, data);
275 if (regno == PT_TRAP)
276 return set_user_trap(task, data);
1715a826
AK
277 if (regno == PT_DSCR)
278 return set_user_dscr(task, data);
26f77130
RM
279
280 if (regno <= PT_MAX_PUT_REG) {
865418d8
BH
281 ((unsigned long *)task->thread.regs)[regno] = data;
282 return 0;
283 }
284 return -EIO;
285}
286
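/*
 * Illustrative userspace sketch, not part of the original file: the legacy
 * PTRACE_PEEKUSER request serviced by ptrace_get_reg() above takes a byte
 * offset into the register block, i.e. register number * sizeof(long).
 * PT_NIP comes from the powerpc <asm/ptrace.h>; the helper name is made up.
 */
#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>         /* PT_NIP, PT_R0 ... */

static int example_peek_nip(pid_t pid, unsigned long *nip)
{
        errno = 0;
        *nip = ptrace(PTRACE_PEEKUSER, pid,
                      (void *)(PT_NIP * sizeof(long)), NULL);
        return errno ? -1 : 0;
}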
44dd3f50
RM
287static int gpr_get(struct task_struct *target, const struct user_regset *regset,
288 unsigned int pos, unsigned int count,
289 void *kbuf, void __user *ubuf)
290{
a71f5d5d 291 int i, ret;
44dd3f50
RM
292
293 if (target->thread.regs == NULL)
294 return -EIO;
295
a71f5d5d
MW
296 if (!FULL_REGS(target->thread.regs)) {
297 /* We have a partial register set. Fill 14-31 with bogus values */
298 for (i = 14; i < 32; i++)
299 target->thread.regs->gpr[i] = NV_REG_POISON;
300 }
44dd3f50
RM
301
302 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
303 target->thread.regs,
304 0, offsetof(struct pt_regs, msr));
305 if (!ret) {
306 unsigned long msr = get_user_msr(target);
307 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
308 offsetof(struct pt_regs, msr),
309 offsetof(struct pt_regs, msr) +
310 sizeof(msr));
311 }
312
313 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
314 offsetof(struct pt_regs, msr) + sizeof(long));
315
316 if (!ret)
317 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
318 &target->thread.regs->orig_gpr3,
319 offsetof(struct pt_regs, orig_gpr3),
320 sizeof(struct pt_regs));
321 if (!ret)
322 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
323 sizeof(struct pt_regs), -1);
324
325 return ret;
326}
327
328static int gpr_set(struct task_struct *target, const struct user_regset *regset,
329 unsigned int pos, unsigned int count,
330 const void *kbuf, const void __user *ubuf)
331{
332 unsigned long reg;
333 int ret;
334
335 if (target->thread.regs == NULL)
336 return -EIO;
337
338 CHECK_FULL_REGS(target->thread.regs);
339
340 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
341 target->thread.regs,
342 0, PT_MSR * sizeof(reg));
343
344 if (!ret && count > 0) {
345 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
346 PT_MSR * sizeof(reg),
347 (PT_MSR + 1) * sizeof(reg));
348 if (!ret)
349 ret = set_user_msr(target, reg);
350 }
351
352 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
353 offsetof(struct pt_regs, msr) + sizeof(long));
354
355 if (!ret)
356 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
357 &target->thread.regs->orig_gpr3,
358 PT_ORIG_R3 * sizeof(reg),
359 (PT_MAX_PUT_REG + 1) * sizeof(reg));
360
361 if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
362 ret = user_regset_copyin_ignore(
363 &pos, &count, &kbuf, &ubuf,
364 (PT_MAX_PUT_REG + 1) * sizeof(reg),
365 PT_TRAP * sizeof(reg));
366
367 if (!ret && count > 0) {
368 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
369 PT_TRAP * sizeof(reg),
370 (PT_TRAP + 1) * sizeof(reg));
371 if (!ret)
372 ret = set_user_trap(target, reg);
373 }
374
375 if (!ret)
376 ret = user_regset_copyin_ignore(
377 &pos, &count, &kbuf, &ubuf,
378 (PT_TRAP + 1) * sizeof(reg), -1);
379
380 return ret;
381}
865418d8 382
1ec8549d
AK
383/*
384 * When the transaction is active, 'transact_fp' holds the current running
385 * value of all FPR registers and 'fp_state' holds the last checkpointed
386 * value of all FPR registers for the current transaction. When transaction
387 * is not active 'fp_state' holds the current running state of all the FPR
388 * registers. So this function which returns the current running values of
389 * all the FPR registers, needs to know whether any transaction is active
390 * or not.
391 *
392 * Userspace interface buffer layout:
393 *
394 * struct data {
395 * u64 fpr[32];
396 * u64 fpscr;
397 * };
398 *
399 * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
 400 * which determine the final code in this function. All the combinations of
401 * these two config options are possible except the one below as transactional
402 * memory config pulls in CONFIG_VSX automatically.
403 *
404 * !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
405 */
f65255e8
RM
406static int fpr_get(struct task_struct *target, const struct user_regset *regset,
407 unsigned int pos, unsigned int count,
408 void *kbuf, void __user *ubuf)
409{
c6e6771b 410#ifdef CONFIG_VSX
de79f7b9 411 u64 buf[33];
c6e6771b
MN
412 int i;
413#endif
f65255e8
RM
414 flush_fp_to_thread(target);
415
1ec8549d
AK
416#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
417 /* copy to local buffer then write that out */
418 if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
419 flush_altivec_to_thread(target);
420 flush_tmregs_to_thread(target);
421 for (i = 0; i < 32 ; i++)
422 buf[i] = target->thread.TS_TRANS_FPR(i);
423 buf[32] = target->thread.transact_fp.fpscr;
424 } else {
425 for (i = 0; i < 32 ; i++)
426 buf[i] = target->thread.TS_FPR(i);
427 buf[32] = target->thread.fp_state.fpscr;
428 }
429 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
430#endif
431
432#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
c6e6771b
MN
433 /* copy to local buffer then write that out */
434 for (i = 0; i < 32 ; i++)
435 buf[i] = target->thread.TS_FPR(i);
de79f7b9 436 buf[32] = target->thread.fp_state.fpscr;
c6e6771b 437 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1ec8549d 438#endif
c6e6771b 439
1ec8549d 440#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
de79f7b9 441 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
1e407ee3 442 offsetof(struct thread_fp_state, fpr[32]));
f65255e8
RM
443
444 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
de79f7b9 445 &target->thread.fp_state, 0, -1);
c6e6771b 446#endif
f65255e8
RM
447}
448
1ec8549d
AK
449/*
450 * When the transaction is active, 'transact_fp' holds the current running
451 * value of all FPR registers and 'fp_state' holds the last checkpointed
452 * value of all FPR registers for the current transaction. When transaction
453 * is not active 'fp_state' holds the current running state of all the FPR
 454 * registers. So this function which sets the current running values of
455 * all the FPR registers, needs to know whether any transaction is active
456 * or not.
457 *
458 * Userspace interface buffer layout:
459 *
460 * struct data {
461 * u64 fpr[32];
462 * u64 fpscr;
463 * };
464 *
465 * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
 466 * which determine the final code in this function. All the combinations of
467 * these two config options are possible except the one below as transactional
468 * memory config pulls in CONFIG_VSX automatically.
469 *
470 * !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
471 */
f65255e8
RM
472static int fpr_set(struct task_struct *target, const struct user_regset *regset,
473 unsigned int pos, unsigned int count,
474 const void *kbuf, const void __user *ubuf)
475{
c6e6771b 476#ifdef CONFIG_VSX
de79f7b9 477 u64 buf[33];
c6e6771b
MN
478 int i;
479#endif
f65255e8
RM
480 flush_fp_to_thread(target);
481
1ec8549d
AK
482#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
483 /* copy to local buffer then write that out */
484 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
485 if (i)
486 return i;
487
488 if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
489 flush_altivec_to_thread(target);
490 flush_tmregs_to_thread(target);
491 for (i = 0; i < 32 ; i++)
492 target->thread.TS_TRANS_FPR(i) = buf[i];
493 target->thread.transact_fp.fpscr = buf[32];
494 } else {
495 for (i = 0; i < 32 ; i++)
496 target->thread.TS_FPR(i) = buf[i];
497 target->thread.fp_state.fpscr = buf[32];
498 }
499 return 0;
500#endif
501
502#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
c6e6771b
MN
503 /* copy to local buffer then write that out */
504 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
505 if (i)
506 return i;
507 for (i = 0; i < 32 ; i++)
508 target->thread.TS_FPR(i) = buf[i];
de79f7b9 509 target->thread.fp_state.fpscr = buf[32];
c6e6771b 510 return 0;
1ec8549d
AK
511#endif
512
513#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
de79f7b9 514 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
1e407ee3 515 offsetof(struct thread_fp_state, fpr[32]));
f65255e8
RM
516
517 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
de79f7b9 518 &target->thread.fp_state, 0, -1);
c6e6771b 519#endif
f65255e8
RM
520}
521
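/*
 * Illustrative userspace sketch, not part of the original file: fetching the
 * FPR regset served by fpr_get() above via PTRACE_GETREGSET.  The struct
 * mirrors the "struct data" layout documented in the comments; struct and
 * function names are made up for the example.
 */
#include <stdint.h>
#include <elf.h>                /* NT_PRFPREG */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct example_ppc_fpregs {
        uint64_t fpr[32];
        uint64_t fpscr;
};

static int example_read_fprs(pid_t pid, struct example_ppc_fpregs *out)
{
        struct iovec iov = { .iov_base = out, .iov_len = sizeof(*out) };

        /* The tracee must already be ptrace-attached and stopped. */
        return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
}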
865418d8
BH
522#ifdef CONFIG_ALTIVEC
523/*
524 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 525 * The transfer totals 34 quadwords. Quadwords 0-31 contain the
526 * corresponding vector registers. Quadword 32 contains the vscr as the
527 * last word (offset 12) within that quadword. Quadword 33 contains the
528 * vrsave as the first word (offset 0) within the quadword.
529 *
530 * This definition of the VMX state is compatible with the current PPC32
531 * ptrace interface. This allows signal handling and ptrace to use the
532 * same structures. This also simplifies the implementation of a bi-arch
 533 * (combined 32- and 64-bit) gdb.
534 */
535
3caf06c6
RM
536static int vr_active(struct task_struct *target,
537 const struct user_regset *regset)
538{
539 flush_altivec_to_thread(target);
540 return target->thread.used_vr ? regset->n : 0;
541}
542
d844e279
AK
543/*
544 * When the transaction is active, 'transact_vr' holds the current running
545 * value of all the VMX registers and 'vr_state' holds the last checkpointed
546 * value of all the VMX registers for the current transaction to fall back
547 * on in case it aborts. When transaction is not active 'vr_state' holds
548 * the current running state of all the VMX registers. So this function which
549 * gets the current running values of all the VMX registers, needs to know
550 * whether any transaction is active or not.
551 *
552 * Userspace interface buffer layout:
553 *
554 * struct data {
555 * vector128 vr[32];
556 * vector128 vscr;
557 * vector128 vrsave;
558 * };
559 */
3caf06c6
RM
560static int vr_get(struct task_struct *target, const struct user_regset *regset,
561 unsigned int pos, unsigned int count,
562 void *kbuf, void __user *ubuf)
563{
d844e279 564 struct thread_vr_state *addr;
3caf06c6
RM
565 int ret;
566
567 flush_altivec_to_thread(target);
568
de79f7b9
PM
569 BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
570 offsetof(struct thread_vr_state, vr[32]));
3caf06c6 571
d844e279
AK
572#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
573 if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
574 flush_fp_to_thread(target);
575 flush_tmregs_to_thread(target);
576 addr = &target->thread.transact_vr;
577 } else {
578 addr = &target->thread.vr_state;
579 }
580#else
581 addr = &target->thread.vr_state;
582#endif
3caf06c6 583 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
d844e279 584 addr, 0,
3caf06c6
RM
585 33 * sizeof(vector128));
586 if (!ret) {
587 /*
588 * Copy out only the low-order word of vrsave.
589 */
590 union {
591 elf_vrreg_t reg;
592 u32 word;
593 } vrsave;
594 memset(&vrsave, 0, sizeof(vrsave));
d844e279
AK
595
596#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
597 if (MSR_TM_ACTIVE(target->thread.regs->msr))
598 vrsave.word = target->thread.transact_vrsave;
599 else
600 vrsave.word = target->thread.vrsave;
601#else
3caf06c6 602 vrsave.word = target->thread.vrsave;
d844e279
AK
603#endif
604
3caf06c6
RM
605 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
606 33 * sizeof(vector128), -1);
607 }
608
609 return ret;
610}
611
d844e279
AK
612/*
613 * When the transaction is active, 'transact_vr' holds the current running
614 * value of all the VMX registers and 'vr_state' holds the last checkpointed
615 * value of all the VMX registers for the current transaction to fall back
616 * on in case it aborts. When transaction is not active 'vr_state' holds
617 * the current running state of all the VMX registers. So this function which
618 * sets the current running values of all the VMX registers, needs to know
619 * whether any transaction is active or not.
620 *
621 * Userspace interface buffer layout:
622 *
623 * struct data {
624 * vector128 vr[32];
625 * vector128 vscr;
626 * vector128 vrsave;
627 * };
628 */
3caf06c6
RM
629static int vr_set(struct task_struct *target, const struct user_regset *regset,
630 unsigned int pos, unsigned int count,
631 const void *kbuf, const void __user *ubuf)
632{
d844e279 633 struct thread_vr_state *addr;
3caf06c6
RM
634 int ret;
635
636 flush_altivec_to_thread(target);
637
de79f7b9
PM
638 BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
639 offsetof(struct thread_vr_state, vr[32]));
3caf06c6 640
d844e279
AK
641#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
642 if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
643 flush_fp_to_thread(target);
644 flush_tmregs_to_thread(target);
645 addr = &target->thread.transact_vr;
646 } else {
647 addr = &target->thread.vr_state;
648 }
649#else
650 addr = &target->thread.vr_state;
651#endif
3caf06c6 652 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
d844e279 653 addr, 0,
de79f7b9 654 33 * sizeof(vector128));
3caf06c6
RM
655 if (!ret && count > 0) {
656 /*
657 * We use only the first word of vrsave.
658 */
659 union {
660 elf_vrreg_t reg;
661 u32 word;
662 } vrsave;
663 memset(&vrsave, 0, sizeof(vrsave));
d844e279
AK
664
665#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
666 if (MSR_TM_ACTIVE(target->thread.regs->msr))
667 vrsave.word = target->thread.transact_vrsave;
668 else
669 vrsave.word = target->thread.vrsave;
670#else
3caf06c6 671 vrsave.word = target->thread.vrsave;
d844e279 672#endif
3caf06c6
RM
673 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
674 33 * sizeof(vector128), -1);
d844e279
AK
675 if (!ret) {
676
677#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
678 if (MSR_TM_ACTIVE(target->thread.regs->msr))
679 target->thread.transact_vrsave = vrsave.word;
680 else
681 target->thread.vrsave = vrsave.word;
682#else
3caf06c6 683 target->thread.vrsave = vrsave.word;
d844e279
AK
684#endif
685 }
3caf06c6
RM
686 }
687
688 return ret;
689}
865418d8
BH
690#endif /* CONFIG_ALTIVEC */
691
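/*
 * Illustrative userspace sketch, not part of the original file: the VMX
 * regset above transfers 34 quadwords (vr0-vr31, then vscr, then vrsave in
 * the low word of the final quadword).  NT_PPC_VMX comes from <elf.h>;
 * struct and function names are made up.
 */
#include <stdint.h>
#include <elf.h>                /* NT_PPC_VMX */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct example_ppc_vmxregs {
        uint8_t vr[32][16];     /* vr0..vr31 */
        uint8_t vscr[16];       /* vscr in the last word (offset 12) */
        uint8_t vrsave[16];     /* vrsave in the first word (offset 0) */
};

static int example_read_vmx(pid_t pid, struct example_ppc_vmxregs *out)
{
        struct iovec iov = { .iov_base = out, .iov_len = sizeof(*out) };

        return ptrace(PTRACE_GETREGSET, pid, NT_PPC_VMX, &iov);
}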
ce48b210
MN
692#ifdef CONFIG_VSX
693/*
 694 * Currently to set and get all the vsx state, you need to call
25985edc 695 * the fp and VMX calls as well. This only get/sets the lower 32
ce48b210
MN
696 * 128bit VSX registers.
697 */
698
699static int vsr_active(struct task_struct *target,
700 const struct user_regset *regset)
701{
702 flush_vsx_to_thread(target);
703 return target->thread.used_vsr ? regset->n : 0;
704}
705
94b7d361
AK
706/*
707 * When the transaction is active, 'transact_fp' holds the current running
708 * value of all FPR registers and 'fp_state' holds the last checkpointed
709 * value of all FPR registers for the current transaction. When transaction
710 * is not active 'fp_state' holds the current running state of all the FPR
711 * registers. So this function which returns the current running values of
712 * all the FPR registers, needs to know whether any transaction is active
713 * or not.
714 *
715 * Userspace interface buffer layout:
716 *
717 * struct data {
718 * u64 vsx[32];
719 * };
720 */
ce48b210
MN
721static int vsr_get(struct task_struct *target, const struct user_regset *regset,
722 unsigned int pos, unsigned int count,
723 void *kbuf, void __user *ubuf)
724{
de79f7b9 725 u64 buf[32];
f3e909c2 726 int ret, i;
ce48b210 727
94b7d361
AK
728#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
729 flush_fp_to_thread(target);
730 flush_altivec_to_thread(target);
731 flush_tmregs_to_thread(target);
732#endif
ce48b210
MN
733 flush_vsx_to_thread(target);
734
94b7d361
AK
735#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
736 if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
737 for (i = 0; i < 32 ; i++)
738 buf[i] = target->thread.
739 transact_fp.fpr[i][TS_VSRLOWOFFSET];
740 } else {
741 for (i = 0; i < 32 ; i++)
742 buf[i] = target->thread.
743 fp_state.fpr[i][TS_VSRLOWOFFSET];
744 }
745#else
f3e909c2 746 for (i = 0; i < 32 ; i++)
de79f7b9 747 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
94b7d361 748#endif
ce48b210 749 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
f3e909c2 750 buf, 0, 32 * sizeof(double));
ce48b210
MN
751
752 return ret;
753}
754
94b7d361
AK
755/*
756 * When the transaction is active, 'transact_fp' holds the current running
757 * value of all FPR registers and 'fp_state' holds the last checkpointed
758 * value of all FPR registers for the current transaction. When transaction
759 * is not active 'fp_state' holds the current running state of all the FPR
760 * registers. So this function which sets the current running values of all
761 * the FPR registers, needs to know whether any transaction is active or not.
762 *
763 * Userspace interface buffer layout:
764 *
765 * struct data {
766 * u64 vsx[32];
767 * };
768 */
ce48b210
MN
769static int vsr_set(struct task_struct *target, const struct user_regset *regset,
770 unsigned int pos, unsigned int count,
771 const void *kbuf, const void __user *ubuf)
772{
de79f7b9 773 u64 buf[32];
f3e909c2 774 int ret,i;
ce48b210 775
94b7d361
AK
776#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
777 flush_fp_to_thread(target);
778 flush_altivec_to_thread(target);
779 flush_tmregs_to_thread(target);
780#endif
ce48b210
MN
781 flush_vsx_to_thread(target);
782
783 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
f3e909c2 784 buf, 0, 32 * sizeof(double));
94b7d361
AK
785
786#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
787 if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
788 for (i = 0; i < 32 ; i++)
789 target->thread.transact_fp.
790 fpr[i][TS_VSRLOWOFFSET] = buf[i];
791 } else {
792 for (i = 0; i < 32 ; i++)
793 target->thread.fp_state.
794 fpr[i][TS_VSRLOWOFFSET] = buf[i];
795 }
796#else
f3e909c2 797 for (i = 0; i < 32 ; i++)
de79f7b9 798 target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
94b7d361 799#endif
f3e909c2 800
ce48b210
MN
801
802 return ret;
803}
804#endif /* CONFIG_VSX */
805
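/*
 * Illustrative userspace sketch, not part of the original file: writing the
 * VSX regset handled by vsr_set() above.  As the comment notes, a complete
 * VSX update also needs the FPR and VMX regsets; NT_PPC_VSX only carries the
 * 32 u64 values shown here.  The function name is made up.
 */
#include <stdint.h>
#include <elf.h>                /* NT_PPC_VSX */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static int example_write_vsx(pid_t pid, const uint64_t vsx[32])
{
        struct iovec iov = {
                .iov_base = (void *)vsx,
                .iov_len  = 32 * sizeof(uint64_t),
        };

        return ptrace(PTRACE_SETREGSET, pid, NT_PPC_VSX, &iov);
}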
865418d8
BH
806#ifdef CONFIG_SPE
807
808/*
809 * For get_evrregs/set_evrregs functions 'data' has the following layout:
810 *
811 * struct {
812 * u32 evr[32];
813 * u64 acc;
814 * u32 spefscr;
815 * }
816 */
817
a4e4b175
RM
818static int evr_active(struct task_struct *target,
819 const struct user_regset *regset)
865418d8 820{
a4e4b175
RM
821 flush_spe_to_thread(target);
822 return target->thread.used_spe ? regset->n : 0;
823}
865418d8 824
a4e4b175
RM
825static int evr_get(struct task_struct *target, const struct user_regset *regset,
826 unsigned int pos, unsigned int count,
827 void *kbuf, void __user *ubuf)
828{
829 int ret;
865418d8 830
a4e4b175 831 flush_spe_to_thread(target);
865418d8 832
a4e4b175
RM
833 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
834 &target->thread.evr,
835 0, sizeof(target->thread.evr));
865418d8 836
a4e4b175
RM
837 BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
838 offsetof(struct thread_struct, spefscr));
839
840 if (!ret)
841 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
842 &target->thread.acc,
843 sizeof(target->thread.evr), -1);
844
845 return ret;
846}
847
848static int evr_set(struct task_struct *target, const struct user_regset *regset,
849 unsigned int pos, unsigned int count,
850 const void *kbuf, const void __user *ubuf)
851{
852 int ret;
853
854 flush_spe_to_thread(target);
855
856 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
857 &target->thread.evr,
858 0, sizeof(target->thread.evr));
865418d8 859
a4e4b175
RM
860 BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
861 offsetof(struct thread_struct, spefscr));
862
863 if (!ret)
864 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
865 &target->thread.acc,
866 sizeof(target->thread.evr), -1);
867
868 return ret;
865418d8 869}
865418d8
BH
870#endif /* CONFIG_SPE */
871
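/*
 * Illustrative userspace sketch, not part of the original file: the SPE
 * regset above packs evr[32], acc and spefscr into 35 32-bit slots (acc
 * occupies two).  NT_PPC_SPE comes from <elf.h>; the kernel may transfer
 * fewer bytes than sizeof(*out) because of trailing struct padding.  Names
 * are made up for the example.
 */
#include <stdint.h>
#include <elf.h>                /* NT_PPC_SPE */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct example_ppc_speregs {
        uint32_t evr[32];
        uint64_t acc;
        uint32_t spefscr;
};

static int example_read_spe(pid_t pid, struct example_ppc_speregs *out)
{
        struct iovec iov = { .iov_base = out, .iov_len = sizeof(*out) };

        return ptrace(PTRACE_GETREGSET, pid, NT_PPC_SPE, &iov);
}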
25847fb1
AK
872#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
873/**
874 * tm_cgpr_active - get active number of registers in CGPR
875 * @target: The target task.
876 * @regset: The user regset structure.
877 *
878 * This function checks for the active number of available
 879 * registers in transaction checkpointed GPR category.
880 */
881static int tm_cgpr_active(struct task_struct *target,
882 const struct user_regset *regset)
883{
884 if (!cpu_has_feature(CPU_FTR_TM))
885 return -ENODEV;
886
887 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
888 return 0;
889
890 return regset->n;
891}
892
893/**
894 * tm_cgpr_get - get CGPR registers
895 * @target: The target task.
896 * @regset: The user regset structure.
897 * @pos: The buffer position.
898 * @count: Number of bytes to copy.
899 * @kbuf: Kernel buffer to copy from.
900 * @ubuf: User buffer to copy into.
901 *
902 * This function gets transaction checkpointed GPR registers.
903 *
904 * When the transaction is active, 'ckpt_regs' holds all the checkpointed
905 * GPR register values for the current transaction to fall back on if it
906 * aborts in between. This function gets those checkpointed GPR registers.
907 * The userspace interface buffer layout is as follows.
908 *
909 * struct data {
910 * struct pt_regs ckpt_regs;
911 * };
912 */
913static int tm_cgpr_get(struct task_struct *target,
914 const struct user_regset *regset,
915 unsigned int pos, unsigned int count,
916 void *kbuf, void __user *ubuf)
917{
918 int ret;
919
920 if (!cpu_has_feature(CPU_FTR_TM))
921 return -ENODEV;
922
923 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
924 return -ENODATA;
925
926 flush_fp_to_thread(target);
927 flush_altivec_to_thread(target);
928 flush_tmregs_to_thread(target);
929
930 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
931 &target->thread.ckpt_regs,
932 0, offsetof(struct pt_regs, msr));
933 if (!ret) {
934 unsigned long msr = get_user_ckpt_msr(target);
935
936 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
937 offsetof(struct pt_regs, msr),
938 offsetof(struct pt_regs, msr) +
939 sizeof(msr));
940 }
941
942 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
943 offsetof(struct pt_regs, msr) + sizeof(long));
944
945 if (!ret)
946 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
947 &target->thread.ckpt_regs.orig_gpr3,
948 offsetof(struct pt_regs, orig_gpr3),
949 sizeof(struct pt_regs));
950 if (!ret)
951 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
952 sizeof(struct pt_regs), -1);
953
954 return ret;
955}
956
957/*
958 * tm_cgpr_set - set the CGPR registers
959 * @target: The target task.
960 * @regset: The user regset structure.
961 * @pos: The buffer position.
962 * @count: Number of bytes to copy.
963 * @kbuf: Kernel buffer to copy into.
964 * @ubuf: User buffer to copy from.
965 *
966 * This function sets in transaction checkpointed GPR registers.
967 *
968 * When the transaction is active, 'ckpt_regs' holds the checkpointed
969 * GPR register values for the current transaction to fall back on if it
970 * aborts in between. This function sets those checkpointed GPR registers.
971 * The userspace interface buffer layout is as follows.
972 *
973 * struct data {
974 * struct pt_regs ckpt_regs;
975 * };
976 */
977static int tm_cgpr_set(struct task_struct *target,
978 const struct user_regset *regset,
979 unsigned int pos, unsigned int count,
980 const void *kbuf, const void __user *ubuf)
981{
982 unsigned long reg;
983 int ret;
984
985 if (!cpu_has_feature(CPU_FTR_TM))
986 return -ENODEV;
987
988 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
989 return -ENODATA;
990
991 flush_fp_to_thread(target);
992 flush_altivec_to_thread(target);
993 flush_tmregs_to_thread(target);
994
995 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
996 &target->thread.ckpt_regs,
997 0, PT_MSR * sizeof(reg));
998
999 if (!ret && count > 0) {
1000 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
1001 PT_MSR * sizeof(reg),
1002 (PT_MSR + 1) * sizeof(reg));
1003 if (!ret)
1004 ret = set_user_ckpt_msr(target, reg);
1005 }
1006
1007 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
1008 offsetof(struct pt_regs, msr) + sizeof(long));
1009
1010 if (!ret)
1011 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1012 &target->thread.ckpt_regs.orig_gpr3,
1013 PT_ORIG_R3 * sizeof(reg),
1014 (PT_MAX_PUT_REG + 1) * sizeof(reg));
1015
1016 if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
1017 ret = user_regset_copyin_ignore(
1018 &pos, &count, &kbuf, &ubuf,
1019 (PT_MAX_PUT_REG + 1) * sizeof(reg),
1020 PT_TRAP * sizeof(reg));
1021
1022 if (!ret && count > 0) {
1023 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
1024 PT_TRAP * sizeof(reg),
1025 (PT_TRAP + 1) * sizeof(reg));
1026 if (!ret)
1027 ret = set_user_ckpt_trap(target, reg);
1028 }
1029
1030 if (!ret)
1031 ret = user_regset_copyin_ignore(
1032 &pos, &count, &kbuf, &ubuf,
1033 (PT_TRAP + 1) * sizeof(reg), -1);
1034
1035 return ret;
1036}
19cbcbf7
AK
1037
1038/**
1039 * tm_cfpr_active - get active number of registers in CFPR
1040 * @target: The target task.
1041 * @regset: The user regset structure.
1042 *
1043 * This function checks for the active number of available
 1044 * registers in transaction checkpointed FPR category.
1045 */
1046static int tm_cfpr_active(struct task_struct *target,
1047 const struct user_regset *regset)
1048{
1049 if (!cpu_has_feature(CPU_FTR_TM))
1050 return -ENODEV;
1051
1052 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1053 return 0;
1054
1055 return regset->n;
1056}
1057
1058/**
1059 * tm_cfpr_get - get CFPR registers
1060 * @target: The target task.
1061 * @regset: The user regset structure.
1062 * @pos: The buffer position.
1063 * @count: Number of bytes to copy.
1064 * @kbuf: Kernel buffer to copy from.
1065 * @ubuf: User buffer to copy into.
1066 *
1067 * This function gets in transaction checkpointed FPR registers.
1068 *
1069 * When the transaction is active 'fp_state' holds the checkpointed
1070 * values for the current transaction to fall back on if it aborts
1071 * in between. This function gets those checkpointed FPR registers.
1072 * The userspace interface buffer layout is as follows.
1073 *
1074 * struct data {
1075 * u64 fpr[32];
1076 * u64 fpscr;
1077 *};
1078 */
1079static int tm_cfpr_get(struct task_struct *target,
1080 const struct user_regset *regset,
1081 unsigned int pos, unsigned int count,
1082 void *kbuf, void __user *ubuf)
1083{
1084 u64 buf[33];
1085 int i;
1086
1087 if (!cpu_has_feature(CPU_FTR_TM))
1088 return -ENODEV;
1089
1090 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1091 return -ENODATA;
1092
1093 flush_fp_to_thread(target);
1094 flush_altivec_to_thread(target);
1095 flush_tmregs_to_thread(target);
1096
1097 /* copy to local buffer then write that out */
1098 for (i = 0; i < 32 ; i++)
1099 buf[i] = target->thread.TS_FPR(i);
1100 buf[32] = target->thread.fp_state.fpscr;
1101 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1102}
1103
1104/**
1105 * tm_cfpr_set - set CFPR registers
1106 * @target: The target task.
1107 * @regset: The user regset structure.
1108 * @pos: The buffer position.
1109 * @count: Number of bytes to copy.
1110 * @kbuf: Kernel buffer to copy into.
1111 * @ubuf: User buffer to copy from.
1112 *
1113 * This function sets in transaction checkpointed FPR registers.
1114 *
1115 * When the transaction is active 'fp_state' holds the checkpointed
1116 * FPR register values for the current transaction to fall back on
1117 * if it aborts in between. This function sets these checkpointed
1118 * FPR registers. The userspace interface buffer layout is as follows.
1119 *
1120 * struct data {
1121 * u64 fpr[32];
1122 * u64 fpscr;
1123 *};
1124 */
1125static int tm_cfpr_set(struct task_struct *target,
1126 const struct user_regset *regset,
1127 unsigned int pos, unsigned int count,
1128 const void *kbuf, const void __user *ubuf)
1129{
1130 u64 buf[33];
1131 int i;
1132
1133 if (!cpu_has_feature(CPU_FTR_TM))
1134 return -ENODEV;
1135
1136 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1137 return -ENODATA;
1138
1139 flush_fp_to_thread(target);
1140 flush_altivec_to_thread(target);
1141 flush_tmregs_to_thread(target);
1142
1143 /* copy to local buffer then write that out */
1144 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1145 if (i)
1146 return i;
1147 for (i = 0; i < 32 ; i++)
1148 target->thread.TS_FPR(i) = buf[i];
1149 target->thread.fp_state.fpscr = buf[32];
1150 return 0;
1151}
8c13f599
AK
1152
1153/**
1154 * tm_cvmx_active - get active number of registers in CVMX
1155 * @target: The target task.
1156 * @regset: The user regset structure.
1157 *
1158 * This function checks for the active number of available
 1159 * registers in checkpointed VMX category.
1160 */
1161static int tm_cvmx_active(struct task_struct *target,
1162 const struct user_regset *regset)
1163{
1164 if (!cpu_has_feature(CPU_FTR_TM))
1165 return -ENODEV;
1166
1167 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1168 return 0;
1169
1170 return regset->n;
1171}
1172
1173/**
 1174 * tm_cvmx_get - get CVMX registers
1175 * @target: The target task.
1176 * @regset: The user regset structure.
1177 * @pos: The buffer position.
1178 * @count: Number of bytes to copy.
1179 * @kbuf: Kernel buffer to copy from.
1180 * @ubuf: User buffer to copy into.
1181 *
1182 * This function gets in transaction checkpointed VMX registers.
1183 *
 1184 * When the transaction is active 'vr_state' and 'vrsave' hold
1185 * the checkpointed values for the current transaction to fall
1186 * back on if it aborts in between. The userspace interface buffer
1187 * layout is as follows.
1188 *
1189 * struct data {
1190 * vector128 vr[32];
1191 * vector128 vscr;
1192 * vector128 vrsave;
1193 *};
1194 */
1195static int tm_cvmx_get(struct task_struct *target,
1196 const struct user_regset *regset,
1197 unsigned int pos, unsigned int count,
1198 void *kbuf, void __user *ubuf)
1199{
1200 int ret;
1201
1202 BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1203
1204 if (!cpu_has_feature(CPU_FTR_TM))
1205 return -ENODEV;
1206
1207 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1208 return -ENODATA;
1209
1210 /* Flush the state */
1211 flush_fp_to_thread(target);
1212 flush_altivec_to_thread(target);
1213 flush_tmregs_to_thread(target);
1214
1215 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1216 &target->thread.vr_state, 0,
1217 33 * sizeof(vector128));
1218 if (!ret) {
1219 /*
1220 * Copy out only the low-order word of vrsave.
1221 */
1222 union {
1223 elf_vrreg_t reg;
1224 u32 word;
1225 } vrsave;
1226 memset(&vrsave, 0, sizeof(vrsave));
1227 vrsave.word = target->thread.vrsave;
1228 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
1229 33 * sizeof(vector128), -1);
1230 }
1231
1232 return ret;
1233}
1234
1235/**
 1236 * tm_cvmx_set - set CVMX registers
1237 * @target: The target task.
1238 * @regset: The user regset structure.
1239 * @pos: The buffer position.
1240 * @count: Number of bytes to copy.
1241 * @kbuf: Kernel buffer to copy into.
1242 * @ubuf: User buffer to copy from.
1243 *
1244 * This function sets in transaction checkpointed VMX registers.
1245 *
 1246 * When the transaction is active 'vr_state' and 'vrsave' hold
1247 * the checkpointed values for the current transaction to fall
1248 * back on if it aborts in between. The userspace interface buffer
1249 * layout is as follows.
1250 *
1251 * struct data {
1252 * vector128 vr[32];
1253 * vector128 vscr;
1254 * vector128 vrsave;
1255 *};
1256 */
1257static int tm_cvmx_set(struct task_struct *target,
1258 const struct user_regset *regset,
1259 unsigned int pos, unsigned int count,
1260 const void *kbuf, const void __user *ubuf)
1261{
1262 int ret;
1263
1264 BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1265
1266 if (!cpu_has_feature(CPU_FTR_TM))
1267 return -ENODEV;
1268
1269 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1270 return -ENODATA;
1271
1272 flush_fp_to_thread(target);
1273 flush_altivec_to_thread(target);
1274 flush_tmregs_to_thread(target);
1275
1276 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1277 &target->thread.vr_state, 0,
1278 33 * sizeof(vector128));
1279 if (!ret && count > 0) {
1280 /*
1281 * We use only the low-order word of vrsave.
1282 */
1283 union {
1284 elf_vrreg_t reg;
1285 u32 word;
1286 } vrsave;
1287 memset(&vrsave, 0, sizeof(vrsave));
1288 vrsave.word = target->thread.vrsave;
1289 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
1290 33 * sizeof(vector128), -1);
1291 if (!ret)
1292 target->thread.vrsave = vrsave.word;
1293 }
1294
1295 return ret;
1296}
25847fb1 1297#endif
865418d8 1298
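/*
 * Illustrative userspace sketch, not part of the original file, tying the
 * checkpointed regsets above together: NT_PPC_TM_CGPR, NT_PPC_TM_CFPR and
 * NT_PPC_TM_CVMX all behave the same way, succeeding only while the tracee
 * has an active transaction and failing with ENODATA otherwise.  The note
 * constants are assumed to come from <linux/elf.h>; the function name is
 * made up.
 */
#include <errno.h>
#include <stddef.h>
#include <linux/elf.h>          /* NT_PPC_TM_CVMX */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static int example_read_ckpt_vmx(pid_t pid, void *buf, size_t len)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };

        if (ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_CVMX, &iov) == -1)
                return errno == ENODATA ? 0 : -1; /* 0: no active transaction */
        return 1;       /* checkpointed VMX state copied into buf */
}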
80fdf470
RM
1299/*
1300 * These are our native regset flavors.
1301 */
1302enum powerpc_regset {
1303 REGSET_GPR,
1304 REGSET_FPR,
1305#ifdef CONFIG_ALTIVEC
1306 REGSET_VMX,
1307#endif
ce48b210
MN
1308#ifdef CONFIG_VSX
1309 REGSET_VSX,
1310#endif
80fdf470
RM
1311#ifdef CONFIG_SPE
1312 REGSET_SPE,
1313#endif
25847fb1
AK
1314#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1315 REGSET_TM_CGPR, /* TM checkpointed GPR registers */
19cbcbf7 1316 REGSET_TM_CFPR, /* TM checkpointed FPR registers */
8c13f599 1317 REGSET_TM_CVMX, /* TM checkpointed VMX registers */
25847fb1 1318#endif
80fdf470
RM
1319};
1320
1321static const struct user_regset native_regsets[] = {
1322 [REGSET_GPR] = {
1323 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1324 .size = sizeof(long), .align = sizeof(long),
1325 .get = gpr_get, .set = gpr_set
1326 },
1327 [REGSET_FPR] = {
1328 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1329 .size = sizeof(double), .align = sizeof(double),
1330 .get = fpr_get, .set = fpr_set
1331 },
1332#ifdef CONFIG_ALTIVEC
1333 [REGSET_VMX] = {
1334 .core_note_type = NT_PPC_VMX, .n = 34,
1335 .size = sizeof(vector128), .align = sizeof(vector128),
1336 .active = vr_active, .get = vr_get, .set = vr_set
1337 },
1338#endif
ce48b210
MN
1339#ifdef CONFIG_VSX
1340 [REGSET_VSX] = {
f3e909c2
MN
1341 .core_note_type = NT_PPC_VSX, .n = 32,
1342 .size = sizeof(double), .align = sizeof(double),
ce48b210
MN
1343 .active = vsr_active, .get = vsr_get, .set = vsr_set
1344 },
1345#endif
80fdf470
RM
1346#ifdef CONFIG_SPE
1347 [REGSET_SPE] = {
a0b38b4e 1348 .core_note_type = NT_PPC_SPE, .n = 35,
80fdf470
RM
1349 .size = sizeof(u32), .align = sizeof(u32),
1350 .active = evr_active, .get = evr_get, .set = evr_set
1351 },
1352#endif
25847fb1
AK
1353#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1354 [REGSET_TM_CGPR] = {
1355 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1356 .size = sizeof(long), .align = sizeof(long),
1357 .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1358 },
19cbcbf7
AK
1359 [REGSET_TM_CFPR] = {
1360 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1361 .size = sizeof(double), .align = sizeof(double),
1362 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1363 },
8c13f599
AK
1364 [REGSET_TM_CVMX] = {
1365 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1366 .size = sizeof(vector128), .align = sizeof(vector128),
1367 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1368 },
25847fb1 1369#endif
80fdf470
RM
1370};
1371
1372static const struct user_regset_view user_ppc_native_view = {
1373 .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
1374 .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
1375};
1376
fa8f5cb0
RM
1377#ifdef CONFIG_PPC64
1378#include <linux/compat.h>
1379
04fcadce 1380static int gpr32_get_common(struct task_struct *target,
fa8f5cb0
RM
1381 const struct user_regset *regset,
1382 unsigned int pos, unsigned int count,
04fcadce 1383 void *kbuf, void __user *ubuf, bool tm_active)
fa8f5cb0
RM
1384{
1385 const unsigned long *regs = &target->thread.regs->gpr[0];
04fcadce 1386 const unsigned long *ckpt_regs;
fa8f5cb0
RM
1387 compat_ulong_t *k = kbuf;
1388 compat_ulong_t __user *u = ubuf;
1389 compat_ulong_t reg;
a71f5d5d 1390 int i;
fa8f5cb0 1391
04fcadce
AK
1392#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1393 ckpt_regs = &target->thread.ckpt_regs.gpr[0];
1394#endif
1395 if (tm_active) {
1396 regs = ckpt_regs;
1397 } else {
1398 if (target->thread.regs == NULL)
1399 return -EIO;
1400
1401 if (!FULL_REGS(target->thread.regs)) {
1402 /*
1403 * We have a partial register set.
1404 * Fill 14-31 with bogus values.
1405 */
1406 for (i = 14; i < 32; i++)
1407 target->thread.regs->gpr[i] = NV_REG_POISON;
1408 }
a71f5d5d 1409 }
fa8f5cb0
RM
1410
1411 pos /= sizeof(reg);
1412 count /= sizeof(reg);
1413
1414 if (kbuf)
1415 for (; count > 0 && pos < PT_MSR; --count)
1416 *k++ = regs[pos++];
1417 else
1418 for (; count > 0 && pos < PT_MSR; --count)
1419 if (__put_user((compat_ulong_t) regs[pos++], u++))
1420 return -EFAULT;
1421
1422 if (count > 0 && pos == PT_MSR) {
1423 reg = get_user_msr(target);
1424 if (kbuf)
1425 *k++ = reg;
1426 else if (__put_user(reg, u++))
1427 return -EFAULT;
1428 ++pos;
1429 --count;
1430 }
1431
1432 if (kbuf)
1433 for (; count > 0 && pos < PT_REGS_COUNT; --count)
1434 *k++ = regs[pos++];
1435 else
1436 for (; count > 0 && pos < PT_REGS_COUNT; --count)
1437 if (__put_user((compat_ulong_t) regs[pos++], u++))
1438 return -EFAULT;
1439
1440 kbuf = k;
1441 ubuf = u;
1442 pos *= sizeof(reg);
1443 count *= sizeof(reg);
1444 return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
1445 PT_REGS_COUNT * sizeof(reg), -1);
1446}
1447
04fcadce 1448static int gpr32_set_common(struct task_struct *target,
fa8f5cb0
RM
1449 const struct user_regset *regset,
1450 unsigned int pos, unsigned int count,
04fcadce 1451 const void *kbuf, const void __user *ubuf, bool tm_active)
fa8f5cb0
RM
1452{
1453 unsigned long *regs = &target->thread.regs->gpr[0];
04fcadce 1454 unsigned long *ckpt_regs;
fa8f5cb0
RM
1455 const compat_ulong_t *k = kbuf;
1456 const compat_ulong_t __user *u = ubuf;
1457 compat_ulong_t reg;
1458
04fcadce
AK
1459#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1460 ckpt_regs = &target->thread.ckpt_regs.gpr[0];
1461#endif
fa8f5cb0 1462
04fcadce
AK
1463 if (tm_active) {
1464 regs = ckpt_regs;
1465 } else {
1466 regs = &target->thread.regs->gpr[0];
1467
1468 if (target->thread.regs == NULL)
1469 return -EIO;
1470
1471 CHECK_FULL_REGS(target->thread.regs);
1472 }
fa8f5cb0
RM
1473
1474 pos /= sizeof(reg);
1475 count /= sizeof(reg);
1476
1477 if (kbuf)
1478 for (; count > 0 && pos < PT_MSR; --count)
1479 regs[pos++] = *k++;
1480 else
1481 for (; count > 0 && pos < PT_MSR; --count) {
1482 if (__get_user(reg, u++))
1483 return -EFAULT;
1484 regs[pos++] = reg;
1485 }
1486
1487
1488 if (count > 0 && pos == PT_MSR) {
1489 if (kbuf)
1490 reg = *k++;
1491 else if (__get_user(reg, u++))
1492 return -EFAULT;
1493 set_user_msr(target, reg);
1494 ++pos;
1495 --count;
1496 }
1497
c2372eb9 1498 if (kbuf) {
fa8f5cb0
RM
1499 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
1500 regs[pos++] = *k++;
c2372eb9
RM
1501 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
1502 ++k;
1503 } else {
fa8f5cb0
RM
1504 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
1505 if (__get_user(reg, u++))
1506 return -EFAULT;
1507 regs[pos++] = reg;
1508 }
c2372eb9
RM
1509 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
1510 if (__get_user(reg, u++))
1511 return -EFAULT;
1512 }
fa8f5cb0
RM
1513
1514 if (count > 0 && pos == PT_TRAP) {
1515 if (kbuf)
1516 reg = *k++;
1517 else if (__get_user(reg, u++))
1518 return -EFAULT;
1519 set_user_trap(target, reg);
1520 ++pos;
1521 --count;
1522 }
1523
1524 kbuf = k;
1525 ubuf = u;
1526 pos *= sizeof(reg);
1527 count *= sizeof(reg);
1528 return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
1529 (PT_TRAP + 1) * sizeof(reg), -1);
1530}
1531
25847fb1
AK
1532#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1533static int tm_cgpr32_get(struct task_struct *target,
1534 const struct user_regset *regset,
1535 unsigned int pos, unsigned int count,
1536 void *kbuf, void __user *ubuf)
1537{
1538 return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 1);
1539}
1540
1541static int tm_cgpr32_set(struct task_struct *target,
1542 const struct user_regset *regset,
1543 unsigned int pos, unsigned int count,
1544 const void *kbuf, const void __user *ubuf)
1545{
1546 return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 1);
1547}
1548#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1549
04fcadce
AK
1550static int gpr32_get(struct task_struct *target,
1551 const struct user_regset *regset,
1552 unsigned int pos, unsigned int count,
1553 void *kbuf, void __user *ubuf)
1554{
1555 return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 0);
1556}
1557
1558static int gpr32_set(struct task_struct *target,
1559 const struct user_regset *regset,
1560 unsigned int pos, unsigned int count,
1561 const void *kbuf, const void __user *ubuf)
1562{
1563 return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 0);
1564}
1565
fa8f5cb0
RM
1566/*
1567 * These are the regset flavors matching the CONFIG_PPC32 native set.
1568 */
1569static const struct user_regset compat_regsets[] = {
1570 [REGSET_GPR] = {
1571 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1572 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
1573 .get = gpr32_get, .set = gpr32_set
1574 },
1575 [REGSET_FPR] = {
1576 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1577 .size = sizeof(double), .align = sizeof(double),
1578 .get = fpr_get, .set = fpr_set
1579 },
1580#ifdef CONFIG_ALTIVEC
1581 [REGSET_VMX] = {
1582 .core_note_type = NT_PPC_VMX, .n = 34,
1583 .size = sizeof(vector128), .align = sizeof(vector128),
1584 .active = vr_active, .get = vr_get, .set = vr_set
1585 },
1586#endif
1587#ifdef CONFIG_SPE
1588 [REGSET_SPE] = {
24f1a849 1589 .core_note_type = NT_PPC_SPE, .n = 35,
fa8f5cb0
RM
1590 .size = sizeof(u32), .align = sizeof(u32),
1591 .active = evr_active, .get = evr_get, .set = evr_set
1592 },
1593#endif
25847fb1
AK
1594#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1595 [REGSET_TM_CGPR] = {
1596 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1597 .size = sizeof(long), .align = sizeof(long),
1598 .active = tm_cgpr_active,
1599 .get = tm_cgpr32_get, .set = tm_cgpr32_set
1600 },
19cbcbf7
AK
1601 [REGSET_TM_CFPR] = {
1602 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1603 .size = sizeof(double), .align = sizeof(double),
1604 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1605 },
8c13f599
AK
1606 [REGSET_TM_CVMX] = {
1607 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1608 .size = sizeof(vector128), .align = sizeof(vector128),
1609 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1610 },
25847fb1 1611#endif
fa8f5cb0
RM
1612};
1613
1614static const struct user_regset_view user_ppc_compat_view = {
1615 .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
1616 .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
1617};
1618#endif /* CONFIG_PPC64 */
1619
80fdf470
RM
1620const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1621{
fa8f5cb0
RM
1622#ifdef CONFIG_PPC64
1623 if (test_tsk_thread_flag(task, TIF_32BIT))
1624 return &user_ppc_compat_view;
1625#endif
80fdf470
RM
1626 return &user_ppc_native_view;
1627}
1628
1629
2a84b0d7 1630void user_enable_single_step(struct task_struct *task)
865418d8
BH
1631{
1632 struct pt_regs *regs = task->thread.regs;
1633
1634 if (regs != NULL) {
172ae2e7 1635#ifdef CONFIG_PPC_ADV_DEBUG_REGS
51ae8d4a
BB
1636 task->thread.debug.dbcr0 &= ~DBCR0_BT;
1637 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
865418d8
BH
1638 regs->msr |= MSR_DE;
1639#else
ec097c84 1640 regs->msr &= ~MSR_BE;
865418d8
BH
1641 regs->msr |= MSR_SE;
1642#endif
1643 }
1644 set_tsk_thread_flag(task, TIF_SINGLESTEP);
1645}
1646
ec097c84
RM
1647void user_enable_block_step(struct task_struct *task)
1648{
1649 struct pt_regs *regs = task->thread.regs;
1650
1651 if (regs != NULL) {
172ae2e7 1652#ifdef CONFIG_PPC_ADV_DEBUG_REGS
51ae8d4a
BB
1653 task->thread.debug.dbcr0 &= ~DBCR0_IC;
1654 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
ec097c84
RM
1655 regs->msr |= MSR_DE;
1656#else
1657 regs->msr &= ~MSR_SE;
1658 regs->msr |= MSR_BE;
1659#endif
1660 }
1661 set_tsk_thread_flag(task, TIF_SINGLESTEP);
1662}
1663
2a84b0d7 1664void user_disable_single_step(struct task_struct *task)
865418d8
BH
1665{
1666 struct pt_regs *regs = task->thread.regs;
1667
1668 if (regs != NULL) {
172ae2e7 1669#ifdef CONFIG_PPC_ADV_DEBUG_REGS
3bffb652
DK
1670 /*
1671 * The logic to disable single stepping should be as
1672 * simple as turning off the Instruction Complete flag.
1673 * And, after doing so, if all debug flags are off, turn
1674 * off DBCR0(IDM) and MSR(DE) .... Torez
1675 */
682775b8 1676 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
3bffb652
DK
1677 /*
1678 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
1679 */
51ae8d4a
BB
1680 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
1681 task->thread.debug.dbcr1)) {
3bffb652
DK
1682 /*
1683 * All debug events were off.....
1684 */
51ae8d4a 1685 task->thread.debug.dbcr0 &= ~DBCR0_IDM;
28477fb1
DK
1686 regs->msr &= ~MSR_DE;
1687 }
865418d8 1688#else
ec097c84 1689 regs->msr &= ~(MSR_SE | MSR_BE);
865418d8
BH
1690#endif
1691 }
1692 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
1693}
1694
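/*
 * Illustrative userspace sketch, not part of the original file: the MSR/DBCR0
 * bits flipped by user_enable_single_step() above are driven by the generic
 * PTRACE_SINGLESTEP request.  The helper name is made up.
 */
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static int example_step_once(pid_t pid)
{
        int status;

        if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) == -1)
                return -1;
        if (waitpid(pid, &status, 0) == -1)
                return -1;
        /* The tracee stops with SIGTRAP after executing one instruction. */
        return (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP) ? 0 : -1;
}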
5aae8a53 1695#ifdef CONFIG_HAVE_HW_BREAKPOINT
a8b0ca17 1696void ptrace_triggered(struct perf_event *bp,
5aae8a53
P
1697 struct perf_sample_data *data, struct pt_regs *regs)
1698{
1699 struct perf_event_attr attr;
1700
1701 /*
1702 * Disable the breakpoint request here since ptrace has defined a
1703 * one-shot behaviour for breakpoint exceptions in PPC64.
1704 * The SIGTRAP signal is generated automatically for us in do_dabr().
1705 * We don't have to do anything about that here
1706 */
1707 attr = bp->attr;
1708 attr.disabled = true;
1709 modify_user_hw_breakpoint(bp, &attr);
1710}
1711#endif /* CONFIG_HAVE_HW_BREAKPOINT */
1712
e51df2c1 1713static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
abd06505
BH
1714 unsigned long data)
1715{
5aae8a53
P
1716#ifdef CONFIG_HAVE_HW_BREAKPOINT
1717 int ret;
1718 struct thread_struct *thread = &(task->thread);
1719 struct perf_event *bp;
1720 struct perf_event_attr attr;
1721#endif /* CONFIG_HAVE_HW_BREAKPOINT */
9422de3e
MN
1722#ifndef CONFIG_PPC_ADV_DEBUG_REGS
1723 struct arch_hw_breakpoint hw_brk;
1724#endif
5aae8a53 1725
d6a61bfc
LM
 1726 /* For ppc64 we support one DABR and no IABR's at the moment.
1727 * For embedded processors we support one DAC and no IAC's at the
1728 * moment.
1729 */
abd06505
BH
1730 if (addr > 0)
1731 return -EINVAL;
1732
2325f0a0 1733 /* The bottom 3 bits in dabr are flags */
abd06505
BH
1734 if ((data & ~0x7UL) >= TASK_SIZE)
1735 return -EIO;
1736
172ae2e7 1737#ifndef CONFIG_PPC_ADV_DEBUG_REGS
d6a61bfc
LM
1738 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
1739 * It was assumed, on previous implementations, that 3 bits were
1740 * passed together with the data address, fitting the design of the
1741 * DABR register, as follows:
1742 *
1743 * bit 0: Read flag
1744 * bit 1: Write flag
1745 * bit 2: Breakpoint translation
1746 *
1747 * Thus, we use them here as so.
1748 */
1749
1750 /* Ensure breakpoint translation bit is set */
9422de3e 1751 if (data && !(data & HW_BRK_TYPE_TRANSLATE))
abd06505 1752 return -EIO;
9422de3e
MN
1753 hw_brk.address = data & (~HW_BRK_TYPE_DABR);
1754 hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
1755 hw_brk.len = 8;
5aae8a53
P
1756#ifdef CONFIG_HAVE_HW_BREAKPOINT
1757 bp = thread->ptrace_bps[0];
9422de3e 1758 if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
5aae8a53
P
1759 if (bp) {
1760 unregister_hw_breakpoint(bp);
1761 thread->ptrace_bps[0] = NULL;
1762 }
1763 return 0;
1764 }
1765 if (bp) {
1766 attr = bp->attr;
9422de3e
MN
1767 attr.bp_addr = hw_brk.address;
1768 arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
a53fd61a
AP
1769
1770 /* Enable breakpoint */
1771 attr.disabled = false;
1772
5aae8a53 1773 ret = modify_user_hw_breakpoint(bp, &attr);
925f83c0 1774 if (ret) {
5aae8a53 1775 return ret;
925f83c0 1776 }
5aae8a53 1777 thread->ptrace_bps[0] = bp;
9422de3e 1778 thread->hw_brk = hw_brk;
5aae8a53
P
1779 return 0;
1780 }
1781
1782 /* Create a new breakpoint request if one doesn't exist already */
1783 hw_breakpoint_init(&attr);
9422de3e
MN
1784 attr.bp_addr = hw_brk.address;
1785 arch_bp_generic_fields(hw_brk.type,
1786 &attr.bp_type);
5aae8a53
P
1787
1788 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
4dc0da86 1789 ptrace_triggered, NULL, task);
5aae8a53
P
1790 if (IS_ERR(bp)) {
1791 thread->ptrace_bps[0] = NULL;
1792 return PTR_ERR(bp);
1793 }
1794
1795#endif /* CONFIG_HAVE_HW_BREAKPOINT */
9422de3e 1796 task->thread.hw_brk = hw_brk;
172ae2e7 1797#else /* CONFIG_PPC_ADV_DEBUG_REGS */
d6a61bfc
LM
1798 /* As described above, it was assumed 3 bits were passed with the data
1799 * address, but we will assume only the mode bits will be passed
1800 * as to not cause alignment restrictions for DAC-based processors.
1801 */
1802
1803 /* DAC's hold the whole address without any mode flags */
51ae8d4a 1804 task->thread.debug.dac1 = data & ~0x3UL;
3bffb652 1805
51ae8d4a 1806 if (task->thread.debug.dac1 == 0) {
3bffb652 1807 dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
51ae8d4a
BB
1808 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
1809 task->thread.debug.dbcr1)) {
3bffb652 1810 task->thread.regs->msr &= ~MSR_DE;
51ae8d4a 1811 task->thread.debug.dbcr0 &= ~DBCR0_IDM;
3bffb652 1812 }
d6a61bfc
LM
1813 return 0;
1814 }
1815
1816 /* Read or Write bits must be set */
1817
1818 if (!(data & 0x3UL))
1819 return -EINVAL;
1820
1821 /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
1822 register */
51ae8d4a 1823 task->thread.debug.dbcr0 |= DBCR0_IDM;
d6a61bfc
LM
1824
1825 /* Check for write and read flags and set DBCR0
1826 accordingly */
3bffb652 1827 dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
d6a61bfc 1828 if (data & 0x1UL)
3bffb652 1829 dbcr_dac(task) |= DBCR_DAC1R;
d6a61bfc 1830 if (data & 0x2UL)
3bffb652 1831 dbcr_dac(task) |= DBCR_DAC1W;
d6a61bfc 1832 task->thread.regs->msr |= MSR_DE;
172ae2e7 1833#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
abd06505
BH
1834 return 0;
1835}
abd06505 1836
1da177e4
LT
1837/*
1838 * Called by kernel/ptrace.c when detaching..
1839 *
1840 * Make sure single step bits etc are not set.
1841 */
1842void ptrace_disable(struct task_struct *child)
1843{
1844 /* make sure the single step bit is not set. */
2a84b0d7 1845 user_disable_single_step(child);
1da177e4
LT
1846}
1847
3bffb652 1848#ifdef CONFIG_PPC_ADV_DEBUG_REGS
84295dfc 1849static long set_instruction_bp(struct task_struct *child,
3bffb652
DK
1850 struct ppc_hw_breakpoint *bp_info)
1851{
1852 int slot;
51ae8d4a
BB
1853 int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
1854 int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
1855 int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
1856 int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
3bffb652
DK
1857
1858 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
1859 slot2_in_use = 1;
1860 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
1861 slot4_in_use = 1;
1862
1863 if (bp_info->addr >= TASK_SIZE)
1864 return -EIO;
1865
1866 if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
1867
1868 /* Make sure range is valid. */
1869 if (bp_info->addr2 >= TASK_SIZE)
1870 return -EIO;
1871
1872 /* We need a pair of IAC registers */
1873 if ((!slot1_in_use) && (!slot2_in_use)) {
1874 slot = 1;
51ae8d4a
BB
1875 child->thread.debug.iac1 = bp_info->addr;
1876 child->thread.debug.iac2 = bp_info->addr2;
1877 child->thread.debug.dbcr0 |= DBCR0_IAC1;
3bffb652
DK
1878 if (bp_info->addr_mode ==
1879 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
1880 dbcr_iac_range(child) |= DBCR_IAC12X;
1881 else
1882 dbcr_iac_range(child) |= DBCR_IAC12I;
1883#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1884 } else if ((!slot3_in_use) && (!slot4_in_use)) {
1885 slot = 3;
51ae8d4a
BB
1886 child->thread.debug.iac3 = bp_info->addr;
1887 child->thread.debug.iac4 = bp_info->addr2;
1888 child->thread.debug.dbcr0 |= DBCR0_IAC3;
3bffb652
DK
1889 if (bp_info->addr_mode ==
1890 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
1891 dbcr_iac_range(child) |= DBCR_IAC34X;
1892 else
1893 dbcr_iac_range(child) |= DBCR_IAC34I;
1894#endif
1895 } else
1896 return -ENOSPC;
1897 } else {
1898 /* We only need one. If possible leave a pair free in
1899 * case a range is needed later
1900 */
1901 if (!slot1_in_use) {
1902 /*
1903 * Don't use iac1 if iac1-iac2 are free and either
1904 * iac3 or iac4 (but not both) are free
1905 */
1906 if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
1907 slot = 1;
51ae8d4a
BB
1908 child->thread.debug.iac1 = bp_info->addr;
1909 child->thread.debug.dbcr0 |= DBCR0_IAC1;
3bffb652
DK
1910 goto out;
1911 }
1912 }
1913 if (!slot2_in_use) {
1914 slot = 2;
51ae8d4a
BB
1915 child->thread.debug.iac2 = bp_info->addr;
1916 child->thread.debug.dbcr0 |= DBCR0_IAC2;
3bffb652
DK
1917#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1918 } else if (!slot3_in_use) {
1919 slot = 3;
51ae8d4a
BB
1920 child->thread.debug.iac3 = bp_info->addr;
1921 child->thread.debug.dbcr0 |= DBCR0_IAC3;
3bffb652
DK
1922 } else if (!slot4_in_use) {
1923 slot = 4;
51ae8d4a
BB
1924 child->thread.debug.iac4 = bp_info->addr;
1925 child->thread.debug.dbcr0 |= DBCR0_IAC4;
3bffb652
DK
1926#endif
1927 } else
1928 return -ENOSPC;
1929 }
1930out:
51ae8d4a 1931 child->thread.debug.dbcr0 |= DBCR0_IDM;
3bffb652
DK
1932 child->thread.regs->msr |= MSR_DE;
1933
1934 return slot;
1935}
1936
1937static int del_instruction_bp(struct task_struct *child, int slot)
1938{
1939 switch (slot) {
1940 case 1:
51ae8d4a 1941 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
3bffb652
DK
1942 return -ENOENT;
1943
1944 if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
1945 /* address range - clear slots 1 & 2 */
51ae8d4a 1946 child->thread.debug.iac2 = 0;
3bffb652
DK
1947 dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
1948 }
51ae8d4a
BB
1949 child->thread.debug.iac1 = 0;
1950 child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
3bffb652
DK
1951 break;
1952 case 2:
51ae8d4a 1953 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
3bffb652
DK
1954 return -ENOENT;
1955
1956 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
1957 /* used in a range */
1958 return -EINVAL;
51ae8d4a
BB
1959 child->thread.debug.iac2 = 0;
1960 child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
3bffb652
DK
1961 break;
1962#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1963 case 3:
51ae8d4a 1964 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
3bffb652
DK
1965 return -ENOENT;
1966
1967 if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
1968 /* address range - clear slots 3 & 4 */
51ae8d4a 1969 child->thread.debug.iac4 = 0;
3bffb652
DK
1970 dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
1971 }
51ae8d4a
BB
1972 child->thread.debug.iac3 = 0;
1973 child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
3bffb652
DK
1974 break;
1975 case 4:
51ae8d4a 1976 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
3bffb652
DK
1977 return -ENOENT;
1978
1979 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
1980 /* Used in a range */
1981 return -EINVAL;
51ae8d4a
BB
1982 child->thread.debug.iac4 = 0;
1983 child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
3bffb652
DK
1984 break;
1985#endif
1986 default:
1987 return -EINVAL;
1988 }
1989 return 0;
1990}
1991
1992static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
1993{
1994 int byte_enable =
1995 (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
1996 & 0xf;
1997 int condition_mode =
1998 bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
1999 int slot;
2000
2001 if (byte_enable && (condition_mode == 0))
2002 return -EINVAL;
2003
2004 if (bp_info->addr >= TASK_SIZE)
2005 return -EIO;
2006
2007 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
2008 slot = 1;
2009 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2010 dbcr_dac(child) |= DBCR_DAC1R;
2011 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2012 dbcr_dac(child) |= DBCR_DAC1W;
51ae8d4a 2013 child->thread.debug.dac1 = (unsigned long)bp_info->addr;
3bffb652
DK
2014#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2015 if (byte_enable) {
51ae8d4a 2016 child->thread.debug.dvc1 =
3bffb652 2017 (unsigned long)bp_info->condition_value;
51ae8d4a 2018 child->thread.debug.dbcr2 |=
3bffb652
DK
2019 ((byte_enable << DBCR2_DVC1BE_SHIFT) |
2020 (condition_mode << DBCR2_DVC1M_SHIFT));
2021 }
2022#endif
2023#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
51ae8d4a 2024 } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
3bffb652
DK
2025 /* Both dac1 and dac2 are part of a range */
2026 return -ENOSPC;
2027#endif
2028 } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
2029 slot = 2;
2030 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2031 dbcr_dac(child) |= DBCR_DAC2R;
2032 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2033 dbcr_dac(child) |= DBCR_DAC2W;
51ae8d4a 2034 child->thread.debug.dac2 = (unsigned long)bp_info->addr;
3bffb652
DK
2035#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2036 if (byte_enable) {
51ae8d4a 2037 child->thread.debug.dvc2 =
3bffb652 2038 (unsigned long)bp_info->condition_value;
51ae8d4a 2039 child->thread.debug.dbcr2 |=
3bffb652
DK
2040 ((byte_enable << DBCR2_DVC2BE_SHIFT) |
2041 (condition_mode << DBCR2_DVC2M_SHIFT));
2042 }
2043#endif
2044 } else
2045 return -ENOSPC;
51ae8d4a 2046 child->thread.debug.dbcr0 |= DBCR0_IDM;
3bffb652
DK
2047 child->thread.regs->msr |= MSR_DE;
2048
2049 return slot + 4;
2050}
2051
2052static int del_dac(struct task_struct *child, int slot)
2053{
2054 if (slot == 1) {
30124d11 2055 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
3bffb652
DK
2056 return -ENOENT;
2057
51ae8d4a 2058 child->thread.debug.dac1 = 0;
3bffb652
DK
2059 dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2060#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
51ae8d4a
BB
2061 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2062 child->thread.debug.dac2 = 0;
2063 child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
3bffb652 2064 }
51ae8d4a 2065 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
3bffb652
DK
2066#endif
2067#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
51ae8d4a 2068 child->thread.debug.dvc1 = 0;
3bffb652
DK
2069#endif
2070 } else if (slot == 2) {
30124d11 2071 if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
3bffb652
DK
2072 return -ENOENT;
2073
2074#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
51ae8d4a 2075 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
3bffb652
DK
2076 /* Part of a range */
2077 return -EINVAL;
51ae8d4a 2078 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
3bffb652
DK
2079#endif
2080#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
51ae8d4a 2081 child->thread.debug.dvc2 = 0;
3bffb652 2082#endif
51ae8d4a 2083 child->thread.debug.dac2 = 0;
3bffb652
DK
2084 dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
2085 } else
2086 return -EINVAL;
2087
2088 return 0;
2089}
2090#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2091
2092#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2093static int set_dac_range(struct task_struct *child,
2094 struct ppc_hw_breakpoint *bp_info)
2095{
2096 int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
2097
2098 /* We don't allow range watchpoints to be used with DVC */
2099 if (bp_info->condition_mode)
2100 return -EINVAL;
2101
2102 /*
2103 * Best effort to verify the address range. The user/supervisor bits
2104 * prevent trapping in kernel space, but let's fail on an obvious bad
2105 * range. The simple test on the mask is not fool-proof, and any
2106 * exclusive range will spill over into kernel space.
2107 */
2108 if (bp_info->addr >= TASK_SIZE)
2109 return -EIO;
2110 if (mode == PPC_BREAKPOINT_MODE_MASK) {
2111 /*
2112 * dac2 is a bitmask. Don't allow a mask that makes a
2113 * kernel space address from a valid dac1 value
2114 */
2115 if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
2116 return -EIO;
2117 } else {
2118 /*
2119 * For range breakpoints, addr2 must also be a valid address
2120 */
2121 if (bp_info->addr2 >= TASK_SIZE)
2122 return -EIO;
2123 }
2124
51ae8d4a 2125 if (child->thread.debug.dbcr0 &
3bffb652
DK
2126 (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
2127 return -ENOSPC;
2128
2129 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
51ae8d4a 2130 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
3bffb652 2131 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
51ae8d4a
BB
2132 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
2133 child->thread.debug.dac1 = bp_info->addr;
2134 child->thread.debug.dac2 = bp_info->addr2;
3bffb652 2135 if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
51ae8d4a 2136 child->thread.debug.dbcr2 |= DBCR2_DAC12M;
3bffb652 2137 else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
51ae8d4a 2138 child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
3bffb652 2139 else /* PPC_BREAKPOINT_MODE_MASK */
51ae8d4a 2140 child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
3bffb652
DK
2141 child->thread.regs->msr |= MSR_DE;
2142
2143 return 5;
2144}
2145#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2146
3162d92d
DK
2147static long ppc_set_hwdebug(struct task_struct *child,
2148 struct ppc_hw_breakpoint *bp_info)
2149{
6c7a2856
P
2150#ifdef CONFIG_HAVE_HW_BREAKPOINT
2151 int len = 0;
2152 struct thread_struct *thread = &(child->thread);
2153 struct perf_event *bp;
2154 struct perf_event_attr attr;
2155#endif /* CONFIG_HAVE_HW_BREAKPOINT */
4dfbf290 2156#ifndef CONFIG_PPC_ADV_DEBUG_REGS
9422de3e 2157 struct arch_hw_breakpoint brk;
4dfbf290
AS
2158#endif
2159
3bffb652
DK
2160 if (bp_info->version != 1)
2161 return -ENOTSUPP;
2162#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2163 /*
2164 * Check for invalid flags and combinations
2165 */
2166 if ((bp_info->trigger_type == 0) ||
2167 (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
2168 PPC_BREAKPOINT_TRIGGER_RW)) ||
2169 (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
2170 (bp_info->condition_mode &
2171 ~(PPC_BREAKPOINT_CONDITION_MODE |
2172 PPC_BREAKPOINT_CONDITION_BE_ALL)))
2173 return -EINVAL;
2174#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2175 if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2176 return -EINVAL;
2177#endif
2178
2179 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
2180 if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
2181 (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
2182 return -EINVAL;
84295dfc 2183 return set_instruction_bp(child, bp_info);
3bffb652
DK
2184 }
2185 if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2186 return set_dac(child, bp_info);
2187
2188#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2189 return set_dac_range(child, bp_info);
2190#else
2191 return -EINVAL;
2192#endif
2193#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
3162d92d 2194 /*
3bffb652 2195 * We only support one data breakpoint
3162d92d 2196 */
4dfbf290
AS
2197 if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
2198 (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
4dfbf290 2199 bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
3162d92d
DK
2200 return -EINVAL;
2201
3162d92d
DK
2202 if ((unsigned long)bp_info->addr >= TASK_SIZE)
2203 return -EIO;
2204
9422de3e
MN
2205 brk.address = bp_info->addr & ~7UL;
2206 brk.type = HW_BRK_TYPE_TRANSLATE;
2bb78efa 2207 brk.len = 8;
4dfbf290 2208 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
9422de3e 2209 brk.type |= HW_BRK_TYPE_READ;
4dfbf290 2210 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
9422de3e 2211 brk.type |= HW_BRK_TYPE_WRITE;
6c7a2856 2212#ifdef CONFIG_HAVE_HW_BREAKPOINT
6c7a2856
P
2213 /*
2214 * Check if the request is for 'range' breakpoints. We can
2215 * support it if range < 8 bytes.
2216 */
6961ed96 2217 if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
6c7a2856 2218 len = bp_info->addr2 - bp_info->addr;
6961ed96 2219 else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
b0b0aa9c 2220 len = 1;
6961ed96 2221 else
6c7a2856 2222 return -EINVAL;
6c7a2856 2223 bp = thread->ptrace_bps[0];
6961ed96 2224 if (bp)
6c7a2856 2225 return -ENOSPC;
6c7a2856
P
2226
2227 /* Create a new breakpoint request if one doesn't exist already */
2228 hw_breakpoint_init(&attr);
2229 attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
2230 attr.bp_len = len;
9422de3e 2231 arch_bp_generic_fields(brk.type, &attr.bp_type);
6c7a2856
P
2232
2233 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2234 ptrace_triggered, NULL, child);
2235 if (IS_ERR(bp)) {
2236 thread->ptrace_bps[0] = NULL;
6c7a2856
P
2237 return PTR_ERR(bp);
2238 }
2239
6c7a2856
P
2240 return 1;
2241#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2242
2243 if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
2244 return -EINVAL;
2245
9422de3e 2246 if (child->thread.hw_brk.address)
6c7a2856 2247 return -ENOSPC;
4dfbf290 2248
9422de3e 2249 child->thread.hw_brk = brk;
3bffb652 2250
3162d92d 2251 return 1;
3bffb652 2252#endif /* !CONFIG_PPC_ADV_DEBUG_REGS */
3162d92d
DK
2253}
2254
ec1b33dc 2255static long ppc_del_hwdebug(struct task_struct *child, long data)
3162d92d 2256{
6c7a2856
P
2257#ifdef CONFIG_HAVE_HW_BREAKPOINT
2258 int ret = 0;
2259 struct thread_struct *thread = &(child->thread);
2260 struct perf_event *bp;
2261#endif /* CONFIG_HAVE_HW_BREAKPOINT */
3bffb652
DK
2262#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2263 int rc;
2264
2265 if (data <= 4)
2266 rc = del_instruction_bp(child, (int)data);
2267 else
2268 rc = del_dac(child, (int)data - 4);
2269
2270 if (!rc) {
51ae8d4a
BB
2271 if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
2272 child->thread.debug.dbcr1)) {
2273 child->thread.debug.dbcr0 &= ~DBCR0_IDM;
3bffb652
DK
2274 child->thread.regs->msr &= ~MSR_DE;
2275 }
2276 }
2277 return rc;
2278#else
3162d92d
DK
2279 if (data != 1)
2280 return -EINVAL;
6c7a2856
P
2281
2282#ifdef CONFIG_HAVE_HW_BREAKPOINT
6c7a2856
P
2283 bp = thread->ptrace_bps[0];
2284 if (bp) {
2285 unregister_hw_breakpoint(bp);
2286 thread->ptrace_bps[0] = NULL;
2287 } else
2288 ret = -ENOENT;
6c7a2856
P
2289 return ret;
2290#else /* CONFIG_HAVE_HW_BREAKPOINT */
9422de3e 2291 if (child->thread.hw_brk.address == 0)
3162d92d
DK
2292 return -ENOENT;
2293
9422de3e
MN
2294 child->thread.hw_brk.address = 0;
2295 child->thread.hw_brk.type = 0;
6c7a2856 2296#endif /* CONFIG_HAVE_HW_BREAKPOINT */
3bffb652 2297
3162d92d 2298 return 0;
3bffb652 2299#endif
3162d92d
DK
2300}
2301
9b05a69e
NK
2302long arch_ptrace(struct task_struct *child, long request,
2303 unsigned long addr, unsigned long data)
1da177e4 2304{
1da177e4 2305 int ret = -EPERM;
f68d2048
NK
2306 void __user *datavp = (void __user *) data;
2307 unsigned long __user *datalp = datavp;
1da177e4 2308
1da177e4 2309 switch (request) {
1da177e4 2310 /* read the word at location addr in the USER area. */
1da177e4
LT
2311 case PTRACE_PEEKUSR: {
2312 unsigned long index, tmp;
2313
2314 ret = -EIO;
2315 /* convert to index and check */
e8a30302 2316#ifdef CONFIG_PPC32
9b05a69e 2317 index = addr >> 2;
e8a30302
SR
2318 if ((addr & 3) || (index > PT_FPSCR)
2319 || (child->thread.regs == NULL))
2320#else
9b05a69e 2321 index = addr >> 3;
e8a30302
SR
2322 if ((addr & 7) || (index > PT_FPSCR))
2323#endif
1da177e4
LT
2324 break;
2325
2326 CHECK_FULL_REGS(child->thread.regs);
2327 if (index < PT_FPR0) {
ee4a3916
AK
2328 ret = ptrace_get_reg(child, (int) index, &tmp);
2329 if (ret)
2330 break;
1da177e4 2331 } else {
e69b742a
BH
2332 unsigned int fpidx = index - PT_FPR0;
2333
e8a30302 2334 flush_fp_to_thread(child);
e69b742a 2335 if (fpidx < (PT_FPSCR - PT_FPR0))
36aa1b18 2336 memcpy(&tmp, &child->thread.TS_FPR(fpidx),
87fec051 2337 sizeof(long));
e69b742a 2338 else
de79f7b9 2339 tmp = child->thread.fp_state.fpscr;
1da177e4 2340 }
f68d2048 2341 ret = put_user(tmp, datalp);
1da177e4
LT
2342 break;
2343 }
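/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * The addr argument to PTRACE_PEEKUSR is a byte offset, i.e. the register
 * index scaled by the word size (hence the ">> 3" above on 64-bit).  A
 * tracer reading the tracee's NIP might therefore do the following, where
 * "pid" is an already-attached tracee and PT_NIP comes from the powerpc
 * asm/ptrace.h UAPI header (glibc spells the request PTRACE_PEEKUSER):
 *
 *	errno = 0;
 *	long nip = ptrace(PTRACE_PEEKUSER, pid, PT_NIP * sizeof(long), NULL);
 *	if (nip == -1 && errno != 0)
 *		perror("ptrace");
 */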
2344
1da177e4
LT
2345 /* write the word at location addr in the USER area */
2346 case PTRACE_POKEUSR: {
2347 unsigned long index;
2348
2349 ret = -EIO;
2350 /* convert to index and check */
e8a30302 2351#ifdef CONFIG_PPC32
9b05a69e 2352 index = addr >> 2;
e8a30302
SR
2353 if ((addr & 3) || (index > PT_FPSCR)
2354 || (child->thread.regs == NULL))
2355#else
9b05a69e 2356 index = addr >> 3;
e8a30302
SR
2357 if ((addr & 7) || (index > PT_FPSCR))
2358#endif
1da177e4
LT
2359 break;
2360
2361 CHECK_FULL_REGS(child->thread.regs);
1da177e4 2362 if (index < PT_FPR0) {
865418d8 2363 ret = ptrace_put_reg(child, index, data);
1da177e4 2364 } else {
e69b742a
BH
2365 unsigned int fpidx = index - PT_FPR0;
2366
e8a30302 2367 flush_fp_to_thread(child);
e69b742a 2368 if (fpidx < (PT_FPSCR - PT_FPR0))
36aa1b18 2369 memcpy(&child->thread.TS_FPR(fpidx), &data,
87fec051 2370 sizeof(long));
e69b742a 2371 else
de79f7b9 2372 child->thread.fp_state.fpscr = data;
1da177e4
LT
2373 ret = 0;
2374 }
2375 break;
2376 }
2377
3162d92d
DK
2378 case PPC_PTRACE_GETHWDBGINFO: {
2379 struct ppc_debug_info dbginfo;
2380
2381 dbginfo.version = 1;
3bffb652
DK
2382#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2383 dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
2384 dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
2385 dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
2386 dbginfo.data_bp_alignment = 4;
2387 dbginfo.sizeof_condition = 4;
2388 dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
2389 PPC_DEBUG_FEATURE_INSN_BP_MASK;
2390#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2391 dbginfo.features |=
2392 PPC_DEBUG_FEATURE_DATA_BP_RANGE |
2393 PPC_DEBUG_FEATURE_DATA_BP_MASK;
2394#endif
2395#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
3162d92d
DK
2396 dbginfo.num_instruction_bps = 0;
2397 dbginfo.num_data_bps = 1;
2398 dbginfo.num_condition_regs = 0;
2399#ifdef CONFIG_PPC64
2400 dbginfo.data_bp_alignment = 8;
2401#else
2402 dbginfo.data_bp_alignment = 4;
2403#endif
2404 dbginfo.sizeof_condition = 0;
6c7a2856
P
2405#ifdef CONFIG_HAVE_HW_BREAKPOINT
2406 dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
517b7314
MN
2407 if (cpu_has_feature(CPU_FTR_DAWR))
2408 dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
6c7a2856 2409#else
3162d92d 2410 dbginfo.features = 0;
6c7a2856 2411#endif /* CONFIG_HAVE_HW_BREAKPOINT */
3bffb652 2412#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
3162d92d 2413
f68d2048 2414 if (!access_ok(VERIFY_WRITE, datavp,
3162d92d
DK
2415 sizeof(struct ppc_debug_info)))
2416 return -EFAULT;
f68d2048
NK
2417 ret = __copy_to_user(datavp, &dbginfo,
2418 sizeof(struct ppc_debug_info)) ?
3162d92d
DK
2419 -EFAULT : 0;
2420 break;
2421 }
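/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * User space would typically query these capabilities before trying to
 * set a hardware breakpoint; "pid" is assumed to be an already-attached
 * tracee and the constants come from the powerpc asm/ptrace.h UAPI header:
 *
 *	struct ppc_debug_info info;
 *	int have_range = 0;
 *
 *	if (ptrace(PPC_PTRACE_GETHWDBGINFO, pid, 0, &info) == 0)
 *		have_range = !!(info.features &
 *				PPC_DEBUG_FEATURE_DATA_BP_RANGE);
 */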
2422
2423 case PPC_PTRACE_SETHWDEBUG: {
2424 struct ppc_hw_breakpoint bp_info;
2425
f68d2048 2426 if (!access_ok(VERIFY_READ, datavp,
3162d92d
DK
2427 sizeof(struct ppc_hw_breakpoint)))
2428 return -EFAULT;
f68d2048 2429 ret = __copy_from_user(&bp_info, datavp,
3162d92d
DK
2430 sizeof(struct ppc_hw_breakpoint)) ?
2431 -EFAULT : 0;
2432 if (!ret)
2433 ret = ppc_set_hwdebug(child, &bp_info);
2434 break;
2435 }
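/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A minimal user-space request for an exact-address write watchpoint,
 * using only the fields that ppc_set_hwdebug() above validates; "pid"
 * and "watch_addr" are assumed values:
 *
 *	struct ppc_hw_breakpoint bp = {
 *		.version	= 1,
 *		.trigger_type	= PPC_BREAKPOINT_TRIGGER_WRITE,
 *		.addr_mode	= PPC_BREAKPOINT_MODE_EXACT,
 *		.condition_mode	= PPC_BREAKPOINT_CONDITION_NONE,
 *		.addr		= (__u64)watch_addr,
 *	};
 *	long handle = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
 *
 * On success the positive return value is a handle that can later be
 * passed as the data argument of PPC_PTRACE_DELHWDEBUG (next case) to
 * remove the breakpoint.
 */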
2436
2437 case PPC_PTRACE_DELHWDEBUG: {
ec1b33dc 2438 ret = ppc_del_hwdebug(child, data);
3162d92d
DK
2439 break;
2440 }
2441
e8a30302 2442 case PTRACE_GET_DEBUGREG: {
9422de3e
MN
2443#ifndef CONFIG_PPC_ADV_DEBUG_REGS
2444 unsigned long dabr_fake;
2445#endif
e8a30302
SR
2446 ret = -EINVAL;
2447 /* We only support one DABR and no IABRs at the moment */
2448 if (addr > 0)
2449 break;
3bffb652 2450#ifdef CONFIG_PPC_ADV_DEBUG_REGS
51ae8d4a 2451 ret = put_user(child->thread.debug.dac1, datalp);
3bffb652 2452#else
9422de3e
MN
2453 dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
2454 (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
2455 ret = put_user(dabr_fake, datalp);
3bffb652 2456#endif
e8a30302
SR
2457 break;
2458 }
2459
2460 case PTRACE_SET_DEBUGREG:
2461 ret = ptrace_set_debugreg(child, addr, data);
2462 break;
e8a30302 2463
e17666ba
BH
2464#ifdef CONFIG_PPC64
2465 case PTRACE_GETREGS64:
2466#endif
c391cd00
RM
2467 case PTRACE_GETREGS: /* Get all pt_regs from the child. */
2468 return copy_regset_to_user(child, &user_ppc_native_view,
2469 REGSET_GPR,
2470 0, sizeof(struct pt_regs),
f68d2048 2471 datavp);
e8a30302 2472
e17666ba
BH
2473#ifdef CONFIG_PPC64
2474 case PTRACE_SETREGS64:
2475#endif
c391cd00
RM
2476 case PTRACE_SETREGS: /* Set all gp regs in the child. */
2477 return copy_regset_from_user(child, &user_ppc_native_view,
2478 REGSET_GPR,
2479 0, sizeof(struct pt_regs),
f68d2048 2480 datavp);
c391cd00
RM
2481
2482 case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
2483 return copy_regset_to_user(child, &user_ppc_native_view,
2484 REGSET_FPR,
2485 0, sizeof(elf_fpregset_t),
f68d2048 2486 datavp);
c391cd00
RM
2487
2488 case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
2489 return copy_regset_from_user(child, &user_ppc_native_view,
2490 REGSET_FPR,
2491 0, sizeof(elf_fpregset_t),
f68d2048 2492 datavp);
e8a30302 2493
1da177e4
LT
2494#ifdef CONFIG_ALTIVEC
2495 case PTRACE_GETVRREGS:
c391cd00
RM
2496 return copy_regset_to_user(child, &user_ppc_native_view,
2497 REGSET_VMX,
2498 0, (33 * sizeof(vector128) +
2499 sizeof(u32)),
f68d2048 2500 datavp);
1da177e4
LT
2501
2502 case PTRACE_SETVRREGS:
c391cd00
RM
2503 return copy_regset_from_user(child, &user_ppc_native_view,
2504 REGSET_VMX,
2505 0, (33 * sizeof(vector128) +
2506 sizeof(u32)),
f68d2048 2507 datavp);
1da177e4 2508#endif
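/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * The size used above (33 vectors plus a u32) corresponds roughly to the
 * user-visible layout of the VMX regset that a tracer passes as the data
 * argument of PTRACE_GETVRREGS / PTRACE_SETVRREGS:
 *
 *	struct {
 *		vector128 vr[32];
 *		vector128 vscr;
 *		unsigned int vrsave;
 *	} vrregs;
 *
 * where vr[] holds VR0..VR31, vscr holds VSCR widened to a full vector
 * slot, and vrsave holds the 32-bit VRSAVE register.
 */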
ce48b210
MN
2509#ifdef CONFIG_VSX
2510 case PTRACE_GETVSRREGS:
2511 return copy_regset_to_user(child, &user_ppc_native_view,
2512 REGSET_VSX,
1ac42ef8 2513 0, 32 * sizeof(double),
f68d2048 2514 datavp);
ce48b210
MN
2515
2516 case PTRACE_SETVSRREGS:
2517 return copy_regset_from_user(child, &user_ppc_native_view,
2518 REGSET_VSX,
1ac42ef8 2519 0, 32 * sizeof(double),
f68d2048 2520 datavp);
ce48b210 2521#endif
1da177e4
LT
2522#ifdef CONFIG_SPE
2523 case PTRACE_GETEVRREGS:
2524 /* Get the child spe register state. */
c391cd00
RM
2525 return copy_regset_to_user(child, &user_ppc_native_view,
2526 REGSET_SPE, 0, 35 * sizeof(u32),
f68d2048 2527 datavp);
1da177e4
LT
2528
2529 case PTRACE_SETEVRREGS:
2530 /* Set the child spe register state. */
c391cd00
RM
2531 return copy_regset_from_user(child, &user_ppc_native_view,
2532 REGSET_SPE, 0, 35 * sizeof(u32),
f68d2048 2533 datavp);
1da177e4
LT
2534#endif
2535
2536 default:
2537 ret = ptrace_request(child, request, addr, data);
2538 break;
2539 }
1da177e4
LT
2540 return ret;
2541}
2542
2449acc5
ME
2543#ifdef CONFIG_SECCOMP
2544static int do_seccomp(struct pt_regs *regs)
2545{
2546 if (!test_thread_flag(TIF_SECCOMP))
2547 return 0;
2548
2549 /*
2550 * The ABI we present to seccomp tracers is that r3 contains
2551 * the syscall return value and orig_gpr3 contains the first
2552 * syscall parameter. This is different to the ptrace ABI where
2553 * both r3 and orig_gpr3 contain the first syscall parameter.
2554 */
2555 regs->gpr[3] = -ENOSYS;
2556
2557 /*
2558 * We use the __ version here because we have already checked
2559 * TIF_SECCOMP. If this fails, there is nothing left to do, we
2560 * have already loaded -ENOSYS into r3, or seccomp has put
2561 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
2562 */
2f275de5 2563 if (__secure_computing(NULL))
2449acc5
ME
2564 return -1;
2565
2566 /*
2567 * The syscall was allowed by seccomp, restore the register
1addc57e 2568 * state to what audit expects.
2449acc5
ME
2569 * Note that we use orig_gpr3, which means a seccomp tracer can
2570 * modify the first syscall parameter (in orig_gpr3) and also
2571 * allow the syscall to proceed.
2572 */
2573 regs->gpr[3] = regs->orig_gpr3;
2574
2575 return 0;
2576}
2577#else
2578static inline int do_seccomp(struct pt_regs *regs) { return 0; }
2579#endif /* CONFIG_SECCOMP */
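/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * As the comment in do_seccomp() explains, a tracer handling a
 * SECCOMP_RET_TRACE stop should read the first syscall argument from
 * orig_gpr3 rather than r3.  From user space that might look like this,
 * with "pid" the stopped tracee and PT_ORIG_R3 taken from the powerpc
 * asm/ptrace.h UAPI header:
 *
 *	long arg0 = ptrace(PTRACE_PEEKUSER, pid,
 *			   PT_ORIG_R3 * sizeof(long), NULL);
 */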
2580
d3837414
ME
2581/**
2582 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
2583 * @regs: the pt_regs of the task to trace (current)
2584 *
2585 * Performs various types of tracing on syscall entry. This includes seccomp,
2586 * ptrace, syscall tracepoints and audit.
2587 *
2588 * The pt_regs are potentially visible to userspace via ptrace, so their
2589 * contents are ABI.
2590 *
2591 * One or more of the tracers may modify the contents of pt_regs, in particular
2592 * to modify arguments or even the syscall number itself.
2593 *
2594 * It's also possible that a tracer can choose to reject the system call. In
2595 * that case this function will return an illegal syscall number, and will put
2596 * an appropriate return value in regs->r3.
2597 *
2598 * Return: the (possibly changed) syscall number.
4f72c427
RM
2599 */
2600long do_syscall_trace_enter(struct pt_regs *regs)
1da177e4 2601{
22ecbe8d
LZ
2602 user_exit();
2603
1addc57e
KC
2604 /*
2605 * The tracer may decide to abort the syscall, if so tracehook
2606 * will return !0. Note that the tracer may also just change
2607 * regs->gpr[0] to an invalid syscall number, that is handled
2608 * below on the exit path.
2609 */
2610 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
2611 tracehook_report_syscall_entry(regs))
2612 goto skip;
2613
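/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A tracer stopped at syscall entry can cancel the syscall by rewriting
 * r0 (the syscall number) to an out-of-range value; the NR_syscalls
 * check below then takes the skip path and the tracee sees -ENOSYS.
 * For example, with PT_R0 from the powerpc asm/ptrace.h UAPI header:
 *
 *	ptrace(PTRACE_POKEUSER, pid, PT_R0 * sizeof(long), -1L);
 */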
2614 /* Run seccomp after ptrace; allow it to set gpr[3]. */
2449acc5
ME
2615 if (do_seccomp(regs))
2616 return -1;
e8a30302 2617
1addc57e
KC
2618 /* Avoid trace and audit when syscall is invalid. */
2619 if (regs->gpr[0] >= NR_syscalls)
2620 goto skip;
ea9c102c 2621
02424d89
IM
2622 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
2623 trace_sys_enter(regs, regs->gpr[0]);
2624
cfcd1705 2625#ifdef CONFIG_PPC64
b05d8447 2626 if (!is_32bit_task())
91397401 2627 audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
b05d8447
EP
2628 regs->gpr[5], regs->gpr[6]);
2629 else
e8a30302 2630#endif
91397401 2631 audit_syscall_entry(regs->gpr[0],
b05d8447
EP
2632 regs->gpr[3] & 0xffffffff,
2633 regs->gpr[4] & 0xffffffff,
2634 regs->gpr[5] & 0xffffffff,
2635 regs->gpr[6] & 0xffffffff);
4f72c427 2636
d3837414
ME
2637 /* Return the possibly modified but valid syscall number */
2638 return regs->gpr[0];
1addc57e
KC
2639
2640skip:
2641 /*
2642 * If we are aborting explicitly, or if the syscall number is
2643 * now invalid, set the return value to -ENOSYS.
2644 */
2645 regs->gpr[3] = -ENOSYS;
2646 return -1;
ea9c102c
DW
2647}
2648
2649void do_syscall_trace_leave(struct pt_regs *regs)
2650{
4f72c427
RM
2651 int step;
2652
d7e7528b 2653 audit_syscall_exit(regs);
ea9c102c 2654
02424d89
IM
2655 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
2656 trace_sys_exit(regs, regs->result);
2657
4f72c427
RM
2658 step = test_thread_flag(TIF_SINGLESTEP);
2659 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
2660 tracehook_report_syscall_exit(regs, step);
22ecbe8d
LZ
2661
2662 user_enter();
ea9c102c 2663}