]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - arch/powerpc/lib/sstep.c
96283499664bd83bc400f2e51cfe38809d7dbd32
[mirror_ubuntu-bionic-kernel.git] / arch / powerpc / lib / sstep.c
1 /*
2 * Single-step support.
3 *
4 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11 #include <linux/kernel.h>
12 #include <linux/kprobes.h>
13 #include <linux/ptrace.h>
14 #include <linux/prefetch.h>
15 #include <asm/sstep.h>
16 #include <asm/processor.h>
17 #include <linux/uaccess.h>
18 #include <asm/cpu_has_feature.h>
19 #include <asm/cputable.h>
20
21 extern char system_call_common[];
22
23 #ifdef CONFIG_PPC64
24 /* Bits in SRR1 that are copied from MSR */
25 #define MSR_MASK 0xffffffff87c0ffffUL
26 #else
27 #define MSR_MASK 0x87c0ffff
28 #endif
29
30 /* Bits in XER */
31 #define XER_SO 0x80000000U
32 #define XER_OV 0x40000000U
33 #define XER_CA 0x20000000U
34
35 #ifdef CONFIG_PPC_FPU
36 /*
37 * Functions in ldstfp.S
38 */
39 extern int do_lfs(int rn, unsigned long ea);
40 extern int do_lfd(int rn, unsigned long ea);
41 extern int do_stfs(int rn, unsigned long ea);
42 extern int do_stfd(int rn, unsigned long ea);
43 extern int do_lvx(int rn, unsigned long ea);
44 extern int do_stvx(int rn, unsigned long ea);
45 extern void load_vsrn(int vsr, const void *p);
46 extern void store_vsrn(int vsr, void *p);
47 extern void conv_sp_to_dp(const float *sp, double *dp);
48 extern void conv_dp_to_sp(const double *dp, float *sp);
49 #endif
50
51 #ifdef __powerpc64__
52 /*
53 * Functions in quad.S
54 */
55 extern int do_lq(unsigned long ea, unsigned long *regs);
56 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
57 extern int do_lqarx(unsigned long ea, unsigned long *regs);
58 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
59 unsigned int *crp);
60 #endif
61
62 #ifdef __LITTLE_ENDIAN__
63 #define IS_LE 1
64 #define IS_BE 0
65 #else
66 #define IS_LE 0
67 #define IS_BE 1
68 #endif
69
70 /*
71 * Emulate the truncation of 64 bit values in 32-bit mode.
72 */
73 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
74 unsigned long val)
75 {
76 #ifdef __powerpc64__
77 if ((msr & MSR_64BIT) == 0)
78 val &= 0xffffffffUL;
79 #endif
80 return val;
81 }
82
83 /*
84 * Determine whether a conditional branch instruction would branch.
85 */
static nokprobe_inline int branch_taken(unsigned int instr,
					const struct pt_regs *regs,
					struct instruction_op *op)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		op->type |= DECCTR;
		/*
		 * The decrement is only recorded in op->type, not applied
		 * to regs, so regs->ctr == 1 here corresponds to the
		 * decremented CTR being zero.
		 */
		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
107
/*
 * Check whether an access of nb bytes at ea is permitted: kernel-mode
 * accesses are always allowed, user-mode ones go through __access_ok().
 */
static nokprobe_inline long address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}
114
115 /*
116 * Calculate effective address for a D-form instruction
117 */
118 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
119 const struct pt_regs *regs)
120 {
121 int ra;
122 unsigned long ea;
123
124 ra = (instr >> 16) & 0x1f;
125 ea = (signed short) instr; /* sign-extend */
126 if (ra)
127 ea += regs->gpr[ra];
128
129 return ea;
130 }
131
132 #ifdef __powerpc64__
133 /*
134 * Calculate effective address for a DS-form instruction
135 */
136 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
137 const struct pt_regs *regs)
138 {
139 int ra;
140 unsigned long ea;
141
142 ra = (instr >> 16) & 0x1f;
143 ea = (signed short) (instr & ~3); /* sign-extend */
144 if (ra)
145 ea += regs->gpr[ra];
146
147 return ea;
148 }
149
150 /*
151 * Calculate effective address for a DQ-form instruction
152 */
153 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
154 const struct pt_regs *regs)
155 {
156 int ra;
157 unsigned long ea;
158
159 ra = (instr >> 16) & 0x1f;
160 ea = (signed short) (instr & ~0xf); /* sign-extend */
161 if (ra)
162 ea += regs->gpr[ra];
163
164 return ea;
165 }
166 #endif /* __powerpc64 */
167
168 /*
169 * Calculate effective address for an X-form instruction
170 */
171 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
172 const struct pt_regs *regs)
173 {
174 int ra, rb;
175 unsigned long ea;
176
177 ra = (instr >> 16) & 0x1f;
178 rb = (instr >> 11) & 0x1f;
179 ea = regs->gpr[rb];
180 if (ra)
181 ea += regs->gpr[ra];
182
183 return ea;
184 }
185
186 /*
187 * Return the largest power of 2, not greater than sizeof(unsigned long),
188 * such that x is a multiple of it.
189 */
190 static nokprobe_inline unsigned long max_align(unsigned long x)
191 {
192 x |= sizeof(unsigned long);
193 return x & -x; /* isolates rightmost bit */
194 }
195
196
197 static nokprobe_inline unsigned long byterev_2(unsigned long x)
198 {
199 return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
200 }
201
202 static nokprobe_inline unsigned long byterev_4(unsigned long x)
203 {
204 return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
205 ((x & 0xff00) << 8) | ((x & 0xff) << 24);
206 }
207
208 #ifdef __powerpc64__
209 static nokprobe_inline unsigned long byterev_8(unsigned long x)
210 {
211 return (byterev_4(x) << 32) | byterev_4(x >> 32);
212 }
213 #endif
214
/*
 * Read an aligned value of nb bytes (1, 2, 4 or 8) from address ea.
 * On success the zero-extended value is stored in *dest and 0 is
 * returned; otherwise the __get_user() error is returned.
 */
static nokprobe_inline int read_mem_aligned(unsigned long *dest,
					unsigned long ea, int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	/* only update *dest if the access succeeded */
	if (!err)
		*dest = x;
	return err;
}
241
/*
 * Read nb bytes from ea when the access is not naturally aligned.
 * The value is accumulated big-endian style one piece at a time; on
 * little-endian kernels each piece is a single byte and the result is
 * byte-reversed at the end to get host order.
 */
static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
				unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;
#ifdef __LITTLE_ENDIAN__
	int len = nb; /* save a copy of the length for byte reversal */
#endif

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		/* use the largest aligned chunk the address permits */
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		/* append this piece below the ones already read */
		x = (x << (8 * c)) + b;
		ea += c;
	}
#ifdef __LITTLE_ENDIAN__
	/* bytes were accumulated in big-endian order; swap them back */
	switch (len) {
	case 2:
		*dest = byterev_2(x);
		break;
	case 4:
		*dest = byterev_4(x);
		break;
#ifdef __powerpc64__
	case 8:
		*dest = byterev_8(x);
		break;
#endif
	}
#endif
#ifdef __BIG_ENDIAN__
	*dest = x;
#endif
	return 0;
}
288
289 /*
290 * Read memory at address ea for nb bytes, return 0 for success
291 * or -EFAULT if an error occurred.
292 */
293 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
294 struct pt_regs *regs)
295 {
296 if (!address_ok(regs, ea, nb))
297 return -EFAULT;
298 if ((ea & (nb - 1)) == 0)
299 return read_mem_aligned(dest, ea, nb);
300 return read_mem_unaligned(dest, ea, nb, regs);
301 }
302 NOKPROBE_SYMBOL(read_mem);
303
/*
 * Store an aligned value of nb bytes (1, 2, 4 or 8) to address ea.
 * Returns 0 on success or the __put_user() error.
 */
static nokprobe_inline int write_mem_aligned(unsigned long val,
					unsigned long ea, int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}
327
/*
 * Store nb bytes of val to unaligned address ea.  On little-endian the
 * value is byte-reversed first so the piece-wise loop can always write
 * the most significant remaining bytes first.
 */
static nokprobe_inline int write_mem_unaligned(unsigned long val,
				unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

#ifdef __LITTLE_ENDIAN__
	switch (nb) {
	case 2:
		val = byterev_2(val);
		break;
	case 4:
		val = byterev_4(val);
		break;
#ifdef __powerpc64__
	case 8:
		val = byterev_8(val);
		break;
#endif
	}
#endif
	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		/* use the largest aligned chunk the address permits */
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		/* write the most significant remaining bytes first */
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}
366
367 /*
368 * Write memory at address ea for nb bytes, return 0 for success
369 * or -EFAULT if an error occurred.
370 */
371 static int write_mem(unsigned long val, unsigned long ea, int nb,
372 struct pt_regs *regs)
373 {
374 if (!address_ok(regs, ea, nb))
375 return -EFAULT;
376 if ((ea & (nb - 1)) == 0)
377 return write_mem_aligned(val, ea, nb);
378 return write_mem_unaligned(val, ea, nb, regs);
379 }
380 NOKPROBE_SYMBOL(write_mem);
381
382 #ifdef CONFIG_PPC_FPU
383 /*
384 * Check the address and alignment, and call func to do the actual
385 * load or store.
386 */
static int do_fp_load(int rn, int (*func)(int, unsigned long),
		      unsigned long ea, int nb,
		      struct pt_regs *regs)
{
	int err;
	/* staging buffer used when the EA is not 4-byte aligned */
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	/* aligned enough: let the asm helper access memory directly */
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul;
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
		if (nb == 4)
			/* point at the word in its endian-dependent slot */
			ptr = (unsigned long)&(data.single.word);
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	/* load the FP register from the staging buffer */
	return (*func)(rn, ptr);
}
NOKPROBE_SYMBOL(do_fp_load);
428
static int do_fp_store(int rn, int (*func)(int, unsigned long),
		       unsigned long ea, int nb,
		       struct pt_regs *regs)
{
	int err;
	/* staging buffer used when the EA is not 4-byte aligned */
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	/* aligned enough: let the asm helper access memory directly */
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		if (nb == 4)
			/* point at the word in its endian-dependent slot */
			ptr = (unsigned long)&(data.single.word);
		/* store the FP register into the staging buffer first */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
	}
	return err;
}
NOKPROBE_SYMBOL(do_fp_store);
474 #endif
475
476 #ifdef CONFIG_ALTIVEC
477 /* For Altivec/VMX, no need to worry about alignment */
478 static nokprobe_inline int do_vec_load(int rn, int (*func)(int, unsigned long),
479 unsigned long ea, struct pt_regs *regs)
480 {
481 if (!address_ok(regs, ea & ~0xfUL, 16))
482 return -EFAULT;
483 return (*func)(rn, ea);
484 }
485
486 static nokprobe_inline int do_vec_store(int rn, int (*func)(int, unsigned long),
487 unsigned long ea, struct pt_regs *regs)
488 {
489 if (!address_ok(regs, ea & ~0xfUL, 16))
490 return -EFAULT;
491 return (*func)(rn, ea);
492 }
493 #endif /* CONFIG_ALTIVEC */
494
495 #ifdef __powerpc64__
static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
				      int reg)
{
	int err;

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0)
		return do_lq(ea, &regs->gpr[reg]);

	/*
	 * Unaligned: two doubleword reads, with the IS_LE/IS_BE offsets
	 * picking which register of the pair receives the low-address
	 * doubleword on each endianness.
	 */
	err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
	if (!err)
		err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
	return err;
}
512
static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
				       int reg)
{
	int err;

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0)
		return do_stq(ea, regs->gpr[reg], regs->gpr[reg + 1]);

	/*
	 * Unaligned: two doubleword writes, with the IS_LE/IS_BE offsets
	 * picking which register of the pair supplies the low-address
	 * doubleword on each endianness.
	 */
	err = write_mem(regs->gpr[reg + IS_LE], ea, 8, regs);
	if (!err)
		err = write_mem(regs->gpr[reg + IS_BE], ea + 8, 8, regs);
	return err;
}
529 #endif /* __powerpc64 */
530
531 #ifdef CONFIG_VSX
/*
 * Load the value described by *op (size, element size and flags) from
 * the buffer at mem into the VSX register image *reg, taking care of
 * the host-endian layout of the register halves.
 */
void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
		      const void *mem)
{
	int size, read_size;
	int i, j;
	const unsigned int *wp;
	const unsigned short *hp;
	const unsigned char *bp;

	size = GETSIZE(op->type);
	reg->d[0] = reg->d[1] = 0;

	switch (op->element_size) {
	case 16:
		/* whole vector; lxv[x] or lxvl[l] */
		if (size == 0)
			break;
		memcpy(reg, mem, size);
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) {
			/* reverse 16 bytes */
			unsigned long tmp;
			tmp = byterev_8(reg->d[0]);
			reg->d[0] = byterev_8(reg->d[1]);
			reg->d[1] = tmp;
		}
		break;
	case 8:
		/* scalar loads, lxvd2x, lxvdsx */
		read_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - read_size;
		memcpy(&reg->b[i], mem, read_size);
		if (size < 8) {
			if (op->type & SIGNEXT) {
				/* size == 4 is the only case here */
				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
			} else if (op->vsx_flags & VSX_FPCONV) {
				/* single-precision image -> double in register */
				preempt_disable();
				conv_sp_to_dp(&reg->fp[1 + IS_LE],
					      &reg->dp[IS_LE]);
				preempt_enable();
			}
		} else {
			if (size == 16)
				reg->d[IS_BE] = *(unsigned long *)(mem + 8);
			else if (op->vsx_flags & VSX_SPLAT)
				/* copy the doubleword into both halves */
				reg->d[IS_BE] = reg->d[IS_LE];
		}
		break;
	case 4:
		/* lxvw4x, lxvwsx */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			reg->w[i] = *wp++;
		}
		if (op->vsx_flags & VSX_SPLAT) {
			/* replicate the loaded word into the remaining slots */
			u32 val = reg->w[IS_LE ? 3 : 0];
			for (; j < 4; ++j) {
				i = IS_LE ? 3 - j : j;
				reg->w[i] = val;
			}
		}
		break;
	case 2:
		/* lxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			reg->h[i] = *hp++;
		}
		break;
	case 1:
		/* lxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			reg->b[i] = *bp++;
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_load);
NOKPROBE_SYMBOL(emulate_vsx_load);
615
/*
 * Store the value in the VSX register image *reg to the buffer at mem,
 * according to the size, element size and flags in *op.
 */
void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
		       void *mem)
{
	int size, write_size;
	int i, j;
	union vsx_reg buf;
	unsigned int *wp;
	unsigned short *hp;
	unsigned char *bp;

	size = GETSIZE(op->type);

	switch (op->element_size) {
	case 16:
		/* stxv, stxvx, stxvl, stxvll */
		if (size == 0)
			break;
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) {
			/* reverse 16 bytes */
			buf.d[0] = byterev_8(reg->d[1]);
			buf.d[1] = byterev_8(reg->d[0]);
			reg = &buf;
		}
		memcpy(mem, reg, size);
		break;
	case 8:
		/* scalar stores, stxvd2x */
		write_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - write_size;
		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
			/* double in register -> single-precision memory image */
			buf.d[0] = buf.d[1] = 0;
			preempt_disable();
			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
			preempt_enable();
			reg = &buf;
		}
		memcpy(mem, &reg->b[i], write_size);
		if (size == 16)
			memcpy(mem + 8, &reg->d[IS_BE], 8);
		break;
	case 4:
		/* stxvw4x */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			*wp++ = reg->w[i];
		}
		break;
	case 2:
		/* stxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			*hp++ = reg->h[i];
		}
		break;
	case 1:
		/* stvxb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			*bp++ = reg->b[i];
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_store);
NOKPROBE_SYMBOL(emulate_vsx_store);
684 #endif /* CONFIG_VSX */
685
/*
 * Emit an indexed store instruction "op" (e.g. "stwcx.") storing x at
 * addr, then read the resulting condition register into cr with mfcr.
 * A fault branches to the fixup stub, which sets err to -EFAULT.
 */
#define __put_user_asmx(x, addr, err, op, cr) \
	__asm__ __volatile__( \
	"1: " op " %2,0,%3\n" \
	" mfcr %1\n" \
	"2:\n" \
	".section .fixup,\"ax\"\n" \
	"3: li %0,%4\n" \
	" b 2b\n" \
	".previous\n" \
	EX_TABLE(1b, 3b) \
	: "=r" (err), "=r" (cr) \
	: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

/*
 * Emit an indexed load instruction "op" (e.g. "lwarx") loading x from
 * addr.  A fault branches to the fixup stub, which sets err to -EFAULT.
 */
#define __get_user_asmx(x, addr, err, op) \
	__asm__ __volatile__( \
	"1: "op" %1,0,%2\n" \
	"2:\n" \
	".section .fixup,\"ax\"\n" \
	"3: li %0,%3\n" \
	" b 2b\n" \
	".previous\n" \
	EX_TABLE(1b, 3b) \
	: "=r" (err), "=r" (x) \
	: "r" (addr), "i" (-EFAULT), "0" (err))

/*
 * Emit a cache management instruction "op" (e.g. "dcbf") on addr.
 * A fault branches to the fixup stub, which sets err to -EFAULT.
 */
#define __cacheop_user_asmx(addr, err, op) \
	__asm__ __volatile__( \
	"1: "op" 0,%1\n" \
	"2:\n" \
	".section .fixup,\"ax\"\n" \
	"3: li %0,%3\n" \
	" b 2b\n" \
	".previous\n" \
	EX_TABLE(1b, 3b) \
	: "=r" (err) \
	: "r" (addr), "i" (-EFAULT), "0" (err))
722
/*
 * Compute the CR0 image a dot-form (Rc=1) instruction would produce for
 * the result in gpr[rd]: LT/GT/EQ from a signed compare against zero,
 * plus a copy of XER[SO].  The value is recorded in op->ccval rather
 * than written back to the regs.
 */
static nokprobe_inline void set_cr0(const struct pt_regs *regs,
				    struct instruction_op *op, int rd)
{
	long val = regs->gpr[rd];

	op->type |= SETCC;
	/* CR0 bit 3 (SO) is copied from XER bit 31 (the >> 3 shift) */
	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	/* in 32-bit mode the comparison only looks at the low 32 bits */
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		op->ccval |= 0x80000000;
	else if (val > 0)
		op->ccval |= 0x40000000;
	else
		op->ccval |= 0x20000000;
}
741
/*
 * Emulate an add-with-carry style instruction: compute val1 + val2
 * (+1 if carry_in is set), recording the result for register rd and
 * the updated XER (carry-out in XER[CA]) in *op.
 */
static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
					   struct instruction_op *op, int rd,
					   unsigned long val1, unsigned long val2,
					   unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	op->type = COMPUTE + SETREG + SETXER;
	op->reg = rd;
	op->val = val;
#ifdef __powerpc64__
	/* in 32-bit mode the carry comes out of bit 32, so truncate */
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	op->xerval = regs->xer;
	/*
	 * Unsigned overflow test: the sum is less than an addend, or
	 * equal to it when the extra increment from carry_in wrapped.
	 */
	if (val < val1 || (carry_in && val == val1))
		op->xerval |= XER_CA;
	else
		op->xerval &= ~XER_CA;
}
766
767 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
768 struct instruction_op *op,
769 long v1, long v2, int crfld)
770 {
771 unsigned int crval, shift;
772
773 op->type = COMPUTE + SETCC;
774 crval = (regs->xer >> 31) & 1; /* get SO bit */
775 if (v1 < v2)
776 crval |= 8;
777 else if (v1 > v2)
778 crval |= 4;
779 else
780 crval |= 2;
781 shift = (7 - crfld) * 4;
782 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
783 }
784
785 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
786 struct instruction_op *op,
787 unsigned long v1,
788 unsigned long v2, int crfld)
789 {
790 unsigned int crval, shift;
791
792 op->type = COMPUTE + SETCC;
793 crval = (regs->xer >> 31) & 1; /* get SO bit */
794 if (v1 < v2)
795 crval |= 8;
796 else if (v1 > v2)
797 crval |= 4;
798 else
799 crval |= 2;
800 shift = (7 - crfld) * 4;
801 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
802 }
803
804 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
805 struct instruction_op *op,
806 unsigned long v1, unsigned long v2)
807 {
808 unsigned long long out_val, mask;
809 int i;
810
811 out_val = 0;
812 for (i = 0; i < 8; i++) {
813 mask = 0xffUL << (i * 8);
814 if ((v1 & mask) == (v2 & mask))
815 out_val |= mask;
816 }
817 op->val = out_val;
818 }
819
820 /*
821 * The size parameter is used to adjust the equivalent popcnt instruction.
822 * popcntb = 8, popcntw = 32, popcntd = 64
823 */
static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, int size)
{
	unsigned long long out = v1;

	/* classic parallel bit count: 2-bit, 4-bit, then 8-bit partial sums */
	out -= (out >> 1) & 0x5555555555555555;
	out = (0x3333333333333333 & out) + (0x3333333333333333 & (out >> 2));
	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0f;

	if (size == 8) {	/* popcntb */
		/* each byte now holds its own bit count */
		op->val = out;
		return;
	}
	/* fold the byte counts into per-word sums */
	out += out >> 8;
	out += out >> 16;
	if (size == 32) {	/* popcntw */
		op->val = out & 0x0000003f0000003f;
		return;
	}

	out = (out + (out >> 32)) & 0x7f;
	op->val = out;	/* popcntd */
}
848
849 #ifdef CONFIG_PPC64
850 static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
851 struct instruction_op *op,
852 unsigned long v1, unsigned long v2)
853 {
854 unsigned char perm, idx;
855 unsigned int i;
856
857 perm = 0;
858 for (i = 0; i < 8; i++) {
859 idx = (v1 >> (i * 8)) & 0xff;
860 if (idx < 64)
861 if (v2 & PPC_BIT(idx))
862 perm |= 1 << i;
863 }
864 op->val = perm;
865 }
866 #endif /* CONFIG_PPC64 */
867 /*
868 * The size parameter adjusts the equivalent prty instruction.
869 * prtyw = 32, prtyd = 64
870 */
871 static nokprobe_inline void do_prty(const struct pt_regs *regs,
872 struct instruction_op *op,
873 unsigned long v, int size)
874 {
875 unsigned long long res = v ^ (v >> 8);
876
877 res ^= res >> 16;
878 if (size == 32) { /* prtyw */
879 op->val = res & 0x0000000100000001;
880 return;
881 }
882
883 res ^= res >> 32;
884 op->val = res & 1; /*prtyd */
885 }
886
887 static nokprobe_inline int trap_compare(long v1, long v2)
888 {
889 int ret = 0;
890
891 if (v1 < v2)
892 ret |= 0x10;
893 else if (v1 > v2)
894 ret |= 0x08;
895 else
896 ret |= 0x04;
897 if ((unsigned long)v1 < (unsigned long)v2)
898 ret |= 0x02;
899 else if ((unsigned long)v1 > (unsigned long)v2)
900 ret |= 0x01;
901 return ret;
902 }
903
904 /*
905 * Elements of 32-bit rotate and mask instructions.
906 */
907 #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
908 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
909 #ifdef __powerpc64__
910 #define MASK64_L(mb) (~0UL >> (mb))
911 #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
912 #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
913 #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
914 #else
915 #define DATA32(x) (x)
916 #endif
917 #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
918
919 /*
920 * Decode an instruction, and return information about it in *op
921 * without changing *regs.
922 * Integer arithmetic and logical instructions, branches, and barrier
923 * instructions can be emulated just using the information in *op.
924 *
925 * Return value is 1 if the instruction can be emulated just by
926 * updating *regs with the information in *op, -1 if we need the
927 * GPRs but *regs doesn't contain the full register set, or 0
928 * otherwise.
929 */
930 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
931 unsigned int instr)
932 {
933 unsigned int opcode, ra, rb, rd, spr, u;
934 unsigned long int imm;
935 unsigned long int val, val2;
936 unsigned int mb, me, sh;
937 long ival;
938
939 op->type = COMPUTE;
940
941 opcode = instr >> 26;
942 switch (opcode) {
943 case 16: /* bc */
944 op->type = BRANCH;
945 imm = (signed short)(instr & 0xfffc);
946 if ((instr & 2) == 0)
947 imm += regs->nip;
948 op->val = truncate_if_32bit(regs->msr, imm);
949 if (instr & 1)
950 op->type |= SETLK;
951 if (branch_taken(instr, regs, op))
952 op->type |= BRTAKEN;
953 return 1;
954 #ifdef CONFIG_PPC64
955 case 17: /* sc */
956 if ((instr & 0xfe2) == 2)
957 op->type = SYSCALL;
958 else
959 op->type = UNKNOWN;
960 return 0;
961 #endif
962 case 18: /* b */
963 op->type = BRANCH | BRTAKEN;
964 imm = instr & 0x03fffffc;
965 if (imm & 0x02000000)
966 imm -= 0x04000000;
967 if ((instr & 2) == 0)
968 imm += regs->nip;
969 op->val = truncate_if_32bit(regs->msr, imm);
970 if (instr & 1)
971 op->type |= SETLK;
972 return 1;
973 case 19:
974 switch ((instr >> 1) & 0x3ff) {
975 case 0: /* mcrf */
976 op->type = COMPUTE + SETCC;
977 rd = 7 - ((instr >> 23) & 0x7);
978 ra = 7 - ((instr >> 18) & 0x7);
979 rd *= 4;
980 ra *= 4;
981 val = (regs->ccr >> ra) & 0xf;
982 op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
983 return 1;
984
985 case 16: /* bclr */
986 case 528: /* bcctr */
987 op->type = BRANCH;
988 imm = (instr & 0x400)? regs->ctr: regs->link;
989 op->val = truncate_if_32bit(regs->msr, imm);
990 if (instr & 1)
991 op->type |= SETLK;
992 if (branch_taken(instr, regs, op))
993 op->type |= BRTAKEN;
994 return 1;
995
996 case 18: /* rfid, scary */
997 if (regs->msr & MSR_PR)
998 goto priv;
999 op->type = RFI;
1000 return 0;
1001
1002 case 150: /* isync */
1003 op->type = BARRIER | BARRIER_ISYNC;
1004 return 1;
1005
1006 case 33: /* crnor */
1007 case 129: /* crandc */
1008 case 193: /* crxor */
1009 case 225: /* crnand */
1010 case 257: /* crand */
1011 case 289: /* creqv */
1012 case 417: /* crorc */
1013 case 449: /* cror */
1014 op->type = COMPUTE + SETCC;
1015 ra = (instr >> 16) & 0x1f;
1016 rb = (instr >> 11) & 0x1f;
1017 rd = (instr >> 21) & 0x1f;
1018 ra = (regs->ccr >> (31 - ra)) & 1;
1019 rb = (regs->ccr >> (31 - rb)) & 1;
1020 val = (instr >> (6 + ra * 2 + rb)) & 1;
1021 op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1022 (val << (31 - rd));
1023 return 1;
1024 }
1025 break;
1026 case 31:
1027 switch ((instr >> 1) & 0x3ff) {
1028 case 598: /* sync */
1029 op->type = BARRIER + BARRIER_SYNC;
1030 #ifdef __powerpc64__
1031 switch ((instr >> 21) & 3) {
1032 case 1: /* lwsync */
1033 op->type = BARRIER + BARRIER_LWSYNC;
1034 break;
1035 case 2: /* ptesync */
1036 op->type = BARRIER + BARRIER_PTESYNC;
1037 break;
1038 }
1039 #endif
1040 return 1;
1041
1042 case 854: /* eieio */
1043 op->type = BARRIER + BARRIER_EIEIO;
1044 return 1;
1045 }
1046 break;
1047 }
1048
1049 /* Following cases refer to regs->gpr[], so we need all regs */
1050 if (!FULL_REGS(regs))
1051 return -1;
1052
1053 rd = (instr >> 21) & 0x1f;
1054 ra = (instr >> 16) & 0x1f;
1055 rb = (instr >> 11) & 0x1f;
1056
1057 switch (opcode) {
1058 #ifdef __powerpc64__
1059 case 2: /* tdi */
1060 if (rd & trap_compare(regs->gpr[ra], (short) instr))
1061 goto trap;
1062 return 1;
1063 #endif
1064 case 3: /* twi */
1065 if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
1066 goto trap;
1067 return 1;
1068
1069 case 7: /* mulli */
1070 op->val = regs->gpr[ra] * (short) instr;
1071 goto compute_done;
1072
1073 case 8: /* subfic */
1074 imm = (short) instr;
1075 add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1076 return 1;
1077
1078 case 10: /* cmpli */
1079 imm = (unsigned short) instr;
1080 val = regs->gpr[ra];
1081 #ifdef __powerpc64__
1082 if ((rd & 1) == 0)
1083 val = (unsigned int) val;
1084 #endif
1085 do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1086 return 1;
1087
1088 case 11: /* cmpi */
1089 imm = (short) instr;
1090 val = regs->gpr[ra];
1091 #ifdef __powerpc64__
1092 if ((rd & 1) == 0)
1093 val = (int) val;
1094 #endif
1095 do_cmp_signed(regs, op, val, imm, rd >> 2);
1096 return 1;
1097
1098 case 12: /* addic */
1099 imm = (short) instr;
1100 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1101 return 1;
1102
1103 case 13: /* addic. */
1104 imm = (short) instr;
1105 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1106 set_cr0(regs, op, rd);
1107 return 1;
1108
1109 case 14: /* addi */
1110 imm = (short) instr;
1111 if (ra)
1112 imm += regs->gpr[ra];
1113 op->val = imm;
1114 goto compute_done;
1115
1116 case 15: /* addis */
1117 imm = ((short) instr) << 16;
1118 if (ra)
1119 imm += regs->gpr[ra];
1120 op->val = imm;
1121 goto compute_done;
1122
1123 case 19:
1124 if (((instr >> 1) & 0x1f) == 2) {
1125 /* addpcis */
1126 imm = (short) (instr & 0xffc1); /* d0 + d2 fields */
1127 imm |= (instr >> 15) & 0x3e; /* d1 field */
1128 op->val = regs->nip + (imm << 16) + 4;
1129 goto compute_done;
1130 }
1131 op->type = UNKNOWN;
1132 return 0;
1133
1134 case 20: /* rlwimi */
1135 mb = (instr >> 6) & 0x1f;
1136 me = (instr >> 1) & 0x1f;
1137 val = DATA32(regs->gpr[rd]);
1138 imm = MASK32(mb, me);
1139 op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1140 goto logical_done;
1141
1142 case 21: /* rlwinm */
1143 mb = (instr >> 6) & 0x1f;
1144 me = (instr >> 1) & 0x1f;
1145 val = DATA32(regs->gpr[rd]);
1146 op->val = ROTATE(val, rb) & MASK32(mb, me);
1147 goto logical_done;
1148
1149 case 23: /* rlwnm */
1150 mb = (instr >> 6) & 0x1f;
1151 me = (instr >> 1) & 0x1f;
1152 rb = regs->gpr[rb] & 0x1f;
1153 val = DATA32(regs->gpr[rd]);
1154 op->val = ROTATE(val, rb) & MASK32(mb, me);
1155 goto logical_done;
1156
1157 case 24: /* ori */
1158 op->val = regs->gpr[rd] | (unsigned short) instr;
1159 goto logical_done_nocc;
1160
1161 case 25: /* oris */
1162 imm = (unsigned short) instr;
1163 op->val = regs->gpr[rd] | (imm << 16);
1164 goto logical_done_nocc;
1165
1166 case 26: /* xori */
1167 op->val = regs->gpr[rd] ^ (unsigned short) instr;
1168 goto logical_done_nocc;
1169
1170 case 27: /* xoris */
1171 imm = (unsigned short) instr;
1172 op->val = regs->gpr[rd] ^ (imm << 16);
1173 goto logical_done_nocc;
1174
1175 case 28: /* andi. */
1176 op->val = regs->gpr[rd] & (unsigned short) instr;
1177 set_cr0(regs, op, ra);
1178 goto logical_done_nocc;
1179
1180 case 29: /* andis. */
1181 imm = (unsigned short) instr;
1182 op->val = regs->gpr[rd] & (imm << 16);
1183 set_cr0(regs, op, ra);
1184 goto logical_done_nocc;
1185
1186 #ifdef __powerpc64__
1187 case 30: /* rld* */
1188 mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
1189 val = regs->gpr[rd];
1190 if ((instr & 0x10) == 0) {
1191 sh = rb | ((instr & 2) << 4);
1192 val = ROTATE(val, sh);
1193 switch ((instr >> 2) & 3) {
1194 case 0: /* rldicl */
1195 val &= MASK64_L(mb);
1196 break;
1197 case 1: /* rldicr */
1198 val &= MASK64_R(mb);
1199 break;
1200 case 2: /* rldic */
1201 val &= MASK64(mb, 63 - sh);
1202 break;
1203 case 3: /* rldimi */
1204 imm = MASK64(mb, 63 - sh);
1205 val = (regs->gpr[ra] & ~imm) |
1206 (val & imm);
1207 }
1208 op->val = val;
1209 goto logical_done;
1210 } else {
1211 sh = regs->gpr[rb] & 0x3f;
1212 val = ROTATE(val, sh);
1213 switch ((instr >> 1) & 7) {
1214 case 0: /* rldcl */
1215 op->val = val & MASK64_L(mb);
1216 goto logical_done;
1217 case 1: /* rldcr */
1218 op->val = val & MASK64_R(mb);
1219 goto logical_done;
1220 }
1221 }
1222 #endif
1223 op->type = UNKNOWN; /* illegal instruction */
1224 return 0;
1225
1226 case 31:
1227 /* isel occupies 32 minor opcodes */
1228 if (((instr >> 1) & 0x1f) == 15) {
1229 mb = (instr >> 6) & 0x1f; /* bc field */
1230 val = (regs->ccr >> (31 - mb)) & 1;
1231 val2 = (ra) ? regs->gpr[ra] : 0;
1232
1233 op->val = (val) ? val2 : regs->gpr[rb];
1234 goto compute_done;
1235 }
1236
1237 switch ((instr >> 1) & 0x3ff) {
1238 case 4: /* tw */
1239 if (rd == 0x1f ||
1240 (rd & trap_compare((int)regs->gpr[ra],
1241 (int)regs->gpr[rb])))
1242 goto trap;
1243 return 1;
1244 #ifdef __powerpc64__
1245 case 68: /* td */
1246 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1247 goto trap;
1248 return 1;
1249 #endif
1250 case 83: /* mfmsr */
1251 if (regs->msr & MSR_PR)
1252 goto priv;
1253 op->type = MFMSR;
1254 op->reg = rd;
1255 return 0;
1256 case 146: /* mtmsr */
1257 if (regs->msr & MSR_PR)
1258 goto priv;
1259 op->type = MTMSR;
1260 op->reg = rd;
1261 op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1262 return 0;
1263 #ifdef CONFIG_PPC64
1264 case 178: /* mtmsrd */
1265 if (regs->msr & MSR_PR)
1266 goto priv;
1267 op->type = MTMSR;
1268 op->reg = rd;
1269 /* only MSR_EE and MSR_RI get changed if bit 15 set */
1270 /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1271 imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1272 op->val = imm;
1273 return 0;
1274 #endif
1275
1276 case 19: /* mfcr */
1277 imm = 0xffffffffUL;
1278 if ((instr >> 20) & 1) {
1279 imm = 0xf0000000UL;
1280 for (sh = 0; sh < 8; ++sh) {
1281 if (instr & (0x80000 >> sh))
1282 break;
1283 imm >>= 4;
1284 }
1285 }
1286 op->val = regs->ccr & imm;
1287 goto compute_done;
1288
1289 case 144: /* mtcrf */
1290 op->type = COMPUTE + SETCC;
1291 imm = 0xf0000000UL;
1292 val = regs->gpr[rd];
1293 op->val = regs->ccr;
1294 for (sh = 0; sh < 8; ++sh) {
1295 if (instr & (0x80000 >> sh))
1296 op->val = (op->val & ~imm) |
1297 (val & imm);
1298 imm >>= 4;
1299 }
1300 return 1;
1301
1302 case 339: /* mfspr */
1303 spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1304 op->type = MFSPR;
1305 op->reg = rd;
1306 op->spr = spr;
1307 if (spr == SPRN_XER || spr == SPRN_LR ||
1308 spr == SPRN_CTR)
1309 return 1;
1310 return 0;
1311
1312 case 467: /* mtspr */
1313 spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1314 op->type = MTSPR;
1315 op->val = regs->gpr[rd];
1316 op->spr = spr;
1317 if (spr == SPRN_XER || spr == SPRN_LR ||
1318 spr == SPRN_CTR)
1319 return 1;
1320 return 0;
1321
1322 /*
1323 * Compare instructions
1324 */
1325 case 0: /* cmp */
1326 val = regs->gpr[ra];
1327 val2 = regs->gpr[rb];
1328 #ifdef __powerpc64__
1329 if ((rd & 1) == 0) {
1330 /* word (32-bit) compare */
1331 val = (int) val;
1332 val2 = (int) val2;
1333 }
1334 #endif
1335 do_cmp_signed(regs, op, val, val2, rd >> 2);
1336 return 1;
1337
1338 case 32: /* cmpl */
1339 val = regs->gpr[ra];
1340 val2 = regs->gpr[rb];
1341 #ifdef __powerpc64__
1342 if ((rd & 1) == 0) {
1343 /* word (32-bit) compare */
1344 val = (unsigned int) val;
1345 val2 = (unsigned int) val2;
1346 }
1347 #endif
1348 do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1349 return 1;
1350
1351 case 508: /* cmpb */
1352 do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1353 goto logical_done_nocc;
1354
1355 /*
1356 * Arithmetic instructions
1357 */
1358 case 8: /* subfc */
1359 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1360 regs->gpr[rb], 1);
1361 goto arith_done;
1362 #ifdef __powerpc64__
1363 case 9: /* mulhdu */
1364 asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1365 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1366 goto arith_done;
1367 #endif
1368 case 10: /* addc */
1369 add_with_carry(regs, op, rd, regs->gpr[ra],
1370 regs->gpr[rb], 0);
1371 goto arith_done;
1372
1373 case 11: /* mulhwu */
1374 asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1375 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1376 goto arith_done;
1377
1378 case 40: /* subf */
1379 op->val = regs->gpr[rb] - regs->gpr[ra];
1380 goto arith_done;
1381 #ifdef __powerpc64__
1382 case 73: /* mulhd */
1383 asm("mulhd %0,%1,%2" : "=r" (op->val) :
1384 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1385 goto arith_done;
1386 #endif
1387 case 75: /* mulhw */
1388 asm("mulhw %0,%1,%2" : "=r" (op->val) :
1389 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1390 goto arith_done;
1391
1392 case 104: /* neg */
1393 op->val = -regs->gpr[ra];
1394 goto arith_done;
1395
1396 case 136: /* subfe */
1397 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1398 regs->gpr[rb], regs->xer & XER_CA);
1399 goto arith_done;
1400
1401 case 138: /* adde */
1402 add_with_carry(regs, op, rd, regs->gpr[ra],
1403 regs->gpr[rb], regs->xer & XER_CA);
1404 goto arith_done;
1405
1406 case 200: /* subfze */
1407 add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1408 regs->xer & XER_CA);
1409 goto arith_done;
1410
1411 case 202: /* addze */
1412 add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1413 regs->xer & XER_CA);
1414 goto arith_done;
1415
1416 case 232: /* subfme */
1417 add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1418 regs->xer & XER_CA);
1419 goto arith_done;
1420 #ifdef __powerpc64__
1421 case 233: /* mulld */
1422 op->val = regs->gpr[ra] * regs->gpr[rb];
1423 goto arith_done;
1424 #endif
1425 case 234: /* addme */
1426 add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1427 regs->xer & XER_CA);
1428 goto arith_done;
1429
1430 case 235: /* mullw */
1431 op->val = (unsigned int) regs->gpr[ra] *
1432 (unsigned int) regs->gpr[rb];
1433 goto arith_done;
1434
1435 case 266: /* add */
1436 op->val = regs->gpr[ra] + regs->gpr[rb];
1437 goto arith_done;
1438 #ifdef __powerpc64__
1439 case 457: /* divdu */
1440 op->val = regs->gpr[ra] / regs->gpr[rb];
1441 goto arith_done;
1442 #endif
1443 case 459: /* divwu */
1444 op->val = (unsigned int) regs->gpr[ra] /
1445 (unsigned int) regs->gpr[rb];
1446 goto arith_done;
1447 #ifdef __powerpc64__
1448 case 489: /* divd */
1449 op->val = (long int) regs->gpr[ra] /
1450 (long int) regs->gpr[rb];
1451 goto arith_done;
1452 #endif
1453 case 491: /* divw */
1454 op->val = (int) regs->gpr[ra] /
1455 (int) regs->gpr[rb];
1456 goto arith_done;
1457
1458
1459 /*
1460 * Logical instructions
1461 */
1462 case 26: /* cntlzw */
1463 op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
1464 goto logical_done;
1465 #ifdef __powerpc64__
1466 case 58: /* cntlzd */
1467 op->val = __builtin_clzl(regs->gpr[rd]);
1468 goto logical_done;
1469 #endif
1470 case 28: /* and */
1471 op->val = regs->gpr[rd] & regs->gpr[rb];
1472 goto logical_done;
1473
1474 case 60: /* andc */
1475 op->val = regs->gpr[rd] & ~regs->gpr[rb];
1476 goto logical_done;
1477
1478 case 122: /* popcntb */
1479 do_popcnt(regs, op, regs->gpr[rd], 8);
1480 goto logical_done_nocc;
1481
1482 case 124: /* nor */
1483 op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1484 goto logical_done;
1485
1486 case 154: /* prtyw */
1487 do_prty(regs, op, regs->gpr[rd], 32);
1488 goto logical_done_nocc;
1489
1490 case 186: /* prtyd */
1491 do_prty(regs, op, regs->gpr[rd], 64);
1492 goto logical_done_nocc;
1493 #ifdef CONFIG_PPC64
1494 case 252: /* bpermd */
1495 do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
1496 goto logical_done_nocc;
1497 #endif
1498 case 284: /* eqv */
1499 op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1500 goto logical_done;
1501
1502 case 316: /* xor */
1503 op->val = regs->gpr[rd] ^ regs->gpr[rb];
1504 goto logical_done;
1505
1506 case 378: /* popcntw */
1507 do_popcnt(regs, op, regs->gpr[rd], 32);
1508 goto logical_done_nocc;
1509
1510 case 412: /* orc */
1511 op->val = regs->gpr[rd] | ~regs->gpr[rb];
1512 goto logical_done;
1513
1514 case 444: /* or */
1515 op->val = regs->gpr[rd] | regs->gpr[rb];
1516 goto logical_done;
1517
1518 case 476: /* nand */
1519 op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
1520 goto logical_done;
1521 #ifdef CONFIG_PPC64
1522 case 506: /* popcntd */
1523 do_popcnt(regs, op, regs->gpr[rd], 64);
1524 goto logical_done_nocc;
1525 #endif
1526 case 922: /* extsh */
1527 op->val = (signed short) regs->gpr[rd];
1528 goto logical_done;
1529
1530 case 954: /* extsb */
1531 op->val = (signed char) regs->gpr[rd];
1532 goto logical_done;
1533 #ifdef __powerpc64__
1534 case 986: /* extsw */
1535 op->val = (signed int) regs->gpr[rd];
1536 goto logical_done;
1537 #endif
1538
1539 /*
1540 * Shift instructions
1541 */
1542 case 24: /* slw */
1543 sh = regs->gpr[rb] & 0x3f;
1544 if (sh < 32)
1545 op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
1546 else
1547 op->val = 0;
1548 goto logical_done;
1549
1550 case 536: /* srw */
1551 sh = regs->gpr[rb] & 0x3f;
1552 if (sh < 32)
1553 op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
1554 else
1555 op->val = 0;
1556 goto logical_done;
1557
1558 case 792: /* sraw */
1559 op->type = COMPUTE + SETREG + SETXER;
1560 sh = regs->gpr[rb] & 0x3f;
1561 ival = (signed int) regs->gpr[rd];
1562 op->val = ival >> (sh < 32 ? sh : 31);
1563 op->xerval = regs->xer;
1564 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
1565 op->xerval |= XER_CA;
1566 else
1567 op->xerval &= ~XER_CA;
1568 goto logical_done;
1569
1570 case 824: /* srawi */
1571 op->type = COMPUTE + SETREG + SETXER;
1572 sh = rb;
1573 ival = (signed int) regs->gpr[rd];
1574 op->val = ival >> sh;
1575 op->xerval = regs->xer;
1576 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1577 op->xerval |= XER_CA;
1578 else
1579 op->xerval &= ~XER_CA;
1580 goto logical_done;
1581
1582 #ifdef __powerpc64__
1583 case 27: /* sld */
1584 sh = regs->gpr[rb] & 0x7f;
1585 if (sh < 64)
1586 op->val = regs->gpr[rd] << sh;
1587 else
1588 op->val = 0;
1589 goto logical_done;
1590
1591 case 539: /* srd */
1592 sh = regs->gpr[rb] & 0x7f;
1593 if (sh < 64)
1594 op->val = regs->gpr[rd] >> sh;
1595 else
1596 op->val = 0;
1597 goto logical_done;
1598
1599 case 794: /* srad */
1600 op->type = COMPUTE + SETREG + SETXER;
1601 sh = regs->gpr[rb] & 0x7f;
1602 ival = (signed long int) regs->gpr[rd];
1603 op->val = ival >> (sh < 64 ? sh : 63);
1604 op->xerval = regs->xer;
1605 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
1606 op->xerval |= XER_CA;
1607 else
1608 op->xerval &= ~XER_CA;
1609 goto logical_done;
1610
1611 case 826: /* sradi with sh_5 = 0 */
1612 case 827: /* sradi with sh_5 = 1 */
1613 op->type = COMPUTE + SETREG + SETXER;
1614 sh = rb | ((instr & 2) << 4);
1615 ival = (signed long int) regs->gpr[rd];
1616 op->val = ival >> sh;
1617 op->xerval = regs->xer;
1618 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1619 op->xerval |= XER_CA;
1620 else
1621 op->xerval &= ~XER_CA;
1622 goto logical_done;
1623 #endif /* __powerpc64__ */
1624
1625 /*
1626 * Cache instructions
1627 */
1628 case 54: /* dcbst */
1629 op->type = MKOP(CACHEOP, DCBST, 0);
1630 op->ea = xform_ea(instr, regs);
1631 return 0;
1632
1633 case 86: /* dcbf */
1634 op->type = MKOP(CACHEOP, DCBF, 0);
1635 op->ea = xform_ea(instr, regs);
1636 return 0;
1637
1638 case 246: /* dcbtst */
1639 op->type = MKOP(CACHEOP, DCBTST, 0);
1640 op->ea = xform_ea(instr, regs);
1641 op->reg = rd;
1642 return 0;
1643
1644 case 278: /* dcbt */
1645 op->type = MKOP(CACHEOP, DCBTST, 0);
1646 op->ea = xform_ea(instr, regs);
1647 op->reg = rd;
1648 return 0;
1649
1650 case 982: /* icbi */
1651 op->type = MKOP(CACHEOP, ICBI, 0);
1652 op->ea = xform_ea(instr, regs);
1653 return 0;
1654 }
1655 break;
1656 }
1657
1658 /*
1659 * Loads and stores.
1660 */
1661 op->type = UNKNOWN;
1662 op->update_reg = ra;
1663 op->reg = rd;
1664 op->val = regs->gpr[rd];
1665 u = (instr >> 20) & UPDATE;
1666 op->vsx_flags = 0;
1667
1668 switch (opcode) {
1669 case 31:
1670 u = instr & UPDATE;
1671 op->ea = xform_ea(instr, regs);
1672 switch ((instr >> 1) & 0x3ff) {
1673 case 20: /* lwarx */
1674 op->type = MKOP(LARX, 0, 4);
1675 break;
1676
1677 case 150: /* stwcx. */
1678 op->type = MKOP(STCX, 0, 4);
1679 break;
1680
1681 #ifdef __powerpc64__
1682 case 84: /* ldarx */
1683 op->type = MKOP(LARX, 0, 8);
1684 break;
1685
1686 case 214: /* stdcx. */
1687 op->type = MKOP(STCX, 0, 8);
1688 break;
1689
1690 case 52: /* lbarx */
1691 op->type = MKOP(LARX, 0, 1);
1692 break;
1693
1694 case 694: /* stbcx. */
1695 op->type = MKOP(STCX, 0, 1);
1696 break;
1697
1698 case 116: /* lharx */
1699 op->type = MKOP(LARX, 0, 2);
1700 break;
1701
1702 case 726: /* sthcx. */
1703 op->type = MKOP(STCX, 0, 2);
1704 break;
1705
1706 case 276: /* lqarx */
1707 if (!((rd & 1) || rd == ra || rd == rb))
1708 op->type = MKOP(LARX, 0, 16);
1709 break;
1710
1711 case 182: /* stqcx. */
1712 if (!(rd & 1))
1713 op->type = MKOP(STCX, 0, 16);
1714 break;
1715 #endif
1716
1717 case 23: /* lwzx */
1718 case 55: /* lwzux */
1719 op->type = MKOP(LOAD, u, 4);
1720 break;
1721
1722 case 87: /* lbzx */
1723 case 119: /* lbzux */
1724 op->type = MKOP(LOAD, u, 1);
1725 break;
1726
1727 #ifdef CONFIG_ALTIVEC
1728 case 103: /* lvx */
1729 case 359: /* lvxl */
1730 op->type = MKOP(LOAD_VMX, 0, 16);
1731 op->element_size = 16;
1732 break;
1733
1734 case 231: /* stvx */
1735 case 487: /* stvxl */
1736 op->type = MKOP(STORE_VMX, 0, 16);
1737 break;
1738 #endif /* CONFIG_ALTIVEC */
1739
1740 #ifdef __powerpc64__
1741 case 21: /* ldx */
1742 case 53: /* ldux */
1743 op->type = MKOP(LOAD, u, 8);
1744 break;
1745
1746 case 149: /* stdx */
1747 case 181: /* stdux */
1748 op->type = MKOP(STORE, u, 8);
1749 break;
1750 #endif
1751
1752 case 151: /* stwx */
1753 case 183: /* stwux */
1754 op->type = MKOP(STORE, u, 4);
1755 break;
1756
1757 case 215: /* stbx */
1758 case 247: /* stbux */
1759 op->type = MKOP(STORE, u, 1);
1760 break;
1761
1762 case 279: /* lhzx */
1763 case 311: /* lhzux */
1764 op->type = MKOP(LOAD, u, 2);
1765 break;
1766
1767 #ifdef __powerpc64__
1768 case 341: /* lwax */
1769 case 373: /* lwaux */
1770 op->type = MKOP(LOAD, SIGNEXT | u, 4);
1771 break;
1772 #endif
1773
1774 case 343: /* lhax */
1775 case 375: /* lhaux */
1776 op->type = MKOP(LOAD, SIGNEXT | u, 2);
1777 break;
1778
1779 case 407: /* sthx */
1780 case 439: /* sthux */
1781 op->type = MKOP(STORE, u, 2);
1782 break;
1783
1784 #ifdef __powerpc64__
1785 case 532: /* ldbrx */
1786 op->type = MKOP(LOAD, BYTEREV, 8);
1787 break;
1788
1789 #endif
1790 case 533: /* lswx */
1791 op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
1792 break;
1793
1794 case 534: /* lwbrx */
1795 op->type = MKOP(LOAD, BYTEREV, 4);
1796 break;
1797
1798 case 597: /* lswi */
1799 if (rb == 0)
1800 rb = 32; /* # bytes to load */
1801 op->type = MKOP(LOAD_MULTI, 0, rb);
1802 op->ea = ra ? regs->gpr[ra] : 0;
1803 break;
1804
1805 #ifdef CONFIG_PPC_FPU
1806 case 535: /* lfsx */
1807 case 567: /* lfsux */
1808 op->type = MKOP(LOAD_FP, u, 4);
1809 break;
1810
1811 case 599: /* lfdx */
1812 case 631: /* lfdux */
1813 op->type = MKOP(LOAD_FP, u, 8);
1814 break;
1815
1816 case 663: /* stfsx */
1817 case 695: /* stfsux */
1818 op->type = MKOP(STORE_FP, u, 4);
1819 break;
1820
1821 case 727: /* stfdx */
1822 case 759: /* stfdux */
1823 op->type = MKOP(STORE_FP, u, 8);
1824 break;
1825 #endif
1826
1827 #ifdef __powerpc64__
1828 case 660: /* stdbrx */
1829 op->type = MKOP(STORE, BYTEREV, 8);
1830 op->val = byterev_8(regs->gpr[rd]);
1831 break;
1832
1833 #endif
1834 case 661: /* stswx */
1835 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
1836 break;
1837
1838 case 662: /* stwbrx */
1839 op->type = MKOP(STORE, BYTEREV, 4);
1840 op->val = byterev_4(regs->gpr[rd]);
1841 break;
1842
1843 case 725:
1844 if (rb == 0)
1845 rb = 32; /* # bytes to store */
1846 op->type = MKOP(STORE_MULTI, 0, rb);
1847 op->ea = ra ? regs->gpr[ra] : 0;
1848 break;
1849
1850 case 790: /* lhbrx */
1851 op->type = MKOP(LOAD, BYTEREV, 2);
1852 break;
1853
1854 case 918: /* sthbrx */
1855 op->type = MKOP(STORE, BYTEREV, 2);
1856 op->val = byterev_2(regs->gpr[rd]);
1857 break;
1858
1859 #ifdef CONFIG_VSX
1860 case 12: /* lxsiwzx */
1861 op->reg = rd | ((instr & 1) << 5);
1862 op->type = MKOP(LOAD_VSX, 0, 4);
1863 op->element_size = 8;
1864 break;
1865
1866 case 76: /* lxsiwax */
1867 op->reg = rd | ((instr & 1) << 5);
1868 op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
1869 op->element_size = 8;
1870 break;
1871
1872 case 140: /* stxsiwx */
1873 op->reg = rd | ((instr & 1) << 5);
1874 op->type = MKOP(STORE_VSX, 0, 4);
1875 op->element_size = 8;
1876 break;
1877
1878 case 268: /* lxvx */
1879 op->reg = rd | ((instr & 1) << 5);
1880 op->type = MKOP(LOAD_VSX, 0, 16);
1881 op->element_size = 16;
1882 op->vsx_flags = VSX_CHECK_VEC;
1883 break;
1884
1885 case 269: /* lxvl */
1886 case 301: { /* lxvll */
1887 int nb;
1888 op->reg = rd | ((instr & 1) << 5);
1889 op->ea = ra ? regs->gpr[ra] : 0;
1890 nb = regs->gpr[rb] & 0xff;
1891 if (nb > 16)
1892 nb = 16;
1893 op->type = MKOP(LOAD_VSX, 0, nb);
1894 op->element_size = 16;
1895 op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
1896 VSX_CHECK_VEC;
1897 break;
1898 }
1899 case 332: /* lxvdsx */
1900 op->reg = rd | ((instr & 1) << 5);
1901 op->type = MKOP(LOAD_VSX, 0, 8);
1902 op->element_size = 8;
1903 op->vsx_flags = VSX_SPLAT;
1904 break;
1905
1906 case 364: /* lxvwsx */
1907 op->reg = rd | ((instr & 1) << 5);
1908 op->type = MKOP(LOAD_VSX, 0, 4);
1909 op->element_size = 4;
1910 op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
1911 break;
1912
1913 case 396: /* stxvx */
1914 op->reg = rd | ((instr & 1) << 5);
1915 op->type = MKOP(STORE_VSX, 0, 16);
1916 op->element_size = 16;
1917 op->vsx_flags = VSX_CHECK_VEC;
1918 break;
1919
1920 case 397: /* stxvl */
1921 case 429: { /* stxvll */
1922 int nb;
1923 op->reg = rd | ((instr & 1) << 5);
1924 op->ea = ra ? regs->gpr[ra] : 0;
1925 nb = regs->gpr[rb] & 0xff;
1926 if (nb > 16)
1927 nb = 16;
1928 op->type = MKOP(STORE_VSX, 0, nb);
1929 op->element_size = 16;
1930 op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
1931 VSX_CHECK_VEC;
1932 break;
1933 }
1934 case 524: /* lxsspx */
1935 op->reg = rd | ((instr & 1) << 5);
1936 op->type = MKOP(LOAD_VSX, 0, 4);
1937 op->element_size = 8;
1938 op->vsx_flags = VSX_FPCONV;
1939 break;
1940
1941 case 588: /* lxsdx */
1942 op->reg = rd | ((instr & 1) << 5);
1943 op->type = MKOP(LOAD_VSX, 0, 8);
1944 op->element_size = 8;
1945 break;
1946
1947 case 652: /* stxsspx */
1948 op->reg = rd | ((instr & 1) << 5);
1949 op->type = MKOP(STORE_VSX, 0, 4);
1950 op->element_size = 8;
1951 op->vsx_flags = VSX_FPCONV;
1952 break;
1953
1954 case 716: /* stxsdx */
1955 op->reg = rd | ((instr & 1) << 5);
1956 op->type = MKOP(STORE_VSX, 0, 8);
1957 op->element_size = 8;
1958 break;
1959
1960 case 780: /* lxvw4x */
1961 op->reg = rd | ((instr & 1) << 5);
1962 op->type = MKOP(LOAD_VSX, 0, 16);
1963 op->element_size = 4;
1964 break;
1965
1966 case 781: /* lxsibzx */
1967 op->reg = rd | ((instr & 1) << 5);
1968 op->type = MKOP(LOAD_VSX, 0, 1);
1969 op->element_size = 8;
1970 op->vsx_flags = VSX_CHECK_VEC;
1971 break;
1972
1973 case 812: /* lxvh8x */
1974 op->reg = rd | ((instr & 1) << 5);
1975 op->type = MKOP(LOAD_VSX, 0, 16);
1976 op->element_size = 2;
1977 op->vsx_flags = VSX_CHECK_VEC;
1978 break;
1979
1980 case 813: /* lxsihzx */
1981 op->reg = rd | ((instr & 1) << 5);
1982 op->type = MKOP(LOAD_VSX, 0, 2);
1983 op->element_size = 8;
1984 op->vsx_flags = VSX_CHECK_VEC;
1985 break;
1986
1987 case 844: /* lxvd2x */
1988 op->reg = rd | ((instr & 1) << 5);
1989 op->type = MKOP(LOAD_VSX, 0, 16);
1990 op->element_size = 8;
1991 break;
1992
1993 case 876: /* lxvb16x */
1994 op->reg = rd | ((instr & 1) << 5);
1995 op->type = MKOP(LOAD_VSX, 0, 16);
1996 op->element_size = 1;
1997 op->vsx_flags = VSX_CHECK_VEC;
1998 break;
1999
2000 case 908: /* stxvw4x */
2001 op->reg = rd | ((instr & 1) << 5);
2002 op->type = MKOP(STORE_VSX, 0, 16);
2003 op->element_size = 4;
2004 break;
2005
2006 case 909: /* stxsibx */
2007 op->reg = rd | ((instr & 1) << 5);
2008 op->type = MKOP(STORE_VSX, 0, 1);
2009 op->element_size = 8;
2010 op->vsx_flags = VSX_CHECK_VEC;
2011 break;
2012
2013 case 940: /* stxvh8x */
2014 op->reg = rd | ((instr & 1) << 5);
2015 op->type = MKOP(STORE_VSX, 0, 16);
2016 op->element_size = 2;
2017 op->vsx_flags = VSX_CHECK_VEC;
2018 break;
2019
2020 case 941: /* stxsihx */
2021 op->reg = rd | ((instr & 1) << 5);
2022 op->type = MKOP(STORE_VSX, 0, 2);
2023 op->element_size = 8;
2024 op->vsx_flags = VSX_CHECK_VEC;
2025 break;
2026
2027 case 972: /* stxvd2x */
2028 op->reg = rd | ((instr & 1) << 5);
2029 op->type = MKOP(STORE_VSX, 0, 16);
2030 op->element_size = 8;
2031 break;
2032
2033 case 1004: /* stxvb16x */
2034 op->reg = rd | ((instr & 1) << 5);
2035 op->type = MKOP(STORE_VSX, 0, 16);
2036 op->element_size = 1;
2037 op->vsx_flags = VSX_CHECK_VEC;
2038 break;
2039
2040 #endif /* CONFIG_VSX */
2041 }
2042 break;
2043
2044 case 32: /* lwz */
2045 case 33: /* lwzu */
2046 op->type = MKOP(LOAD, u, 4);
2047 op->ea = dform_ea(instr, regs);
2048 break;
2049
2050 case 34: /* lbz */
2051 case 35: /* lbzu */
2052 op->type = MKOP(LOAD, u, 1);
2053 op->ea = dform_ea(instr, regs);
2054 break;
2055
2056 case 36: /* stw */
2057 case 37: /* stwu */
2058 op->type = MKOP(STORE, u, 4);
2059 op->ea = dform_ea(instr, regs);
2060 break;
2061
2062 case 38: /* stb */
2063 case 39: /* stbu */
2064 op->type = MKOP(STORE, u, 1);
2065 op->ea = dform_ea(instr, regs);
2066 break;
2067
2068 case 40: /* lhz */
2069 case 41: /* lhzu */
2070 op->type = MKOP(LOAD, u, 2);
2071 op->ea = dform_ea(instr, regs);
2072 break;
2073
2074 case 42: /* lha */
2075 case 43: /* lhau */
2076 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2077 op->ea = dform_ea(instr, regs);
2078 break;
2079
2080 case 44: /* sth */
2081 case 45: /* sthu */
2082 op->type = MKOP(STORE, u, 2);
2083 op->ea = dform_ea(instr, regs);
2084 break;
2085
2086 case 46: /* lmw */
2087 if (ra >= rd)
2088 break; /* invalid form, ra in range to load */
2089 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2090 op->ea = dform_ea(instr, regs);
2091 break;
2092
2093 case 47: /* stmw */
2094 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2095 op->ea = dform_ea(instr, regs);
2096 break;
2097
2098 #ifdef CONFIG_PPC_FPU
2099 case 48: /* lfs */
2100 case 49: /* lfsu */
2101 op->type = MKOP(LOAD_FP, u, 4);
2102 op->ea = dform_ea(instr, regs);
2103 break;
2104
2105 case 50: /* lfd */
2106 case 51: /* lfdu */
2107 op->type = MKOP(LOAD_FP, u, 8);
2108 op->ea = dform_ea(instr, regs);
2109 break;
2110
2111 case 52: /* stfs */
2112 case 53: /* stfsu */
2113 op->type = MKOP(STORE_FP, u, 4);
2114 op->ea = dform_ea(instr, regs);
2115 break;
2116
2117 case 54: /* stfd */
2118 case 55: /* stfdu */
2119 op->type = MKOP(STORE_FP, u, 8);
2120 op->ea = dform_ea(instr, regs);
2121 break;
2122 #endif
2123
2124 #ifdef __powerpc64__
2125 case 56: /* lq */
2126 if (!((rd & 1) || (rd == ra)))
2127 op->type = MKOP(LOAD, 0, 16);
2128 op->ea = dqform_ea(instr, regs);
2129 break;
2130 #endif
2131
2132 #ifdef CONFIG_VSX
2133 case 57: /* lxsd, lxssp */
2134 op->ea = dsform_ea(instr, regs);
2135 switch (instr & 3) {
2136 case 2: /* lxsd */
2137 op->reg = rd + 32;
2138 op->type = MKOP(LOAD_VSX, 0, 8);
2139 op->element_size = 8;
2140 op->vsx_flags = VSX_CHECK_VEC;
2141 break;
2142 case 3: /* lxssp */
2143 op->reg = rd + 32;
2144 op->type = MKOP(LOAD_VSX, 0, 4);
2145 op->element_size = 8;
2146 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2147 break;
2148 }
2149 break;
2150 #endif /* CONFIG_VSX */
2151
2152 #ifdef __powerpc64__
2153 case 58: /* ld[u], lwa */
2154 op->ea = dsform_ea(instr, regs);
2155 switch (instr & 3) {
2156 case 0: /* ld */
2157 op->type = MKOP(LOAD, 0, 8);
2158 break;
2159 case 1: /* ldu */
2160 op->type = MKOP(LOAD, UPDATE, 8);
2161 break;
2162 case 2: /* lwa */
2163 op->type = MKOP(LOAD, SIGNEXT, 4);
2164 break;
2165 }
2166 break;
2167 #endif
2168
2169 #ifdef CONFIG_VSX
2170 case 61: /* lxv, stxsd, stxssp, stxv */
2171 switch (instr & 7) {
2172 case 1: /* lxv */
2173 op->ea = dqform_ea(instr, regs);
2174 if (instr & 8)
2175 op->reg = rd + 32;
2176 op->type = MKOP(LOAD_VSX, 0, 16);
2177 op->element_size = 16;
2178 op->vsx_flags = VSX_CHECK_VEC;
2179 break;
2180
2181 case 2: /* stxsd with LSB of DS field = 0 */
2182 case 6: /* stxsd with LSB of DS field = 1 */
2183 op->ea = dsform_ea(instr, regs);
2184 op->reg = rd + 32;
2185 op->type = MKOP(STORE_VSX, 0, 8);
2186 op->element_size = 8;
2187 op->vsx_flags = VSX_CHECK_VEC;
2188 break;
2189
2190 case 3: /* stxssp with LSB of DS field = 0 */
2191 case 7: /* stxssp with LSB of DS field = 1 */
2192 op->ea = dsform_ea(instr, regs);
2193 op->reg = rd + 32;
2194 op->type = MKOP(STORE_VSX, 0, 4);
2195 op->element_size = 8;
2196 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2197 break;
2198
2199 case 5: /* stxv */
2200 op->ea = dqform_ea(instr, regs);
2201 if (instr & 8)
2202 op->reg = rd + 32;
2203 op->type = MKOP(STORE_VSX, 0, 16);
2204 op->element_size = 16;
2205 op->vsx_flags = VSX_CHECK_VEC;
2206 break;
2207 }
2208 break;
2209 #endif /* CONFIG_VSX */
2210
2211 #ifdef __powerpc64__
2212 case 62: /* std[u] */
2213 op->ea = dsform_ea(instr, regs);
2214 switch (instr & 3) {
2215 case 0: /* std */
2216 op->type = MKOP(STORE, 0, 8);
2217 break;
2218 case 1: /* stdu */
2219 op->type = MKOP(STORE, UPDATE, 8);
2220 break;
2221 case 2: /* stq */
2222 if (!(rd & 1))
2223 op->type = MKOP(STORE, 0, 16);
2224 break;
2225 }
2226 break;
2227 #endif /* __powerpc64__ */
2228
2229 }
2230 return 0;
2231
2232 logical_done:
2233 if (instr & 1)
2234 set_cr0(regs, op, ra);
2235 logical_done_nocc:
2236 op->reg = ra;
2237 op->type |= SETREG;
2238 return 1;
2239
2240 arith_done:
2241 if (instr & 1)
2242 set_cr0(regs, op, rd);
2243 compute_done:
2244 op->reg = rd;
2245 op->type |= SETREG;
2246 return 1;
2247
2248 priv:
2249 op->type = INTERRUPT | 0x700;
2250 op->val = SRR1_PROGPRIV;
2251 return 0;
2252
2253 trap:
2254 op->type = INTERRUPT | 0x700;
2255 op->val = SRR1_PROGTRAP;
2256 return 0;
2257 }
2258 EXPORT_SYMBOL_GPL(analyse_instr);
2259 NOKPROBE_SYMBOL(analyse_instr);
2260
2261 /*
2262 * For PPC32 we always use stwu with r1 to change the stack pointer.
2263 * So this emulated store may corrupt the exception frame, now we
2264 * have to provide the exception frame trampoline, which is pushed
2265 * below the kprobed function stack. So we only update gpr[1] but
2266 * don't emulate the real store operation. We will do real store
2267 * operation safely in exception return code by checking this flag.
2268 */
static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	/*
	 * Refuse if the emulated store would land within one exception
	 * frame of the kernel stack limit, i.e. the trampoline frame we
	 * push below the kprobed function would overflow the stack.
	 */
	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
		return -EINVAL;
	}
#endif /* CONFIG_PPC32 */
	/*
	 * Warn if the flag is already set: that would mean a previously
	 * deferred stack store has not been performed yet and its value
	 * is about to be lost.
	 */
	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
	/* Defer the real store to the exception-return path. */
	set_thread_flag(TIF_EMULATE_STACK_STORE);
	return 0;
}
2288
2289 static nokprobe_inline void do_signext(unsigned long *valp, int size)
2290 {
2291 switch (size) {
2292 case 2:
2293 *valp = (signed short) *valp;
2294 break;
2295 case 4:
2296 *valp = (signed int) *valp;
2297 break;
2298 }
2299 }
2300
2301 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
2302 {
2303 switch (size) {
2304 case 2:
2305 *valp = byterev_2(*valp);
2306 break;
2307 case 4:
2308 *valp = byterev_4(*valp);
2309 break;
2310 #ifdef __powerpc64__
2311 case 8:
2312 *valp = byterev_8(*valp);
2313 break;
2314 #endif
2315 }
2316 }
2317
2318 /*
2319 * Emulate an instruction that can be executed just by updating
2320 * fields in *regs.
2321 */
void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
{
	unsigned long next_pc;

	/* Default successor address; truncated to 32 bits in 32-bit mode. */
	next_pc = truncate_if_32bit(regs->msr, regs->nip + 4);
	switch (op->type & INSTR_TYPE_MASK) {
	case COMPUTE:
		/* Result/CR/XER values were already computed by analyse_instr(). */
		if (op->type & SETREG)
			regs->gpr[op->reg] = op->val;
		if (op->type & SETCC)
			regs->ccr = op->ccval;
		if (op->type & SETXER)
			regs->xer = op->xerval;
		break;

	case BRANCH:
		/*
		 * LR gets the address of the instruction following the
		 * branch; set it before next_pc is redirected to the
		 * branch target below.
		 */
		if (op->type & SETLK)
			regs->link = next_pc;
		if (op->type & BRTAKEN)
			next_pc = op->val;
		if (op->type & DECCTR)
			--regs->ctr;
		break;

	case BARRIER:
		/* Execute the real barrier on behalf of the stepped code. */
		switch (op->type & BARRIER_MASK) {
		case BARRIER_SYNC:
			mb();
			break;
		case BARRIER_ISYNC:
			isync();
			break;
		case BARRIER_EIEIO:
			eieio();
			break;
		case BARRIER_LWSYNC:
			asm volatile("lwsync" : : : "memory");
			break;
		case BARRIER_PTESYNC:
			asm volatile("ptesync" : : : "memory");
			break;
		}
		break;

	case MFSPR:
		/*
		 * Only XER, LR and CTR are emulated here; analyse_instr()
		 * routes every other SPR elsewhere, so anything else
		 * reaching this switch is a bug.
		 */
		switch (op->spr) {
		case SPRN_XER:
			/* XER is architecturally 32 bits wide. */
			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
			break;
		case SPRN_LR:
			regs->gpr[op->reg] = regs->link;
			break;
		case SPRN_CTR:
			regs->gpr[op->reg] = regs->ctr;
			break;
		default:
			WARN_ON_ONCE(1);
		}
		break;

	case MTSPR:
		/* Same restriction as MFSPR: only XER/LR/CTR land here. */
		switch (op->spr) {
		case SPRN_XER:
			regs->xer = op->val & 0xffffffffUL;
			break;
		case SPRN_LR:
			regs->link = op->val;
			break;
		case SPRN_CTR:
			regs->ctr = op->val;
			break;
		default:
			WARN_ON_ONCE(1);
		}
		break;

	default:
		WARN_ON_ONCE(1);
	}
	/* Advance (or redirect, for a taken branch) the program counter. */
	regs->nip = next_pc;
}
2403
2404 /*
2405 * Emulate instructions that cause a transfer of control,
2406 * loads and stores, and a few other instructions.
2407 * Returns 1 if the step was emulated, 0 if not,
2408 * or -1 if the instruction is one that should not be stepped,
2409 * such as an rfid, or a mtmsrd that would clear MSR_RI.
2410 */
2411 int emulate_step(struct pt_regs *regs, unsigned int instr)
2412 {
2413 struct instruction_op op;
2414 int r, err, size, type;
2415 unsigned long val;
2416 unsigned int cr;
2417 int i, rd, nb;
2418 unsigned long ea;
2419
2420 r = analyse_instr(&op, regs, instr);
2421 if (r < 0)
2422 return r;
2423 if (r > 0) {
2424 emulate_update_regs(regs, &op);
2425 return 1;
2426 }
2427
2428 err = 0;
2429 size = GETSIZE(op.type);
2430 type = op.type & INSTR_TYPE_MASK;
2431
2432 ea = op.ea;
2433 if (OP_IS_LOAD_STORE(type) || type == CACHEOP)
2434 ea = truncate_if_32bit(regs->msr, op.ea);
2435
2436 switch (type) {
2437 case CACHEOP:
2438 if (!address_ok(regs, ea, 8))
2439 return 0;
2440 switch (op.type & CACHEOP_MASK) {
2441 case DCBST:
2442 __cacheop_user_asmx(ea, err, "dcbst");
2443 break;
2444 case DCBF:
2445 __cacheop_user_asmx(ea, err, "dcbf");
2446 break;
2447 case DCBTST:
2448 if (op.reg == 0)
2449 prefetchw((void *) ea);
2450 break;
2451 case DCBT:
2452 if (op.reg == 0)
2453 prefetch((void *) ea);
2454 break;
2455 case ICBI:
2456 __cacheop_user_asmx(ea, err, "icbi");
2457 break;
2458 }
2459 if (err)
2460 return 0;
2461 goto instr_done;
2462
2463 case LARX:
2464 if (ea & (size - 1))
2465 break; /* can't handle misaligned */
2466 if (!address_ok(regs, ea, size))
2467 return 0;
2468 err = 0;
2469 switch (size) {
2470 #ifdef __powerpc64__
2471 case 1:
2472 __get_user_asmx(val, ea, err, "lbarx");
2473 break;
2474 case 2:
2475 __get_user_asmx(val, ea, err, "lharx");
2476 break;
2477 #endif
2478 case 4:
2479 __get_user_asmx(val, ea, err, "lwarx");
2480 break;
2481 #ifdef __powerpc64__
2482 case 8:
2483 __get_user_asmx(val, ea, err, "ldarx");
2484 break;
2485 case 16:
2486 err = do_lqarx(ea, &regs->gpr[op.reg]);
2487 goto ldst_done;
2488 #endif
2489 default:
2490 return 0;
2491 }
2492 if (!err)
2493 regs->gpr[op.reg] = val;
2494 goto ldst_done;
2495
2496 case STCX:
2497 if (ea & (size - 1))
2498 break; /* can't handle misaligned */
2499 if (!address_ok(regs, ea, size))
2500 return 0;
2501 err = 0;
2502 switch (size) {
2503 #ifdef __powerpc64__
2504 case 1:
2505 __put_user_asmx(op.val, ea, err, "stbcx.", cr);
2506 break;
2507 case 2:
2508 __put_user_asmx(op.val, ea, err, "stbcx.", cr);
2509 break;
2510 #endif
2511 case 4:
2512 __put_user_asmx(op.val, ea, err, "stwcx.", cr);
2513 break;
2514 #ifdef __powerpc64__
2515 case 8:
2516 __put_user_asmx(op.val, ea, err, "stdcx.", cr);
2517 break;
2518 case 16:
2519 err = do_stqcx(ea, regs->gpr[op.reg],
2520 regs->gpr[op.reg + 1], &cr);
2521 break;
2522 #endif
2523 default:
2524 return 0;
2525 }
2526 if (!err)
2527 regs->ccr = (regs->ccr & 0x0fffffff) |
2528 (cr & 0xe0000000) |
2529 ((regs->xer >> 3) & 0x10000000);
2530 goto ldst_done;
2531
2532 case LOAD:
2533 #ifdef __powerpc64__
2534 if (size == 16) {
2535 err = emulate_lq(regs, ea, op.reg);
2536 goto ldst_done;
2537 }
2538 #endif
2539 err = read_mem(&regs->gpr[op.reg], ea, size, regs);
2540 if (!err) {
2541 if (op.type & SIGNEXT)
2542 do_signext(&regs->gpr[op.reg], size);
2543 if (op.type & BYTEREV)
2544 do_byterev(&regs->gpr[op.reg], size);
2545 }
2546 goto ldst_done;
2547
2548 #ifdef CONFIG_PPC_FPU
2549 case LOAD_FP:
2550 if (!(regs->msr & MSR_FP))
2551 return 0;
2552 if (size == 4)
2553 err = do_fp_load(op.reg, do_lfs, ea, size, regs);
2554 else
2555 err = do_fp_load(op.reg, do_lfd, ea, size, regs);
2556 goto ldst_done;
2557 #endif
2558 #ifdef CONFIG_ALTIVEC
2559 case LOAD_VMX:
2560 if (!(regs->msr & MSR_VEC))
2561 return 0;
2562 err = do_vec_load(op.reg, do_lvx, ea, regs);
2563 goto ldst_done;
2564 #endif
#ifdef CONFIG_VSX
	case LOAD_VSX: {
		char mem[16];		/* raw bytes read from memory */
		union vsx_reg buf;	/* assembled VSX register image */
		unsigned long msrbit = MSR_VSX;

		/*
		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
		 * when the target of the instruction is a vector register.
		 */
		if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
			msrbit = MSR_VEC;
		if (!(regs->msr & msrbit))
			return 0;
		/* fetch the raw bytes; fail the step on bad address or fault */
		if (!address_ok(regs, ea, size) ||
		    __copy_from_user(mem, (void __user *)ea, size))
			return 0;

		/* rearrange the raw bytes per the decoded op, then write the VSR */
		emulate_vsx_load(&op, &buf, mem);
		load_vsrn(op.reg, &buf);
		goto ldst_done;
	}
#endif
	case LOAD_MULTI:
		/* load-multiple is not emulated in little-endian mode */
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		/* load one word per GPR, starting at op.reg, 4 bytes at a time */
		for (i = 0; i < size; i += 4) {
			nb = size - i;
			if (nb > 4)
				nb = 4;
			err = read_mem(&regs->gpr[rd], ea, nb, regs);
			if (err)
				return 0;
			if (nb < 4)	/* left-justify last bytes */
				regs->gpr[rd] <<= 32 - 8 * nb;
			ea += 4;
			++rd;
		}
		goto instr_done;
2605
	case STORE:
#ifdef __powerpc64__
		/* 16-byte GPR-pair store is handled by a dedicated helper */
		if (size == 16) {
			err = emulate_stq(regs, ea, op.reg);
			goto ldst_done;
		}
#endif
		/*
		 * A kernel-mode (MSR_PR clear) store-with-update through r1
		 * landing within STACK_INT_FRAME_SIZE below the current stack
		 * pointer looks like a stack-frame push; route it to
		 * handle_stack_update() instead of a plain memory write.
		 */
		if ((op.type & UPDATE) && size == sizeof(long) &&
		    op.reg == 1 && op.update_reg == 1 &&
		    !(regs->msr & MSR_PR) &&
		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(ea, regs);
			goto ldst_done;
		}
		err = write_mem(op.val, ea, size, regs);
		goto ldst_done;
2622
#ifdef CONFIG_PPC_FPU
	case STORE_FP:
		/* refuse to emulate FP stores while the FPU is disabled in the MSR */
		if (!(regs->msr & MSR_FP))
			return 0;
		/* 4-byte stores go through the single-precision path, else double */
		if (size == 4)
			err = do_fp_store(op.reg, do_stfs, ea, size, regs);
		else
			err = do_fp_store(op.reg, do_stfd, ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case STORE_VMX:
		/* refuse to emulate VMX stores while MSR_VEC is off */
		if (!(regs->msr & MSR_VEC))
			return 0;
		err = do_vec_store(op.reg, do_stvx, ea, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case STORE_VSX: {
		char mem[16];		/* raw bytes to be written to memory */
		union vsx_reg buf;	/* VSX register image read from the VSR */
		unsigned long msrbit = MSR_VSX;

		/*
		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
		 * when the target of the instruction is a vector register.
		 */
		if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
			msrbit = MSR_VEC;
		if (!(regs->msr & msrbit))
			return 0;
		if (!address_ok(regs, ea, size))
			return 0;

		/* read the VSR, lay out the bytes per the decoded op, then copy out */
		store_vsrn(op.reg, &buf);
		emulate_vsx_store(&op, &buf, mem);
		if (__copy_to_user((void __user *)ea, mem, size))
			return 0;
		goto ldst_done;
	}
#endif
	case STORE_MULTI:
		/* store-multiple is not emulated in little-endian mode */
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		/* store one word per GPR, starting at op.reg, 4 bytes at a time */
		for (i = 0; i < size; i += 4) {
			val = regs->gpr[rd];
			nb = size - i;
			if (nb > 4)
				nb = 4;
			else
				/* partial last word: use the most-significant bytes */
				val >>= 32 - 8 * nb;
			err = write_mem(val, ea, nb, regs);
			if (err)
				return 0;
			ea += 4;
			++rd;
		}
		goto instr_done;
2682
	case MFMSR:
		/* expose only the SRR1-visible subset of the MSR */
		regs->gpr[op.reg] = regs->msr & MSR_MASK;
		goto instr_done;

	case MTMSR:
		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs->msr = (regs->msr & ~op.val) | (val & op.val);
		goto instr_done;
2695
#ifdef CONFIG_PPC64
	case SYSCALL:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		/* magic r0 value 0x1ebe: endian-switch "syscall", just flip MSR_LE */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		/* hand-build the register state the syscall entry path expects */
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;

	case RFI:
		/* rfi[d] can't be single-stepped; tell the caller to give up */
		return -1;
#endif
	}
	/* unrecognized/unemulated op type */
	return 0;
2722
 ldst_done:
	if (err)
		return 0;
	/* load/store succeeded: apply the update form's base-register write-back */
	if (op.type & UPDATE)
		regs->gpr[op.update_reg] = op.ea;

 instr_done:
	/* advance NIP past the emulated instruction (32-bit safe) */
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;
}
NOKPROBE_SYMBOL(emulate_step);