/* git.proxmox.com, mirror_ubuntu-bionic-kernel.git: arch/powerpc/lib/sstep.c (blob 96283499664bd83bc400f2e51cfe38809d7dbd32) */
/*
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>
extern char system_call_common[];
#ifdef __powerpc64__
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern void load_vsrn(int vsr, const void *p);
extern void store_vsrn(int vsr, void *p);
extern void conv_sp_to_dp(const float *sp, double *dp);
extern void conv_dp_to_sp(const double *dp, float *sp);
extern int do_lq(unsigned long ea, unsigned long *regs);
extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
extern int do_lqarx(unsigned long ea, unsigned long *regs);
extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
		    unsigned int *crp);
#ifdef __LITTLE_ENDIAN__
#define IS_LE	1
#define IS_BE	0
#else
#define IS_LE	0
#define IS_BE	1
#endif

/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
							unsigned long val)
{
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
	return val;
}
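/*
 * Example (illustrative): with MSR_64BIT clear in the saved MSR,
 * truncate_if_32bit(msr, 0x1234567890UL) yields 0x34567890, matching how
 * a CPU running in 32-bit mode wraps a next-instruction or effective
 * address; with MSR_64BIT set the value passes through unchanged.
 */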
/*
 * Determine whether a conditional branch instruction would branch.
 */
static nokprobe_inline int branch_taken(unsigned int instr,
					const struct pt_regs *regs,
					struct instruction_op *op)
	unsigned int bo = (instr >> 21) & 0x1f;

		/* decrement counter */
		if (((bo >> 1) & 1) ^ (regs->ctr == 1))

	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
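/*
 * Example (illustrative): "bne cr7, target" encodes BO = 0b00100 (no CTR
 * decrement, branch on a CR bit being 0) and BI = 30 (the EQ bit of CR
 * field 7), so branch_taken() reports the branch as taken only when that
 * CR bit is clear, i.e. the preceding compare found "not equal".
 */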
static nokprobe_inline long address_ok(struct pt_regs *regs,
				       unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}
/*
 * Calculate effective address for a D-form instruction
 */
static nokprobe_inline unsigned long dform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}
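/*
 * Example (illustrative): for "lwz r3, -8(r4)" the RA field is 4 and the
 * displacement field is 0xfff8, so dform_ea() returns
 * regs->gpr[4] + (signed short)0xfff8, i.e. regs->gpr[4] - 8.
 */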
/*
 * Calculate effective address for a DS-form instruction
 */
static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}
/*
 * Calculate effective address for a DQ-form instruction
 */
static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~0xf);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}
#endif /* __powerpc64 */
/*
 * Calculate effective address for an X-form instruction
 */
static nokprobe_inline unsigned long xform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static nokprobe_inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
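/*
 * Example (illustrative): max_align(0x1006) = 2, max_align(0x1008) = 8 and
 * max_align(0x1001) = 1; the "x |= sizeof(unsigned long)" caps the result
 * at the machine word size even for well-aligned addresses.
 */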
static nokprobe_inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static nokprobe_inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

static nokprobe_inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
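/*
 * Example (illustrative): byterev_4(0x11223344) = 0x44332211 and
 * byterev_8(0x1122334455667788) = 0x8877665544332211.  These helpers back
 * the byte-reversed loads/stores (lwbrx, ldbrx, sthbrx, ...) and the
 * little-endian fixups in the unaligned access paths below.
 */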
static nokprobe_inline int read_mem_aligned(unsigned long *dest,
					    unsigned long ea, int nb)
		err = __get_user(x, (unsigned char __user *) ea);
		err = __get_user(x, (unsigned short __user *) ea);
		err = __get_user(x, (unsigned int __user *) ea);
		err = __get_user(x, (unsigned long __user *) ea);
static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
					      unsigned long ea, int nb,
					      struct pt_regs *regs)
	unsigned long x, b, c;
#ifdef __LITTLE_ENDIAN__
	int len = nb; /* save a copy of the length for byte reversal */

	/* unaligned, do this in pieces */
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
#ifdef __BIG_ENDIAN__
		err = read_mem_aligned(&b, ea, c);
		x = (x << (8 * c)) + b;

#ifdef __LITTLE_ENDIAN__
		*dest = byterev_2(x);
		*dest = byterev_4(x);
		*dest = byterev_8(x);
#ifdef __BIG_ENDIAN__
/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int read_mem(unsigned long *dest, unsigned long ea, int nb,
		    struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}
NOKPROBE_SYMBOL(read_mem);
static nokprobe_inline int write_mem_aligned(unsigned long val,
					     unsigned long ea, int nb)
		err = __put_user(val, (unsigned char __user *) ea);
		err = __put_user(val, (unsigned short __user *) ea);
		err = __put_user(val, (unsigned int __user *) ea);
		err = __put_user(val, (unsigned long __user *) ea);
static nokprobe_inline int write_mem_unaligned(unsigned long val,
					       unsigned long ea, int nb,
					       struct pt_regs *regs)
#ifdef __LITTLE_ENDIAN__
		val = byterev_2(val);
		val = byterev_4(val);
		val = byterev_8(val);

	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
#ifdef __BIG_ENDIAN__
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int write_mem(unsigned long val, unsigned long ea, int nb,
		     struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}
NOKPROBE_SYMBOL(write_mem);
#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int do_fp_load(int rn, int (*func)(int, unsigned long),
		      unsigned long ea, int nb,
		      struct pt_regs *regs)
#ifdef __BIG_ENDIAN__
#ifdef __LITTLE_ENDIAN__
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul;
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
			ptr = (unsigned long)&(data.single.word);

		/* reading a double on 32-bit */
		err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
		err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);

	return (*func)(rn, ptr);
NOKPROBE_SYMBOL(do_fp_load);
static int do_fp_store(int rn, int (*func)(int, unsigned long),
		       unsigned long ea, int nb,
		       struct pt_regs *regs)
#ifdef __BIG_ENDIAN__
#ifdef __LITTLE_ENDIAN__
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
			ptr = (unsigned long)&(data.single.word);
		err = (*func)(rn, ptr);
		err = write_mem_unaligned(data.ul[0], ea, nb, regs);

		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		err = write_mem_unaligned(data.ul[0], ea, 4, regs);
		err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
NOKPROBE_SYMBOL(do_fp_store);
#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static nokprobe_inline int do_vec_load(int rn, int (*func)(int, unsigned long),
					unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static nokprobe_inline int do_vec_store(int rn, int (*func)(int, unsigned long),
					 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */
static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
				      int reg)
	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0)
		return do_lq(ea, &regs->gpr[reg]);

	err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
	err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
				       int reg)
	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0)
		return do_stq(ea, regs->gpr[reg], regs->gpr[reg + 1]);

	err = write_mem(regs->gpr[reg + IS_LE], ea, 8, regs);
	err = write_mem(regs->gpr[reg + IS_BE], ea + 8, 8, regs);
#endif /* __powerpc64 */
void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
		      const void *mem)
	const unsigned int *wp;
	const unsigned short *hp;
	const unsigned char *bp;

	size = GETSIZE(op->type);
	reg->d[0] = reg->d[1] = 0;

	switch (op->element_size) {

		/* whole vector; lxv[x] or lxvl[l] */
		memcpy(reg, mem, size);
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) {
			/* reverse 16 bytes */
			tmp = byterev_8(reg->d[0]);
			reg->d[0] = byterev_8(reg->d[1]);

		/* scalar loads, lxvd2x, lxvdsx */
		read_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - read_size;
		memcpy(&reg->b[i], mem, read_size);

		if (op->type & SIGNEXT) {
			/* size == 4 is the only case here */
			reg->d[IS_LE] = (signed int) reg->d[IS_LE];
		} else if (op->vsx_flags & VSX_FPCONV) {
			conv_sp_to_dp(&reg->fp[1 + IS_LE], &reg->dp[IS_LE]);

			reg->d[IS_BE] = *(unsigned long *)(mem + 8);
		else if (op->vsx_flags & VSX_SPLAT)
			reg->d[IS_BE] = reg->d[IS_LE];

		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
		if (op->vsx_flags & VSX_SPLAT) {
			u32 val = reg->w[IS_LE ? 3 : 0];
				i = IS_LE ? 3 - j : j;

		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;

		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;

EXPORT_SYMBOL_GPL(emulate_vsx_load);
NOKPROBE_SYMBOL(emulate_vsx_load);
void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
		       void *mem)
	int size, write_size;

	size = GETSIZE(op->type);

	switch (op->element_size) {

		/* stxv, stxvx, stxvl, stxvll */
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) {
			/* reverse 16 bytes */
			buf.d[0] = byterev_8(reg->d[1]);
			buf.d[1] = byterev_8(reg->d[0]);
		memcpy(mem, reg, size);

		/* scalar stores, stxvd2x */
		write_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - write_size;
		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
			buf.d[0] = buf.d[1] = 0;
			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
		memcpy(mem, &reg->b[i], write_size);
		memcpy(mem + 8, &reg->d[IS_BE], 8);

		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;

		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;

		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;

EXPORT_SYMBOL_GPL(emulate_vsx_store);
NOKPROBE_SYMBOL(emulate_vsx_store);
#endif /* CONFIG_VSX */
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		".section .fixup,\"ax\"\n"		\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		".section .fixup,\"ax\"\n"		\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		".section .fixup,\"ax\"\n"		\
		: "r" (addr), "i" (-EFAULT), "0" (err))
static nokprobe_inline void set_cr0(const struct pt_regs *regs,
				    struct instruction_op *op, int rd)
{
	long val = regs->gpr[rd];

	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
	if (val < 0)
		op->ccval |= 0x80000000;
	else if (val > 0)
		op->ccval |= 0x40000000;
	else
		op->ccval |= 0x20000000;
}
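/*
 * Example (illustrative): after "addic. r3,r4,1" with a negative result,
 * set_cr0() leaves CR0 as LT=1, GT=0, EQ=0 with SO copied from XER[SO]
 * (0b100x); a zero result gives 0b001x instead.
 */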
static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
				struct instruction_op *op, int rd,
				unsigned long val1, unsigned long val2,
				unsigned long carry_in)
	unsigned long val = val1 + val2;

	op->type = COMPUTE + SETREG + SETXER;

	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}

	op->xerval = regs->xer;
	if (val < val1 || (carry_in && val == val1))
		op->xerval |= XER_CA;
	else
		op->xerval &= ~XER_CA;
static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
					  struct instruction_op *op,
					  long v1, long v2, int crfld)
	unsigned int crval, shift;

	op->type = COMPUTE + SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */

	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
					    struct instruction_op *op,
					    unsigned long v1,
					    unsigned long v2, int crfld)
	unsigned int crval, shift;

	op->type = COMPUTE + SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */

	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v1, unsigned long v2)
	unsigned long long out_val, mask;

	for (i = 0; i < 8; i++) {
		mask = 0xffUL << (i * 8);
		if ((v1 & mask) == (v2 & mask))
/*
 * The size parameter is used to adjust the equivalent popcnt instruction.
 * popcntb = 8, popcntw = 32, popcntd = 64
 */
static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, int size)
	unsigned long long out = v1;

	out -= (out >> 1) & 0x5555555555555555;
	out = (0x3333333333333333 & out) + (0x3333333333333333 & (out >> 2));
	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0f;

	if (size == 8) {	/* popcntb */

	if (size == 32) {	/* popcntw */
		op->val = out & 0x0000003f0000003f;

	out = (out + (out >> 32)) & 0x7f;
	op->val = out;	/* popcntd */
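/*
 * Example (illustrative): for v1 = 0xff the classic SWAR steps above go
 * 0xff -> 0xaa (pair counts) -> 0x44 (nibble counts) -> 0x08 (byte count),
 * so popcntb reports 8 set bits for that byte; popcntw and popcntd keep
 * folding the per-byte counts into 32-bit or 64-bit totals.
 */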
static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, unsigned long v2)
	unsigned char perm, idx;

	for (i = 0; i < 8; i++) {
		idx = (v1 >> (i * 8)) & 0xff;
		if (v2 & PPC_BIT(idx))
#endif /* CONFIG_PPC64 */
/*
 * The size parameter adjusts the equivalent prty instruction.
 * prtyw = 32, prtyd = 64
 */
static nokprobe_inline void do_prty(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v, int size)
	unsigned long long res = v ^ (v >> 8);

	if (size == 32) {	/* prtyw */
		op->val = res & 0x0000000100000001;

	op->val = res & 1;	/* prtyd */
static nokprobe_inline int trap_compare(long v1, long v2)
	if ((unsigned long)v1 < (unsigned long)v2)
	else if ((unsigned long)v1 > (unsigned long)v2)
/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#ifdef __powerpc64__
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
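/*
 * Example (illustrative): MASK32(27, 31) evaluates to 0x0000001f (bits
 * mb..me set in IBM bit numbering), and ROTATE(x, n) is a rotate-left by
 * n that special-cases n == 0 to avoid an undefined full-width shift.
 * "rlwinm rd,rs,0,27,31" therefore reduces to rs & 0x1f.
 */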
/*
 * Decode an instruction, and return information about it in *op
 * without changing *regs.
 * Integer arithmetic and logical instructions, branches, and barrier
 * instructions can be emulated just using the information in *op.
 *
 * Return value is 1 if the instruction can be emulated just by
 * updating *regs with the information in *op, -1 if we need the
 * GPRs but *regs doesn't contain the full register set, or 0
 * otherwise.
 */
int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
		  unsigned int instr)
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;

	opcode = instr >> 26;
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
		op->val = truncate_if_32bit(regs->msr, imm);
		if (branch_taken(instr, regs, op))

		if ((instr & 0xfe2) == 2)

		op->type = BRANCH | BRTAKEN;
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
		if ((instr & 2) == 0)
		op->val = truncate_if_32bit(regs->msr, imm);
		switch ((instr >> 1) & 0x3ff) {
			op->type = COMPUTE + SETCC;
			rd = 7 - ((instr >> 23) & 0x7);
			ra = 7 - ((instr >> 18) & 0x7);
			val = (regs->ccr >> ra) & 0xf;
			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);

		case 528:	/* bcctr */
			imm = (instr & 0x400)? regs->ctr : regs->link;
			op->val = truncate_if_32bit(regs->msr, imm);
			if (branch_taken(instr, regs, op))

		case 18:	/* rfid, scary */
			if (regs->msr & MSR_PR)

		case 150:	/* isync */
			op->type = BARRIER | BARRIER_ISYNC;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			op->type = COMPUTE + SETCC;
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
			op->type = BARRIER + BARRIER_SYNC;
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				op->type = BARRIER + BARRIER_LWSYNC;
			case 2:		/* ptesync */
				op->type = BARRIER + BARRIER_PTESYNC;

		case 854:	/* eieio */
			op->type = BARRIER + BARRIER_EIEIO;

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

#ifdef __powerpc64__
		if (rd & trap_compare(regs->gpr[ra], (short) instr))

		if (rd & trap_compare((int)regs->gpr[ra], (short) instr))

		op->val = regs->gpr[ra] * (short) instr;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
			val = (unsigned int) val;
		do_cmp_unsigned(regs, op, val, imm, rd >> 2);

		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		do_cmp_signed(regs, op, val, imm, rd >> 2);

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, op, rd);

		imm = (short) instr;
		imm += regs->gpr[ra];

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		imm += regs->gpr[ra];
		if (((instr >> 1) & 0x1f) == 2) {
			imm = (short) (instr & 0xffc1);	/* d0 + d2 fields */
			imm |= (instr >> 15) & 0x3e;	/* d1 field */
			op->val = regs->nip + (imm << 16) + 4;
	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		op->val = ROTATE(val, rb) & MASK32(mb, me);

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		op->val = ROTATE(val, rb) & MASK32(mb, me);
		op->val = regs->gpr[rd] | (unsigned short) instr;
		goto logical_done_nocc;

		imm = (unsigned short) instr;
		op->val = regs->gpr[rd] | (imm << 16);
		goto logical_done_nocc;

		op->val = regs->gpr[rd] ^ (unsigned short) instr;
		goto logical_done_nocc;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		op->val = regs->gpr[rd] ^ (imm << 16);
		goto logical_done_nocc;

	case 28:	/* andi. */
		op->val = regs->gpr[rd] & (unsigned short) instr;
		set_cr0(regs, op, ra);
		goto logical_done_nocc;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		op->val = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, op, ra);
		goto logical_done_nocc;
#ifdef __powerpc64__
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				val &= MASK64_L(mb);
			case 1:		/* rldicr */
				val &= MASK64_R(mb);
				val &= MASK64(mb, 63 - sh);
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				val = (regs->gpr[ra] & ~imm) |
					(ROTATE(val, sh) & imm);

			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
				op->val = val & MASK64_L(mb);
				op->val = val & MASK64_R(mb);

		op->type = UNKNOWN;	/* illegal instruction */
		/* isel occupies 32 minor opcodes */
		if (((instr >> 1) & 0x1f) == 15) {
			mb = (instr >> 6) & 0x1f;	/* bc field */
			val = (regs->ccr >> (31 - mb)) & 1;
			val2 = (ra) ? regs->gpr[ra] : 0;
			op->val = (val) ? val2 : regs->gpr[rb];
		switch ((instr >> 1) & 0x3ff) {
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))
#ifdef __powerpc64__
			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))

		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)

		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);

		case 178:	/* mtmsrd */
			if (regs->msr & MSR_PR)
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;

			if ((instr >> 20) & 1) {
				for (sh = 0; sh < 8; ++sh) {
					if (instr & (0x80000 >> sh))
				op->val = regs->ccr & imm;

		case 144:	/* mtcrf */
			op->type = COMPUTE + SETCC;
			val = regs->gpr[rd];
			op->val = regs->ccr;
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					op->val = (op->val & ~imm) |
						(val & imm);

		case 339:	/* mfspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			if (spr == SPRN_XER || spr == SPRN_LR ||
			    spr == SPRN_CTR)

		case 467:	/* mtspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			op->val = regs->gpr[rd];
			if (spr == SPRN_XER || spr == SPRN_LR ||
			    spr == SPRN_CTR)
/*
 * Compare instructions
 */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
			do_cmp_signed(regs, op, val, val2, rd >> 2);

			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			do_cmp_unsigned(regs, op, val, val2, rd >> 2);

		case 508: /* cmpb */
			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
			goto logical_done_nocc;
/*
 * Arithmetic instructions
 */
			add_with_carry(regs, op, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
#ifdef __powerpc64__
		case 9:		/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));

			add_with_carry(regs, op, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));

			op->val = regs->gpr[rb] - regs->gpr[ra];
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));

		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));

			op->val = -regs->gpr[ra];

		case 136:	/* subfe */
			add_with_carry(regs, op, rd, ~regs->gpr[ra],
				       regs->gpr[rb], regs->xer & XER_CA);

		case 138:	/* adde */
			add_with_carry(regs, op, rd, regs->gpr[ra],
				       regs->gpr[rb], regs->xer & XER_CA);

		case 200:	/* subfze */
			add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);

		case 202:	/* addze */
			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);

		case 232:	/* subfme */
			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
#ifdef __powerpc64__
		case 233:	/* mulld */
			op->val = regs->gpr[ra] * regs->gpr[rb];

		case 234:	/* addme */
			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);

		case 235:	/* mullw */
			op->val = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];

			op->val = regs->gpr[ra] + regs->gpr[rb];
#ifdef __powerpc64__
		case 457:	/* divdu */
			op->val = regs->gpr[ra] / regs->gpr[rb];

		case 459:	/* divwu */
			op->val = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
#ifdef __powerpc64__
		case 489:	/* divd */
			op->val = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];

		case 491:	/* divw */
			op->val = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			op->val = __builtin_clzl(regs->gpr[rd]);

			op->val = regs->gpr[rd] & regs->gpr[rb];

			op->val = regs->gpr[rd] & ~regs->gpr[rb];

		case 122:	/* popcntb */
			do_popcnt(regs, op, regs->gpr[rd], 8);
			goto logical_done_nocc;

			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);

		case 154:	/* prtyw */
			do_prty(regs, op, regs->gpr[rd], 32);
			goto logical_done_nocc;

		case 186:	/* prtyd */
			do_prty(regs, op, regs->gpr[rd], 64);
			goto logical_done_nocc;

		case 252:	/* bpermd */
			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
			goto logical_done_nocc;

			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);

			op->val = regs->gpr[rd] ^ regs->gpr[rb];

		case 378:	/* popcntw */
			do_popcnt(regs, op, regs->gpr[rd], 32);
			goto logical_done_nocc;

			op->val = regs->gpr[rd] | ~regs->gpr[rb];

			op->val = regs->gpr[rd] | regs->gpr[rb];

		case 476:	/* nand */
			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);

		case 506:	/* popcntd */
			do_popcnt(regs, op, regs->gpr[rd], 64);
			goto logical_done_nocc;

		case 922:	/* extsh */
			op->val = (signed short) regs->gpr[rd];

		case 954:	/* extsb */
			op->val = (signed char) regs->gpr[rd];
#ifdef __powerpc64__
		case 986:	/* extsw */
			op->val = (signed int) regs->gpr[rd];
/*
 * Shift instructions
 */
			sh = regs->gpr[rb] & 0x3f;
			op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;

			sh = regs->gpr[rb] & 0x3f;
			op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;

		case 792:	/* sraw */
			op->type = COMPUTE + SETREG + SETXER;
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			op->val = ival >> (sh < 32 ? sh : 31);
			op->xerval = regs->xer;
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;

		case 824:	/* srawi */
			op->type = COMPUTE + SETREG + SETXER;
			ival = (signed int) regs->gpr[rd];
			op->val = ival >> sh;
			op->xerval = regs->xer;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
#ifdef __powerpc64__
			sh = regs->gpr[rb] & 0x7f;
			op->val = regs->gpr[rd] << sh;

			sh = regs->gpr[rb] & 0x7f;
			op->val = regs->gpr[rd] >> sh;

		case 794:	/* srad */
			op->type = COMPUTE + SETREG + SETXER;
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			op->val = ival >> (sh < 64 ? sh : 63);
			op->xerval = regs->xer;
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			op->type = COMPUTE + SETREG + SETXER;
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			op->val = ival >> sh;
			op->xerval = regs->xer;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
#endif /* __powerpc64__ */
/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(instr, regs);

			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(instr, regs);

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(instr, regs);

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(instr, regs);

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(instr, regs);
		op->update_reg = ra;
		op->val = regs->gpr[rd];
		u = (instr >> 20) & UPDATE;

		op->ea = xform_ea(instr, regs);
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);

		case 52:	/* lbarx */
			op->type = MKOP(LARX, 0, 1);

		case 694:	/* stbcx. */
			op->type = MKOP(STCX, 0, 1);

		case 116:	/* lharx */
			op->type = MKOP(LARX, 0, 2);

		case 726:	/* sthcx. */
			op->type = MKOP(STCX, 0, 2);

		case 276:	/* lqarx */
			if (!((rd & 1) || rd == ra || rd == rb))
				op->type = MKOP(LARX, 0, 16);

		case 182:	/* stqcx. */
				op->type = MKOP(STCX, 0, 16);
		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);

		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);
#ifdef CONFIG_ALTIVEC
		case 359:	/* lvxl */
			op->type = MKOP(LOAD_VMX, 0, 16);
			op->element_size = 16;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			op->type = MKOP(STORE_VMX, 0, 16);
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
			op->type = MKOP(LOAD, u, 8);

		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);

		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);

		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);

		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);

		case 597:	/* lswi */
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = ra ? regs->gpr[ra] : 0;
#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			op->type = MKOP(LOAD_FP, u, 4);

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			op->type = MKOP(LOAD_FP, u, 8);

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			op->type = MKOP(STORE_FP, u, 4);

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			op->type = MKOP(STORE_FP, u, 8);
#ifdef __powerpc64__
		case 660:	/* stdbrx */
			op->type = MKOP(STORE, BYTEREV, 8);
			op->val = byterev_8(regs->gpr[rd]);

		case 661:	/* stswx */
			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);

		case 662:	/* stwbrx */
			op->type = MKOP(STORE, BYTEREV, 4);
			op->val = byterev_4(regs->gpr[rd]);

				rb = 32;	/* # bytes to store */
			op->type = MKOP(STORE_MULTI, 0, rb);
			op->ea = ra ? regs->gpr[ra] : 0;

		case 790:	/* lhbrx */
			op->type = MKOP(LOAD, BYTEREV, 2);

		case 918:	/* sthbrx */
			op->type = MKOP(STORE, BYTEREV, 2);
			op->val = byterev_2(regs->gpr[rd]);
		case 12:	/* lxsiwzx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 8;

		case 76:	/* lxsiwax */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
			op->element_size = 8;

		case 140:	/* stxsiwx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 4);
			op->element_size = 8;

		case 268:	/* lxvx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;

		case 269:	/* lxvl */
		case 301: {	/* lxvll */
			op->reg = rd | ((instr & 1) << 5);
			op->ea = ra ? regs->gpr[ra] : 0;
			nb = regs->gpr[rb] & 0xff;
			op->type = MKOP(LOAD_VSX, 0, nb);
			op->element_size = 16;
			op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
				VSX_CHECK_VEC;

		case 332:	/* lxvdsx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 8);
			op->element_size = 8;
			op->vsx_flags = VSX_SPLAT;

		case 364:	/* lxvwsx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 4;
			op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;

		case 396:	/* stxvx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;

		case 397:	/* stxvl */
		case 429: {	/* stxvll */
			op->reg = rd | ((instr & 1) << 5);
			op->ea = ra ? regs->gpr[ra] : 0;
			nb = regs->gpr[rb] & 0xff;
			op->type = MKOP(STORE_VSX, 0, nb);
			op->element_size = 16;
			op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
				VSX_CHECK_VEC;

		case 524:	/* lxsspx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV;

		case 588:	/* lxsdx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 8);
			op->element_size = 8;

		case 652:	/* stxsspx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV;

		case 716:	/* stxsdx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 8);
			op->element_size = 8;

		case 780:	/* lxvw4x */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 4;

		case 781:	/* lxsibzx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 1);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;

		case 812:	/* lxvh8x */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 2;
			op->vsx_flags = VSX_CHECK_VEC;

		case 813:	/* lxsihzx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 2);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;

		case 844:	/* lxvd2x */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 8;

		case 876:	/* lxvb16x */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 1;
			op->vsx_flags = VSX_CHECK_VEC;

		case 908:	/* stxvw4x */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 4;

		case 909:	/* stxsibx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 1);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;

		case 940:	/* stxvh8x */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 2;
			op->vsx_flags = VSX_CHECK_VEC;

		case 941:	/* stxsihx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 2);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;

		case 972:	/* stxvd2x */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 8;

		case 1004:	/* stxvb16x */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 1;
			op->vsx_flags = VSX_CHECK_VEC;
#endif /* CONFIG_VSX */
		op->type = MKOP(LOAD, u, 4);
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(LOAD, u, 1);
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(STORE, u, 4);
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(STORE, u, 1);
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(LOAD, u, 2);
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(LOAD, SIGNEXT | u, 2);
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(STORE, u, 2);
		op->ea = dform_ea(instr, regs);

			break;		/* invalid form, ra in range to load */
		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
#ifdef CONFIG_PPC_FPU
		op->type = MKOP(LOAD_FP, u, 4);
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(LOAD_FP, u, 8);
		op->ea = dform_ea(instr, regs);

	case 53:	/* stfsu */
		op->type = MKOP(STORE_FP, u, 4);
		op->ea = dform_ea(instr, regs);

	case 55:	/* stfdu */
		op->type = MKOP(STORE_FP, u, 8);
		op->ea = dform_ea(instr, regs);
#ifdef __powerpc64__
		if (!((rd & 1) || (rd == ra)))
			op->type = MKOP(LOAD, 0, 16);
		op->ea = dqform_ea(instr, regs);

	case 57:	/* lxsd, lxssp */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
			op->type = MKOP(LOAD_VSX, 0, 8);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;

			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
#endif /* CONFIG_VSX */
#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
			op->type = MKOP(LOAD, 0, 8);
			op->type = MKOP(LOAD, UPDATE, 8);
			op->type = MKOP(LOAD, SIGNEXT, 4);
	case 61:	/* lxv, stxsd, stxssp, stxv */
		switch (instr & 7) {
			op->ea = dqform_ea(instr, regs);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;

		case 2:		/* stxsd with LSB of DS field = 0 */
		case 6:		/* stxsd with LSB of DS field = 1 */
			op->ea = dsform_ea(instr, regs);
			op->type = MKOP(STORE_VSX, 0, 8);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;

		case 3:		/* stxssp with LSB of DS field = 0 */
		case 7:		/* stxssp with LSB of DS field = 1 */
			op->ea = dsform_ea(instr, regs);
			op->type = MKOP(STORE_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;

			op->ea = dqform_ea(instr, regs);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;
#endif /* CONFIG_VSX */
#ifdef __powerpc64__
	case 62:	/* std[u] */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
			op->type = MKOP(STORE, 0, 8);
			op->type = MKOP(STORE, UPDATE, 8);
			op->type = MKOP(STORE, 0, 16);
#endif /* __powerpc64__ */

	set_cr0(regs, op, ra);

	set_cr0(regs, op, rd);

	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGPRIV;

	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGTRAP;
EXPORT_SYMBOL_GPL(analyse_instr);
NOKPROBE_SYMBOL(analyse_instr);
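/*
 * Sketch of how the two halves fit together (illustrative only; assumes a
 * caller that already holds the task's struct pt_regs):
 *
 *	struct instruction_op op;
 *	int r = analyse_instr(&op, regs, instr);
 *
 *	if (r == 1)
 *		emulate_update_regs(regs, &op);
 *
 * A return of 0 means op describes a load, store or cache operation that
 * still has to be carried out, as emulate_step() does further down; -1
 * means the full GPR set was not available in *regs.
 */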
/*
 * For PPC32 we always use stwu with r1 to change the stack pointer, so
 * this emulated store may corrupt the exception frame; we therefore have
 * to provide an exception frame trampoline, which is pushed below the
 * kprobed function's stack. So we only update gpr[1] here and don't
 * emulate the real store operation; the real store is done safely in the
 * exception return code, which checks this flag.
 */
static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
	/*
	 * Check whether we would overflow the kernel stack.
	 */
	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
#endif /* CONFIG_PPC32 */
	/*
	 * Check if the flag is already set, since that means we'd
	 * lose the previous value.
	 */
	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
	set_thread_flag(TIF_EMULATE_STACK_STORE);
static nokprobe_inline void do_signext(unsigned long *valp, int size)
		*valp = (signed short) *valp;
		*valp = (signed int) *valp;

static nokprobe_inline void do_byterev(unsigned long *valp, int size)
		*valp = byterev_2(*valp);
		*valp = byterev_4(*valp);
#ifdef __powerpc64__
		*valp = byterev_8(*valp);
/*
 * Emulate an instruction that can be executed just by updating
 * register values.
 */
void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
	unsigned long next_pc;

	next_pc = truncate_if_32bit(regs->msr, regs->nip + 4);
	switch (op->type & INSTR_TYPE_MASK) {
		if (op->type & SETREG)
			regs->gpr[op->reg] = op->val;
		if (op->type & SETCC)
			regs->ccr = op->ccval;
		if (op->type & SETXER)
			regs->xer = op->xerval;

		if (op->type & SETLK)
			regs->link = next_pc;
		if (op->type & BRTAKEN)
		if (op->type & DECCTR)

		switch (op->type & BARRIER_MASK) {
		case BARRIER_LWSYNC:
			asm volatile("lwsync" : : : "memory");
		case BARRIER_PTESYNC:
			asm volatile("ptesync" : : : "memory");

			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
			regs->gpr[op->reg] = regs->link;
			regs->gpr[op->reg] = regs->ctr;

			regs->xer = op->val & 0xffffffffUL;
			regs->link = op->val;
			regs->ctr = op->val;

	regs->nip = next_pc;
/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int emulate_step(struct pt_regs *regs, unsigned int instr)
	struct instruction_op op;
	int r, err, size, type;

	r = analyse_instr(&op, regs, instr);
		emulate_update_regs(regs, &op);
	size = GETSIZE(op.type);
	type = op.type & INSTR_TYPE_MASK;

	if (OP_IS_LOAD_STORE(type) || type == CACHEOP)
		ea = truncate_if_32bit(regs->msr, op.ea);

		if (!address_ok(regs, ea, 8))
		switch (op.type & CACHEOP_MASK) {
			__cacheop_user_asmx(ea, err, "dcbst");
			__cacheop_user_asmx(ea, err, "dcbf");
			prefetchw((void *) ea);
			prefetch((void *) ea);
			__cacheop_user_asmx(ea, err, "icbi");
		if (ea & (size - 1))
			break;		/* can't handle misaligned */
		if (!address_ok(regs, ea, size))
#ifdef __powerpc64__
			__get_user_asmx(val, ea, err, "lbarx");
			__get_user_asmx(val, ea, err, "lharx");
			__get_user_asmx(val, ea, err, "lwarx");
#ifdef __powerpc64__
			__get_user_asmx(val, ea, err, "ldarx");
			err = do_lqarx(ea, &regs->gpr[op.reg]);
		regs->gpr[op.reg] = val;
		if (ea & (size - 1))
			break;		/* can't handle misaligned */
		if (!address_ok(regs, ea, size))
#ifdef __powerpc64__
			__put_user_asmx(op.val, ea, err, "stbcx.", cr);
			__put_user_asmx(op.val, ea, err, "stbcx.", cr);
			__put_user_asmx(op.val, ea, err, "stwcx.", cr);
#ifdef __powerpc64__
			__put_user_asmx(op.val, ea, err, "stdcx.", cr);
			err = do_stqcx(ea, regs->gpr[op.reg],
				       regs->gpr[op.reg + 1], &cr);
			regs->ccr = (regs->ccr & 0x0fffffff) |
				(cr & 0xe0000000) |
				((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
			err = emulate_lq(regs, ea, op.reg);
		err = read_mem(&regs->gpr[op.reg], ea, size, regs);
		if (op.type & SIGNEXT)
			do_signext(&regs->gpr[op.reg], size);
		if (op.type & BYTEREV)
			do_byterev(&regs->gpr[op.reg], size);
#ifdef CONFIG_PPC_FPU
		if (!(regs->msr & MSR_FP))
			err = do_fp_load(op.reg, do_lfs, ea, size, regs);
			err = do_fp_load(op.reg, do_lfd, ea, size, regs);
#ifdef CONFIG_ALTIVEC
		if (!(regs->msr & MSR_VEC))
		err = do_vec_load(op.reg, do_lvx, ea, regs);

		unsigned long msrbit = MSR_VSX;
		/*
		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
		 * when the target of the instruction is a vector register.
		 */
		if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
		if (!(regs->msr & msrbit))
		if (!address_ok(regs, ea, size) ||
		    __copy_from_user(mem, (void __user *)ea, size))
		emulate_vsx_load(&op, &buf, mem);
		load_vsrn(op.reg, &buf);

		if (regs->msr & MSR_LE)
		for (i = 0; i < size; i += 4) {
			err = read_mem(&regs->gpr[rd], ea, nb, regs);
			if (nb < 4)	/* left-justify last bytes */
				regs->gpr[rd] <<= 32 - 8 * nb;
#ifdef __powerpc64__
			err = emulate_stq(regs, ea, op.reg);

		if ((op.type & UPDATE) && size == sizeof(long) &&
		    op.reg == 1 && op.update_reg == 1 &&
		    !(regs->msr & MSR_PR) &&
		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(ea, regs);
		err = write_mem(op.val, ea, size, regs);
#ifdef CONFIG_PPC_FPU
		if (!(regs->msr & MSR_FP))
			err = do_fp_store(op.reg, do_stfs, ea, size, regs);
			err = do_fp_store(op.reg, do_stfd, ea, size, regs);
#ifdef CONFIG_ALTIVEC
		if (!(regs->msr & MSR_VEC))
		err = do_vec_store(op.reg, do_stvx, ea, regs);

		unsigned long msrbit = MSR_VSX;
		/*
		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
		 * when the target of the instruction is a vector register.
		 */
		if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
		if (!(regs->msr & msrbit))
		if (!address_ok(regs, ea, size))
		store_vsrn(op.reg, &buf);
		emulate_vsx_store(&op, &buf, mem);
		if (__copy_to_user((void __user *)ea, mem, size))

		if (regs->msr & MSR_LE)
		for (i = 0; i < size; i += 4) {
			val = regs->gpr[rd];
				val >>= 32 - 8 * nb;
			err = write_mem(val, ea, nb, regs);
		regs->gpr[op.reg] = regs->msr & MSR_MASK;

		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
		/* here op.val is the mask of bits to change */
		regs->msr = (regs->msr & ~op.val) | (val & op.val);
	case SYSCALL:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;

		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
	if (op.type & UPDATE)
		regs->gpr[op.update_reg] = op.ea;

	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
NOKPROBE_SYMBOL(emulate_step);
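/*
 * Illustrative usage sketch (assumes a kprobes-style caller that has
 * already fetched the instruction word at regs->nip):
 *
 *	unsigned int instr = *(unsigned int *)regs->nip;
 *	int r = emulate_step(regs, instr);
 *
 * r == 1 means the step was emulated and regs->nip already points at the
 * next instruction; r == 0 means the caller must execute the instruction
 * some other way (for example out of line); r == -1 means the instruction
 * should not be stepped at all, such as rfid or an mtmsrd that would
 * clear MSR_RI.
 */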