/*
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
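/* Usage sketch (illustrative): decoder arms guard themselves with e.g.
   ARCH(6T2); which tests the matching ENABLE_ARCH_* predicate and jumps
   to the local illegal_op label when the feature is absent. */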
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
} DisasContext;
static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;
#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif
}
static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}
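/* Worked example (illustrative): a = 0x00020003, b = 0x00040005 yields
   a = 3 * 5 = 15 (low halves) and b = 2 * 4 = 8 (high halves), i.e. the
   two signed 16x16->32 products used by the dual-multiply instructions. */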
/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
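/* Example: var = 0x11223344 -> tmp = 0x00110033, var = 0x22004400, giving
   0x22114433: the two bytes of each halfword are swapped. */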
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
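/* Example: var = 0x000080ff -> 0x80ff after zero-extension, 0xff80 after
   the 16-bit byteswap, 0xffffff80 after sign-extension (the REVSH result). */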
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
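/* The xori/subi pair is the usual branch-free sign extension: with the value
   masked to `width` bits, (x ^ signbit) - signbit propagates the sign bit.
   E.g. width = 4, x = 0xe: (0xe ^ 0x8) - 0x8 = -2. */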
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
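/* Example: gen_bfi(dest, base, val, 8, 0xff) deposits the low byte of val
   into bits 15:8 of base and leaves the remaining bits of base unchanged. */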
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    dead_tmp(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    dead_tmp(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
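/* Clearing bit 15 of both operands guarantees the low-half add cannot carry
   into the high halfword; the saved (t0 ^ t1) & 0x8000 then rebuilds the
   correct bit 15 of the sum, since that bit is t0 ^ t1 ^ carry-in. */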
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}
/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}
/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}
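/* Note on the shift == 0 path of case 3 above: ARM encodes RRX as ROR #0,
   so the value is shifted right one bit and the old carry flag is rotated
   into bit 31. */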
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
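/* These comparisons rely on how the flags are cached in CPUState: ZF holds a
   value that is zero exactly when Z is set, NF and VF carry N and V in their
   bit 31, and CF is stored as 0 or 1 (see gen_logic_CC and gen_set_CF_bit31
   above). */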
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = insn & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = insn & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
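/* Example: vfp_reg_offset(0, 5) resolves single-precision s5 to the upper
   half of vfp.regs[2]; going through CPU_DoubleU's l.upper/l.lower keeps the
   addressing correct on both little- and big-endian hosts. */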
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT	(1 << 20)
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = new_tmp();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    dead_tmp(var);
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = new_tmp();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    dead_tmp(tmp);
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {			/* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {					/* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }
        wrd = (insn >> 12) & 0xf;
        addr = new_tmp();
        if (gen_iwmmxt_address(s, insn, addr)) {
            dead_tmp(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {			/* WLDRW wCx */
                tmp = new_tmp();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {		/* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {				/* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {		/* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {				/* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {			/* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {		/* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {				/* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {		/* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {				/* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        dead_tmp(addr);
        return 0;
    }
& 0x0f000000) != 0x0e000000)
1432 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1433 case 0x000: /* WOR */
1434 wrd
= (insn
>> 12) & 0xf;
1435 rd0
= (insn
>> 0) & 0xf;
1436 rd1
= (insn
>> 16) & 0xf;
1437 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1438 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1439 gen_op_iwmmxt_setpsr_nz();
1440 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1441 gen_op_iwmmxt_set_mup();
1442 gen_op_iwmmxt_set_cup();
    case 0x011:						/* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:						/* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:						/* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:						/* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:						/* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:				/* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:	/* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:	/* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:	/* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:	/* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:	/* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:	/* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:	/* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:	/* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:	/* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:	/* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:	/* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        dead_tmp(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:	/* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:	/* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:	/* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:	/* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:	/* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:	/* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:	/* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:	/* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:	/* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:	/* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:	/* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:	/* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:	/* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:	/* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:	/* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:	/* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:	/* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:	/* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:	/* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:					/* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:					/* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:		/* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            dead_tmp(tmp2);
            dead_tmp(tmp);
            return 1;
        }
        dead_tmp(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:					/* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:					/* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:					/* MIABB */
        case 0xd:					/* MIABT */
        case 0xe:					/* MIATB */
        case 0xf:					/* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        dead_tmp(tmp2);
        dead_tmp(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {			/* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {					/* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;

    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        if (!env->cp[cp].cp_read)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = new_tmp();
        tmp2 = tcg_const_i32(insn);
        gen_helper_get_cp(tmp, cpu_env, tmp2);
        tcg_temp_free(tmp2);
        store_reg(s, rd, tmp);
    } else {
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        tmp2 = tcg_const_i32(insn);
        gen_helper_set_cp(cpu_env, tmp2, tmp);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
    }
    return 0;
}
static int cp15_user_ok(uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
                || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}
static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    TCGv tmp;
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            return 0;
        }
        store_reg(s, rd, tmp);
    } else {
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            dead_tmp(tmp);
            return 0;
        }
    }
    return 1;
}
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;

    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = new_tmp();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        dead_tmp(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
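/* Note (added commentary, not from the original source): a single-precision
   register number is five bits, the 4-bit field shifted left by one with the
   extra bit in the low position, while a double-precision register keeps the
   4-bit field and (on VFP3/NEON only) gains the extra bit as bit 4.  For the
   D position, for example, VFP_SREG_D yields (bits[15:12] << 1) | bit[22],
   whereas VFP_DREG_D yields bits[15:12] | (bit[22] << 4).  */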
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = new_tmp();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    dead_tmp(tmp);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
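/* Note (illustrative, not from the original source): the dup helpers above
   replicate one element across a 32-bit word.  For example gen_neon_dup_u8
   turns 0x000000AB into 0xABABABAB and gen_neon_dup_low16 turns 0x0000ABCD
   into 0xABCDABCD, which is how a scalar is broadcast to every lane of a
   doubleword register half.  */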
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen, pass, size;
    TCGv addr, tmp, tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_N(rn, insn);
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = new_tmp();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xff);
                            dead_tmp(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
                            dead_tmp(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = new_tmp();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        dead_tmp(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            dead_tmp(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
                    /* VCVT from int is always from S reg regardless of dp bit.
                     * VCVT with immediate frac_bits has same format as SREG_M
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 4: /* vcvtb.f32.f16 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        dead_tmp(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        dead_tmp(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = new_tmp();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        dead_tmp(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = new_tmp();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        dead_tmp(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, 16 - rm);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, 32 - rm);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, 16 - rm);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, 32 - rm);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, 16 - rm);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, 32 - rm);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, 16 - rm);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, 32 - rm);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
                    /* VCVT double to int: always integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if (dp && (insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if (s->thumb && rn == 15) {
                /* PC relative load; the base address is word aligned.  */
                addr = new_tmp();
                tcg_gen_movi_i32(addr, s->pc & ~2);
            } else {
                addr = load_reg(s, rn);
            }
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                dead_tmp(addr);
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    dead_tmp(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
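/* Note (added commentary): direct block chaining is only safe when the
   source and destination share a guest page, which is what the
   TARGET_PAGE_MASK comparison above checks.  Exiting with (long)tb + n
   returns the TB pointer with the jump slot index n encoded in the low
   bits so the execution loop can patch this jump once the target TB is
   translated; exiting with 0 instead means "do not chain".  */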
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead.  */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    dead_tmp(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = new_tmp();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return.  Marks pc as dead.  */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    dead_tmp(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: return 1;
    }
    return 0;
}
static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
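/* Note (added commentary): the (size << 1) | u switch index packs the element
   size and the unsigned bit into a single value, so case 2, for example, is
   size==1/u==0, i.e. the signed 16-bit helper _s16, and case 5 is
   size==2/u==1, the unsigned 32-bit helper _u32.  */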
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    dead_tmp(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
{
    TCGv rd, rm, tmp;

    rd = new_tmp();
    rm = new_tmp();
    tmp = new_tmp();

    tcg_gen_andi_i32(rd, t0, 0xff);
    tcg_gen_shri_i32(tmp, t0, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t1, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff000000);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(rm, t0, 8);
    tcg_gen_andi_i32(rm, rm, 0xff);
    tcg_gen_shri_i32(tmp, t0, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_shli_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_andi_i32(tmp, t1, 0xff000000);
    tcg_gen_or_i32(t1, rm, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rm);
    dead_tmp(rd);
}
static void gen_neon_zip_u8(TCGv t0, TCGv t1)
{
    TCGv rd, rm, tmp;

    rd = new_tmp();
    rm = new_tmp();
    tmp = new_tmp();

    tcg_gen_andi_i32(rd, t0, 0xff);
    tcg_gen_shli_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t0, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t1, 24);
    tcg_gen_andi_i32(tmp, tmp, 0xff000000);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_andi_i32(rm, t1, 0xff000000);
    tcg_gen_shri_i32(tmp, t0, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_shri_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_shri_i32(tmp, t0, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff);
    tcg_gen_or_i32(t1, rm, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rm);
    dead_tmp(rd);
}
static void gen_neon_zip_u16(TCGv t0, TCGv t1)
{
    TCGv tmp, tmp2;

    tmp = new_tmp();
    tmp2 = new_tmp();

    tcg_gen_andi_i32(tmp, t0, 0xffff);
    tcg_gen_shli_i32(tmp2, t1, 16);
    tcg_gen_or_i32(tmp, tmp, tmp2);
    tcg_gen_andi_i32(t1, t1, 0xffff0000);
    tcg_gen_shri_i32(tmp2, t0, 16);
    tcg_gen_or_i32(t1, t1, tmp2);
    tcg_gen_mov_i32(t0, tmp);

    dead_tmp(tmp2);
    dead_tmp(tmp);
}
static void gen_neon_unzip(int reg, int q, int tmp, int size)
{
    int n;
    TCGv t0, t1;

    for (n = 0; n < q + 1; n += 2) {
        t0 = neon_load_reg(reg, n);
        t1 = neon_load_reg(reg, n + 1);
        switch (size) {
        case 0: gen_neon_unzip_u8(t0, t1); break;
        case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same.  */
        case 2: /* no-op */; break;
        default: abort();
        }
        neon_store_scratch(tmp + n, t0);
        neon_store_scratch(tmp + n + 1, t1);
    }
}
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = new_tmp();
    tmp = new_tmp();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rd);
}
static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = new_tmp();
    tmp = new_tmp();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rd);
}
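/* Note (illustrative, not from the original source): with t0=0xAAAABBBB and
   t1=0xCCCCDDDD, gen_neon_trn_u16 leaves t0=0xBBBBDDDD and t1=0xAAAACCCC,
   i.e. it transposes the 16-bit lanes of the 2x2 element matrix formed by
   the two inputs, which is what VTRN.16 requires for each 32-bit chunk.  */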
/* Number of registers, interleave pattern and register spacing for each
   NEON "load/store multiple elements" encoding.  (Table entries restored
   here from context; the extracted fragment only preserved the
   declaration.)  */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
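/* Note (added commentary): op, bits [11:8] of the instruction, indexes this
   table.  For example a VLD4 uses interleave 4 with spacing 1, loading four
   consecutive registers with their elements interleaved in memory, while the
   "spacing 2" forms place the structure in every other register.  */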
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op, nregs, interleave, spacing, stride;
    int size, reg, pass, load, shift, n;
    TCGv addr, tmp, tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    addr = new_tmp();
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            dead_tmp(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = new_tmp();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    dead_tmp(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = new_tmp();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            dead_tmp(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            if (!load)
                return 1;
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;
            stride = (insn & (1 << 5)) ? 2 : 1;
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                switch (size) {
                case 0:
                    tmp = gen_ld8u(addr, IS_USER(s));
                    gen_neon_dup_u8(tmp, 0);
                    break;
                case 1:
                    tmp = gen_ld16u(addr, IS_USER(s));
                    gen_neon_dup_low16(tmp);
                    break;
                case 2:
                    tmp = gen_ld32(addr, IS_USER(s));
                    break;
                case 3:
                    return 1;
                default: /* Avoid compiler warnings.  */
                    abort();
                }
                tcg_gen_addi_i32(addr, addr, 1 << size);
                tmp2 = new_tmp();
                tcg_gen_mov_i32(tmp2, tmp);
                neon_store_reg(rd, 0, tmp2);
                neon_store_reg(rd, 1, tmp);
                rd += stride;
            }
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        dead_tmp(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            stride = nregs * (1 << size);
        }
    }
    dead_tmp(addr);
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            dead_tmp(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
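/* Note (added commentary): the three NEON bit-select instructions all map
   onto this one helper with different operand roles: VBSL selects using the
   destination as the mask, VBIT inserts source bits where the mask is set,
   and VBIF where it is clear.  The callers below simply permute the t/f/c
   arguments accordingly.  */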
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    dead_tmp(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        dead_tmp(a);
        dead_tmp(b);
    }
}
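/* Note (added commentary): the 32x32->64 cases consume their inputs inside
   gen_muls/gen_mulu, but as the comment above says the _s8/_u8/_s16/_u16
   helpers do not, so the size < 2 path releases a and b here rather than in
   each caller.  */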
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */
static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int op, q, u, size, shift, pass, count, pairwise, n;
    int rd, rn, rm;
    uint32_t imm, mask;
    uint64_t imm64;
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
                          || op == 10 || op == 11 || op == 16)) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case 1: /* VQADD */
                    if (u) {
                        gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case 5: /* VQSUB */
                    if (u) {
                        gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case 8: /* VSHL */
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case 9: /* VQSHL */
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case 10: /* VRSHL */
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case 11: /* VQRSHL */
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                case 16:
                    if (u) { /* VSUB */
                        tcg_gen_sub_i64(CPU_V001);
                    } else { /* VADD */
                        tcg_gen_add_i64(CPU_V001);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        switch (op) {
        case 8: /* VSHL */
        case 9: /* VQSHL */
        case 10: /* VRSHL */
        case 11: /* VQRSHL */
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
                pairwise = 0;
            }
            break;
        case 20: /* VPMAX */
        case 21: /* VPMIN */
        case 23: /* VPADD */
            pairwise = 1;
            break;
        case 26: /* VPADD (float) */
            pairwise = (u && size < 2);
            break;
        case 30: /* VPMIN/VPMAX (float) */
            pairwise = u;
            break;
        default:
            pairwise = 0;
            break;
        }

        for (pass = 0; pass < (q ? 4 : 2); pass++) {

            if (pairwise) {
                /* Pairwise.  */
                if (q)
                    n = (pass & 1) * 2;
                else
                    n = 0;
                if (pass < q + 1) {
                    tmp = neon_load_reg(rn, n);
                    tmp2 = neon_load_reg(rn, n + 1);
                } else {
                    tmp = neon_load_reg(rm, n);
                    tmp2 = neon_load_reg(rm, n + 1);
                }
            } else {
                /* Elementwise.  */
                tmp = neon_load_reg(rn, pass);
                tmp2 = neon_load_reg(rm, pass);
            }
            switch (op) {
            case 0: /* VHADD */
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case 1: /* VQADD */
                GEN_NEON_INTEGER_OP_ENV(qadd);
                break;
            case 2: /* VRHADD */
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case 3: /* Logic ops.  */
                switch ((u << 2) | size) {
                case 0: /* VAND */
                    tcg_gen_and_i32(tmp, tmp, tmp2);
                    break;
                case 1: /* BIC */
                    tcg_gen_andc_i32(tmp, tmp, tmp2);
                    break;
                case 2: /* VORR */
                    tcg_gen_or_i32(tmp, tmp, tmp2);
                    break;
                case 3: /* VORN */
                    tcg_gen_orc_i32(tmp, tmp, tmp2);
                    break;
                case 4: /* VEOR */
                    tcg_gen_xor_i32(tmp, tmp, tmp2);
                    break;
                case 5: /* VBSL */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp, tmp2, tmp3);
                    dead_tmp(tmp3);
                    break;
                case 6: /* VBIT */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp, tmp3, tmp2);
                    dead_tmp(tmp3);
                    break;
                case 7: /* VBIF */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp3, tmp, tmp2);
                    dead_tmp(tmp3);
                    break;
                }
                break;
            case 4: /* VHSUB */
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case 5: /* VQSUB */
                GEN_NEON_INTEGER_OP_ENV(qsub);
                break;
            case 6: /* VCGT */
                GEN_NEON_INTEGER_OP(cgt);
                break;
            case 7: /* VCGE */
                GEN_NEON_INTEGER_OP(cge);
                break;
            case 8: /* VSHL */
                GEN_NEON_INTEGER_OP(shl);
                break;
            case 9: /* VQSHL */
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case 10: /* VRSHL */
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case 11: /* VQRSHL */
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case 12: /* VMAX */
                GEN_NEON_INTEGER_OP(max);
                break;
            case 13: /* VMIN */
                GEN_NEON_INTEGER_OP(min);
                break;
            case 14: /* VABD */
                GEN_NEON_INTEGER_OP(abd);
                break;
            case 15: /* VABA */
                GEN_NEON_INTEGER_OP(abd);
                dead_tmp(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                gen_neon_add(size, tmp, tmp2);
                break;
            case 16:
                if (!u) { /* VADD */
                    if (gen_neon_add(size, tmp, tmp2))
                        return 1;
                } else { /* VSUB */
                    switch (size) {
                    case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
                    case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
                    default: return 1;
                    }
                }
                break;
            case 17:
                if (!u) { /* VTST */
                    switch (size) {
                    case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
                    default: return 1;
                    }
                } else { /* VCEQ */
                    switch (size) {
                    case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                    default: return 1;
                    }
                }
                break;
            case 18: /* Multiply.  */
                switch (size) {
                case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                default: return 1;
                }
                dead_tmp(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                if (u) { /* VMLS */
                    gen_neon_rsb(size, tmp, tmp2);
                } else { /* VMLA */
                    gen_neon_add(size, tmp, tmp2);
                }
                break;
            case 19: /* VMUL */
                if (u) { /* polynomial */
                    gen_helper_neon_mul_p8(tmp, tmp, tmp2);
                } else { /* Integer */
                    switch (size) {
                    case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                    case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                    default: return 1;
                    }
                }
                break;
            case 20: /* VPMAX */
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case 21: /* VPMIN */
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case 22: /* Multiply high.  */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
                    case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
                    default: return 1;
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
                    case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
                    default: return 1;
                    }
                }
                break;
            case 23: /* VPADD */
                if (u)
                    return 1;
                switch (size) {
                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
                default: return 1;
                }
                break;
            case 26: /* Floating point arithmetic.  */
                switch ((u << 2) | size) {
                case 0: /* VADD */
                    gen_helper_neon_add_f32(tmp, tmp, tmp2);
                    break;
                case 2: /* VSUB */
                    gen_helper_neon_sub_f32(tmp, tmp, tmp2);
                    break;
                case 4: /* VPADD */
                    gen_helper_neon_add_f32(tmp, tmp, tmp2);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(tmp, tmp, tmp2);
                    break;
                default:
                    return 1;
                }
                break;
            case 27: /* Float multiply.  */
                gen_helper_neon_mul_f32(tmp, tmp, tmp2);
                if (!u) {
                    dead_tmp(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    if (size == 0) {
                        gen_helper_neon_add_f32(tmp, tmp, tmp2);
                    } else {
                        gen_helper_neon_sub_f32(tmp, tmp2, tmp);
                    }
                }
                break;
            case 28: /* Float compare.  */
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
                } else {
                    if (size == 0)
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2);
                    else
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
                }
                break;
            case 29: /* Float compare absolute.  */
                if (!u)
                    return 1;
                if (size == 0)
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2);
                else
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
                break;
            case 30: /* Float min/max.  */
                if (size == 0)
                    gen_helper_neon_max_f32(tmp, tmp, tmp2);
                else
                    gen_helper_neon_min_f32(tmp, tmp, tmp2);
                break;
            case 31:
                if (size == 0)
                    gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                else
                    gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                break;
            default:
                abort();
            }
            dead_tmp(tmp2);

            /* Save the result.  For elementwise operations we can put it
               straight into the destination register.  For pairwise operations
               we have to be careful to avoid clobbering the source operands.  */
            if (pairwise && rd == rm) {
                neon_store_scratch(pass, tmp);
            } else {
                neon_store_reg(rd, pass, tmp);
            }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift.  */
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4)
                    shift = shift - (1 << (size + 3));
                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }
                switch (size) {
                case 0:
                    imm = (uint8_t) shift;
                    imm |= imm << 8;
                    imm |= imm << 16;
                    break;
                case 1:
                    imm = (uint16_t) shift;
                    imm |= imm << 16;
                    break;
                case 2:
                case 3:
                    imm = shift;
                    break;
                default:
                    abort();
                }

                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 0: /* VSHR */
                        case 1: /* VSRA */
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 4: /* VSRI */
                            if (!u)
                                return 1;
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 5: /* VSHL, VSLI */
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHLU */
                            if (u) {
                                gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
                                                          cpu_V0, cpu_V1);
                            } else {
                                return 1;
                            }
                            break;
                        case 7: /* VQSHL */
                            if (u) {
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            }
                            break;
                        }
                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            uint64_t mask64;
                            neon_load_reg64(cpu_V1, rd + pass);
                            if (shift < -63 || shift > 63) {
                                mask64 = 0;
                            } else {
                                if (op == 4)
                                    mask64 = 0xffffffffffffffffull >> -shift;
                                else
                                    mask64 = 0xffffffffffffffffull << shift;
                            }
                            tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask64);
                            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1.  */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = new_tmp();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 0: /* VSHR */
                        case 1: /* VSRA */
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 4: /* VSRI */
                            if (!u)
                                return 1;
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 5: /* VSHL, VSLI */
                            switch (size) {
                            case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                            break;
                        case 6: /* VQSHLU */
                            if (!u)
                                return 1;
                            switch (size) {
                            case 0:
                                gen_helper_neon_qshlu_s8(tmp, cpu_env,
                                                         tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_qshlu_s16(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            case 2:
                                gen_helper_neon_qshlu_s32(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            default:
                                return 1;
                            }
                            break;
                        case 7: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        }
                        dead_tmp(tmp2);

                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp, tmp2);
                            dead_tmp(tmp2);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            switch (size) {
                            case 0:
                                if (op == 4)
                                    mask = 0xff >> -shift;
                                else
                                    mask = (uint8_t)(0xff << shift);
                                mask |= mask << 8;
                                mask |= mask << 16;
                                break;
                            case 1:
                                if (op == 4)
                                    mask = 0xffff >> -shift;
                                else
                                    mask = (uint16_t)(0xffff << shift);
                                mask |= mask << 16;
                                break;
                            case 2:
                                if (shift < -31 || shift > 31) {
                                    mask = 0;
                                } else {
                                    if (op == 4)
                                        mask = 0xffffffffu >> -shift;
                                    else
                                        mask = 0xffffffffu << shift;
                                }
                                break;
                            default:
                                abort();
                            }
                            tmp2 = neon_load_reg(rd, pass);
                            tcg_gen_andi_i32(tmp, tmp, mask);
                            tcg_gen_andi_i32(tmp2, tmp2, ~mask);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            dead_tmp(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                shift = shift - (1 << (size + 3));
                size++;
                switch (size) {
                case 1:
                    imm = (uint16_t)shift;
                    imm |= imm << 16;
                    tmp2 = tcg_const_i32(imm);
                    TCGV_UNUSED_I64(tmp64);
                    break;
                case 2:
                    imm = (uint32_t)shift;
                    tmp2 = tcg_const_i32(imm);
                    TCGV_UNUSED_I64(tmp64);
                    break;
                case 3:
                    tmp64 = tcg_const_i64(shift);
                    TCGV_UNUSED(tmp2);
                    break;
                default:
                    abort();
                }

                for (pass = 0; pass < 2; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        if (q) {
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
                        } else {
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
                        }
                    } else {
                        tmp = neon_load_reg(rm + pass, 0);
                        gen_neon_shift_narrow(size, tmp, tmp2, q, u);
                        tmp3 = neon_load_reg(rm + pass, 1);
                        gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        dead_tmp(tmp);
                        dead_tmp(tmp3);
                    }
                    tmp = new_tmp();
                    if (op == 8 && !u) {
                        gen_neon_narrow(size - 1, tmp, cpu_V0);
                    } else {
                        if (op == 8)
                            gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
                        else
                            gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
                    }
                    neon_store_reg(rd, pass, tmp);
                } /* for pass */
                if (size == 3) {
                    tcg_temp_free_i64(tmp64);
                } else {
                    tcg_temp_free_i32(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL */
                if (q || size == 3)
                    return 1;
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow
                         * input.  */
                        if (size < 2 || !u) {
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else if (size == 1) {
                                imm = 0xffff >> (16 - shift);
                            } else {
                                /* size == 2 */
                                imm = 0xffffffff >> (32 - shift);
                            }
                            if (size < 2) {
                                imm64 = imm | (((uint64_t)imm) << 32);
                            } else {
                                imm64 = imm;
                            }
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point.  */
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (!(op & 1)) {
                        if (u)
                            gen_vfp_ulto(0, shift);
                        else
                            gen_vfp_slto(0, shift);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift);
                        else
                            gen_vfp_tosl(0, shift);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            int invert;

            op = (insn >> 8) & 0xf;
            /* One register and immediate.  */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                break;
            case 15:
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert)
                imm = ~imm;

            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                if (op & 1 && op < 12) {
                    tmp = neon_load_reg(rd, pass);
                    if (invert) {
                        /* The immediate value has already been inverted, so
                           BIC becomes AND.  */
                        tcg_gen_andi_i32(tmp, tmp, imm);
                    } else {
                        tcg_gen_ori_i32(tmp, tmp, imm);
                    }
                } else {
                    /* VMOV, VMVN.  */
                    tmp = new_tmp();
                    if (op == 14 && invert) {
                        uint32_t val;
                        val = 0;
                        for (n = 0; n < 4; n++) {
                            if (imm & (1 << (n + (pass & 1) * 4)))
                                val |= 0xff << (n * 8);
                        }
                        tcg_gen_movi_i32(tmp, val);
                    } else {
                        tcg_gen_movi_i32(tmp, imm);
                    }
                }
                neon_store_reg(rd, pass, tmp);
            }
        }
    } else { /* (insn & 0x00800010 == 0x00800000) */
        if (size != 3) {
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths.  */
                int src1_wide;
                int src2_wide;
                int prewiden;
                /* prewiden, src1_wide, src2_wide */
                static const int neon_3reg_wide[16][3] = {
                    {1, 0, 0}, /* VADDL */
                    {1, 1, 0}, /* VADDW */
                    {1, 0, 0}, /* VSUBL */
                    {1, 1, 0}, /* VSUBW */
                    {0, 1, 1}, /* VADDHN */
                    {0, 0, 0}, /* VABAL */
                    {0, 1, 1}, /* VSUBHN */
                    {0, 0, 0}, /* VABDL */
                    {0, 0, 0}, /* VMLAL */
                    {0, 0, 0}, /* VQDMLAL */
                    {0, 0, 0}, /* VMLSL */
                    {0, 0, 0}, /* VQDMLSL */
                    {0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0}, /* VQDMULL */
                    {0, 0, 0}  /* Polynomial VMULL */
                };

                prewiden = neon_3reg_wide[op][0];
                src1_wide = neon_3reg_wide[op][1];
                src2_wide = neon_3reg_wide[op][2];

                if (size == 0 && (op == 9 || op == 11 || op == 13))
                    return 1;

                /* Avoid overlapping operands.  Wide source operands are
                   always aligned so will never overlap with wide
                   destinations in problematic ways.  */
                if (rd == rm && !src2_wide) {
                    tmp = neon_load_reg(rm, 1);
                    neon_store_scratch(2, tmp);
                } else if (rd == rn && !src1_wide) {
                    tmp = neon_load_reg(rn, 1);
                    neon_store_scratch(2, tmp);
                }
                TCGV_UNUSED(tmp3);
                for (pass = 0; pass < 2; pass++) {
                    if (src1_wide) {
                        neon_load_reg64(cpu_V0, rn + pass);
                        TCGV_UNUSED(tmp);
                    } else {
                        if (pass == 1 && rd == rn) {
                            tmp = neon_load_scratch(2);
                        } else {
                            tmp = neon_load_reg(rn, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V0, tmp, size, u);
                        }
                    }
                    if (src2_wide) {
                        neon_load_reg64(cpu_V1, rm + pass);
                        TCGV_UNUSED(tmp2);
                    } else {
                        if (pass == 1 && rd == rm) {
                            tmp2 = neon_load_scratch(2);
                        } else {
                            tmp2 = neon_load_reg(rm, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V1, tmp2, size, u);
                        }
                    }
                    switch (op) {
                    case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
                        gen_neon_addl(size);
                        break;
                    case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
                        gen_neon_subl(size);
                        break;
                    case 5: case 7: /* VABAL, VABDL */
                        switch ((size << 1) | u) {
                        case 0:
                            gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
                            break;
                        case 1:
                            gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
                            break;
                        case 2:
                            gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
                            break;
                        case 3:
                            gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
                            break;
                        case 4:
                            gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
                            break;
                        case 5:
                            gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
                            break;
                        default: abort();
                        }
                        dead_tmp(tmp2);
                        dead_tmp(tmp);
                        break;
                    case 8: case 9: case 10: case 11: case 12: case 13:
                        /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        break;
                    case 14: /* Polynomial VMULL */
                        gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
                        dead_tmp(tmp2);
                        dead_tmp(tmp);
                        break;
                    default: /* 15 is RESERVED.  */
                        return 1;
                    }
                    if (op == 13) {
                        /* VQDMULL */
                        gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 5 || (op >= 8 && op <= 11)) {
                        /* Accumulate.  */
                        neon_load_reg64(cpu_V1, rd + pass);
                        switch (op) {
                        case 10: /* VMLSL */
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 5: case 8: /* VABAL, VMLAL */
                            gen_neon_addl(size);
                            break;
                        case 9: case 11: /* VQDMLAL, VQDMLSL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 11) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 4 || op == 6) {
                        /* Narrowing operation.  */
                        tmp = new_tmp();
                        if (!u) {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        } else {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        }
                        if (pass == 0) {
                            tmp3 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp3);
                            neon_store_reg(rd, 1, tmp);
                        }
                    } else {
                        /* Write back the result.  */
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                }
            } else {
                /* Two registers and a scalar.  */
                switch (op) {
                case 0: /* Integer VMLA scalar */
                case 1: /* Float VMLA scalar */
                case 4: /* Integer VMLS scalar */
                case 5: /* Floating point VMLS scalar */
                case 8: /* Integer VMUL scalar */
                case 9: /* Floating point VMUL scalar */
                case 12: /* VQDMULH scalar */
                case 13: /* VQRDMULH scalar */
                    tmp = neon_get_scalar(size, rm);
                    neon_store_scratch(0, tmp);
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
                        tmp = neon_load_scratch(0);
                        tmp2 = neon_load_reg(rn, pass);
                        if (op == 12) {
                            if (size == 1) {
                                gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op == 13) {
                            if (size == 1) {
                                gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op & 1) {
                            gen_helper_neon_mul_f32(tmp, tmp, tmp2);
                        } else {
                            switch (size) {
                            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                        }
                        dead_tmp(tmp2);
                        if (op < 8) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            switch (op) {
                            case 0:
                                gen_neon_add(size, tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                                break;
                            case 4:
                                gen_neon_rsb(size, tmp, tmp2);
                                break;
                            case 5:
                                gen_helper_neon_sub_f32(tmp, tmp2, tmp);
                                break;
                            default:
                                abort();
                            }
                            dead_tmp(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                    break;
                case 2: /* VMLAL scalar */
                case 3: /* VQDMLAL scalar */
                case 6: /* VMLSL scalar */
                case 7: /* VQDMLSL scalar */
                case 10: /* VMULL scalar */
                case 11: /* VQDMULL scalar */
                    if (size == 0 && (op == 3 || op == 7 || op == 11))
                        return 1;

                    tmp2 = neon_get_scalar(size, rm);
                    /* We need a copy of tmp2 because gen_neon_mull
                     * deletes it during pass 0.  */
                    tmp4 = new_tmp();
                    tcg_gen_mov_i32(tmp4, tmp2);
                    tmp3 = neon_load_reg(rn, 1);

                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rn, 0);
                        } else {
                            tmp = tmp3;
                            tmp2 = tmp4;
                        }
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        if (op != 11) {
                            neon_load_reg64(cpu_V1, rd + pass);
                        }
                        switch (op) {
                        case 6:
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 2:
                            gen_neon_addl(size);
                            break;
                        case 3: case 7:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 7) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        case 10:
                            /* no-op */
                            break;
                        case 11:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                default: /* 14 and 15 are RESERVED */
                    return 1;
                }
            }
        } else { /* size == 3 */
            if (!u) {
                /* Extract.  */
                imm = (insn >> 8) & 0xf;

                if (imm > 7 && !q)
                    return 1;

                if (imm == 0) {
                    neon_load_reg64(cpu_V0, rn);
                    if (q) {
                        neon_load_reg64(cpu_V1, rn + 1);
                    }
                } else if (imm == 8) {
                    neon_load_reg64(cpu_V0, rn + 1);
                    if (q) {
                        neon_load_reg64(cpu_V1, rm);
                    }
                } else if (q) {
                    tmp64 = tcg_temp_new_i64();
                    if (imm < 8) {
                        neon_load_reg64(cpu_V0, rn);
                        neon_load_reg64(tmp64, rn + 1);
                    } else {
                        neon_load_reg64(cpu_V0, rn + 1);
                        neon_load_reg64(tmp64, rm);
                    }
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
                    tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                    if (imm < 8) {
                        neon_load_reg64(cpu_V1, rm);
                    } else {
                        neon_load_reg64(cpu_V1, rm + 1);
                        imm -= 8;
                    }
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
                    tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
                    tcg_temp_free_i64(tmp64);
                } else {
                    neon_load_reg64(cpu_V0, rn);
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
                    neon_load_reg64(cpu_V1, rm);
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                }
                neon_store_reg64(cpu_V0, rd);
                if (q) {
                    neon_store_reg64(cpu_V1, rd + 1);
                }
            } else if ((insn & (1 << 11)) == 0) {
                /* Two register misc.  */
                op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
                size = (insn >> 18) & 3;
                switch (op) {
                case 0: /* VREV64 */
                    if (size == 3)
                        return 1;
                    for (pass = 0; pass < (q ? 2 : 1); pass++) {
                        tmp = neon_load_reg(rm, pass * 2);
                        tmp2 = neon_load_reg(rm, pass * 2 + 1);
                        switch (size) {
                        case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                        case 1: gen_swap_half(tmp); break;
                        case 2: /* no-op */ break;
                        default: abort();
                        }
                        neon_store_reg(rd, pass * 2 + 1, tmp);
                        if (size == 2) {
                            neon_store_reg(rd, pass * 2, tmp2);
                        } else {
                            switch (size) {
                            case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
                            case 1: gen_swap_half(tmp2); break;
                            default: abort();
                            }
                            neon_store_reg(rd, pass * 2, tmp2);
                        }
                    }
                    break;
                case 4: case 5: /* VPADDL */
                case 12: case 13: /* VPADAL */
                    if (size == 3)
                        return 1;
                    for (pass = 0; pass < q + 1; pass++) {
                        tmp = neon_load_reg(rm, pass * 2);
                        gen_neon_widen(cpu_V0, tmp, size, op & 1);
                        tmp = neon_load_reg(rm, pass * 2 + 1);
                        gen_neon_widen(cpu_V1, tmp, size, op & 1);
                        switch (size) {
                        case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
                        case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
                        case 2: tcg_gen_add_i64(CPU_V001); break;
                        default: abort();
                        }
                        if (op >= 12) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            gen_neon_addl(size);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                case 33: /* VTRN */
                    if (size == 2) {
                        for (n = 0; n < (q ? 4 : 2); n += 2) {
                            tmp = neon_load_reg(rm, n);
                            tmp2 = neon_load_reg(rd, n + 1);
                            neon_store_reg(rm, n, tmp2);
                            neon_store_reg(rd, n + 1, tmp);
                        }
                    } else {
                        goto elementwise;
                    }
                    break;
                case 34: /* VUZP */
                    /* Reg  Before       After
                       Rd   A3 A2 A1 A0  B2 B0 A2 A0
                       Rm   B3 B2 B1 B0  B3 B1 A3 A1
                     */
                    if (size == 3)
                        return 1;
                    gen_neon_unzip(rd, q, 0, size);
                    gen_neon_unzip(rm, q, 4, size);
                    if (q) {
                        static int unzip_order_q[8] =
                            {0, 2, 4, 6, 1, 3, 5, 7};
                        for (n = 0; n < 8; n++) {
                            int reg = (n < 4) ? rd : rm;
                            tmp = neon_load_scratch(unzip_order_q[n]);
                            neon_store_reg(reg, n % 4, tmp);
                        }
                    } else {
                        static int unzip_order[4] =
                            {0, 4, 1, 5};
                        for (n = 0; n < 4; n++) {
                            int reg = (n < 2) ? rd : rm;
                            tmp = neon_load_scratch(unzip_order[n]);
                            neon_store_reg(reg, n % 2, tmp);
                        }
                    }
                    break;
                case 35: /* VZIP */
                    /* Reg  Before       After
                       Rd   A3 A2 A1 A0  B1 A1 B0 A0
                       Rm   B3 B2 B1 B0  B3 A3 B2 A2
                     */
                    if (size == 3)
                        return 1;
                    count = (q ? 4 : 2);
                    for (n = 0; n < count; n++) {
                        tmp = neon_load_reg(rd, n);
                        tmp2 = neon_load_reg(rm, n);
                        switch (size) {
                        case 0: gen_neon_zip_u8(tmp, tmp2); break;
                        case 1: gen_neon_zip_u16(tmp, tmp2); break;
                        case 2: /* no-op */; break;
                        default: abort();
                        }
                        neon_store_scratch(n * 2, tmp);
                        neon_store_scratch(n * 2 + 1, tmp2);
                    }
                    for (n = 0; n < count * 2; n++) {
                        int reg = (n < count) ? rd : rm;
                        tmp = neon_load_scratch(n);
                        neon_store_reg(reg, n % count, tmp);
                    }
                    break;
                case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
                    if (size == 3)
                        return 1;
                    TCGV_UNUSED(tmp2);
                    for (pass = 0; pass < 2; pass++) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tmp = new_tmp();
                        if (op == 36) {
                            if (q) { /* VQMOVUN */
                                gen_neon_unarrow_sats(size, tmp, cpu_V0);
                            } else { /* VMOVN */
                                gen_neon_narrow(size, tmp, cpu_V0);
                            }
                        } else { /* VQMOVN */
                            if (q) {
                                gen_neon_narrow_satu(size, tmp, cpu_V0);
                            } else {
                                gen_neon_narrow_sats(size, tmp, cpu_V0);
                            }
                        }
                        if (pass == 0) {
                            tmp2 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp2);
                            neon_store_reg(rd, 1, tmp);
                        }
                    }
                    break;
                case 38: /* VSHLL */
                    if (q || size == 3)
                        return 1;
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 1)
                            tmp = tmp2;
                        gen_neon_widen(cpu_V0, tmp, size, 1);
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                case 44: /* VCVT.F16.F32 */
                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                        return 1;
                    tmp = new_tmp();
                    tmp2 = new_tmp();
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
                    gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
                    gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                    tcg_gen_shli_i32(tmp2, tmp2, 16);
                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
                    gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
                    neon_store_reg(rd, 0, tmp2);
                    tmp2 = new_tmp();
                    gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                    tcg_gen_shli_i32(tmp2, tmp2, 16);
                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                    neon_store_reg(rd, 1, tmp2);
                    dead_tmp(tmp);
                    break;
                case 46: /* VCVT.F32.F16 */
                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                        return 1;
                    tmp3 = new_tmp();
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                    tcg_gen_ext16u_i32(tmp3, tmp);
                    gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_shri_i32(tmp3, tmp, 16);
                    gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
                    dead_tmp(tmp);
                    tcg_gen_ext16u_i32(tmp3, tmp2);
                    gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
                    tcg_gen_shri_i32(tmp3, tmp2, 16);
                    gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
                    dead_tmp(tmp2);
                    dead_tmp(tmp3);
                    break;
                default:
                elementwise:
                    for (pass = 0; pass < (q ? 4 : 2); pass++) {
                        if (op == 30 || op == 31 || op >= 58) {
                            tcg_gen_ld_f32(cpu_F0s, cpu_env,
                                           neon_reg_offset(rm, pass));
                            TCGV_UNUSED(tmp);
                        } else {
                            tmp = neon_load_reg(rm, pass);
                        }
                        switch (op) {
                        case 1: /* VREV32 */
                            switch (size) {
                            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                            case 1: gen_swap_half(tmp); break;
                            default: return 1;
                            }
                            break;
                        case 2: /* VREV16 */
                            if (size != 0)
                                return 1;
                            gen_rev16(tmp);
                            break;
                        case 8: /* CLS */
                            switch (size) {
                            case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
                            case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
                            case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
                            default: return 1;
                            }
                            break;
                        case 9: /* CLZ */
                            switch (size) {
                            case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
                            case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
                            case 2: gen_helper_clz(tmp, tmp); break;
                            default: return 1;
                            }
                            break;
                        case 10: /* CNT */
                            if (size != 0)
                                return 1;
                            gen_helper_neon_cnt_u8(tmp, tmp);
                            break;
                        case 11: /* VNOT */
                            if (size != 0)
                                return 1;
                            tcg_gen_not_i32(tmp, tmp);
                            break;
                        case 14: /* VQABS */
                            switch (size) {
                            case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
                            case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
                            case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
                            default: return 1;
                            }
                            break;
                        case 15: /* VQNEG */
                            switch (size) {
                            case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
                            case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
                            case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
                            default: return 1;
                            }
                            break;
                        case 16: case 19: /* VCGT #0, VCLE #0 */
                            tmp2 = tcg_const_i32(0);
                            switch (size) {
                            case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                            tcg_temp_free(tmp2);
                            if (op == 19)
                                tcg_gen_not_i32(tmp, tmp);
                            break;
                        case 17: case 20: /* VCGE #0, VCLT #0 */
                            tmp2 = tcg_const_i32(0);
                            switch (size) {
                            case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                            tcg_temp_free(tmp2);
                            if (op == 20)
                                tcg_gen_not_i32(tmp, tmp);
                            break;
                        case 18: /* VCEQ #0 */
                            tmp2 = tcg_const_i32(0);
                            switch (size) {
                            case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                            tcg_temp_free(tmp2);
                            break;
                        case 22: /* VABS */
                            switch (size) {
                            case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
                            case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
                            case 2: tcg_gen_abs_i32(tmp, tmp); break;
                            default: return 1;
                            }
                            break;
                        case 23: /* VNEG */
                            if (size == 3)
                                return 1;
                            tmp2 = tcg_const_i32(0);
                            gen_neon_rsb(size, tmp, tmp2);
                            tcg_temp_free(tmp2);
                            break;
                        case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
                            tcg_temp_free(tmp2);
                            if (op == 27)
                                tcg_gen_not_i32(tmp, tmp);
                            break;
                        case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cge_f32(tmp, tmp, tmp2);
                            tcg_temp_free(tmp2);
                            if (op == 28)
                                tcg_gen_not_i32(tmp, tmp);
                            break;
                        case 26: /* Float VCEQ #0 */
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
                            tcg_temp_free(tmp2);
                            break;
                        case 30: /* Float VABS */
                            gen_vfp_abs(0);
                            break;
                        case 31: /* Float VNEG */
                            gen_vfp_neg(0);
                            break;
                        case 32: /* VSWP */
                            tmp2 = neon_load_reg(rd, pass);
                            neon_store_reg(rm, pass, tmp2);
                            break;
                        case 33: /* VTRN */
                            tmp2 = neon_load_reg(rd, pass);
                            switch (size) {
                            case 0: gen_neon_trn_u8(tmp, tmp2); break;
                            case 1: gen_neon_trn_u16(tmp, tmp2); break;
                            case 2: abort();
                            default: return 1;
                            }
                            neon_store_reg(rm, pass, tmp2);
                            break;
                        case 56: /* Integer VRECPE */
                            gen_helper_recpe_u32(tmp, tmp, cpu_env);
                            break;
                        case 57: /* Integer VRSQRTE */
                            gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
                            break;
                        case 58: /* Float VRECPE */
                            gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
                            break;
                        case 59: /* Float VRSQRTE */
                            gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
                            break;
                        case 60: /* VCVT.F32.S32 */
                            gen_vfp_sito(0);
                            break;
                        case 61: /* VCVT.F32.U32 */
                            gen_vfp_uito(0);
                            break;
                        case 62: /* VCVT.S32.F32 */
                            gen_vfp_tosiz(0);
                            break;
                        case 63: /* VCVT.U32.F32 */
                            gen_vfp_touiz(0);
                            break;
                        default:
                            /* Reserved: 21, 29, 39-56 */
                            return 1;
                        }
                        if (op == 30 || op == 31 || op >= 58) {
                            tcg_gen_st_f32(cpu_F0s, cpu_env,
                                           neon_reg_offset(rd, pass));
                        } else {
                            neon_store_reg(rd, pass, tmp);
                        }
                    }
                    break;
                }
            } else if ((insn & (1 << 10)) == 0) {
                /* VTBL, VTBX.  */
                n = ((insn >> 5) & 0x18) + 8;
                if (insn & (1 << 6)) {
                    tmp = neon_load_reg(rd, 0);
                } else {
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, 0);
                }
                tmp2 = neon_load_reg(rm, 0);
                tmp4 = tcg_const_i32(rn);
                tmp5 = tcg_const_i32(n);
                gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
                dead_tmp(tmp);
                if (insn & (1 << 6)) {
                    tmp = neon_load_reg(rd, 1);
                } else {
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, 0);
                }
                tmp3 = neon_load_reg(rm, 1);
                gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
                tcg_temp_free_i32(tmp5);
                tcg_temp_free_i32(tmp4);
                neon_store_reg(rd, 0, tmp2);
                neon_store_reg(rd, 1, tmp3);
                dead_tmp(tmp);
            } else if ((insn & 0x380) == 0) {
                /* VDUP */
                if (insn & (1 << 19)) {
                    tmp = neon_load_reg(rm, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                }
                if (insn & (1 << 16)) {
                    gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
                } else if (insn
& (1 << 17)) {
5807 if ((insn
>> 18) & 1)
5808 gen_neon_dup_high16(tmp
);
5810 gen_neon_dup_low16(tmp
);
5812 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5814 tcg_gen_mov_i32(tmp2
, tmp
);
5815 neon_store_reg(rd
, pass
, tmp2
);
5826 static int disas_cp14_read(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
5828 int crn
= (insn
>> 16) & 0xf;
5829 int crm
= insn
& 0xf;
5830 int op1
= (insn
>> 21) & 7;
5831 int op2
= (insn
>> 5) & 7;
5832 int rt
= (insn
>> 12) & 0xf;
5835 if (arm_feature(env
, ARM_FEATURE_THUMB2EE
)) {
5836 if (op1
== 6 && crn
== 0 && crm
== 0 && op2
== 0) {
5840 tmp
= load_cpu_field(teecr
);
5841 store_reg(s
, rt
, tmp
);
5844 if (op1
== 6 && crn
== 1 && crm
== 0 && op2
== 0) {
5846 if (IS_USER(s
) && (env
->teecr
& 1))
5848 tmp
= load_cpu_field(teehbr
);
5849 store_reg(s
, rt
, tmp
);
5853 fprintf(stderr
, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5854 op1
, crn
, crm
, op2
);
5858 static int disas_cp14_write(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
5860 int crn
= (insn
>> 16) & 0xf;
5861 int crm
= insn
& 0xf;
5862 int op1
= (insn
>> 21) & 7;
5863 int op2
= (insn
>> 5) & 7;
5864 int rt
= (insn
>> 12) & 0xf;
5867 if (arm_feature(env
, ARM_FEATURE_THUMB2EE
)) {
5868 if (op1
== 6 && crn
== 0 && crm
== 0 && op2
== 0) {
5872 tmp
= load_reg(s
, rt
);
5873 gen_helper_set_teecr(cpu_env
, tmp
);
5877 if (op1
== 6 && crn
== 1 && crm
== 0 && op2
== 0) {
5879 if (IS_USER(s
) && (env
->teecr
& 1))
5881 tmp
= load_reg(s
, rt
);
5882 store_cpu_field(tmp
, teehbr
);
5886 fprintf(stderr
, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5887 op1
, crn
, crm
, op2
);
5891 static int disas_coproc_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
5895 cpnum
= (insn
>> 8) & 0xf;
5896 if (arm_feature(env
, ARM_FEATURE_XSCALE
)
5897 && ((env
->cp15
.c15_cpar
^ 0x3fff) & (1 << cpnum
)))
5903 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
5904 return disas_iwmmxt_insn(env
, s
, insn
);
5905 } else if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
5906 return disas_dsp_insn(env
, s
, insn
);
5911 return disas_vfp_insn (env
, s
, insn
);
5913 /* Coprocessors 7-15 are architecturally reserved by ARM.
5914 Unfortunately Intel decided to ignore this. */
5915 if (arm_feature(env
, ARM_FEATURE_XSCALE
))
5917 if (insn
& (1 << 20))
5918 return disas_cp14_read(env
, s
, insn
);
5920 return disas_cp14_write(env
, s
, insn
);
5922 return disas_cp15_insn (env
, s
, insn
);
5925 /* Unknown coprocessor. See if the board has hooked it. */
5926 return disas_cp_insn (env
, s
, insn
);
5931 /* Store a 64-bit value to a register pair. Clobbers val. */
5932 static void gen_storeq_reg(DisasContext
*s
, int rlow
, int rhigh
, TCGv_i64 val
)
5936 tcg_gen_trunc_i64_i32(tmp
, val
);
5937 store_reg(s
, rlow
, tmp
);
5939 tcg_gen_shri_i64(val
, val
, 32);
5940 tcg_gen_trunc_i64_i32(tmp
, val
);
5941 store_reg(s
, rhigh
, tmp
);
5944 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5945 static void gen_addq_lo(DisasContext
*s
, TCGv_i64 val
, int rlow
)
5950 /* Load value and extend to 64 bits. */
5951 tmp
= tcg_temp_new_i64();
5952 tmp2
= load_reg(s
, rlow
);
5953 tcg_gen_extu_i32_i64(tmp
, tmp2
);
5955 tcg_gen_add_i64(val
, val
, tmp
);
5956 tcg_temp_free_i64(tmp
);
5959 /* load and add a 64-bit value from a register pair. */
5960 static void gen_addq(DisasContext
*s
, TCGv_i64 val
, int rlow
, int rhigh
)
5966 /* Load 64-bit value rd:rn. */
5967 tmpl
= load_reg(s
, rlow
);
5968 tmph
= load_reg(s
, rhigh
);
5969 tmp
= tcg_temp_new_i64();
5970 tcg_gen_concat_i32_i64(tmp
, tmpl
, tmph
);
5973 tcg_gen_add_i64(val
, val
, tmp
);
5974 tcg_temp_free_i64(tmp
);
5977 /* Set N and Z flags from a 64-bit value. */
5978 static void gen_logicq_cc(TCGv_i64 val
)
5980 TCGv tmp
= new_tmp();
5981 gen_helper_logicq_cc(tmp
, val
);
5986 /* Load/Store exclusive instructions are implemented by remembering
5987 the value/address loaded, and seeing if these are the same
5988 when the store is performed. This should be is sufficient to implement
5989 the architecturally mandated semantics, and avoids having to monitor
5992 In system emulation mode only one CPU will be running at once, so
5993 this sequence is effectively atomic. In user emulation mode we
5994 throw an exception and handle the atomic operation elsewhere. */
5995 static void gen_load_exclusive(DisasContext
*s
, int rt
, int rt2
,
5996 TCGv addr
, int size
)
6002 tmp
= gen_ld8u(addr
, IS_USER(s
));
6005 tmp
= gen_ld16u(addr
, IS_USER(s
));
6009 tmp
= gen_ld32(addr
, IS_USER(s
));
6014 tcg_gen_mov_i32(cpu_exclusive_val
, tmp
);
6015 store_reg(s
, rt
, tmp
);
6017 TCGv tmp2
= new_tmp();
6018 tcg_gen_addi_i32(tmp2
, addr
, 4);
6019 tmp
= gen_ld32(tmp2
, IS_USER(s
));
6021 tcg_gen_mov_i32(cpu_exclusive_high
, tmp
);
6022 store_reg(s
, rt2
, tmp
);
6024 tcg_gen_mov_i32(cpu_exclusive_addr
, addr
);
6027 static void gen_clrex(DisasContext
*s
)
6029 tcg_gen_movi_i32(cpu_exclusive_addr
, -1);
6032 #ifdef CONFIG_USER_ONLY
6033 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
6034 TCGv addr
, int size
)
6036 tcg_gen_mov_i32(cpu_exclusive_test
, addr
);
6037 tcg_gen_movi_i32(cpu_exclusive_info
,
6038 size
| (rd
<< 4) | (rt
<< 8) | (rt2
<< 12));
6039 gen_exception_insn(s
, 4, EXCP_STREX
);
6042 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
6043 TCGv addr
, int size
)
6049 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6055 fail_label
= gen_new_label();
6056 done_label
= gen_new_label();
6057 tcg_gen_brcond_i32(TCG_COND_NE
, addr
, cpu_exclusive_addr
, fail_label
);
6060 tmp
= gen_ld8u(addr
, IS_USER(s
));
6063 tmp
= gen_ld16u(addr
, IS_USER(s
));
6067 tmp
= gen_ld32(addr
, IS_USER(s
));
6072 tcg_gen_brcond_i32(TCG_COND_NE
, tmp
, cpu_exclusive_val
, fail_label
);
6075 TCGv tmp2
= new_tmp();
6076 tcg_gen_addi_i32(tmp2
, addr
, 4);
6077 tmp
= gen_ld32(tmp2
, IS_USER(s
));
6079 tcg_gen_brcond_i32(TCG_COND_NE
, tmp
, cpu_exclusive_high
, fail_label
);
6082 tmp
= load_reg(s
, rt
);
6085 gen_st8(tmp
, addr
, IS_USER(s
));
6088 gen_st16(tmp
, addr
, IS_USER(s
));
6092 gen_st32(tmp
, addr
, IS_USER(s
));
6098 tcg_gen_addi_i32(addr
, addr
, 4);
6099 tmp
= load_reg(s
, rt2
);
6100 gen_st32(tmp
, addr
, IS_USER(s
));
6102 tcg_gen_movi_i32(cpu_R
[rd
], 0);
6103 tcg_gen_br(done_label
);
6104 gen_set_label(fail_label
);
6105 tcg_gen_movi_i32(cpu_R
[rd
], 1);
6106 gen_set_label(done_label
);
6107 tcg_gen_movi_i32(cpu_exclusive_addr
, -1);
6111 static void disas_arm_insn(CPUState
* env
, DisasContext
*s
)
6113 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
6120 insn
= ldl_code(s
->pc
);
6123 /* M variants do not implement ARM mode. */
6128 /* Unconditional instructions. */
6129 if (((insn
>> 25) & 7) == 1) {
6130 /* NEON Data processing. */
6131 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6134 if (disas_neon_data_insn(env
, s
, insn
))
6138 if ((insn
& 0x0f100000) == 0x04000000) {
6139 /* NEON load/store. */
6140 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6143 if (disas_neon_ls_insn(env
, s
, insn
))
6147 if (((insn
& 0x0f30f000) == 0x0510f000) ||
6148 ((insn
& 0x0f30f010) == 0x0710f000)) {
6149 if ((insn
& (1 << 22)) == 0) {
6151 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6155 /* Otherwise PLD; v5TE+ */
6158 if (((insn
& 0x0f70f000) == 0x0450f000) ||
6159 ((insn
& 0x0f70f010) == 0x0650f000)) {
6161 return; /* PLI; V7 */
6163 if (((insn
& 0x0f700000) == 0x04100000) ||
6164 ((insn
& 0x0f700010) == 0x06100000)) {
6165 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6168 return; /* v7MP: Unallocated memory hint: must NOP */
6171 if ((insn
& 0x0ffffdff) == 0x01010000) {
6174 if (insn
& (1 << 9)) {
6175 /* BE8 mode not implemented. */
6179 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
6180 switch ((insn
>> 4) & 0xf) {
6189 /* We don't emulate caches so these are a no-op. */
6194 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
6200 op1
= (insn
& 0x1f);
6202 tmp
= tcg_const_i32(op1
);
6203 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
6204 tcg_temp_free_i32(tmp
);
6205 i
= (insn
>> 23) & 3;
6207 case 0: offset
= -4; break; /* DA */
6208 case 1: offset
= 0; break; /* IA */
6209 case 2: offset
= -8; break; /* DB */
6210 case 3: offset
= 4; break; /* IB */
6214 tcg_gen_addi_i32(addr
, addr
, offset
);
6215 tmp
= load_reg(s
, 14);
6216 gen_st32(tmp
, addr
, 0);
6217 tmp
= load_cpu_field(spsr
);
6218 tcg_gen_addi_i32(addr
, addr
, 4);
6219 gen_st32(tmp
, addr
, 0);
6220 if (insn
& (1 << 21)) {
6221 /* Base writeback. */
6223 case 0: offset
= -8; break;
6224 case 1: offset
= 4; break;
6225 case 2: offset
= -4; break;
6226 case 3: offset
= 0; break;
6230 tcg_gen_addi_i32(addr
, addr
, offset
);
6231 tmp
= tcg_const_i32(op1
);
6232 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
6233 tcg_temp_free_i32(tmp
);
6239 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
6245 rn
= (insn
>> 16) & 0xf;
6246 addr
= load_reg(s
, rn
);
6247 i
= (insn
>> 23) & 3;
6249 case 0: offset
= -4; break; /* DA */
6250 case 1: offset
= 0; break; /* IA */
6251 case 2: offset
= -8; break; /* DB */
6252 case 3: offset
= 4; break; /* IB */
6256 tcg_gen_addi_i32(addr
, addr
, offset
);
6257 /* Load PC into tmp and CPSR into tmp2. */
6258 tmp
= gen_ld32(addr
, 0);
6259 tcg_gen_addi_i32(addr
, addr
, 4);
6260 tmp2
= gen_ld32(addr
, 0);
6261 if (insn
& (1 << 21)) {
6262 /* Base writeback. */
6264 case 0: offset
= -8; break;
6265 case 1: offset
= 4; break;
6266 case 2: offset
= -4; break;
6267 case 3: offset
= 0; break;
6271 tcg_gen_addi_i32(addr
, addr
, offset
);
6272 store_reg(s
, rn
, addr
);
6276 gen_rfe(s
, tmp
, tmp2
);
6278 } else if ((insn
& 0x0e000000) == 0x0a000000) {
6279 /* branch link and change to thumb (blx <offset>) */
6282 val
= (uint32_t)s
->pc
;
6284 tcg_gen_movi_i32(tmp
, val
);
6285 store_reg(s
, 14, tmp
);
6286 /* Sign-extend the 24-bit offset */
6287 offset
= (((int32_t)insn
) << 8) >> 8;
6288 /* offset * 4 + bit24 * 2 + (thumb bit) */
6289 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
6290 /* pipeline offset */
6294 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
6295 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6296 /* iWMMXt register transfer. */
6297 if (env
->cp15
.c15_cpar
& (1 << 1))
6298 if (!disas_iwmmxt_insn(env
, s
, insn
))
6301 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
6302 /* Coprocessor double register transfer. */
6303 } else if ((insn
& 0x0f000010) == 0x0e000010) {
6304 /* Additional coprocessor register transfer. */
6305 } else if ((insn
& 0x0ff10020) == 0x01000000) {
6308 /* cps (privileged) */
6312 if (insn
& (1 << 19)) {
6313 if (insn
& (1 << 8))
6315 if (insn
& (1 << 7))
6317 if (insn
& (1 << 6))
6319 if (insn
& (1 << 18))
6322 if (insn
& (1 << 17)) {
6324 val
|= (insn
& 0x1f);
6327 gen_set_psr_im(s
, mask
, 0, val
);
6334 /* if not always execute, we generate a conditional jump to
6336 s
->condlabel
= gen_new_label();
6337 gen_test_cc(cond
^ 1, s
->condlabel
);
6340 if ((insn
& 0x0f900000) == 0x03000000) {
6341 if ((insn
& (1 << 21)) == 0) {
6343 rd
= (insn
>> 12) & 0xf;
6344 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
6345 if ((insn
& (1 << 22)) == 0) {
6348 tcg_gen_movi_i32(tmp
, val
);
6351 tmp
= load_reg(s
, rd
);
6352 tcg_gen_ext16u_i32(tmp
, tmp
);
6353 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
6355 store_reg(s
, rd
, tmp
);
6357 if (((insn
>> 12) & 0xf) != 0xf)
6359 if (((insn
>> 16) & 0xf) == 0) {
6360 gen_nop_hint(s
, insn
& 0xff);
6362 /* CPSR = immediate */
6364 shift
= ((insn
>> 8) & 0xf) * 2;
6366 val
= (val
>> shift
) | (val
<< (32 - shift
));
6367 i
= ((insn
& (1 << 22)) != 0);
6368 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
6372 } else if ((insn
& 0x0f900000) == 0x01000000
6373 && (insn
& 0x00000090) != 0x00000090) {
6374 /* miscellaneous instructions */
6375 op1
= (insn
>> 21) & 3;
6376 sh
= (insn
>> 4) & 0xf;
6379 case 0x0: /* move program status register */
6382 tmp
= load_reg(s
, rm
);
6383 i
= ((op1
& 2) != 0);
6384 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
6388 rd
= (insn
>> 12) & 0xf;
6392 tmp
= load_cpu_field(spsr
);
6395 gen_helper_cpsr_read(tmp
);
6397 store_reg(s
, rd
, tmp
);
6402 /* branch/exchange thumb (bx). */
6403 tmp
= load_reg(s
, rm
);
6405 } else if (op1
== 3) {
6407 rd
= (insn
>> 12) & 0xf;
6408 tmp
= load_reg(s
, rm
);
6409 gen_helper_clz(tmp
, tmp
);
6410 store_reg(s
, rd
, tmp
);
6418 /* Trivial implementation equivalent to bx. */
6419 tmp
= load_reg(s
, rm
);
6429 /* branch link/exchange thumb (blx) */
6430 tmp
= load_reg(s
, rm
);
6432 tcg_gen_movi_i32(tmp2
, s
->pc
);
6433 store_reg(s
, 14, tmp2
);
6436 case 0x5: /* saturating add/subtract */
6437 rd
= (insn
>> 12) & 0xf;
6438 rn
= (insn
>> 16) & 0xf;
6439 tmp
= load_reg(s
, rm
);
6440 tmp2
= load_reg(s
, rn
);
6442 gen_helper_double_saturate(tmp2
, tmp2
);
6444 gen_helper_sub_saturate(tmp
, tmp
, tmp2
);
6446 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
6448 store_reg(s
, rd
, tmp
);
6451 /* SMC instruction (op1 == 3)
6452 and undefined instructions (op1 == 0 || op1 == 2)
6458 gen_exception_insn(s
, 4, EXCP_BKPT
);
6460 case 0x8: /* signed multiply */
6464 rs
= (insn
>> 8) & 0xf;
6465 rn
= (insn
>> 12) & 0xf;
6466 rd
= (insn
>> 16) & 0xf;
6468 /* (32 * 16) >> 16 */
6469 tmp
= load_reg(s
, rm
);
6470 tmp2
= load_reg(s
, rs
);
6472 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
6475 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6476 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
6478 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6479 tcg_temp_free_i64(tmp64
);
6480 if ((sh
& 2) == 0) {
6481 tmp2
= load_reg(s
, rn
);
6482 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6485 store_reg(s
, rd
, tmp
);
6488 tmp
= load_reg(s
, rm
);
6489 tmp2
= load_reg(s
, rs
);
6490 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6493 tmp64
= tcg_temp_new_i64();
6494 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6496 gen_addq(s
, tmp64
, rn
, rd
);
6497 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6498 tcg_temp_free_i64(tmp64
);
6501 tmp2
= load_reg(s
, rn
);
6502 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6505 store_reg(s
, rd
, tmp
);
6512 } else if (((insn
& 0x0e000000) == 0 &&
6513 (insn
& 0x00000090) != 0x90) ||
6514 ((insn
& 0x0e000000) == (1 << 25))) {
6515 int set_cc
, logic_cc
, shiftop
;
6517 op1
= (insn
>> 21) & 0xf;
6518 set_cc
= (insn
>> 20) & 1;
6519 logic_cc
= table_logic_cc
[op1
] & set_cc
;
6521 /* data processing instruction */
6522 if (insn
& (1 << 25)) {
6523 /* immediate operand */
6525 shift
= ((insn
>> 8) & 0xf) * 2;
6527 val
= (val
>> shift
) | (val
<< (32 - shift
));
6530 tcg_gen_movi_i32(tmp2
, val
);
6531 if (logic_cc
&& shift
) {
6532 gen_set_CF_bit31(tmp2
);
6537 tmp2
= load_reg(s
, rm
);
6538 shiftop
= (insn
>> 5) & 3;
6539 if (!(insn
& (1 << 4))) {
6540 shift
= (insn
>> 7) & 0x1f;
6541 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
6543 rs
= (insn
>> 8) & 0xf;
6544 tmp
= load_reg(s
, rs
);
6545 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
6548 if (op1
!= 0x0f && op1
!= 0x0d) {
6549 rn
= (insn
>> 16) & 0xf;
6550 tmp
= load_reg(s
, rn
);
6554 rd
= (insn
>> 12) & 0xf;
6557 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6561 store_reg_bx(env
, s
, rd
, tmp
);
6564 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6568 store_reg_bx(env
, s
, rd
, tmp
);
6571 if (set_cc
&& rd
== 15) {
6572 /* SUBS r15, ... is used for exception return. */
6576 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6577 gen_exception_return(s
, tmp
);
6580 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6582 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6584 store_reg_bx(env
, s
, rd
, tmp
);
6589 gen_helper_sub_cc(tmp
, tmp2
, tmp
);
6591 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6593 store_reg_bx(env
, s
, rd
, tmp
);
6597 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6599 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6601 store_reg_bx(env
, s
, rd
, tmp
);
6605 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
6607 gen_add_carry(tmp
, tmp
, tmp2
);
6609 store_reg_bx(env
, s
, rd
, tmp
);
6613 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
6615 gen_sub_carry(tmp
, tmp
, tmp2
);
6617 store_reg_bx(env
, s
, rd
, tmp
);
6621 gen_helper_sbc_cc(tmp
, tmp2
, tmp
);
6623 gen_sub_carry(tmp
, tmp2
, tmp
);
6625 store_reg_bx(env
, s
, rd
, tmp
);
6629 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6636 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6643 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6649 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6654 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6658 store_reg_bx(env
, s
, rd
, tmp
);
6661 if (logic_cc
&& rd
== 15) {
6662 /* MOVS r15, ... is used for exception return. */
6666 gen_exception_return(s
, tmp2
);
6671 store_reg_bx(env
, s
, rd
, tmp2
);
6675 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
6679 store_reg_bx(env
, s
, rd
, tmp
);
6683 tcg_gen_not_i32(tmp2
, tmp2
);
6687 store_reg_bx(env
, s
, rd
, tmp2
);
6690 if (op1
!= 0x0f && op1
!= 0x0d) {
6694 /* other instructions */
6695 op1
= (insn
>> 24) & 0xf;
6699 /* multiplies, extra load/stores */
6700 sh
= (insn
>> 5) & 3;
6703 rd
= (insn
>> 16) & 0xf;
6704 rn
= (insn
>> 12) & 0xf;
6705 rs
= (insn
>> 8) & 0xf;
6707 op1
= (insn
>> 20) & 0xf;
6709 case 0: case 1: case 2: case 3: case 6:
6711 tmp
= load_reg(s
, rs
);
6712 tmp2
= load_reg(s
, rm
);
6713 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
6715 if (insn
& (1 << 22)) {
6716 /* Subtract (mls) */
6718 tmp2
= load_reg(s
, rn
);
6719 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6721 } else if (insn
& (1 << 21)) {
6723 tmp2
= load_reg(s
, rn
);
6724 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6727 if (insn
& (1 << 20))
6729 store_reg(s
, rd
, tmp
);
6732 /* 64 bit mul double accumulate (UMAAL) */
6734 tmp
= load_reg(s
, rs
);
6735 tmp2
= load_reg(s
, rm
);
6736 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
6737 gen_addq_lo(s
, tmp64
, rn
);
6738 gen_addq_lo(s
, tmp64
, rd
);
6739 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6740 tcg_temp_free_i64(tmp64
);
6742 case 8: case 9: case 10: case 11:
6743 case 12: case 13: case 14: case 15:
6744 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6745 tmp
= load_reg(s
, rs
);
6746 tmp2
= load_reg(s
, rm
);
6747 if (insn
& (1 << 22)) {
6748 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6750 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
6752 if (insn
& (1 << 21)) { /* mult accumulate */
6753 gen_addq(s
, tmp64
, rn
, rd
);
6755 if (insn
& (1 << 20)) {
6756 gen_logicq_cc(tmp64
);
6758 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6759 tcg_temp_free_i64(tmp64
);
6765 rn
= (insn
>> 16) & 0xf;
6766 rd
= (insn
>> 12) & 0xf;
6767 if (insn
& (1 << 23)) {
6768 /* load/store exclusive */
6769 op1
= (insn
>> 21) & 0x3;
6774 addr
= tcg_temp_local_new_i32();
6775 load_reg_var(s
, addr
, rn
);
6776 if (insn
& (1 << 20)) {
6779 gen_load_exclusive(s
, rd
, 15, addr
, 2);
6781 case 1: /* ldrexd */
6782 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
6784 case 2: /* ldrexb */
6785 gen_load_exclusive(s
, rd
, 15, addr
, 0);
6787 case 3: /* ldrexh */
6788 gen_load_exclusive(s
, rd
, 15, addr
, 1);
6797 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
6799 case 1: /* strexd */
6800 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
6802 case 2: /* strexb */
6803 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
6805 case 3: /* strexh */
6806 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
6812 tcg_temp_free(addr
);
6814 /* SWP instruction */
6817 /* ??? This is not really atomic. However we know
6818 we never have multiple CPUs running in parallel,
6819 so it is good enough. */
6820 addr
= load_reg(s
, rn
);
6821 tmp
= load_reg(s
, rm
);
6822 if (insn
& (1 << 22)) {
6823 tmp2
= gen_ld8u(addr
, IS_USER(s
));
6824 gen_st8(tmp
, addr
, IS_USER(s
));
6826 tmp2
= gen_ld32(addr
, IS_USER(s
));
6827 gen_st32(tmp
, addr
, IS_USER(s
));
6830 store_reg(s
, rd
, tmp2
);
6836 /* Misc load/store */
6837 rn
= (insn
>> 16) & 0xf;
6838 rd
= (insn
>> 12) & 0xf;
6839 addr
= load_reg(s
, rn
);
6840 if (insn
& (1 << 24))
6841 gen_add_datah_offset(s
, insn
, 0, addr
);
6843 if (insn
& (1 << 20)) {
6847 tmp
= gen_ld16u(addr
, IS_USER(s
));
6850 tmp
= gen_ld8s(addr
, IS_USER(s
));
6854 tmp
= gen_ld16s(addr
, IS_USER(s
));
6858 } else if (sh
& 2) {
6862 tmp
= load_reg(s
, rd
);
6863 gen_st32(tmp
, addr
, IS_USER(s
));
6864 tcg_gen_addi_i32(addr
, addr
, 4);
6865 tmp
= load_reg(s
, rd
+ 1);
6866 gen_st32(tmp
, addr
, IS_USER(s
));
6870 tmp
= gen_ld32(addr
, IS_USER(s
));
6871 store_reg(s
, rd
, tmp
);
6872 tcg_gen_addi_i32(addr
, addr
, 4);
6873 tmp
= gen_ld32(addr
, IS_USER(s
));
6877 address_offset
= -4;
6880 tmp
= load_reg(s
, rd
);
6881 gen_st16(tmp
, addr
, IS_USER(s
));
6884 /* Perform base writeback before the loaded value to
6885 ensure correct behavior with overlapping index registers.
6886 ldrd with base writeback is is undefined if the
6887 destination and index registers overlap. */
6888 if (!(insn
& (1 << 24))) {
6889 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
6890 store_reg(s
, rn
, addr
);
6891 } else if (insn
& (1 << 21)) {
6893 tcg_gen_addi_i32(addr
, addr
, address_offset
);
6894 store_reg(s
, rn
, addr
);
6899 /* Complete the load. */
6900 store_reg(s
, rd
, tmp
);
6909 if (insn
& (1 << 4)) {
6911 /* Armv6 Media instructions. */
6913 rn
= (insn
>> 16) & 0xf;
6914 rd
= (insn
>> 12) & 0xf;
6915 rs
= (insn
>> 8) & 0xf;
6916 switch ((insn
>> 23) & 3) {
6917 case 0: /* Parallel add/subtract. */
6918 op1
= (insn
>> 20) & 7;
6919 tmp
= load_reg(s
, rn
);
6920 tmp2
= load_reg(s
, rm
);
6921 sh
= (insn
>> 5) & 7;
6922 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
6924 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
6926 store_reg(s
, rd
, tmp
);
6929 if ((insn
& 0x00700020) == 0) {
6930 /* Halfword pack. */
6931 tmp
= load_reg(s
, rn
);
6932 tmp2
= load_reg(s
, rm
);
6933 shift
= (insn
>> 7) & 0x1f;
6934 if (insn
& (1 << 6)) {
6938 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
6939 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
6940 tcg_gen_ext16u_i32(tmp2
, tmp2
);
6944 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
6945 tcg_gen_ext16u_i32(tmp
, tmp
);
6946 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
6948 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6950 store_reg(s
, rd
, tmp
);
6951 } else if ((insn
& 0x00200020) == 0x00200000) {
6953 tmp
= load_reg(s
, rm
);
6954 shift
= (insn
>> 7) & 0x1f;
6955 if (insn
& (1 << 6)) {
6958 tcg_gen_sari_i32(tmp
, tmp
, shift
);
6960 tcg_gen_shli_i32(tmp
, tmp
, shift
);
6962 sh
= (insn
>> 16) & 0x1f;
6963 tmp2
= tcg_const_i32(sh
);
6964 if (insn
& (1 << 22))
6965 gen_helper_usat(tmp
, tmp
, tmp2
);
6967 gen_helper_ssat(tmp
, tmp
, tmp2
);
6968 tcg_temp_free_i32(tmp2
);
6969 store_reg(s
, rd
, tmp
);
6970 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
6972 tmp
= load_reg(s
, rm
);
6973 sh
= (insn
>> 16) & 0x1f;
6974 tmp2
= tcg_const_i32(sh
);
6975 if (insn
& (1 << 22))
6976 gen_helper_usat16(tmp
, tmp
, tmp2
);
6978 gen_helper_ssat16(tmp
, tmp
, tmp2
);
6979 tcg_temp_free_i32(tmp2
);
6980 store_reg(s
, rd
, tmp
);
6981 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
6983 tmp
= load_reg(s
, rn
);
6984 tmp2
= load_reg(s
, rm
);
6986 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
6987 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
6990 store_reg(s
, rd
, tmp
);
6991 } else if ((insn
& 0x000003e0) == 0x00000060) {
6992 tmp
= load_reg(s
, rm
);
6993 shift
= (insn
>> 10) & 3;
6994 /* ??? In many cases it's not neccessary to do a
6995 rotate, a shift is sufficient. */
6997 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
6998 op1
= (insn
>> 20) & 7;
7000 case 0: gen_sxtb16(tmp
); break;
7001 case 2: gen_sxtb(tmp
); break;
7002 case 3: gen_sxth(tmp
); break;
7003 case 4: gen_uxtb16(tmp
); break;
7004 case 6: gen_uxtb(tmp
); break;
7005 case 7: gen_uxth(tmp
); break;
7006 default: goto illegal_op
;
7009 tmp2
= load_reg(s
, rn
);
7010 if ((op1
& 3) == 0) {
7011 gen_add16(tmp
, tmp2
);
7013 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7017 store_reg(s
, rd
, tmp
);
7018 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
7020 tmp
= load_reg(s
, rm
);
7021 if (insn
& (1 << 22)) {
7022 if (insn
& (1 << 7)) {
7026 gen_helper_rbit(tmp
, tmp
);
7029 if (insn
& (1 << 7))
7032 tcg_gen_bswap32_i32(tmp
, tmp
);
7034 store_reg(s
, rd
, tmp
);
7039 case 2: /* Multiplies (Type 3). */
7040 tmp
= load_reg(s
, rm
);
7041 tmp2
= load_reg(s
, rs
);
7042 if (insn
& (1 << 20)) {
7043 /* Signed multiply most significant [accumulate].
7044 (SMMUL, SMMLA, SMMLS) */
7045 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7048 tmp
= load_reg(s
, rd
);
7049 if (insn
& (1 << 6)) {
7050 tmp64
= gen_subq_msw(tmp64
, tmp
);
7052 tmp64
= gen_addq_msw(tmp64
, tmp
);
7055 if (insn
& (1 << 5)) {
7056 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
7058 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7060 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7061 tcg_temp_free_i64(tmp64
);
7062 store_reg(s
, rn
, tmp
);
7064 if (insn
& (1 << 5))
7065 gen_swap_half(tmp2
);
7066 gen_smul_dual(tmp
, tmp2
);
7067 /* This addition cannot overflow. */
7068 if (insn
& (1 << 6)) {
7069 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7071 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7074 if (insn
& (1 << 22)) {
7075 /* smlald, smlsld */
7076 tmp64
= tcg_temp_new_i64();
7077 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7079 gen_addq(s
, tmp64
, rd
, rn
);
7080 gen_storeq_reg(s
, rd
, rn
, tmp64
);
7081 tcg_temp_free_i64(tmp64
);
7083 /* smuad, smusd, smlad, smlsd */
7086 tmp2
= load_reg(s
, rd
);
7087 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7090 store_reg(s
, rn
, tmp
);
7095 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
7097 case 0: /* Unsigned sum of absolute differences. */
7099 tmp
= load_reg(s
, rm
);
7100 tmp2
= load_reg(s
, rs
);
7101 gen_helper_usad8(tmp
, tmp
, tmp2
);
7104 tmp2
= load_reg(s
, rd
);
7105 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7108 store_reg(s
, rn
, tmp
);
7110 case 0x20: case 0x24: case 0x28: case 0x2c:
7111 /* Bitfield insert/clear. */
7113 shift
= (insn
>> 7) & 0x1f;
7114 i
= (insn
>> 16) & 0x1f;
7118 tcg_gen_movi_i32(tmp
, 0);
7120 tmp
= load_reg(s
, rm
);
7123 tmp2
= load_reg(s
, rd
);
7124 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << i
) - 1);
7127 store_reg(s
, rd
, tmp
);
7129 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7130 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7132 tmp
= load_reg(s
, rm
);
7133 shift
= (insn
>> 7) & 0x1f;
7134 i
= ((insn
>> 16) & 0x1f) + 1;
7139 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
7141 gen_sbfx(tmp
, shift
, i
);
7144 store_reg(s
, rd
, tmp
);
7154 /* Check for undefined extension instructions
7155 * per the ARM Bible IE:
7156 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7158 sh
= (0xf << 20) | (0xf << 4);
7159 if (op1
== 0x7 && ((insn
& sh
) == sh
))
7163 /* load/store byte/word */
7164 rn
= (insn
>> 16) & 0xf;
7165 rd
= (insn
>> 12) & 0xf;
7166 tmp2
= load_reg(s
, rn
);
7167 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
7168 if (insn
& (1 << 24))
7169 gen_add_data_offset(s
, insn
, tmp2
);
7170 if (insn
& (1 << 20)) {
7172 if (insn
& (1 << 22)) {
7173 tmp
= gen_ld8u(tmp2
, i
);
7175 tmp
= gen_ld32(tmp2
, i
);
7179 tmp
= load_reg(s
, rd
);
7180 if (insn
& (1 << 22))
7181 gen_st8(tmp
, tmp2
, i
);
7183 gen_st32(tmp
, tmp2
, i
);
7185 if (!(insn
& (1 << 24))) {
7186 gen_add_data_offset(s
, insn
, tmp2
);
7187 store_reg(s
, rn
, tmp2
);
7188 } else if (insn
& (1 << 21)) {
7189 store_reg(s
, rn
, tmp2
);
7193 if (insn
& (1 << 20)) {
7194 /* Complete the load. */
7198 store_reg(s
, rd
, tmp
);
7204 int j
, n
, user
, loaded_base
;
7206 /* load/store multiple words */
7207 /* XXX: store correct base if write back */
7209 if (insn
& (1 << 22)) {
7211 goto illegal_op
; /* only usable in supervisor mode */
7213 if ((insn
& (1 << 15)) == 0)
7216 rn
= (insn
>> 16) & 0xf;
7217 addr
= load_reg(s
, rn
);
7219 /* compute total size */
7221 TCGV_UNUSED(loaded_var
);
7224 if (insn
& (1 << i
))
7227 /* XXX: test invalid n == 0 case ? */
7228 if (insn
& (1 << 23)) {
7229 if (insn
& (1 << 24)) {
7231 tcg_gen_addi_i32(addr
, addr
, 4);
7233 /* post increment */
7236 if (insn
& (1 << 24)) {
7238 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7240 /* post decrement */
7242 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7247 if (insn
& (1 << i
)) {
7248 if (insn
& (1 << 20)) {
7250 tmp
= gen_ld32(addr
, IS_USER(s
));
7254 tmp2
= tcg_const_i32(i
);
7255 gen_helper_set_user_reg(tmp2
, tmp
);
7256 tcg_temp_free_i32(tmp2
);
7258 } else if (i
== rn
) {
7262 store_reg(s
, i
, tmp
);
7267 /* special case: r15 = PC + 8 */
7268 val
= (long)s
->pc
+ 4;
7270 tcg_gen_movi_i32(tmp
, val
);
7273 tmp2
= tcg_const_i32(i
);
7274 gen_helper_get_user_reg(tmp
, tmp2
);
7275 tcg_temp_free_i32(tmp2
);
7277 tmp
= load_reg(s
, i
);
7279 gen_st32(tmp
, addr
, IS_USER(s
));
7282 /* no need to add after the last transfer */
7284 tcg_gen_addi_i32(addr
, addr
, 4);
7287 if (insn
& (1 << 21)) {
7289 if (insn
& (1 << 23)) {
7290 if (insn
& (1 << 24)) {
7293 /* post increment */
7294 tcg_gen_addi_i32(addr
, addr
, 4);
7297 if (insn
& (1 << 24)) {
7300 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7302 /* post decrement */
7303 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7306 store_reg(s
, rn
, addr
);
7311 store_reg(s
, rn
, loaded_var
);
7313 if ((insn
& (1 << 22)) && !user
) {
7314 /* Restore CPSR from SPSR. */
7315 tmp
= load_cpu_field(spsr
);
7316 gen_set_cpsr(tmp
, 0xffffffff);
7318 s
->is_jmp
= DISAS_UPDATE
;
7327 /* branch (and link) */
7328 val
= (int32_t)s
->pc
;
7329 if (insn
& (1 << 24)) {
7331 tcg_gen_movi_i32(tmp
, val
);
7332 store_reg(s
, 14, tmp
);
7334 offset
= (((int32_t)insn
<< 8) >> 8);
7335 val
+= (offset
<< 2) + 4;
7343 if (disas_coproc_insn(env
, s
, insn
))
7348 gen_set_pc_im(s
->pc
);
7349 s
->is_jmp
= DISAS_SWI
;
7353 gen_exception_insn(s
, 4, EXCP_UDEF
);
7359 /* Return true if this is a Thumb-2 logical op. */
7361 thumb2_logic_op(int op
)
7366 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7367 then set condition code flags based on the result of the operation.
7368 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7369 to the high bit of T1.
7370 Returns zero if the opcode is valid. */
7373 gen_thumb2_data_op(DisasContext
*s
, int op
, int conds
, uint32_t shifter_out
, TCGv t0
, TCGv t1
)
7380 tcg_gen_and_i32(t0
, t0
, t1
);
7384 tcg_gen_andc_i32(t0
, t0
, t1
);
7388 tcg_gen_or_i32(t0
, t0
, t1
);
7392 tcg_gen_not_i32(t1
, t1
);
7393 tcg_gen_or_i32(t0
, t0
, t1
);
7397 tcg_gen_xor_i32(t0
, t0
, t1
);
7402 gen_helper_add_cc(t0
, t0
, t1
);
7404 tcg_gen_add_i32(t0
, t0
, t1
);
7408 gen_helper_adc_cc(t0
, t0
, t1
);
7414 gen_helper_sbc_cc(t0
, t0
, t1
);
7416 gen_sub_carry(t0
, t0
, t1
);
7420 gen_helper_sub_cc(t0
, t0
, t1
);
7422 tcg_gen_sub_i32(t0
, t0
, t1
);
7426 gen_helper_sub_cc(t0
, t1
, t0
);
7428 tcg_gen_sub_i32(t0
, t1
, t0
);
7430 default: /* 5, 6, 7, 9, 12, 15. */
7436 gen_set_CF_bit31(t1
);
7441 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7443 static int disas_thumb2_insn(CPUState
*env
, DisasContext
*s
, uint16_t insn_hw1
)
7445 uint32_t insn
, imm
, shift
, offset
;
7446 uint32_t rd
, rn
, rm
, rs
;
7457 if (!(arm_feature(env
, ARM_FEATURE_THUMB2
)
7458 || arm_feature (env
, ARM_FEATURE_M
))) {
7459 /* Thumb-1 cores may need to treat bl and blx as a pair of
7460 16-bit instructions to get correct prefetch abort behavior. */
7462 if ((insn
& (1 << 12)) == 0) {
7463 /* Second half of blx. */
7464 offset
= ((insn
& 0x7ff) << 1);
7465 tmp
= load_reg(s
, 14);
7466 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7467 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
7470 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7471 store_reg(s
, 14, tmp2
);
7475 if (insn
& (1 << 11)) {
7476 /* Second half of bl. */
7477 offset
= ((insn
& 0x7ff) << 1) | 1;
7478 tmp
= load_reg(s
, 14);
7479 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7482 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7483 store_reg(s
, 14, tmp2
);
7487 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
7488 /* Instruction spans a page boundary. Implement it as two
7489 16-bit instructions in case the second half causes an
7491 offset
= ((int32_t)insn
<< 21) >> 9;
7492 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
7495 /* Fall through to 32-bit decode. */
7498 insn
= lduw_code(s
->pc
);
7500 insn
|= (uint32_t)insn_hw1
<< 16;
7502 if ((insn
& 0xf800e800) != 0xf000e800) {
7506 rn
= (insn
>> 16) & 0xf;
7507 rs
= (insn
>> 12) & 0xf;
7508 rd
= (insn
>> 8) & 0xf;
7510 switch ((insn
>> 25) & 0xf) {
7511 case 0: case 1: case 2: case 3:
7512 /* 16-bit instructions. Should never happen. */
7515 if (insn
& (1 << 22)) {
7516 /* Other load/store, table branch. */
7517 if (insn
& 0x01200000) {
7518 /* Load/store doubleword. */
7521 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
7523 addr
= load_reg(s
, rn
);
7525 offset
= (insn
& 0xff) * 4;
7526 if ((insn
& (1 << 23)) == 0)
7528 if (insn
& (1 << 24)) {
7529 tcg_gen_addi_i32(addr
, addr
, offset
);
7532 if (insn
& (1 << 20)) {
7534 tmp
= gen_ld32(addr
, IS_USER(s
));
7535 store_reg(s
, rs
, tmp
);
7536 tcg_gen_addi_i32(addr
, addr
, 4);
7537 tmp
= gen_ld32(addr
, IS_USER(s
));
7538 store_reg(s
, rd
, tmp
);
7541 tmp
= load_reg(s
, rs
);
7542 gen_st32(tmp
, addr
, IS_USER(s
));
7543 tcg_gen_addi_i32(addr
, addr
, 4);
7544 tmp
= load_reg(s
, rd
);
7545 gen_st32(tmp
, addr
, IS_USER(s
));
7547 if (insn
& (1 << 21)) {
7548 /* Base writeback. */
7551 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
7552 store_reg(s
, rn
, addr
);
7556 } else if ((insn
& (1 << 23)) == 0) {
7557 /* Load/store exclusive word. */
7558 addr
= tcg_temp_local_new();
7559 load_reg_var(s
, addr
, rn
);
7560 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
7561 if (insn
& (1 << 20)) {
7562 gen_load_exclusive(s
, rs
, 15, addr
, 2);
7564 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
7566 tcg_temp_free(addr
);
7567 } else if ((insn
& (1 << 6)) == 0) {
7571 tcg_gen_movi_i32(addr
, s
->pc
);
7573 addr
= load_reg(s
, rn
);
7575 tmp
= load_reg(s
, rm
);
7576 tcg_gen_add_i32(addr
, addr
, tmp
);
7577 if (insn
& (1 << 4)) {
7579 tcg_gen_add_i32(addr
, addr
, tmp
);
7581 tmp
= gen_ld16u(addr
, IS_USER(s
));
7584 tmp
= gen_ld8u(addr
, IS_USER(s
));
7587 tcg_gen_shli_i32(tmp
, tmp
, 1);
7588 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
7589 store_reg(s
, 15, tmp
);
7591 /* Load/store exclusive byte/halfword/doubleword. */
7593 op
= (insn
>> 4) & 0x3;
7597 addr
= tcg_temp_local_new();
7598 load_reg_var(s
, addr
, rn
);
7599 if (insn
& (1 << 20)) {
7600 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
7602 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
7604 tcg_temp_free(addr
);
7607 /* Load/store multiple, RFE, SRS. */
7608 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
7609 /* Not available in user mode. */
7612 if (insn
& (1 << 20)) {
7614 addr
= load_reg(s
, rn
);
7615 if ((insn
& (1 << 24)) == 0)
7616 tcg_gen_addi_i32(addr
, addr
, -8);
7617 /* Load PC into tmp and CPSR into tmp2. */
7618 tmp
= gen_ld32(addr
, 0);
7619 tcg_gen_addi_i32(addr
, addr
, 4);
7620 tmp2
= gen_ld32(addr
, 0);
7621 if (insn
& (1 << 21)) {
7622 /* Base writeback. */
7623 if (insn
& (1 << 24)) {
7624 tcg_gen_addi_i32(addr
, addr
, 4);
7626 tcg_gen_addi_i32(addr
, addr
, -4);
7628 store_reg(s
, rn
, addr
);
7632 gen_rfe(s
, tmp
, tmp2
);
7637 tmp
= tcg_const_i32(op
);
7638 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
7639 tcg_temp_free_i32(tmp
);
7640 if ((insn
& (1 << 24)) == 0) {
7641 tcg_gen_addi_i32(addr
, addr
, -8);
7643 tmp
= load_reg(s
, 14);
7644 gen_st32(tmp
, addr
, 0);
7645 tcg_gen_addi_i32(addr
, addr
, 4);
7647 gen_helper_cpsr_read(tmp
);
7648 gen_st32(tmp
, addr
, 0);
7649 if (insn
& (1 << 21)) {
7650 if ((insn
& (1 << 24)) == 0) {
7651 tcg_gen_addi_i32(addr
, addr
, -4);
7653 tcg_gen_addi_i32(addr
, addr
, 4);
7655 tmp
= tcg_const_i32(op
);
7656 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
7657 tcg_temp_free_i32(tmp
);
7664 /* Load/store multiple. */
7665 addr
= load_reg(s
, rn
);
7667 for (i
= 0; i
< 16; i
++) {
7668 if (insn
& (1 << i
))
7671 if (insn
& (1 << 24)) {
7672 tcg_gen_addi_i32(addr
, addr
, -offset
);
7675 for (i
= 0; i
< 16; i
++) {
7676 if ((insn
& (1 << i
)) == 0)
7678 if (insn
& (1 << 20)) {
7680 tmp
= gen_ld32(addr
, IS_USER(s
));
7684 store_reg(s
, i
, tmp
);
7688 tmp
= load_reg(s
, i
);
7689 gen_st32(tmp
, addr
, IS_USER(s
));
7691 tcg_gen_addi_i32(addr
, addr
, 4);
7693 if (insn
& (1 << 21)) {
7694 /* Base register writeback. */
7695 if (insn
& (1 << 24)) {
7696 tcg_gen_addi_i32(addr
, addr
, -offset
);
7698 /* Fault if writeback register is in register list. */
7699 if (insn
& (1 << rn
))
7701 store_reg(s
, rn
, addr
);
7710 op
= (insn
>> 21) & 0xf;
7712 /* Halfword pack. */
7713 tmp
= load_reg(s
, rn
);
7714 tmp2
= load_reg(s
, rm
);
7715 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
7716 if (insn
& (1 << 5)) {
7720 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
7721 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
7722 tcg_gen_ext16u_i32(tmp2
, tmp2
);
7726 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
7727 tcg_gen_ext16u_i32(tmp
, tmp
);
7728 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
7730 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7732 store_reg(s
, rd
, tmp
);
7734 /* Data processing register constant shift. */
7737 tcg_gen_movi_i32(tmp
, 0);
7739 tmp
= load_reg(s
, rn
);
7741 tmp2
= load_reg(s
, rm
);
7743 shiftop
= (insn
>> 4) & 3;
7744 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
7745 conds
= (insn
& (1 << 20)) != 0;
7746 logic_cc
= (conds
&& thumb2_logic_op(op
));
7747 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
7748 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
7752 store_reg(s
, rd
, tmp
);
7758 case 13: /* Misc data processing. */
7759 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
7760 if (op
< 4 && (insn
& 0xf000) != 0xf000)
7763 case 0: /* Register controlled shift. */
7764 tmp
= load_reg(s
, rn
);
7765 tmp2
= load_reg(s
, rm
);
7766 if ((insn
& 0x70) != 0)
7768 op
= (insn
>> 21) & 3;
7769 logic_cc
= (insn
& (1 << 20)) != 0;
7770 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
7773 store_reg_bx(env
, s
, rd
, tmp
);
7775 case 1: /* Sign/zero extend. */
7776 tmp
= load_reg(s
, rm
);
7777 shift
= (insn
>> 4) & 3;
7778 /* ??? In many cases it's not neccessary to do a
7779 rotate, a shift is sufficient. */
7781 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7782 op
= (insn
>> 20) & 7;
7784 case 0: gen_sxth(tmp
); break;
7785 case 1: gen_uxth(tmp
); break;
7786 case 2: gen_sxtb16(tmp
); break;
7787 case 3: gen_uxtb16(tmp
); break;
7788 case 4: gen_sxtb(tmp
); break;
7789 case 5: gen_uxtb(tmp
); break;
7790 default: goto illegal_op
;
7793 tmp2
= load_reg(s
, rn
);
7794 if ((op
>> 1) == 1) {
7795 gen_add16(tmp
, tmp2
);
7797 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7801 store_reg(s
, rd
, tmp
);
7803 case 2: /* SIMD add/subtract. */
7804 op
= (insn
>> 20) & 7;
7805 shift
= (insn
>> 4) & 7;
7806 if ((op
& 3) == 3 || (shift
& 3) == 3)
7808 tmp
= load_reg(s
, rn
);
7809 tmp2
= load_reg(s
, rm
);
7810 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
7812 store_reg(s
, rd
, tmp
);
7814 case 3: /* Other data processing. */
7815 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
7817 /* Saturating add/subtract. */
7818 tmp
= load_reg(s
, rn
);
7819 tmp2
= load_reg(s
, rm
);
7821 gen_helper_double_saturate(tmp
, tmp
);
7823 gen_helper_sub_saturate(tmp
, tmp2
, tmp
);
7825 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
7828 tmp
= load_reg(s
, rn
);
7830 case 0x0a: /* rbit */
7831 gen_helper_rbit(tmp
, tmp
);
7833 case 0x08: /* rev */
7834 tcg_gen_bswap32_i32(tmp
, tmp
);
7836 case 0x09: /* rev16 */
7839 case 0x0b: /* revsh */
7842 case 0x10: /* sel */
7843 tmp2
= load_reg(s
, rm
);
7845 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
7846 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7850 case 0x18: /* clz */
7851 gen_helper_clz(tmp
, tmp
);
7857 store_reg(s
, rd
, tmp
);
7859 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7860 op
= (insn
>> 4) & 0xf;
7861 tmp
= load_reg(s
, rn
);
7862 tmp2
= load_reg(s
, rm
);
7863 switch ((insn
>> 20) & 7) {
7864 case 0: /* 32 x 32 -> 32 */
7865 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7868 tmp2
= load_reg(s
, rs
);
7870 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7872 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7876 case 1: /* 16 x 16 -> 32 */
7877 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7880 tmp2
= load_reg(s
, rs
);
7881 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7885 case 2: /* Dual multiply add. */
7886 case 4: /* Dual multiply subtract. */
7888 gen_swap_half(tmp2
);
7889 gen_smul_dual(tmp
, tmp2
);
7890 /* This addition cannot overflow. */
7891 if (insn
& (1 << 22)) {
7892 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7894 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7899 tmp2
= load_reg(s
, rs
);
7900 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7904 case 3: /* 32 * 16 -> 32msb */
7906 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
7909 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7910 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
7912 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7913 tcg_temp_free_i64(tmp64
);
7916 tmp2
= load_reg(s
, rs
);
7917 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7921 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7922 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7924 tmp
= load_reg(s
, rs
);
7925 if (insn
& (1 << 20)) {
7926 tmp64
= gen_addq_msw(tmp64
, tmp
);
7928 tmp64
= gen_subq_msw(tmp64
, tmp
);
7931 if (insn
& (1 << 4)) {
7932 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
7934 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7936 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7937 tcg_temp_free_i64(tmp64
);
7939 case 7: /* Unsigned sum of absolute differences. */
7940 gen_helper_usad8(tmp
, tmp
, tmp2
);
7943 tmp2
= load_reg(s
, rs
);
7944 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7949 store_reg(s
, rd
, tmp
);
7951 case 6: case 7: /* 64-bit multiply, Divide. */
7952 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
7953 tmp
= load_reg(s
, rn
);
7954 tmp2
= load_reg(s
, rm
);
7955 if ((op
& 0x50) == 0x10) {
7957 if (!arm_feature(env
, ARM_FEATURE_DIV
))
7960 gen_helper_udiv(tmp
, tmp
, tmp2
);
7962 gen_helper_sdiv(tmp
, tmp
, tmp2
);
7964 store_reg(s
, rd
, tmp
);
7965 } else if ((op
& 0xe) == 0xc) {
7966 /* Dual multiply accumulate long. */
7968 gen_swap_half(tmp2
);
7969 gen_smul_dual(tmp
, tmp2
);
7971 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7973 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7977 tmp64
= tcg_temp_new_i64();
7978 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7980 gen_addq(s
, tmp64
, rs
, rd
);
7981 gen_storeq_reg(s
, rs
, rd
, tmp64
);
7982 tcg_temp_free_i64(tmp64
);
7985 /* Unsigned 64-bit multiply */
7986 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7990 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7992 tmp64
= tcg_temp_new_i64();
7993 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7996 /* Signed 64-bit multiply */
7997 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8002 gen_addq_lo(s
, tmp64
, rs
);
8003 gen_addq_lo(s
, tmp64
, rd
);
8004 } else if (op
& 0x40) {
8005 /* 64-bit accumulate. */
8006 gen_addq(s
, tmp64
, rs
, rd
);
8008 gen_storeq_reg(s
, rs
, rd
, tmp64
);
8009 tcg_temp_free_i64(tmp64
);
8014 case 6: case 7: case 14: case 15:
8016 if (((insn
>> 24) & 3) == 3) {
8017 /* Translate into the equivalent ARM encoding. */
8018 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4);
8019 if (disas_neon_data_insn(env
, s
, insn
))
8022 if (insn
& (1 << 28))
8024 if (disas_coproc_insn (env
, s
, insn
))
8028 case 8: case 9: case 10: case 11:
8029 if (insn
& (1 << 15)) {
8030 /* Branches, misc control. */
8031 if (insn
& 0x5000) {
8032 /* Unconditional branch. */
8033 /* signextend(hw1[10:0]) -> offset[:12]. */
8034 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
8035 /* hw1[10:0] -> offset[11:1]. */
8036 offset
|= (insn
& 0x7ff) << 1;
8037 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8038 offset[24:22] already have the same value because of the
8039 sign extension above. */
8040 offset
^= ((~insn
) & (1 << 13)) << 10;
8041 offset
^= ((~insn
) & (1 << 11)) << 11;
8043 if (insn
& (1 << 14)) {
8044 /* Branch and link. */
8045 tcg_gen_movi_i32(cpu_R
[14], s
->pc
| 1);
8049 if (insn
& (1 << 12)) {
8054 offset
&= ~(uint32_t)2;
8055 gen_bx_im(s
, offset
);
8057 } else if (((insn
>> 23) & 7) == 7) {
8059 if (insn
& (1 << 13))
8062 if (insn
& (1 << 26)) {
8063 /* Secure monitor call (v6Z) */
8064 goto illegal_op
; /* not implemented. */
8066 op
= (insn
>> 20) & 7;
8068 case 0: /* msr cpsr. */
8070 tmp
= load_reg(s
, rn
);
8071 addr
= tcg_const_i32(insn
& 0xff);
8072 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8073 tcg_temp_free_i32(addr
);
8079 case 1: /* msr spsr. */
8082 tmp
= load_reg(s
, rn
);
8084 msr_mask(env
, s
, (insn
>> 8) & 0xf, op
== 1),
8088 case 2: /* cps, nop-hint. */
8089 if (((insn
>> 8) & 7) == 0) {
8090 gen_nop_hint(s
, insn
& 0xff);
8092 /* Implemented as NOP in user mode. */
8097 if (insn
& (1 << 10)) {
8098 if (insn
& (1 << 7))
8100 if (insn
& (1 << 6))
8102 if (insn
& (1 << 5))
8104 if (insn
& (1 << 9))
8105 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
8107 if (insn
& (1 << 8)) {
8109 imm
|= (insn
& 0x1f);
8112 gen_set_psr_im(s
, offset
, 0, imm
);
8115 case 3: /* Special control operations. */
8117 op
= (insn
>> 4) & 0xf;
8125 /* These execute as NOPs. */
8132 /* Trivial implementation equivalent to bx. */
8133 tmp
= load_reg(s
, rn
);
8136 case 5: /* Exception return. */
8140 if (rn
!= 14 || rd
!= 15) {
8143 tmp
= load_reg(s
, rn
);
8144 tcg_gen_subi_i32(tmp
, tmp
, insn
& 0xff);
8145 gen_exception_return(s
, tmp
);
8147 case 6: /* mrs cpsr. */
8150 addr
= tcg_const_i32(insn
& 0xff);
8151 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
8152 tcg_temp_free_i32(addr
);
8154 gen_helper_cpsr_read(tmp
);
8156 store_reg(s
, rd
, tmp
);
8158 case 7: /* mrs spsr. */
8159 /* Not accessible in user mode. */
8160 if (IS_USER(s
) || IS_M(env
))
8162 tmp
= load_cpu_field(spsr
);
8163 store_reg(s
, rd
, tmp
);
8168 /* Conditional branch. */
8169 op
= (insn
>> 22) & 0xf;
8170 /* Generate a conditional jump to next instruction. */
8171 s
->condlabel
= gen_new_label();
8172 gen_test_cc(op
^ 1, s
->condlabel
);
8175 /* offset[11:1] = insn[10:0] */
8176 offset
= (insn
& 0x7ff) << 1;
8177 /* offset[17:12] = insn[21:16]. */
8178 offset
|= (insn
& 0x003f0000) >> 4;
8179 /* offset[31:20] = insn[26]. */
8180 offset
|= ((int32_t)((insn
<< 5) & 0x80000000)) >> 11;
8181 /* offset[18] = insn[13]. */
8182 offset
|= (insn
& (1 << 13)) << 5;
8183 /* offset[19] = insn[11]. */
8184 offset
|= (insn
& (1 << 11)) << 8;
8186 /* jump to the offset */
8187 gen_jmp(s
, s
->pc
+ offset
);
8190 /* Data processing immediate. */
8191 if (insn
& (1 << 25)) {
8192 if (insn
& (1 << 24)) {
8193 if (insn
& (1 << 20))
8195 /* Bitfield/Saturate. */
8196 op
= (insn
>> 21) & 7;
8198 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
8201 tcg_gen_movi_i32(tmp
, 0);
8203 tmp
= load_reg(s
, rn
);
8206 case 2: /* Signed bitfield extract. */
8208 if (shift
+ imm
> 32)
8211 gen_sbfx(tmp
, shift
, imm
);
8213 case 6: /* Unsigned bitfield extract. */
8215 if (shift
+ imm
> 32)
8218 gen_ubfx(tmp
, shift
, (1u << imm
) - 1);
8220 case 3: /* Bitfield insert/clear. */
8223 imm
= imm
+ 1 - shift
;
8225 tmp2
= load_reg(s
, rd
);
8226 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << imm
) - 1);
8232 default: /* Saturate. */
8235 tcg_gen_sari_i32(tmp
, tmp
, shift
);
8237 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8239 tmp2
= tcg_const_i32(imm
);
8242 if ((op
& 1) && shift
== 0)
8243 gen_helper_usat16(tmp
, tmp
, tmp2
);
8245 gen_helper_usat(tmp
, tmp
, tmp2
);
8248 if ((op
& 1) && shift
== 0)
8249 gen_helper_ssat16(tmp
, tmp
, tmp2
);
8251 gen_helper_ssat(tmp
, tmp
, tmp2
);
8253 tcg_temp_free_i32(tmp2
);
8256 store_reg(s
, rd
, tmp
);
8258 imm
= ((insn
& 0x04000000) >> 15)
8259 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
8260 if (insn
& (1 << 22)) {
8261 /* 16-bit immediate. */
8262 imm
|= (insn
>> 4) & 0xf000;
8263 if (insn
& (1 << 23)) {
8265 tmp
= load_reg(s
, rd
);
8266 tcg_gen_ext16u_i32(tmp
, tmp
);
8267 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
8271 tcg_gen_movi_i32(tmp
, imm
);
8274 /* Add/sub 12-bit immediate. */
8276 offset
= s
->pc
& ~(uint32_t)3;
8277 if (insn
& (1 << 23))
8282 tcg_gen_movi_i32(tmp
, offset
);
8284 tmp
= load_reg(s
, rn
);
8285 if (insn
& (1 << 23))
8286 tcg_gen_subi_i32(tmp
, tmp
, imm
);
8288 tcg_gen_addi_i32(tmp
, tmp
, imm
);
8291 store_reg(s
, rd
, tmp
);
8294 int shifter_out
= 0;
8295 /* modified 12-bit immediate. */
8296 shift
= ((insn
& 0x04000000) >> 23) | ((insn
& 0x7000) >> 12);
8297 imm
= (insn
& 0xff);
8300 /* Nothing to do. */
8302 case 1: /* 00XY00XY */
8305 case 2: /* XY00XY00 */
8309 case 3: /* XYXYXYXY */
8313 default: /* Rotated constant. */
8314 shift
= (shift
<< 1) | (imm
>> 7);
8316 imm
= imm
<< (32 - shift
);
8321 tcg_gen_movi_i32(tmp2
, imm
);
8322 rn
= (insn
>> 16) & 0xf;
8325 tcg_gen_movi_i32(tmp
, 0);
8327 tmp
= load_reg(s
, rn
);
8329 op
= (insn
>> 21) & 0xf;
8330 if (gen_thumb2_data_op(s
, op
, (insn
& (1 << 20)) != 0,
8331 shifter_out
, tmp
, tmp2
))
8334 rd
= (insn
>> 8) & 0xf;
8336 store_reg(s
, rd
, tmp
);
8343 case 12: /* Load/store single data item. */
8348 if ((insn
& 0x01100000) == 0x01000000) {
8349 if (disas_neon_ls_insn(env
, s
, insn
))
8353 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
8355 if (!(insn
& (1 << 20))) {
8359 /* Byte or halfword load space with dest == r15 : memory hints.
8360 * Catch them early so we don't emit pointless addressing code.
8361 * This space is a mix of:
8362 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8363 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8365 * unallocated hints, which must be treated as NOPs
8366 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8367 * which is easiest for the decoding logic
8368 * Some space which must UNDEF
8370 int op1
= (insn
>> 23) & 3;
8371 int op2
= (insn
>> 6) & 0x3f;
8376 /* UNPREDICTABLE or unallocated hint */
8380 return 0; /* PLD* or unallocated hint */
8382 if ((op2
== 0) || ((op2
& 0x3c) == 0x30)) {
8383 return 0; /* PLD* or unallocated hint */
8385 /* UNDEF space, or an UNPREDICTABLE */
8393 /* s->pc has already been incremented by 4. */
8394 imm
= s
->pc
& 0xfffffffc;
8395 if (insn
& (1 << 23))
8396 imm
+= insn
& 0xfff;
8398 imm
-= insn
& 0xfff;
8399 tcg_gen_movi_i32(addr
, imm
);
8401 addr
= load_reg(s
, rn
);
8402 if (insn
& (1 << 23)) {
8403 /* Positive offset. */
8405 tcg_gen_addi_i32(addr
, addr
, imm
);
8408 switch ((insn
>> 8) & 7) {
8409 case 0: case 8: /* Shifted Register. */
8410 shift
= (insn
>> 4) & 0xf;
8413 tmp
= load_reg(s
, rm
);
8415 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8416 tcg_gen_add_i32(addr
, addr
, tmp
);
8419 case 4: /* Negative offset. */
8420 tcg_gen_addi_i32(addr
, addr
, -imm
);
8422 case 6: /* User privilege. */
8423 tcg_gen_addi_i32(addr
, addr
, imm
);
8426 case 1: /* Post-decrement. */
8429 case 3: /* Post-increment. */
8433 case 5: /* Pre-decrement. */
8436 case 7: /* Pre-increment. */
8437 tcg_gen_addi_i32(addr
, addr
, imm
);
8445 if (insn
& (1 << 20)) {
8448 case 0: tmp
= gen_ld8u(addr
, user
); break;
8449 case 4: tmp
= gen_ld8s(addr
, user
); break;
8450 case 1: tmp
= gen_ld16u(addr
, user
); break;
8451 case 5: tmp
= gen_ld16s(addr
, user
); break;
8452 case 2: tmp
= gen_ld32(addr
, user
); break;
8453 default: goto illegal_op
;
8458 store_reg(s
, rs
, tmp
);
8462 tmp
= load_reg(s
, rs
);
8464 case 0: gen_st8(tmp
, addr
, user
); break;
8465 case 1: gen_st16(tmp
, addr
, user
); break;
8466 case 2: gen_st32(tmp
, addr
, user
); break;
8467 default: goto illegal_op
;
8471 tcg_gen_addi_i32(addr
, addr
, imm
);
8473 store_reg(s
, rn
, addr
);
8487 static void disas_thumb_insn(CPUState
*env
, DisasContext
*s
)
8489 uint32_t val
, insn
, op
, rm
, rn
, rd
, shift
, cond
;
8496 if (s
->condexec_mask
) {
8497 cond
= s
->condexec_cond
;
8498 if (cond
!= 0x0e) { /* Skip conditional when condition is AL. */
8499 s
->condlabel
= gen_new_label();
8500 gen_test_cc(cond
^ 1, s
->condlabel
);
8505 insn
= lduw_code(s
->pc
);
8508 switch (insn
>> 12) {
8512 op
= (insn
>> 11) & 3;
8515 rn
= (insn
>> 3) & 7;
8516 tmp
= load_reg(s
, rn
);
8517 if (insn
& (1 << 10)) {
8520 tcg_gen_movi_i32(tmp2
, (insn
>> 6) & 7);
8523 rm
= (insn
>> 6) & 7;
8524 tmp2
= load_reg(s
, rm
);
8526 if (insn
& (1 << 9)) {
8527 if (s
->condexec_mask
)
8528 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8530 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8532 if (s
->condexec_mask
)
8533 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8535 gen_helper_add_cc(tmp
, tmp
, tmp2
);
8538 store_reg(s
, rd
, tmp
);
8540 /* shift immediate */
8541 rm
= (insn
>> 3) & 7;
8542 shift
= (insn
>> 6) & 0x1f;
8543 tmp
= load_reg(s
, rm
);
8544 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
8545 if (!s
->condexec_mask
)
8547 store_reg(s
, rd
, tmp
);
8551 /* arithmetic large immediate */
8552 op
= (insn
>> 11) & 3;
8553 rd
= (insn
>> 8) & 0x7;
8554 if (op
== 0) { /* mov */
8556 tcg_gen_movi_i32(tmp
, insn
& 0xff);
8557 if (!s
->condexec_mask
)
8559 store_reg(s
, rd
, tmp
);
8561 tmp
= load_reg(s
, rd
);
8563 tcg_gen_movi_i32(tmp2
, insn
& 0xff);
8566 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8571 if (s
->condexec_mask
)
8572 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8574 gen_helper_add_cc(tmp
, tmp
, tmp2
);
8576 store_reg(s
, rd
, tmp
);
8579 if (s
->condexec_mask
)
8580 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8582 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
8584 store_reg(s
, rd
, tmp
);
8590 if (insn
& (1 << 11)) {
8591 rd
= (insn
>> 8) & 7;
8592 /* load pc-relative. Bit 1 of PC is ignored. */
8593 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
8594 val
&= ~(uint32_t)2;
8596 tcg_gen_movi_i32(addr
, val
);
8597 tmp
= gen_ld32(addr
, IS_USER(s
));
8599 store_reg(s
, rd
, tmp
);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                dead_tmp(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_helper_sub_cc(tmp, tmp, tmp2);
                dead_tmp(tmp2);
                dead_tmp(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = new_tmp();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                gen_bx(s, tmp);
                break;
            }
            break;
        }
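
        /* For reference, the 4-bit op field of the register-operand ALU
         * group below decodes as: 0 AND, 1 EOR, 2 LSL, 3 LSR, 4 ASR,
         * 5 ADC, 6 SBC, 7 ROR, 8 TST, 9 NEG, 0xa CMP, 0xb CMN, 0xc ORR,
         * 0xd MUL, 0xe BIC, 0xf MVN. */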
        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc(tmp, tmp2);
            else
                gen_helper_adc_cc(tmp, tmp, tmp2);
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
            else
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_helper_sub_cc(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_helper_sub_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_helper_add_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    dead_tmp(tmp);
            } else {
                store_reg(s, rd, tmp);
                dead_tmp(tmp2);
            }
        } else {
            dead_tmp(tmp);
            dead_tmp(tmp2);
        }
        break;
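
    /* For reference, op = (insn >> 9) & 7 in the next group selects:
     * 0 STR, 1 STRH, 2 STRB, 3 LDRSB, 4 LDR, 5 LDRH, 6 LDRB, 7 LDRSH;
     * ops 0-2 are stores, ops 3-7 loads. */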
    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        dead_tmp(tmp);

        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        dead_tmp(addr);
        break;
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;
    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;
    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;
        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has deallocated its temps, otherwise they may be
                       clobbered.  */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
                gen_bx(s, tmp);
            break;
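
        /* The compare-and-branch encodings below (CBZ/CBNZ, ARMv6T2 and
         * later) split their branch offset across the halfword: bits
         * [7:3] are imm5 and bit 9 is i, giving offset = i:imm5:'0'.
         * For example, with bit 9 set and imm5 all ones, the decode is
         * ((0xf8 >> 2) | (0x200 >> 3)) = 0x3e | 0x40 = 126 bytes forward
         * of the insn address + 4. */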
        case 1: case 3: case 9: case 11: /* cbz/cbnz */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            dead_tmp(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
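            /* Example: ITTE EQ encodes firstcond = 0 (EQ) in bits [7:4]
             * and a 4-bit mask in bits [3:0]. The assignments above keep
             * the top three condition bits in condexec_cond and fold the
             * low condition bit into the 5-bit condexec_mask, which is
             * shifted left once per executed instruction until it reaches
             * zero (see the advance logic in
             * gen_intermediate_code_internal below). */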
            break;

        case 0xe: /* bkpt */
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                tcg_temp_free_i32(tmp);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;
                gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
            }
            break;
    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback.  */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        } else {
            dead_tmp(addr);
        }
        break;
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;
    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF);
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF);
}
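
/* A minimal illustrative sketch (not used by the translator) of how the
 * IT-block state advances per executed instruction, mirroring the update
 * performed in gen_intermediate_code_internal() below. The struct and
 * function names are invented for this example. */
struct it_state {
    uint32_t cond;  /* top 3 bits of firstcond; LSB refreshed each step */
    uint32_t mask;  /* 5-bit mask: firstcond[0] followed by the IT mask */
};

static inline void it_state_advance(struct it_state *it)
{
    /* Bit 0 of the condition for the next instruction comes from the top
       of the mask; the mask then shifts left, and the block ends when it
       empties. */
    it->cond = (it->cond & 0xe) | ((it->mask >> 4) & 1);
    it->mask = (it->mask << 1) & 0x1f;
    if (it->mask == 0) {
        it->cond = 0;
    }
}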
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. gen_pc_load[] then uses this to restore
     * the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUState is correct in the
     * middle of a TB.
     */
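
    /* The packed format used for gen_opc_condexec_bits[] (and for the
     * CPUState condexec_bits field) is (cond << 4) | (mask >> 1): for
     * example, condexec_cond = 0x2 with condexec_mask = 0x10 is saved as
     * 0x28, and is unpacked the same way by gen_pc_load() at the end of
     * this file. */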
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4)
                                        | (dc->condexec_mask >> 1);
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);
    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
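
    /* The PSR line above prints the N/Z/C/V flag characters, 'T' for
       Thumb state ('A' for ARM), the mode name from cpu_mode_names[],
       and 32 vs 26 depending on the legacy 26-bit mode bit (PSR bit 4). */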
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
}
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}