/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;
#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
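
/* Usage sketch (illustrative):
 *     TCGv flag = load_cpu_field(CF);    loads env->CF into a fresh temp
 *     store_cpu_field(flag, CF);         stores and frees the temp
 * store_cpu_offset() consumes (frees) its TCGv argument, so the value must
 * not be used after the store.
 */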
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
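
/* The xori/subi pair is the usual branch-free sign extension of a
 * width-bit field: after masking, (x ^ signbit) - signbit maps values with
 * the top field bit set onto their negative two's-complement equivalents.
 * Illustrative 4-bit case: 0b1001 (9) -> 9 ^ 8 = 1, 1 - 8 = -7, the sign
 * extension of 0b1001.
 */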
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
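
/* Illustrative call: gen_bfi(dest, base, val, 8, 0xf) inserts the low four
 * bits of val at bits 8..11 of base: val is masked and shifted into place,
 * the target field is cleared with ~(0xf << 8), and the two are OR-ed.
 * As the comment above says, both base and val are clobbered.
 */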
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
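
/* The masking above implements a packed 16+16 add without letting a carry
 * out of bit 15 spill into the high halfword: bit 15 of each operand is
 * cleared before the add, so bit 15 of the raw sum holds only the carry
 * into it; xor-ing with (t0 ^ t1) & 0x8000 reconstructs the true bit-15
 * sum while discarding the carry out.  Illustrative values:
 * t0 = 0x0000ffff, t1 = 0x00000001 gives 0x00000000 with no carry into
 * the upper half.
 */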
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}
/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}
/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
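
/* ARM's immediate shift encoding reuses a shift count of 0 for special
 * cases, which is why shift == 0 is handled separately in each arm of the
 * switch above: LSR #0 and ASR #0 encode LSR #32 and ASR #32, and ROR #0
 * encodes RRX (rotate right by one bit through the carry flag).
 */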
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
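
/* The PAS_OP/gen_pas_helper macros above are pure token-pasting glue: for
 * instance, with pfx 's' and op2 0 the ARM-encoding PAS_OP expands
 * gen_pas_helper(glue(s,add16)) into gen_helper_sadd16(a, a, b, tmp).
 * Redefining gen_pas_helper between the GE-flag-setting and saturating
 * cases lets the same table emit calls with different argument lists.
 */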
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}
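
/* The branch conditions above rely on how the flags are cached in
 * CPUState: ZF holds a value that is zero iff the Z flag is set, CF holds
 * the carry as 0 or 1, and NF and VF keep the N and V flags in bit 31.
 * That is why, e.g., "mi" tests NF < 0 with a signed compare while "eq"
 * tests ZF == 0.
 */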
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
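
/* Bit 0 of a BX-style target selects the instruction set (1 = Thumb,
 * 0 = ARM): it is copied into env->thumb and stripped from the PC.
 * DISAS_UPDATE ends the TB so the new state takes effect before the next
 * instruction executes.
 */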
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
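
/* The load/store wrappers above pair each guest access with the MMU index
 * it should be translated under; callers in this file pass IS_USER(s).
 * Loads return a freshly allocated temporary; stores consume (free) the
 * value temporary, matching the ownership convention used throughout.
 */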
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    if (dp)                                                           \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
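
/* Register layout sketch: the VFP file is modelled as an array of doubles
 * and each single-precision register aliases half of one, e.g. s5
 * (dp = 0, reg = 5) is the upper word of vfp.regs[2].  CPU_DoubleU
 * supplies the endian-correct upper/lower word offsets.
 */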
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
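
/* Expansion sketch: IWMMXT_OP_ENV_SIZE(mins) generates the three wrappers
 * gen_op_iwmmxt_minsb_M0_wRn(), gen_op_iwmmxt_minsw_M0_wRn() and
 * gen_op_iwmmxt_minsl_M0_wRn(), each of which loads wRn into cpu_V1 and
 * calls the matching gen_helper_iwmmxt_* with cpu_env, cpu_M0 and cpu_V1.
 * M0 acts as the implicit accumulator/destination throughout this file.
 */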
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                 /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                    /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                            /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {                            /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {                            /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {                            /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                         /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                         /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                         /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                         /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                         /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                         /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                             /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;
    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        if (!env->cp[cp].cp_read)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(insn);
        gen_helper_get_cp(tmp, cpu_env, tmp2);
        tcg_temp_free(tmp2);
        store_reg(s, rd, tmp);
    } else {
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        tmp2 = tcg_const_i32(insn);
        gen_helper_set_cp(cpu_env, tmp2, tmp);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
    }
    return 0;
}
static int cp15_user_ok(uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
                || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}
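/* Annotation (not in the original source): a worked example of the check
 * above.  A user-mode "MRC p15, 0, Rd, c13, c0, 3" (read the user TLS
 * register) decodes to cpn=13, cpm=0, op=3 with ARM_CP_RW_BIT set, so
 * cp15_user_ok() returns 1 and the access is permitted; the corresponding
 * MCR (write, op=3 but no read bit) falls through and is rejected.
 */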
static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    TCGv tmp;
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            return 0;
        }
        store_reg(s, rd, tmp);
    } else {
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            tcg_temp_free_i32(tmp);
            return 0;
        }
    }
    return 1;
}
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }

    /* Pre-v7 versions of the architecture implemented WFI via coprocessor
     * instructions rather than a separate instruction.
     */
    if ((insn & 0x0fff0fff) == 0x0e070f90) {
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    }

    if ((insn & 0x0fff0fff) == 0x0e070f58) {
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise fall through to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
    }

    rd = (insn >> 12) & 0xf;

    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then this sets the condition
           codes instead of writing a register.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
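/* Annotation (not in the original source): a worked expansion of
 * VFP_SREG_D.  VFP_SREG(insn, 12, 22) expands to
 *
 *     ((insn >> 11) & 0x1e) | ((insn >> 22) & 1)
 *
 * i.e. the 4-bit Vd field (insn[15:12]) shifted left one place with the
 * D bit (insn[22]) as the least significant bit, so Vd=5 with D=1 names
 * single-precision register S11, matching the architected Sd = Vd:D
 * numbering.
 */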
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
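/* Annotation (not in the original source): with var = 0xAAAABBBB,
 * gen_neon_dup_low16() produces 0xBBBBBBBB (low halfword replicated) and
 * gen_neon_dup_high16() produces 0xAAAAAAAA.  Likewise
 * gen_neon_dup_u8(var, 8) extracts byte 1 (0xBB here) and replicates it
 * into all four byte lanes, also yielding 0xBBBBBBBB.
 */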
static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv tmp;
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xff);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
                            tcg_temp_free_i32(tmp2);
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
                    /* VCVT from int is always from S reg regardless of dp bit.
                     * VCVT with immediate frac_bits has same format as SREG_M
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm)  */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = tcg_temp_new_i32();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = tcg_temp_new_i32();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, 16 - rm);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, 32 - rm);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, 16 - rm);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, 32 - rm);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, 16 - rm);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, 32 - rm);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, 16 - rm);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, 32 - rm);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }
                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
                    /* VCVT double to int: always integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if (s->thumb && rn == 15) {
                addr = tcg_temp_new_i32();
                tcg_gen_movi_i32(addr, s->pc & ~2);
            } else {
                addr = load_reg(s, rn);
            }
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
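/* Annotation (not in the original source): this implements direct TB
 * chaining.  When the destination lies in the same guest page as the
 * current TB, tcg_gen_exit_tb((long)tb + n) returns a pointer to this TB
 * with the jump-slot index n encoded in the low bits, which lets the
 * execution loop patch the goto_tb jump in place; otherwise the code
 * takes the slow exit with the PC already written back.
 */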
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
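/* Annotation (not in the original source): gen_mulxy implements the
 * SMULxy family.  Each of x and y selects the top (1) or bottom (0)
 * signed halfword of its operand; e.g. with x=0, y=1,
 * t0 = 0xffff0002 (bottom half +2) and t1 = 0x00050004 (top half +5),
 * the generated code computes 2 * 5 = 10.
 */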
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
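/* Annotation (not in the original source): 'flags' mirrors the MSR field
 * mask, one bit per byte of the PSR: bit 0 -> control (0x000000ff),
 * bit 1 -> extension (0x0000ff00), bit 2 -> status (0x00ff0000),
 * bit 3 -> flags (0xff000000).  So "MSR CPSR_fc" (flags = 0b1001) starts
 * from mask 0xff0000ff before the feature and privilege masking above.
 */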
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead.  */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return.  Marks pc as dead.  */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: return 1;
    }
    return 0;
}
static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
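/* Annotation (not in the original source): the aliases work because the
 * translator evaluates 32-bit pairwise ops one result element at a time,
 * feeding each invocation exactly the two neighbouring source elements
 * (see the pairwise operand loading in disas_neon_data_insn below), so a
 * "pairwise max" over a single pair of 32-bit values is just "max".
 */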
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
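/* Annotation (not in the original source): a worked expansion.  With
 * size == 1 (16-bit elements) and u == 0 (signed), (size << 1) | u == 2,
 * so GEN_NEON_INTEGER_OP(hadd) emits
 *     gen_helper_neon_hadd_s16(tmp, tmp, tmp2);
 * and the _ENV variant would additionally pass cpu_env.
 */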
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
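/* Annotation (not in the original source): for a 16-bit scalar the low
 * three bits of 'reg' pick the D register, bit 4 picks which 32-bit half
 * of it to load, and bit 3 picks the 16-bit lane inside that word, which
 * is then duplicated into both halves of the returned TCG value; 32-bit
 * scalars need no duplication.
 */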
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (size == 3 || (!q && size == 2)) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (size == 3 || (!q && size == 2)) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
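/* Annotation (not in the original source): a worked example of
 * gen_neon_trn_u16 with t0 = 0xAAAABBBB and t1 = 0xCCCCDDDD:
 *   rd = (t0 << 16) | (t1 & 0xffff)       -> 0xBBBBDDDD
 *   t1 = (t1 >> 16) | (t0 & 0xffff0000)   -> 0xAAAACCCC
 *   t0 = rd                               -> 0xBBBBDDDD
 * i.e. the 16-bit lanes of the two inputs have been transposed.
 */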
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
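/* Annotation (not in the original source): with c = 0xf0f0f0f0,
 * t = 0x11111111 and f = 0x22222222 this computes
 * (t & c) | (f & ~c) = 0x10101010 | 0x02020202 = 0x12121212,
 * i.e. each result bit is taken from t where c is set and from f where
 * it is clear.
 */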
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
    default: abort();
    }
}
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
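/* Annotation (not in the original source): the (op, u) pair selects the
 * narrowing flavour above, as used for the VMOVN group:
 *   op=1, u=0  -> plain truncation                   (VMOVN)
 *   op=0, u=0  -> signed saturating                  (VQMOVN, signed)
 *   op=0, u=1  -> unsigned saturating                (VQMOVN, unsigned)
 *   op=1, u=1  -> signed-to-unsigned saturating      (VQMOVUN)
 */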
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int n;
    uint32_t imm, mask;
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_i64 tmp64;
->vfp_enabled
)
4256 q
= (insn
& (1 << 6)) != 0;
4257 u
= (insn
>> 24) & 1;
4258 VFP_DREG_D(rd
, insn
);
4259 VFP_DREG_N(rn
, insn
);
4260 VFP_DREG_M(rm
, insn
);
4261 size
= (insn
>> 20) & 3;
4262 if ((insn
& (1 << 23)) == 0) {
4263 /* Three register same length. */
4264 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4265 if (size
== 3 && (op
== 1 || op
== 5 || op
== 8 || op
== 9
4266 || op
== 10 || op
== 11 || op
== 16)) {
4267 /* 64-bit element instructions. */
4268 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4269 neon_load_reg64(cpu_V0
, rn
+ pass
);
4270 neon_load_reg64(cpu_V1
, rm
+ pass
);
4274 gen_helper_neon_qadd_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4276 gen_helper_neon_qadd_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4281 gen_helper_neon_qsub_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4283 gen_helper_neon_qsub_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4288 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4290 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4295 gen_helper_neon_qshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4297 gen_helper_neon_qshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4300 case 10: /* VRSHL */
4302 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4304 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4307 case 11: /* VQRSHL */
4309 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4311 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4316 tcg_gen_sub_i64(CPU_V001
);
4318 tcg_gen_add_i64(CPU_V001
);
4324 neon_store_reg64(cpu_V0
, rd
+ pass
);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case 8: /* VSHL */
        case 9: /* VQSHL */
        case 10: /* VRSHL */
        case 11: /* VQRSHL */
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
            }
            break;
        case 20: /* VPMAX */
        case 21: /* VPMIN */
        case 23: /* VPADD */
            pairwise = 1;
            break;
        case 26: /* VPADD (float) */
            pairwise = (u && size < 2);
            break;
        case 30: /* VPMIN/VPMAX (float) */
            pairwise = u;
            break;
        default:
            break;
        }

        for (pass = 0; pass < (q ? 4 : 2); pass++) {

        if (pairwise) {
            /* Pairwise.  */
            if (q)
                n = (pass & 1) * 2;
            else
                n = 0;
            if (pass < q + 1) {
                tmp = neon_load_reg(rn, n);
                tmp2 = neon_load_reg(rn, n + 1);
            } else {
                tmp = neon_load_reg(rm, n);
                tmp2 = neon_load_reg(rm, n + 1);
            }
        } else {
            /* Elementwise.  */
            tmp = neon_load_reg(rn, pass);
            tmp2 = neon_load_reg(rm, pass);
        }
        switch (op) {
        case 0: /* VHADD */
            GEN_NEON_INTEGER_OP(hadd);
            break;
        case 1: /* VQADD */
            GEN_NEON_INTEGER_OP(qadd);
            break;
        case 2: /* VRHADD */
            GEN_NEON_INTEGER_OP(rhadd);
            break;
        case 3: /* Logic ops.  */
            switch ((u << 2) | size) {
            case 0: /* VAND */
                tcg_gen_and_i32(tmp, tmp, tmp2);
                break;
            case 1: /* BIC */
                tcg_gen_andc_i32(tmp, tmp, tmp2);
                break;
            case 2: /* VORR */
                tcg_gen_or_i32(tmp, tmp, tmp2);
                break;
            case 3: /* VORN */
                tcg_gen_orc_i32(tmp, tmp, tmp2);
                break;
            case 4: /* VEOR */
                tcg_gen_xor_i32(tmp, tmp, tmp2);
                break;
            case 5: /* VBSL */
                tmp3 = neon_load_reg(rd, pass);
                gen_neon_bsl(tmp, tmp, tmp2, tmp3);
                tcg_temp_free_i32(tmp3);
                break;
            case 6: /* VBIT */
                tmp3 = neon_load_reg(rd, pass);
                gen_neon_bsl(tmp, tmp, tmp3, tmp2);
                tcg_temp_free_i32(tmp3);
                break;
            case 7: /* VBIF */
                tmp3 = neon_load_reg(rd, pass);
                gen_neon_bsl(tmp, tmp3, tmp, tmp2);
                tcg_temp_free_i32(tmp3);
                break;
            }
            break;
        case 4: /* VHSUB */
            GEN_NEON_INTEGER_OP(hsub);
            break;
        case 5: /* VQSUB */
            GEN_NEON_INTEGER_OP(qsub);
            break;
        case 6: /* VCGT */
            GEN_NEON_INTEGER_OP(cgt);
            break;
        case 7: /* VCGE */
            GEN_NEON_INTEGER_OP(cge);
            break;
        case 8: /* VSHL */
            GEN_NEON_INTEGER_OP(shl);
            break;
        case 9: /* VQSHL */
            GEN_NEON_INTEGER_OP(qshl);
            break;
        case 10: /* VRSHL */
            GEN_NEON_INTEGER_OP(rshl);
            break;
        case 11: /* VQRSHL */
            GEN_NEON_INTEGER_OP(qrshl);
            break;
        case 12: /* VMAX */
            GEN_NEON_INTEGER_OP(max);
            break;
        case 13: /* VMIN */
            GEN_NEON_INTEGER_OP(min);
            break;
        case 14: /* VABD */
            GEN_NEON_INTEGER_OP(abd);
            break;
        case 15: /* VABA */
            GEN_NEON_INTEGER_OP(abd);
            tcg_temp_free_i32(tmp2);
            tmp2 = neon_load_reg(rd, pass);
            gen_neon_add(size, tmp, tmp2);
            break;
        case 16:
            if (!u) { /* VADD */
                if (gen_neon_add(size, tmp, tmp2))
                    return 1;
            } else { /* VSUB */
                switch (size) {
                case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
                default: return 1;
                }
            }
            break;
        case 17:
            if (!u) { /* VTST */
                switch (size) {
                case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
                case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
                default: return 1;
                }
            } else { /* VCEQ */
                switch (size) {
                case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                default: return 1;
                }
            }
            break;
        case 18: /* Multiply.  */
            switch (size) {
            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
            default: return 1;
            }
            tcg_temp_free_i32(tmp2);
            tmp2 = neon_load_reg(rd, pass);
            if (u) { /* VMLS */
                gen_neon_rsb(size, tmp, tmp2);
            } else { /* VMLA */
                gen_neon_add(size, tmp, tmp2);
            }
            break;
        case 19: /* VMUL */
            if (u) { /* polynomial */
                gen_helper_neon_mul_p8(tmp, tmp, tmp2);
            } else { /* Integer */
                switch (size) {
                case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                default: return 1;
                }
            }
            break;
        case 20: /* VPMAX */
            GEN_NEON_INTEGER_OP(pmax);
            break;
        case 21: /* VPMIN */
            GEN_NEON_INTEGER_OP(pmin);
            break;
        case 22: /* Multiply high.  */
            if (!u) { /* VQDMULH */
                switch (size) {
                case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
                case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
                default: return 1;
                }
            } else { /* VQRDMULH */
                switch (size) {
                case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
                case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
                default: return 1;
                }
            }
            break;
        case 23: /* VPADD */
            if (u)
                return 1;
            switch (size) {
            case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
            case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
            case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
            default: return 1;
            }
            break;
        case 26: /* Floating point arithmetic.  */
            switch ((u << 2) | size) {
            case 0: /* VADD */
                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                break;
            case 2: /* VSUB */
                gen_helper_neon_sub_f32(tmp, tmp, tmp2);
                break;
            case 4: /* VPADD */
                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                break;
            case 6: /* VABD */
                gen_helper_neon_abd_f32(tmp, tmp, tmp2);
                break;
            default:
                return 1;
            }
            break;
        case 27: /* Float multiply.  */
            gen_helper_neon_mul_f32(tmp, tmp, tmp2);
            if (!u) {
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                if (size == 0) {
                    gen_helper_neon_add_f32(tmp, tmp, tmp2);
                } else {
                    gen_helper_neon_sub_f32(tmp, tmp2, tmp);
                }
            }
            break;
        case 28: /* Float compare.  */
            if (!u) {
                gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
            } else {
                if (size == 0)
                    gen_helper_neon_cge_f32(tmp, tmp, tmp2);
                else
                    gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
            }
            break;
        case 29: /* Float compare absolute.  */
            if (!u)
                return 1;
            if (size == 0)
                gen_helper_neon_acge_f32(tmp, tmp, tmp2);
            else
                gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
            break;
        case 30: /* Float min/max.  */
            if (size == 0)
                gen_helper_neon_max_f32(tmp, tmp, tmp2);
            else
                gen_helper_neon_min_f32(tmp, tmp, tmp2);
            break;
        case 31: /* Reciprocal steps.  */
            if (size == 0)
                gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
            else
                gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
            break;
        default:
            abort();
        }
        tcg_temp_free_i32(tmp2);

        /* Save the result.  For elementwise operations we can put it
           straight into the destination register.  For pairwise operations
           we have to be careful to avoid clobbering the source operands.  */
        if (pairwise && rd == rm) {
            neon_store_scratch(pass, tmp);
        } else {
            neon_store_reg(rd, pass, tmp);
        }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift.  */
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4)
                    shift = shift - (1 << (size + 3));
                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }
                switch (size) {
                case 0:
                    imm = (uint8_t) shift;
                    imm |= imm << 8;
                    imm |= imm << 16;
                    break;
                case 1:
                    imm = (uint16_t) shift;
                    imm |= imm << 16;
                    break;
                case 2:
                case 3:
                    imm = shift;
                    break;
                default:
                    abort();
                }
                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 0: /* VSHR */
                        case 1: /* VSRA */
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 4: /* VSRI */
                            if (!u)
                                return 1;
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 5: /* VSHL, VSLI */
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHLU */
                            if (!u)
                                return 1;
                            gen_helper_neon_qshlu_s64(cpu_V0,
                                                      cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHL */
                            if (u) {
                                gen_helper_neon_qshl_u64(cpu_V0,
                                                         cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_qshl_s64(cpu_V0,
                                                         cpu_V0, cpu_V1);
                            }
                            break;
                        }
                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            uint64_t mask64;
                            neon_load_reg64(cpu_V1, rd + pass);
                            if (shift < -63 || shift > 63) {
                                mask64 = 0;
                            } else {
                                if (op == 4) {
                                    mask64 = 0xffffffffffffffffull >> -shift;
                                } else {
                                    mask64 = 0xffffffffffffffffull << shift;
                                }
                            }
                            tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask64);
                            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1.  */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 0: /* VSHR */
                        case 1: /* VSRA */
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 4: /* VSRI */
                            if (!u)
                                return 1;
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 5: /* VSHL, VSLI */
                            switch (size) {
                            case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                            break;
                        case 6: /* VQSHLU */
                            if (!u)
                                return 1;
                            switch (size) {
                            case 0:
                                gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
                                break;
                            case 2:
                                gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
                                break;
                            default:
                                return 1;
                            }
                            break;
                        case 7: /* VQSHL */
                            GEN_NEON_INTEGER_OP(qshl);
                            break;
                        }
                        tcg_temp_free_i32(tmp2);

                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            switch (size) {
                            case 0:
                                if (op == 4)
                                    mask = 0xff >> -shift;
                                else
                                    mask = (uint8_t)(0xff << shift);
                                mask |= mask << 8;
                                mask |= mask << 16;
                                break;
                            case 1:
                                if (op == 4)
                                    mask = 0xffff >> -shift;
                                else
                                    mask = (uint16_t)(0xffff << shift);
                                mask |= mask << 16;
                                break;
                            case 2:
                                if (shift < -31 || shift > 31) {
                                    mask = 0;
                                } else {
                                    if (op == 4)
                                        mask = 0xffffffffu >> -shift;
                                    else
                                        mask = 0xffffffffu << shift;
                                }
                                break;
                            default:
                                abort();
                            }
                            tmp2 = neon_load_reg(rd, pass);
                            tcg_gen_andi_i32(tmp, tmp, mask);
                            tcg_gen_andi_i32(tmp2, tmp2, ~mask);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                int input_unsigned = (op == 8) ? !u : u;

                shift = shift - (1 << (size + 3));
                size++;
                if (size == 3) {
                    tmp64 = tcg_const_i64(shift);
                    neon_load_reg64(cpu_V0, rm);
                    neon_load_reg64(cpu_V1, rm + 1);
                    for (pass = 0; pass < 2; pass++) {
                        TCGv_i64 in;
                        if (pass == 0) {
                            in = cpu_V0;
                        } else {
                            in = cpu_V1;
                        }
                        if (q) {
                            if (input_unsigned) {
                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
                            }
                        } else {
                            if (input_unsigned) {
                                gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
                            }
                        }
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (size == 1) {
                        imm = (uint16_t)shift;
                        imm |= imm << 16;
                    } else {
                        /* size == 2 */
                        imm = (uint32_t)shift;
                    }
                    tmp2 = tcg_const_i32(imm);
                    tmp4 = neon_load_reg(rm + 1, 0);
                    tmp5 = neon_load_reg(rm + 1, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rm, 0);
                        } else {
                            tmp = tmp4;
                        }
                        gen_neon_shift_narrow(size, tmp, tmp2, q,
                                              input_unsigned);
                        if (pass == 0) {
                            tmp3 = neon_load_reg(rm, 1);
                        } else {
                            tmp3 = tmp5;
                        }
                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                              input_unsigned);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(tmp3);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i32(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL, VMOVL */
                if (q || size == 3)
                    return 1;
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow
                         * input.  */
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else if (size == 1) {
                                imm = 0xffff >> (16 - shift);
                            } else {
                                /* size == 2 */
                                imm = 0xffffffff >> (32 - shift);
                            }
                            if (size < 2) {
                                imm64 = imm | (((uint64_t)imm) << 32);
                            } else {
                                imm64 = imm;
                            }
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point.  */
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (!(op & 1)) {
                        if (u)
                            gen_vfp_ulto(0, shift);
                        else
                            gen_vfp_slto(0, shift);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift);
                        else
                            gen_vfp_tosl(0, shift);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            int invert;

            op = (insn >> 8) & 0xf;
            /* One register and immediate.  */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                break;
            case 15:
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert)
                imm = ~imm;

            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                if (op & 1 && op < 12) {
                    tmp = neon_load_reg(rd, pass);
                    if (invert) {
                        /* The immediate value has already been inverted, so
                           BIC becomes AND.  */
                        tcg_gen_andi_i32(tmp, tmp, imm);
                    } else {
                        tcg_gen_ori_i32(tmp, tmp, imm);
                    }
                } else {
                    /* VMOV, VMVN.  */
                    tmp = tcg_temp_new_i32();
                    if (op == 14 && invert) {
                        uint32_t val;
                        val = 0;
                        for (n = 0; n < 4; n++) {
                            if (imm & (1 << (n + (pass & 1) * 4)))
                                val |= 0xff << (n * 8);
                        }
                        tcg_gen_movi_i32(tmp, val);
                    } else {
                        tcg_gen_movi_i32(tmp, imm);
                    }
                }
                neon_store_reg(rd, pass, tmp);
            }
        }
    } else { /* (insn & 0x00800010 == 0x00800000) */
        if (size != 3) {
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths.  */
                int src1_wide;
                int src2_wide;
                int prewiden;
                /* prewiden, src1_wide, src2_wide */
                static const int neon_3reg_wide[16][3] = {
                    {1, 0, 0}, /* VADDL */
                    {1, 1, 0}, /* VADDW */
                    {1, 0, 0}, /* VSUBL */
                    {1, 1, 0}, /* VSUBW */
                    {0, 1, 1}, /* VADDHN */
                    {0, 0, 0}, /* VABAL */
                    {0, 1, 1}, /* VSUBHN */
                    {0, 0, 0}, /* VABDL */
                    {0, 0, 0}, /* VMLAL */
                    {0, 0, 0}, /* VQDMLAL */
                    {0, 0, 0}, /* VMLSL */
                    {0, 0, 0}, /* VQDMLSL */
                    {0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0}, /* VQDMULL */
                    {0, 0, 0}  /* Polynomial VMULL */
                };
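/* Annotation (not in the original source): each row above reads as
 * { prewiden, src1_wide, src2_wide }.  For example VADDW ({1, 1, 0})
 * takes an already-wide first operand (src1_wide) and widens the narrow
 * second operand before the add (prewiden), while VADDHN ({0, 1, 1})
 * consumes two wide operands and narrows the result.
 */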
                prewiden = neon_3reg_wide[op][0];
                src1_wide = neon_3reg_wide[op][1];
                src2_wide = neon_3reg_wide[op][2];

                if (size == 0 && (op == 9 || op == 11 || op == 13))
                    return 1;

                /* Avoid overlapping operands.  Wide source operands are
                   always aligned so will never overlap with wide
                   destinations in problematic ways.  */
                if (rd == rm && !src2_wide) {
                    tmp = neon_load_reg(rm, 1);
                    neon_store_scratch(2, tmp);
                } else if (rd == rn && !src1_wide) {
                    tmp = neon_load_reg(rn, 1);
                    neon_store_scratch(2, tmp);
                }
                for (pass = 0; pass < 2; pass++) {
                    if (src1_wide) {
                        neon_load_reg64(cpu_V0, rn + pass);
                    } else {
                        if (pass == 1 && rd == rn) {
                            tmp = neon_load_scratch(2);
                        } else {
                            tmp = neon_load_reg(rn, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V0, tmp, size, u);
                        }
                    }
                    if (src2_wide) {
                        neon_load_reg64(cpu_V1, rm + pass);
                    } else {
                        if (pass == 1 && rd == rm) {
                            tmp2 = neon_load_scratch(2);
                        } else {
                            tmp2 = neon_load_reg(rm, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V1, tmp2, size, u);
                        }
                    }
                    switch (op) {
                    case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
                        gen_neon_addl(size);
                        break;
                    case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
                        gen_neon_subl(size);
                        break;
                    case 5: case 7: /* VABAL, VABDL */
                        switch ((size << 1) | u) {
                        case 0:
                            gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
                            break;
                        case 1:
                            gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
                            break;
                        case 2:
                            gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
                            break;
                        case 3:
                            gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
                            break;
                        case 4:
                            gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
                            break;
                        case 5:
                            gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
                            break;
                        default: abort();
                        }
                        tcg_temp_free_i32(tmp2);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 8: case 9: case 10: case 11: case 12: case 13:
                        /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        break;
                    case 14: /* Polynomial VMULL */
                        gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        tcg_temp_free_i32(tmp);
                        break;
                    default: /* 15 is RESERVED.  */
                        return 1;
                    }
                    if (op == 13) {
                        /* VQDMULL */
                        gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 5 || (op >= 8 && op <= 11)) {
                        /* Accumulate.  */
                        neon_load_reg64(cpu_V1, rd + pass);
                        switch (op) {
                        case 10: /* VMLSL */
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 5: case 8: /* VABAL, VMLAL */
                            gen_neon_addl(size);
                            break;
                        case 9: case 11: /* VQDMLAL, VQDMLSL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 11) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 4 || op == 6) {
                        /* Narrowing operation.  */
                        tmp = tcg_temp_new_i32();
                        if (!u) {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        } else {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        }
                        if (pass == 0) {
                            tmp3 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp3);
                            neon_store_reg(rd, 1, tmp);
                        }
                    } else {
                        /* Write back the result.  */
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                }
            } else {
                /* Two registers and a scalar.  */
                switch (op) {
                case 0: /* Integer VMLA scalar */
                case 1: /* Float VMLA scalar */
                case 4: /* Integer VMLS scalar */
                case 5: /* Floating point VMLS scalar */
                case 8: /* Integer VMUL scalar */
                case 9: /* Floating point VMUL scalar */
                case 12: /* VQDMULH scalar */
                case 13: /* VQRDMULH scalar */
                    tmp = neon_get_scalar(size, rm);
                    neon_store_scratch(0, tmp);
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
                        tmp = neon_load_scratch(0);
                        tmp2 = neon_load_reg(rn, pass);
                        if (op == 12) {
                            if (size == 1) {
                                gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
                            } else {
                                gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
                            }
                        } else if (op == 13) {
                            if (size == 1) {
                                gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
                            } else {
                                gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
                            }
                        } else if (op & 1) {
                            gen_helper_neon_mul_f32(tmp, tmp, tmp2);
                        } else {
                            switch (size) {
                            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                        }
                        tcg_temp_free_i32(tmp2);
                        if (op < 8) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            switch (op) {
                            case 0:
                                gen_neon_add(size, tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                                break;
                            case 4:
                                gen_neon_rsb(size, tmp, tmp2);
                                break;
                            case 5:
                                gen_helper_neon_sub_f32(tmp, tmp2, tmp);
                                break;
                            default:
                                abort();
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                    break;
                case 2: /* VMLAL scalar */
                case 3: /* VQDMLAL scalar */
                case 6: /* VMLSL scalar */
                case 7: /* VQDMLSL scalar */
                case 10: /* VMULL scalar */
                case 11: /* VQDMULL scalar */
                    if (size == 0 && (op == 3 || op == 7 || op == 11))
                        return 1;

                    tmp2 = neon_get_scalar(size, rm);
                    /* We need a copy of tmp2 because gen_neon_mull
                     * deletes it during pass 0.  */
                    tmp4 = tcg_temp_new_i32();
                    tcg_gen_mov_i32(tmp4, tmp2);
                    tmp3 = neon_load_reg(rn, 1);

                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rn, 0);
                        } else {
                            tmp = tmp3;
                            tmp2 = tmp4;
                        }
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        if (op != 11) {
                            neon_load_reg64(cpu_V1, rd + pass);
                        }
                        switch (op) {
                        case 6:
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 2:
                            gen_neon_addl(size);
                            break;
                        case 3: case 7:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 7) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        case 10:
                            /* no-op */
                            break;
                        case 11:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                default: /* 14 and 15 are RESERVED */
                    return 1;
                }
            }
        } else { /* size == 3 */
            if (!u) {
                /* Extract.  */
                imm = (insn >> 8) & 0xf;

                if (imm > 7 && !q)
                    return 1;

                if (imm == 0) {
                    neon_load_reg64(cpu_V0, rn);
                    if (q) {
                        neon_load_reg64(cpu_V1, rn + 1);
                    }
                } else if (imm == 8) {
                    neon_load_reg64(cpu_V0, rn + 1);
                    if (q) {
                        neon_load_reg64(cpu_V1, rm);
                    }
                } else if (q) {
                    tmp64 = tcg_temp_new_i64();
                    if (imm < 8) {
                        neon_load_reg64(cpu_V0, rn);
                        neon_load_reg64(tmp64, rn + 1);
                    } else {
                        neon_load_reg64(cpu_V0, rn + 1);
                        neon_load_reg64(tmp64, rm);
                    }
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
                    tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                    if (imm < 8) {
                        neon_load_reg64(cpu_V1, rm);
                    } else {
                        neon_load_reg64(cpu_V1, rm + 1);
                        imm -= 8;
                    }
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
                    tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
                    tcg_temp_free_i64(tmp64);
                } else {
                    neon_load_reg64(cpu_V0, rn);
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
                    neon_load_reg64(cpu_V1, rm);
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                }
                neon_store_reg64(cpu_V0, rd);
                if (q) {
                    neon_store_reg64(cpu_V1, rd + 1);
                }
            } else if ((insn & (1 << 11)) == 0) {
                /* Two register misc.  */
                op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
                size = (insn >> 18) & 3;
                switch (op) {
                case 0: /* VREV64 */
                    if (size == 3)
                        return 1;
                    for (pass = 0; pass < (q ? 2 : 1); pass++) {
                        tmp = neon_load_reg(rm, pass * 2);
                        tmp2 = neon_load_reg(rm, pass * 2 + 1);
                        switch (size) {
                        case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                        case 1: gen_swap_half(tmp); break;
                        case 2: /* no-op */ break;
                        default: abort();
                        }
                        neon_store_reg(rd, pass * 2 + 1, tmp);
                        if (size == 2) {
                            neon_store_reg(rd, pass * 2, tmp2);
                        } else {
                            switch (size) {
                            case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
                            case 1: gen_swap_half(tmp2); break;
                            default: abort();
                            }
                            neon_store_reg(rd, pass * 2, tmp2);
                        }
                    }
                    break;
                case 4: case 5: /* VPADDL */
                case 12: case 13: /* VPADAL */
                    if (size == 3)
                        return 1;
                    for (pass = 0; pass < q + 1; pass++) {
                        tmp = neon_load_reg(rm, pass * 2);
                        gen_neon_widen(cpu_V0, tmp, size, op & 1);
                        tmp = neon_load_reg(rm, pass * 2 + 1);
                        gen_neon_widen(cpu_V1, tmp, size, op & 1);
                        switch (size) {
                        case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
                        case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
                        case 2: tcg_gen_add_i64(CPU_V001); break;
                        default: abort();
                        }
                        if (op >= 12) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            gen_neon_addl(size);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
            case 33: /* VTRN */
                if (size == 2) {
                    for (n = 0; n < (q ? 4 : 2); n += 2) {
                        tmp = neon_load_reg(rm, n);
                        tmp2 = neon_load_reg(rd, n + 1);
                        neon_store_reg(rm, n, tmp2);
                        neon_store_reg(rd, n + 1, tmp);
                    }
                } else {
                    goto elementwise;
                }
                break;
            case 34: /* VUZP */
                if (gen_neon_unzip(rd, rm, size, q)) {
                    return 1;
                }
                break;
            case 35: /* VZIP */
                if (gen_neon_zip(rd, rm, size, q)) {
                    return 1;
                }
                break;
            case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
                if (size == 3)
                    return 1;
                TCGV_UNUSED(tmp2);
                for (pass = 0; pass < 2; pass++) {
                    neon_load_reg64(cpu_V0, rm + pass);
                    tmp = tcg_temp_new_i32();
                    gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
                    if (pass == 0) {
                        tmp2 = tmp;
                    } else {
                        neon_store_reg(rd, 0, tmp2);
                        neon_store_reg(rd, 1, tmp);
                    }
                }
                break;
            case 38: /* VSHLL */
                if (q || size == 3)
                    return 1;
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;
                    gen_neon_widen(cpu_V0, tmp, size, 1);
                    tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
                    neon_store_reg64(cpu_V0, rd + pass);
                }
                break;
            case 44: /* VCVT.F16.F32 */
                if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                    return 1;
                tmp = tcg_temp_new_i32();
                tmp2 = tcg_temp_new_i32();
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
                gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
                gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                tcg_gen_shli_i32(tmp2, tmp2, 16);
                tcg_gen_or_i32(tmp2, tmp2, tmp);
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
                gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
                neon_store_reg(rd, 0, tmp2);
                tmp2 = tcg_temp_new_i32();
                gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                tcg_gen_shli_i32(tmp2, tmp2, 16);
                tcg_gen_or_i32(tmp2, tmp2, tmp);
                neon_store_reg(rd, 1, tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 46: /* VCVT.F32.F16 */
                if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                    return 1;
                tmp3 = tcg_temp_new_i32();
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                tcg_gen_ext16u_i32(tmp3, tmp);
                gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_shri_i32(tmp3, tmp, 16);
                gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
                tcg_temp_free_i32(tmp);
                tcg_gen_ext16u_i32(tmp3, tmp2);
                gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
                tcg_gen_shri_i32(tmp3, tmp2, 16);
                gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp3);
                break;
            default:
            elementwise:
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    if (op == 30 || op == 31 || op >= 58) {
                        tcg_gen_ld_f32(cpu_F0s, cpu_env,
                                       neon_reg_offset(rm, pass));
                        TCGV_UNUSED(tmp);
                    } else {
                        tmp = neon_load_reg(rm, pass);
                    }
                    switch (op) {
                    case 1: /* VREV32 */
                        switch (size) {
                        case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                        case 1: gen_swap_half(tmp); break;
                        default: return 1;
                        }
                        break;
                    case 2: /* VREV16 */
                        if (size != 0)
                            return 1;
                        gen_rev16(tmp);
                        break;
                    case 8: /* CLS */
                        switch (size) {
                        case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
                        case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
                        case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
                        default: return 1;
                        }
                        break;
                    case 9: /* CLZ */
                        switch (size) {
                        case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
                        case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
                        case 2: gen_helper_clz(tmp, tmp); break;
                        default: return 1;
                        }
                        break;
                    case 10: /* CNT */
                        if (size != 0)
                            return 1;
                        gen_helper_neon_cnt_u8(tmp, tmp);
                        break;
                    case 11: /* VNOT */
                        if (size != 0)
                            return 1;
                        tcg_gen_not_i32(tmp, tmp);
                        break;
                    case 14: /* VQABS */
                        switch (size) {
                        case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
                        case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
                        case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
                        default: return 1;
                        }
                        break;
                    case 15: /* VQNEG */
                        switch (size) {
                        case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
                        case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
                        case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
                        default: return 1;
                        }
                        break;
                    case 16: case 19: /* VCGT #0, VCLE #0 */
                        tmp2 = tcg_const_i32(0);
                        switch(size) {
                        case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
                        case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
                        case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
                        default: return 1;
                        }
                        tcg_temp_free(tmp2);
                        if (op == 19)
                            tcg_gen_not_i32(tmp, tmp);
                        break;
                    case 17: case 20: /* VCGE #0, VCLT #0 */
                        tmp2 = tcg_const_i32(0);
                        switch(size) {
                        case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
                        case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
                        case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
                        default: return 1;
                        }
                        tcg_temp_free(tmp2);
                        if (op == 20)
                            tcg_gen_not_i32(tmp, tmp);
                        break;
                    case 18: /* VCEQ #0 */
                        tmp2 = tcg_const_i32(0);
                        switch(size) {
                        case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                        case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                        case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                        default: return 1;
                        }
                        tcg_temp_free(tmp2);
                        break;
                    case 22: /* VABS */
                        switch(size) {
                        case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
                        case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
                        case 2: tcg_gen_abs_i32(tmp, tmp); break;
                        default: return 1;
                        }
                        break;
                    case 23: /* VNEG */
                        if (size == 3)
                            return 1;
                        tmp2 = tcg_const_i32(0);
                        gen_neon_rsb(size, tmp, tmp2);
                        tcg_temp_free(tmp2);
                        break;
                    case 24: /* Float VCGT #0 */
                        tmp2 = tcg_const_i32(0);
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
                        tcg_temp_free(tmp2);
                        break;
                    case 25: /* Float VCGE #0 */
                        tmp2 = tcg_const_i32(0);
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2);
                        tcg_temp_free(tmp2);
                        break;
                    case 26: /* Float VCEQ #0 */
                        tmp2 = tcg_const_i32(0);
                        gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
                        tcg_temp_free(tmp2);
                        break;
                    case 27: /* Float VCLE #0 */
                        tmp2 = tcg_const_i32(0);
                        gen_helper_neon_cge_f32(tmp, tmp2, tmp);
                        tcg_temp_free(tmp2);
                        break;
                    case 28: /* Float VCLT #0 */
                        tmp2 = tcg_const_i32(0);
                        gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
                        tcg_temp_free(tmp2);
                        break;
                    case 30: /* Float VABS */
                        gen_vfp_abs(0);
                        break;
                    case 31: /* Float VNEG */
                        gen_vfp_neg(0);
                        break;
                    case 32: /* VSWP */
                        tmp2 = neon_load_reg(rd, pass);
                        neon_store_reg(rm, pass, tmp2);
                        break;
                    case 33: /* VTRN */
                        tmp2 = neon_load_reg(rd, pass);
                        switch (size) {
                        case 0: gen_neon_trn_u8(tmp, tmp2); break;
                        case 1: gen_neon_trn_u16(tmp, tmp2); break;
                        default: return 1;
                        }
                        neon_store_reg(rm, pass, tmp2);
                        break;
                    case 56: /* Integer VRECPE */
                        gen_helper_recpe_u32(tmp, tmp, cpu_env);
                        break;
                    case 57: /* Integer VRSQRTE */
                        gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
                        break;
                    case 58: /* Float VRECPE */
                        gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
                        break;
                    case 59: /* Float VRSQRTE */
                        gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
                        break;
                    case 60: /* VCVT.F32.S32 */
                        gen_vfp_sito(0);
                        break;
                    case 61: /* VCVT.F32.U32 */
                        gen_vfp_uito(0);
                        break;
                    case 62: /* VCVT.S32.F32 */
                        gen_vfp_tosiz(0);
                        break;
                    case 63: /* VCVT.U32.F32 */
                        gen_vfp_touiz(0);
                        break;
                    default:
                        /* Reserved: 21, 29, 39-56 */
                        return 1;
                    }
                    if (op == 30 || op == 31 || op >= 58) {
                        tcg_gen_st_f32(cpu_F0s, cpu_env,
                                       neon_reg_offset(rd, pass));
                    } else {
                        neon_store_reg(rd, pass, tmp);
                    }
                }
                break;
            }
        } else if ((insn & (1 << 10)) == 0) {
            /* VTBL, VTBX.  */
            n = ((insn >> 5) & 0x18) + 8;
            if (insn & (1 << 6)) {
                tmp = neon_load_reg(rd, 0);
            } else {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            }
            tmp2 = neon_load_reg(rm, 0);
            tmp4 = tcg_const_i32(rn);
            tmp5 = tcg_const_i32(n);
            gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
            tcg_temp_free_i32(tmp);
            if (insn & (1 << 6)) {
                tmp = neon_load_reg(rd, 1);
            } else {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            }
            tmp3 = neon_load_reg(rm, 1);
            gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
            tcg_temp_free_i32(tmp5);
            tcg_temp_free_i32(tmp4);
            neon_store_reg(rd, 0, tmp2);
            neon_store_reg(rd, 1, tmp3);
            tcg_temp_free_i32(tmp);
        } else if ((insn & 0x380) == 0) {
            /* VDUP */
            if (insn & (1 << 19)) {
                tmp = neon_load_reg(rm, 1);
            } else {
                tmp = neon_load_reg(rm, 0);
            }
            if (insn & (1 << 16)) {
                gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
            } else if (insn & (1 << 17)) {
                if ((insn >> 18) & 1)
                    gen_neon_dup_high16(tmp);
                else
                    gen_neon_dup_low16(tmp);
            }
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp2 = tcg_temp_new_i32();
                tcg_gen_mov_i32(tmp2, tmp);
                neon_store_reg(rd, pass, tmp2);
            }
            tcg_temp_free_i32(tmp);
        } else {
            return 1;
        }
    }
    return 0;
}
static int disas_cp14_read(CPUState *env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    /* Minimal set of debug registers, since we don't support debug */
    if (op1 == 0 && crn == 0 && op2 == 0) {
        switch (crm) {
        case 0:
            /* DBGDIDR: just RAZ. In particular this means the
             * "debug architecture version" bits will read as
             * a reserved value, which should cause Linux to
             * not try to use the debug hardware.
             */
            tmp = tcg_const_i32(0);
            store_reg(s, rt, tmp);
            return 0;
        case 1:
        case 2:
            /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
             * don't implement memory mapped debug components
             */
            if (ENABLE_ARCH_7) {
                tmp = tcg_const_i32(0);
                store_reg(s, rt, tmp);
                return 0;
            }
            break;
        default:
            break;
        }
    }

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            tmp = load_cpu_field(teecr);
            store_reg(s, rt, tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_cpu_field(teehbr);
            store_reg(s, rt, tmp);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}
static int disas_cp14_write(CPUState *env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            if (IS_USER(s))
                return 1;
            tmp = load_reg(s, rt);
            gen_helper_set_teecr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_reg(s, rt);
            store_cpu_field(tmp, teehbr);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}
static int disas_coproc_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int cpnum;

    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;

    switch (cpnum) {
    case 0:
    case 1:
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(env, s, insn);
        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(env, s, insn);
        }
        return 1;
    case 10:
    case 11:
        return disas_vfp_insn (env, s, insn);
    case 14:
        /* Coprocessors 7-15 are architecturally reserved by ARM.
           Unfortunately Intel decided to ignore this.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            goto board;
        if (insn & (1 << 20))
            return disas_cp14_read(env, s, insn);
        else
            return disas_cp14_write(env, s, insn);
    case 15:
        return disas_cp15_insn (env, s, insn);
    default:
    board:
        /* Unknown coprocessor.  See if the board has hooked it.  */
        return disas_cp_insn (env, s, insn);
    }
}
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
/* load a 32-bit value from a register and perform a 64-bit accumulate.  */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* load and add a 64-bit value from a register pair.  */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv tmpl;
    TCGv tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* Set N and Z flags from a 64-bit value.  */
static void gen_logicq_cc(TCGv_i64 val)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_logicq_cc(tmp, val);
    gen_logic_CC(tmp);
    tcg_temp_free_i32(tmp);
}
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
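/* Added commentary (illustrative, not part of the original source): for
 *     LDREX r1, [r0]  ...  STREX r2, r3, [r0]
 * gen_load_exclusive() below records the address and the loaded value,
 * and gen_store_exclusive() re-reads [r0], compares address and value
 * against the recorded pair, and performs the store with r2 = 0 only
 * on a match (r2 = 1 otherwise).
 */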
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif
static void disas_arm_insn(CPUState *env, DisasContext *s)
{
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
    TCGv tmp;
    TCGv tmp2;
    TCGv tmp3;
    TCGv addr;
    TCGv_i64 tmp64;

    insn = ldl_code(s->pc);
    s->pc += 4;

    /* M variants do not implement ARM mode.  */
    if (IS_M(env))
        goto illegal_op;
    cond = insn >> 28;
    if (cond == 0xf){
        /* Unconditional instructions.  */
        if (((insn >> 25) & 7) == 1) {
            /* NEON Data processing.  */
            if (!arm_feature(env, ARM_FEATURE_NEON))
                goto illegal_op;

            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
            return;
        }
        if ((insn & 0x0f100000) == 0x04000000) {
            /* NEON load/store.  */
            if (!arm_feature(env, ARM_FEATURE_NEON))
                goto illegal_op;

            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            return;
        }
        if (((insn & 0x0f30f000) == 0x0510f000) ||
            ((insn & 0x0f30f010) == 0x0710f000)) {
            if ((insn & (1 << 22)) == 0) {
                /* PLDW; v7MP */
                if (!arm_feature(env, ARM_FEATURE_V7MP)) {
                    goto illegal_op;
                }
            }
            /* Otherwise PLD; v5TE+ */
            return;
        }
        if (((insn & 0x0f70f000) == 0x0450f000) ||
            ((insn & 0x0f70f010) == 0x0650f000)) {
            ARCH(7);
            return; /* PLI; V7 */
        }
        if (((insn & 0x0f700000) == 0x04100000) ||
            ((insn & 0x0f700010) == 0x06100000)) {
            if (!arm_feature(env, ARM_FEATURE_V7MP)) {
                goto illegal_op;
            }
            return; /* v7MP: Unallocated memory hint: must NOP */
        }

        if ((insn & 0x0ffffdff) == 0x01010000) {
            ARCH(6);
            /* setend */
            if (insn & (1 << 9)) {
                /* BE8 mode not implemented.  */
                goto illegal_op;
            }
            return;
        } else if ((insn & 0x0fffff00) == 0x057ff000) {
            switch ((insn >> 4) & 0xf) {
            case 1: /* clrex */
                ARCH(6K);
                gen_clrex(s);
                return;
            case 4: /* dsb */
            case 5: /* dmb */
            case 6: /* isb */
                ARCH(7);
                /* We don't emulate caches so these are a no-op.  */
                return;
            default:
                goto illegal_op;
            }
        } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
            /* srs */
            int32_t offset;
            if (IS_USER(s))
                goto illegal_op;
            ARCH(6);
            op1 = (insn & 0x1f);
            addr = tcg_temp_new_i32();
            tmp = tcg_const_i32(op1);
            gen_helper_get_r13_banked(addr, cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            i = (insn >> 23) & 3;
            switch (i) {
            case 0: offset = -4; break; /* DA */
            case 1: offset = 0; break; /* IA */
            case 2: offset = -8; break; /* DB */
            case 3: offset = 4; break; /* IB */
            default: abort();
            }
            if (offset)
                tcg_gen_addi_i32(addr, addr, offset);
            tmp = load_reg(s, 14);
            gen_st32(tmp, addr, 0);
            tmp = load_cpu_field(spsr);
            tcg_gen_addi_i32(addr, addr, 4);
            gen_st32(tmp, addr, 0);
            if (insn & (1 << 21)) {
                /* Base writeback.  */
                switch (i) {
                case 0: offset = -8; break;
                case 1: offset = 4; break;
                case 2: offset = -4; break;
                case 3: offset = 0; break;
                default: abort();
                }
                if (offset)
                    tcg_gen_addi_i32(addr, addr, offset);
                tmp = tcg_const_i32(op1);
                gen_helper_set_r13_banked(cpu_env, tmp, addr);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
            } else {
                tcg_temp_free_i32(addr);
            }
            return;
        } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
            /* rfe */
            int32_t offset;
            if (IS_USER(s))
                goto illegal_op;
            ARCH(6);
            rn = (insn >> 16) & 0xf;
            addr = load_reg(s, rn);
            i = (insn >> 23) & 3;
            switch (i) {
            case 0: offset = -4; break; /* DA */
            case 1: offset = 0; break; /* IA */
            case 2: offset = -8; break; /* DB */
            case 3: offset = 4; break; /* IB */
            default: abort();
            }
            if (offset)
                tcg_gen_addi_i32(addr, addr, offset);
            /* Load PC into tmp and CPSR into tmp2.  */
            tmp = gen_ld32(addr, 0);
            tcg_gen_addi_i32(addr, addr, 4);
            tmp2 = gen_ld32(addr, 0);
            if (insn & (1 << 21)) {
                /* Base writeback.  */
                switch (i) {
                case 0: offset = -8; break;
                case 1: offset = 4; break;
                case 2: offset = -4; break;
                case 3: offset = 0; break;
                default: abort();
                }
                if (offset)
                    tcg_gen_addi_i32(addr, addr, offset);
                store_reg(s, rn, addr);
            } else {
                tcg_temp_free_i32(addr);
            }
            gen_rfe(s, tmp, tmp2);
            return;
        } else if ((insn & 0x0e000000) == 0x0a000000) {
            /* branch link and change to thumb (blx <offset>) */
            int32_t offset;

            val = (uint32_t)s->pc;
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, val);
            store_reg(s, 14, tmp);
            /* Sign-extend the 24-bit offset */
            offset = (((int32_t)insn) << 8) >> 8;
            /* offset * 4 + bit24 * 2 + (thumb bit) */
            val += (offset << 2) | ((insn >> 23) & 2) | 1;
            /* pipeline offset */
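            /* Added note (illustrative, not in the original source): for a
             * BLX immediate at address A, the resulting target is
             * A + 8 + (offset << 2) + (H-bit ? 2 : 0), with bit 0 set so
             * that the destination executes in Thumb state.
             */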
            val += 4;
            gen_bx_im(s, val);
            return;
        } else if ((insn & 0x0e000f00) == 0x0c000100) {
            if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
                /* iWMMXt register transfer.  */
                if (env->cp15.c15_cpar & (1 << 1))
                    if (!disas_iwmmxt_insn(env, s, insn))
                        return;
            }
        } else if ((insn & 0x0fe00000) == 0x0c400000) {
            /* Coprocessor double register transfer.  */
        } else if ((insn & 0x0f000010) == 0x0e000010) {
            /* Additional coprocessor register transfer.  */
        } else if ((insn & 0x0ff10020) == 0x01000000) {
            uint32_t mask;
            uint32_t val;
            /* cps (privileged) */
            if (IS_USER(s))
                return;
            mask = val = 0;
            if (insn & (1 << 19)) {
                if (insn & (1 << 8))
                    mask |= CPSR_A;
                if (insn & (1 << 7))
                    mask |= CPSR_I;
                if (insn & (1 << 6))
                    mask |= CPSR_F;
                if (insn & (1 << 18))
                    val |= mask;
            }
            if (insn & (1 << 17)) {
                mask |= CPSR_M;
                val |= (insn & 0x1f);
            }
            if (mask) {
                gen_set_psr_im(s, mask, 0, val);
            }
            return;
        }
        goto illegal_op;
    }
    if (cond != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;
    }
    if ((insn & 0x0f900000) == 0x03000000) {
        if ((insn & (1 << 21)) == 0) {
            ARCH(6T2);
            rd = (insn >> 12) & 0xf;
            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
            if ((insn & (1 << 22)) == 0) {
                /* MOVW */
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, val);
            } else {
                /* MOVT */
                tmp = load_reg(s, rd);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_ori_i32(tmp, tmp, val << 16);
            }
            store_reg(s, rd, tmp);
        } else {
            if (((insn >> 12) & 0xf) != 0xf)
                goto illegal_op;
            if (((insn >> 16) & 0xf) == 0) {
                gen_nop_hint(s, insn & 0xff);
            } else {
                /* CPSR = immediate */
                val = insn & 0xff;
                shift = ((insn >> 8) & 0xf) * 2;
                if (shift)
                    val = (val >> shift) | (val << (32 - shift));
                i = ((insn & (1 << 22)) != 0);
                if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
                    goto illegal_op;
            }
        }
    } else if ((insn & 0x0f900000) == 0x01000000
               && (insn & 0x00000090) != 0x00000090) {
        /* miscellaneous instructions */
        op1 = (insn >> 21) & 3;
        sh = (insn >> 4) & 0xf;
        rm = insn & 0xf;
        switch (sh) {
        case 0x0: /* move program status register */
            if (op1 & 1) {
                /* PSR = reg */
                tmp = load_reg(s, rm);
                i = ((op1 & 2) != 0);
                if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
                    goto illegal_op;
            } else {
                /* reg = PSR */
                rd = (insn >> 12) & 0xf;
                if (op1 & 2) {
                    if (IS_USER(s))
                        goto illegal_op;
                    tmp = load_cpu_field(spsr);
                } else {
                    tmp = tcg_temp_new_i32();
                    gen_helper_cpsr_read(tmp);
                }
                store_reg(s, rd, tmp);
            }
            break;
        case 0x1:
            if (op1 == 1) {
                /* branch/exchange thumb (bx).  */
                tmp = load_reg(s, rm);
                gen_bx(s, tmp);
            } else if (op1 == 3) {
                /* clz */
                rd = (insn >> 12) & 0xf;
                tmp = load_reg(s, rm);
                gen_helper_clz(tmp, tmp);
                store_reg(s, rd, tmp);
            } else {
                goto illegal_op;
            }
            break;
        case 0x2:
            if (op1 == 1) {
                ARCH(5J); /* bxj */
                /* Trivial implementation equivalent to bx.  */
                tmp = load_reg(s, rm);
                gen_bx(s, tmp);
            } else {
                goto illegal_op;
            }
            break;
        case 0x3:
            if (op1 != 1)
                goto illegal_op;

            /* branch link/exchange thumb (blx) */
            tmp = load_reg(s, rm);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        case 0x5: /* saturating add/subtract */
            rd = (insn >> 12) & 0xf;
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rn);
            if (op1 & 2)
                gen_helper_double_saturate(tmp2, tmp2);
            if (op1 & 1)
                gen_helper_sub_saturate(tmp, tmp, tmp2);
            else
                gen_helper_add_saturate(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 7:
            /* SMC instruction (op1 == 3)
               and undefined instructions (op1 == 0 || op1 == 2)
               will trap */
            if (op1 != 1) {
                goto illegal_op;
            }
            /* bkpt */
            gen_exception_insn(s, 4, EXCP_BKPT);
            break;
        case 0x8: /* signed multiply */
        case 0xa:
        case 0xc:
        case 0xe:
            rs = (insn >> 8) & 0xf;
            rn = (insn >> 12) & 0xf;
            rd = (insn >> 16) & 0xf;
            if (op1 == 1) {
                /* (32 * 16) >> 16 */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                if (sh & 4)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if ((sh & 2) == 0) {
                    tmp2 = load_reg(s, rn);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                store_reg(s, rd, tmp);
            } else {
                /* 16 * 16 */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
                tcg_temp_free_i32(tmp2);
                if (op1 == 2) {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ext_i32_i64(tmp64, tmp);
                    tcg_temp_free_i32(tmp);
                    gen_addq(s, tmp64, rn, rd);
                    gen_storeq_reg(s, rn, rd, tmp64);
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (op1 == 0) {
                        tmp2 = load_reg(s, rn);
                        gen_helper_add_setq(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                    }
                    store_reg(s, rd, tmp);
                }
            }
            break;
        default:
            goto illegal_op;
        }
    } else if (((insn & 0x0e000000) == 0 &&
                (insn & 0x00000090) != 0x90) ||
               ((insn & 0x0e000000) == (1 << 25))) {
        int set_cc, logic_cc, shiftop;

        op1 = (insn >> 21) & 0xf;
        set_cc = (insn >> 20) & 1;
        logic_cc = table_logic_cc[op1] & set_cc;

        /* data processing instruction */
        if (insn & (1 << 25)) {
            /* immediate operand */
            val = insn & 0xff;
            shift = ((insn >> 8) & 0xf) * 2;
            if (shift) {
                val = (val >> shift) | (val << (32 - shift));
            }
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, val);
            if (logic_cc && shift) {
                gen_set_CF_bit31(tmp2);
            }
        } else {
            /* register */
            rm = (insn) & 0xf;
            tmp2 = load_reg(s, rm);
            shiftop = (insn >> 5) & 3;
            if (!(insn & (1 << 4))) {
                shift = (insn >> 7) & 0x1f;
                gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            } else {
                rs = (insn >> 8) & 0xf;
                tmp = load_reg(s, rs);
                gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
            }
        }
        if (op1 != 0x0f && op1 != 0x0d) {
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rn);
        } else {
            TCGV_UNUSED(tmp);
        }
        rd = (insn >> 12) & 0xf;
        switch(op1) {
        case 0x00:
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x01:
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x02:
            if (set_cc && rd == 15) {
                /* SUBS r15, ... is used for exception return.  */
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_helper_sub_cc(tmp, tmp, tmp2);
                gen_exception_return(s, tmp);
            } else {
                if (set_cc) {
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                } else {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                }
                store_reg_bx(env, s, rd, tmp);
            }
            break;
        case 0x03:
            if (set_cc) {
                gen_helper_sub_cc(tmp, tmp2, tmp);
            } else {
                tcg_gen_sub_i32(tmp, tmp2, tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x04:
            if (set_cc) {
                gen_helper_add_cc(tmp, tmp, tmp2);
            } else {
                tcg_gen_add_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x05:
            if (set_cc) {
                gen_helper_adc_cc(tmp, tmp, tmp2);
            } else {
                gen_add_carry(tmp, tmp, tmp2);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x06:
            if (set_cc) {
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            } else {
                gen_sub_carry(tmp, tmp, tmp2);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x07:
            if (set_cc) {
                gen_helper_sbc_cc(tmp, tmp2, tmp);
            } else {
                gen_sub_carry(tmp, tmp2, tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x08:
            if (set_cc) {
                tcg_gen_and_i32(tmp, tmp, tmp2);
                gen_logic_CC(tmp);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x09:
            if (set_cc) {
                tcg_gen_xor_i32(tmp, tmp, tmp2);
                gen_logic_CC(tmp);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x0a:
            if (set_cc) {
                gen_helper_sub_cc(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x0b:
            if (set_cc) {
                gen_helper_add_cc(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x0c:
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x0d:
            if (logic_cc && rd == 15) {
                /* MOVS r15, ... is used for exception return.  */
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_exception_return(s, tmp2);
            } else {
                if (logic_cc) {
                    gen_logic_CC(tmp2);
                }
                store_reg_bx(env, s, rd, tmp2);
            }
            break;
        case 0x0e:
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        default:
        case 0x0f:
            tcg_gen_not_i32(tmp2, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(env, s, rd, tmp2);
            break;
        }
        if (op1 != 0x0f && op1 != 0x0d) {
            tcg_temp_free_i32(tmp2);
        }
    } else {
        /* other instructions */
        op1 = (insn >> 24) & 0xf;
        switch(op1) {
        case 0x0:
        case 0x1:
            /* multiplies, extra load/stores */
            sh = (insn >> 5) & 3;
            if (sh == 0) {
                if (op1 == 0x0) {
                    rd = (insn >> 16) & 0xf;
                    rn = (insn >> 12) & 0xf;
                    rs = (insn >> 8) & 0xf;
                    rm = (insn) & 0xf;
                    op1 = (insn >> 20) & 0xf;
                    switch (op1) {
                    case 0: case 1: case 2: case 3: case 6:
                        /* 32 bit mul */
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        tcg_gen_mul_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        if (insn & (1 << 22)) {
                            /* Subtract (mls) */
                            ARCH(6T2);
                            tmp2 = load_reg(s, rn);
                            tcg_gen_sub_i32(tmp, tmp2, tmp);
                            tcg_temp_free_i32(tmp2);
                        } else if (insn & (1 << 21)) {
                            /* Add */
                            tmp2 = load_reg(s, rn);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        if (insn & (1 << 20))
                            gen_logic_CC(tmp);
                        store_reg(s, rd, tmp);
                        break;
                    case 4:
                        /* 64 bit mul double accumulate (UMAAL) */
                        ARCH(6);
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                        gen_addq_lo(s, tmp64, rn);
                        gen_addq_lo(s, tmp64, rd);
                        gen_storeq_reg(s, rn, rd, tmp64);
                        tcg_temp_free_i64(tmp64);
                        break;
                    case 8: case 9: case 10: case 11:
                    case 12: case 13: case 14: case 15:
                        /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            tmp64 = gen_muls_i64_i32(tmp, tmp2);
                        } else {
                            tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                        }
                        if (insn & (1 << 21)) { /* mult accumulate */
                            gen_addq(s, tmp64, rn, rd);
                        }
                        if (insn & (1 << 20)) {
                            gen_logicq_cc(tmp64);
                        }
                        gen_storeq_reg(s, rn, rd, tmp64);
                        tcg_temp_free_i64(tmp64);
                        break;
                    default:
                        goto illegal_op;
                    }
                } else {
                    rn = (insn >> 16) & 0xf;
                    rd = (insn >> 12) & 0xf;
                    if (insn & (1 << 23)) {
                        /* load/store exclusive */
                        op1 = (insn >> 21) & 0x3;
                        if (op1)
                            ARCH(6K);
                        else
                            ARCH(6);
                        addr = tcg_temp_local_new_i32();
                        load_reg_var(s, addr, rn);
                        if (insn & (1 << 20)) {
                            switch (op1) {
                            case 0: /* ldrex */
                                gen_load_exclusive(s, rd, 15, addr, 2);
                                break;
                            case 1: /* ldrexd */
                                gen_load_exclusive(s, rd, rd + 1, addr, 3);
                                break;
                            case 2: /* ldrexb */
                                gen_load_exclusive(s, rd, 15, addr, 0);
                                break;
                            case 3: /* ldrexh */
                                gen_load_exclusive(s, rd, 15, addr, 1);
                                break;
                            default:
                                abort();
                            }
                        } else {
                            rm = insn & 0xf;
                            switch (op1) {
                            case 0: /* strex */
                                gen_store_exclusive(s, rd, rm, 15, addr, 2);
                                break;
                            case 1: /* strexd */
                                gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
                                break;
                            case 2: /* strexb */
                                gen_store_exclusive(s, rd, rm, 15, addr, 0);
                                break;
                            case 3: /* strexh */
                                gen_store_exclusive(s, rd, rm, 15, addr, 1);
                                break;
                            default:
                                abort();
                            }
                        }
                        tcg_temp_free(addr);
                    } else {
                        /* SWP instruction */
                        rm = (insn) & 0xf;

                        /* ??? This is not really atomic.  However we know
                           we never have multiple CPUs running in parallel,
                           so it is good enough.  */
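                        /* Added note (illustrative, not in the original
                         * source): for SWP rd, rm, [rn] the sequence below
                         * performs tmp2 = [rn]; [rn] = rm; rd = tmp2, i.e.
                         * an unlocked read-modify-write of the addressed
                         * word (or byte when bit 22 is set).
                         */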
                        addr = load_reg(s, rn);
                        tmp = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            tmp2 = gen_ld8u(addr, IS_USER(s));
                            gen_st8(tmp, addr, IS_USER(s));
                        } else {
                            tmp2 = gen_ld32(addr, IS_USER(s));
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_temp_free_i32(addr);
                        store_reg(s, rd, tmp2);
                    }
                }
            } else {
                int address_offset;
                int load;
                /* Misc load/store */
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                addr = load_reg(s, rn);
                if (insn & (1 << 24))
                    gen_add_datah_offset(s, insn, 0, addr);
                address_offset = 0;
                if (insn & (1 << 20)) {
                    /* load */
                    switch(sh) {
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld8s(addr, IS_USER(s));
                        break;
                    default:
                    case 3:
                        tmp = gen_ld16s(addr, IS_USER(s));
                        break;
                    }
                    load = 1;
                } else if (sh & 2) {
                    ARCH(5TE);
                    /* doubleword */
                    if (sh & 1) {
                        /* store */
                        tmp = load_reg(s, rd);
                        gen_st32(tmp, addr, IS_USER(s));
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = load_reg(s, rd + 1);
                        gen_st32(tmp, addr, IS_USER(s));
                        load = 0;
                    } else {
                        /* load */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, rd, tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = gen_ld32(addr, IS_USER(s));
                        rd++;
                        load = 1;
                    }
                    address_offset = -4;
                } else {
                    /* store */
                    tmp = load_reg(s, rd);
                    gen_st16(tmp, addr, IS_USER(s));
                    load = 0;
                }
                /* Perform base writeback before the loaded value to
                   ensure correct behavior with overlapping index registers.
                   ldrd with base writeback is undefined if the
                   destination and index registers overlap.  */
                if (!(insn & (1 << 24))) {
                    gen_add_datah_offset(s, insn, address_offset, addr);
                    store_reg(s, rn, addr);
                } else if (insn & (1 << 21)) {
                    if (address_offset)
                        tcg_gen_addi_i32(addr, addr, address_offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                if (load) {
                    /* Complete the load.  */
                    store_reg(s, rd, tmp);
                }
            }
            break;
        case 0x4:
        case 0x5:
            goto do_ldst;
        case 0x6:
        case 0x7:
            if (insn & (1 << 4)) {
                ARCH(6);
                /* Armv6 Media instructions.  */
                rm = insn & 0xf;
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                rs = (insn >> 8) & 0xf;
                switch ((insn >> 23) & 3) {
                case 0: /* Parallel add/subtract.  */
                    op1 = (insn >> 20) & 7;
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    sh = (insn >> 5) & 7;
                    if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                        goto illegal_op;
                    gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                    break;
                case 1:
                    if ((insn & 0x00700020) == 0) {
                        /* Halfword pack.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            /* pkhtb */
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp2, tmp2, shift);
                            tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                            tcg_gen_ext16u_i32(tmp2, tmp2);
                        } else {
                            /* pkhbt */
                            if (shift)
                                tcg_gen_shli_i32(tmp2, tmp2, shift);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        }
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00200020) == 0x00200000) {
                        /* [us]sat */
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp, tmp, shift);
                        } else {
                            tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat(tmp, tmp, tmp2);
                        else
                            gen_helper_ssat(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00300fe0) == 0x00200f20) {
                        /* [us]sat16 */
                        tmp = load_reg(s, rm);
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat16(tmp, tmp, tmp2);
                        else
                            gen_helper_ssat16(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                        /* Select bytes.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        tmp3 = tcg_temp_new_i32();
                        tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
                        gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                        tcg_temp_free_i32(tmp3);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x000003e0) == 0x00000060) {
                        tmp = load_reg(s, rm);
                        shift = (insn >> 10) & 3;
                        /* ??? In many cases it's not necessary to do a
                           rotate, a shift is sufficient.  */
                        if (shift != 0)
                            tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                        op1 = (insn >> 20) & 7;
                        switch (op1) {
                        case 0: gen_sxtb16(tmp);  break;
                        case 2: gen_sxtb(tmp);    break;
                        case 3: gen_sxth(tmp);    break;
                        case 4: gen_uxtb16(tmp);  break;
                        case 6: gen_uxtb(tmp);    break;
                        case 7: gen_uxth(tmp);    break;
                        default: goto illegal_op;
                        }
                        if (rn != 15) {
                            tmp2 = load_reg(s, rn);
                            if ((op1 & 3) == 0) {
                                gen_add16(tmp, tmp2);
                            } else {
                                tcg_gen_add_i32(tmp, tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            }
                        }
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                        /* rev */
                        tmp = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            if (insn & (1 << 7)) {
                                gen_revsh(tmp);
                            } else {
                                ARCH(6T2);
                                gen_helper_rbit(tmp, tmp);
                            }
                        } else {
                            if (insn & (1 << 7))
                                gen_rev16(tmp);
                            else
                                tcg_gen_bswap32_i32(tmp, tmp);
                        }
                        store_reg(s, rd, tmp);
                    } else {
                        goto illegal_op;
                    }
                    break;
                case 2: /* Multiplies (Type 3).  */
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        /* Signed multiply most significant [accumulate].
                           (SMMUL, SMMLA, SMMLS) */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);

                        if (rd != 15) {
                            tmp = load_reg(s, rd);
                            if (insn & (1 << 6)) {
                                tmp64 = gen_subq_msw(tmp64, tmp);
                            } else {
                                tmp64 = gen_addq_msw(tmp64, tmp);
                            }
                        }
                        if (insn & (1 << 5)) {
                            tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                        }
                        tcg_gen_shri_i64(tmp64, tmp64, 32);
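                        /* Added note (illustrative, not in the original
                         * source): adding 0x80000000 before the 32-bit
                         * right shift rounds the 64-bit product to the
                         * nearest most-significant word instead of
                         * truncating it (the R variants, e.g. SMMULR).
                         */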
                        tmp = tcg_temp_new_i32();
                        tcg_gen_trunc_i64_i32(tmp, tmp64);
                        tcg_temp_free_i64(tmp64);
                        store_reg(s, rn, tmp);
                    } else {
                        if (insn & (1 << 5))
                            gen_swap_half(tmp2);
                        gen_smul_dual(tmp, tmp2);
                        if (insn & (1 << 6)) {
                            /* This subtraction cannot overflow. */
                            tcg_gen_sub_i32(tmp, tmp, tmp2);
                        } else {
                            /* This addition cannot overflow 32 bits;
                             * however it may overflow considered as a signed
                             * operation, in which case we must set the Q flag.
                             */
                            gen_helper_add_setq(tmp, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        if (insn & (1 << 22)) {
                            /* smlald, smlsld */
                            tmp64 = tcg_temp_new_i64();
                            tcg_gen_ext_i32_i64(tmp64, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_addq(s, tmp64, rd, rn);
                            gen_storeq_reg(s, rd, rn, tmp64);
                            tcg_temp_free_i64(tmp64);
                        } else {
                            /* smuad, smusd, smlad, smlsd */
                            if (rd != 15) {
                                tmp2 = load_reg(s, rd);
                                gen_helper_add_setq(tmp, tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            }
                            store_reg(s, rn, tmp);
                        }
                    }
                    break;
                case 3:
                    op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
                    switch (op1) {
                    case 0: /* Unsigned sum of absolute differences.  */
                        ARCH(6);
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        gen_helper_usad8(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        if (rd != 15) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        store_reg(s, rn, tmp);
                        break;
                    case 0x20: case 0x24: case 0x28: case 0x2c:
                        /* Bitfield insert/clear.  */
                        ARCH(6T2);
                        shift = (insn >> 7) & 0x1f;
                        i = (insn >> 16) & 0x1f;
                        i = i + 1 - shift;
                        if (rm == 15) {
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, 0);
                        } else {
                            tmp = load_reg(s, rm);
                        }
                        if (i != 32) {
                            tmp2 = load_reg(s, rd);
                            gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
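                            /* Added note (illustrative, not in the original
                             * source): e.g. BFI r0, r1, #8, #4 copies the
                             * low 4 bits of r1 into bits 11:8 of r0; here
                             * shift == 8 and the mask is (1u << 4) - 1.
                             */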
                            tcg_temp_free_i32(tmp2);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                    case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                        ARCH(6T2);
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        i = ((insn >> 16) & 0x1f) + 1;
                        if (shift + i > 32)
                            goto illegal_op;
                        if (i < 32) {
                            if (op1 & 0x20) {
                                gen_ubfx(tmp, shift, (1u << i) - 1);
                            } else {
                                gen_sbfx(tmp, shift, i);
                            }
                        }
                        store_reg(s, rd, tmp);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                }
                break;
            }
        do_ldst:
            /* Check for undefined extension instructions
             * per the ARM Bible IE:
             * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
             */
            sh = (0xf << 20) | (0xf << 4);
            if (op1 == 0x7 && ((insn & sh) == sh))
            {
                goto illegal_op;
            }
            /* load/store byte/word */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            tmp2 = load_reg(s, rn);
            i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
            if (insn & (1 << 24))
                gen_add_data_offset(s, insn, tmp2);
            if (insn & (1 << 20)) {
                /* load */
                if (insn & (1 << 22)) {
                    tmp = gen_ld8u(tmp2, i);
                } else {
                    tmp = gen_ld32(tmp2, i);
                }
            } else {
                /* store */
                tmp = load_reg(s, rd);
                if (insn & (1 << 22))
                    gen_st8(tmp, tmp2, i);
                else
                    gen_st32(tmp, tmp2, i);
            }
            if (!(insn & (1 << 24))) {
                gen_add_data_offset(s, insn, tmp2);
                store_reg(s, rn, tmp2);
            } else if (insn & (1 << 21)) {
                store_reg(s, rn, tmp2);
            } else {
                tcg_temp_free_i32(tmp2);
            }
            if (insn & (1 << 20)) {
                /* Complete the load.  */
                if (rd == 15)
                    gen_bx(s, tmp);
                else
                    store_reg(s, rd, tmp);
            }
            break;
        case 0x08:
        case 0x09:
            {
                int j, n, user, loaded_base;
                TCGv loaded_var;
                /* load/store multiple words */
                /* XXX: store correct base if write back */
                user = 0;
                if (insn & (1 << 22)) {
                    if (IS_USER(s))
                        goto illegal_op; /* only usable in supervisor mode */

                    if ((insn & (1 << 15)) == 0)
                        user = 1;
                }
                rn = (insn >> 16) & 0xf;
                addr = load_reg(s, rn);

                /* compute total size */
                loaded_base = 0;
                TCGV_UNUSED(loaded_var);
                n = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        n++;
                }
                /* XXX: test invalid n == 0 case ? */
                if (insn & (1 << 23)) {
                    if (insn & (1 << 24)) {
                        /* pre increment */
                        tcg_gen_addi_i32(addr, addr, 4);
                    } else {
                        /* post increment */
                    }
                } else {
                    if (insn & (1 << 24)) {
                        /* pre decrement */
                        tcg_gen_addi_i32(addr, addr, -(n * 4));
                    } else {
                        /* post decrement */
                        if (n != 1)
                            tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                    }
                }
                j = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i)) {
                        if (insn & (1 << 20)) {
                            /* load */
                            tmp = gen_ld32(addr, IS_USER(s));
                            if (i == 15) {
                                gen_bx(s, tmp);
                            } else if (user) {
                                tmp2 = tcg_const_i32(i);
                                gen_helper_set_user_reg(tmp2, tmp);
                                tcg_temp_free_i32(tmp2);
                                tcg_temp_free_i32(tmp);
                            } else if (i == rn) {
                                loaded_var = tmp;
                                loaded_base = 1;
                            } else {
                                store_reg(s, i, tmp);
                            }
                        } else {
                            /* store */
                            if (i == 15) {
                                /* special case: r15 = PC + 8 */
                                val = (long)s->pc + 4;
                                tmp = tcg_temp_new_i32();
                                tcg_gen_movi_i32(tmp, val);
                            } else if (user) {
                                tmp = tcg_temp_new_i32();
                                tmp2 = tcg_const_i32(i);
                                gen_helper_get_user_reg(tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            } else {
                                tmp = load_reg(s, i);
                            }
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        j++;
                        /* no need to add after the last transfer */
                        if (j != n)
                            tcg_gen_addi_i32(addr, addr, 4);
                    }
                }
                if (insn & (1 << 21)) {
                    /* write back */
                    if (insn & (1 << 23)) {
                        if (insn & (1 << 24)) {
                            /* pre increment */
                        } else {
                            /* post increment */
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                    } else {
                        if (insn & (1 << 24)) {
                            /* pre decrement */
                            if (n != 1)
                                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                        } else {
                            /* post decrement */
                            tcg_gen_addi_i32(addr, addr, -(n * 4));
                        }
                    }
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if ((insn & (1 << 22)) && !user) {
                    /* Restore CPSR from SPSR.  */
                    tmp = load_cpu_field(spsr);
                    gen_set_cpsr(tmp, 0xffffffff);
                    tcg_temp_free_i32(tmp);
                    s->is_jmp = DISAS_UPDATE;
                }
            }
            break;
        case 0xa:
        case 0xb:
            {
                int32_t offset;

                /* branch (and link) */
                val = (int32_t)s->pc;
                if (insn & (1 << 24)) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, val);
                    store_reg(s, 14, tmp);
                }
                offset = (((int32_t)insn << 8) >> 8);
                val += (offset << 2) + 4;
                gen_jmp(s, val);
            }
            break;
        case 0xc:
        case 0xd:
        case 0xe:
            /* Coprocessor.  */
            if (disas_coproc_insn(env, s, insn))
                goto illegal_op;
            break;
        case 0xf:
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        default:
        illegal_op:
            gen_exception_insn(s, 4, EXCP_UDEF);
            break;
        }
    }
}
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
{
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        tcg_gen_and_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 1: /* bic */
        tcg_gen_andc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 2: /* orr */
        tcg_gen_or_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 3: /* orn */
        tcg_gen_orc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 4: /* eor */
        tcg_gen_xor_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 8: /* add */
        if (conds)
            gen_helper_add_cc(t0, t0, t1);
        else
            tcg_gen_add_i32(t0, t0, t1);
        break;
    case 10: /* adc */
        if (conds)
            gen_helper_adc_cc(t0, t0, t1);
        else
            gen_adc(t0, t1);
        break;
    case 11: /* sbc */
        if (conds)
            gen_helper_sbc_cc(t0, t0, t1);
        else
            gen_sub_carry(t0, t0, t1);
        break;
    case 13: /* sub */
        if (conds)
            gen_helper_sub_cc(t0, t0, t1);
        else
            tcg_gen_sub_i32(t0, t0, t1);
        break;
    case 14: /* rsb */
        if (conds)
            gen_helper_sub_cc(t0, t1, t0);
        else
            tcg_gen_sub_i32(t0, t1, t0);
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        return 1;
    }
    if (logic_cc) {
        gen_logic_CC(t0);
        if (shifter_out)
            gen_set_CF_bit31(t1);
    }
    return 0;
}
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal.  */
static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;
    TCGv tmp;
    TCGv tmp2;
    TCGv tmp3;
    TCGv addr;
    TCGv_i64 tmp64;
    int op;
    int shiftop;
    int conds;
    int logic_cc;

    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature (env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior.  */
        insn = insn_hw1;
        if ((insn & (1 << 12)) == 0) {
            /* Second half of blx.  */
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            return 0;
        }
        if (insn & (1 << 11)) {
            /* Second half of bl.  */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            return 0;
        }
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary.  Implement it as two
               16-bit instructions in case the second half causes a
               prefetch abort.  */
            offset = ((int32_t)insn << 21) >> 9;
            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
            return 0;
        }
        /* Fall through to 32-bit decode.  */
    }
    insn = lduw_code(s->pc);
    s->pc += 2;
    insn |= (uint32_t)insn_hw1 << 16;

    if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }

    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    rm = insn & 0xf;
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        abort();
    case 4:
        if (insn & (1 << 22)) {
            /* Other load/store, table branch.  */
            if (insn & 0x01200000) {
                /* Load/store doubleword.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~3);
                } else {
                    addr = load_reg(s, rn);
                }
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, offset);
                    offset = 0;
                }
                if (insn & (1 << 20)) {
                    /* ldrd */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rd, tmp);
                } else {
                    /* strd */
                    tmp = load_reg(s, rs);
                    gen_st32(tmp, addr, IS_USER(s));
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                if (insn & (1 << 21)) {
                    /* Base writeback.  */
                    if (rn == 15)
                        goto illegal_op;
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            } else if ((insn & (1 << 23)) == 0) {
                /* Load/store exclusive word.  */
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                } else {
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                }
                tcg_temp_free(addr);
            } else if ((insn & (1 << 6)) == 0) {
                /* Table Branch.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc);
                } else {
                    addr = load_reg(s, rn);
                }
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    /* tbh */
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    tmp = gen_ld16u(addr, IS_USER(s));
                } else { /* tbb */
                    tcg_temp_free_i32(tmp);
                    tmp = gen_ld8u(addr, IS_USER(s));
                }
                tcg_temp_free_i32(addr);
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, s->pc);
                store_reg(s, 15, tmp);
            } else {
                /* Load/store exclusive byte/halfword/doubleword.  */
                ARCH(7);
                op = (insn >> 4) & 0x3;
                if (op == 2) {
                    goto illegal_op;
                }
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                } else {
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                }
                tcg_temp_free(addr);
            }
        } else {
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* Not available in user mode.  */
                if (IS_USER(s))
                    goto illegal_op;
                if (insn & (1 << 20)) {
                    /* rfe */
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2.  */
                    tmp = gen_ld32(addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = gen_ld32(addr, 0);
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, -4);
                        }
                        store_reg(s, rn, addr);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                    gen_rfe(s, tmp, tmp2);
                } else {
                    /* srs */
                    op = (insn & 0x1f);
                    addr = tcg_temp_new_i32();
                    tmp = tcg_const_i32(op);
                    gen_helper_get_r13_banked(addr, cpu_env, tmp);
                    tcg_temp_free_i32(tmp);
                    if ((insn & (1 << 24)) == 0) {
                        tcg_gen_addi_i32(addr, addr, -8);
                    }
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    gen_helper_cpsr_read(tmp);
                    gen_st32(tmp, addr, 0);
                    if (insn & (1 << 21)) {
                        if ((insn & (1 << 24)) == 0) {
                            tcg_gen_addi_i32(addr, addr, -4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                        tmp = tcg_const_i32(op);
                        gen_helper_set_r13_banked(cpu_env, tmp, addr);
                        tcg_temp_free_i32(tmp);
                    }
                    tcg_temp_free_i32(addr);
                }
            } else {
                int i;
                /* Load/store multiple.  */
                addr = load_reg(s, rn);
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                }

                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                        continue;
                    if (insn & (1 << 20)) {
                        /* Load.  */
                        tmp = gen_ld32(addr, IS_USER(s));
                        if (i == 15) {
                            gen_bx(s, tmp);
                        } else {
                            store_reg(s, i, tmp);
                        }
                    } else {
                        /* Store.  */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    tcg_gen_addi_i32(addr, addr, 4);
                }
                if (insn & (1 << 21)) {
                    /* Base register writeback.  */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    }
                    /* Fault if writeback register is in register list.  */
                    if (insn & (1 << rn))
                        goto illegal_op;
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    case 5:
        op = (insn >> 21) & 0xf;
        if (op == 6) {
            /* Halfword pack.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
            if (insn & (1 << 5)) {
                /* pkhtb */
                if (shift == 0)
                    shift = 31;
                tcg_gen_sari_i32(tmp2, tmp2, shift);
                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                tcg_gen_ext16u_i32(tmp2, tmp2);
            } else {
                /* pkhbt */
                if (shift)
                    tcg_gen_shli_i32(tmp2, tmp2, shift);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
            }
            tcg_gen_or_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* Data processing register constant shift.  */
            if (rn == 15) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            } else {
                tmp = load_reg(s, rn);
            }
            tmp2 = load_reg(s, rm);

            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
                goto illegal_op;
            tcg_temp_free_i32(tmp2);
            if (rd != 15) {
                store_reg(s, rd, tmp);
            } else {
                tcg_temp_free_i32(tmp);
            }
        }
        break;
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            if (logic_cc)
                gen_logic_CC(tmp);
            store_reg_bx(env, s, rd, tmp);
            break;
        case 1: /* Sign/zero extend.  */
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
            if (shift != 0)
                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default: goto illegal_op;
            }
            if (rn != 15) {
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 2: /* SIMD add/subtract.  */
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            if (op < 4) {
                /* Saturating add/subtract.  */
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if (op & 1)
                    gen_helper_double_saturate(tmp, tmp);
                if (op & 2)
                    gen_helper_sub_saturate(tmp, tmp2, tmp);
                else
                    gen_helper_add_saturate(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                tmp = load_reg(s, rn);
                switch (op) {
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                    break;
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                    break;
                case 0x09: /* rev16 */
                    gen_rev16(tmp);
                    break;
                case 0x0b: /* revsh */
                    gen_revsh(tmp);
                    break;
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    break;
                case 0x18: /* clz */
                    gen_helper_clz(tmp, tmp);
                    break;
                default:
                    goto illegal_op;
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (op)
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    else
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                if (op)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (insn & (1 << 22)) {
                    /* This subtraction cannot overflow. */
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
                    gen_helper_add_setq(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 3: /* 32 * 16 -> 32msb */
                if (op)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                if (rs != 15) {
                    tmp = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    } else {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                    }
                }
                if (insn & (1 << 4)) {
                    tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                }
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                break;
            case 7: /* Unsigned sum of absolute differences.  */
                gen_helper_usad8(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!arm_feature(env, ARM_FEATURE_DIV))
                    goto illegal_op;
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                /* BUGFIX */
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        tcg_temp_free_i32(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            }
            break;
        }
        break;
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn (env, s, insn))
                goto illegal_op;
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
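                /* Added note (illustrative, not in the original source):
                 * bits 13 (J1) and 11 (J2) of the second halfword combine
                 * with the sign bit S as I1 = NOT(J1 XOR S) and
                 * I2 = NOT(J2 XOR S); the two XORs above fold these into
                 * offset bits 23 and 22, which the sign extension pre-set
                 * to S.
                 */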
                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    gen_bx_im(s, offset);
                }
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    goto illegal_op; /* not implemented.  */
                } else {
                    op = (insn >> 20) & 7;
                    switch (op) {
                    case 0: /* msr cpsr.  */
                        if (IS_M(env)) {
                            tmp = load_reg(s, rn);
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_msr(cpu_env, addr, tmp);
                            tcg_temp_free_i32(addr);
                            tcg_temp_free_i32(tmp);
                            break;
                        }
                        /* fall through */
                    case 1: /* msr spsr.  */
                        if (IS_M(env))
                            goto illegal_op;
                        tmp = load_reg(s, rn);
                        if (gen_set_psr(s,
                              msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                              op == 1, tmp))
                            goto illegal_op;
                        break;
                    case 2: /* cps, nop-hint.  */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                        }
                        /* Implemented as NOP in user mode.  */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_set_psr_im(s, offset, 0, imm);
                        }
                        break;
                    case 3: /* Special control operations.  */
                        ARCH(7);
                        op = (insn >> 4) & 0xf;
                        switch (op) {
                        case 2: /* clrex */
                            gen_clrex(s);
                            break;
                        case 4: /* dsb */
                        case 5: /* dmb */
                        case 6: /* isb */
                            /* These execute as NOPs.  */
                            break;
                        default:
                            goto illegal_op;
                        }
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.  */
                        tmp = load_reg(s, rn);
                        gen_bx(s, tmp);
                        break;
                    case 5: /* Exception return.  */
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        if (rn != 14 || rd != 15) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rn);
                        tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                        gen_exception_return(s, tmp);
                        break;
                    case 6: /* mrs cpsr.  */
                        tmp = tcg_temp_new_i32();
                        if (IS_M(env)) {
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_mrs(tmp, cpu_env, addr);
                            tcg_temp_free_i32(addr);
                        } else {
                            gen_helper_cpsr_read(tmp);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 7: /* mrs spsr.  */
                        /* Not accessible in user mode.  */
                        if (IS_USER(s) || IS_M(env))
                            goto illegal_op;
                        tmp = load_cpu_field(spsr);
                        store_reg(s, rd, tmp);
                        break;
                    }
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
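                /* The fields above reassemble the B<cond>.W immediate,
                 * which the encoding scatters as S:J2:J1:imm6:imm11:'0'
                 * (a 21-bit signed offset).  For instance imm11 = 2 with
                 * all other fields zero yields offset = 4.
                 */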
            }
        } else {
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_sbfx(tmp, shift, imm);
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_ubfx(tmp, shift, (1u << imm) - 1);
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
                            tcg_temp_free_i32(tmp2);
                        }
                        break;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_usat16(tmp, tmp, tmp2);
                            else
                                gen_helper_usat(tmp, tmp, tmp2);
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_ssat16(tmp, tmp, tmp2);
                            else
                                gen_helper_ssat(tmp, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                        }
                    }
                    store_reg(s, rd, tmp);
                }
            } else {
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm |= imm << 16;
                    imm <<= 8;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
        }
        break;
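        /* Modified-immediate examples: imm8 = 0xAB with control field 1
         * expands to 0x00AB00AB, field 2 to 0xAB00AB00, field 3 to
         * 0xABABABAB.  A rotated constant with field value 8 and
         * imm8 = 0x2A becomes ROR(0xAA, 16) = 0x00AA0000, matching
         * imm = 0xAA << (32 - 16) above.
         */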
    case 12: /* Load/store single data item.  */
        {
        int postinc = 0;
        int writeback = 0;
        int user;
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            break;
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (rs == 15) {
            if (!(insn & (1 << 20))) {
                goto illegal_op;
            }
            if (op != 2) {
                /* Byte or halfword load space with dest == r15 : memory hints.
                 * Catch them early so we don't emit pointless addressing code.
                 * This space is a mix of:
                 *  PLD/PLDW/PLI, which we implement as NOPs (note that unlike
                 *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
                 *     cores)
                 *  unallocated hints, which must be treated as NOPs
                 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
                 *     which is easiest for the decoding logic
                 *  Some space which must UNDEF
                 */
                int op1 = (insn >> 23) & 3;
                int op2 = (insn >> 6) & 0x3f;
                if (rn == 15) {
                    /* UNPREDICTABLE or unallocated hint */
                    return 0;
                }
                if (op1 & 1) {
                    return 0; /* PLD* or unallocated hint */
                }
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return 0; /* PLD* or unallocated hint */
                }
                /* UNDEF space, or an UNPREDICTABLE */
                return 1;
            }
        }
        user = IS_USER(s);
        if (rn == 15) {
            addr = tcg_temp_new_i32();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                imm = insn & 0xff;
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3) {
                        tcg_temp_free_i32(addr);
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    break;
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    user = 1;
                    break;
                case 0x9: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xb: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 0xd: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xf: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    writeback = 1;
                    break;
                default:
                    tcg_temp_free_i32(addr);
                    goto illegal_op;
                }
            }
        }
        if (insn & (1 << 20)) {
            /* Load.  */
            switch (op) {
            case 0: tmp = gen_ld8u(addr, user); break;
            case 4: tmp = gen_ld8s(addr, user); break;
            case 1: tmp = gen_ld16u(addr, user); break;
            case 5: tmp = gen_ld16s(addr, user); break;
            case 2: tmp = gen_ld32(addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            if (rs == 15) {
                gen_bx(s, tmp);
            } else {
                store_reg(s, rs, tmp);
            }
        } else {
            /* Store.  */
            tmp = load_reg(s, rs);
            switch (op) {
            case 0: gen_st8(tmp, addr, user); break;
            case 1: gen_st16(tmp, addr, user); break;
            case 2: gen_st32(tmp, addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
        }
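        /* Writeback: for a post-indexed access such as STR r0, [r1], #4
         * (form 0xb above) the store uses the unmodified base, and imm
         * is only folded into addr here, just before it is written back
         * to rn.
         */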
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
        }
        break;
    }
    return 0;
illegal_op:
    return 1;
}
static void disas_thumb_insn(CPUState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }
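    /* Inside an IT block each insn is thus bracketed as "branch over the
     * body if the condition fails; body; condlabel:", e.g. for firstcond
     * EQ the generated test is on NE.
     */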
    insn = lduw_code(s->pc);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:
        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
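        /* Note the condexec_mask checks: Thumb ADDS/SUBS and the shift
         * ops set the flags only outside an IT block, so the
         * flag-setting helpers (gen_helper_*_cc) are used only when the
         * mask is zero.
         */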
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
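        /* E.g. LDR r0, [pc, #8] at address 0x1000: s->pc is 0x1002 after
         * the fetch, so val = 0x1002 + 2 + 8 = 0x100c, matching the
         * architectural Align(PC, 4) + 8 = 0x1004 + 8; the & ~2 handles
         * the case where the insn sits at a non word-aligned address.
         */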
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                gen_bx(s, tmp);
                break;
            }
            break;
        }
        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc(tmp, tmp2);
            else
                gen_helper_adc_cc(tmp, tmp, tmp2);
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
            else
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_helper_sub_cc(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_helper_sub_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_helper_add_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;
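        /* For reference, the 4-bit op field decoded above maps to:
         * 0 AND, 1 EOR, 2 LSL, 3 LSR, 4 ASR, 5 ADC, 6 SBC, 7 ROR,
         * 8 TST, 9 NEG, 10 CMP, 11 CMN, 12 ORR, 13 MUL, 14 BIC, 15 MVN.
         */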
    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        tcg_temp_free_i32(addr);
        break;
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
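    /* (insn >> 4) & 0x7c extracts the imm5 field (bits [10:6]) already
     * scaled by the word size: e.g. imm5 = 3 gives val = 12, so the
     * encoding for LDR r0, [r1, #12] needs no further shift.
     */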
    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;
    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;
        case 2: /* sign/zero extend.  */
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
                gen_bx(s, tmp);
            break;
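            /* E.g. PUSH {r0, r2, lr}: offset = 4 (lr, via bit 8) plus
             * 2 * 4 for r0 and r2 = 12, so SP is dropped by 12 up front
             * and the registers are then stored at ascending addresses.
             */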
        case 1: case 3: case 9: case 11: /* czb (cbz/cbnz) */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
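            /* E.g. CBZ r3, target (bit 11 clear): the brcondi on NE above
             * branches to condlabel (the fall-through path) when r3 != 0,
             * so only a zero r3 reaches the gen_jmp.
             */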
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;
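            /* E.g. IT EQ (0xbf08): firstcond = 0000, so condexec_cond = 0
             * and condexec_mask = 0b01000 (firstcond[0] followed by the
             * 4-bit mask).  The low bit of firstcond is kept in the mask
             * so the per-insn advance can shift it into condexec_cond.
             */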
        case 0xe: /* bkpt */
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;

        case 0xa: /* rev */
            rd = insn & 0x7;
            rn = (insn >> 3) & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: /* cps */
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                tcg_temp_free_i32(tmp);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;
                gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
            }
            break;
        default:
            goto undef;
        }
        break;
    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback.  */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
        break;
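        /* E.g. LDMIA r0!, {r1, r2}: r0 is not in the register list, so
         * the incremented address is written back; had r0 been listed,
         * the loaded value takes precedence and addr is simply freed.
         */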
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;
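        /* E.g. BEQ .-4 encoded as 0xd0fe at address 0x1000: imm8 = 0xfe
         * sign-extends to -2, s->pc is 0x1002, so val = 0x1004 + (-4)
         * = 0x1000, a branch back to the instruction itself.
         */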
    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF);
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF);
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. gen_pc_load[] then uses this to restore
     * the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUState is correct in the
     * middle of a TB.
     */
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4)
                                        | (dc->condexec_mask >> 1);
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc->pc);
        }
        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
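        /* Worked example of the condexec advance above for a plain IT EQ:
         * the IT insn sets condexec_mask = 0b01000; the advance then runs
         * once for IT itself (mask -> 0b10000), and once more after the
         * single conditional insn (mask -> 0, so condexec_cond is cleared
         * and the block ends).
         */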
        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }
done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}