/*
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
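/* Usage sketch (illustrative): a decoder arm for a Thumb-2 only encoding can
   simply say "ARCH(6T2);" -- if the feature bit is absent this jumps to the
   illegal_op label of the enclosing disassembly function. */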
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    int condlabel;
    /* Thumb-2 conditional execution bits. */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated. */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME: These should be removed. */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register. The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
/* Byteswap each halfword. */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
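
/* Worked example (illustrative): for var = 0xAABBCCDD,
     tmp = (var >> 8) & 0x00ff00ff = 0x00AA00CC
     var = (var << 8) & 0xff00ff00 = 0xBB00DD00
     var | tmp                     = 0xBBAADDCC
   i.e. each 16-bit half has had its bytes swapped. */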
/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
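
/* Worked example (illustrative): var = 0x1234C3A5 -> 0x0000C3A5 -> 0x0000A5C3
   -> 0xFFFFA5C3: the low halfword is byteswapped, then sign extended. */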
/* Unsigned bitfield extract. */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract. */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
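
/* Worked example (illustrative) of the xor/sub sign-extension trick above:
   extracting a 4-bit signed field whose value bits are 0xC gives
   signbit = 8, so (0xC ^ 8) - 8 = 4 - 8 = -4, the correct sign extension
   of binary 1100. */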
/* Bitfield insertion. Insert val into base. Clobbers base and val. */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
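
/* Usage sketch (illustrative): inserting a 4-bit field at bit 8 is
   gen_bfi(dest, base, val, 8, 0xf), i.e.
   dest = (base & ~0x0f00) | ((val & 0xf) << 8). */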
/* Return (b << 32) + a. Mark inputs as dead. */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply. */
/* 32x32->64 multiply. Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
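
/* Worked example (illustrative) for one halfword lane: t0 = 0x8000 and
   t1 = 0x7fff give tmp = 0x8000; the masked add yields 0x7fff and the
   final xor restores bit 15, producing 0xffff with no carry leaking
   into the upper lane. */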
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF. */
static void gen_adc(TCGv t0, TCGv t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* FIXME: Implement this natively. */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
static void shifter_out_im(TCGv var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
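
/* shifter_out_im computes the shifter carry-out: it leaves in cpu_CF the
   bit of var that is shifted out last, e.g. bit (shift - 1) of the source
   for LSR #shift. A shift argument of 0 selects bit 0, and for 31 the
   final masking is unnecessary because only one bit remains. */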
/* Shift by immediate. Includes special handling for shift == 0. */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = tcg_temp_new_i32();
            /* RRX: capture the old carry for bit 31 before the carry-out
               below overwrites cpu_CF. */
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar(var, cpu_env, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
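
/* Usage sketch (illustrative): an operand of the form "Rm, LSL #3" is
   emitted as gen_arm_shift_im(var, 0, 3, conds), while "Rm, ROR Rs" loads
   Rs into a temporary and calls gen_arm_shift_reg(var, 3, shift, conds);
   shiftop values 0-3 map to LSL/LSR/ASR/ROR as in the encoding. */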
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 1: /* ne: !Z */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        break;
    case 2: /* cs: C */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
        break;
    case 3: /* cc: !C */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        break;
    case 4: /* mi: N */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
        break;
    case 5: /* pl: !N */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
        break;
    case 6: /* vs: V */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
        break;
    case 7: /* vc: !V */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
}
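
/* Flag representation assumed above: cpu_CF holds the carry as 0/1, cpu_NF
   and cpu_VF hold their flag in bit 31, and cpu_ZF is zero exactly when the
   Z flag is set. E.g. for "hi" (C && !Z) the first branch skips to inv when
   the carry is clear, and the taken branch then requires cpu_ZF != 0. */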
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
/* Set PC and Thumb state from var. var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
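
/* Usage sketch (illustrative): the gen_ld* helpers return a fresh temporary
   owned by the caller, and the gen_st* helpers consume their value operand:
       tmp = gen_ld32(addr, IS_USER(s));
       store_reg(s, rd, tmp);    (store_reg frees tmp)
 */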
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
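
/* Example (illustrative): in the halfword/doubleword forms the 8-bit
   immediate is split across insn[3:0] and insn[11:8] and reassembled above
   as (insn & 0xf) | ((insn >> 4) & 0xf0); bit 23 selects add vs subtract. */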
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
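
/* Example (illustrative): single-precision register s5 maps to the upper
   32-bit half of vfp.regs[2] (reg >> 1 == 2, reg & 1 set); the CPU_DoubleU
   union hides the host byte order. */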
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT (1 << 20)
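
/* Bit 20 of a coprocessor instruction distinguishes reads from writes, so
   "insn & ARM_CP_RW_BIT" below separates e.g. TMRRC/MRA/loads from
   TMCRR/MAR/stores. */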
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
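
/* Example (illustrative): the 8-bit offset in insn[7:0] is shifted left by
   (insn >> 7) & 2, i.e. left unscaled normally and multiplied by 4 when
   insn bit 8 is set; bits 24/21 select pre/post indexing and writeback,
   as decoded above. */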
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
   (i.e. an undefined instruction). */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                 /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                    /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                            /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {                            /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {                            /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {                            /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                         /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                         /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through. */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                         /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                         /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                         /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        /* NB: WANDN is AND with complement; neg here looks suspect and
           arguably wants tcg_gen_not_i64. */
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                         /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                             /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
   (i.e. an undefined instruction). */
static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
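/* Decode example: a single-precision operand packs its register number as
 * Vd:D, so with insn bits [15:12] = 0x5 and bit 22 = 1, VFP_SREG_D yields
 * (0x5 << 1) | 1 = 11, i.e. s11.  Double precision (VFP3) uses D:Vd
 * instead, so VFP_DREG_D gives 0x5 | (1 << 4) = 21, i.e. d21.
 */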
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
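/* Replication pattern: after the ext8u the value is 0x000000ab; the first
 * shift/or pair makes 0x0000abab and the second makes 0xabababab, i.e. the
 * byte is duplicated into all four lanes of the 32-bit register.
 */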
static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv tmp;
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xff);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else {
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_MVFR))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
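            /* e.g. VADD has bit 23 = 0, bit 21 = 1, bit 20 = 1, bit 6 = 0,
             * which assembles to op = (0 << 3) | (1 << 2) | (1 << 1) | 0 = 6,
             * matching "case 6: add" below.  */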
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
                    /* VCVT from int is always from S reg regardless of dp bit.
                     * VCVT with immediate frac_bits has same format as SREG_M
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;
            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }
            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension,
                     * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
                     */
                    if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_feature(env, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 4: /* vcvtb.f32.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32 */
                        tmp = tcg_temp_new_i32();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32 */
                        tmp = tcg_temp_new_i32();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }
                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
                    /* VCVT double to int: always integer result. */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);
                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
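/* Chaining note: tcg_gen_exit_tb((tcg_target_long)tb + n) returns the TB
 * pointer with the jump slot index n encoded in its low bits (TBs are
 * aligned, so those bits are free); the execution loop uses this to patch
 * the direct jump, while exit_tb(0) means "no chaining".
 */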
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
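/* Used by the signed 16x16 multiplies (SMULBB/SMULBT/SMULTB/SMULTT and
 * friends): x and y select the bottom halfword (sign-extend) or the top
 * halfword (arithmetic shift right by 16) of each operand before the
 * 32-bit multiply.
 */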
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V4T))
        mask &= ~CPSR_T;
    if (!arm_feature(env, ARM_FEATURE_V5))
        mask &= ~CPSR_Q; /* V5TE in reality */
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
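/* The flags argument mirrors the MSR field mask {c,x,s,f}: bit 0 selects
 * PSR[7:0] (control), bit 1 PSR[15:8] (extension), bit 2 PSR[23:16]
 * (status), bit 3 PSR[31:24] (flags).  For example MSR CPSR_fc, r0 passes
 * flags = 0b1001, giving mask 0xff0000ff before the feature and privilege
 * masking above.
 */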
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}
static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
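/* Expansion example: with size == 1 and u == 0, GEN_NEON_INTEGER_OP(hadd)
 * selects case 2 and emits gen_helper_neon_hadd_s16(tmp, tmp, tmp2); the
 * (size << 1) | u index encodes element width and signedness together.
 */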
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
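/* Worked example for the u16 transpose: t0 = 0x11112222, t1 = 0x33334444
 * become t0 = 0x22224444 and t1 = 0x11113333, i.e. the two 2x2 halfword
 * matrices held across t0/t1 are transposed in place.
 */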
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};

/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;
    if (!s->vfp_enabled)
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        TCGv base;
        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
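/* Per-bit select: dest = (t & c) | (f & ~c), so each result bit comes from
 * t where the control bit is 1 and from f where it is 0; VBSL/VBIT/VBIF
 * differ only in which operand plays which role.
 */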
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
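/* Lookup example: NEON_3R_VQDMULH_VQRDMULH has mask 0x6, so only sizes 1
 * (16 bit) and 2 (32 bit) are accepted; the (neon_3r_sizes[op] &
 * (1 << size)) == 0 test in disas_neon_data_insn() UNDEFs the 8-bit form.
 */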
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            op >= NEON_2RM_VRECPE_F);
}
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
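/* For instance [NEON_2RM_VCVT_F16_F32] = 0x2 permits only size == 1: the
 * half-precision conversions operate on 16-bit elements and every other
 * size encoding UNDEFs via the same bit test as the 3-reg-same table.
 */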
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    uint32_t imm, mask;
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        if (size == 3 && op != NEON_3R_LOGIC) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VQADD:
                    if (u) {
                        gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VQSUB:
                    if (u) {
                        gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VADD_VSUB:
                    if (u) {
                        tcg_gen_sub_i64(CPU_V001);
                    } else {
                        tcg_gen_add_i64(CPU_V001);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case NEON_3R_VSHL:
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
            }
            break;
        case NEON_3R_VPADD:
            if (u) {
                return 1;
            }
            /* Fall through */
        case NEON_3R_VPMAX:
        case NEON_3R_VPMIN:
            pairwise = 1;
            break;
        case NEON_3R_FLOAT_ARITH:
            pairwise = (u && size < 2); /* if VPADD (float) */
            break;
        case NEON_3R_FLOAT_MINMAX:
            pairwise = u; /* if VPMIN/VPMAX (float) */
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u && size) {
                /* no encoding for U=0 C=1x */
                return 1;
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (!u) {
                return 1;
            }
            break;
        case NEON_3R_VRECPS_VRSQRTS:
            if (u) {
                return 1;
            }
            break;
        case NEON_3R_VMUL:
            if (u && (size != 0)) {
                /* UNDEF on invalid size for polynomial subcase */
                return 1;
            }
            break;
        case NEON_3R_VFM:
            if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
                return 1;
            }
            break;
        default:
            break;
        }

        if (pairwise && q) {
            /* All the pairwise insns UNDEF if Q is set */
            return 1;
        }
        for (pass = 0; pass < (q ? 4 : 2); pass++) {

            if (pairwise) {
                /* Pairwise.  */
                if (pass < 1) {
                    tmp = neon_load_reg(rn, 0);
                    tmp2 = neon_load_reg(rn, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                }
            } else {
                /* Elementwise.  */
                tmp = neon_load_reg(rn, pass);
                tmp2 = neon_load_reg(rm, pass);
            }
            switch (op) {
            case NEON_3R_VHADD:
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case NEON_3R_VQADD:
                GEN_NEON_INTEGER_OP_ENV(qadd);
                break;
            case NEON_3R_VRHADD:
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case NEON_3R_LOGIC: /* Logic ops.  */
                switch ((u << 2) | size) {
                case 0: /* VAND */
                    tcg_gen_and_i32(tmp, tmp, tmp2);
                    break;
                case 1: /* VBIC */
                    tcg_gen_andc_i32(tmp, tmp, tmp2);
                    break;
                case 2: /* VORR */
                    tcg_gen_or_i32(tmp, tmp, tmp2);
                    break;
                case 3: /* VORN */
                    tcg_gen_orc_i32(tmp, tmp, tmp2);
                    break;
                case 4: /* VEOR */
                    tcg_gen_xor_i32(tmp, tmp, tmp2);
                    break;
                case 5: /* VBSL */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp, tmp2, tmp3);
                    tcg_temp_free_i32(tmp3);
                    break;
                case 6: /* VBIT */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp, tmp3, tmp2);
                    tcg_temp_free_i32(tmp3);
                    break;
                case 7: /* VBIF */
                    tmp3 = neon_load_reg(rd, pass);
                    gen_neon_bsl(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    break;
                }
                break;
            case NEON_3R_VHSUB:
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case NEON_3R_VQSUB:
                GEN_NEON_INTEGER_OP_ENV(qsub);
                break;
            case NEON_3R_VCGT:
                GEN_NEON_INTEGER_OP(cgt);
                break;
            case NEON_3R_VCGE:
                GEN_NEON_INTEGER_OP(cge);
                break;
            case NEON_3R_VSHL:
                GEN_NEON_INTEGER_OP(shl);
                break;
            case NEON_3R_VQSHL:
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case NEON_3R_VRSHL:
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case NEON_3R_VQRSHL:
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case NEON_3R_VMAX:
                GEN_NEON_INTEGER_OP(max);
                break;
            case NEON_3R_VMIN:
                GEN_NEON_INTEGER_OP(min);
                break;
            case NEON_3R_VABD:
                GEN_NEON_INTEGER_OP(abd);
                break;
            case NEON_3R_VABA:
                GEN_NEON_INTEGER_OP(abd);
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                gen_neon_add(size, tmp, tmp2);
                break;
            case NEON_3R_VADD_VSUB:
                if (!u) { /* VADD */
                    gen_neon_add(size, tmp, tmp2);
                } else { /* VSUB */
                    switch (size) {
                    case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
                    case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VTST_VCEQ:
                if (!u) { /* VTST */
                    switch (size) {
                    case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                } else { /* VCEQ */
                    switch (size) {
                    case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
                switch (size) {
                case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                if (u) { /* VMLS */
                    gen_neon_rsb(size, tmp, tmp2);
                } else { /* VMLA */
                    gen_neon_add(size, tmp, tmp2);
                }
                break;
            case NEON_3R_VMUL:
                if (u) { /* polynomial */
                    gen_helper_neon_mul_p8(tmp, tmp, tmp2);
                } else { /* Integer */
                    switch (size) {
                    case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                    case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPMAX:
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case NEON_3R_VPMIN:
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high.  */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPADD:
                switch (size) {
                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                break;
            case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                switch ((u << 2) | size) {
                case 0: /* VADD */
                case 4: /* VPADD */
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    break;
                case 2: /* VSUB */
                    gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
                    break;
                default:
                    abort();
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MULTIPLY:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                if (!u) {
                    tcg_temp_free_i32(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    if (size == 0) {
                        gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_CMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_ACMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MINMAX:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_VRECPS_VRSQRTS:
                if (size == 0)
                    gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                else
                    gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                break;
            case NEON_3R_VFM:
            {
                /* VFMA, VFMS: fused multiply-add */
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                TCGv_i32 tmp3 = neon_load_reg(rd, pass);
                if (size) {
                    /* VFMS */
                    gen_helper_vfp_negs(tmp, tmp);
                }
                gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
                tcg_temp_free_i32(tmp3);
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            default:
                abort();
            }
            tcg_temp_free_i32(tmp2);
            /* Save the result.  For elementwise operations we can put it
               straight into the destination register.  For pairwise operations
               we have to be careful to avoid clobbering the source operands. */
            if (pairwise && rd == rm) {
                neon_store_scratch(pass, tmp);
            } else {
                neon_store_reg(rd, pass, tmp);
            }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift. */
                if (op > 7) {
                    return 1;
                }
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                if (q && ((rd | rm) & 1)) {
                    return 1;
                }
                if (!u && (op == 4 || op == 6)) {
                    return 1;
                }
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4)
                    shift = shift - (1 << (size + 3));
                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }
                switch (size) {
                case 0:
                    imm = (uint8_t) shift;
                    imm |= imm << 8;
                    imm |= imm << 16;
                    break;
                case 1:
                    imm = (uint16_t) shift;
                    imm |= imm << 16;
                    break;
                case 2:
                case 3:
                    imm = shift;
                    break;
                default:
                    abort();
                }

                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 0:  /* VSHR */
                        case 1:  /* VSRA */
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 4: /* VSRI */
                        case 5: /* VSHL, VSLI */
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHLU */
                            gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
                                                      cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHL */
                            if (u) {
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            }
                            break;
                        }
                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            neon_load_reg64(cpu_V1, rd + pass);
                            uint64_t mask;
                            if (shift < -63 || shift > 63) {
                                mask = 0;
                            } else {
                                if (op == 4) {
                                    mask = 0xffffffffffffffffull >> -shift;
                                } else {
                                    mask = 0xffffffffffffffffull << shift;
                                }
                            }
                            tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
                            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1.  */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 0:  /* VSHR */
                        case 1:  /* VSRA */
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 4: /* VSRI */
                        case 5: /* VSHL, VSLI */
                            switch (size) {
                            case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                            break;
                        case 6: /* VQSHLU */
                            switch (size) {
                            case 0:
                                gen_helper_neon_qshlu_s8(tmp, cpu_env,
                                                         tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_qshlu_s16(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            case 2:
                                gen_helper_neon_qshlu_s32(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            default:
                                abort();
                            }
                            break;
                        case 7: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        }
                        tcg_temp_free_i32(tmp2);

                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            switch (size) {
                            case 0:
                                if (op == 4)
                                    mask = 0xff >> -shift;
                                else
                                    mask = (uint8_t)(0xff << shift);
                                mask |= mask << 8;
                                mask |= mask << 16;
                                break;
                            case 1:
                                if (op == 4)
                                    mask = 0xffff >> -shift;
                                else
                                    mask = (uint16_t)(0xffff << shift);
                                mask |= mask << 16;
                                break;
                            case 2:
                                if (shift < -31 || shift > 31) {
                                    mask = 0;
                                } else {
                                    if (op == 4)
                                        mask = 0xffffffffu >> -shift;
                                    else
                                        mask = 0xffffffffu << shift;
                                }
                                break;
                            default:
                                abort();
                            }
                            tmp2 = neon_load_reg(rd, pass);
                            tcg_gen_andi_i32(tmp, tmp, mask);
                            tcg_gen_andi_i32(tmp2, tmp2, ~mask);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                int input_unsigned = (op == 8) ? !u : u;
                if (rm & 1) {
                    return 1;
                }
                shift = shift - (1 << (size + 3));
                size++;
                if (size == 3) {
                    tmp64 = tcg_const_i64(shift);
                    neon_load_reg64(cpu_V0, rm);
                    neon_load_reg64(cpu_V1, rm + 1);
                    for (pass = 0; pass < 2; pass++) {
                        TCGv_i64 in;
                        if (pass == 0) {
                            in = cpu_V0;
                        } else {
                            in = cpu_V1;
                        }
                        if (q) {
                            if (input_unsigned) {
                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
                            }
                        } else {
                            if (input_unsigned) {
                                gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
                            }
                        }
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (size == 1) {
                        imm = (uint16_t)shift;
                        imm |= imm << 16;
                    } else {
                        /* size == 2 */
                        imm = (uint32_t)shift;
                    }
                    tmp2 = tcg_const_i32(imm);
                    tmp4 = neon_load_reg(rm + 1, 0);
                    tmp5 = neon_load_reg(rm + 1, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rm, 0);
                        } else {
                            tmp = tmp4;
                        }
                        gen_neon_shift_narrow(size, tmp, tmp2, q,
                                              input_unsigned);
                        if (pass == 0) {
                            tmp3 = neon_load_reg(rm, 1);
                        } else {
                            tmp3 = tmp5;
                        }
                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                              input_unsigned);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(tmp3);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i32(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL, VMOVL */
                if (q || (rd & 1)) {
                    return 1;
                }
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow
                         * input.  */
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else if (size == 1) {
                                imm = 0xffff >> (16 - shift);
                            } else {
                                /* size == 2 */
                                imm = 0xffffffff >> (32 - shift);
                            }
                            if (size < 2) {
                                imm64 = imm | (((uint64_t)imm) << 32);
                            } else {
                                imm64 = imm;
                            }
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point.  */
                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
                    return 1;
                }
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (!(op & 1)) {
                        if (u)
                            gen_vfp_ulto(0, shift, 1);
                        else
                            gen_vfp_slto(0, shift, 1);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift, 1);
                        else
                            gen_vfp_tosl(0, shift, 1);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            int invert;
            if (q && (rd & 1)) {
                return 1;
            }

            op = (insn >> 8) & 0xf;
            /* One register and immediate.  */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
            /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
             * We choose to not special-case this and will behave as if a
             * valid constant encoding of 0 had been given.
             */
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                if (invert)
                    imm = ~imm;
                break;
            case 15:
                if (invert) {
                    return 1;
                }
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert)
                imm = ~imm;

            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                if (op & 1 && op < 12) {
                    tmp = neon_load_reg(rd, pass);
                    if (invert) {
                        /* The immediate value has already been inverted, so
                           BIC becomes AND.  */
                        tcg_gen_andi_i32(tmp, tmp, imm);
                    } else {
                        tcg_gen_ori_i32(tmp, tmp, imm);
                    }
                } else {
                    /* VMOV, VMVN.  */
                    tmp = tcg_temp_new_i32();
                    if (op == 14 && invert) {
                        uint32_t val;
                        val = 0;
                        for (n = 0; n < 4; n++) {
                            if (imm & (1 << (n + (pass & 1) * 4)))
                                val |= 0xff << (n * 8);
                        }
                        tcg_gen_movi_i32(tmp, val);
                    } else {
                        tcg_gen_movi_i32(tmp, imm);
                    }
                }
                neon_store_reg(rd, pass, tmp);
            }
        }
5280 } else { /* (insn & 0x00800010 == 0x00800000) */
5282 op
= (insn
>> 8) & 0xf;
5283 if ((insn
& (1 << 6)) == 0) {
5284 /* Three registers of different lengths. */
5288 /* undefreq: bit 0 : UNDEF if size != 0
5289 * bit 1 : UNDEF if size == 0
5290 * bit 2 : UNDEF if U == 1
5291 * Note that [1:0] set implies 'always UNDEF'
5294 /* prewiden, src1_wide, src2_wide, undefreq */
5295 static const int neon_3reg_wide
[16][4] = {
5296 {1, 0, 0, 0}, /* VADDL */
5297 {1, 1, 0, 0}, /* VADDW */
5298 {1, 0, 0, 0}, /* VSUBL */
5299 {1, 1, 0, 0}, /* VSUBW */
5300 {0, 1, 1, 0}, /* VADDHN */
5301 {0, 0, 0, 0}, /* VABAL */
5302 {0, 1, 1, 0}, /* VSUBHN */
5303 {0, 0, 0, 0}, /* VABDL */
5304 {0, 0, 0, 0}, /* VMLAL */
5305 {0, 0, 0, 6}, /* VQDMLAL */
5306 {0, 0, 0, 0}, /* VMLSL */
5307 {0, 0, 0, 6}, /* VQDMLSL */
5308 {0, 0, 0, 0}, /* Integer VMULL */
5309 {0, 0, 0, 2}, /* VQDMULL */
5310 {0, 0, 0, 5}, /* Polynomial VMULL */
5311 {0, 0, 0, 3}, /* Reserved: always UNDEF */
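        /* Reading the table: e.g. VQDMULL is {0, 0, 0, 2}, so undefreq
         * bit 1 makes it UNDEF for size == 0, while Polynomial VMULL is
         * {0, 0, 0, 5} (bits 0 and 2), i.e. UNDEF unless size == 0 and
         * U == 0. */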
        prewiden = neon_3reg_wide[op][0];
        src1_wide = neon_3reg_wide[op][1];
        src2_wide = neon_3reg_wide[op][2];
        undefreq = neon_3reg_wide[op][3];
        if (((undefreq & 1) && (size != 0)) ||
            ((undefreq & 2) && (size == 0)) ||
            ((undefreq & 4) && u)) {
        if ((src1_wide && (rn & 1)) ||
            (src2_wide && (rm & 1)) ||
            (!src2_wide && (rd & 1))) {
        /* Avoid overlapping operands. Wide source operands are
           always aligned so will never overlap with wide
           destinations in problematic ways. */
        if (rd == rm && !src2_wide) {
            tmp = neon_load_reg(rm, 1);
            neon_store_scratch(2, tmp);
        } else if (rd == rn && !src1_wide) {
            tmp = neon_load_reg(rn, 1);
            neon_store_scratch(2, tmp);
        for (pass = 0; pass < 2; pass++) {
            neon_load_reg64(cpu_V0, rn + pass);
            if (pass == 1 && rd == rn) {
                tmp = neon_load_scratch(2);
                tmp = neon_load_reg(rn, pass);
            gen_neon_widen(cpu_V0, tmp, size, u);
            neon_load_reg64(cpu_V1, rm + pass);
            if (pass == 1 && rd == rm) {
                tmp2 = neon_load_scratch(2);
                tmp2 = neon_load_reg(rm, pass);
            gen_neon_widen(cpu_V1, tmp2, size, u);
            case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
                gen_neon_addl(size);
            case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
                gen_neon_subl(size);
            case 5: case 7: /* VABAL, VABDL */
                switch ((size << 1) | u) {
                    gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
                    gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
                    gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
                    gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
                    gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
                    gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
            case 8: case 9: case 10: case 11: case 12: case 13:
                /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
            case 14: /* Polynomial VMULL */
                gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
            default: /* 15 is RESERVED: caught earlier */
                gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                neon_store_reg64(cpu_V0, rd + pass);
            } else if (op == 5 || (op >= 8 && op <= 11)) {
                neon_load_reg64(cpu_V1, rd + pass);
                case 10: /* VMLSL */
                    gen_neon_negl(cpu_V0, size);
                case 5: case 8: /* VABAL, VMLAL */
                    gen_neon_addl(size);
                case 9: case 11: /* VQDMLAL, VQDMLSL */
                    gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                    gen_neon_negl(cpu_V0, size);
                    gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                neon_store_reg64(cpu_V0, rd + pass);
            } else if (op == 4 || op == 6) {
                /* Narrowing operation. */
                tmp = tcg_temp_new_i32();
                    gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
                    gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                    tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                    gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
                    gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
                    tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                    tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                neon_store_reg(rd, 0, tmp3);
                neon_store_reg(rd, 1, tmp);
                /* Write back the result. */
                neon_store_reg64(cpu_V0, rd + pass);
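        /* The rounded narrowing above follows the usual round-to-nearest
         * pattern: for size == 2, adding 1u << 31 before the 32-bit right
         * shift means e.g. a 64-bit sum of 0x0000000180000000 narrows to
         * 0x00000002 rather than truncating to 0x00000001 (VRADDHN /
         * VRSUBHN versus VADDHN / VSUBHN). */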
/* Two registers and a scalar. NB that for ops of this form
 * the ARM ARM labels bit 24 as Q, but it is in our variable
 * 'u', not 'q'. */
case 1: /* Float VMLA scalar */
case 5: /* Float VMLS scalar */
case 9: /* Float VMUL scalar */
case 0: /* Integer VMLA scalar */
case 4: /* Integer VMLS scalar */
case 8: /* Integer VMUL scalar */
case 12: /* VQDMULH scalar */
case 13: /* VQRDMULH scalar */
    if (u && ((rd | rn) & 1)) {
    tmp = neon_get_scalar(size, rm);
    neon_store_scratch(0, tmp);
    for (pass = 0; pass < (u ? 4 : 2); pass++) {
        tmp = neon_load_scratch(0);
        tmp2 = neon_load_reg(rn, pass);
            gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
            gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
        } else if (op == 13) {
            gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
            gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
        } else if (op & 1) {
            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
            gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
            tcg_temp_free_ptr(fpstatus);
            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
        tcg_temp_free_i32(tmp2);
        tmp2 = neon_load_reg(rd, pass);
            gen_neon_add(size, tmp, tmp2);
            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
            gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
            tcg_temp_free_ptr(fpstatus);
            gen_neon_rsb(size, tmp, tmp2);
            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
            gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
            tcg_temp_free_ptr(fpstatus);
        tcg_temp_free_i32(tmp2);
        neon_store_reg(rd, pass, tmp);
case 3: /* VQDMLAL scalar */
case 7: /* VQDMLSL scalar */
case 11: /* VQDMULL scalar */
case 2: /* VMLAL scalar */
case 6: /* VMLSL scalar */
case 10: /* VMULL scalar */
    tmp2 = neon_get_scalar(size, rm);
    /* We need a copy of tmp2 because gen_neon_mull
     * deletes it during pass 0. */
    tmp4 = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp4, tmp2);
    tmp3 = neon_load_reg(rn, 1);
    for (pass = 0; pass < 2; pass++) {
        tmp = neon_load_reg(rn, 0);
        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
        neon_load_reg64(cpu_V1, rd + pass);
        gen_neon_negl(cpu_V0, size);
        gen_neon_addl(size);
        gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
        gen_neon_negl(cpu_V0, size);
        gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
        gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
        neon_store_reg64(cpu_V0, rd + pass);
default: /* 14 and 15 are RESERVED */
} else { /* size == 3 */
    imm = (insn >> 8) & 0xf;
    if (q && ((rd | rn | rm) & 1)) {
        neon_load_reg64(cpu_V0, rn);
        neon_load_reg64(cpu_V1, rn + 1);
    } else if (imm == 8) {
        neon_load_reg64(cpu_V0, rn + 1);
        neon_load_reg64(cpu_V1, rm);
        tmp64 = tcg_temp_new_i64();
        neon_load_reg64(cpu_V0, rn);
        neon_load_reg64(tmp64, rn + 1);
        neon_load_reg64(cpu_V0, rn + 1);
        neon_load_reg64(tmp64, rm);
        tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
        tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
        tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
        neon_load_reg64(cpu_V1, rm);
        neon_load_reg64(cpu_V1, rm + 1);
        tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
        tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
        tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
        tcg_temp_free_i64(tmp64);
        neon_load_reg64(cpu_V0, rn);
        tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
        neon_load_reg64(cpu_V1, rm);
        tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
        tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
    neon_store_reg64(cpu_V0, rd);
    neon_store_reg64(cpu_V1, rd + 1);
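    /* The shri/shli/or pairs above implement VEXT as a 64-bit funnel
     * shift. As an illustration, for imm = 3 on a D-register operand the
     * result is (Vn >> 24) | (Vm << 40): bytes 3..7 of Vn followed by
     * bytes 0..2 of Vm, i.e. bytes 3..10 of the Vn:Vm concatenation. */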
} else if ((insn & (1 << 11)) == 0) {
    /* Two register misc. */
    op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
    size = (insn >> 18) & 3;
    /* UNDEF for unknown op values and bad op-size combinations */
    if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
    if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
        q && ((rm | rd) & 1)) {
    case NEON_2RM_VREV64:
        for (pass = 0; pass < (q ? 2 : 1); pass++) {
            tmp = neon_load_reg(rm, pass * 2);
            tmp2 = neon_load_reg(rm, pass * 2 + 1);
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_swap_half(tmp); break;
            case 2: /* no-op */ break;
            neon_store_reg(rd, pass * 2 + 1, tmp);
            neon_store_reg(rd, pass * 2, tmp2);
            case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
            case 1: gen_swap_half(tmp2); break;
            neon_store_reg(rd, pass * 2, tmp2);
    case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
    case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
        for (pass = 0; pass < q + 1; pass++) {
            tmp = neon_load_reg(rm, pass * 2);
            gen_neon_widen(cpu_V0, tmp, size, op & 1);
            tmp = neon_load_reg(rm, pass * 2 + 1);
            gen_neon_widen(cpu_V1, tmp, size, op & 1);
            case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
            case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
            case 2: tcg_gen_add_i64(CPU_V001); break;
            if (op >= NEON_2RM_VPADAL) {
                neon_load_reg64(cpu_V1, rd + pass);
                gen_neon_addl(size);
            neon_store_reg64(cpu_V0, rd + pass);
        for (n = 0; n < (q ? 4 : 2); n += 2) {
            tmp = neon_load_reg(rm, n);
            tmp2 = neon_load_reg(rd, n + 1);
            neon_store_reg(rm, n, tmp2);
            neon_store_reg(rd, n + 1, tmp);
        if (gen_neon_unzip(rd, rm, size, q)) {
        if (gen_neon_zip(rd, rm, size, q)) {
    case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
        /* also VQMOVUN; op field and mnemonics don't line up */
        for (pass = 0; pass < 2; pass++) {
            neon_load_reg64(cpu_V0, rm + pass);
            tmp = tcg_temp_new_i32();
            gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
            neon_store_reg(rd, 0, tmp2);
            neon_store_reg(rd, 1, tmp);
    case NEON_2RM_VSHLL:
        if (q || (rd & 1)) {
        tmp = neon_load_reg(rm, 0);
        tmp2 = neon_load_reg(rm, 1);
        for (pass = 0; pass < 2; pass++) {
            gen_neon_widen(cpu_V0, tmp, size, 1);
            tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
            neon_store_reg64(cpu_V0, rd + pass);
    case NEON_2RM_VCVT_F16_F32:
        if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
        tmp = tcg_temp_new_i32();
        tmp2 = tcg_temp_new_i32();
        tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
        gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
        tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
        gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
        tcg_gen_shli_i32(tmp2, tmp2, 16);
        tcg_gen_or_i32(tmp2, tmp2, tmp);
        tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
        gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
        tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
        neon_store_reg(rd, 0, tmp2);
        tmp2 = tcg_temp_new_i32();
        gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
        tcg_gen_shli_i32(tmp2, tmp2, 16);
        tcg_gen_or_i32(tmp2, tmp2, tmp);
        neon_store_reg(rd, 1, tmp2);
        tcg_temp_free_i32(tmp);
    case NEON_2RM_VCVT_F32_F16:
        if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
        tmp3 = tcg_temp_new_i32();
        tmp = neon_load_reg(rm, 0);
        tmp2 = neon_load_reg(rm, 1);
        tcg_gen_ext16u_i32(tmp3, tmp);
        gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
        tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
        tcg_gen_shri_i32(tmp3, tmp, 16);
        gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
        tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
        tcg_temp_free_i32(tmp);
        tcg_gen_ext16u_i32(tmp3, tmp2);
        gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
        tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
        tcg_gen_shri_i32(tmp3, tmp2, 16);
        gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
        tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp3);
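        /* Note the packing convention used in both directions above: each
         * pair of f16 values shares one 32-bit Neon word with the
         * even-numbered element in the low half -- hence the shli-by-16
         * and or when packing, and the matching ext16u/shri when
         * unpacking. */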
    for (pass = 0; pass < (q ? 4 : 2); pass++) {
        if (neon_2rm_is_float_op(op)) {
            tcg_gen_ld_f32(cpu_F0s, cpu_env,
                           neon_reg_offset(rm, pass));
            tmp = neon_load_reg(rm, pass);
        case NEON_2RM_VREV32:
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_swap_half(tmp); break;
        case NEON_2RM_VREV16:
            case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
            case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
            case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
            case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
            case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
            case 2: gen_helper_clz(tmp, tmp); break;
            gen_helper_neon_cnt_u8(tmp, tmp);
            tcg_gen_not_i32(tmp, tmp);
        case NEON_2RM_VQABS:
            gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
            gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
            gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
        case NEON_2RM_VQNEG:
            gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
            gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
            gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
        case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
            tmp2 = tcg_const_i32(0);
            case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
            case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
            case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
            tcg_temp_free(tmp2);
            if (op == NEON_2RM_VCLE0) {
                tcg_gen_not_i32(tmp, tmp);
        case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
            tmp2 = tcg_const_i32(0);
            case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
            case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
            case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
            tcg_temp_free(tmp2);
            if (op == NEON_2RM_VCLT0) {
                tcg_gen_not_i32(tmp, tmp);
        case NEON_2RM_VCEQ0:
            tmp2 = tcg_const_i32(0);
            case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
            case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
            case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
            tcg_temp_free(tmp2);
            case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
            case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
            case 2: tcg_gen_abs_i32(tmp, tmp); break;
            tmp2 = tcg_const_i32(0);
            gen_neon_rsb(size, tmp, tmp2);
            tcg_temp_free(tmp2);
        case NEON_2RM_VCGT0_F:
            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
            tmp2 = tcg_const_i32(0);
            gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
            tcg_temp_free(tmp2);
            tcg_temp_free_ptr(fpstatus);
        case NEON_2RM_VCGE0_F:
            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
            tmp2 = tcg_const_i32(0);
            gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
            tcg_temp_free(tmp2);
            tcg_temp_free_ptr(fpstatus);
        case NEON_2RM_VCEQ0_F:
            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
            tmp2 = tcg_const_i32(0);
            gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
            tcg_temp_free(tmp2);
            tcg_temp_free_ptr(fpstatus);
        case NEON_2RM_VCLE0_F:
            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
            tmp2 = tcg_const_i32(0);
            gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
            tcg_temp_free(tmp2);
            tcg_temp_free_ptr(fpstatus);
        case NEON_2RM_VCLT0_F:
            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
            tmp2 = tcg_const_i32(0);
            gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
            tcg_temp_free(tmp2);
            tcg_temp_free_ptr(fpstatus);
        case NEON_2RM_VABS_F:
        case NEON_2RM_VNEG_F:
            tmp2 = neon_load_reg(rd, pass);
            neon_store_reg(rm, pass, tmp2);
            tmp2 = neon_load_reg(rd, pass);
            case 0: gen_neon_trn_u8(tmp, tmp2); break;
            case 1: gen_neon_trn_u16(tmp, tmp2); break;
            neon_store_reg(rm, pass, tmp2);
        case NEON_2RM_VRECPE:
            gen_helper_recpe_u32(tmp, tmp, cpu_env);
        case NEON_2RM_VRSQRTE:
            gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
        case NEON_2RM_VRECPE_F:
            gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
        case NEON_2RM_VRSQRTE_F:
            gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
        case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
        case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
        case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
            gen_vfp_tosiz(0, 1);
        case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
            gen_vfp_touiz(0, 1);
            /* Reserved op values were caught by the
             * neon_2rm_sizes[] check earlier. */
        if (neon_2rm_is_float_op(op)) {
            tcg_gen_st_f32(cpu_F0s, cpu_env,
                           neon_reg_offset(rd, pass));
            neon_store_reg(rd, pass, tmp);
} else if ((insn & (1 << 10)) == 0) {
    int n = ((insn >> 8) & 3) + 1;
    if ((rn + n) > 32) {
        /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
         * helper function running off the end of the register file. */
    if (insn & (1 << 6)) {
        tmp = neon_load_reg(rd, 0);
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
    tmp2 = neon_load_reg(rm, 0);
    tmp4 = tcg_const_i32(rn);
    tmp5 = tcg_const_i32(n);
    gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
    tcg_temp_free_i32(tmp);
    if (insn & (1 << 6)) {
        tmp = neon_load_reg(rd, 1);
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
    tmp3 = neon_load_reg(rm, 1);
    gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
    tcg_temp_free_i32(tmp5);
    tcg_temp_free_i32(tmp4);
    neon_store_reg(rd, 0, tmp2);
    neon_store_reg(rd, 1, tmp3);
    tcg_temp_free_i32(tmp);
} else if ((insn & 0x380) == 0) {
    if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
    if (insn & (1 << 19)) {
        tmp = neon_load_reg(rm, 1);
        tmp = neon_load_reg(rm, 0);
    if (insn & (1 << 16)) {
        gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
    } else if (insn & (1 << 17)) {
        if ((insn >> 18) & 1)
            gen_neon_dup_high16(tmp);
            gen_neon_dup_low16(tmp);
    for (pass = 0; pass < (q ? 4 : 2); pass++) {
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        neon_store_reg(rd, pass, tmp2);
    tcg_temp_free_i32(tmp);
static int disas_coproc_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;
    ARMCPU *cpu = arm_env_get_cpu(env);
    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
    /* First check for coprocessor space used for actual instructions */
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        return disas_iwmmxt_insn(env, s, insn);
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        return disas_dsp_insn(env, s, insn);
    return disas_vfp_insn(env, s, insn);
    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
    opc1 = (insn >> 4) & 0xf;
    rt2 = (insn >> 16) & 0xf;
    crn = (insn >> 16) & 0xf;
    opc1 = (insn >> 21) & 7;
    opc2 = (insn >> 5) & 7;
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;
    ri = get_arm_cp_reginfo(cpu,
                            ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
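    /* ENCODE_CP_REG packs (cpnum, is64, crn, crm, opc1, opc2) into a
     * single integer key, and get_arm_cp_reginfo() looks that key up in
     * the table of ARMCPRegInfo entries registered for this CPU, so one
     * decode path serves both the 32-bit and 64-bit transfer forms. */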
    /* Check access permissions */
    if (!cp_access_ok(env, ri, isread)) {
    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        if (ri->type & ARM_CP_CONST) {
            tmp64 = tcg_const_i64(ri->resetvalue);
        } else if (ri->readfn) {
            gen_set_pc_im(s->pc);
            tmp64 = tcg_temp_new_i64();
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
            tmp64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
        tmp = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tmp, tmp64);
        store_reg(s, rt, tmp);
        tcg_gen_shri_i64(tmp64, tmp64, 32);
        tmp = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tmp, tmp64);
        tcg_temp_free_i64(tmp64);
        store_reg(s, rt2, tmp);
        if (ri->type & ARM_CP_CONST) {
            tmp = tcg_const_i32(ri->resetvalue);
        } else if (ri->readfn) {
            gen_set_pc_im(s->pc);
            tmp = tcg_temp_new_i32();
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
            tmp = load_cpu_offset(ri->fieldoffset);
        /* Destination register of r15 for 32 bit loads sets
         * the condition codes from the high 4 bits of the value */
        tcg_temp_free_i32(tmp);
        store_reg(s, rt, tmp);
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
        TCGv_i64 tmp64 = tcg_temp_new_i64();
        tmplo = load_reg(s, rt);
        tmphi = load_reg(s, rt2);
        tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
        tcg_temp_free_i32(tmplo);
        tcg_temp_free_i32(tmphi);
        TCGv_ptr tmpptr = tcg_const_ptr(ri);
        gen_set_pc_im(s->pc);
        gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
        tcg_temp_free_ptr(tmpptr);
        tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
        tcg_temp_free_i64(tmp64);
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rt);
        tmpptr = tcg_const_ptr(ri);
        gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
        tcg_temp_free_ptr(tmpptr);
        tcg_temp_free_i32(tmp);
        TCGv tmp = load_reg(s, rt);
        store_cpu_offset(tmp, ri->fieldoffset);
    /* We default to ending the TB on a coprocessor register write,
     * but allow this to be suppressed by the register definition
     * (usually only necessary to work around guest bugs). */
    if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
/* Store a 64-bit value to a register pair. Clobbers val. */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
    tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);

/* Load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
    /* Load value and extend to 64 bits. */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);

/* Load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
    /* Load 64-bit value rd:rn. */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);

/* Set N and Z flags from a 64-bit value. */
static void gen_logicq_cc(TCGv_i64 val)
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_logicq_cc(tmp, val);
    tcg_temp_free_i32(tmp);
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */
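/* Sketch of what the store half below generates (system emulation path;
 * the labels are TCG branch targets):
 *
 *     if (addr != cpu_exclusive_addr) goto fail;
 *     if ([addr] != cpu_exclusive_val) goto fail;
 *     [addr] = Rt;            // plus [addr + 4] = Rt2 for 64-bit
 *     Rd = 0; goto done;
 *   fail:
 *     Rd = 1;
 *   done:
 *     cpu_exclusive_addr = -1;
 */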
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
    tmp = gen_ld8u(addr, IS_USER(s));
    tmp = gen_ld16u(addr, IS_USER(s));
    tmp = gen_ld32(addr, IS_USER(s));
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_addi_i32(tmp2, addr, 4);
    tmp = gen_ld32(tmp2, IS_USER(s));
    tcg_temp_free_i32(tmp2);
    tcg_gen_mov_i32(cpu_exclusive_high, tmp);
    store_reg(s, rt2, tmp);
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);

static void gen_clrex(DisasContext *s)
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);

#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);

static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    tmp = gen_ld8u(addr, IS_USER(s));
    tmp = gen_ld16u(addr, IS_USER(s));
    tmp = gen_ld32(addr, IS_USER(s));
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_addi_i32(tmp2, addr, 4);
    tmp = gen_ld32(tmp2, IS_USER(s));
    tcg_temp_free_i32(tmp2);
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
    tcg_temp_free_i32(tmp);
    tmp = load_reg(s, rt);
    gen_st8(tmp, addr, IS_USER(s));
    gen_st16(tmp, addr, IS_USER(s));
    gen_st32(tmp, addr, IS_USER(s));
    tcg_gen_addi_i32(addr, addr, 4);
    tmp = load_reg(s, rt2);
    gen_st32(tmp, addr, IS_USER(s));
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
static void disas_arm_insn(CPUARMState *env, DisasContext *s)
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
    insn = arm_ldl_code(env, s->pc, s->bswap_code);
    /* M variants do not implement ARM mode. */
    /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
     * choose to UNDEF. In ARMv5 and above the space is used
     * for miscellaneous unconditional instructions. */
    /* Unconditional instructions. */
    if (((insn >> 25) & 7) == 1) {
        /* NEON Data processing. */
        if (!arm_feature(env, ARM_FEATURE_NEON))
        if (disas_neon_data_insn(env, s, insn))
    if ((insn & 0x0f100000) == 0x04000000) {
        /* NEON load/store. */
        if (!arm_feature(env, ARM_FEATURE_NEON))
        if (disas_neon_ls_insn(env, s, insn))
    if (((insn & 0x0f30f000) == 0x0510f000) ||
        ((insn & 0x0f30f010) == 0x0710f000)) {
        if ((insn & (1 << 22)) == 0) {
            if (!arm_feature(env, ARM_FEATURE_V7MP)) {
        /* Otherwise PLD; v5TE+ */
    if (((insn & 0x0f70f000) == 0x0450f000) ||
        ((insn & 0x0f70f010) == 0x0650f000)) {
        return; /* PLI; V7 */
    if (((insn & 0x0f700000) == 0x04100000) ||
        ((insn & 0x0f700010) == 0x06100000)) {
        if (!arm_feature(env, ARM_FEATURE_V7MP)) {
        return; /* v7MP: Unallocated memory hint: must NOP */
    if ((insn & 0x0ffffdff) == 0x01010000) {
        if (((insn >> 9) & 1) != s->bswap_code) {
            /* Dynamic endianness switching not implemented. */
    } else if ((insn & 0x0fffff00) == 0x057ff000) {
        switch ((insn >> 4) & 0xf) {
            /* We don't emulate caches so these are a no-op. */
    } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
        op1 = (insn & 0x1f);
        addr = tcg_temp_new_i32();
        tmp = tcg_const_i32(op1);
        gen_helper_get_r13_banked(addr, cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        i = (insn >> 23) & 3;
        case 0: offset = -4; break; /* DA */
        case 1: offset = 0; break; /* IA */
        case 2: offset = -8; break; /* DB */
        case 3: offset = 4; break; /* IB */
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = load_reg(s, 14);
        gen_st32(tmp, addr, 0);
        tmp = load_cpu_field(spsr);
        tcg_gen_addi_i32(addr, addr, 4);
        gen_st32(tmp, addr, 0);
        if (insn & (1 << 21)) {
            /* Base writeback. */
            case 0: offset = -8; break;
            case 1: offset = 4; break;
            case 2: offset = -4; break;
            case 3: offset = 0; break;
            tcg_gen_addi_i32(addr, addr, offset);
            tmp = tcg_const_i32(op1);
            gen_helper_set_r13_banked(cpu_env, tmp, addr);
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(addr);
            tcg_temp_free_i32(addr);
    } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
        rn = (insn >> 16) & 0xf;
        addr = load_reg(s, rn);
        i = (insn >> 23) & 3;
        case 0: offset = -4; break; /* DA */
        case 1: offset = 0; break; /* IA */
        case 2: offset = -8; break; /* DB */
        case 3: offset = 4; break; /* IB */
        tcg_gen_addi_i32(addr, addr, offset);
        /* Load PC into tmp and CPSR into tmp2. */
        tmp = gen_ld32(addr, 0);
        tcg_gen_addi_i32(addr, addr, 4);
        tmp2 = gen_ld32(addr, 0);
        if (insn & (1 << 21)) {
            /* Base writeback. */
            case 0: offset = -8; break;
            case 1: offset = 4; break;
            case 2: offset = -4; break;
            case 3: offset = 0; break;
            tcg_gen_addi_i32(addr, addr, offset);
            store_reg(s, rn, addr);
            tcg_temp_free_i32(addr);
        gen_rfe(s, tmp, tmp2);
    } else if ((insn & 0x0e000000) == 0x0a000000) {
        /* branch link and change to thumb (blx <offset>) */
        val = (uint32_t)s->pc;
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 14, tmp);
        /* Sign-extend the 24-bit offset */
        offset = (((int32_t)insn) << 8) >> 8;
        /* offset * 4 + bit24 * 2 + (thumb bit) */
        val += (offset << 2) | ((insn >> 23) & 2) | 1;
        /* pipeline offset */
        /* protected by ARCH(5); above, near the start of uncond block */
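        /* Worked example: with a 24-bit offset field of -2 and the H bit
         * (bit 24) set, val += ((-2) << 2) | 2 | 1 -- equivalently
         * val - 8 + 2, with the low bit marking the switch to Thumb; the
         * elided "pipeline offset" line adds the usual +4 on top. */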
    } else if ((insn & 0x0e000f00) == 0x0c000100) {
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            /* iWMMXt register transfer. */
            if (env->cp15.c15_cpar & (1 << 1))
                if (!disas_iwmmxt_insn(env, s, insn))
    } else if ((insn & 0x0fe00000) == 0x0c400000) {
        /* Coprocessor double register transfer. */
    } else if ((insn & 0x0f000010) == 0x0e000010) {
        /* Additional coprocessor register transfer. */
    } else if ((insn & 0x0ff10020) == 0x01000000) {
        /* cps (privileged) */
        if (insn & (1 << 19)) {
            if (insn & (1 << 8))
            if (insn & (1 << 7))
            if (insn & (1 << 6))
            if (insn & (1 << 18))
        if (insn & (1 << 17)) {
            val |= (insn & 0x1f);
        gen_set_psr_im(s, mask, 0, val);
    /* if not always execute, we generate a conditional jump to
       the next instruction */
    s->condlabel = gen_new_label();
    gen_test_cc(cond ^ 1, s->condlabel);
    if ((insn & 0x0f900000) == 0x03000000) {
        if ((insn & (1 << 21)) == 0) {
            rd = (insn >> 12) & 0xf;
            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
            if ((insn & (1 << 22)) == 0) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, val);
                tmp = load_reg(s, rd);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_ori_i32(tmp, tmp, val << 16);
            store_reg(s, rd, tmp);
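            /* Example (illustrative values): MOVT keeps the low halfword
             * and inserts the 16-bit immediate in the top half, so with
             * rd = 0x00001234 and val = 0xbeef the ext16u/ori sequence
             * yields 0xbeef1234, while the MOVW path would simply set
             * rd = 0x0000beef. */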
        if (((insn >> 12) & 0xf) != 0xf)
        if (((insn >> 16) & 0xf) == 0) {
            gen_nop_hint(s, insn & 0xff);
            /* CPSR = immediate */
            shift = ((insn >> 8) & 0xf) * 2;
            val = (val >> shift) | (val << (32 - shift));
            i = ((insn & (1 << 22)) != 0);
            if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
    } else if ((insn & 0x0f900000) == 0x01000000
               && (insn & 0x00000090) != 0x00000090) {
        /* miscellaneous instructions */
        op1 = (insn >> 21) & 3;
        sh = (insn >> 4) & 0xf;
        case 0x0: /* move program status register */
            tmp = load_reg(s, rm);
            i = ((op1 & 2) != 0);
            if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
            rd = (insn >> 12) & 0xf;
            tmp = load_cpu_field(spsr);
            tmp = tcg_temp_new_i32();
            gen_helper_cpsr_read(tmp, cpu_env);
            store_reg(s, rd, tmp);
            /* branch/exchange thumb (bx). */
            tmp = load_reg(s, rm);
            } else if (op1 == 3) {
                rd = (insn >> 12) & 0xf;
                tmp = load_reg(s, rm);
                gen_helper_clz(tmp, tmp);
                store_reg(s, rd, tmp);
            /* Trivial implementation equivalent to bx. */
            tmp = load_reg(s, rm);
            /* branch link/exchange thumb (blx) */
            tmp = load_reg(s, rm);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc);
            store_reg(s, 14, tmp2);
        case 0x5: /* saturating add/subtract */
            rd = (insn >> 12) & 0xf;
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rn);
            gen_helper_double_saturate(tmp2, cpu_env, tmp2);
            gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
            gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
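            /* The doubling for QDADD/QDSUB saturates as well: e.g. with
             * rn = 0x60000000, double_saturate yields 0x7fffffff (and
             * sets Q) rather than the wrapped 0xc0000000, and the
             * saturating add/subtract then operates on that clamped
             * value. */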
            /* SMC instruction (op1 == 3)
               and undefined instructions (op1 == 0 || op1 == 2)
               will trap */
            gen_exception_insn(s, 4, EXCP_BKPT);
        case 0x8: /* signed multiply */
            rs = (insn >> 8) & 0xf;
            rn = (insn >> 12) & 0xf;
            rd = (insn >> 16) & 0xf;
            /* (32 * 16) >> 16 */
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rs);
            tcg_gen_sari_i32(tmp2, tmp2, 16);
            tmp64 = gen_muls_i64_i32(tmp, tmp2);
            tcg_gen_shri_i64(tmp64, tmp64, 16);
            tmp = tcg_temp_new_i32();
            tcg_gen_trunc_i64_i32(tmp, tmp64);
            tcg_temp_free_i64(tmp64);
            if ((sh & 2) == 0) {
                tmp2 = load_reg(s, rn);
                gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rs);
            gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
            tcg_temp_free_i32(tmp2);
            tmp64 = tcg_temp_new_i64();
            tcg_gen_ext_i32_i64(tmp64, tmp);
            tcg_temp_free_i32(tmp);
            gen_addq(s, tmp64, rn, rd);
            gen_storeq_reg(s, rn, rd, tmp64);
            tcg_temp_free_i64(tmp64);
            tmp2 = load_reg(s, rn);
            gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
    } else if (((insn & 0x0e000000) == 0 &&
                (insn & 0x00000090) != 0x90) ||
               ((insn & 0x0e000000) == (1 << 25))) {
        int set_cc, logic_cc, shiftop;
        op1 = (insn >> 21) & 0xf;
        set_cc = (insn >> 20) & 1;
        logic_cc = table_logic_cc[op1] & set_cc;
        /* data processing instruction */
        if (insn & (1 << 25)) {
            /* immediate operand */
            shift = ((insn >> 8) & 0xf) * 2;
            val = (val >> shift) | (val << (32 - shift));
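            /* Worked example: an immediate field with imm8 = 0xff and a
             * rotate field of 1 gives shift = 2, so
             * val = ror32(0xff, 2) = 0xc000003f -- the standard ARM
             * "8 bits rotated right by twice the 4-bit rotate" encoding. */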
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, val);
            if (logic_cc && shift) {
                gen_set_CF_bit31(tmp2);
            tmp2 = load_reg(s, rm);
            shiftop = (insn >> 5) & 3;
            if (!(insn & (1 << 4))) {
                shift = (insn >> 7) & 0x1f;
                gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
                rs = (insn >> 8) & 0xf;
                tmp = load_reg(s, rs);
                gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
        if (op1 != 0x0f && op1 != 0x0d) {
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rn);
        rd = (insn >> 12) & 0xf;
            tcg_gen_and_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            if (set_cc && rd == 15) {
                /* SUBS r15, ... is used for exception return. */
                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
                gen_exception_return(s, tmp);
                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
                tcg_gen_sub_i32(tmp, tmp, tmp2);
                store_reg_bx(env, s, rd, tmp);
            gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
            tcg_gen_sub_i32(tmp, tmp2, tmp);
            store_reg_bx(env, s, rd, tmp);
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
            gen_add_carry(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
            gen_sub_carry(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
            gen_sub_carry(tmp, tmp2, tmp);
            store_reg_bx(env, s, rd, tmp);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp);
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp);
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            tcg_temp_free_i32(tmp);
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
            tcg_temp_free_i32(tmp);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            if (logic_cc && rd == 15) {
                /* MOVS r15, ... is used for exception return. */
                gen_exception_return(s, tmp2);
                store_reg_bx(env, s, rd, tmp2);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            tcg_gen_not_i32(tmp2, tmp2);
            store_reg_bx(env, s, rd, tmp2);
        if (op1 != 0x0f && op1 != 0x0d) {
            tcg_temp_free_i32(tmp2);
        /* other instructions */
        op1 = (insn >> 24) & 0xf;
            /* multiplies, extra load/stores */
            sh = (insn >> 5) & 3;
            rd = (insn >> 16) & 0xf;
            rn = (insn >> 12) & 0xf;
            rs = (insn >> 8) & 0xf;
            op1 = (insn >> 20) & 0xf;
            case 0: case 1: case 2: case 3: case 6:
                tmp = load_reg(s, rs);
                tmp2 = load_reg(s, rm);
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (insn & (1 << 22)) {
                    /* Subtract (mls) */
                    tmp2 = load_reg(s, rn);
                    tcg_gen_sub_i32(tmp, tmp2, tmp);
                    tcg_temp_free_i32(tmp2);
                } else if (insn & (1 << 21)) {
                    tmp2 = load_reg(s, rn);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                if (insn & (1 << 20))
                store_reg(s, rd, tmp);
                /* 64 bit mul double accumulate (UMAAL) */
                tmp = load_reg(s, rs);
                tmp2 = load_reg(s, rm);
                tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                gen_addq_lo(s, tmp64, rn);
                gen_addq_lo(s, tmp64, rd);
                gen_storeq_reg(s, rn, rd, tmp64);
                tcg_temp_free_i64(tmp64);
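                /* UMAAL computes a 64-bit unsigned product plus both
                 * 32-bit accumulators (rn and rd here). Adding the two
                 * accumulators separately via gen_addq_lo() cannot
                 * overflow 64 bits: the maximum is
                 * (2^32-1)^2 + 2*(2^32-1) = 2^64 - 1. */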
            case 8: case 9: case 10: case 11:
            case 12: case 13: case 14: case 15:
                /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
                tmp = load_reg(s, rs);
                tmp2 = load_reg(s, rm);
                if (insn & (1 << 22)) {
                    tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                if (insn & (1 << 21)) { /* mult accumulate */
                    gen_addq(s, tmp64, rn, rd);
                if (insn & (1 << 20)) {
                    gen_logicq_cc(tmp64);
                gen_storeq_reg(s, rn, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (insn & (1 << 23)) {
                /* load/store exclusive */
                op1 = (insn >> 21) & 0x3;
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rd, 15, addr, 2);
                    case 1: /* ldrexd */
                        gen_load_exclusive(s, rd, rd + 1, addr, 3);
                    case 2: /* ldrexb */
                        gen_load_exclusive(s, rd, 15, addr, 0);
                    case 3: /* ldrexh */
                        gen_load_exclusive(s, rd, 15, addr, 1);
                    gen_store_exclusive(s, rd, rm, 15, addr, 2);
                    case 1: /* strexd */
                        gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
                    case 2: /* strexb */
                        gen_store_exclusive(s, rd, rm, 15, addr, 0);
                    case 3: /* strexh */
                        gen_store_exclusive(s, rd, rm, 15, addr, 1);
                tcg_temp_free(addr);
                /* SWP instruction */
                /* ??? This is not really atomic. However we know
                   we never have multiple CPUs running in parallel,
                   so it is good enough. */
                addr = load_reg(s, rn);
                tmp = load_reg(s, rm);
                if (insn & (1 << 22)) {
                    tmp2 = gen_ld8u(addr, IS_USER(s));
                    gen_st8(tmp, addr, IS_USER(s));
                    tmp2 = gen_ld32(addr, IS_USER(s));
                    gen_st32(tmp, addr, IS_USER(s));
                tcg_temp_free_i32(addr);
                store_reg(s, rd, tmp2);
            /* Misc load/store */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            addr = load_reg(s, rn);
            if (insn & (1 << 24))
                gen_add_datah_offset(s, insn, 0, addr);
            if (insn & (1 << 20)) {
                tmp = gen_ld16u(addr, IS_USER(s));
                tmp = gen_ld8s(addr, IS_USER(s));
                tmp = gen_ld16s(addr, IS_USER(s));
            } else if (sh & 2) {
                tmp = load_reg(s, rd);
                gen_st32(tmp, addr, IS_USER(s));
                tcg_gen_addi_i32(addr, addr, 4);
                tmp = load_reg(s, rd + 1);
                gen_st32(tmp, addr, IS_USER(s));
                tmp = gen_ld32(addr, IS_USER(s));
                store_reg(s, rd, tmp);
                tcg_gen_addi_i32(addr, addr, 4);
                tmp = gen_ld32(addr, IS_USER(s));
                address_offset = -4;
                tmp = load_reg(s, rd);
                gen_st16(tmp, addr, IS_USER(s));
            /* Perform base writeback before the loaded value to
               ensure correct behavior with overlapping index registers.
               ldrd with base writeback is undefined if the
               destination and index registers overlap. */
            if (!(insn & (1 << 24))) {
                gen_add_datah_offset(s, insn, address_offset, addr);
                store_reg(s, rn, addr);
            } else if (insn & (1 << 21)) {
                tcg_gen_addi_i32(addr, addr, address_offset);
                store_reg(s, rn, addr);
                tcg_temp_free_i32(addr);
            /* Complete the load. */
            store_reg(s, rd, tmp);
        if (insn & (1 << 4)) {
            /* ARMv6 media instructions. */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            rs = (insn >> 8) & 0xf;
            switch ((insn >> 23) & 3) {
            case 0: /* Parallel add/subtract. */
                op1 = (insn >> 20) & 7;
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                sh = (insn >> 5) & 7;
                if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                if ((insn & 0x00700020) == 0) {
                    /* Halfword pack. */
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    if (insn & (1 << 6)) {
                        tcg_gen_sari_i32(tmp2, tmp2, shift);
                        tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_shli_i32(tmp2, tmp2, shift);
                        tcg_gen_ext16u_i32(tmp, tmp);
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                    tcg_gen_or_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
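                    /* Lane picture for the pack above (illustrative
                     * values): PKHTB with rn = 0xAAAABBBB,
                     * rm = 0xCCCCDDDD and an arithmetic shift of 16
                     * produces 0xAAAACCCC (top half of rn, shifted rm in
                     * the bottom), while the PKHBT path keeps the bottom
                     * half of rn and ORs in the shifted rm's top half. */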
                } else if ((insn & 0x00200020) == 0x00200000) {
                    tmp = load_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    if (insn & (1 << 6)) {
                        tcg_gen_sari_i32(tmp, tmp, shift);
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    sh = (insn >> 16) & 0x1f;
                    tmp2 = tcg_const_i32(sh);
                    if (insn & (1 << 22))
                        gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                        gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x00300fe0) == 0x00200f20) {
                    tmp = load_reg(s, rm);
                    sh = (insn >> 16) & 0x1f;
                    tmp2 = tcg_const_i32(sh);
                    if (insn & (1 << 22))
                        gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                        gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x000003e0) == 0x00000060) {
                    tmp = load_reg(s, rm);
                    shift = (insn >> 10) & 3;
                    /* ??? In many cases it's not necessary to do a
                       rotate, a shift is sufficient. */
                    tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                    op1 = (insn >> 20) & 7;
                    case 0: gen_sxtb16(tmp); break;
                    case 2: gen_sxtb(tmp); break;
                    case 3: gen_sxth(tmp); break;
                    case 4: gen_uxtb16(tmp); break;
                    case 6: gen_uxtb(tmp); break;
                    case 7: gen_uxth(tmp); break;
                    default: goto illegal_op;
                    tmp2 = load_reg(s, rn);
                    if ((op1 & 3) == 0) {
                        gen_add16(tmp, tmp2);
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                    tmp = load_reg(s, rm);
                    if (insn & (1 << 22)) {
                        if (insn & (1 << 7)) {
                            gen_helper_rbit(tmp, tmp);
                        if (insn & (1 << 7))
                            tcg_gen_bswap32_i32(tmp, tmp);
                    store_reg(s, rd, tmp);
            case 2: /* Multiplies (Type 3). */
                switch ((insn >> 20) & 0x7) {
                    if (((insn >> 6) ^ (insn >> 7)) & 1) {
                        /* op2 not 00x or 11x : UNDEF */
                    /* Signed multiply most significant [accumulate].
                       (SMMUL, SMMLA, SMMLS) */
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 6)) {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    if (insn & (1 << 5)) {
                        tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                    tcg_gen_shri_i64(tmp64, tmp64, 32);
                    tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, tmp64);
                    tcg_temp_free_i64(tmp64);
                    store_reg(s, rn, tmp);
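                    /* The 0x80000000 added above implements the 'R'
                     * (rounding) variants: e.g. SMMULR of two values
                     * whose 64-bit product is 0x0000000080000000 returns
                     * 1, where plain SMMUL truncates to 0. */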
                    /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
                    if (insn & (1 << 7)) {
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    if (insn & (1 << 5))
                        gen_swap_half(tmp2);
                    gen_smul_dual(tmp, tmp2);
                    if (insn & (1 << 6)) {
                        /* This subtraction cannot overflow. */
                        tcg_gen_sub_i32(tmp, tmp, tmp2);
                        /* This addition cannot overflow 32 bits;
                         * however it may overflow considered as a signed
                         * operation, in which case we must set the Q flag. */
                        gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    if (insn & (1 << 22)) {
                        /* smlald, smlsld */
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                        gen_addq(s, tmp64, rd, rn);
                        gen_storeq_reg(s, rd, rn, tmp64);
                        tcg_temp_free_i64(tmp64);
                        /* smuad, smusd, smlad, smlsd */
                        tmp2 = load_reg(s, rd);
                        gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rn, tmp);
                    if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
                    if (((insn >> 5) & 7) || (rd != 15)) {
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    if (insn & (1 << 21)) {
                        gen_helper_udiv(tmp, tmp, tmp2);
                        gen_helper_sdiv(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rn, tmp);
                op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
                case 0: /* Unsigned sum of absolute differences. */
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    gen_helper_usad8(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    tmp2 = load_reg(s, rd);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rn, tmp);
                case 0x20: case 0x24: case 0x28: case 0x2c:
                    /* Bitfield insert/clear. */
                    shift = (insn >> 7) & 0x1f;
                    i = (insn >> 16) & 0x1f;
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rd);
                    gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                    tmp = load_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    i = ((insn >> 16) & 0x1f) + 1;
                    gen_ubfx(tmp, shift, (1u << i) - 1);
                    gen_sbfx(tmp, shift, i);
                    store_reg(s, rd, tmp);
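                    /* Field extract example (illustrative operands):
                     * UBFX with lsb = 4 and width = 8 (so i == 8 here)
                     * extracts bits [11:4]; for rm = 0x00000ab0 the
                     * result is 0x000000ab, while SBFX would additionally
                     * sign-extend from bit 7 of the extracted field. */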
            /* Check for undefined extension instructions
             * per the ARM Bible, i.e.:
             * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx */
            sh = (0xf << 20) | (0xf << 4);
            if (op1 == 0x7 && ((insn & sh) == sh))
            /* load/store byte/word */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            tmp2 = load_reg(s, rn);
            i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
            if (insn & (1 << 24))
                gen_add_data_offset(s, insn, tmp2);
            if (insn & (1 << 20)) {
                if (insn & (1 << 22)) {
                    tmp = gen_ld8u(tmp2, i);
                    tmp = gen_ld32(tmp2, i);
                tmp = load_reg(s, rd);
                if (insn & (1 << 22))
                    gen_st8(tmp, tmp2, i);
                    gen_st32(tmp, tmp2, i);
            if (!(insn & (1 << 24))) {
                gen_add_data_offset(s, insn, tmp2);
                store_reg(s, rn, tmp2);
            } else if (insn & (1 << 21)) {
                store_reg(s, rn, tmp2);
                tcg_temp_free_i32(tmp2);
            if (insn & (1 << 20)) {
                /* Complete the load. */
                store_reg_from_load(env, s, rd, tmp);
            int j, n, user, loaded_base;
            /* load/store multiple words */
            /* XXX: store correct base if write back */
            if (insn & (1 << 22)) {
                goto illegal_op; /* only usable in supervisor mode */
            if ((insn & (1 << 15)) == 0)
            rn = (insn >> 16) & 0xf;
            addr = load_reg(s, rn);
            /* compute total size */
            TCGV_UNUSED(loaded_var);
            if (insn & (1 << i))
            /* XXX: test invalid n == 0 case ? */
            if (insn & (1 << 23)) {
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, 4);
                    /* post increment */
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -(n * 4));
                    /* post decrement */
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                if (insn & (1 << i)) {
                    if (insn & (1 << 20)) {
                        tmp = gen_ld32(addr, IS_USER(s));
                        tmp2 = tcg_const_i32(i);
                        gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                        tcg_temp_free_i32(tmp2);
                        tcg_temp_free_i32(tmp);
                    } else if (i == rn) {
                        store_reg_from_load(env, s, i, tmp);
                        /* special case: r15 = PC + 8 */
                        val = (long)s->pc + 4;
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, val);
                        tmp = tcg_temp_new_i32();
                        tmp2 = tcg_const_i32(i);
                        gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                        tcg_temp_free_i32(tmp2);
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    /* no need to add after the last transfer */
                    tcg_gen_addi_i32(addr, addr, 4);
            if (insn & (1 << 21)) {
                if (insn & (1 << 23)) {
                    if (insn & (1 << 24)) {
                        /* post increment */
                        tcg_gen_addi_i32(addr, addr, 4);
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                        /* post decrement */
                        tcg_gen_addi_i32(addr, addr, -(n * 4));
                store_reg(s, rn, addr);
                tcg_temp_free_i32(addr);
                store_reg(s, rn, loaded_var);
            if ((insn & (1 << 22)) && !user) {
                /* Restore CPSR from SPSR. */
                tmp = load_cpu_field(spsr);
                gen_set_cpsr(tmp, 0xffffffff);
                tcg_temp_free_i32(tmp);
                s->is_jmp = DISAS_UPDATE;
            /* branch (and link) */
            val = (int32_t)s->pc;
            if (insn & (1 << 24)) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, val);
                store_reg(s, 14, tmp);
            offset = (((int32_t)insn << 8) >> 8);
            val += (offset << 2) + 4;
            if (disas_coproc_insn(env, s, insn))
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            gen_exception_insn(s, 4, EXCP_UDEF);
/* Return true if this is a Thumb-2 logical op. */
thumb2_logic_op(int op)

/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid. */
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv t0, TCGv t1)
    tcg_gen_and_i32(t0, t0, t1);
    tcg_gen_andc_i32(t0, t0, t1);
    tcg_gen_or_i32(t0, t0, t1);
    tcg_gen_orc_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, t1);
    gen_helper_add_cc(t0, cpu_env, t0, t1);
    tcg_gen_add_i32(t0, t0, t1);
    gen_helper_adc_cc(t0, cpu_env, t0, t1);
    gen_helper_sbc_cc(t0, cpu_env, t0, t1);
    gen_sub_carry(t0, t0, t1);
    gen_helper_sub_cc(t0, cpu_env, t0, t1);
    tcg_gen_sub_i32(t0, t0, t1);
    gen_helper_sub_cc(t0, cpu_env, t1, t0);
    tcg_gen_sub_i32(t0, t1, t0);
    default: /* 5, 6, 7, 9, 12, 15. */
    gen_set_CF_bit31(t1);
/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
   is not legal. */
static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;
    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature(env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior. */
        if ((insn & (1 << 12)) == 0) {
            /* Second half of blx. */
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
        if (insn & (1 << 11)) {
            /* Second half of bl. */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary. Implement it as two
               16-bit instructions in case the second half causes an
               prefetch abort. */
            offset = ((int32_t)insn << 21) >> 9;
            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
        /* Fall through to 32-bit decode. */
    insn = arm_lduw_code(env, s->pc, s->bswap_code);
    insn |= (uint32_t)insn_hw1 << 16;
    if ((insn & 0xf800e800) != 0xf000e800) {
= (insn
>> 16) & 0xf;
7939 rs
= (insn
>> 12) & 0xf;
7940 rd
= (insn
>> 8) & 0xf;
7942 switch ((insn
>> 25) & 0xf) {
7943 case 0: case 1: case 2: case 3:
7944 /* 16-bit instructions. Should never happen. */
7947 if (insn
& (1 << 22)) {
7948 /* Other load/store, table branch. */
7949 if (insn
& 0x01200000) {
7950 /* Load/store doubleword. */
7952 addr
= tcg_temp_new_i32();
7953 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
7955 addr
= load_reg(s
, rn
);
7957 offset
= (insn
& 0xff) * 4;
7958 if ((insn
& (1 << 23)) == 0)
7960 if (insn
& (1 << 24)) {
7961 tcg_gen_addi_i32(addr
, addr
, offset
);
7964 if (insn
& (1 << 20)) {
7966 tmp
= gen_ld32(addr
, IS_USER(s
));
7967 store_reg(s
, rs
, tmp
);
7968 tcg_gen_addi_i32(addr
, addr
, 4);
7969 tmp
= gen_ld32(addr
, IS_USER(s
));
7970 store_reg(s
, rd
, tmp
);
7973 tmp
= load_reg(s
, rs
);
7974 gen_st32(tmp
, addr
, IS_USER(s
));
7975 tcg_gen_addi_i32(addr
, addr
, 4);
7976 tmp
= load_reg(s
, rd
);
7977 gen_st32(tmp
, addr
, IS_USER(s
));
7979 if (insn
& (1 << 21)) {
7980 /* Base writeback. */
7983 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
7984 store_reg(s
, rn
, addr
);
7986 tcg_temp_free_i32(addr
);
7988 } else if ((insn
& (1 << 23)) == 0) {
7989 /* Load/store exclusive word. */
7990 addr
= tcg_temp_local_new();
7991 load_reg_var(s
, addr
, rn
);
7992 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
7993 if (insn
& (1 << 20)) {
7994 gen_load_exclusive(s
, rs
, 15, addr
, 2);
7996 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
7998 tcg_temp_free(addr
);
7999 } else if ((insn
& (1 << 6)) == 0) {
8002 addr
= tcg_temp_new_i32();
8003 tcg_gen_movi_i32(addr
, s
->pc
);
8005 addr
= load_reg(s
, rn
);
8007 tmp
= load_reg(s
, rm
);
8008 tcg_gen_add_i32(addr
, addr
, tmp
);
8009 if (insn
& (1 << 4)) {
8011 tcg_gen_add_i32(addr
, addr
, tmp
);
8012 tcg_temp_free_i32(tmp
);
8013 tmp
= gen_ld16u(addr
, IS_USER(s
));
8015 tcg_temp_free_i32(tmp
);
8016 tmp
= gen_ld8u(addr
, IS_USER(s
));
8018 tcg_temp_free_i32(addr
);
8019 tcg_gen_shli_i32(tmp
, tmp
, 1);
8020 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
8021 store_reg(s
, 15, tmp
);
8023 /* Load/store exclusive byte/halfword/doubleword. */
8025 op
= (insn
>> 4) & 0x3;
8029 addr
= tcg_temp_local_new();
8030 load_reg_var(s
, addr
, rn
);
8031 if (insn
& (1 << 20)) {
8032 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
8034 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
8036 tcg_temp_free(addr
);
        } else {
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* RFE/SRS: not available in user mode.  */
                if (IS_USER(s))
                    goto illegal_op;
                if (insn & (1 << 20)) {
                    /* rfe */
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2.  */
                    tmp = gen_ld32(addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = gen_ld32(addr, 0);
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, -4);
                        }
                        store_reg(s, rn, addr);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                    gen_rfe(s, tmp, tmp2);
                } else {
                    /* srs */
                    op = (insn & 0x1f);
                    addr = tcg_temp_new_i32();
                    tmp = tcg_const_i32(op);
                    gen_helper_get_r13_banked(addr, cpu_env, tmp);
                    tcg_temp_free_i32(tmp);
                    if ((insn & (1 << 24)) == 0) {
                        tcg_gen_addi_i32(addr, addr, -8);
                    }
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    gen_helper_cpsr_read(tmp, cpu_env);
                    gen_st32(tmp, addr, 0);
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if ((insn & (1 << 24)) == 0) {
                            tcg_gen_addi_i32(addr, addr, -4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                        tmp = tcg_const_i32(op);
                        gen_helper_set_r13_banked(cpu_env, tmp, addr);
                        tcg_temp_free_i32(tmp);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                }
            } else {
                int i, loaded_base = 0;
                TCGv loaded_var;
                /* Load/store multiple.  */
                addr = load_reg(s, rn);
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                }

                TCGV_UNUSED(loaded_var);
                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                        continue;
                    if (insn & (1 << 20)) {
                        /* Load.  */
                        tmp = gen_ld32(addr, IS_USER(s));
                        if (i == 15) {
                            gen_bx(s, tmp);
                        } else if (i == rn) {
                            loaded_var = tmp;
                            loaded_base = 1;
                        } else {
                            store_reg(s, i, tmp);
                        }
                    } else {
                        /* Store.  */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    tcg_gen_addi_i32(addr, addr, 4);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if (insn & (1 << 21)) {
                    /* Base register writeback.  */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    }
                    /* Fault if writeback register is in register list.  */
                    if (insn & (1 << rn))
                        goto illegal_op;
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
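    /* Note on the load/store multiple above: when the base register appears
     * in an LDM register list, its loaded value is parked in loaded_var and
     * only committed after the loop, so the remaining element addresses are
     * still computed from the original base value.
     */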
    case 5:
        op = (insn >> 21) & 0xf;
        if (op == 6) {
            /* Halfword pack.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
            if (insn & (1 << 5)) {
                /* pkhtb */
                if (shift == 0)
                    shift = 31;
                tcg_gen_sari_i32(tmp2, tmp2, shift);
                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                tcg_gen_ext16u_i32(tmp2, tmp2);
            } else {
                /* pkhbt */
                if (shift)
                    tcg_gen_shli_i32(tmp2, tmp2, shift);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
            }
            tcg_gen_or_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* Data processing register constant shift.  */
            if (rn == 15) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            } else {
                tmp = load_reg(s, rn);
            }
            tmp2 = load_reg(s, rm);

            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
                goto illegal_op;
            tcg_temp_free_i32(tmp2);
            if (rd != 15) {
                store_reg(s, rd, tmp);
            } else {
                tcg_temp_free_i32(tmp);
            }
        }
        break;
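    /* Note on the halfword pack above: PKHBT combines Rn[15:0] with
     * (Rm LSL shift)[31:16]; PKHTB combines Rn[31:16] with
     * (Rm ASR shift)[15:0].  A PKHTB shift of 0 encodes ASR #32, which is
     * why shift is forced to 31 (ASR #31 already replicates the sign bit
     * into all result bits, so the two are equivalent).
     */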
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            if (logic_cc)
                gen_logic_CC(tmp);
            store_reg_bx(env, s, rd, tmp);
            break;
        case 1: /* Sign/zero extend.  */
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
            if (shift != 0)
                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default: goto illegal_op;
            }
            if (rn != 15) {
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 2: /* SIMD add/subtract.  */
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            if (op < 4) {
                /* Saturating add/subtract.  */
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if (op & 1)
                    gen_helper_double_saturate(tmp, cpu_env, tmp);
                if (op & 2)
                    gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
                else
                    gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                tmp = load_reg(s, rn);
                switch (op) {
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                    break;
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                    break;
                case 0x09: /* rev16 */
                    gen_rev16(tmp);
                    break;
                case 0x0b: /* revsh */
                    gen_revsh(tmp);
                    break;
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    break;
                case 0x18: /* clz */
                    gen_helper_clz(tmp, tmp);
                    break;
                default:
                    goto illegal_op;
                }
            }
            store_reg(s, rd, tmp);
            break;
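    /* Note on the saturating ops above: QDADD/QDSUB (op & 1) first
     * saturate-double the Rn operand, then the add/subtract with Rm is
     * itself saturated; either step may set the Q flag, which is why both
     * go through helpers that take cpu_env.
     */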
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (op)
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    else
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                if (op)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (insn & (1 << 22)) {
                    /* This subtraction cannot overflow.  */
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 3: /* 32 * 16 -> 32msb */
                if (op)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                if (rs != 15) {
                    tmp = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    } else {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                    }
                }
                if (insn & (1 << 4)) {
                    tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                }
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                break;
            case 7: /* Unsigned sum of absolute differences.  */
                gen_helper_usad8(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            }
            store_reg(s, rd, tmp);
            break;
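    /* Note: for SMMUL/SMMLA/SMMLS above, the R bit (insn bit 4) adds
     * 0x80000000 to the 64-bit product before the high word is extracted,
     * i.e. the result is rounded to nearest instead of truncated.
     */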
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
                    goto illegal_op;
                }
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);

                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        tcg_temp_free_i32(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            }
            break;
        }
        break;
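    /* Note: UMAAL (op & 4) folds both 32-bit accumulators into the 64-bit
     * product.  This cannot overflow 64 bits, since
     * (2^32-1)^2 + 2*(2^32-1) == 2^64-1.
     */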
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn(env, s, insn))
                goto illegal_op;
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;

                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
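                /* Note on the offset assembly above: in the T4 encoding,
                 * branch offset bits 23 and 22 are I1 = NOT(J1 XOR S) and
                 * I2 = NOT(J2 XOR S).  After sign extension, offset bits 23
                 * and 22 already equal the sign bit S, so XORing with the
                 * complemented J bits flips them exactly when J differs
                 * from S, producing I1/I2 without any conditional code.
                 */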
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control.  */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    goto illegal_op; /* not implemented.  */
                }
                op = (insn >> 20) & 7;
                switch (op) {
                case 0: /* msr cpsr.  */
                    if (IS_M(env)) {
                        tmp = load_reg(s, rn);
                        addr = tcg_const_i32(insn & 0xff);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                        tcg_temp_free_i32(tmp);
                        gen_lookup_tb(s);
                        break;
                    }
                    /* fall through */
                case 1: /* msr spsr.  */
                    if (IS_M(env))
                        goto illegal_op;
                    tmp = load_reg(s, rn);
                    if (gen_set_psr(s,
                          msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                          op == 1, tmp))
                        goto illegal_op;
                    break;
                case 2: /* cps, nop-hint.  */
                    if (((insn >> 8) & 7) == 0) {
                        gen_nop_hint(s, insn & 0xff);
                    }
                    /* Implemented as NOP in user mode.  */
                    if (IS_USER(s))
                        break;
                    offset = 0;
                    imm = 0;
                    if (insn & (1 << 10)) {
                        if (insn & (1 << 7))
                            offset |= CPSR_A;
                        if (insn & (1 << 6))
                            offset |= CPSR_I;
                        if (insn & (1 << 5))
                            offset |= CPSR_F;
                        if (insn & (1 << 9))
                            imm = CPSR_A | CPSR_I | CPSR_F;
                    }
                    if (insn & (1 << 8)) {
                        offset |= 0x1f;
                        imm |= (insn & 0x1f);
                    }
                    if (offset) {
                        gen_set_psr_im(s, offset, 0, imm);
                    }
                    break;
                case 3: /* Special control operations.  */
                    ARCH(7);
                    op = (insn >> 4) & 0xf;
                    switch (op) {
                    case 2: /* clrex */
                        gen_clrex(s);
                        break;
                    case 4: /* dsb */
                    case 5: /* dmb */
                    case 6: /* isb */
                        /* These execute as NOPs.  */
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                case 4: /* bxj */
                    /* Trivial implementation equivalent to bx.  */
                    tmp = load_reg(s, rn);
                    gen_bx(s, tmp);
                    break;
                case 5: /* Exception return.  */
                    if (IS_USER(s)) {
                        goto illegal_op;
                    }
                    if (rn != 14 || rd != 15) {
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rn);
                    tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                    gen_exception_return(s, tmp);
                    break;
                case 6: /* mrs cpsr.  */
                    tmp = tcg_temp_new_i32();
                    if (IS_M(env)) {
                        addr = tcg_const_i32(insn & 0xff);
                        gen_helper_v7m_mrs(tmp, cpu_env, addr);
                        tcg_temp_free_i32(addr);
                    } else {
                        gen_helper_cpsr_read(tmp, cpu_env);
                    }
                    store_reg(s, rd, tmp);
                    break;
                case 7: /* mrs spsr.  */
                    /* Not accessible in user mode.  */
                    if (IS_USER(s) || IS_M(env))
                        goto illegal_op;
                    tmp = load_cpu_field(spsr);
                    store_reg(s, rd, tmp);
                    break;
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
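                /* Note: gen_test_cc(op ^ 1, ...) branches to condlabel when
                 * the condition fails.  ARM condition codes come in
                 * true/false pairs differing only in bit 0, so op ^ 1 is
                 * the negated condition and the fall-through path is the
                 * taken branch.
                 */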
            }
        } else {
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_sbfx(tmp, shift, imm);
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_ubfx(tmp, shift, (1u << imm) - 1);
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
                            tcg_temp_free_i32(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                            else
                                gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                            else
                                gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                        }
                    }
                    store_reg(s, rd, tmp);
                }
            } else {
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm <<= 8;
                    imm |= imm << 16;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
        }
        break;
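    /* Worked example for the modified immediate above: imm8 = 0xab with
     * pattern code 1 expands to 0x00ab00ab and with code 3 to 0xabababab.
     * For a rotated constant, the top bit of imm8 joins the rotation amount
     * and bit 7 is forced set, so e.g. code 4 with imm8 = 0x60 encodes
     * 0xe0 ror 8 = 0xe0000000.
     */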
    case 12: /* Load/store single data item.  */
    {
        int postinc = 0;
        int writeback = 0;
        int user;
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            break;
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (rs == 15) {
            if (!(insn & (1 << 20))) {
                goto illegal_op;
            }
            if (op != 2) {
                /* Byte or halfword load space with dest == r15 : memory hints.
                 * Catch them early so we don't emit pointless addressing code.
                 * This space is a mix of:
                 *  PLD/PLDW/PLI,  which we implement as NOPs (note that unlike
                 *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
                 *     cores)
                 *  unallocated hints, which must be treated as NOPs
                 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
                 *     which is easiest for the decoding logic
                 *  Some space which must UNDEF
                 */
                int op1 = (insn >> 23) & 3;
                int op2 = (insn >> 6) & 0x3f;
                if (op & 2) {
                    goto illegal_op;
                }
                if (rn == 15) {
                    /* UNPREDICTABLE, unallocated hint or
                     * PLD/PLDW/PLI (literal)
                     */
                    return 0;
                }
                if (op1 & 1) {
                    return 0; /* PLD/PLDW/PLI or unallocated hint */
                }
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return 0; /* PLD/PLDW/PLI or unallocated hint */
                }
                /* UNDEF space, or an UNPREDICTABLE */
                return 1;
            }
        }
        user = IS_USER(s);
        if (rn == 15) {
            addr = tcg_temp_new_i32();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                imm = insn & 0xff;
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3) {
                        tcg_temp_free_i32(addr);
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    break;
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    user = 1;
                    break;
                case 0x9: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xb: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 0xd: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xf: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    writeback = 1;
                    break;
                default:
                    tcg_temp_free_i32(addr);
                    goto illegal_op;
                }
            }
        }
        if (insn & (1 << 20)) {
            /* Load.  */
            switch (op) {
            case 0: tmp = gen_ld8u(addr, user); break;
            case 4: tmp = gen_ld8s(addr, user); break;
            case 1: tmp = gen_ld16u(addr, user); break;
            case 5: tmp = gen_ld16s(addr, user); break;
            case 2: tmp = gen_ld32(addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            if (rs == 15) {
                gen_bx(s, tmp);
            } else {
                store_reg(s, rs, tmp);
            }
        } else {
            /* Store.  */
            tmp = load_reg(s, rs);
            switch (op) {
            case 0: gen_st8(tmp, addr, user); break;
            case 1: gen_st16(tmp, addr, user); break;
            case 2: gen_st32(tmp, addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
        }
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
    }
        break;
    default:
        goto illegal_op;
    }
    return 0;
illegal_op:
    return 1;
}
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = arm_lduw_code(env, s->pc, s->bswap_code);
    s->pc += 2;
>> 12) {
8967 op
= (insn
>> 11) & 3;
8970 rn
= (insn
>> 3) & 7;
8971 tmp
= load_reg(s
, rn
);
8972 if (insn
& (1 << 10)) {
8974 tmp2
= tcg_temp_new_i32();
8975 tcg_gen_movi_i32(tmp2
, (insn
>> 6) & 7);
8978 rm
= (insn
>> 6) & 7;
8979 tmp2
= load_reg(s
, rm
);
8981 if (insn
& (1 << 9)) {
8982 if (s
->condexec_mask
)
8983 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8985 gen_helper_sub_cc(tmp
, cpu_env
, tmp
, tmp2
);
8987 if (s
->condexec_mask
)
8988 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8990 gen_helper_add_cc(tmp
, cpu_env
, tmp
, tmp2
);
8992 tcg_temp_free_i32(tmp2
);
8993 store_reg(s
, rd
, tmp
);
8995 /* shift immediate */
8996 rm
= (insn
>> 3) & 7;
8997 shift
= (insn
>> 6) & 0x1f;
8998 tmp
= load_reg(s
, rm
);
8999 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
9000 if (!s
->condexec_mask
)
9002 store_reg(s
, rd
, tmp
);
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
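        /* Note: the literal load above addresses its pool relative to
         * Align(PC, 4).  s->pc already points 2 bytes past the insn, so
         * s->pc + 2 is the architectural PC value, and bit 1 is cleared
         * to word-align the base; the 8-bit immediate is scaled by 4.
         */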
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    ARCH(5);
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                /* already thumb, no need to check */
                gen_bx(s, tmp);
                break;
            }
            break;
        }
        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, cpu_env, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, cpu_env, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, cpu_env, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc(tmp, tmp2);
            else
                gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
            else
                gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;
    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        tcg_temp_free_i32(addr);
        break;
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;
        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(env, s, 15, tmp);
            }
            break;
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
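        /* Note: CBZ/CBNZ encode their offset as i:imm5:'0' (0-126 bytes,
         * forward only) and never affect the flags; the branch target is
         * computed from s->pc + 2, i.e. the address of the insn plus 4,
         * which is the architectural PC value.
         */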
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;
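        /* Note: bit 0 of the IT first-condition is folded into
         * condexec_mask while condexec_cond keeps bits [3:1].  The
         * translation loop can then advance the IT state with a single
         * left shift of the mask, since each subsequent instruction's
         * condition differs from the first only in bit 0 (then vs else).
         */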
        case 0xe: /* bkpt */
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;
        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != s->bswap_code) {
                    /* Dynamic endianness switching not implemented. */
                    goto illegal_op;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (IS_M(env)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;
    case 12:
    {
        /* load/store multiple */
        TCGv loaded_var;
        TCGV_UNUSED(loaded_var);
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;
    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF);
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF);
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction.  */
static inline void gen_intermediate_code_internal(CPUARMState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4)
                                        | (dc->condexec_mask >> 1);
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);
    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}