4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
28 #include "disas/disas.h"
36 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38 /* currently all emulated v5 cores are also v5TE, so don't bother */
39 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
40 #define ENABLE_ARCH_5J 0
41 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
46 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
48 /* internal defines */
49 typedef struct DisasContext
{
52 /* Nonzero if this instruction has been conditionally skipped. */
54 /* The label that will be jumped to when the instruction is skipped. */
56 /* Thumb-2 conditional execution bits. */
59 struct TranslationBlock
*tb
;
60 int singlestep_enabled
;
63 #if !defined(CONFIG_USER_ONLY)
71 static uint32_t gen_opc_condexec_bits
[OPC_BUF_SIZE
];
73 #if defined(CONFIG_USER_ONLY)
76 #define IS_USER(s) (s->user)
79 /* These instructions trap after executing, so defer them until after the
80 conditional execution state has been updated. */
84 static TCGv_ptr cpu_env
;
85 /* We reuse the same 64-bit temporaries for efficiency. */
86 static TCGv_i64 cpu_V0
, cpu_V1
, cpu_M0
;
87 static TCGv_i32 cpu_R
[16];
88 static TCGv_i32 cpu_CF
, cpu_NF
, cpu_VF
, cpu_ZF
;
89 static TCGv_i32 cpu_exclusive_addr
;
90 static TCGv_i32 cpu_exclusive_val
;
91 static TCGv_i32 cpu_exclusive_high
;
92 #ifdef CONFIG_USER_ONLY
93 static TCGv_i32 cpu_exclusive_test
;
94 static TCGv_i32 cpu_exclusive_info
;
97 /* FIXME: These should be removed. */
98 static TCGv cpu_F0s
, cpu_F1s
;
99 static TCGv_i64 cpu_F0d
, cpu_F1d
;
101 #include "exec/gen-icount.h"
103 static const char *regnames
[] =
104 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
105 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
107 /* initialize TCG globals. */
108 void arm_translate_init(void)
112 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
114 for (i
= 0; i
< 16; i
++) {
115 cpu_R
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
116 offsetof(CPUARMState
, regs
[i
]),
119 cpu_CF
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUARMState
, CF
), "CF");
120 cpu_NF
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUARMState
, NF
), "NF");
121 cpu_VF
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUARMState
, VF
), "VF");
122 cpu_ZF
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUARMState
, ZF
), "ZF");
124 cpu_exclusive_addr
= tcg_global_mem_new_i32(TCG_AREG0
,
125 offsetof(CPUARMState
, exclusive_addr
), "exclusive_addr");
126 cpu_exclusive_val
= tcg_global_mem_new_i32(TCG_AREG0
,
127 offsetof(CPUARMState
, exclusive_val
), "exclusive_val");
128 cpu_exclusive_high
= tcg_global_mem_new_i32(TCG_AREG0
,
129 offsetof(CPUARMState
, exclusive_high
), "exclusive_high");
130 #ifdef CONFIG_USER_ONLY
131 cpu_exclusive_test
= tcg_global_mem_new_i32(TCG_AREG0
,
132 offsetof(CPUARMState
, exclusive_test
), "exclusive_test");
133 cpu_exclusive_info
= tcg_global_mem_new_i32(TCG_AREG0
,
134 offsetof(CPUARMState
, exclusive_info
), "exclusive_info");
141 static inline TCGv
load_cpu_offset(int offset
)
143 TCGv tmp
= tcg_temp_new_i32();
144 tcg_gen_ld_i32(tmp
, cpu_env
, offset
);
148 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
150 static inline void store_cpu_offset(TCGv var
, int offset
)
152 tcg_gen_st_i32(var
, cpu_env
, offset
);
153 tcg_temp_free_i32(var
);
156 #define store_cpu_field(var, name) \
157 store_cpu_offset(var, offsetof(CPUARMState, name))
159 /* Set a variable to the value of a CPU register. */
160 static void load_reg_var(DisasContext
*s
, TCGv var
, int reg
)
164 /* normally, since we updated PC, we need only to add one insn */
166 addr
= (long)s
->pc
+ 2;
168 addr
= (long)s
->pc
+ 4;
169 tcg_gen_movi_i32(var
, addr
);
171 tcg_gen_mov_i32(var
, cpu_R
[reg
]);
175 /* Create a new temporary and set it to the value of a CPU register. */
176 static inline TCGv
load_reg(DisasContext
*s
, int reg
)
178 TCGv tmp
= tcg_temp_new_i32();
179 load_reg_var(s
, tmp
, reg
);
183 /* Set a CPU register. The source must be a temporary and will be
185 static void store_reg(DisasContext
*s
, int reg
, TCGv var
)
188 tcg_gen_andi_i32(var
, var
, ~1);
189 s
->is_jmp
= DISAS_JUMP
;
191 tcg_gen_mov_i32(cpu_R
[reg
], var
);
192 tcg_temp_free_i32(var
);
195 /* Value extensions. */
196 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
197 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
198 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
199 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
201 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
202 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
205 static inline void gen_set_cpsr(TCGv var
, uint32_t mask
)
207 TCGv tmp_mask
= tcg_const_i32(mask
);
208 gen_helper_cpsr_write(cpu_env
, var
, tmp_mask
);
209 tcg_temp_free_i32(tmp_mask
);
211 /* Set NZCV flags from the high 4 bits of var. */
212 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
214 static void gen_exception(int excp
)
216 TCGv tmp
= tcg_temp_new_i32();
217 tcg_gen_movi_i32(tmp
, excp
);
218 gen_helper_exception(cpu_env
, tmp
);
219 tcg_temp_free_i32(tmp
);
222 static void gen_smul_dual(TCGv a
, TCGv b
)
224 TCGv tmp1
= tcg_temp_new_i32();
225 TCGv tmp2
= tcg_temp_new_i32();
226 tcg_gen_ext16s_i32(tmp1
, a
);
227 tcg_gen_ext16s_i32(tmp2
, b
);
228 tcg_gen_mul_i32(tmp1
, tmp1
, tmp2
);
229 tcg_temp_free_i32(tmp2
);
230 tcg_gen_sari_i32(a
, a
, 16);
231 tcg_gen_sari_i32(b
, b
, 16);
232 tcg_gen_mul_i32(b
, b
, a
);
233 tcg_gen_mov_i32(a
, tmp1
);
234 tcg_temp_free_i32(tmp1
);
237 /* Byteswap each halfword. */
238 static void gen_rev16(TCGv var
)
240 TCGv tmp
= tcg_temp_new_i32();
241 tcg_gen_shri_i32(tmp
, var
, 8);
242 tcg_gen_andi_i32(tmp
, tmp
, 0x00ff00ff);
243 tcg_gen_shli_i32(var
, var
, 8);
244 tcg_gen_andi_i32(var
, var
, 0xff00ff00);
245 tcg_gen_or_i32(var
, var
, tmp
);
246 tcg_temp_free_i32(tmp
);
249 /* Byteswap low halfword and sign extend. */
250 static void gen_revsh(TCGv var
)
252 tcg_gen_ext16u_i32(var
, var
);
253 tcg_gen_bswap16_i32(var
, var
);
254 tcg_gen_ext16s_i32(var
, var
);
257 /* Unsigned bitfield extract. */
258 static void gen_ubfx(TCGv var
, int shift
, uint32_t mask
)
261 tcg_gen_shri_i32(var
, var
, shift
);
262 tcg_gen_andi_i32(var
, var
, mask
);
265 /* Signed bitfield extract. */
266 static void gen_sbfx(TCGv var
, int shift
, int width
)
271 tcg_gen_sari_i32(var
, var
, shift
);
272 if (shift
+ width
< 32) {
273 signbit
= 1u << (width
- 1);
274 tcg_gen_andi_i32(var
, var
, (1u << width
) - 1);
275 tcg_gen_xori_i32(var
, var
, signbit
);
276 tcg_gen_subi_i32(var
, var
, signbit
);
280 /* Return (b << 32) + a. Mark inputs as dead */
281 static TCGv_i64
gen_addq_msw(TCGv_i64 a
, TCGv b
)
283 TCGv_i64 tmp64
= tcg_temp_new_i64();
285 tcg_gen_extu_i32_i64(tmp64
, b
);
286 tcg_temp_free_i32(b
);
287 tcg_gen_shli_i64(tmp64
, tmp64
, 32);
288 tcg_gen_add_i64(a
, tmp64
, a
);
290 tcg_temp_free_i64(tmp64
);
294 /* Return (b << 32) - a. Mark inputs as dead. */
295 static TCGv_i64
gen_subq_msw(TCGv_i64 a
, TCGv b
)
297 TCGv_i64 tmp64
= tcg_temp_new_i64();
299 tcg_gen_extu_i32_i64(tmp64
, b
);
300 tcg_temp_free_i32(b
);
301 tcg_gen_shli_i64(tmp64
, tmp64
, 32);
302 tcg_gen_sub_i64(a
, tmp64
, a
);
304 tcg_temp_free_i64(tmp64
);
308 /* 32x32->64 multiply. Marks inputs as dead. */
309 static TCGv_i64
gen_mulu_i64_i32(TCGv a
, TCGv b
)
311 TCGv lo
= tcg_temp_new_i32();
312 TCGv hi
= tcg_temp_new_i32();
315 tcg_gen_mulu2_i32(lo
, hi
, a
, b
);
316 tcg_temp_free_i32(a
);
317 tcg_temp_free_i32(b
);
319 ret
= tcg_temp_new_i64();
320 tcg_gen_concat_i32_i64(ret
, lo
, hi
);
327 static TCGv_i64
gen_muls_i64_i32(TCGv a
, TCGv b
)
329 TCGv lo
= tcg_temp_new_i32();
330 TCGv hi
= tcg_temp_new_i32();
333 tcg_gen_muls2_i32(lo
, hi
, a
, b
);
334 tcg_temp_free_i32(a
);
335 tcg_temp_free_i32(b
);
337 ret
= tcg_temp_new_i64();
338 tcg_gen_concat_i32_i64(ret
, lo
, hi
);
345 /* Swap low and high halfwords. */
346 static void gen_swap_half(TCGv var
)
348 TCGv tmp
= tcg_temp_new_i32();
349 tcg_gen_shri_i32(tmp
, var
, 16);
350 tcg_gen_shli_i32(var
, var
, 16);
351 tcg_gen_or_i32(var
, var
, tmp
);
352 tcg_temp_free_i32(tmp
);
355 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
356 tmp = (t0 ^ t1) & 0x8000;
359 t0 = (t0 + t1) ^ tmp;
362 static void gen_add16(TCGv t0
, TCGv t1
)
364 TCGv tmp
= tcg_temp_new_i32();
365 tcg_gen_xor_i32(tmp
, t0
, t1
);
366 tcg_gen_andi_i32(tmp
, tmp
, 0x8000);
367 tcg_gen_andi_i32(t0
, t0
, ~0x8000);
368 tcg_gen_andi_i32(t1
, t1
, ~0x8000);
369 tcg_gen_add_i32(t0
, t0
, t1
);
370 tcg_gen_xor_i32(t0
, t0
, tmp
);
371 tcg_temp_free_i32(tmp
);
372 tcg_temp_free_i32(t1
);
375 /* Set CF to the top bit of var. */
376 static void gen_set_CF_bit31(TCGv var
)
378 tcg_gen_shri_i32(cpu_CF
, var
, 31);
381 /* Set N and Z flags from var. */
382 static inline void gen_logic_CC(TCGv var
)
384 tcg_gen_mov_i32(cpu_NF
, var
);
385 tcg_gen_mov_i32(cpu_ZF
, var
);
389 static void gen_adc(TCGv t0
, TCGv t1
)
391 tcg_gen_add_i32(t0
, t0
, t1
);
392 tcg_gen_add_i32(t0
, t0
, cpu_CF
);
395 /* dest = T0 + T1 + CF. */
396 static void gen_add_carry(TCGv dest
, TCGv t0
, TCGv t1
)
398 tcg_gen_add_i32(dest
, t0
, t1
);
399 tcg_gen_add_i32(dest
, dest
, cpu_CF
);
402 /* dest = T0 - T1 + CF - 1. */
403 static void gen_sub_carry(TCGv dest
, TCGv t0
, TCGv t1
)
405 tcg_gen_sub_i32(dest
, t0
, t1
);
406 tcg_gen_add_i32(dest
, dest
, cpu_CF
);
407 tcg_gen_subi_i32(dest
, dest
, 1);
410 /* dest = T0 + T1. Compute C, N, V and Z flags */
411 static void gen_add_CC(TCGv dest
, TCGv t0
, TCGv t1
)
413 TCGv tmp
= tcg_temp_new_i32();
414 tcg_gen_movi_i32(tmp
, 0);
415 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, t0
, tmp
, t1
, tmp
);
416 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
417 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
418 tcg_gen_xor_i32(tmp
, t0
, t1
);
419 tcg_gen_andc_i32(cpu_VF
, cpu_VF
, tmp
);
420 tcg_temp_free_i32(tmp
);
421 tcg_gen_mov_i32(dest
, cpu_NF
);
424 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
425 static void gen_adc_CC(TCGv dest
, TCGv t0
, TCGv t1
)
427 TCGv tmp
= tcg_temp_new_i32();
428 if (TCG_TARGET_HAS_add2_i32
) {
429 tcg_gen_movi_i32(tmp
, 0);
430 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, t0
, tmp
, cpu_CF
, tmp
);
431 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, cpu_NF
, cpu_CF
, t1
, tmp
);
433 TCGv_i64 q0
= tcg_temp_new_i64();
434 TCGv_i64 q1
= tcg_temp_new_i64();
435 tcg_gen_extu_i32_i64(q0
, t0
);
436 tcg_gen_extu_i32_i64(q1
, t1
);
437 tcg_gen_add_i64(q0
, q0
, q1
);
438 tcg_gen_extu_i32_i64(q1
, cpu_CF
);
439 tcg_gen_add_i64(q0
, q0
, q1
);
440 tcg_gen_extr_i64_i32(cpu_NF
, cpu_CF
, q0
);
441 tcg_temp_free_i64(q0
);
442 tcg_temp_free_i64(q1
);
444 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
445 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
446 tcg_gen_xor_i32(tmp
, t0
, t1
);
447 tcg_gen_andc_i32(cpu_VF
, cpu_VF
, tmp
);
448 tcg_temp_free_i32(tmp
);
449 tcg_gen_mov_i32(dest
, cpu_NF
);
452 /* dest = T0 - T1. Compute C, N, V and Z flags */
453 static void gen_sub_CC(TCGv dest
, TCGv t0
, TCGv t1
)
456 tcg_gen_sub_i32(cpu_NF
, t0
, t1
);
457 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
458 tcg_gen_setcond_i32(TCG_COND_GEU
, cpu_CF
, t0
, t1
);
459 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
460 tmp
= tcg_temp_new_i32();
461 tcg_gen_xor_i32(tmp
, t0
, t1
);
462 tcg_gen_and_i32(cpu_VF
, cpu_VF
, tmp
);
463 tcg_temp_free_i32(tmp
);
464 tcg_gen_mov_i32(dest
, cpu_NF
);
467 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
468 static void gen_sbc_CC(TCGv dest
, TCGv t0
, TCGv t1
)
470 TCGv tmp
= tcg_temp_new_i32();
471 tcg_gen_not_i32(tmp
, t1
);
472 gen_adc_CC(dest
, t0
, tmp
);
476 #define GEN_SHIFT(name) \
477 static void gen_##name(TCGv dest, TCGv t0, TCGv t1) \
479 TCGv tmp1, tmp2, tmp3; \
480 tmp1 = tcg_temp_new_i32(); \
481 tcg_gen_andi_i32(tmp1, t1, 0xff); \
482 tmp2 = tcg_const_i32(0); \
483 tmp3 = tcg_const_i32(0x1f); \
484 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
485 tcg_temp_free_i32(tmp3); \
486 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
487 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
488 tcg_temp_free_i32(tmp2); \
489 tcg_temp_free_i32(tmp1); \
495 static void gen_sar(TCGv dest
, TCGv t0
, TCGv t1
)
498 tmp1
= tcg_temp_new_i32();
499 tcg_gen_andi_i32(tmp1
, t1
, 0xff);
500 tmp2
= tcg_const_i32(0x1f);
501 tcg_gen_movcond_i32(TCG_COND_GTU
, tmp1
, tmp1
, tmp2
, tmp2
, tmp1
);
502 tcg_temp_free_i32(tmp2
);
503 tcg_gen_sar_i32(dest
, t0
, tmp1
);
504 tcg_temp_free_i32(tmp1
);
507 static void tcg_gen_abs_i32(TCGv dest
, TCGv src
)
509 TCGv c0
= tcg_const_i32(0);
510 TCGv tmp
= tcg_temp_new_i32();
511 tcg_gen_neg_i32(tmp
, src
);
512 tcg_gen_movcond_i32(TCG_COND_GT
, dest
, src
, c0
, src
, tmp
);
513 tcg_temp_free_i32(c0
);
514 tcg_temp_free_i32(tmp
);
517 static void shifter_out_im(TCGv var
, int shift
)
520 tcg_gen_andi_i32(cpu_CF
, var
, 1);
522 tcg_gen_shri_i32(cpu_CF
, var
, shift
);
524 tcg_gen_andi_i32(cpu_CF
, cpu_CF
, 1);
529 /* Shift by immediate. Includes special handling for shift == 0. */
530 static inline void gen_arm_shift_im(TCGv var
, int shiftop
, int shift
, int flags
)
536 shifter_out_im(var
, 32 - shift
);
537 tcg_gen_shli_i32(var
, var
, shift
);
543 tcg_gen_shri_i32(cpu_CF
, var
, 31);
545 tcg_gen_movi_i32(var
, 0);
548 shifter_out_im(var
, shift
- 1);
549 tcg_gen_shri_i32(var
, var
, shift
);
556 shifter_out_im(var
, shift
- 1);
559 tcg_gen_sari_i32(var
, var
, shift
);
561 case 3: /* ROR/RRX */
564 shifter_out_im(var
, shift
- 1);
565 tcg_gen_rotri_i32(var
, var
, shift
); break;
567 TCGv tmp
= tcg_temp_new_i32();
568 tcg_gen_shli_i32(tmp
, cpu_CF
, 31);
570 shifter_out_im(var
, 0);
571 tcg_gen_shri_i32(var
, var
, 1);
572 tcg_gen_or_i32(var
, var
, tmp
);
573 tcg_temp_free_i32(tmp
);
578 static inline void gen_arm_shift_reg(TCGv var
, int shiftop
,
579 TCGv shift
, int flags
)
583 case 0: gen_helper_shl_cc(var
, cpu_env
, var
, shift
); break;
584 case 1: gen_helper_shr_cc(var
, cpu_env
, var
, shift
); break;
585 case 2: gen_helper_sar_cc(var
, cpu_env
, var
, shift
); break;
586 case 3: gen_helper_ror_cc(var
, cpu_env
, var
, shift
); break;
591 gen_shl(var
, var
, shift
);
594 gen_shr(var
, var
, shift
);
597 gen_sar(var
, var
, shift
);
599 case 3: tcg_gen_andi_i32(shift
, shift
, 0x1f);
600 tcg_gen_rotr_i32(var
, var
, shift
); break;
603 tcg_temp_free_i32(shift
);
606 #define PAS_OP(pfx) \
608 case 0: gen_pas_helper(glue(pfx,add16)); break; \
609 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
610 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
611 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
612 case 4: gen_pas_helper(glue(pfx,add8)); break; \
613 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
615 static void gen_arm_parallel_addsub(int op1
, int op2
, TCGv a
, TCGv b
)
620 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
622 tmp
= tcg_temp_new_ptr();
623 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
625 tcg_temp_free_ptr(tmp
);
628 tmp
= tcg_temp_new_ptr();
629 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
631 tcg_temp_free_ptr(tmp
);
633 #undef gen_pas_helper
634 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
647 #undef gen_pas_helper
652 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
653 #define PAS_OP(pfx) \
655 case 0: gen_pas_helper(glue(pfx,add8)); break; \
656 case 1: gen_pas_helper(glue(pfx,add16)); break; \
657 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
658 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
659 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
660 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
662 static void gen_thumb2_parallel_addsub(int op1
, int op2
, TCGv a
, TCGv b
)
667 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
669 tmp
= tcg_temp_new_ptr();
670 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
672 tcg_temp_free_ptr(tmp
);
675 tmp
= tcg_temp_new_ptr();
676 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
678 tcg_temp_free_ptr(tmp
);
680 #undef gen_pas_helper
681 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
694 #undef gen_pas_helper
699 static void gen_test_cc(int cc
, int label
)
706 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ZF
, 0, label
);
709 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_ZF
, 0, label
);
712 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_CF
, 0, label
);
715 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_CF
, 0, label
);
718 tcg_gen_brcondi_i32(TCG_COND_LT
, cpu_NF
, 0, label
);
721 tcg_gen_brcondi_i32(TCG_COND_GE
, cpu_NF
, 0, label
);
724 tcg_gen_brcondi_i32(TCG_COND_LT
, cpu_VF
, 0, label
);
727 tcg_gen_brcondi_i32(TCG_COND_GE
, cpu_VF
, 0, label
);
729 case 8: /* hi: C && !Z */
730 inv
= gen_new_label();
731 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_CF
, 0, inv
);
732 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_ZF
, 0, label
);
735 case 9: /* ls: !C || Z */
736 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_CF
, 0, label
);
737 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ZF
, 0, label
);
739 case 10: /* ge: N == V -> N ^ V == 0 */
740 tmp
= tcg_temp_new_i32();
741 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
742 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
743 tcg_temp_free_i32(tmp
);
745 case 11: /* lt: N != V -> N ^ V != 0 */
746 tmp
= tcg_temp_new_i32();
747 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
748 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
749 tcg_temp_free_i32(tmp
);
751 case 12: /* gt: !Z && N == V */
752 inv
= gen_new_label();
753 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ZF
, 0, inv
);
754 tmp
= tcg_temp_new_i32();
755 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
756 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
757 tcg_temp_free_i32(tmp
);
760 case 13: /* le: Z || N != V */
761 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ZF
, 0, label
);
762 tmp
= tcg_temp_new_i32();
763 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
764 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
765 tcg_temp_free_i32(tmp
);
768 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
773 static const uint8_t table_logic_cc
[16] = {
792 /* Set PC and Thumb state from an immediate address. */
793 static inline void gen_bx_im(DisasContext
*s
, uint32_t addr
)
797 s
->is_jmp
= DISAS_UPDATE
;
798 if (s
->thumb
!= (addr
& 1)) {
799 tmp
= tcg_temp_new_i32();
800 tcg_gen_movi_i32(tmp
, addr
& 1);
801 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUARMState
, thumb
));
802 tcg_temp_free_i32(tmp
);
804 tcg_gen_movi_i32(cpu_R
[15], addr
& ~1);
807 /* Set PC and Thumb state from var. var is marked as dead. */
808 static inline void gen_bx(DisasContext
*s
, TCGv var
)
810 s
->is_jmp
= DISAS_UPDATE
;
811 tcg_gen_andi_i32(cpu_R
[15], var
, ~1);
812 tcg_gen_andi_i32(var
, var
, 1);
813 store_cpu_field(var
, thumb
);
816 /* Variant of store_reg which uses branch&exchange logic when storing
817 to r15 in ARM architecture v7 and above. The source must be a temporary
818 and will be marked as dead. */
819 static inline void store_reg_bx(CPUARMState
*env
, DisasContext
*s
,
822 if (reg
== 15 && ENABLE_ARCH_7
) {
825 store_reg(s
, reg
, var
);
829 /* Variant of store_reg which uses branch&exchange logic when storing
830 * to r15 in ARM architecture v5T and above. This is used for storing
831 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
832 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
833 static inline void store_reg_from_load(CPUARMState
*env
, DisasContext
*s
,
836 if (reg
== 15 && ENABLE_ARCH_5
) {
839 store_reg(s
, reg
, var
);
843 static inline TCGv
gen_ld8s(TCGv addr
, int index
)
845 TCGv tmp
= tcg_temp_new_i32();
846 tcg_gen_qemu_ld8s(tmp
, addr
, index
);
849 static inline TCGv
gen_ld8u(TCGv addr
, int index
)
851 TCGv tmp
= tcg_temp_new_i32();
852 tcg_gen_qemu_ld8u(tmp
, addr
, index
);
855 static inline TCGv
gen_ld16s(TCGv addr
, int index
)
857 TCGv tmp
= tcg_temp_new_i32();
858 tcg_gen_qemu_ld16s(tmp
, addr
, index
);
861 static inline TCGv
gen_ld16u(TCGv addr
, int index
)
863 TCGv tmp
= tcg_temp_new_i32();
864 tcg_gen_qemu_ld16u(tmp
, addr
, index
);
867 static inline TCGv
gen_ld32(TCGv addr
, int index
)
869 TCGv tmp
= tcg_temp_new_i32();
870 tcg_gen_qemu_ld32u(tmp
, addr
, index
);
873 static inline TCGv_i64
gen_ld64(TCGv addr
, int index
)
875 TCGv_i64 tmp
= tcg_temp_new_i64();
876 tcg_gen_qemu_ld64(tmp
, addr
, index
);
879 static inline void gen_st8(TCGv val
, TCGv addr
, int index
)
881 tcg_gen_qemu_st8(val
, addr
, index
);
882 tcg_temp_free_i32(val
);
884 static inline void gen_st16(TCGv val
, TCGv addr
, int index
)
886 tcg_gen_qemu_st16(val
, addr
, index
);
887 tcg_temp_free_i32(val
);
889 static inline void gen_st32(TCGv val
, TCGv addr
, int index
)
891 tcg_gen_qemu_st32(val
, addr
, index
);
892 tcg_temp_free_i32(val
);
894 static inline void gen_st64(TCGv_i64 val
, TCGv addr
, int index
)
896 tcg_gen_qemu_st64(val
, addr
, index
);
897 tcg_temp_free_i64(val
);
900 static inline void gen_set_pc_im(uint32_t val
)
902 tcg_gen_movi_i32(cpu_R
[15], val
);
905 /* Force a TB lookup after an instruction that changes the CPU state. */
906 static inline void gen_lookup_tb(DisasContext
*s
)
908 tcg_gen_movi_i32(cpu_R
[15], s
->pc
& ~1);
909 s
->is_jmp
= DISAS_UPDATE
;
912 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
915 int val
, rm
, shift
, shiftop
;
918 if (!(insn
& (1 << 25))) {
921 if (!(insn
& (1 << 23)))
924 tcg_gen_addi_i32(var
, var
, val
);
928 shift
= (insn
>> 7) & 0x1f;
929 shiftop
= (insn
>> 5) & 3;
930 offset
= load_reg(s
, rm
);
931 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
932 if (!(insn
& (1 << 23)))
933 tcg_gen_sub_i32(var
, var
, offset
);
935 tcg_gen_add_i32(var
, var
, offset
);
936 tcg_temp_free_i32(offset
);
940 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
946 if (insn
& (1 << 22)) {
948 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
949 if (!(insn
& (1 << 23)))
953 tcg_gen_addi_i32(var
, var
, val
);
957 tcg_gen_addi_i32(var
, var
, extra
);
959 offset
= load_reg(s
, rm
);
960 if (!(insn
& (1 << 23)))
961 tcg_gen_sub_i32(var
, var
, offset
);
963 tcg_gen_add_i32(var
, var
, offset
);
964 tcg_temp_free_i32(offset
);
968 static TCGv_ptr
get_fpstatus_ptr(int neon
)
970 TCGv_ptr statusptr
= tcg_temp_new_ptr();
973 offset
= offsetof(CPUARMState
, vfp
.standard_fp_status
);
975 offset
= offsetof(CPUARMState
, vfp
.fp_status
);
977 tcg_gen_addi_ptr(statusptr
, cpu_env
, offset
);
981 #define VFP_OP2(name) \
982 static inline void gen_vfp_##name(int dp) \
984 TCGv_ptr fpst = get_fpstatus_ptr(0); \
986 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
988 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
990 tcg_temp_free_ptr(fpst); \
1000 static inline void gen_vfp_F1_mul(int dp
)
1002 /* Like gen_vfp_mul() but put result in F1 */
1003 TCGv_ptr fpst
= get_fpstatus_ptr(0);
1005 gen_helper_vfp_muld(cpu_F1d
, cpu_F0d
, cpu_F1d
, fpst
);
1007 gen_helper_vfp_muls(cpu_F1s
, cpu_F0s
, cpu_F1s
, fpst
);
1009 tcg_temp_free_ptr(fpst
);
1012 static inline void gen_vfp_F1_neg(int dp
)
1014 /* Like gen_vfp_neg() but put result in F1 */
1016 gen_helper_vfp_negd(cpu_F1d
, cpu_F0d
);
1018 gen_helper_vfp_negs(cpu_F1s
, cpu_F0s
);
1022 static inline void gen_vfp_abs(int dp
)
1025 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
1027 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
1030 static inline void gen_vfp_neg(int dp
)
1033 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
1035 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
1038 static inline void gen_vfp_sqrt(int dp
)
1041 gen_helper_vfp_sqrtd(cpu_F0d
, cpu_F0d
, cpu_env
);
1043 gen_helper_vfp_sqrts(cpu_F0s
, cpu_F0s
, cpu_env
);
1046 static inline void gen_vfp_cmp(int dp
)
1049 gen_helper_vfp_cmpd(cpu_F0d
, cpu_F1d
, cpu_env
);
1051 gen_helper_vfp_cmps(cpu_F0s
, cpu_F1s
, cpu_env
);
1054 static inline void gen_vfp_cmpe(int dp
)
1057 gen_helper_vfp_cmped(cpu_F0d
, cpu_F1d
, cpu_env
);
1059 gen_helper_vfp_cmpes(cpu_F0s
, cpu_F1s
, cpu_env
);
1062 static inline void gen_vfp_F1_ld0(int dp
)
1065 tcg_gen_movi_i64(cpu_F1d
, 0);
1067 tcg_gen_movi_i32(cpu_F1s
, 0);
1070 #define VFP_GEN_ITOF(name) \
1071 static inline void gen_vfp_##name(int dp, int neon) \
1073 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1075 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1077 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1079 tcg_temp_free_ptr(statusptr); \
1086 #define VFP_GEN_FTOI(name) \
1087 static inline void gen_vfp_##name(int dp, int neon) \
1089 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1091 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1093 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1095 tcg_temp_free_ptr(statusptr); \
1104 #define VFP_GEN_FIX(name) \
1105 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1107 TCGv tmp_shift = tcg_const_i32(shift); \
1108 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1110 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1112 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1114 tcg_temp_free_i32(tmp_shift); \
1115 tcg_temp_free_ptr(statusptr); \
1127 static inline void gen_vfp_ld(DisasContext
*s
, int dp
, TCGv addr
)
1130 tcg_gen_qemu_ld64(cpu_F0d
, addr
, IS_USER(s
));
1132 tcg_gen_qemu_ld32u(cpu_F0s
, addr
, IS_USER(s
));
1135 static inline void gen_vfp_st(DisasContext
*s
, int dp
, TCGv addr
)
1138 tcg_gen_qemu_st64(cpu_F0d
, addr
, IS_USER(s
));
1140 tcg_gen_qemu_st32(cpu_F0s
, addr
, IS_USER(s
));
1144 vfp_reg_offset (int dp
, int reg
)
1147 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
1149 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1150 + offsetof(CPU_DoubleU
, l
.upper
);
1152 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1153 + offsetof(CPU_DoubleU
, l
.lower
);
1157 /* Return the offset of a 32-bit piece of a NEON register.
1158 zero is the least significant end of the register. */
1160 neon_reg_offset (int reg
, int n
)
1164 return vfp_reg_offset(0, sreg
);
1167 static TCGv
neon_load_reg(int reg
, int pass
)
1169 TCGv tmp
= tcg_temp_new_i32();
1170 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1174 static void neon_store_reg(int reg
, int pass
, TCGv var
)
1176 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1177 tcg_temp_free_i32(var
);
1180 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1182 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1185 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1187 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1190 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1191 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1192 #define tcg_gen_st_f32 tcg_gen_st_i32
1193 #define tcg_gen_st_f64 tcg_gen_st_i64
1195 static inline void gen_mov_F0_vreg(int dp
, int reg
)
1198 tcg_gen_ld_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1200 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1203 static inline void gen_mov_F1_vreg(int dp
, int reg
)
1206 tcg_gen_ld_f64(cpu_F1d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1208 tcg_gen_ld_f32(cpu_F1s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1211 static inline void gen_mov_vreg_F0(int dp
, int reg
)
1214 tcg_gen_st_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1216 tcg_gen_st_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1219 #define ARM_CP_RW_BIT (1 << 20)
1221 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1223 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1226 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1228 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1231 static inline TCGv
iwmmxt_load_creg(int reg
)
1233 TCGv var
= tcg_temp_new_i32();
1234 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1238 static inline void iwmmxt_store_creg(int reg
, TCGv var
)
1240 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1241 tcg_temp_free_i32(var
);
1244 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1246 iwmmxt_store_reg(cpu_M0
, rn
);
1249 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1251 iwmmxt_load_reg(cpu_M0
, rn
);
1254 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1256 iwmmxt_load_reg(cpu_V1
, rn
);
1257 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1260 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1262 iwmmxt_load_reg(cpu_V1
, rn
);
1263 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1266 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1268 iwmmxt_load_reg(cpu_V1
, rn
);
1269 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1272 #define IWMMXT_OP(name) \
1273 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1275 iwmmxt_load_reg(cpu_V1, rn); \
1276 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1279 #define IWMMXT_OP_ENV(name) \
1280 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1282 iwmmxt_load_reg(cpu_V1, rn); \
1283 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1286 #define IWMMXT_OP_ENV_SIZE(name) \
1287 IWMMXT_OP_ENV(name##b) \
1288 IWMMXT_OP_ENV(name##w) \
1289 IWMMXT_OP_ENV(name##l)
1291 #define IWMMXT_OP_ENV1(name) \
1292 static inline void gen_op_iwmmxt_##name##_M0(void) \
1294 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1308 IWMMXT_OP_ENV_SIZE(unpackl
)
1309 IWMMXT_OP_ENV_SIZE(unpackh
)
1311 IWMMXT_OP_ENV1(unpacklub
)
1312 IWMMXT_OP_ENV1(unpackluw
)
1313 IWMMXT_OP_ENV1(unpacklul
)
1314 IWMMXT_OP_ENV1(unpackhub
)
1315 IWMMXT_OP_ENV1(unpackhuw
)
1316 IWMMXT_OP_ENV1(unpackhul
)
1317 IWMMXT_OP_ENV1(unpacklsb
)
1318 IWMMXT_OP_ENV1(unpacklsw
)
1319 IWMMXT_OP_ENV1(unpacklsl
)
1320 IWMMXT_OP_ENV1(unpackhsb
)
1321 IWMMXT_OP_ENV1(unpackhsw
)
1322 IWMMXT_OP_ENV1(unpackhsl
)
1324 IWMMXT_OP_ENV_SIZE(cmpeq
)
1325 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1326 IWMMXT_OP_ENV_SIZE(cmpgts
)
1328 IWMMXT_OP_ENV_SIZE(mins
)
1329 IWMMXT_OP_ENV_SIZE(minu
)
1330 IWMMXT_OP_ENV_SIZE(maxs
)
1331 IWMMXT_OP_ENV_SIZE(maxu
)
1333 IWMMXT_OP_ENV_SIZE(subn
)
1334 IWMMXT_OP_ENV_SIZE(addn
)
1335 IWMMXT_OP_ENV_SIZE(subu
)
1336 IWMMXT_OP_ENV_SIZE(addu
)
1337 IWMMXT_OP_ENV_SIZE(subs
)
1338 IWMMXT_OP_ENV_SIZE(adds
)
1340 IWMMXT_OP_ENV(avgb0
)
1341 IWMMXT_OP_ENV(avgb1
)
1342 IWMMXT_OP_ENV(avgw0
)
1343 IWMMXT_OP_ENV(avgw1
)
1347 IWMMXT_OP_ENV(packuw
)
1348 IWMMXT_OP_ENV(packul
)
1349 IWMMXT_OP_ENV(packuq
)
1350 IWMMXT_OP_ENV(packsw
)
1351 IWMMXT_OP_ENV(packsl
)
1352 IWMMXT_OP_ENV(packsq
)
1354 static void gen_op_iwmmxt_set_mup(void)
1357 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1358 tcg_gen_ori_i32(tmp
, tmp
, 2);
1359 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1362 static void gen_op_iwmmxt_set_cup(void)
1365 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1366 tcg_gen_ori_i32(tmp
, tmp
, 1);
1367 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1370 static void gen_op_iwmmxt_setpsr_nz(void)
1372 TCGv tmp
= tcg_temp_new_i32();
1373 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1374 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1377 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1379 iwmmxt_load_reg(cpu_V1
, rn
);
1380 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1381 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1384 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
, TCGv dest
)
1390 rd
= (insn
>> 16) & 0xf;
1391 tmp
= load_reg(s
, rd
);
1393 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1394 if (insn
& (1 << 24)) {
1396 if (insn
& (1 << 23))
1397 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1399 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1400 tcg_gen_mov_i32(dest
, tmp
);
1401 if (insn
& (1 << 21))
1402 store_reg(s
, rd
, tmp
);
1404 tcg_temp_free_i32(tmp
);
1405 } else if (insn
& (1 << 21)) {
1407 tcg_gen_mov_i32(dest
, tmp
);
1408 if (insn
& (1 << 23))
1409 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1411 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1412 store_reg(s
, rd
, tmp
);
1413 } else if (!(insn
& (1 << 23)))
1418 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv dest
)
1420 int rd
= (insn
>> 0) & 0xf;
1423 if (insn
& (1 << 8)) {
1424 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1427 tmp
= iwmmxt_load_creg(rd
);
1430 tmp
= tcg_temp_new_i32();
1431 iwmmxt_load_reg(cpu_V0
, rd
);
1432 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
1434 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1435 tcg_gen_mov_i32(dest
, tmp
);
1436 tcg_temp_free_i32(tmp
);
1440 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1441 (ie. an undefined instruction). */
1442 static int disas_iwmmxt_insn(CPUARMState
*env
, DisasContext
*s
, uint32_t insn
)
1445 int rdhi
, rdlo
, rd0
, rd1
, i
;
1447 TCGv tmp
, tmp2
, tmp3
;
1449 if ((insn
& 0x0e000e00) == 0x0c000000) {
1450 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1452 rdlo
= (insn
>> 12) & 0xf;
1453 rdhi
= (insn
>> 16) & 0xf;
1454 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1455 iwmmxt_load_reg(cpu_V0
, wrd
);
1456 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1457 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1458 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1459 } else { /* TMCRR */
1460 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1461 iwmmxt_store_reg(cpu_V0
, wrd
);
1462 gen_op_iwmmxt_set_mup();
1467 wrd
= (insn
>> 12) & 0xf;
1468 addr
= tcg_temp_new_i32();
1469 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1470 tcg_temp_free_i32(addr
);
1473 if (insn
& ARM_CP_RW_BIT
) {
1474 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1475 tmp
= tcg_temp_new_i32();
1476 tcg_gen_qemu_ld32u(tmp
, addr
, IS_USER(s
));
1477 iwmmxt_store_creg(wrd
, tmp
);
1480 if (insn
& (1 << 8)) {
1481 if (insn
& (1 << 22)) { /* WLDRD */
1482 tcg_gen_qemu_ld64(cpu_M0
, addr
, IS_USER(s
));
1484 } else { /* WLDRW wRd */
1485 tmp
= gen_ld32(addr
, IS_USER(s
));
1488 if (insn
& (1 << 22)) { /* WLDRH */
1489 tmp
= gen_ld16u(addr
, IS_USER(s
));
1490 } else { /* WLDRB */
1491 tmp
= gen_ld8u(addr
, IS_USER(s
));
1495 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1496 tcg_temp_free_i32(tmp
);
1498 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1501 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1502 tmp
= iwmmxt_load_creg(wrd
);
1503 gen_st32(tmp
, addr
, IS_USER(s
));
1505 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1506 tmp
= tcg_temp_new_i32();
1507 if (insn
& (1 << 8)) {
1508 if (insn
& (1 << 22)) { /* WSTRD */
1509 tcg_temp_free_i32(tmp
);
1510 tcg_gen_qemu_st64(cpu_M0
, addr
, IS_USER(s
));
1511 } else { /* WSTRW wRd */
1512 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1513 gen_st32(tmp
, addr
, IS_USER(s
));
1516 if (insn
& (1 << 22)) { /* WSTRH */
1517 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1518 gen_st16(tmp
, addr
, IS_USER(s
));
1519 } else { /* WSTRB */
1520 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1521 gen_st8(tmp
, addr
, IS_USER(s
));
1526 tcg_temp_free_i32(addr
);
1530 if ((insn
& 0x0f000000) != 0x0e000000)
1533 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1534 case 0x000: /* WOR */
1535 wrd
= (insn
>> 12) & 0xf;
1536 rd0
= (insn
>> 0) & 0xf;
1537 rd1
= (insn
>> 16) & 0xf;
1538 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1539 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1540 gen_op_iwmmxt_setpsr_nz();
1541 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1545 case 0x011: /* TMCR */
1548 rd
= (insn
>> 12) & 0xf;
1549 wrd
= (insn
>> 16) & 0xf;
1551 case ARM_IWMMXT_wCID
:
1552 case ARM_IWMMXT_wCASF
:
1554 case ARM_IWMMXT_wCon
:
1555 gen_op_iwmmxt_set_cup();
1557 case ARM_IWMMXT_wCSSF
:
1558 tmp
= iwmmxt_load_creg(wrd
);
1559 tmp2
= load_reg(s
, rd
);
1560 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1561 tcg_temp_free_i32(tmp2
);
1562 iwmmxt_store_creg(wrd
, tmp
);
1564 case ARM_IWMMXT_wCGR0
:
1565 case ARM_IWMMXT_wCGR1
:
1566 case ARM_IWMMXT_wCGR2
:
1567 case ARM_IWMMXT_wCGR3
:
1568 gen_op_iwmmxt_set_cup();
1569 tmp
= load_reg(s
, rd
);
1570 iwmmxt_store_creg(wrd
, tmp
);
1576 case 0x100: /* WXOR */
1577 wrd
= (insn
>> 12) & 0xf;
1578 rd0
= (insn
>> 0) & 0xf;
1579 rd1
= (insn
>> 16) & 0xf;
1580 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1581 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1582 gen_op_iwmmxt_setpsr_nz();
1583 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1584 gen_op_iwmmxt_set_mup();
1585 gen_op_iwmmxt_set_cup();
1587 case 0x111: /* TMRC */
1590 rd
= (insn
>> 12) & 0xf;
1591 wrd
= (insn
>> 16) & 0xf;
1592 tmp
= iwmmxt_load_creg(wrd
);
1593 store_reg(s
, rd
, tmp
);
1595 case 0x300: /* WANDN */
1596 wrd
= (insn
>> 12) & 0xf;
1597 rd0
= (insn
>> 0) & 0xf;
1598 rd1
= (insn
>> 16) & 0xf;
1599 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1600 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1601 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1602 gen_op_iwmmxt_setpsr_nz();
1603 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1604 gen_op_iwmmxt_set_mup();
1605 gen_op_iwmmxt_set_cup();
1607 case 0x200: /* WAND */
1608 wrd
= (insn
>> 12) & 0xf;
1609 rd0
= (insn
>> 0) & 0xf;
1610 rd1
= (insn
>> 16) & 0xf;
1611 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1612 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1613 gen_op_iwmmxt_setpsr_nz();
1614 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1615 gen_op_iwmmxt_set_mup();
1616 gen_op_iwmmxt_set_cup();
1618 case 0x810: case 0xa10: /* WMADD */
1619 wrd
= (insn
>> 12) & 0xf;
1620 rd0
= (insn
>> 0) & 0xf;
1621 rd1
= (insn
>> 16) & 0xf;
1622 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1623 if (insn
& (1 << 21))
1624 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1626 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1627 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1628 gen_op_iwmmxt_set_mup();
1630 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1631 wrd
= (insn
>> 12) & 0xf;
1632 rd0
= (insn
>> 16) & 0xf;
1633 rd1
= (insn
>> 0) & 0xf;
1634 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1635 switch ((insn
>> 22) & 3) {
1637 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1640 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1643 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1648 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1649 gen_op_iwmmxt_set_mup();
1650 gen_op_iwmmxt_set_cup();
1652 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1653 wrd
= (insn
>> 12) & 0xf;
1654 rd0
= (insn
>> 16) & 0xf;
1655 rd1
= (insn
>> 0) & 0xf;
1656 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1657 switch ((insn
>> 22) & 3) {
1659 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1662 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1665 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1670 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1671 gen_op_iwmmxt_set_mup();
1672 gen_op_iwmmxt_set_cup();
1674 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1675 wrd
= (insn
>> 12) & 0xf;
1676 rd0
= (insn
>> 16) & 0xf;
1677 rd1
= (insn
>> 0) & 0xf;
1678 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1679 if (insn
& (1 << 22))
1680 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1682 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1683 if (!(insn
& (1 << 20)))
1684 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1685 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1686 gen_op_iwmmxt_set_mup();
1688 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1689 wrd
= (insn
>> 12) & 0xf;
1690 rd0
= (insn
>> 16) & 0xf;
1691 rd1
= (insn
>> 0) & 0xf;
1692 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1693 if (insn
& (1 << 21)) {
1694 if (insn
& (1 << 20))
1695 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1697 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1699 if (insn
& (1 << 20))
1700 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1702 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1704 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1705 gen_op_iwmmxt_set_mup();
1707 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1708 wrd
= (insn
>> 12) & 0xf;
1709 rd0
= (insn
>> 16) & 0xf;
1710 rd1
= (insn
>> 0) & 0xf;
1711 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1712 if (insn
& (1 << 21))
1713 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1715 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1716 if (!(insn
& (1 << 20))) {
1717 iwmmxt_load_reg(cpu_V1
, wrd
);
1718 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1720 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1721 gen_op_iwmmxt_set_mup();
1723 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1724 wrd
= (insn
>> 12) & 0xf;
1725 rd0
= (insn
>> 16) & 0xf;
1726 rd1
= (insn
>> 0) & 0xf;
1727 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1728 switch ((insn
>> 22) & 3) {
1730 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1733 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1736 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1741 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1742 gen_op_iwmmxt_set_mup();
1743 gen_op_iwmmxt_set_cup();
1745 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1746 wrd
= (insn
>> 12) & 0xf;
1747 rd0
= (insn
>> 16) & 0xf;
1748 rd1
= (insn
>> 0) & 0xf;
1749 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1750 if (insn
& (1 << 22)) {
1751 if (insn
& (1 << 20))
1752 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1754 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1756 if (insn
& (1 << 20))
1757 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1759 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1761 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1762 gen_op_iwmmxt_set_mup();
1763 gen_op_iwmmxt_set_cup();
1765 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1766 wrd
= (insn
>> 12) & 0xf;
1767 rd0
= (insn
>> 16) & 0xf;
1768 rd1
= (insn
>> 0) & 0xf;
1769 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1770 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1771 tcg_gen_andi_i32(tmp
, tmp
, 7);
1772 iwmmxt_load_reg(cpu_V1
, rd1
);
1773 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
1774 tcg_temp_free_i32(tmp
);
1775 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1776 gen_op_iwmmxt_set_mup();
1778 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1779 if (((insn
>> 6) & 3) == 3)
1781 rd
= (insn
>> 12) & 0xf;
1782 wrd
= (insn
>> 16) & 0xf;
1783 tmp
= load_reg(s
, rd
);
1784 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1785 switch ((insn
>> 6) & 3) {
1787 tmp2
= tcg_const_i32(0xff);
1788 tmp3
= tcg_const_i32((insn
& 7) << 3);
1791 tmp2
= tcg_const_i32(0xffff);
1792 tmp3
= tcg_const_i32((insn
& 3) << 4);
1795 tmp2
= tcg_const_i32(0xffffffff);
1796 tmp3
= tcg_const_i32((insn
& 1) << 5);
1802 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
1803 tcg_temp_free(tmp3
);
1804 tcg_temp_free(tmp2
);
1805 tcg_temp_free_i32(tmp
);
1806 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1807 gen_op_iwmmxt_set_mup();
1809 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1810 rd
= (insn
>> 12) & 0xf;
1811 wrd
= (insn
>> 16) & 0xf;
1812 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
1814 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1815 tmp
= tcg_temp_new_i32();
1816 switch ((insn
>> 22) & 3) {
1818 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
1819 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1821 tcg_gen_ext8s_i32(tmp
, tmp
);
1823 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
1827 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
1828 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1830 tcg_gen_ext16s_i32(tmp
, tmp
);
1832 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
1836 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
1837 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1840 store_reg(s
, rd
, tmp
);
1842 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1843 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1845 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1846 switch ((insn
>> 22) & 3) {
1848 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
1851 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
1854 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
1857 tcg_gen_shli_i32(tmp
, tmp
, 28);
1859 tcg_temp_free_i32(tmp
);
1861 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1862 if (((insn
>> 6) & 3) == 3)
1864 rd
= (insn
>> 12) & 0xf;
1865 wrd
= (insn
>> 16) & 0xf;
1866 tmp
= load_reg(s
, rd
);
1867 switch ((insn
>> 6) & 3) {
1869 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
1872 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
1875 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
1878 tcg_temp_free_i32(tmp
);
1879 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1880 gen_op_iwmmxt_set_mup();
1882 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1883 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1885 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1886 tmp2
= tcg_temp_new_i32();
1887 tcg_gen_mov_i32(tmp2
, tmp
);
1888 switch ((insn
>> 22) & 3) {
1890 for (i
= 0; i
< 7; i
++) {
1891 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1892 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1896 for (i
= 0; i
< 3; i
++) {
1897 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1898 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1902 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1903 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1907 tcg_temp_free_i32(tmp2
);
1908 tcg_temp_free_i32(tmp
);
1910 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1911 wrd
= (insn
>> 12) & 0xf;
1912 rd0
= (insn
>> 16) & 0xf;
1913 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1914 switch ((insn
>> 22) & 3) {
1916 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
1919 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
1922 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
1927 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1928 gen_op_iwmmxt_set_mup();
1930 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1931 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1933 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1934 tmp2
= tcg_temp_new_i32();
1935 tcg_gen_mov_i32(tmp2
, tmp
);
1936 switch ((insn
>> 22) & 3) {
1938 for (i
= 0; i
< 7; i
++) {
1939 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1940 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1944 for (i
= 0; i
< 3; i
++) {
1945 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1946 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1950 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1951 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1955 tcg_temp_free_i32(tmp2
);
1956 tcg_temp_free_i32(tmp
);
1958 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1959 rd
= (insn
>> 12) & 0xf;
1960 rd0
= (insn
>> 16) & 0xf;
1961 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
1963 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1964 tmp
= tcg_temp_new_i32();
1965 switch ((insn
>> 22) & 3) {
1967 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
1970 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
1973 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
1976 store_reg(s
, rd
, tmp
);
1978 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1979 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1980 wrd
= (insn
>> 12) & 0xf;
1981 rd0
= (insn
>> 16) & 0xf;
1982 rd1
= (insn
>> 0) & 0xf;
1983 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1984 switch ((insn
>> 22) & 3) {
1986 if (insn
& (1 << 21))
1987 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
1989 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
1992 if (insn
& (1 << 21))
1993 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
1995 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
1998 if (insn
& (1 << 21))
1999 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2001 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2006 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2007 gen_op_iwmmxt_set_mup();
2008 gen_op_iwmmxt_set_cup();
2010 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2011 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2012 wrd
= (insn
>> 12) & 0xf;
2013 rd0
= (insn
>> 16) & 0xf;
2014 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2015 switch ((insn
>> 22) & 3) {
2017 if (insn
& (1 << 21))
2018 gen_op_iwmmxt_unpacklsb_M0();
2020 gen_op_iwmmxt_unpacklub_M0();
2023 if (insn
& (1 << 21))
2024 gen_op_iwmmxt_unpacklsw_M0();
2026 gen_op_iwmmxt_unpackluw_M0();
2029 if (insn
& (1 << 21))
2030 gen_op_iwmmxt_unpacklsl_M0();
2032 gen_op_iwmmxt_unpacklul_M0();
2037 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2038 gen_op_iwmmxt_set_mup();
2039 gen_op_iwmmxt_set_cup();
2041 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2042 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2043 wrd
= (insn
>> 12) & 0xf;
2044 rd0
= (insn
>> 16) & 0xf;
2045 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2046 switch ((insn
>> 22) & 3) {
2048 if (insn
& (1 << 21))
2049 gen_op_iwmmxt_unpackhsb_M0();
2051 gen_op_iwmmxt_unpackhub_M0();
2054 if (insn
& (1 << 21))
2055 gen_op_iwmmxt_unpackhsw_M0();
2057 gen_op_iwmmxt_unpackhuw_M0();
2060 if (insn
& (1 << 21))
2061 gen_op_iwmmxt_unpackhsl_M0();
2063 gen_op_iwmmxt_unpackhul_M0();
2068 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2069 gen_op_iwmmxt_set_mup();
2070 gen_op_iwmmxt_set_cup();
2072 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2073 case 0x214: case 0x614: case 0xa14: case 0xe14:
2074 if (((insn
>> 22) & 3) == 0)
2076 wrd
= (insn
>> 12) & 0xf;
2077 rd0
= (insn
>> 16) & 0xf;
2078 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2079 tmp
= tcg_temp_new_i32();
2080 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2081 tcg_temp_free_i32(tmp
);
2084 switch ((insn
>> 22) & 3) {
2086 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2089 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2092 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2095 tcg_temp_free_i32(tmp
);
2096 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2097 gen_op_iwmmxt_set_mup();
2098 gen_op_iwmmxt_set_cup();
2100 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2101 case 0x014: case 0x414: case 0x814: case 0xc14:
2102 if (((insn
>> 22) & 3) == 0)
2104 wrd
= (insn
>> 12) & 0xf;
2105 rd0
= (insn
>> 16) & 0xf;
2106 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2107 tmp
= tcg_temp_new_i32();
2108 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2109 tcg_temp_free_i32(tmp
);
2112 switch ((insn
>> 22) & 3) {
2114 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2117 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2120 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2123 tcg_temp_free_i32(tmp
);
2124 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2125 gen_op_iwmmxt_set_mup();
2126 gen_op_iwmmxt_set_cup();
2128 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2129 case 0x114: case 0x514: case 0x914: case 0xd14:
2130 if (((insn
>> 22) & 3) == 0)
2132 wrd
= (insn
>> 12) & 0xf;
2133 rd0
= (insn
>> 16) & 0xf;
2134 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2135 tmp
= tcg_temp_new_i32();
2136 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2137 tcg_temp_free_i32(tmp
);
2140 switch ((insn
>> 22) & 3) {
2142 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2145 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2148 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2151 tcg_temp_free_i32(tmp
);
2152 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2153 gen_op_iwmmxt_set_mup();
2154 gen_op_iwmmxt_set_cup();
2156 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2157 case 0x314: case 0x714: case 0xb14: case 0xf14:
2158 if (((insn
>> 22) & 3) == 0)
2160 wrd
= (insn
>> 12) & 0xf;
2161 rd0
= (insn
>> 16) & 0xf;
2162 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2163 tmp
= tcg_temp_new_i32();
2164 switch ((insn
>> 22) & 3) {
2166 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2167 tcg_temp_free_i32(tmp
);
2170 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2173 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2174 tcg_temp_free_i32(tmp
);
2177 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2180 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2181 tcg_temp_free_i32(tmp
);
2184 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2187 tcg_temp_free_i32(tmp
);
2188 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2189 gen_op_iwmmxt_set_mup();
2190 gen_op_iwmmxt_set_cup();
2192 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2193 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2194 wrd
= (insn
>> 12) & 0xf;
2195 rd0
= (insn
>> 16) & 0xf;
2196 rd1
= (insn
>> 0) & 0xf;
2197 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2198 switch ((insn
>> 22) & 3) {
2200 if (insn
& (1 << 21))
2201 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2203 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2206 if (insn
& (1 << 21))
2207 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2209 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2212 if (insn
& (1 << 21))
2213 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2215 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2220 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2221 gen_op_iwmmxt_set_mup();
2223 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2224 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2225 wrd
= (insn
>> 12) & 0xf;
2226 rd0
= (insn
>> 16) & 0xf;
2227 rd1
= (insn
>> 0) & 0xf;
2228 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2229 switch ((insn
>> 22) & 3) {
2231 if (insn
& (1 << 21))
2232 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2234 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2237 if (insn
& (1 << 21))
2238 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2240 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2243 if (insn
& (1 << 21))
2244 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2246 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2251 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2252 gen_op_iwmmxt_set_mup();
2254 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2255 case 0x402: case 0x502: case 0x602: case 0x702:
2256 wrd
= (insn
>> 12) & 0xf;
2257 rd0
= (insn
>> 16) & 0xf;
2258 rd1
= (insn
>> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2260 tmp
= tcg_const_i32((insn
>> 20) & 3);
2261 iwmmxt_load_reg(cpu_V1
, rd1
);
2262 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2264 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2265 gen_op_iwmmxt_set_mup();
2267 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2268 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2269 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2270 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2271 wrd
= (insn
>> 12) & 0xf;
2272 rd0
= (insn
>> 16) & 0xf;
2273 rd1
= (insn
>> 0) & 0xf;
2274 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2275 switch ((insn
>> 20) & 0xf) {
2277 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2280 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2283 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2286 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2289 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2292 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2295 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2298 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2301 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2306 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2307 gen_op_iwmmxt_set_mup();
2308 gen_op_iwmmxt_set_cup();
2310 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2311 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2312 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2313 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2314 wrd
= (insn
>> 12) & 0xf;
2315 rd0
= (insn
>> 16) & 0xf;
2316 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2317 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2318 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2320 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2321 gen_op_iwmmxt_set_mup();
2322 gen_op_iwmmxt_set_cup();
2324 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2325 case 0x418: case 0x518: case 0x618: case 0x718:
2326 case 0x818: case 0x918: case 0xa18: case 0xb18:
2327 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2328 wrd
= (insn
>> 12) & 0xf;
2329 rd0
= (insn
>> 16) & 0xf;
2330 rd1
= (insn
>> 0) & 0xf;
2331 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2332 switch ((insn
>> 20) & 0xf) {
2334 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2337 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2340 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2343 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2346 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2349 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2352 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2355 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2358 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2363 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2367 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2368 case 0x408: case 0x508: case 0x608: case 0x708:
2369 case 0x808: case 0x908: case 0xa08: case 0xb08:
2370 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2371 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2373 wrd
= (insn
>> 12) & 0xf;
2374 rd0
= (insn
>> 16) & 0xf;
2375 rd1
= (insn
>> 0) & 0xf;
2376 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2377 switch ((insn
>> 22) & 3) {
2379 if (insn
& (1 << 21))
2380 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2382 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2385 if (insn
& (1 << 21))
2386 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2388 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2391 if (insn
& (1 << 21))
2392 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2394 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2397 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2398 gen_op_iwmmxt_set_mup();
2399 gen_op_iwmmxt_set_cup();
2401 case 0x201: case 0x203: case 0x205: case 0x207:
2402 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2403 case 0x211: case 0x213: case 0x215: case 0x217:
2404 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2405 wrd
= (insn
>> 5) & 0xf;
2406 rd0
= (insn
>> 12) & 0xf;
2407 rd1
= (insn
>> 0) & 0xf;
2408 if (rd0
== 0xf || rd1
== 0xf)
2410 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2411 tmp
= load_reg(s
, rd0
);
2412 tmp2
= load_reg(s
, rd1
);
2413 switch ((insn
>> 16) & 0xf) {
2414 case 0x0: /* TMIA */
2415 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2417 case 0x8: /* TMIAPH */
2418 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2420 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2421 if (insn
& (1 << 16))
2422 tcg_gen_shri_i32(tmp
, tmp
, 16);
2423 if (insn
& (1 << 17))
2424 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2425 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2428 tcg_temp_free_i32(tmp2
);
2429 tcg_temp_free_i32(tmp
);
2432 tcg_temp_free_i32(tmp2
);
2433 tcg_temp_free_i32(tmp
);
2434 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2435 gen_op_iwmmxt_set_mup();
2444 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2445 (ie. an undefined instruction). */
2446 static int disas_dsp_insn(CPUARMState
*env
, DisasContext
*s
, uint32_t insn
)
2448 int acc
, rd0
, rd1
, rdhi
, rdlo
;
2451 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2452 /* Multiply with Internal Accumulate Format */
2453 rd0
= (insn
>> 12) & 0xf;
2455 acc
= (insn
>> 5) & 7;
2460 tmp
= load_reg(s
, rd0
);
2461 tmp2
= load_reg(s
, rd1
);
2462 switch ((insn
>> 16) & 0xf) {
2464 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2466 case 0x8: /* MIAPH */
2467 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2469 case 0xc: /* MIABB */
2470 case 0xd: /* MIABT */
2471 case 0xe: /* MIATB */
2472 case 0xf: /* MIATT */
2473 if (insn
& (1 << 16))
2474 tcg_gen_shri_i32(tmp
, tmp
, 16);
2475 if (insn
& (1 << 17))
2476 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2477 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2482 tcg_temp_free_i32(tmp2
);
2483 tcg_temp_free_i32(tmp
);
2485 gen_op_iwmmxt_movq_wRn_M0(acc
);
2489 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2490 /* Internal Accumulator Access Format */
2491 rdhi
= (insn
>> 16) & 0xf;
2492 rdlo
= (insn
>> 12) & 0xf;
2498 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2499 iwmmxt_load_reg(cpu_V0
, acc
);
2500 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
2501 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
2502 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
2503 tcg_gen_andi_i32(cpu_R
[rdhi
], cpu_R
[rdhi
], (1 << (40 - 32)) - 1);
2505 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
2506 iwmmxt_store_reg(cpu_V0
, acc
);
/* Extract VFP single/double register numbers from an instruction word.
 * Single-precision regs use a 5-bit number split into a 4-bit field plus
 * one extra bit; double-precision regs only expose the extra bit on VFP3+
 * (on VFP2 a set "smallbit" is an illegal encoding).  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            goto illegal_op; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2534 /* Move between integer and VFP cores. */
2535 static TCGv
gen_vfp_mrs(void)
2537 TCGv tmp
= tcg_temp_new_i32();
2538 tcg_gen_mov_i32(tmp
, cpu_F0s
);
2542 static void gen_vfp_msr(TCGv tmp
)
2544 tcg_gen_mov_i32(cpu_F0s
, tmp
);
2545 tcg_temp_free_i32(tmp
);
2548 static void gen_neon_dup_u8(TCGv var
, int shift
)
2550 TCGv tmp
= tcg_temp_new_i32();
2552 tcg_gen_shri_i32(var
, var
, shift
);
2553 tcg_gen_ext8u_i32(var
, var
);
2554 tcg_gen_shli_i32(tmp
, var
, 8);
2555 tcg_gen_or_i32(var
, var
, tmp
);
2556 tcg_gen_shli_i32(tmp
, var
, 16);
2557 tcg_gen_or_i32(var
, var
, tmp
);
2558 tcg_temp_free_i32(tmp
);
2561 static void gen_neon_dup_low16(TCGv var
)
2563 TCGv tmp
= tcg_temp_new_i32();
2564 tcg_gen_ext16u_i32(var
, var
);
2565 tcg_gen_shli_i32(tmp
, var
, 16);
2566 tcg_gen_or_i32(var
, var
, tmp
);
2567 tcg_temp_free_i32(tmp
);
2570 static void gen_neon_dup_high16(TCGv var
)
2572 TCGv tmp
= tcg_temp_new_i32();
2573 tcg_gen_andi_i32(var
, var
, 0xffff0000);
2574 tcg_gen_shri_i32(tmp
, var
, 16);
2575 tcg_gen_or_i32(var
, var
, tmp
);
2576 tcg_temp_free_i32(tmp
);
2579 static TCGv
gen_load_and_replicate(DisasContext
*s
, TCGv addr
, int size
)
2581 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2585 tmp
= gen_ld8u(addr
, IS_USER(s
));
2586 gen_neon_dup_u8(tmp
, 0);
2589 tmp
= gen_ld16u(addr
, IS_USER(s
));
2590 gen_neon_dup_low16(tmp
);
2593 tmp
= gen_ld32(addr
, IS_USER(s
));
2595 default: /* Avoid compiler warnings. */
2601 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2602 (ie. an undefined instruction). */
2603 static int disas_vfp_insn(CPUARMState
* env
, DisasContext
*s
, uint32_t insn
)
2605 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
2611 if (!arm_feature(env
, ARM_FEATURE_VFP
))
2614 if (!s
->vfp_enabled
) {
2615 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2616 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
2618 rn
= (insn
>> 16) & 0xf;
2619 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
2620 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
)
2623 dp
= ((insn
& 0xf00) == 0xb00);
2624 switch ((insn
>> 24) & 0xf) {
2626 if (insn
& (1 << 4)) {
2627 /* single register transfer */
2628 rd
= (insn
>> 12) & 0xf;
2633 VFP_DREG_N(rn
, insn
);
2636 if (insn
& 0x00c00060
2637 && !arm_feature(env
, ARM_FEATURE_NEON
))
2640 pass
= (insn
>> 21) & 1;
2641 if (insn
& (1 << 22)) {
2643 offset
= ((insn
>> 5) & 3) * 8;
2644 } else if (insn
& (1 << 5)) {
2646 offset
= (insn
& (1 << 6)) ? 16 : 0;
2651 if (insn
& ARM_CP_RW_BIT
) {
2653 tmp
= neon_load_reg(rn
, pass
);
2657 tcg_gen_shri_i32(tmp
, tmp
, offset
);
2658 if (insn
& (1 << 23))
2664 if (insn
& (1 << 23)) {
2666 tcg_gen_shri_i32(tmp
, tmp
, 16);
2672 tcg_gen_sari_i32(tmp
, tmp
, 16);
2681 store_reg(s
, rd
, tmp
);
2684 tmp
= load_reg(s
, rd
);
2685 if (insn
& (1 << 23)) {
2688 gen_neon_dup_u8(tmp
, 0);
2689 } else if (size
== 1) {
2690 gen_neon_dup_low16(tmp
);
2692 for (n
= 0; n
<= pass
* 2; n
++) {
2693 tmp2
= tcg_temp_new_i32();
2694 tcg_gen_mov_i32(tmp2
, tmp
);
2695 neon_store_reg(rn
, n
, tmp2
);
2697 neon_store_reg(rn
, n
, tmp
);
2702 tmp2
= neon_load_reg(rn
, pass
);
2703 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 8);
2704 tcg_temp_free_i32(tmp2
);
2707 tmp2
= neon_load_reg(rn
, pass
);
2708 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 16);
2709 tcg_temp_free_i32(tmp2
);
2714 neon_store_reg(rn
, pass
, tmp
);
2718 if ((insn
& 0x6f) != 0x00)
2720 rn
= VFP_SREG_N(insn
);
2721 if (insn
& ARM_CP_RW_BIT
) {
2723 if (insn
& (1 << 21)) {
2724 /* system register */
2729 /* VFP2 allows access to FSID from userspace.
2730 VFP3 restricts all id registers to privileged
2733 && arm_feature(env
, ARM_FEATURE_VFP3
))
2735 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2740 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2742 case ARM_VFP_FPINST
:
2743 case ARM_VFP_FPINST2
:
2744 /* Not present in VFP3. */
2746 || arm_feature(env
, ARM_FEATURE_VFP3
))
2748 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2752 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
2753 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
2755 tmp
= tcg_temp_new_i32();
2756 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
2762 || !arm_feature(env
, ARM_FEATURE_MVFR
))
2764 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2770 gen_mov_F0_vreg(0, rn
);
2771 tmp
= gen_vfp_mrs();
2774 /* Set the 4 flag bits in the CPSR. */
2776 tcg_temp_free_i32(tmp
);
2778 store_reg(s
, rd
, tmp
);
2782 if (insn
& (1 << 21)) {
2784 /* system register */
2789 /* Writes are ignored. */
2792 tmp
= load_reg(s
, rd
);
2793 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
2794 tcg_temp_free_i32(tmp
);
2800 /* TODO: VFP subarchitecture support.
2801 * For now, keep the EN bit only */
2802 tmp
= load_reg(s
, rd
);
2803 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
2804 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2807 case ARM_VFP_FPINST
:
2808 case ARM_VFP_FPINST2
:
2809 tmp
= load_reg(s
, rd
);
2810 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2816 tmp
= load_reg(s
, rd
);
2818 gen_mov_vreg_F0(0, rn
);
2823 /* data processing */
2824 /* The opcode is in bits 23, 21, 20 and 6. */
2825 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
2829 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
2831 /* rn is register number */
2832 VFP_DREG_N(rn
, insn
);
2835 if (op
== 15 && (rn
== 15 || ((rn
& 0x1c) == 0x18))) {
2836 /* Integer or single precision destination. */
2837 rd
= VFP_SREG_D(insn
);
2839 VFP_DREG_D(rd
, insn
);
2842 (((rn
& 0x1c) == 0x10) || ((rn
& 0x14) == 0x14))) {
2843 /* VCVT from int is always from S reg regardless of dp bit.
2844 * VCVT with immediate frac_bits has same format as SREG_M
2846 rm
= VFP_SREG_M(insn
);
2848 VFP_DREG_M(rm
, insn
);
2851 rn
= VFP_SREG_N(insn
);
2852 if (op
== 15 && rn
== 15) {
2853 /* Double precision destination. */
2854 VFP_DREG_D(rd
, insn
);
2856 rd
= VFP_SREG_D(insn
);
2858 /* NB that we implicitly rely on the encoding for the frac_bits
2859 * in VCVT of fixed to float being the same as that of an SREG_M
2861 rm
= VFP_SREG_M(insn
);
2864 veclen
= s
->vec_len
;
2865 if (op
== 15 && rn
> 3)
2868 /* Shut up compiler warnings. */
2879 /* Figure out what type of vector operation this is. */
2880 if ((rd
& bank_mask
) == 0) {
2885 delta_d
= (s
->vec_stride
>> 1) + 1;
2887 delta_d
= s
->vec_stride
+ 1;
2889 if ((rm
& bank_mask
) == 0) {
2890 /* mixed scalar/vector */
2899 /* Load the initial operands. */
2904 /* Integer source */
2905 gen_mov_F0_vreg(0, rm
);
2910 gen_mov_F0_vreg(dp
, rd
);
2911 gen_mov_F1_vreg(dp
, rm
);
2915 /* Compare with zero */
2916 gen_mov_F0_vreg(dp
, rd
);
2927 /* Source and destination the same. */
2928 gen_mov_F0_vreg(dp
, rd
);
2934 /* VCVTB, VCVTT: only present with the halfprec extension,
2935 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2937 if (dp
|| !arm_feature(env
, ARM_FEATURE_VFP_FP16
)) {
2940 /* Otherwise fall through */
2942 /* One source operand. */
2943 gen_mov_F0_vreg(dp
, rm
);
2947 /* Two source operands. */
2948 gen_mov_F0_vreg(dp
, rn
);
2949 gen_mov_F1_vreg(dp
, rm
);
2953 /* Perform the calculation. */
2955 case 0: /* VMLA: fd + (fn * fm) */
2956 /* Note that order of inputs to the add matters for NaNs */
2958 gen_mov_F0_vreg(dp
, rd
);
2961 case 1: /* VMLS: fd + -(fn * fm) */
2964 gen_mov_F0_vreg(dp
, rd
);
2967 case 2: /* VNMLS: -fd + (fn * fm) */
2968 /* Note that it isn't valid to replace (-A + B) with (B - A)
2969 * or similar plausible looking simplifications
2970 * because this will give wrong results for NaNs.
2973 gen_mov_F0_vreg(dp
, rd
);
2977 case 3: /* VNMLA: -fd + -(fn * fm) */
2980 gen_mov_F0_vreg(dp
, rd
);
2984 case 4: /* mul: fn * fm */
2987 case 5: /* nmul: -(fn * fm) */
2991 case 6: /* add: fn + fm */
2994 case 7: /* sub: fn - fm */
2997 case 8: /* div: fn / fm */
3000 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3001 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3002 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3003 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3004 /* These are fused multiply-add, and must be done as one
3005 * floating point operation with no rounding between the
3006 * multiplication and addition steps.
3007 * NB that doing the negations here as separate steps is
3008 * correct : an input NaN should come out with its sign bit
3009 * flipped if it is a negated-input.
3011 if (!arm_feature(env
, ARM_FEATURE_VFP4
)) {
3019 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
3021 frd
= tcg_temp_new_i64();
3022 tcg_gen_ld_f64(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3025 gen_helper_vfp_negd(frd
, frd
);
3027 fpst
= get_fpstatus_ptr(0);
3028 gen_helper_vfp_muladdd(cpu_F0d
, cpu_F0d
,
3029 cpu_F1d
, frd
, fpst
);
3030 tcg_temp_free_ptr(fpst
);
3031 tcg_temp_free_i64(frd
);
3037 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
3039 frd
= tcg_temp_new_i32();
3040 tcg_gen_ld_f32(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3042 gen_helper_vfp_negs(frd
, frd
);
3044 fpst
= get_fpstatus_ptr(0);
3045 gen_helper_vfp_muladds(cpu_F0s
, cpu_F0s
,
3046 cpu_F1s
, frd
, fpst
);
3047 tcg_temp_free_ptr(fpst
);
3048 tcg_temp_free_i32(frd
);
3051 case 14: /* fconst */
3052 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3055 n
= (insn
<< 12) & 0x80000000;
3056 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3063 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3070 tcg_gen_movi_i32(cpu_F0s
, n
);
3073 case 15: /* extension space */
3087 case 4: /* vcvtb.f32.f16 */
3088 tmp
= gen_vfp_mrs();
3089 tcg_gen_ext16u_i32(tmp
, tmp
);
3090 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3091 tcg_temp_free_i32(tmp
);
3093 case 5: /* vcvtt.f32.f16 */
3094 tmp
= gen_vfp_mrs();
3095 tcg_gen_shri_i32(tmp
, tmp
, 16);
3096 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3097 tcg_temp_free_i32(tmp
);
3099 case 6: /* vcvtb.f16.f32 */
3100 tmp
= tcg_temp_new_i32();
3101 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3102 gen_mov_F0_vreg(0, rd
);
3103 tmp2
= gen_vfp_mrs();
3104 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3105 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3106 tcg_temp_free_i32(tmp2
);
3109 case 7: /* vcvtt.f16.f32 */
3110 tmp
= tcg_temp_new_i32();
3111 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3112 tcg_gen_shli_i32(tmp
, tmp
, 16);
3113 gen_mov_F0_vreg(0, rd
);
3114 tmp2
= gen_vfp_mrs();
3115 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3116 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3117 tcg_temp_free_i32(tmp2
);
3129 case 11: /* cmpez */
3133 case 15: /* single<->double conversion */
3135 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3137 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3139 case 16: /* fuito */
3140 gen_vfp_uito(dp
, 0);
3142 case 17: /* fsito */
3143 gen_vfp_sito(dp
, 0);
3145 case 20: /* fshto */
3146 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3148 gen_vfp_shto(dp
, 16 - rm
, 0);
3150 case 21: /* fslto */
3151 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3153 gen_vfp_slto(dp
, 32 - rm
, 0);
3155 case 22: /* fuhto */
3156 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3158 gen_vfp_uhto(dp
, 16 - rm
, 0);
3160 case 23: /* fulto */
3161 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3163 gen_vfp_ulto(dp
, 32 - rm
, 0);
3165 case 24: /* ftoui */
3166 gen_vfp_toui(dp
, 0);
3168 case 25: /* ftouiz */
3169 gen_vfp_touiz(dp
, 0);
3171 case 26: /* ftosi */
3172 gen_vfp_tosi(dp
, 0);
3174 case 27: /* ftosiz */
3175 gen_vfp_tosiz(dp
, 0);
3177 case 28: /* ftosh */
3178 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3180 gen_vfp_tosh(dp
, 16 - rm
, 0);
3182 case 29: /* ftosl */
3183 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3185 gen_vfp_tosl(dp
, 32 - rm
, 0);
3187 case 30: /* ftouh */
3188 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3190 gen_vfp_touh(dp
, 16 - rm
, 0);
3192 case 31: /* ftoul */
3193 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3195 gen_vfp_toul(dp
, 32 - rm
, 0);
3197 default: /* undefined */
3201 default: /* undefined */
3205 /* Write back the result. */
3206 if (op
== 15 && (rn
>= 8 && rn
<= 11))
3207 ; /* Comparison, do nothing. */
3208 else if (op
== 15 && dp
&& ((rn
& 0x1c) == 0x18))
3209 /* VCVT double to int: always integer result. */
3210 gen_mov_vreg_F0(0, rd
);
3211 else if (op
== 15 && rn
== 15)
3213 gen_mov_vreg_F0(!dp
, rd
);
3215 gen_mov_vreg_F0(dp
, rd
);
3217 /* break out of the loop if we have finished */
3221 if (op
== 15 && delta_m
== 0) {
3222 /* single source one-many */
3224 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3226 gen_mov_vreg_F0(dp
, rd
);
3230 /* Setup the next operands. */
3232 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3236 /* One source operand. */
3237 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3239 gen_mov_F0_vreg(dp
, rm
);
3241 /* Two source operands. */
3242 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3244 gen_mov_F0_vreg(dp
, rn
);
3246 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3248 gen_mov_F1_vreg(dp
, rm
);
3256 if ((insn
& 0x03e00000) == 0x00400000) {
3257 /* two-register transfer */
3258 rn
= (insn
>> 16) & 0xf;
3259 rd
= (insn
>> 12) & 0xf;
3261 VFP_DREG_M(rm
, insn
);
3263 rm
= VFP_SREG_M(insn
);
3266 if (insn
& ARM_CP_RW_BIT
) {
3269 gen_mov_F0_vreg(0, rm
* 2);
3270 tmp
= gen_vfp_mrs();
3271 store_reg(s
, rd
, tmp
);
3272 gen_mov_F0_vreg(0, rm
* 2 + 1);
3273 tmp
= gen_vfp_mrs();
3274 store_reg(s
, rn
, tmp
);
3276 gen_mov_F0_vreg(0, rm
);
3277 tmp
= gen_vfp_mrs();
3278 store_reg(s
, rd
, tmp
);
3279 gen_mov_F0_vreg(0, rm
+ 1);
3280 tmp
= gen_vfp_mrs();
3281 store_reg(s
, rn
, tmp
);
3286 tmp
= load_reg(s
, rd
);
3288 gen_mov_vreg_F0(0, rm
* 2);
3289 tmp
= load_reg(s
, rn
);
3291 gen_mov_vreg_F0(0, rm
* 2 + 1);
3293 tmp
= load_reg(s
, rd
);
3295 gen_mov_vreg_F0(0, rm
);
3296 tmp
= load_reg(s
, rn
);
3298 gen_mov_vreg_F0(0, rm
+ 1);
3303 rn
= (insn
>> 16) & 0xf;
3305 VFP_DREG_D(rd
, insn
);
3307 rd
= VFP_SREG_D(insn
);
3308 if ((insn
& 0x01200000) == 0x01000000) {
3309 /* Single load/store */
3310 offset
= (insn
& 0xff) << 2;
3311 if ((insn
& (1 << 23)) == 0)
3313 if (s
->thumb
&& rn
== 15) {
3314 /* This is actually UNPREDICTABLE */
3315 addr
= tcg_temp_new_i32();
3316 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3318 addr
= load_reg(s
, rn
);
3320 tcg_gen_addi_i32(addr
, addr
, offset
);
3321 if (insn
& (1 << 20)) {
3322 gen_vfp_ld(s
, dp
, addr
);
3323 gen_mov_vreg_F0(dp
, rd
);
3325 gen_mov_F0_vreg(dp
, rd
);
3326 gen_vfp_st(s
, dp
, addr
);
3328 tcg_temp_free_i32(addr
);
3330 /* load/store multiple */
3331 int w
= insn
& (1 << 21);
3333 n
= (insn
>> 1) & 0x7f;
3337 if (w
&& !(((insn
>> 23) ^ (insn
>> 24)) & 1)) {
3338 /* P == U , W == 1 => UNDEF */
3341 if (n
== 0 || (rd
+ n
) > 32 || (dp
&& n
> 16)) {
3342 /* UNPREDICTABLE cases for bad immediates: we choose to
3343 * UNDEF to avoid generating huge numbers of TCG ops
3347 if (rn
== 15 && w
) {
3348 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3352 if (s
->thumb
&& rn
== 15) {
3353 /* This is actually UNPREDICTABLE */
3354 addr
= tcg_temp_new_i32();
3355 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3357 addr
= load_reg(s
, rn
);
3359 if (insn
& (1 << 24)) /* pre-decrement */
3360 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
3366 for (i
= 0; i
< n
; i
++) {
3367 if (insn
& ARM_CP_RW_BIT
) {
3369 gen_vfp_ld(s
, dp
, addr
);
3370 gen_mov_vreg_F0(dp
, rd
+ i
);
3373 gen_mov_F0_vreg(dp
, rd
+ i
);
3374 gen_vfp_st(s
, dp
, addr
);
3376 tcg_gen_addi_i32(addr
, addr
, offset
);
3380 if (insn
& (1 << 24))
3381 offset
= -offset
* n
;
3382 else if (dp
&& (insn
& 1))
3388 tcg_gen_addi_i32(addr
, addr
, offset
);
3389 store_reg(s
, rn
, addr
);
3391 tcg_temp_free_i32(addr
);
3397 /* Should never happen. */
3403 static inline void gen_goto_tb(DisasContext
*s
, int n
, uint32_t dest
)
3405 TranslationBlock
*tb
;
3408 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
3410 gen_set_pc_im(dest
);
3411 tcg_gen_exit_tb((tcg_target_long
)tb
+ n
);
3413 gen_set_pc_im(dest
);
3418 static inline void gen_jmp (DisasContext
*s
, uint32_t dest
)
3420 if (unlikely(s
->singlestep_enabled
)) {
3421 /* An indirect jump so that we still trigger the debug exception. */
3426 gen_goto_tb(s
, 0, dest
);
3427 s
->is_jmp
= DISAS_TB_JUMP
;
3431 static inline void gen_mulxy(TCGv t0
, TCGv t1
, int x
, int y
)
3434 tcg_gen_sari_i32(t0
, t0
, 16);
3438 tcg_gen_sari_i32(t1
, t1
, 16);
3441 tcg_gen_mul_i32(t0
, t0
, t1
);
3444 /* Return the mask of PSR bits set by a MSR instruction. */
3445 static uint32_t msr_mask(CPUARMState
*env
, DisasContext
*s
, int flags
, int spsr
) {
3449 if (flags
& (1 << 0))
3451 if (flags
& (1 << 1))
3453 if (flags
& (1 << 2))
3455 if (flags
& (1 << 3))
3458 /* Mask out undefined bits. */
3459 mask
&= ~CPSR_RESERVED
;
3460 if (!arm_feature(env
, ARM_FEATURE_V4T
))
3462 if (!arm_feature(env
, ARM_FEATURE_V5
))
3463 mask
&= ~CPSR_Q
; /* V5TE in reality*/
3464 if (!arm_feature(env
, ARM_FEATURE_V6
))
3465 mask
&= ~(CPSR_E
| CPSR_GE
);
3466 if (!arm_feature(env
, ARM_FEATURE_THUMB2
))
3468 /* Mask out execution state bits. */
3471 /* Mask out privileged bits. */
3477 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3478 static int gen_set_psr(DisasContext
*s
, uint32_t mask
, int spsr
, TCGv t0
)
3482 /* ??? This is also undefined in system mode. */
3486 tmp
= load_cpu_field(spsr
);
3487 tcg_gen_andi_i32(tmp
, tmp
, ~mask
);
3488 tcg_gen_andi_i32(t0
, t0
, mask
);
3489 tcg_gen_or_i32(tmp
, tmp
, t0
);
3490 store_cpu_field(tmp
, spsr
);
3492 gen_set_cpsr(t0
, mask
);
3494 tcg_temp_free_i32(t0
);
3499 /* Returns nonzero if access to the PSR is not permitted. */
3500 static int gen_set_psr_im(DisasContext
*s
, uint32_t mask
, int spsr
, uint32_t val
)
3503 tmp
= tcg_temp_new_i32();
3504 tcg_gen_movi_i32(tmp
, val
);
3505 return gen_set_psr(s
, mask
, spsr
, tmp
);
3508 /* Generate an old-style exception return. Marks pc as dead. */
3509 static void gen_exception_return(DisasContext
*s
, TCGv pc
)
3512 store_reg(s
, 15, pc
);
3513 tmp
= load_cpu_field(spsr
);
3514 gen_set_cpsr(tmp
, 0xffffffff);
3515 tcg_temp_free_i32(tmp
);
3516 s
->is_jmp
= DISAS_UPDATE
;
3519 /* Generate a v6 exception return. Marks both values as dead. */
3520 static void gen_rfe(DisasContext
*s
, TCGv pc
, TCGv cpsr
)
3522 gen_set_cpsr(cpsr
, 0xffffffff);
3523 tcg_temp_free_i32(cpsr
);
3524 store_reg(s
, 15, pc
);
3525 s
->is_jmp
= DISAS_UPDATE
;
3529 gen_set_condexec (DisasContext
*s
)
3531 if (s
->condexec_mask
) {
3532 uint32_t val
= (s
->condexec_cond
<< 4) | (s
->condexec_mask
>> 1);
3533 TCGv tmp
= tcg_temp_new_i32();
3534 tcg_gen_movi_i32(tmp
, val
);
3535 store_cpu_field(tmp
, condexec_bits
);
3539 static void gen_exception_insn(DisasContext
*s
, int offset
, int excp
)
3541 gen_set_condexec(s
);
3542 gen_set_pc_im(s
->pc
- offset
);
3543 gen_exception(excp
);
3544 s
->is_jmp
= DISAS_JUMP
;
3547 static void gen_nop_hint(DisasContext
*s
, int val
)
3551 gen_set_pc_im(s
->pc
);
3552 s
->is_jmp
= DISAS_WFI
;
3556 /* TODO: Implement SEV and WFE. May help SMP performance. */
3562 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3564 static inline void gen_neon_add(int size
, TCGv t0
, TCGv t1
)
3567 case 0: gen_helper_neon_add_u8(t0
, t0
, t1
); break;
3568 case 1: gen_helper_neon_add_u16(t0
, t0
, t1
); break;
3569 case 2: tcg_gen_add_i32(t0
, t0
, t1
); break;
3574 static inline void gen_neon_rsb(int size
, TCGv t0
, TCGv t1
)
3577 case 0: gen_helper_neon_sub_u8(t0
, t1
, t0
); break;
3578 case 1: gen_helper_neon_sub_u16(t0
, t1
, t0
); break;
3579 case 2: tcg_gen_sub_i32(t0
, t1
, t0
); break;
3584 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3585 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3586 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3587 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3588 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3590 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3591 switch ((size << 1) | u) { \
3593 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3596 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3599 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3602 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3605 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3608 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3610 default: return 1; \
3613 #define GEN_NEON_INTEGER_OP(name) do { \
3614 switch ((size << 1) | u) { \
3616 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3619 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3622 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3625 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3628 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3631 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3633 default: return 1; \
3636 static TCGv
neon_load_scratch(int scratch
)
3638 TCGv tmp
= tcg_temp_new_i32();
3639 tcg_gen_ld_i32(tmp
, cpu_env
, offsetof(CPUARMState
, vfp
.scratch
[scratch
]));
3643 static void neon_store_scratch(int scratch
, TCGv var
)
3645 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, vfp
.scratch
[scratch
]));
3646 tcg_temp_free_i32(var
);
3649 static inline TCGv
neon_get_scalar(int size
, int reg
)
3653 tmp
= neon_load_reg(reg
& 7, reg
>> 4);
3655 gen_neon_dup_high16(tmp
);
3657 gen_neon_dup_low16(tmp
);
3660 tmp
= neon_load_reg(reg
& 15, reg
>> 4);
3665 static int gen_neon_unzip(int rd
, int rm
, int size
, int q
)
3668 if (!q
&& size
== 2) {
3671 tmp
= tcg_const_i32(rd
);
3672 tmp2
= tcg_const_i32(rm
);
3676 gen_helper_neon_qunzip8(cpu_env
, tmp
, tmp2
);
3679 gen_helper_neon_qunzip16(cpu_env
, tmp
, tmp2
);
3682 gen_helper_neon_qunzip32(cpu_env
, tmp
, tmp2
);
3690 gen_helper_neon_unzip8(cpu_env
, tmp
, tmp2
);
3693 gen_helper_neon_unzip16(cpu_env
, tmp
, tmp2
);
3699 tcg_temp_free_i32(tmp
);
3700 tcg_temp_free_i32(tmp2
);
3704 static int gen_neon_zip(int rd
, int rm
, int size
, int q
)
3707 if (!q
&& size
== 2) {
3710 tmp
= tcg_const_i32(rd
);
3711 tmp2
= tcg_const_i32(rm
);
3715 gen_helper_neon_qzip8(cpu_env
, tmp
, tmp2
);
3718 gen_helper_neon_qzip16(cpu_env
, tmp
, tmp2
);
3721 gen_helper_neon_qzip32(cpu_env
, tmp
, tmp2
);
3729 gen_helper_neon_zip8(cpu_env
, tmp
, tmp2
);
3732 gen_helper_neon_zip16(cpu_env
, tmp
, tmp2
);
3738 tcg_temp_free_i32(tmp
);
3739 tcg_temp_free_i32(tmp2
);
3743 static void gen_neon_trn_u8(TCGv t0
, TCGv t1
)
3747 rd
= tcg_temp_new_i32();
3748 tmp
= tcg_temp_new_i32();
3750 tcg_gen_shli_i32(rd
, t0
, 8);
3751 tcg_gen_andi_i32(rd
, rd
, 0xff00ff00);
3752 tcg_gen_andi_i32(tmp
, t1
, 0x00ff00ff);
3753 tcg_gen_or_i32(rd
, rd
, tmp
);
3755 tcg_gen_shri_i32(t1
, t1
, 8);
3756 tcg_gen_andi_i32(t1
, t1
, 0x00ff00ff);
3757 tcg_gen_andi_i32(tmp
, t0
, 0xff00ff00);
3758 tcg_gen_or_i32(t1
, t1
, tmp
);
3759 tcg_gen_mov_i32(t0
, rd
);
3761 tcg_temp_free_i32(tmp
);
3762 tcg_temp_free_i32(rd
);
3765 static void gen_neon_trn_u16(TCGv t0
, TCGv t1
)
3769 rd
= tcg_temp_new_i32();
3770 tmp
= tcg_temp_new_i32();
3772 tcg_gen_shli_i32(rd
, t0
, 16);
3773 tcg_gen_andi_i32(tmp
, t1
, 0xffff);
3774 tcg_gen_or_i32(rd
, rd
, tmp
);
3775 tcg_gen_shri_i32(t1
, t1
, 16);
3776 tcg_gen_andi_i32(tmp
, t0
, 0xffff0000);
3777 tcg_gen_or_i32(t1
, t1
, tmp
);
3778 tcg_gen_mov_i32(t0
, rd
);
3780 tcg_temp_free_i32(tmp
);
3781 tcg_temp_free_i32(rd
);
3789 } neon_ls_element_type
[11] = {
3803 /* Translate a NEON load/store element instruction. Return nonzero if the
3804 instruction is invalid. */
3805 static int disas_neon_ls_insn(CPUARMState
* env
, DisasContext
*s
, uint32_t insn
)
3824 if (!s
->vfp_enabled
)
3826 VFP_DREG_D(rd
, insn
);
3827 rn
= (insn
>> 16) & 0xf;
3829 load
= (insn
& (1 << 21)) != 0;
3830 if ((insn
& (1 << 23)) == 0) {
3831 /* Load store all elements. */
3832 op
= (insn
>> 8) & 0xf;
3833 size
= (insn
>> 6) & 3;
3836 /* Catch UNDEF cases for bad values of align field */
3839 if (((insn
>> 5) & 1) == 1) {
3844 if (((insn
>> 4) & 3) == 3) {
3851 nregs
= neon_ls_element_type
[op
].nregs
;
3852 interleave
= neon_ls_element_type
[op
].interleave
;
3853 spacing
= neon_ls_element_type
[op
].spacing
;
3854 if (size
== 3 && (interleave
| spacing
) != 1)
3856 addr
= tcg_temp_new_i32();
3857 load_reg_var(s
, addr
, rn
);
3858 stride
= (1 << size
) * interleave
;
3859 for (reg
= 0; reg
< nregs
; reg
++) {
3860 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
3861 load_reg_var(s
, addr
, rn
);
3862 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
3863 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
3864 load_reg_var(s
, addr
, rn
);
3865 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3869 tmp64
= gen_ld64(addr
, IS_USER(s
));
3870 neon_store_reg64(tmp64
, rd
);
3871 tcg_temp_free_i64(tmp64
);
3873 tmp64
= tcg_temp_new_i64();
3874 neon_load_reg64(tmp64
, rd
);
3875 gen_st64(tmp64
, addr
, IS_USER(s
));
3877 tcg_gen_addi_i32(addr
, addr
, stride
);
3879 for (pass
= 0; pass
< 2; pass
++) {
3882 tmp
= gen_ld32(addr
, IS_USER(s
));
3883 neon_store_reg(rd
, pass
, tmp
);
3885 tmp
= neon_load_reg(rd
, pass
);
3886 gen_st32(tmp
, addr
, IS_USER(s
));
3888 tcg_gen_addi_i32(addr
, addr
, stride
);
3889 } else if (size
== 1) {
3891 tmp
= gen_ld16u(addr
, IS_USER(s
));
3892 tcg_gen_addi_i32(addr
, addr
, stride
);
3893 tmp2
= gen_ld16u(addr
, IS_USER(s
));
3894 tcg_gen_addi_i32(addr
, addr
, stride
);
3895 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
3896 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3897 tcg_temp_free_i32(tmp2
);
3898 neon_store_reg(rd
, pass
, tmp
);
3900 tmp
= neon_load_reg(rd
, pass
);
3901 tmp2
= tcg_temp_new_i32();
3902 tcg_gen_shri_i32(tmp2
, tmp
, 16);
3903 gen_st16(tmp
, addr
, IS_USER(s
));
3904 tcg_gen_addi_i32(addr
, addr
, stride
);
3905 gen_st16(tmp2
, addr
, IS_USER(s
));
3906 tcg_gen_addi_i32(addr
, addr
, stride
);
3908 } else /* size == 0 */ {
3911 for (n
= 0; n
< 4; n
++) {
3912 tmp
= gen_ld8u(addr
, IS_USER(s
));
3913 tcg_gen_addi_i32(addr
, addr
, stride
);
3917 tcg_gen_shli_i32(tmp
, tmp
, n
* 8);
3918 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
3919 tcg_temp_free_i32(tmp
);
3922 neon_store_reg(rd
, pass
, tmp2
);
3924 tmp2
= neon_load_reg(rd
, pass
);
3925 for (n
= 0; n
< 4; n
++) {
3926 tmp
= tcg_temp_new_i32();
3928 tcg_gen_mov_i32(tmp
, tmp2
);
3930 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
3932 gen_st8(tmp
, addr
, IS_USER(s
));
3933 tcg_gen_addi_i32(addr
, addr
, stride
);
3935 tcg_temp_free_i32(tmp2
);
3942 tcg_temp_free_i32(addr
);
3945 size
= (insn
>> 10) & 3;
3947 /* Load single element to all lanes. */
3948 int a
= (insn
>> 4) & 1;
3952 size
= (insn
>> 6) & 3;
3953 nregs
= ((insn
>> 8) & 3) + 1;
3956 if (nregs
!= 4 || a
== 0) {
3959 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3962 if (nregs
== 1 && a
== 1 && size
== 0) {
3965 if (nregs
== 3 && a
== 1) {
3968 addr
= tcg_temp_new_i32();
3969 load_reg_var(s
, addr
, rn
);
3971 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3972 tmp
= gen_load_and_replicate(s
, addr
, size
);
3973 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
3974 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
3975 if (insn
& (1 << 5)) {
3976 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 0));
3977 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 1));
3979 tcg_temp_free_i32(tmp
);
3981 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3982 stride
= (insn
& (1 << 5)) ? 2 : 1;
3983 for (reg
= 0; reg
< nregs
; reg
++) {
3984 tmp
= gen_load_and_replicate(s
, addr
, size
);
3985 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
3986 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
3987 tcg_temp_free_i32(tmp
);
3988 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3992 tcg_temp_free_i32(addr
);
3993 stride
= (1 << size
) * nregs
;
3995 /* Single element. */
3996 int idx
= (insn
>> 4) & 0xf;
3997 pass
= (insn
>> 7) & 1;
4000 shift
= ((insn
>> 5) & 3) * 8;
4004 shift
= ((insn
>> 6) & 1) * 16;
4005 stride
= (insn
& (1 << 5)) ? 2 : 1;
4009 stride
= (insn
& (1 << 6)) ? 2 : 1;
4014 nregs
= ((insn
>> 8) & 3) + 1;
4015 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4018 if (((idx
& (1 << size
)) != 0) ||
4019 (size
== 2 && ((idx
& 3) == 1 || (idx
& 3) == 2))) {
4024 if ((idx
& 1) != 0) {
4029 if (size
== 2 && (idx
& 2) != 0) {
4034 if ((size
== 2) && ((idx
& 3) == 3)) {
4041 if ((rd
+ stride
* (nregs
- 1)) > 31) {
4042 /* Attempts to write off the end of the register file
4043 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4044 * the neon_load_reg() would write off the end of the array.
4048 addr
= tcg_temp_new_i32();
4049 load_reg_var(s
, addr
, rn
);
4050 for (reg
= 0; reg
< nregs
; reg
++) {
4054 tmp
= gen_ld8u(addr
, IS_USER(s
));
4057 tmp
= gen_ld16u(addr
, IS_USER(s
));
4060 tmp
= gen_ld32(addr
, IS_USER(s
));
4062 default: /* Avoid compiler warnings. */
4066 tmp2
= neon_load_reg(rd
, pass
);
4067 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
,
4068 shift
, size
? 16 : 8);
4069 tcg_temp_free_i32(tmp2
);
4071 neon_store_reg(rd
, pass
, tmp
);
4072 } else { /* Store */
4073 tmp
= neon_load_reg(rd
, pass
);
4075 tcg_gen_shri_i32(tmp
, tmp
, shift
);
4078 gen_st8(tmp
, addr
, IS_USER(s
));
4081 gen_st16(tmp
, addr
, IS_USER(s
));
4084 gen_st32(tmp
, addr
, IS_USER(s
));
4089 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4091 tcg_temp_free_i32(addr
);
4092 stride
= nregs
* (1 << size
);
4098 base
= load_reg(s
, rn
);
4100 tcg_gen_addi_i32(base
, base
, stride
);
4103 index
= load_reg(s
, rm
);
4104 tcg_gen_add_i32(base
, base
, index
);
4105 tcg_temp_free_i32(index
);
4107 store_reg(s
, rn
, base
);
4112 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4113 static void gen_neon_bsl(TCGv dest
, TCGv t
, TCGv f
, TCGv c
)
4115 tcg_gen_and_i32(t
, t
, c
);
4116 tcg_gen_andc_i32(f
, f
, c
);
4117 tcg_gen_or_i32(dest
, t
, f
);
4120 static inline void gen_neon_narrow(int size
, TCGv dest
, TCGv_i64 src
)
4123 case 0: gen_helper_neon_narrow_u8(dest
, src
); break;
4124 case 1: gen_helper_neon_narrow_u16(dest
, src
); break;
4125 case 2: tcg_gen_trunc_i64_i32(dest
, src
); break;
4130 static inline void gen_neon_narrow_sats(int size
, TCGv dest
, TCGv_i64 src
)
4133 case 0: gen_helper_neon_narrow_sat_s8(dest
, cpu_env
, src
); break;
4134 case 1: gen_helper_neon_narrow_sat_s16(dest
, cpu_env
, src
); break;
4135 case 2: gen_helper_neon_narrow_sat_s32(dest
, cpu_env
, src
); break;
4140 static inline void gen_neon_narrow_satu(int size
, TCGv dest
, TCGv_i64 src
)
4143 case 0: gen_helper_neon_narrow_sat_u8(dest
, cpu_env
, src
); break;
4144 case 1: gen_helper_neon_narrow_sat_u16(dest
, cpu_env
, src
); break;
4145 case 2: gen_helper_neon_narrow_sat_u32(dest
, cpu_env
, src
); break;
4150 static inline void gen_neon_unarrow_sats(int size
, TCGv dest
, TCGv_i64 src
)
4153 case 0: gen_helper_neon_unarrow_sat8(dest
, cpu_env
, src
); break;
4154 case 1: gen_helper_neon_unarrow_sat16(dest
, cpu_env
, src
); break;
4155 case 2: gen_helper_neon_unarrow_sat32(dest
, cpu_env
, src
); break;
4160 static inline void gen_neon_shift_narrow(int size
, TCGv var
, TCGv shift
,
4166 case 1: gen_helper_neon_rshl_u16(var
, var
, shift
); break;
4167 case 2: gen_helper_neon_rshl_u32(var
, var
, shift
); break;
4172 case 1: gen_helper_neon_rshl_s16(var
, var
, shift
); break;
4173 case 2: gen_helper_neon_rshl_s32(var
, var
, shift
); break;
4180 case 1: gen_helper_neon_shl_u16(var
, var
, shift
); break;
4181 case 2: gen_helper_neon_shl_u32(var
, var
, shift
); break;
4186 case 1: gen_helper_neon_shl_s16(var
, var
, shift
); break;
4187 case 2: gen_helper_neon_shl_s32(var
, var
, shift
); break;
4194 static inline void gen_neon_widen(TCGv_i64 dest
, TCGv src
, int size
, int u
)
4198 case 0: gen_helper_neon_widen_u8(dest
, src
); break;
4199 case 1: gen_helper_neon_widen_u16(dest
, src
); break;
4200 case 2: tcg_gen_extu_i32_i64(dest
, src
); break;
4205 case 0: gen_helper_neon_widen_s8(dest
, src
); break;
4206 case 1: gen_helper_neon_widen_s16(dest
, src
); break;
4207 case 2: tcg_gen_ext_i32_i64(dest
, src
); break;
4211 tcg_temp_free_i32(src
);
4214 static inline void gen_neon_addl(int size
)
4217 case 0: gen_helper_neon_addl_u16(CPU_V001
); break;
4218 case 1: gen_helper_neon_addl_u32(CPU_V001
); break;
4219 case 2: tcg_gen_add_i64(CPU_V001
); break;
4224 static inline void gen_neon_subl(int size
)
4227 case 0: gen_helper_neon_subl_u16(CPU_V001
); break;
4228 case 1: gen_helper_neon_subl_u32(CPU_V001
); break;
4229 case 2: tcg_gen_sub_i64(CPU_V001
); break;
4234 static inline void gen_neon_negl(TCGv_i64 var
, int size
)
4237 case 0: gen_helper_neon_negl_u16(var
, var
); break;
4238 case 1: gen_helper_neon_negl_u32(var
, var
); break;
4240 tcg_gen_neg_i64(var
, var
);
4246 static inline void gen_neon_addl_saturate(TCGv_i64 op0
, TCGv_i64 op1
, int size
)
4249 case 1: gen_helper_neon_addl_saturate_s32(op0
, cpu_env
, op0
, op1
); break;
4250 case 2: gen_helper_neon_addl_saturate_s64(op0
, cpu_env
, op0
, op1
); break;
4255 static inline void gen_neon_mull(TCGv_i64 dest
, TCGv a
, TCGv b
, int size
, int u
)
4259 switch ((size
<< 1) | u
) {
4260 case 0: gen_helper_neon_mull_s8(dest
, a
, b
); break;
4261 case 1: gen_helper_neon_mull_u8(dest
, a
, b
); break;
4262 case 2: gen_helper_neon_mull_s16(dest
, a
, b
); break;
4263 case 3: gen_helper_neon_mull_u16(dest
, a
, b
); break;
4265 tmp
= gen_muls_i64_i32(a
, b
);
4266 tcg_gen_mov_i64(dest
, tmp
);
4267 tcg_temp_free_i64(tmp
);
4270 tmp
= gen_mulu_i64_i32(a
, b
);
4271 tcg_gen_mov_i64(dest
, tmp
);
4272 tcg_temp_free_i64(tmp
);
4277 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4278 Don't forget to clean them now. */
4280 tcg_temp_free_i32(a
);
4281 tcg_temp_free_i32(b
);
4285 static void gen_neon_narrow_op(int op
, int u
, int size
, TCGv dest
, TCGv_i64 src
)
4289 gen_neon_unarrow_sats(size
, dest
, src
);
4291 gen_neon_narrow(size
, dest
, src
);
4295 gen_neon_narrow_satu(size
, dest
, src
);
4297 gen_neon_narrow_sats(size
, dest
, src
);
4302 /* Symbolic constants for op fields for Neon 3-register same-length.
4303 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4306 #define NEON_3R_VHADD 0
4307 #define NEON_3R_VQADD 1
4308 #define NEON_3R_VRHADD 2
4309 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4310 #define NEON_3R_VHSUB 4
4311 #define NEON_3R_VQSUB 5
4312 #define NEON_3R_VCGT 6
4313 #define NEON_3R_VCGE 7
4314 #define NEON_3R_VSHL 8
4315 #define NEON_3R_VQSHL 9
4316 #define NEON_3R_VRSHL 10
4317 #define NEON_3R_VQRSHL 11
4318 #define NEON_3R_VMAX 12
4319 #define NEON_3R_VMIN 13
4320 #define NEON_3R_VABD 14
4321 #define NEON_3R_VABA 15
4322 #define NEON_3R_VADD_VSUB 16
4323 #define NEON_3R_VTST_VCEQ 17
4324 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4325 #define NEON_3R_VMUL 19
4326 #define NEON_3R_VPMAX 20
4327 #define NEON_3R_VPMIN 21
4328 #define NEON_3R_VQDMULH_VQRDMULH 22
4329 #define NEON_3R_VPADD 23
4330 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4331 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4332 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4333 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4334 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4335 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4336 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4338 static const uint8_t neon_3r_sizes
[] = {
4339 [NEON_3R_VHADD
] = 0x7,
4340 [NEON_3R_VQADD
] = 0xf,
4341 [NEON_3R_VRHADD
] = 0x7,
4342 [NEON_3R_LOGIC
] = 0xf, /* size field encodes op type */
4343 [NEON_3R_VHSUB
] = 0x7,
4344 [NEON_3R_VQSUB
] = 0xf,
4345 [NEON_3R_VCGT
] = 0x7,
4346 [NEON_3R_VCGE
] = 0x7,
4347 [NEON_3R_VSHL
] = 0xf,
4348 [NEON_3R_VQSHL
] = 0xf,
4349 [NEON_3R_VRSHL
] = 0xf,
4350 [NEON_3R_VQRSHL
] = 0xf,
4351 [NEON_3R_VMAX
] = 0x7,
4352 [NEON_3R_VMIN
] = 0x7,
4353 [NEON_3R_VABD
] = 0x7,
4354 [NEON_3R_VABA
] = 0x7,
4355 [NEON_3R_VADD_VSUB
] = 0xf,
4356 [NEON_3R_VTST_VCEQ
] = 0x7,
4357 [NEON_3R_VML
] = 0x7,
4358 [NEON_3R_VMUL
] = 0x7,
4359 [NEON_3R_VPMAX
] = 0x7,
4360 [NEON_3R_VPMIN
] = 0x7,
4361 [NEON_3R_VQDMULH_VQRDMULH
] = 0x6,
4362 [NEON_3R_VPADD
] = 0x7,
4363 [NEON_3R_VFM
] = 0x5, /* size bit 1 encodes op */
4364 [NEON_3R_FLOAT_ARITH
] = 0x5, /* size bit 1 encodes op */
4365 [NEON_3R_FLOAT_MULTIPLY
] = 0x5, /* size bit 1 encodes op */
4366 [NEON_3R_FLOAT_CMP
] = 0x5, /* size bit 1 encodes op */
4367 [NEON_3R_FLOAT_ACMP
] = 0x5, /* size bit 1 encodes op */
4368 [NEON_3R_FLOAT_MINMAX
] = 0x5, /* size bit 1 encodes op */
4369 [NEON_3R_VRECPS_VRSQRTS
] = 0x5, /* size bit 1 encodes op */
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps in the numbering are unallocated encodings; they
 * UNDEF because neon_2rm_sizes[] leaves their entries zero.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4421 static int neon_2rm_is_float_op(int op
)
4423 /* Return true if this neon 2reg-misc op is float-to-float */
4424 return (op
== NEON_2RM_VABS_F
|| op
== NEON_2RM_VNEG_F
||
4425 op
>= NEON_2RM_VRECPE_F
);
4428 /* Each entry in this array has bit n set if the insn allows
4429 * size value n (otherwise it will UNDEF). Since unallocated
4430 * op values will have no bits set they always UNDEF.
4432 static const uint8_t neon_2rm_sizes
[] = {
4433 [NEON_2RM_VREV64
] = 0x7,
4434 [NEON_2RM_VREV32
] = 0x3,
4435 [NEON_2RM_VREV16
] = 0x1,
4436 [NEON_2RM_VPADDL
] = 0x7,
4437 [NEON_2RM_VPADDL_U
] = 0x7,
4438 [NEON_2RM_VCLS
] = 0x7,
4439 [NEON_2RM_VCLZ
] = 0x7,
4440 [NEON_2RM_VCNT
] = 0x1,
4441 [NEON_2RM_VMVN
] = 0x1,
4442 [NEON_2RM_VPADAL
] = 0x7,
4443 [NEON_2RM_VPADAL_U
] = 0x7,
4444 [NEON_2RM_VQABS
] = 0x7,
4445 [NEON_2RM_VQNEG
] = 0x7,
4446 [NEON_2RM_VCGT0
] = 0x7,
4447 [NEON_2RM_VCGE0
] = 0x7,
4448 [NEON_2RM_VCEQ0
] = 0x7,
4449 [NEON_2RM_VCLE0
] = 0x7,
4450 [NEON_2RM_VCLT0
] = 0x7,
4451 [NEON_2RM_VABS
] = 0x7,
4452 [NEON_2RM_VNEG
] = 0x7,
4453 [NEON_2RM_VCGT0_F
] = 0x4,
4454 [NEON_2RM_VCGE0_F
] = 0x4,
4455 [NEON_2RM_VCEQ0_F
] = 0x4,
4456 [NEON_2RM_VCLE0_F
] = 0x4,
4457 [NEON_2RM_VCLT0_F
] = 0x4,
4458 [NEON_2RM_VABS_F
] = 0x4,
4459 [NEON_2RM_VNEG_F
] = 0x4,
4460 [NEON_2RM_VSWP
] = 0x1,
4461 [NEON_2RM_VTRN
] = 0x7,
4462 [NEON_2RM_VUZP
] = 0x7,
4463 [NEON_2RM_VZIP
] = 0x7,
4464 [NEON_2RM_VMOVN
] = 0x7,
4465 [NEON_2RM_VQMOVN
] = 0x7,
4466 [NEON_2RM_VSHLL
] = 0x7,
4467 [NEON_2RM_VCVT_F16_F32
] = 0x2,
4468 [NEON_2RM_VCVT_F32_F16
] = 0x2,
4469 [NEON_2RM_VRECPE
] = 0x4,
4470 [NEON_2RM_VRSQRTE
] = 0x4,
4471 [NEON_2RM_VRECPE_F
] = 0x4,
4472 [NEON_2RM_VRSQRTE_F
] = 0x4,
4473 [NEON_2RM_VCVT_FS
] = 0x4,
4474 [NEON_2RM_VCVT_FU
] = 0x4,
4475 [NEON_2RM_VCVT_SF
] = 0x4,
4476 [NEON_2RM_VCVT_UF
] = 0x4,
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4484 static int disas_neon_data_insn(CPUARMState
* env
, DisasContext
*s
, uint32_t insn
)
4496 TCGv tmp
, tmp2
, tmp3
, tmp4
, tmp5
;
4499 if (!s
->vfp_enabled
)
4501 q
= (insn
& (1 << 6)) != 0;
4502 u
= (insn
>> 24) & 1;
4503 VFP_DREG_D(rd
, insn
);
4504 VFP_DREG_N(rn
, insn
);
4505 VFP_DREG_M(rm
, insn
);
4506 size
= (insn
>> 20) & 3;
4507 if ((insn
& (1 << 23)) == 0) {
4508 /* Three register same length. */
4509 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4510 /* Catch invalid op and bad size combinations: UNDEF */
4511 if ((neon_3r_sizes
[op
] & (1 << size
)) == 0) {
4514 /* All insns of this form UNDEF for either this condition or the
4515 * superset of cases "Q==1"; we catch the latter later.
4517 if (q
&& ((rd
| rn
| rm
) & 1)) {
4520 if (size
== 3 && op
!= NEON_3R_LOGIC
) {
4521 /* 64-bit element instructions. */
4522 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4523 neon_load_reg64(cpu_V0
, rn
+ pass
);
4524 neon_load_reg64(cpu_V1
, rm
+ pass
);
4528 gen_helper_neon_qadd_u64(cpu_V0
, cpu_env
,
4531 gen_helper_neon_qadd_s64(cpu_V0
, cpu_env
,
4537 gen_helper_neon_qsub_u64(cpu_V0
, cpu_env
,
4540 gen_helper_neon_qsub_s64(cpu_V0
, cpu_env
,
4546 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4548 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4553 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4556 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
4562 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4564 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4567 case NEON_3R_VQRSHL
:
4569 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
4572 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
4576 case NEON_3R_VADD_VSUB
:
4578 tcg_gen_sub_i64(CPU_V001
);
4580 tcg_gen_add_i64(CPU_V001
);
4586 neon_store_reg64(cpu_V0
, rd
+ pass
);
4595 case NEON_3R_VQRSHL
:
4598 /* Shift instruction operands are reversed. */
4613 case NEON_3R_FLOAT_ARITH
:
4614 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
4616 case NEON_3R_FLOAT_MINMAX
:
4617 pairwise
= u
; /* if VPMIN/VPMAX (float) */
4619 case NEON_3R_FLOAT_CMP
:
4621 /* no encoding for U=0 C=1x */
4625 case NEON_3R_FLOAT_ACMP
:
4630 case NEON_3R_VRECPS_VRSQRTS
:
4636 if (u
&& (size
!= 0)) {
4637 /* UNDEF on invalid size for polynomial subcase */
4642 if (!arm_feature(env
, ARM_FEATURE_VFP4
) || u
) {
4650 if (pairwise
&& q
) {
4651 /* All the pairwise insns UNDEF if Q is set */
4655 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4660 tmp
= neon_load_reg(rn
, 0);
4661 tmp2
= neon_load_reg(rn
, 1);
4663 tmp
= neon_load_reg(rm
, 0);
4664 tmp2
= neon_load_reg(rm
, 1);
4668 tmp
= neon_load_reg(rn
, pass
);
4669 tmp2
= neon_load_reg(rm
, pass
);
4673 GEN_NEON_INTEGER_OP(hadd
);
4676 GEN_NEON_INTEGER_OP_ENV(qadd
);
4678 case NEON_3R_VRHADD
:
4679 GEN_NEON_INTEGER_OP(rhadd
);
4681 case NEON_3R_LOGIC
: /* Logic ops. */
4682 switch ((u
<< 2) | size
) {
4684 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
4687 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
4690 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4693 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
4696 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
4699 tmp3
= neon_load_reg(rd
, pass
);
4700 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
4701 tcg_temp_free_i32(tmp3
);
4704 tmp3
= neon_load_reg(rd
, pass
);
4705 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
4706 tcg_temp_free_i32(tmp3
);
4709 tmp3
= neon_load_reg(rd
, pass
);
4710 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
4711 tcg_temp_free_i32(tmp3
);
4716 GEN_NEON_INTEGER_OP(hsub
);
4719 GEN_NEON_INTEGER_OP_ENV(qsub
);
4722 GEN_NEON_INTEGER_OP(cgt
);
4725 GEN_NEON_INTEGER_OP(cge
);
4728 GEN_NEON_INTEGER_OP(shl
);
4731 GEN_NEON_INTEGER_OP_ENV(qshl
);
4734 GEN_NEON_INTEGER_OP(rshl
);
4736 case NEON_3R_VQRSHL
:
4737 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4740 GEN_NEON_INTEGER_OP(max
);
4743 GEN_NEON_INTEGER_OP(min
);
4746 GEN_NEON_INTEGER_OP(abd
);
4749 GEN_NEON_INTEGER_OP(abd
);
4750 tcg_temp_free_i32(tmp2
);
4751 tmp2
= neon_load_reg(rd
, pass
);
4752 gen_neon_add(size
, tmp
, tmp2
);
4754 case NEON_3R_VADD_VSUB
:
4755 if (!u
) { /* VADD */
4756 gen_neon_add(size
, tmp
, tmp2
);
4759 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
4760 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
4761 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
4766 case NEON_3R_VTST_VCEQ
:
4767 if (!u
) { /* VTST */
4769 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
4770 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
4771 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
4776 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
4777 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
4778 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
4783 case NEON_3R_VML
: /* VMLA, VMLAL, VMLS,VMLSL */
4785 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4786 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4787 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4790 tcg_temp_free_i32(tmp2
);
4791 tmp2
= neon_load_reg(rd
, pass
);
4793 gen_neon_rsb(size
, tmp
, tmp2
);
4795 gen_neon_add(size
, tmp
, tmp2
);
4799 if (u
) { /* polynomial */
4800 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
4801 } else { /* Integer */
4803 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4804 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4805 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4811 GEN_NEON_INTEGER_OP(pmax
);
4814 GEN_NEON_INTEGER_OP(pmin
);
4816 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
4817 if (!u
) { /* VQDMULH */
4820 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4823 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4827 } else { /* VQRDMULH */
4830 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4833 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4841 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
4842 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
4843 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
4847 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
4849 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4850 switch ((u
<< 2) | size
) {
4853 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
4856 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
4859 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
4864 tcg_temp_free_ptr(fpstatus
);
4867 case NEON_3R_FLOAT_MULTIPLY
:
4869 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4870 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
4872 tcg_temp_free_i32(tmp2
);
4873 tmp2
= neon_load_reg(rd
, pass
);
4875 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
4877 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
4880 tcg_temp_free_ptr(fpstatus
);
4883 case NEON_3R_FLOAT_CMP
:
4885 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4887 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
4890 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
4892 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
4895 tcg_temp_free_ptr(fpstatus
);
4898 case NEON_3R_FLOAT_ACMP
:
4900 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4902 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
4904 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
4906 tcg_temp_free_ptr(fpstatus
);
4909 case NEON_3R_FLOAT_MINMAX
:
4911 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4913 gen_helper_neon_max_f32(tmp
, tmp
, tmp2
, fpstatus
);
4915 gen_helper_neon_min_f32(tmp
, tmp
, tmp2
, fpstatus
);
4917 tcg_temp_free_ptr(fpstatus
);
4920 case NEON_3R_VRECPS_VRSQRTS
:
4922 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
4924 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
4928 /* VFMA, VFMS: fused multiply-add */
4929 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4930 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
4933 gen_helper_vfp_negs(tmp
, tmp
);
4935 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
4936 tcg_temp_free_i32(tmp3
);
4937 tcg_temp_free_ptr(fpstatus
);
4943 tcg_temp_free_i32(tmp2
);
4945 /* Save the result. For elementwise operations we can put it
4946 straight into the destination register. For pairwise operations
4947 we have to be careful to avoid clobbering the source operands. */
4948 if (pairwise
&& rd
== rm
) {
4949 neon_store_scratch(pass
, tmp
);
4951 neon_store_reg(rd
, pass
, tmp
);
4955 if (pairwise
&& rd
== rm
) {
4956 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4957 tmp
= neon_load_scratch(pass
);
4958 neon_store_reg(rd
, pass
, tmp
);
4961 /* End of 3 register same size operations. */
4962 } else if (insn
& (1 << 4)) {
4963 if ((insn
& 0x00380080) != 0) {
4964 /* Two registers and shift. */
4965 op
= (insn
>> 8) & 0xf;
4966 if (insn
& (1 << 7)) {
4974 while ((insn
& (1 << (size
+ 19))) == 0)
4977 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
4978 /* To avoid excessive duplication of ops we implement shift
4979 by immediate using the variable shift operations. */
4981 /* Shift by immediate:
4982 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4983 if (q
&& ((rd
| rm
) & 1)) {
4986 if (!u
&& (op
== 4 || op
== 6)) {
4989 /* Right shifts are encoded as N - shift, where N is the
4990 element size in bits. */
4992 shift
= shift
- (1 << (size
+ 3));
5000 imm
= (uint8_t) shift
;
5005 imm
= (uint16_t) shift
;
5016 for (pass
= 0; pass
< count
; pass
++) {
5018 neon_load_reg64(cpu_V0
, rm
+ pass
);
5019 tcg_gen_movi_i64(cpu_V1
, imm
);
5024 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5026 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5031 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5033 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5036 case 5: /* VSHL, VSLI */
5037 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5039 case 6: /* VQSHLU */
5040 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
5045 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5048 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5053 if (op
== 1 || op
== 3) {
5055 neon_load_reg64(cpu_V1
, rd
+ pass
);
5056 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5057 } else if (op
== 4 || (op
== 5 && u
)) {
5059 neon_load_reg64(cpu_V1
, rd
+ pass
);
5061 if (shift
< -63 || shift
> 63) {
5065 mask
= 0xffffffffffffffffull
>> -shift
;
5067 mask
= 0xffffffffffffffffull
<< shift
;
5070 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
5071 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5073 neon_store_reg64(cpu_V0
, rd
+ pass
);
5074 } else { /* size < 3 */
5075 /* Operands in T0 and T1. */
5076 tmp
= neon_load_reg(rm
, pass
);
5077 tmp2
= tcg_temp_new_i32();
5078 tcg_gen_movi_i32(tmp2
, imm
);
5082 GEN_NEON_INTEGER_OP(shl
);
5086 GEN_NEON_INTEGER_OP(rshl
);
5089 case 5: /* VSHL, VSLI */
5091 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
5092 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
5093 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
5097 case 6: /* VQSHLU */
5100 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
5104 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
5108 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
5116 GEN_NEON_INTEGER_OP_ENV(qshl
);
5119 tcg_temp_free_i32(tmp2
);
5121 if (op
== 1 || op
== 3) {
5123 tmp2
= neon_load_reg(rd
, pass
);
5124 gen_neon_add(size
, tmp
, tmp2
);
5125 tcg_temp_free_i32(tmp2
);
5126 } else if (op
== 4 || (op
== 5 && u
)) {
5131 mask
= 0xff >> -shift
;
5133 mask
= (uint8_t)(0xff << shift
);
5139 mask
= 0xffff >> -shift
;
5141 mask
= (uint16_t)(0xffff << shift
);
5145 if (shift
< -31 || shift
> 31) {
5149 mask
= 0xffffffffu
>> -shift
;
5151 mask
= 0xffffffffu
<< shift
;
5157 tmp2
= neon_load_reg(rd
, pass
);
5158 tcg_gen_andi_i32(tmp
, tmp
, mask
);
5159 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
5160 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5161 tcg_temp_free_i32(tmp2
);
5163 neon_store_reg(rd
, pass
, tmp
);
5166 } else if (op
< 10) {
5167 /* Shift by immediate and narrow:
5168 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5169 int input_unsigned
= (op
== 8) ? !u
: u
;
5173 shift
= shift
- (1 << (size
+ 3));
5176 tmp64
= tcg_const_i64(shift
);
5177 neon_load_reg64(cpu_V0
, rm
);
5178 neon_load_reg64(cpu_V1
, rm
+ 1);
5179 for (pass
= 0; pass
< 2; pass
++) {
5187 if (input_unsigned
) {
5188 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
5190 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
5193 if (input_unsigned
) {
5194 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
5196 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
5199 tmp
= tcg_temp_new_i32();
5200 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5201 neon_store_reg(rd
, pass
, tmp
);
5203 tcg_temp_free_i64(tmp64
);
5206 imm
= (uint16_t)shift
;
5210 imm
= (uint32_t)shift
;
5212 tmp2
= tcg_const_i32(imm
);
5213 tmp4
= neon_load_reg(rm
+ 1, 0);
5214 tmp5
= neon_load_reg(rm
+ 1, 1);
5215 for (pass
= 0; pass
< 2; pass
++) {
5217 tmp
= neon_load_reg(rm
, 0);
5221 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
5224 tmp3
= neon_load_reg(rm
, 1);
5228 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
5230 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
5231 tcg_temp_free_i32(tmp
);
5232 tcg_temp_free_i32(tmp3
);
5233 tmp
= tcg_temp_new_i32();
5234 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5235 neon_store_reg(rd
, pass
, tmp
);
5237 tcg_temp_free_i32(tmp2
);
5239 } else if (op
== 10) {
5241 if (q
|| (rd
& 1)) {
5244 tmp
= neon_load_reg(rm
, 0);
5245 tmp2
= neon_load_reg(rm
, 1);
5246 for (pass
= 0; pass
< 2; pass
++) {
5250 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5253 /* The shift is less than the width of the source
5254 type, so we can just shift the whole register. */
5255 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
5256 /* Widen the result of shift: we need to clear
5257 * the potential overflow bits resulting from
5258 * left bits of the narrow input appearing as
5259 * right bits of left the neighbour narrow
5261 if (size
< 2 || !u
) {
5264 imm
= (0xffu
>> (8 - shift
));
5266 } else if (size
== 1) {
5267 imm
= 0xffff >> (16 - shift
);
5270 imm
= 0xffffffff >> (32 - shift
);
5273 imm64
= imm
| (((uint64_t)imm
) << 32);
5277 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
5280 neon_store_reg64(cpu_V0
, rd
+ pass
);
5282 } else if (op
>= 14) {
5283 /* VCVT fixed-point. */
5284 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
5287 /* We have already masked out the must-be-1 top bit of imm6,
5288 * hence this 32-shift where the ARM ARM has 64-imm6.
5291 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5292 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
5295 gen_vfp_ulto(0, shift
, 1);
5297 gen_vfp_slto(0, shift
, 1);
5300 gen_vfp_toul(0, shift
, 1);
5302 gen_vfp_tosl(0, shift
, 1);
5304 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
5309 } else { /* (insn & 0x00380080) == 0 */
5311 if (q
&& (rd
& 1)) {
5315 op
= (insn
>> 8) & 0xf;
5316 /* One register and immediate. */
5317 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
5318 invert
= (insn
& (1 << 5)) != 0;
5319 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5320 * We choose to not special-case this and will behave as if a
5321 * valid constant encoding of 0 had been given.
5340 imm
= (imm
<< 8) | (imm
<< 24);
5343 imm
= (imm
<< 8) | 0xff;
5346 imm
= (imm
<< 16) | 0xffff;
5349 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
5357 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
5358 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
5364 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5365 if (op
& 1 && op
< 12) {
5366 tmp
= neon_load_reg(rd
, pass
);
5368 /* The immediate value has already been inverted, so
5370 tcg_gen_andi_i32(tmp
, tmp
, imm
);
5372 tcg_gen_ori_i32(tmp
, tmp
, imm
);
5376 tmp
= tcg_temp_new_i32();
5377 if (op
== 14 && invert
) {
5381 for (n
= 0; n
< 4; n
++) {
5382 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
5383 val
|= 0xff << (n
* 8);
5385 tcg_gen_movi_i32(tmp
, val
);
5387 tcg_gen_movi_i32(tmp
, imm
);
5390 neon_store_reg(rd
, pass
, tmp
);
5393 } else { /* (insn & 0x00800010 == 0x00800000) */
5395 op
= (insn
>> 8) & 0xf;
5396 if ((insn
& (1 << 6)) == 0) {
5397 /* Three registers of different lengths. */
5401 /* undefreq: bit 0 : UNDEF if size != 0
5402 * bit 1 : UNDEF if size == 0
5403 * bit 2 : UNDEF if U == 1
5404 * Note that [1:0] set implies 'always UNDEF'
5407 /* prewiden, src1_wide, src2_wide, undefreq */
5408 static const int neon_3reg_wide
[16][4] = {
5409 {1, 0, 0, 0}, /* VADDL */
5410 {1, 1, 0, 0}, /* VADDW */
5411 {1, 0, 0, 0}, /* VSUBL */
5412 {1, 1, 0, 0}, /* VSUBW */
5413 {0, 1, 1, 0}, /* VADDHN */
5414 {0, 0, 0, 0}, /* VABAL */
5415 {0, 1, 1, 0}, /* VSUBHN */
5416 {0, 0, 0, 0}, /* VABDL */
5417 {0, 0, 0, 0}, /* VMLAL */
5418 {0, 0, 0, 6}, /* VQDMLAL */
5419 {0, 0, 0, 0}, /* VMLSL */
5420 {0, 0, 0, 6}, /* VQDMLSL */
5421 {0, 0, 0, 0}, /* Integer VMULL */
5422 {0, 0, 0, 2}, /* VQDMULL */
5423 {0, 0, 0, 5}, /* Polynomial VMULL */
5424 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5427 prewiden
= neon_3reg_wide
[op
][0];
5428 src1_wide
= neon_3reg_wide
[op
][1];
5429 src2_wide
= neon_3reg_wide
[op
][2];
5430 undefreq
= neon_3reg_wide
[op
][3];
5432 if (((undefreq
& 1) && (size
!= 0)) ||
5433 ((undefreq
& 2) && (size
== 0)) ||
5434 ((undefreq
& 4) && u
)) {
5437 if ((src1_wide
&& (rn
& 1)) ||
5438 (src2_wide
&& (rm
& 1)) ||
5439 (!src2_wide
&& (rd
& 1))) {
5443 /* Avoid overlapping operands. Wide source operands are
5444 always aligned so will never overlap with wide
5445 destinations in problematic ways. */
5446 if (rd
== rm
&& !src2_wide
) {
5447 tmp
= neon_load_reg(rm
, 1);
5448 neon_store_scratch(2, tmp
);
5449 } else if (rd
== rn
&& !src1_wide
) {
5450 tmp
= neon_load_reg(rn
, 1);
5451 neon_store_scratch(2, tmp
);
5454 for (pass
= 0; pass
< 2; pass
++) {
5456 neon_load_reg64(cpu_V0
, rn
+ pass
);
5459 if (pass
== 1 && rd
== rn
) {
5460 tmp
= neon_load_scratch(2);
5462 tmp
= neon_load_reg(rn
, pass
);
5465 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5469 neon_load_reg64(cpu_V1
, rm
+ pass
);
5472 if (pass
== 1 && rd
== rm
) {
5473 tmp2
= neon_load_scratch(2);
5475 tmp2
= neon_load_reg(rm
, pass
);
5478 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
5482 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5483 gen_neon_addl(size
);
5485 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5486 gen_neon_subl(size
);
5488 case 5: case 7: /* VABAL, VABDL */
5489 switch ((size
<< 1) | u
) {
5491 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
5494 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
5497 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
5500 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
5503 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
5506 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
5510 tcg_temp_free_i32(tmp2
);
5511 tcg_temp_free_i32(tmp
);
5513 case 8: case 9: case 10: case 11: case 12: case 13:
5514 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5515 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5517 case 14: /* Polynomial VMULL */
5518 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
5519 tcg_temp_free_i32(tmp2
);
5520 tcg_temp_free_i32(tmp
);
5522 default: /* 15 is RESERVED: caught earlier */
5527 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5528 neon_store_reg64(cpu_V0
, rd
+ pass
);
5529 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
5531 neon_load_reg64(cpu_V1
, rd
+ pass
);
5533 case 10: /* VMLSL */
5534 gen_neon_negl(cpu_V0
, size
);
5536 case 5: case 8: /* VABAL, VMLAL */
5537 gen_neon_addl(size
);
5539 case 9: case 11: /* VQDMLAL, VQDMLSL */
5540 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5542 gen_neon_negl(cpu_V0
, size
);
5544 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5549 neon_store_reg64(cpu_V0
, rd
+ pass
);
5550 } else if (op
== 4 || op
== 6) {
5551 /* Narrowing operation. */
5552 tmp
= tcg_temp_new_i32();
5556 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
5559 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
5562 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5563 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5570 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
5573 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
5576 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
5577 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5578 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5586 neon_store_reg(rd
, 0, tmp3
);
5587 neon_store_reg(rd
, 1, tmp
);
5590 /* Write back the result. */
5591 neon_store_reg64(cpu_V0
, rd
+ pass
);
5595 /* Two registers and a scalar. NB that for ops of this form
5596 * the ARM ARM labels bit 24 as Q, but it is in our variable
5603 case 1: /* Float VMLA scalar */
5604 case 5: /* Floating point VMLS scalar */
5605 case 9: /* Floating point VMUL scalar */
5610 case 0: /* Integer VMLA scalar */
5611 case 4: /* Integer VMLS scalar */
5612 case 8: /* Integer VMUL scalar */
5613 case 12: /* VQDMULH scalar */
5614 case 13: /* VQRDMULH scalar */
5615 if (u
&& ((rd
| rn
) & 1)) {
5618 tmp
= neon_get_scalar(size
, rm
);
5619 neon_store_scratch(0, tmp
);
5620 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
5621 tmp
= neon_load_scratch(0);
5622 tmp2
= neon_load_reg(rn
, pass
);
5625 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5627 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5629 } else if (op
== 13) {
5631 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5633 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5635 } else if (op
& 1) {
5636 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5637 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5638 tcg_temp_free_ptr(fpstatus
);
5641 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5642 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5643 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5647 tcg_temp_free_i32(tmp2
);
5650 tmp2
= neon_load_reg(rd
, pass
);
5653 gen_neon_add(size
, tmp
, tmp2
);
5657 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5658 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5659 tcg_temp_free_ptr(fpstatus
);
5663 gen_neon_rsb(size
, tmp
, tmp2
);
5667 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5668 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
5669 tcg_temp_free_ptr(fpstatus
);
5675 tcg_temp_free_i32(tmp2
);
5677 neon_store_reg(rd
, pass
, tmp
);
5680 case 3: /* VQDMLAL scalar */
5681 case 7: /* VQDMLSL scalar */
5682 case 11: /* VQDMULL scalar */
5687 case 2: /* VMLAL sclar */
5688 case 6: /* VMLSL scalar */
5689 case 10: /* VMULL scalar */
5693 tmp2
= neon_get_scalar(size
, rm
);
5694 /* We need a copy of tmp2 because gen_neon_mull
5695 * deletes it during pass 0. */
5696 tmp4
= tcg_temp_new_i32();
5697 tcg_gen_mov_i32(tmp4
, tmp2
);
5698 tmp3
= neon_load_reg(rn
, 1);
5700 for (pass
= 0; pass
< 2; pass
++) {
5702 tmp
= neon_load_reg(rn
, 0);
5707 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5709 neon_load_reg64(cpu_V1
, rd
+ pass
);
5713 gen_neon_negl(cpu_V0
, size
);
5716 gen_neon_addl(size
);
5719 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5721 gen_neon_negl(cpu_V0
, size
);
5723 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5729 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5734 neon_store_reg64(cpu_V0
, rd
+ pass
);
5739 default: /* 14 and 15 are RESERVED */
5743 } else { /* size == 3 */
5746 imm
= (insn
>> 8) & 0xf;
5751 if (q
&& ((rd
| rn
| rm
) & 1)) {
5756 neon_load_reg64(cpu_V0
, rn
);
5758 neon_load_reg64(cpu_V1
, rn
+ 1);
5760 } else if (imm
== 8) {
5761 neon_load_reg64(cpu_V0
, rn
+ 1);
5763 neon_load_reg64(cpu_V1
, rm
);
5766 tmp64
= tcg_temp_new_i64();
5768 neon_load_reg64(cpu_V0
, rn
);
5769 neon_load_reg64(tmp64
, rn
+ 1);
5771 neon_load_reg64(cpu_V0
, rn
+ 1);
5772 neon_load_reg64(tmp64
, rm
);
5774 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5775 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5776 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5778 neon_load_reg64(cpu_V1
, rm
);
5780 neon_load_reg64(cpu_V1
, rm
+ 1);
5783 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5784 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5785 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5786 tcg_temp_free_i64(tmp64
);
5789 neon_load_reg64(cpu_V0
, rn
);
5790 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5791 neon_load_reg64(cpu_V1
, rm
);
5792 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5793 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5795 neon_store_reg64(cpu_V0
, rd
);
5797 neon_store_reg64(cpu_V1
, rd
+ 1);
5799 } else if ((insn
& (1 << 11)) == 0) {
5800 /* Two register misc. */
5801 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5802 size
= (insn
>> 18) & 3;
5803 /* UNDEF for unknown op values and bad op-size combinations */
5804 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
5807 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
5808 q
&& ((rm
| rd
) & 1)) {
5812 case NEON_2RM_VREV64
:
5813 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5814 tmp
= neon_load_reg(rm
, pass
* 2);
5815 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
5817 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5818 case 1: gen_swap_half(tmp
); break;
5819 case 2: /* no-op */ break;
5822 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
5824 neon_store_reg(rd
, pass
* 2, tmp2
);
5827 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
5828 case 1: gen_swap_half(tmp2
); break;
5831 neon_store_reg(rd
, pass
* 2, tmp2
);
5835 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
5836 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
5837 for (pass
= 0; pass
< q
+ 1; pass
++) {
5838 tmp
= neon_load_reg(rm
, pass
* 2);
5839 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5840 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5841 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5843 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5844 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5845 case 2: tcg_gen_add_i64(CPU_V001
); break;
5848 if (op
>= NEON_2RM_VPADAL
) {
5850 neon_load_reg64(cpu_V1
, rd
+ pass
);
5851 gen_neon_addl(size
);
5853 neon_store_reg64(cpu_V0
, rd
+ pass
);
5859 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
5860 tmp
= neon_load_reg(rm
, n
);
5861 tmp2
= neon_load_reg(rd
, n
+ 1);
5862 neon_store_reg(rm
, n
, tmp2
);
5863 neon_store_reg(rd
, n
+ 1, tmp
);
5870 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
5875 if (gen_neon_zip(rd
, rm
, size
, q
)) {
5879 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
5880 /* also VQMOVUN; op field and mnemonics don't line up */
5885 for (pass
= 0; pass
< 2; pass
++) {
5886 neon_load_reg64(cpu_V0
, rm
+ pass
);
5887 tmp
= tcg_temp_new_i32();
5888 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
5893 neon_store_reg(rd
, 0, tmp2
);
5894 neon_store_reg(rd
, 1, tmp
);
5898 case NEON_2RM_VSHLL
:
5899 if (q
|| (rd
& 1)) {
5902 tmp
= neon_load_reg(rm
, 0);
5903 tmp2
= neon_load_reg(rm
, 1);
5904 for (pass
= 0; pass
< 2; pass
++) {
5907 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
5908 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
5909 neon_store_reg64(cpu_V0
, rd
+ pass
);
5912 case NEON_2RM_VCVT_F16_F32
:
5913 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
5917 tmp
= tcg_temp_new_i32();
5918 tmp2
= tcg_temp_new_i32();
5919 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
5920 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5921 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
5922 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5923 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5924 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5925 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
5926 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5927 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
5928 neon_store_reg(rd
, 0, tmp2
);
5929 tmp2
= tcg_temp_new_i32();
5930 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5931 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5932 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5933 neon_store_reg(rd
, 1, tmp2
);
5934 tcg_temp_free_i32(tmp
);
5936 case NEON_2RM_VCVT_F32_F16
:
5937 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
5941 tmp3
= tcg_temp_new_i32();
5942 tmp
= neon_load_reg(rm
, 0);
5943 tmp2
= neon_load_reg(rm
, 1);
5944 tcg_gen_ext16u_i32(tmp3
, tmp
);
5945 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5946 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
5947 tcg_gen_shri_i32(tmp3
, tmp
, 16);
5948 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5949 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
5950 tcg_temp_free_i32(tmp
);
5951 tcg_gen_ext16u_i32(tmp3
, tmp2
);
5952 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5953 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
5954 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
5955 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5956 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
5957 tcg_temp_free_i32(tmp2
);
5958 tcg_temp_free_i32(tmp3
);
5962 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5963 if (neon_2rm_is_float_op(op
)) {
5964 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
5965 neon_reg_offset(rm
, pass
));
5968 tmp
= neon_load_reg(rm
, pass
);
5971 case NEON_2RM_VREV32
:
5973 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5974 case 1: gen_swap_half(tmp
); break;
5978 case NEON_2RM_VREV16
:
5983 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
5984 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
5985 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
5991 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
5992 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
5993 case 2: gen_helper_clz(tmp
, tmp
); break;
5998 gen_helper_neon_cnt_u8(tmp
, tmp
);
6001 tcg_gen_not_i32(tmp
, tmp
);
6003 case NEON_2RM_VQABS
:
6006 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
6009 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
6012 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
6017 case NEON_2RM_VQNEG
:
6020 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
6023 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
6026 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
6031 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
6032 tmp2
= tcg_const_i32(0);
6034 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
6035 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
6036 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
6039 tcg_temp_free(tmp2
);
6040 if (op
== NEON_2RM_VCLE0
) {
6041 tcg_gen_not_i32(tmp
, tmp
);
6044 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
6045 tmp2
= tcg_const_i32(0);
6047 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
6048 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
6049 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
6052 tcg_temp_free(tmp2
);
6053 if (op
== NEON_2RM_VCLT0
) {
6054 tcg_gen_not_i32(tmp
, tmp
);
6057 case NEON_2RM_VCEQ0
:
6058 tmp2
= tcg_const_i32(0);
6060 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
6061 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
6062 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
6065 tcg_temp_free(tmp2
);
6069 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
6070 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
6071 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
6076 tmp2
= tcg_const_i32(0);
6077 gen_neon_rsb(size
, tmp
, tmp2
);
6078 tcg_temp_free(tmp2
);
6080 case NEON_2RM_VCGT0_F
:
6082 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6083 tmp2
= tcg_const_i32(0);
6084 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6085 tcg_temp_free(tmp2
);
6086 tcg_temp_free_ptr(fpstatus
);
6089 case NEON_2RM_VCGE0_F
:
6091 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6092 tmp2
= tcg_const_i32(0);
6093 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6094 tcg_temp_free(tmp2
);
6095 tcg_temp_free_ptr(fpstatus
);
6098 case NEON_2RM_VCEQ0_F
:
6100 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6101 tmp2
= tcg_const_i32(0);
6102 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
6103 tcg_temp_free(tmp2
);
6104 tcg_temp_free_ptr(fpstatus
);
6107 case NEON_2RM_VCLE0_F
:
6109 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6110 tmp2
= tcg_const_i32(0);
6111 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
6112 tcg_temp_free(tmp2
);
6113 tcg_temp_free_ptr(fpstatus
);
6116 case NEON_2RM_VCLT0_F
:
6118 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6119 tmp2
= tcg_const_i32(0);
6120 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
6121 tcg_temp_free(tmp2
);
6122 tcg_temp_free_ptr(fpstatus
);
6125 case NEON_2RM_VABS_F
:
6128 case NEON_2RM_VNEG_F
:
6132 tmp2
= neon_load_reg(rd
, pass
);
6133 neon_store_reg(rm
, pass
, tmp2
);
6136 tmp2
= neon_load_reg(rd
, pass
);
6138 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
6139 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
6142 neon_store_reg(rm
, pass
, tmp2
);
6144 case NEON_2RM_VRECPE
:
6145 gen_helper_recpe_u32(tmp
, tmp
, cpu_env
);
6147 case NEON_2RM_VRSQRTE
:
6148 gen_helper_rsqrte_u32(tmp
, tmp
, cpu_env
);
6150 case NEON_2RM_VRECPE_F
:
6151 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
6153 case NEON_2RM_VRSQRTE_F
:
6154 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
6156 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
6159 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
6162 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
6163 gen_vfp_tosiz(0, 1);
6165 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
6166 gen_vfp_touiz(0, 1);
6169 /* Reserved op values were caught by the
6170 * neon_2rm_sizes[] check earlier.
6174 if (neon_2rm_is_float_op(op
)) {
6175 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
6176 neon_reg_offset(rd
, pass
));
6178 neon_store_reg(rd
, pass
, tmp
);
6183 } else if ((insn
& (1 << 10)) == 0) {
6185 int n
= ((insn
>> 8) & 3) + 1;
6186 if ((rn
+ n
) > 32) {
6187 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6188 * helper function running off the end of the register file.
6193 if (insn
& (1 << 6)) {
6194 tmp
= neon_load_reg(rd
, 0);
6196 tmp
= tcg_temp_new_i32();
6197 tcg_gen_movi_i32(tmp
, 0);
6199 tmp2
= neon_load_reg(rm
, 0);
6200 tmp4
= tcg_const_i32(rn
);
6201 tmp5
= tcg_const_i32(n
);
6202 gen_helper_neon_tbl(tmp2
, cpu_env
, tmp2
, tmp
, tmp4
, tmp5
);
6203 tcg_temp_free_i32(tmp
);
6204 if (insn
& (1 << 6)) {
6205 tmp
= neon_load_reg(rd
, 1);
6207 tmp
= tcg_temp_new_i32();
6208 tcg_gen_movi_i32(tmp
, 0);
6210 tmp3
= neon_load_reg(rm
, 1);
6211 gen_helper_neon_tbl(tmp3
, cpu_env
, tmp3
, tmp
, tmp4
, tmp5
);
6212 tcg_temp_free_i32(tmp5
);
6213 tcg_temp_free_i32(tmp4
);
6214 neon_store_reg(rd
, 0, tmp2
);
6215 neon_store_reg(rd
, 1, tmp3
);
6216 tcg_temp_free_i32(tmp
);
6217 } else if ((insn
& 0x380) == 0) {
6219 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
6222 if (insn
& (1 << 19)) {
6223 tmp
= neon_load_reg(rm
, 1);
6225 tmp
= neon_load_reg(rm
, 0);
6227 if (insn
& (1 << 16)) {
6228 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
6229 } else if (insn
& (1 << 17)) {
6230 if ((insn
>> 18) & 1)
6231 gen_neon_dup_high16(tmp
);
6233 gen_neon_dup_low16(tmp
);
6235 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6236 tmp2
= tcg_temp_new_i32();
6237 tcg_gen_mov_i32(tmp2
, tmp
);
6238 neon_store_reg(rd
, pass
, tmp2
);
6240 tcg_temp_free_i32(tmp
);
6249 static int disas_coproc_insn(CPUARMState
* env
, DisasContext
*s
, uint32_t insn
)
6251 int cpnum
, is64
, crn
, crm
, opc1
, opc2
, isread
, rt
, rt2
;
6252 const ARMCPRegInfo
*ri
;
6253 ARMCPU
*cpu
= arm_env_get_cpu(env
);
6255 cpnum
= (insn
>> 8) & 0xf;
6256 if (arm_feature(env
, ARM_FEATURE_XSCALE
)
6257 && ((env
->cp15
.c15_cpar
^ 0x3fff) & (1 << cpnum
)))
6260 /* First check for coprocessor space used for actual instructions */
6264 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6265 return disas_iwmmxt_insn(env
, s
, insn
);
6266 } else if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
6267 return disas_dsp_insn(env
, s
, insn
);
6272 return disas_vfp_insn (env
, s
, insn
);
6277 /* Otherwise treat as a generic register access */
6278 is64
= (insn
& (1 << 25)) == 0;
6279 if (!is64
&& ((insn
& (1 << 4)) == 0)) {
6287 opc1
= (insn
>> 4) & 0xf;
6289 rt2
= (insn
>> 16) & 0xf;
6291 crn
= (insn
>> 16) & 0xf;
6292 opc1
= (insn
>> 21) & 7;
6293 opc2
= (insn
>> 5) & 7;
6296 isread
= (insn
>> 20) & 1;
6297 rt
= (insn
>> 12) & 0xf;
6299 ri
= get_arm_cp_reginfo(cpu
,
6300 ENCODE_CP_REG(cpnum
, is64
, crn
, crm
, opc1
, opc2
));
6302 /* Check access permissions */
6303 if (!cp_access_ok(env
, ri
, isread
)) {
6307 /* Handle special cases first */
6308 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
6315 gen_set_pc_im(s
->pc
);
6316 s
->is_jmp
= DISAS_WFI
;
6327 if (ri
->type
& ARM_CP_CONST
) {
6328 tmp64
= tcg_const_i64(ri
->resetvalue
);
6329 } else if (ri
->readfn
) {
6331 gen_set_pc_im(s
->pc
);
6332 tmp64
= tcg_temp_new_i64();
6333 tmpptr
= tcg_const_ptr(ri
);
6334 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
6335 tcg_temp_free_ptr(tmpptr
);
6337 tmp64
= tcg_temp_new_i64();
6338 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
6340 tmp
= tcg_temp_new_i32();
6341 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6342 store_reg(s
, rt
, tmp
);
6343 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
6344 tmp
= tcg_temp_new_i32();
6345 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6346 tcg_temp_free_i64(tmp64
);
6347 store_reg(s
, rt2
, tmp
);
6350 if (ri
->type
& ARM_CP_CONST
) {
6351 tmp
= tcg_const_i32(ri
->resetvalue
);
6352 } else if (ri
->readfn
) {
6354 gen_set_pc_im(s
->pc
);
6355 tmp
= tcg_temp_new_i32();
6356 tmpptr
= tcg_const_ptr(ri
);
6357 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
6358 tcg_temp_free_ptr(tmpptr
);
6360 tmp
= load_cpu_offset(ri
->fieldoffset
);
6363 /* Destination register of r15 for 32 bit loads sets
6364 * the condition codes from the high 4 bits of the value
6367 tcg_temp_free_i32(tmp
);
6369 store_reg(s
, rt
, tmp
);
6374 if (ri
->type
& ARM_CP_CONST
) {
6375 /* If not forbidden by access permissions, treat as WI */
6381 TCGv_i64 tmp64
= tcg_temp_new_i64();
6382 tmplo
= load_reg(s
, rt
);
6383 tmphi
= load_reg(s
, rt2
);
6384 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
6385 tcg_temp_free_i32(tmplo
);
6386 tcg_temp_free_i32(tmphi
);
6388 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
6389 gen_set_pc_im(s
->pc
);
6390 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
6391 tcg_temp_free_ptr(tmpptr
);
6393 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
6395 tcg_temp_free_i64(tmp64
);
6400 gen_set_pc_im(s
->pc
);
6401 tmp
= load_reg(s
, rt
);
6402 tmpptr
= tcg_const_ptr(ri
);
6403 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
6404 tcg_temp_free_ptr(tmpptr
);
6405 tcg_temp_free_i32(tmp
);
6407 TCGv tmp
= load_reg(s
, rt
);
6408 store_cpu_offset(tmp
, ri
->fieldoffset
);
6411 /* We default to ending the TB on a coprocessor register write,
6412 * but allow this to be suppressed by the register definition
6413 * (usually only necessary to work around guest bugs).
6415 if (!(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
6426 /* Store a 64-bit value to a register pair. Clobbers val. */
6427 static void gen_storeq_reg(DisasContext
*s
, int rlow
, int rhigh
, TCGv_i64 val
)
6430 tmp
= tcg_temp_new_i32();
6431 tcg_gen_trunc_i64_i32(tmp
, val
);
6432 store_reg(s
, rlow
, tmp
);
6433 tmp
= tcg_temp_new_i32();
6434 tcg_gen_shri_i64(val
, val
, 32);
6435 tcg_gen_trunc_i64_i32(tmp
, val
);
6436 store_reg(s
, rhigh
, tmp
);
6439 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6440 static void gen_addq_lo(DisasContext
*s
, TCGv_i64 val
, int rlow
)
6445 /* Load value and extend to 64 bits. */
6446 tmp
= tcg_temp_new_i64();
6447 tmp2
= load_reg(s
, rlow
);
6448 tcg_gen_extu_i32_i64(tmp
, tmp2
);
6449 tcg_temp_free_i32(tmp2
);
6450 tcg_gen_add_i64(val
, val
, tmp
);
6451 tcg_temp_free_i64(tmp
);
6454 /* load and add a 64-bit value from a register pair. */
6455 static void gen_addq(DisasContext
*s
, TCGv_i64 val
, int rlow
, int rhigh
)
6461 /* Load 64-bit value rd:rn. */
6462 tmpl
= load_reg(s
, rlow
);
6463 tmph
= load_reg(s
, rhigh
);
6464 tmp
= tcg_temp_new_i64();
6465 tcg_gen_concat_i32_i64(tmp
, tmpl
, tmph
);
6466 tcg_temp_free_i32(tmpl
);
6467 tcg_temp_free_i32(tmph
);
6468 tcg_gen_add_i64(val
, val
, tmp
);
6469 tcg_temp_free_i64(tmp
);
6472 /* Set N and Z flags from hi|lo. */
6473 static void gen_logicq_cc(TCGv lo
, TCGv hi
)
6475 tcg_gen_mov_i32(cpu_NF
, hi
);
6476 tcg_gen_or_i32(cpu_ZF
, lo
, hi
);
6479 /* Load/Store exclusive instructions are implemented by remembering
6480 the value/address loaded, and seeing if these are the same
6481 when the store is performed. This should be sufficient to implement
6482 the architecturally mandated semantics, and avoids having to monitor
6485 In system emulation mode only one CPU will be running at once, so
6486 this sequence is effectively atomic. In user emulation mode we
6487 throw an exception and handle the atomic operation elsewhere. */
6488 static void gen_load_exclusive(DisasContext
*s
, int rt
, int rt2
,
6489 TCGv addr
, int size
)
6495 tmp
= gen_ld8u(addr
, IS_USER(s
));
6498 tmp
= gen_ld16u(addr
, IS_USER(s
));
6502 tmp
= gen_ld32(addr
, IS_USER(s
));
6507 tcg_gen_mov_i32(cpu_exclusive_val
, tmp
);
6508 store_reg(s
, rt
, tmp
);
6510 TCGv tmp2
= tcg_temp_new_i32();
6511 tcg_gen_addi_i32(tmp2
, addr
, 4);
6512 tmp
= gen_ld32(tmp2
, IS_USER(s
));
6513 tcg_temp_free_i32(tmp2
);
6514 tcg_gen_mov_i32(cpu_exclusive_high
, tmp
);
6515 store_reg(s
, rt2
, tmp
);
6517 tcg_gen_mov_i32(cpu_exclusive_addr
, addr
);
6520 static void gen_clrex(DisasContext
*s
)
6522 tcg_gen_movi_i32(cpu_exclusive_addr
, -1);
6525 #ifdef CONFIG_USER_ONLY
6526 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
6527 TCGv addr
, int size
)
6529 tcg_gen_mov_i32(cpu_exclusive_test
, addr
);
6530 tcg_gen_movi_i32(cpu_exclusive_info
,
6531 size
| (rd
<< 4) | (rt
<< 8) | (rt2
<< 12));
6532 gen_exception_insn(s
, 4, EXCP_STREX
);
6535 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
6536 TCGv addr
, int size
)
6542 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6548 fail_label
= gen_new_label();
6549 done_label
= gen_new_label();
6550 tcg_gen_brcond_i32(TCG_COND_NE
, addr
, cpu_exclusive_addr
, fail_label
);
6553 tmp
= gen_ld8u(addr
, IS_USER(s
));
6556 tmp
= gen_ld16u(addr
, IS_USER(s
));
6560 tmp
= gen_ld32(addr
, IS_USER(s
));
6565 tcg_gen_brcond_i32(TCG_COND_NE
, tmp
, cpu_exclusive_val
, fail_label
);
6566 tcg_temp_free_i32(tmp
);
6568 TCGv tmp2
= tcg_temp_new_i32();
6569 tcg_gen_addi_i32(tmp2
, addr
, 4);
6570 tmp
= gen_ld32(tmp2
, IS_USER(s
));
6571 tcg_temp_free_i32(tmp2
);
6572 tcg_gen_brcond_i32(TCG_COND_NE
, tmp
, cpu_exclusive_high
, fail_label
);
6573 tcg_temp_free_i32(tmp
);
6575 tmp
= load_reg(s
, rt
);
6578 gen_st8(tmp
, addr
, IS_USER(s
));
6581 gen_st16(tmp
, addr
, IS_USER(s
));
6585 gen_st32(tmp
, addr
, IS_USER(s
));
6591 tcg_gen_addi_i32(addr
, addr
, 4);
6592 tmp
= load_reg(s
, rt2
);
6593 gen_st32(tmp
, addr
, IS_USER(s
));
6595 tcg_gen_movi_i32(cpu_R
[rd
], 0);
6596 tcg_gen_br(done_label
);
6597 gen_set_label(fail_label
);
6598 tcg_gen_movi_i32(cpu_R
[rd
], 1);
6599 gen_set_label(done_label
);
6600 tcg_gen_movi_i32(cpu_exclusive_addr
, -1);
6604 static void disas_arm_insn(CPUARMState
* env
, DisasContext
*s
)
6606 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
6613 insn
= arm_ldl_code(env
, s
->pc
, s
->bswap_code
);
6616 /* M variants do not implement ARM mode. */
6621 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6622 * choose to UNDEF. In ARMv5 and above the space is used
6623 * for miscellaneous unconditional instructions.
6627 /* Unconditional instructions. */
6628 if (((insn
>> 25) & 7) == 1) {
6629 /* NEON Data processing. */
6630 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6633 if (disas_neon_data_insn(env
, s
, insn
))
6637 if ((insn
& 0x0f100000) == 0x04000000) {
6638 /* NEON load/store. */
6639 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6642 if (disas_neon_ls_insn(env
, s
, insn
))
6646 if (((insn
& 0x0f30f000) == 0x0510f000) ||
6647 ((insn
& 0x0f30f010) == 0x0710f000)) {
6648 if ((insn
& (1 << 22)) == 0) {
6650 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6654 /* Otherwise PLD; v5TE+ */
6658 if (((insn
& 0x0f70f000) == 0x0450f000) ||
6659 ((insn
& 0x0f70f010) == 0x0650f000)) {
6661 return; /* PLI; V7 */
6663 if (((insn
& 0x0f700000) == 0x04100000) ||
6664 ((insn
& 0x0f700010) == 0x06100000)) {
6665 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6668 return; /* v7MP: Unallocated memory hint: must NOP */
6671 if ((insn
& 0x0ffffdff) == 0x01010000) {
6674 if (((insn
>> 9) & 1) != s
->bswap_code
) {
6675 /* Dynamic endianness switching not implemented. */
6679 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
6680 switch ((insn
>> 4) & 0xf) {
6689 /* We don't emulate caches so these are a no-op. */
6694 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
6700 op1
= (insn
& 0x1f);
6701 addr
= tcg_temp_new_i32();
6702 tmp
= tcg_const_i32(op1
);
6703 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
6704 tcg_temp_free_i32(tmp
);
6705 i
= (insn
>> 23) & 3;
6707 case 0: offset
= -4; break; /* DA */
6708 case 1: offset
= 0; break; /* IA */
6709 case 2: offset
= -8; break; /* DB */
6710 case 3: offset
= 4; break; /* IB */
6714 tcg_gen_addi_i32(addr
, addr
, offset
);
6715 tmp
= load_reg(s
, 14);
6716 gen_st32(tmp
, addr
, 0);
6717 tmp
= load_cpu_field(spsr
);
6718 tcg_gen_addi_i32(addr
, addr
, 4);
6719 gen_st32(tmp
, addr
, 0);
6720 if (insn
& (1 << 21)) {
6721 /* Base writeback. */
6723 case 0: offset
= -8; break;
6724 case 1: offset
= 4; break;
6725 case 2: offset
= -4; break;
6726 case 3: offset
= 0; break;
6730 tcg_gen_addi_i32(addr
, addr
, offset
);
6731 tmp
= tcg_const_i32(op1
);
6732 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
6733 tcg_temp_free_i32(tmp
);
6734 tcg_temp_free_i32(addr
);
6736 tcg_temp_free_i32(addr
);
6739 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
6745 rn
= (insn
>> 16) & 0xf;
6746 addr
= load_reg(s
, rn
);
6747 i
= (insn
>> 23) & 3;
6749 case 0: offset
= -4; break; /* DA */
6750 case 1: offset
= 0; break; /* IA */
6751 case 2: offset
= -8; break; /* DB */
6752 case 3: offset
= 4; break; /* IB */
6756 tcg_gen_addi_i32(addr
, addr
, offset
);
6757 /* Load PC into tmp and CPSR into tmp2. */
6758 tmp
= gen_ld32(addr
, 0);
6759 tcg_gen_addi_i32(addr
, addr
, 4);
6760 tmp2
= gen_ld32(addr
, 0);
6761 if (insn
& (1 << 21)) {
6762 /* Base writeback. */
6764 case 0: offset
= -8; break;
6765 case 1: offset
= 4; break;
6766 case 2: offset
= -4; break;
6767 case 3: offset
= 0; break;
6771 tcg_gen_addi_i32(addr
, addr
, offset
);
6772 store_reg(s
, rn
, addr
);
6774 tcg_temp_free_i32(addr
);
6776 gen_rfe(s
, tmp
, tmp2
);
6778 } else if ((insn
& 0x0e000000) == 0x0a000000) {
6779 /* branch link and change to thumb (blx <offset>) */
6782 val
= (uint32_t)s
->pc
;
6783 tmp
= tcg_temp_new_i32();
6784 tcg_gen_movi_i32(tmp
, val
);
6785 store_reg(s
, 14, tmp
);
6786 /* Sign-extend the 24-bit offset */
6787 offset
= (((int32_t)insn
) << 8) >> 8;
6788 /* offset * 4 + bit24 * 2 + (thumb bit) */
6789 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
6790 /* pipeline offset */
6792 /* protected by ARCH(5); above, near the start of uncond block */
6795 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
6796 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6797 /* iWMMXt register transfer. */
6798 if (env
->cp15
.c15_cpar
& (1 << 1))
6799 if (!disas_iwmmxt_insn(env
, s
, insn
))
6802 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
6803 /* Coprocessor double register transfer. */
6805 } else if ((insn
& 0x0f000010) == 0x0e000010) {
6806 /* Additional coprocessor register transfer. */
6807 } else if ((insn
& 0x0ff10020) == 0x01000000) {
6810 /* cps (privileged) */
6814 if (insn
& (1 << 19)) {
6815 if (insn
& (1 << 8))
6817 if (insn
& (1 << 7))
6819 if (insn
& (1 << 6))
6821 if (insn
& (1 << 18))
6824 if (insn
& (1 << 17)) {
6826 val
|= (insn
& 0x1f);
6829 gen_set_psr_im(s
, mask
, 0, val
);
6836 /* if not always execute, we generate a conditional jump to
6838 s
->condlabel
= gen_new_label();
6839 gen_test_cc(cond
^ 1, s
->condlabel
);
6842 if ((insn
& 0x0f900000) == 0x03000000) {
6843 if ((insn
& (1 << 21)) == 0) {
6845 rd
= (insn
>> 12) & 0xf;
6846 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
6847 if ((insn
& (1 << 22)) == 0) {
6849 tmp
= tcg_temp_new_i32();
6850 tcg_gen_movi_i32(tmp
, val
);
6853 tmp
= load_reg(s
, rd
);
6854 tcg_gen_ext16u_i32(tmp
, tmp
);
6855 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
6857 store_reg(s
, rd
, tmp
);
6859 if (((insn
>> 12) & 0xf) != 0xf)
6861 if (((insn
>> 16) & 0xf) == 0) {
6862 gen_nop_hint(s
, insn
& 0xff);
6864 /* CPSR = immediate */
6866 shift
= ((insn
>> 8) & 0xf) * 2;
6868 val
= (val
>> shift
) | (val
<< (32 - shift
));
6869 i
= ((insn
& (1 << 22)) != 0);
6870 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
6874 } else if ((insn
& 0x0f900000) == 0x01000000
6875 && (insn
& 0x00000090) != 0x00000090) {
6876 /* miscellaneous instructions */
6877 op1
= (insn
>> 21) & 3;
6878 sh
= (insn
>> 4) & 0xf;
6881 case 0x0: /* move program status register */
6884 tmp
= load_reg(s
, rm
);
6885 i
= ((op1
& 2) != 0);
6886 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
6890 rd
= (insn
>> 12) & 0xf;
6894 tmp
= load_cpu_field(spsr
);
6896 tmp
= tcg_temp_new_i32();
6897 gen_helper_cpsr_read(tmp
, cpu_env
);
6899 store_reg(s
, rd
, tmp
);
6904 /* branch/exchange thumb (bx). */
6906 tmp
= load_reg(s
, rm
);
6908 } else if (op1
== 3) {
6911 rd
= (insn
>> 12) & 0xf;
6912 tmp
= load_reg(s
, rm
);
6913 gen_helper_clz(tmp
, tmp
);
6914 store_reg(s
, rd
, tmp
);
6922 /* Trivial implementation equivalent to bx. */
6923 tmp
= load_reg(s
, rm
);
6934 /* branch link/exchange thumb (blx) */
6935 tmp
= load_reg(s
, rm
);
6936 tmp2
= tcg_temp_new_i32();
6937 tcg_gen_movi_i32(tmp2
, s
->pc
);
6938 store_reg(s
, 14, tmp2
);
6941 case 0x5: /* saturating add/subtract */
6943 rd
= (insn
>> 12) & 0xf;
6944 rn
= (insn
>> 16) & 0xf;
6945 tmp
= load_reg(s
, rm
);
6946 tmp2
= load_reg(s
, rn
);
6948 gen_helper_double_saturate(tmp2
, cpu_env
, tmp2
);
6950 gen_helper_sub_saturate(tmp
, cpu_env
, tmp
, tmp2
);
6952 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
6953 tcg_temp_free_i32(tmp2
);
6954 store_reg(s
, rd
, tmp
);
6957 /* SMC instruction (op1 == 3)
6958 and undefined instructions (op1 == 0 || op1 == 2)
6965 gen_exception_insn(s
, 4, EXCP_BKPT
);
6967 case 0x8: /* signed multiply */
6972 rs
= (insn
>> 8) & 0xf;
6973 rn
= (insn
>> 12) & 0xf;
6974 rd
= (insn
>> 16) & 0xf;
6976 /* (32 * 16) >> 16 */
6977 tmp
= load_reg(s
, rm
);
6978 tmp2
= load_reg(s
, rs
);
6980 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
6983 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6984 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
6985 tmp
= tcg_temp_new_i32();
6986 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6987 tcg_temp_free_i64(tmp64
);
6988 if ((sh
& 2) == 0) {
6989 tmp2
= load_reg(s
, rn
);
6990 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
6991 tcg_temp_free_i32(tmp2
);
6993 store_reg(s
, rd
, tmp
);
6996 tmp
= load_reg(s
, rm
);
6997 tmp2
= load_reg(s
, rs
);
6998 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6999 tcg_temp_free_i32(tmp2
);
7001 tmp64
= tcg_temp_new_i64();
7002 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7003 tcg_temp_free_i32(tmp
);
7004 gen_addq(s
, tmp64
, rn
, rd
);
7005 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7006 tcg_temp_free_i64(tmp64
);
7009 tmp2
= load_reg(s
, rn
);
7010 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
7011 tcg_temp_free_i32(tmp2
);
7013 store_reg(s
, rd
, tmp
);
7020 } else if (((insn
& 0x0e000000) == 0 &&
7021 (insn
& 0x00000090) != 0x90) ||
7022 ((insn
& 0x0e000000) == (1 << 25))) {
7023 int set_cc
, logic_cc
, shiftop
;
7025 op1
= (insn
>> 21) & 0xf;
7026 set_cc
= (insn
>> 20) & 1;
7027 logic_cc
= table_logic_cc
[op1
] & set_cc
;
7029 /* data processing instruction */
7030 if (insn
& (1 << 25)) {
7031 /* immediate operand */
7033 shift
= ((insn
>> 8) & 0xf) * 2;
7035 val
= (val
>> shift
) | (val
<< (32 - shift
));
7037 tmp2
= tcg_temp_new_i32();
7038 tcg_gen_movi_i32(tmp2
, val
);
7039 if (logic_cc
&& shift
) {
7040 gen_set_CF_bit31(tmp2
);
7045 tmp2
= load_reg(s
, rm
);
7046 shiftop
= (insn
>> 5) & 3;
7047 if (!(insn
& (1 << 4))) {
7048 shift
= (insn
>> 7) & 0x1f;
7049 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
7051 rs
= (insn
>> 8) & 0xf;
7052 tmp
= load_reg(s
, rs
);
7053 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
7056 if (op1
!= 0x0f && op1
!= 0x0d) {
7057 rn
= (insn
>> 16) & 0xf;
7058 tmp
= load_reg(s
, rn
);
7062 rd
= (insn
>> 12) & 0xf;
7065 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
7069 store_reg_bx(env
, s
, rd
, tmp
);
7072 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
7076 store_reg_bx(env
, s
, rd
, tmp
);
7079 if (set_cc
&& rd
== 15) {
7080 /* SUBS r15, ... is used for exception return. */
7084 gen_sub_CC(tmp
, tmp
, tmp2
);
7085 gen_exception_return(s
, tmp
);
7088 gen_sub_CC(tmp
, tmp
, tmp2
);
7090 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7092 store_reg_bx(env
, s
, rd
, tmp
);
7097 gen_sub_CC(tmp
, tmp2
, tmp
);
7099 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7101 store_reg_bx(env
, s
, rd
, tmp
);
7105 gen_add_CC(tmp
, tmp
, tmp2
);
7107 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7109 store_reg_bx(env
, s
, rd
, tmp
);
7113 gen_adc_CC(tmp
, tmp
, tmp2
);
7115 gen_add_carry(tmp
, tmp
, tmp2
);
7117 store_reg_bx(env
, s
, rd
, tmp
);
7121 gen_sbc_CC(tmp
, tmp
, tmp2
);
7123 gen_sub_carry(tmp
, tmp
, tmp2
);
7125 store_reg_bx(env
, s
, rd
, tmp
);
7129 gen_sbc_CC(tmp
, tmp2
, tmp
);
7131 gen_sub_carry(tmp
, tmp2
, tmp
);
7133 store_reg_bx(env
, s
, rd
, tmp
);
7137 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
7140 tcg_temp_free_i32(tmp
);
7144 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
7147 tcg_temp_free_i32(tmp
);
7151 gen_sub_CC(tmp
, tmp
, tmp2
);
7153 tcg_temp_free_i32(tmp
);
7157 gen_add_CC(tmp
, tmp
, tmp2
);
7159 tcg_temp_free_i32(tmp
);
7162 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7166 store_reg_bx(env
, s
, rd
, tmp
);
7169 if (logic_cc
&& rd
== 15) {
7170 /* MOVS r15, ... is used for exception return. */
7174 gen_exception_return(s
, tmp2
);
7179 store_reg_bx(env
, s
, rd
, tmp2
);
7183 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
7187 store_reg_bx(env
, s
, rd
, tmp
);
7191 tcg_gen_not_i32(tmp2
, tmp2
);
7195 store_reg_bx(env
, s
, rd
, tmp2
);
7198 if (op1
!= 0x0f && op1
!= 0x0d) {
7199 tcg_temp_free_i32(tmp2
);
7202 /* other instructions */
7203 op1
= (insn
>> 24) & 0xf;
7207 /* multiplies, extra load/stores */
7208 sh
= (insn
>> 5) & 3;
7211 rd
= (insn
>> 16) & 0xf;
7212 rn
= (insn
>> 12) & 0xf;
7213 rs
= (insn
>> 8) & 0xf;
7215 op1
= (insn
>> 20) & 0xf;
7217 case 0: case 1: case 2: case 3: case 6:
7219 tmp
= load_reg(s
, rs
);
7220 tmp2
= load_reg(s
, rm
);
7221 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7222 tcg_temp_free_i32(tmp2
);
7223 if (insn
& (1 << 22)) {
7224 /* Subtract (mls) */
7226 tmp2
= load_reg(s
, rn
);
7227 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7228 tcg_temp_free_i32(tmp2
);
7229 } else if (insn
& (1 << 21)) {
7231 tmp2
= load_reg(s
, rn
);
7232 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7233 tcg_temp_free_i32(tmp2
);
7235 if (insn
& (1 << 20))
7237 store_reg(s
, rd
, tmp
);
7240 /* 64 bit mul double accumulate (UMAAL) */
7242 tmp
= load_reg(s
, rs
);
7243 tmp2
= load_reg(s
, rm
);
7244 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7245 gen_addq_lo(s
, tmp64
, rn
);
7246 gen_addq_lo(s
, tmp64
, rd
);
7247 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7248 tcg_temp_free_i64(tmp64
);
7250 case 8: case 9: case 10: case 11:
7251 case 12: case 13: case 14: case 15:
7252 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7253 tmp
= load_reg(s
, rs
);
7254 tmp2
= load_reg(s
, rm
);
7255 if (insn
& (1 << 22)) {
7256 tcg_gen_muls2_i32(tmp
, tmp2
, tmp
, tmp2
);
7258 tcg_gen_mulu2_i32(tmp
, tmp2
, tmp
, tmp2
);
7260 if (insn
& (1 << 21)) { /* mult accumulate */
7261 TCGv al
= load_reg(s
, rn
);
7262 TCGv ah
= load_reg(s
, rd
);
7263 tcg_gen_add2_i32(tmp
, tmp2
, tmp
, tmp2
, al
, ah
);
7267 if (insn
& (1 << 20)) {
7268 gen_logicq_cc(tmp
, tmp2
);
7270 store_reg(s
, rn
, tmp
);
7271 store_reg(s
, rd
, tmp2
);
7277 rn
= (insn
>> 16) & 0xf;
7278 rd
= (insn
>> 12) & 0xf;
7279 if (insn
& (1 << 23)) {
7280 /* load/store exclusive */
7281 op1
= (insn
>> 21) & 0x3;
7286 addr
= tcg_temp_local_new_i32();
7287 load_reg_var(s
, addr
, rn
);
7288 if (insn
& (1 << 20)) {
7291 gen_load_exclusive(s
, rd
, 15, addr
, 2);
7293 case 1: /* ldrexd */
7294 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
7296 case 2: /* ldrexb */
7297 gen_load_exclusive(s
, rd
, 15, addr
, 0);
7299 case 3: /* ldrexh */
7300 gen_load_exclusive(s
, rd
, 15, addr
, 1);
7309 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
7311 case 1: /* strexd */
7312 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
7314 case 2: /* strexb */
7315 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
7317 case 3: /* strexh */
7318 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
7324 tcg_temp_free(addr
);
7326 /* SWP instruction */
7329 /* ??? This is not really atomic. However we know
7330 we never have multiple CPUs running in parallel,
7331 so it is good enough. */
7332 addr
= load_reg(s
, rn
);
7333 tmp
= load_reg(s
, rm
);
7334 if (insn
& (1 << 22)) {
7335 tmp2
= gen_ld8u(addr
, IS_USER(s
));
7336 gen_st8(tmp
, addr
, IS_USER(s
));
7338 tmp2
= gen_ld32(addr
, IS_USER(s
));
7339 gen_st32(tmp
, addr
, IS_USER(s
));
7341 tcg_temp_free_i32(addr
);
7342 store_reg(s
, rd
, tmp2
);
7348 /* Misc load/store */
7349 rn
= (insn
>> 16) & 0xf;
7350 rd
= (insn
>> 12) & 0xf;
7351 addr
= load_reg(s
, rn
);
7352 if (insn
& (1 << 24))
7353 gen_add_datah_offset(s
, insn
, 0, addr
);
7355 if (insn
& (1 << 20)) {
7359 tmp
= gen_ld16u(addr
, IS_USER(s
));
7362 tmp
= gen_ld8s(addr
, IS_USER(s
));
7366 tmp
= gen_ld16s(addr
, IS_USER(s
));
7370 } else if (sh
& 2) {
7375 tmp
= load_reg(s
, rd
);
7376 gen_st32(tmp
, addr
, IS_USER(s
));
7377 tcg_gen_addi_i32(addr
, addr
, 4);
7378 tmp
= load_reg(s
, rd
+ 1);
7379 gen_st32(tmp
, addr
, IS_USER(s
));
7383 tmp
= gen_ld32(addr
, IS_USER(s
));
7384 store_reg(s
, rd
, tmp
);
7385 tcg_gen_addi_i32(addr
, addr
, 4);
7386 tmp
= gen_ld32(addr
, IS_USER(s
));
7390 address_offset
= -4;
7393 tmp
= load_reg(s
, rd
);
7394 gen_st16(tmp
, addr
, IS_USER(s
));
7397 /* Perform base writeback before the loaded value to
7398 ensure correct behavior with overlapping index registers.
7399 ldrd with base writeback is is undefined if the
7400 destination and index registers overlap. */
7401 if (!(insn
& (1 << 24))) {
7402 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
7403 store_reg(s
, rn
, addr
);
7404 } else if (insn
& (1 << 21)) {
7406 tcg_gen_addi_i32(addr
, addr
, address_offset
);
7407 store_reg(s
, rn
, addr
);
7409 tcg_temp_free_i32(addr
);
7412 /* Complete the load. */
7413 store_reg(s
, rd
, tmp
);
7422 if (insn
& (1 << 4)) {
7424 /* Armv6 Media instructions. */
7426 rn
= (insn
>> 16) & 0xf;
7427 rd
= (insn
>> 12) & 0xf;
7428 rs
= (insn
>> 8) & 0xf;
7429 switch ((insn
>> 23) & 3) {
7430 case 0: /* Parallel add/subtract. */
7431 op1
= (insn
>> 20) & 7;
7432 tmp
= load_reg(s
, rn
);
7433 tmp2
= load_reg(s
, rm
);
7434 sh
= (insn
>> 5) & 7;
7435 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
7437 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
7438 tcg_temp_free_i32(tmp2
);
7439 store_reg(s
, rd
, tmp
);
7442 if ((insn
& 0x00700020) == 0) {
7443 /* Halfword pack. */
7444 tmp
= load_reg(s
, rn
);
7445 tmp2
= load_reg(s
, rm
);
7446 shift
= (insn
>> 7) & 0x1f;
7447 if (insn
& (1 << 6)) {
7451 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
7452 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
7453 tcg_gen_ext16u_i32(tmp2
, tmp2
);
7457 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
7458 tcg_gen_ext16u_i32(tmp
, tmp
);
7459 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
7461 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7462 tcg_temp_free_i32(tmp2
);
7463 store_reg(s
, rd
, tmp
);
7464 } else if ((insn
& 0x00200020) == 0x00200000) {
7466 tmp
= load_reg(s
, rm
);
7467 shift
= (insn
>> 7) & 0x1f;
7468 if (insn
& (1 << 6)) {
7471 tcg_gen_sari_i32(tmp
, tmp
, shift
);
7473 tcg_gen_shli_i32(tmp
, tmp
, shift
);
7475 sh
= (insn
>> 16) & 0x1f;
7476 tmp2
= tcg_const_i32(sh
);
7477 if (insn
& (1 << 22))
7478 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
7480 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
7481 tcg_temp_free_i32(tmp2
);
7482 store_reg(s
, rd
, tmp
);
7483 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
7485 tmp
= load_reg(s
, rm
);
7486 sh
= (insn
>> 16) & 0x1f;
7487 tmp2
= tcg_const_i32(sh
);
7488 if (insn
& (1 << 22))
7489 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
7491 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
7492 tcg_temp_free_i32(tmp2
);
7493 store_reg(s
, rd
, tmp
);
7494 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
7496 tmp
= load_reg(s
, rn
);
7497 tmp2
= load_reg(s
, rm
);
7498 tmp3
= tcg_temp_new_i32();
7499 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
7500 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7501 tcg_temp_free_i32(tmp3
);
7502 tcg_temp_free_i32(tmp2
);
7503 store_reg(s
, rd
, tmp
);
7504 } else if ((insn
& 0x000003e0) == 0x00000060) {
7505 tmp
= load_reg(s
, rm
);
7506 shift
= (insn
>> 10) & 3;
7507 /* ??? In many cases it's not necessary to do a
7508 rotate, a shift is sufficient. */
7510 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7511 op1
= (insn
>> 20) & 7;
7513 case 0: gen_sxtb16(tmp
); break;
7514 case 2: gen_sxtb(tmp
); break;
7515 case 3: gen_sxth(tmp
); break;
7516 case 4: gen_uxtb16(tmp
); break;
7517 case 6: gen_uxtb(tmp
); break;
7518 case 7: gen_uxth(tmp
); break;
7519 default: goto illegal_op
;
7522 tmp2
= load_reg(s
, rn
);
7523 if ((op1
& 3) == 0) {
7524 gen_add16(tmp
, tmp2
);
7526 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7527 tcg_temp_free_i32(tmp2
);
7530 store_reg(s
, rd
, tmp
);
7531 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
7533 tmp
= load_reg(s
, rm
);
7534 if (insn
& (1 << 22)) {
7535 if (insn
& (1 << 7)) {
7539 gen_helper_rbit(tmp
, tmp
);
7542 if (insn
& (1 << 7))
7545 tcg_gen_bswap32_i32(tmp
, tmp
);
7547 store_reg(s
, rd
, tmp
);
7552 case 2: /* Multiplies (Type 3). */
7553 switch ((insn
>> 20) & 0x7) {
7555 if (((insn
>> 6) ^ (insn
>> 7)) & 1) {
7556 /* op2 not 00x or 11x : UNDEF */
7559 /* Signed multiply most significant [accumulate].
7560 (SMMUL, SMMLA, SMMLS) */
7561 tmp
= load_reg(s
, rm
);
7562 tmp2
= load_reg(s
, rs
);
7563 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7566 tmp
= load_reg(s
, rd
);
7567 if (insn
& (1 << 6)) {
7568 tmp64
= gen_subq_msw(tmp64
, tmp
);
7570 tmp64
= gen_addq_msw(tmp64
, tmp
);
7573 if (insn
& (1 << 5)) {
7574 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
7576 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7577 tmp
= tcg_temp_new_i32();
7578 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7579 tcg_temp_free_i64(tmp64
);
7580 store_reg(s
, rn
, tmp
);
7584 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7585 if (insn
& (1 << 7)) {
7588 tmp
= load_reg(s
, rm
);
7589 tmp2
= load_reg(s
, rs
);
7590 if (insn
& (1 << 5))
7591 gen_swap_half(tmp2
);
7592 gen_smul_dual(tmp
, tmp2
);
7593 if (insn
& (1 << 6)) {
7594 /* This subtraction cannot overflow. */
7595 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7597 /* This addition cannot overflow 32 bits;
7598 * however it may overflow considered as a signed
7599 * operation, in which case we must set the Q flag.
7601 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
7603 tcg_temp_free_i32(tmp2
);
7604 if (insn
& (1 << 22)) {
7605 /* smlald, smlsld */
7606 tmp64
= tcg_temp_new_i64();
7607 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7608 tcg_temp_free_i32(tmp
);
7609 gen_addq(s
, tmp64
, rd
, rn
);
7610 gen_storeq_reg(s
, rd
, rn
, tmp64
);
7611 tcg_temp_free_i64(tmp64
);
7613 /* smuad, smusd, smlad, smlsd */
7616 tmp2
= load_reg(s
, rd
);
7617 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
7618 tcg_temp_free_i32(tmp2
);
7620 store_reg(s
, rn
, tmp
);
7626 if (!arm_feature(env
, ARM_FEATURE_ARM_DIV
)) {
7629 if (((insn
>> 5) & 7) || (rd
!= 15)) {
7632 tmp
= load_reg(s
, rm
);
7633 tmp2
= load_reg(s
, rs
);
7634 if (insn
& (1 << 21)) {
7635 gen_helper_udiv(tmp
, tmp
, tmp2
);
7637 gen_helper_sdiv(tmp
, tmp
, tmp2
);
7639 tcg_temp_free_i32(tmp2
);
7640 store_reg(s
, rn
, tmp
);
7647 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
7649 case 0: /* Unsigned sum of absolute differences. */
7651 tmp
= load_reg(s
, rm
);
7652 tmp2
= load_reg(s
, rs
);
7653 gen_helper_usad8(tmp
, tmp
, tmp2
);
7654 tcg_temp_free_i32(tmp2
);
7656 tmp2
= load_reg(s
, rd
);
7657 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7658 tcg_temp_free_i32(tmp2
);
7660 store_reg(s
, rn
, tmp
);
7662 case 0x20: case 0x24: case 0x28: case 0x2c:
7663 /* Bitfield insert/clear. */
7665 shift
= (insn
>> 7) & 0x1f;
7666 i
= (insn
>> 16) & 0x1f;
7669 tmp
= tcg_temp_new_i32();
7670 tcg_gen_movi_i32(tmp
, 0);
7672 tmp
= load_reg(s
, rm
);
7675 tmp2
= load_reg(s
, rd
);
7676 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, i
);
7677 tcg_temp_free_i32(tmp2
);
7679 store_reg(s
, rd
, tmp
);
7681 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7682 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7684 tmp
= load_reg(s
, rm
);
7685 shift
= (insn
>> 7) & 0x1f;
7686 i
= ((insn
>> 16) & 0x1f) + 1;
7691 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
7693 gen_sbfx(tmp
, shift
, i
);
7696 store_reg(s
, rd
, tmp
);
7706 /* Check for undefined extension instructions
7707 * per the ARM Bible IE:
7708 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7710 sh
= (0xf << 20) | (0xf << 4);
7711 if (op1
== 0x7 && ((insn
& sh
) == sh
))
7715 /* load/store byte/word */
7716 rn
= (insn
>> 16) & 0xf;
7717 rd
= (insn
>> 12) & 0xf;
7718 tmp2
= load_reg(s
, rn
);
7719 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
7720 if (insn
& (1 << 24))
7721 gen_add_data_offset(s
, insn
, tmp2
);
7722 if (insn
& (1 << 20)) {
7724 if (insn
& (1 << 22)) {
7725 tmp
= gen_ld8u(tmp2
, i
);
7727 tmp
= gen_ld32(tmp2
, i
);
7731 tmp
= load_reg(s
, rd
);
7732 if (insn
& (1 << 22))
7733 gen_st8(tmp
, tmp2
, i
);
7735 gen_st32(tmp
, tmp2
, i
);
7737 if (!(insn
& (1 << 24))) {
7738 gen_add_data_offset(s
, insn
, tmp2
);
7739 store_reg(s
, rn
, tmp2
);
7740 } else if (insn
& (1 << 21)) {
7741 store_reg(s
, rn
, tmp2
);
7743 tcg_temp_free_i32(tmp2
);
7745 if (insn
& (1 << 20)) {
7746 /* Complete the load. */
7747 store_reg_from_load(env
, s
, rd
, tmp
);
7753 int j
, n
, user
, loaded_base
;
7755 /* load/store multiple words */
7756 /* XXX: store correct base if write back */
7758 if (insn
& (1 << 22)) {
7760 goto illegal_op
; /* only usable in supervisor mode */
7762 if ((insn
& (1 << 15)) == 0)
7765 rn
= (insn
>> 16) & 0xf;
7766 addr
= load_reg(s
, rn
);
7768 /* compute total size */
7770 TCGV_UNUSED(loaded_var
);
7773 if (insn
& (1 << i
))
7776 /* XXX: test invalid n == 0 case ? */
7777 if (insn
& (1 << 23)) {
7778 if (insn
& (1 << 24)) {
7780 tcg_gen_addi_i32(addr
, addr
, 4);
7782 /* post increment */
7785 if (insn
& (1 << 24)) {
7787 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7789 /* post decrement */
7791 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7796 if (insn
& (1 << i
)) {
7797 if (insn
& (1 << 20)) {
7799 tmp
= gen_ld32(addr
, IS_USER(s
));
7801 tmp2
= tcg_const_i32(i
);
7802 gen_helper_set_user_reg(cpu_env
, tmp2
, tmp
);
7803 tcg_temp_free_i32(tmp2
);
7804 tcg_temp_free_i32(tmp
);
7805 } else if (i
== rn
) {
7809 store_reg_from_load(env
, s
, i
, tmp
);
7814 /* special case: r15 = PC + 8 */
7815 val
= (long)s
->pc
+ 4;
7816 tmp
= tcg_temp_new_i32();
7817 tcg_gen_movi_i32(tmp
, val
);
7819 tmp
= tcg_temp_new_i32();
7820 tmp2
= tcg_const_i32(i
);
7821 gen_helper_get_user_reg(tmp
, cpu_env
, tmp2
);
7822 tcg_temp_free_i32(tmp2
);
7824 tmp
= load_reg(s
, i
);
7826 gen_st32(tmp
, addr
, IS_USER(s
));
7829 /* no need to add after the last transfer */
7831 tcg_gen_addi_i32(addr
, addr
, 4);
7834 if (insn
& (1 << 21)) {
7836 if (insn
& (1 << 23)) {
7837 if (insn
& (1 << 24)) {
7840 /* post increment */
7841 tcg_gen_addi_i32(addr
, addr
, 4);
7844 if (insn
& (1 << 24)) {
7847 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7849 /* post decrement */
7850 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7853 store_reg(s
, rn
, addr
);
7855 tcg_temp_free_i32(addr
);
7858 store_reg(s
, rn
, loaded_var
);
7860 if ((insn
& (1 << 22)) && !user
) {
7861 /* Restore CPSR from SPSR. */
7862 tmp
= load_cpu_field(spsr
);
7863 gen_set_cpsr(tmp
, 0xffffffff);
7864 tcg_temp_free_i32(tmp
);
7865 s
->is_jmp
= DISAS_UPDATE
;
7874 /* branch (and link) */
7875 val
= (int32_t)s
->pc
;
7876 if (insn
& (1 << 24)) {
7877 tmp
= tcg_temp_new_i32();
7878 tcg_gen_movi_i32(tmp
, val
);
7879 store_reg(s
, 14, tmp
);
7881 offset
= (((int32_t)insn
<< 8) >> 8);
7882 val
+= (offset
<< 2) + 4;
7890 if (disas_coproc_insn(env
, s
, insn
))
7895 gen_set_pc_im(s
->pc
);
7896 s
->is_jmp
= DISAS_SWI
;
7900 gen_exception_insn(s
, 4, EXCP_UDEF
);
7906 /* Return true if this is a Thumb-2 logical op. */
7908 thumb2_logic_op(int op
)
7913 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7914 then set condition code flags based on the result of the operation.
7915 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7916 to the high bit of T1.
7917 Returns zero if the opcode is valid. */
7920 gen_thumb2_data_op(DisasContext
*s
, int op
, int conds
, uint32_t shifter_out
, TCGv t0
, TCGv t1
)
7927 tcg_gen_and_i32(t0
, t0
, t1
);
7931 tcg_gen_andc_i32(t0
, t0
, t1
);
7935 tcg_gen_or_i32(t0
, t0
, t1
);
7939 tcg_gen_orc_i32(t0
, t0
, t1
);
7943 tcg_gen_xor_i32(t0
, t0
, t1
);
7948 gen_add_CC(t0
, t0
, t1
);
7950 tcg_gen_add_i32(t0
, t0
, t1
);
7954 gen_adc_CC(t0
, t0
, t1
);
7960 gen_sbc_CC(t0
, t0
, t1
);
7962 gen_sub_carry(t0
, t0
, t1
);
7967 gen_sub_CC(t0
, t0
, t1
);
7969 tcg_gen_sub_i32(t0
, t0
, t1
);
7973 gen_sub_CC(t0
, t1
, t0
);
7975 tcg_gen_sub_i32(t0
, t1
, t0
);
7977 default: /* 5, 6, 7, 9, 12, 15. */
7983 gen_set_CF_bit31(t1
);
7988 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7990 static int disas_thumb2_insn(CPUARMState
*env
, DisasContext
*s
, uint16_t insn_hw1
)
7992 uint32_t insn
, imm
, shift
, offset
;
7993 uint32_t rd
, rn
, rm
, rs
;
8004 if (!(arm_feature(env
, ARM_FEATURE_THUMB2
)
8005 || arm_feature (env
, ARM_FEATURE_M
))) {
8006 /* Thumb-1 cores may need to treat bl and blx as a pair of
8007 16-bit instructions to get correct prefetch abort behavior. */
8009 if ((insn
& (1 << 12)) == 0) {
8011 /* Second half of blx. */
8012 offset
= ((insn
& 0x7ff) << 1);
8013 tmp
= load_reg(s
, 14);
8014 tcg_gen_addi_i32(tmp
, tmp
, offset
);
8015 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
8017 tmp2
= tcg_temp_new_i32();
8018 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
8019 store_reg(s
, 14, tmp2
);
8023 if (insn
& (1 << 11)) {
8024 /* Second half of bl. */
8025 offset
= ((insn
& 0x7ff) << 1) | 1;
8026 tmp
= load_reg(s
, 14);
8027 tcg_gen_addi_i32(tmp
, tmp
, offset
);
8029 tmp2
= tcg_temp_new_i32();
8030 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
8031 store_reg(s
, 14, tmp2
);
8035 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
8036 /* Instruction spans a page boundary. Implement it as two
8037 16-bit instructions in case the second half causes an
8039 offset
= ((int32_t)insn
<< 21) >> 9;
8040 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
8043 /* Fall through to 32-bit decode. */
8046 insn
= arm_lduw_code(env
, s
->pc
, s
->bswap_code
);
8048 insn
|= (uint32_t)insn_hw1
<< 16;
8050 if ((insn
& 0xf800e800) != 0xf000e800) {
8054 rn
= (insn
>> 16) & 0xf;
8055 rs
= (insn
>> 12) & 0xf;
8056 rd
= (insn
>> 8) & 0xf;
8058 switch ((insn
>> 25) & 0xf) {
8059 case 0: case 1: case 2: case 3:
8060 /* 16-bit instructions. Should never happen. */
8063 if (insn
& (1 << 22)) {
8064 /* Other load/store, table branch. */
8065 if (insn
& 0x01200000) {
8066 /* Load/store doubleword. */
8068 addr
= tcg_temp_new_i32();
8069 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
8071 addr
= load_reg(s
, rn
);
8073 offset
= (insn
& 0xff) * 4;
8074 if ((insn
& (1 << 23)) == 0)
8076 if (insn
& (1 << 24)) {
8077 tcg_gen_addi_i32(addr
, addr
, offset
);
8080 if (insn
& (1 << 20)) {
8082 tmp
= gen_ld32(addr
, IS_USER(s
));
8083 store_reg(s
, rs
, tmp
);
8084 tcg_gen_addi_i32(addr
, addr
, 4);
8085 tmp
= gen_ld32(addr
, IS_USER(s
));
8086 store_reg(s
, rd
, tmp
);
8089 tmp
= load_reg(s
, rs
);
8090 gen_st32(tmp
, addr
, IS_USER(s
));
8091 tcg_gen_addi_i32(addr
, addr
, 4);
8092 tmp
= load_reg(s
, rd
);
8093 gen_st32(tmp
, addr
, IS_USER(s
));
8095 if (insn
& (1 << 21)) {
8096 /* Base writeback. */
8099 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
8100 store_reg(s
, rn
, addr
);
8102 tcg_temp_free_i32(addr
);
8104 } else if ((insn
& (1 << 23)) == 0) {
8105 /* Load/store exclusive word. */
8106 addr
= tcg_temp_local_new();
8107 load_reg_var(s
, addr
, rn
);
8108 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
8109 if (insn
& (1 << 20)) {
8110 gen_load_exclusive(s
, rs
, 15, addr
, 2);
8112 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
8114 tcg_temp_free(addr
);
8115 } else if ((insn
& (1 << 6)) == 0) {
8118 addr
= tcg_temp_new_i32();
8119 tcg_gen_movi_i32(addr
, s
->pc
);
8121 addr
= load_reg(s
, rn
);
8123 tmp
= load_reg(s
, rm
);
8124 tcg_gen_add_i32(addr
, addr
, tmp
);
8125 if (insn
& (1 << 4)) {
8127 tcg_gen_add_i32(addr
, addr
, tmp
);
8128 tcg_temp_free_i32(tmp
);
8129 tmp
= gen_ld16u(addr
, IS_USER(s
));
8131 tcg_temp_free_i32(tmp
);
8132 tmp
= gen_ld8u(addr
, IS_USER(s
));
8134 tcg_temp_free_i32(addr
);
8135 tcg_gen_shli_i32(tmp
, tmp
, 1);
8136 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
8137 store_reg(s
, 15, tmp
);
8139 /* Load/store exclusive byte/halfword/doubleword. */
8141 op
= (insn
>> 4) & 0x3;
8145 addr
= tcg_temp_local_new();
8146 load_reg_var(s
, addr
, rn
);
8147 if (insn
& (1 << 20)) {
8148 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
8150 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
8152 tcg_temp_free(addr
);
8155 /* Load/store multiple, RFE, SRS. */
8156 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
8157 /* Not available in user mode. */
8160 if (insn
& (1 << 20)) {
8162 addr
= load_reg(s
, rn
);
8163 if ((insn
& (1 << 24)) == 0)
8164 tcg_gen_addi_i32(addr
, addr
, -8);
8165 /* Load PC into tmp and CPSR into tmp2. */
8166 tmp
= gen_ld32(addr
, 0);
8167 tcg_gen_addi_i32(addr
, addr
, 4);
8168 tmp2
= gen_ld32(addr
, 0);
8169 if (insn
& (1 << 21)) {
8170 /* Base writeback. */
8171 if (insn
& (1 << 24)) {
8172 tcg_gen_addi_i32(addr
, addr
, 4);
8174 tcg_gen_addi_i32(addr
, addr
, -4);
8176 store_reg(s
, rn
, addr
);
8178 tcg_temp_free_i32(addr
);
8180 gen_rfe(s
, tmp
, tmp2
);
8184 addr
= tcg_temp_new_i32();
8185 tmp
= tcg_const_i32(op
);
8186 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
8187 tcg_temp_free_i32(tmp
);
8188 if ((insn
& (1 << 24)) == 0) {
8189 tcg_gen_addi_i32(addr
, addr
, -8);
8191 tmp
= load_reg(s
, 14);
8192 gen_st32(tmp
, addr
, 0);
8193 tcg_gen_addi_i32(addr
, addr
, 4);
8194 tmp
= tcg_temp_new_i32();
8195 gen_helper_cpsr_read(tmp
, cpu_env
);
8196 gen_st32(tmp
, addr
, 0);
8197 if (insn
& (1 << 21)) {
8198 if ((insn
& (1 << 24)) == 0) {
8199 tcg_gen_addi_i32(addr
, addr
, -4);
8201 tcg_gen_addi_i32(addr
, addr
, 4);
8203 tmp
= tcg_const_i32(op
);
8204 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
8205 tcg_temp_free_i32(tmp
);
8207 tcg_temp_free_i32(addr
);
8211 int i
, loaded_base
= 0;
8213 /* Load/store multiple. */
8214 addr
= load_reg(s
, rn
);
8216 for (i
= 0; i
< 16; i
++) {
8217 if (insn
& (1 << i
))
8220 if (insn
& (1 << 24)) {
8221 tcg_gen_addi_i32(addr
, addr
, -offset
);
8224 TCGV_UNUSED(loaded_var
);
8225 for (i
= 0; i
< 16; i
++) {
8226 if ((insn
& (1 << i
)) == 0)
8228 if (insn
& (1 << 20)) {
8230 tmp
= gen_ld32(addr
, IS_USER(s
));
8233 } else if (i
== rn
) {
8237 store_reg(s
, i
, tmp
);
8241 tmp
= load_reg(s
, i
);
8242 gen_st32(tmp
, addr
, IS_USER(s
));
8244 tcg_gen_addi_i32(addr
, addr
, 4);
8247 store_reg(s
, rn
, loaded_var
);
8249 if (insn
& (1 << 21)) {
8250 /* Base register writeback. */
8251 if (insn
& (1 << 24)) {
8252 tcg_gen_addi_i32(addr
, addr
, -offset
);
8254 /* Fault if writeback register is in register list. */
8255 if (insn
& (1 << rn
))
8257 store_reg(s
, rn
, addr
);
8259 tcg_temp_free_i32(addr
);
8266 op
= (insn
>> 21) & 0xf;
8268 /* Halfword pack. */
8269 tmp
= load_reg(s
, rn
);
8270 tmp2
= load_reg(s
, rm
);
8271 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
8272 if (insn
& (1 << 5)) {
8276 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
8277 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
8278 tcg_gen_ext16u_i32(tmp2
, tmp2
);
8282 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
8283 tcg_gen_ext16u_i32(tmp
, tmp
);
8284 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
8286 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8287 tcg_temp_free_i32(tmp2
);
8288 store_reg(s
, rd
, tmp
);
8290 /* Data processing register constant shift. */
8292 tmp
= tcg_temp_new_i32();
8293 tcg_gen_movi_i32(tmp
, 0);
8295 tmp
= load_reg(s
, rn
);
8297 tmp2
= load_reg(s
, rm
);
8299 shiftop
= (insn
>> 4) & 3;
8300 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
8301 conds
= (insn
& (1 << 20)) != 0;
8302 logic_cc
= (conds
&& thumb2_logic_op(op
));
8303 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
8304 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
8306 tcg_temp_free_i32(tmp2
);
8308 store_reg(s
, rd
, tmp
);
8310 tcg_temp_free_i32(tmp
);
8314 case 13: /* Misc data processing. */
8315 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
8316 if (op
< 4 && (insn
& 0xf000) != 0xf000)
8319 case 0: /* Register controlled shift. */
8320 tmp
= load_reg(s
, rn
);
8321 tmp2
= load_reg(s
, rm
);
8322 if ((insn
& 0x70) != 0)
8324 op
= (insn
>> 21) & 3;
8325 logic_cc
= (insn
& (1 << 20)) != 0;
8326 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
8329 store_reg_bx(env
, s
, rd
, tmp
);
8331 case 1: /* Sign/zero extend. */
8332 tmp
= load_reg(s
, rm
);
8333 shift
= (insn
>> 4) & 3;
8334 /* ??? In many cases it's not necessary to do a
8335 rotate, a shift is sufficient. */
8337 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
8338 op
= (insn
>> 20) & 7;
8340 case 0: gen_sxth(tmp
); break;
8341 case 1: gen_uxth(tmp
); break;
8342 case 2: gen_sxtb16(tmp
); break;
8343 case 3: gen_uxtb16(tmp
); break;
8344 case 4: gen_sxtb(tmp
); break;
8345 case 5: gen_uxtb(tmp
); break;
8346 default: goto illegal_op
;
8349 tmp2
= load_reg(s
, rn
);
8350 if ((op
>> 1) == 1) {
8351 gen_add16(tmp
, tmp2
);
8353 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8354 tcg_temp_free_i32(tmp2
);
8357 store_reg(s
, rd
, tmp
);
8359 case 2: /* SIMD add/subtract. */
8360 op
= (insn
>> 20) & 7;
8361 shift
= (insn
>> 4) & 7;
8362 if ((op
& 3) == 3 || (shift
& 3) == 3)
8364 tmp
= load_reg(s
, rn
);
8365 tmp2
= load_reg(s
, rm
);
8366 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
8367 tcg_temp_free_i32(tmp2
);
8368 store_reg(s
, rd
, tmp
);
8370 case 3: /* Other data processing. */
8371 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
8373 /* Saturating add/subtract. */
8374 tmp
= load_reg(s
, rn
);
8375 tmp2
= load_reg(s
, rm
);
8377 gen_helper_double_saturate(tmp
, cpu_env
, tmp
);
8379 gen_helper_sub_saturate(tmp
, cpu_env
, tmp2
, tmp
);
8381 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
8382 tcg_temp_free_i32(tmp2
);
8384 tmp
= load_reg(s
, rn
);
8386 case 0x0a: /* rbit */
8387 gen_helper_rbit(tmp
, tmp
);
8389 case 0x08: /* rev */
8390 tcg_gen_bswap32_i32(tmp
, tmp
);
8392 case 0x09: /* rev16 */
8395 case 0x0b: /* revsh */
8398 case 0x10: /* sel */
8399 tmp2
= load_reg(s
, rm
);
8400 tmp3
= tcg_temp_new_i32();
8401 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
8402 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
8403 tcg_temp_free_i32(tmp3
);
8404 tcg_temp_free_i32(tmp2
);
8406 case 0x18: /* clz */
8407 gen_helper_clz(tmp
, tmp
);
8413 store_reg(s
, rd
, tmp
);
8415 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8416 op
= (insn
>> 4) & 0xf;
8417 tmp
= load_reg(s
, rn
);
8418 tmp2
= load_reg(s
, rm
);
8419 switch ((insn
>> 20) & 7) {
8420 case 0: /* 32 x 32 -> 32 */
8421 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
8422 tcg_temp_free_i32(tmp2
);
8424 tmp2
= load_reg(s
, rs
);
8426 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8428 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8429 tcg_temp_free_i32(tmp2
);
8432 case 1: /* 16 x 16 -> 32 */
8433 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
8434 tcg_temp_free_i32(tmp2
);
8436 tmp2
= load_reg(s
, rs
);
8437 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8438 tcg_temp_free_i32(tmp2
);
8441 case 2: /* Dual multiply add. */
8442 case 4: /* Dual multiply subtract. */
8444 gen_swap_half(tmp2
);
8445 gen_smul_dual(tmp
, tmp2
);
8446 if (insn
& (1 << 22)) {
8447 /* This subtraction cannot overflow. */
8448 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8450 /* This addition cannot overflow 32 bits;
8451 * however it may overflow considered as a signed
8452 * operation, in which case we must set the Q flag.
8454 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8456 tcg_temp_free_i32(tmp2
);
8459 tmp2
= load_reg(s
, rs
);
8460 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8461 tcg_temp_free_i32(tmp2
);
8464 case 3: /* 32 * 16 -> 32msb */
8466 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
8469 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8470 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
8471 tmp
= tcg_temp_new_i32();
8472 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
8473 tcg_temp_free_i64(tmp64
);
8476 tmp2
= load_reg(s
, rs
);
8477 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8478 tcg_temp_free_i32(tmp2
);
8481 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8482 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8484 tmp
= load_reg(s
, rs
);
8485 if (insn
& (1 << 20)) {
8486 tmp64
= gen_addq_msw(tmp64
, tmp
);
8488 tmp64
= gen_subq_msw(tmp64
, tmp
);
8491 if (insn
& (1 << 4)) {
8492 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
8494 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
8495 tmp
= tcg_temp_new_i32();
8496 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
8497 tcg_temp_free_i64(tmp64
);
8499 case 7: /* Unsigned sum of absolute differences. */
8500 gen_helper_usad8(tmp
, tmp
, tmp2
);
8501 tcg_temp_free_i32(tmp2
);
8503 tmp2
= load_reg(s
, rs
);
8504 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8505 tcg_temp_free_i32(tmp2
);
8509 store_reg(s
, rd
, tmp
);
8511 case 6: case 7: /* 64-bit multiply, Divide. */
8512 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
8513 tmp
= load_reg(s
, rn
);
8514 tmp2
= load_reg(s
, rm
);
8515 if ((op
& 0x50) == 0x10) {
8517 if (!arm_feature(env
, ARM_FEATURE_THUMB_DIV
)) {
8521 gen_helper_udiv(tmp
, tmp
, tmp2
);
8523 gen_helper_sdiv(tmp
, tmp
, tmp2
);
8524 tcg_temp_free_i32(tmp2
);
8525 store_reg(s
, rd
, tmp
);
8526 } else if ((op
& 0xe) == 0xc) {
8527 /* Dual multiply accumulate long. */
8529 gen_swap_half(tmp2
);
8530 gen_smul_dual(tmp
, tmp2
);
8532 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8534 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8536 tcg_temp_free_i32(tmp2
);
8538 tmp64
= tcg_temp_new_i64();
8539 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8540 tcg_temp_free_i32(tmp
);
8541 gen_addq(s
, tmp64
, rs
, rd
);
8542 gen_storeq_reg(s
, rs
, rd
, tmp64
);
8543 tcg_temp_free_i64(tmp64
);
8546 /* Unsigned 64-bit multiply */
8547 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
8551 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
8552 tcg_temp_free_i32(tmp2
);
8553 tmp64
= tcg_temp_new_i64();
8554 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8555 tcg_temp_free_i32(tmp
);
8557 /* Signed 64-bit multiply */
8558 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8563 gen_addq_lo(s
, tmp64
, rs
);
8564 gen_addq_lo(s
, tmp64
, rd
);
8565 } else if (op
& 0x40) {
8566 /* 64-bit accumulate. */
8567 gen_addq(s
, tmp64
, rs
, rd
);
8569 gen_storeq_reg(s
, rs
, rd
, tmp64
);
8570 tcg_temp_free_i64(tmp64
);
8575 case 6: case 7: case 14: case 15:
8577 if (((insn
>> 24) & 3) == 3) {
8578 /* Translate into the equivalent ARM encoding. */
8579 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4) | (1 << 28);
8580 if (disas_neon_data_insn(env
, s
, insn
))
8583 if (insn
& (1 << 28))
8585 if (disas_coproc_insn (env
, s
, insn
))
8589 case 8: case 9: case 10: case 11:
8590 if (insn
& (1 << 15)) {
8591 /* Branches, misc control. */
8592 if (insn
& 0x5000) {
8593 /* Unconditional branch. */
8594 /* signextend(hw1[10:0]) -> offset[:12]. */
8595 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
8596 /* hw1[10:0] -> offset[11:1]. */
8597 offset
|= (insn
& 0x7ff) << 1;
8598 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8599 offset[24:22] already have the same value because of the
8600 sign extension above. */
8601 offset
^= ((~insn
) & (1 << 13)) << 10;
8602 offset
^= ((~insn
) & (1 << 11)) << 11;
8604 if (insn
& (1 << 14)) {
8605 /* Branch and link. */
8606 tcg_gen_movi_i32(cpu_R
[14], s
->pc
| 1);
8610 if (insn
& (1 << 12)) {
8615 offset
&= ~(uint32_t)2;
8616 /* thumb2 bx, no need to check */
8617 gen_bx_im(s
, offset
);
8619 } else if (((insn
>> 23) & 7) == 7) {
8621 if (insn
& (1 << 13))
8624 if (insn
& (1 << 26)) {
8625 /* Secure monitor call (v6Z) */
8626 goto illegal_op
; /* not implemented. */
8628 op
= (insn
>> 20) & 7;
8630 case 0: /* msr cpsr. */
8632 tmp
= load_reg(s
, rn
);
8633 addr
= tcg_const_i32(insn
& 0xff);
8634 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8635 tcg_temp_free_i32(addr
);
8636 tcg_temp_free_i32(tmp
);
8641 case 1: /* msr spsr. */
8644 tmp
= load_reg(s
, rn
);
8646 msr_mask(env
, s
, (insn
>> 8) & 0xf, op
== 1),
8650 case 2: /* cps, nop-hint. */
8651 if (((insn
>> 8) & 7) == 0) {
8652 gen_nop_hint(s
, insn
& 0xff);
8654 /* Implemented as NOP in user mode. */
8659 if (insn
& (1 << 10)) {
8660 if (insn
& (1 << 7))
8662 if (insn
& (1 << 6))
8664 if (insn
& (1 << 5))
8666 if (insn
& (1 << 9))
8667 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
8669 if (insn
& (1 << 8)) {
8671 imm
|= (insn
& 0x1f);
8674 gen_set_psr_im(s
, offset
, 0, imm
);
8677 case 3: /* Special control operations. */
8679 op
= (insn
>> 4) & 0xf;
8687 /* These execute as NOPs. */
8694 /* Trivial implementation equivalent to bx. */
8695 tmp
= load_reg(s
, rn
);
8698 case 5: /* Exception return. */
8702 if (rn
!= 14 || rd
!= 15) {
8705 tmp
= load_reg(s
, rn
);
8706 tcg_gen_subi_i32(tmp
, tmp
, insn
& 0xff);
8707 gen_exception_return(s
, tmp
);
8709 case 6: /* mrs cpsr. */
8710 tmp
= tcg_temp_new_i32();
8712 addr
= tcg_const_i32(insn
& 0xff);
8713 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
8714 tcg_temp_free_i32(addr
);
8716 gen_helper_cpsr_read(tmp
, cpu_env
);
8718 store_reg(s
, rd
, tmp
);
8720 case 7: /* mrs spsr. */
8721 /* Not accessible in user mode. */
8722 if (IS_USER(s
) || IS_M(env
))
8724 tmp
= load_cpu_field(spsr
);
8725 store_reg(s
, rd
, tmp
);
8730 /* Conditional branch. */
8731 op
= (insn
>> 22) & 0xf;
8732 /* Generate a conditional jump to next instruction. */
8733 s
->condlabel
= gen_new_label();
8734 gen_test_cc(op
^ 1, s
->condlabel
);
8737 /* offset[11:1] = insn[10:0] */
8738 offset
= (insn
& 0x7ff) << 1;
8739 /* offset[17:12] = insn[21:16]. */
8740 offset
|= (insn
& 0x003f0000) >> 4;
8741 /* offset[31:20] = insn[26]. */
8742 offset
|= ((int32_t)((insn
<< 5) & 0x80000000)) >> 11;
8743 /* offset[18] = insn[13]. */
8744 offset
|= (insn
& (1 << 13)) << 5;
8745 /* offset[19] = insn[11]. */
8746 offset
|= (insn
& (1 << 11)) << 8;
8748 /* jump to the offset */
8749 gen_jmp(s
, s
->pc
+ offset
);
8752 /* Data processing immediate. */
8753 if (insn
& (1 << 25)) {
8754 if (insn
& (1 << 24)) {
8755 if (insn
& (1 << 20))
8757 /* Bitfield/Saturate. */
8758 op
= (insn
>> 21) & 7;
8760 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
8762 tmp
= tcg_temp_new_i32();
8763 tcg_gen_movi_i32(tmp
, 0);
8765 tmp
= load_reg(s
, rn
);
8768 case 2: /* Signed bitfield extract. */
8770 if (shift
+ imm
> 32)
8773 gen_sbfx(tmp
, shift
, imm
);
8775 case 6: /* Unsigned bitfield extract. */
8777 if (shift
+ imm
> 32)
8780 gen_ubfx(tmp
, shift
, (1u << imm
) - 1);
8782 case 3: /* Bitfield insert/clear. */
8785 imm
= imm
+ 1 - shift
;
8787 tmp2
= load_reg(s
, rd
);
8788 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, imm
);
8789 tcg_temp_free_i32(tmp2
);
8794 default: /* Saturate. */
8797 tcg_gen_sari_i32(tmp
, tmp
, shift
);
8799 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8801 tmp2
= tcg_const_i32(imm
);
8804 if ((op
& 1) && shift
== 0)
8805 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
8807 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
8810 if ((op
& 1) && shift
== 0)
8811 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
8813 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
8815 tcg_temp_free_i32(tmp2
);
8818 store_reg(s
, rd
, tmp
);
8820 imm
= ((insn
& 0x04000000) >> 15)
8821 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
8822 if (insn
& (1 << 22)) {
8823 /* 16-bit immediate. */
8824 imm
|= (insn
>> 4) & 0xf000;
8825 if (insn
& (1 << 23)) {
8827 tmp
= load_reg(s
, rd
);
8828 tcg_gen_ext16u_i32(tmp
, tmp
);
8829 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
8832 tmp
= tcg_temp_new_i32();
8833 tcg_gen_movi_i32(tmp
, imm
);
8836 /* Add/sub 12-bit immediate. */
8838 offset
= s
->pc
& ~(uint32_t)3;
8839 if (insn
& (1 << 23))
8843 tmp
= tcg_temp_new_i32();
8844 tcg_gen_movi_i32(tmp
, offset
);
8846 tmp
= load_reg(s
, rn
);
8847 if (insn
& (1 << 23))
8848 tcg_gen_subi_i32(tmp
, tmp
, imm
);
8850 tcg_gen_addi_i32(tmp
, tmp
, imm
);
8853 store_reg(s
, rd
, tmp
);
8856 int shifter_out
= 0;
8857 /* modified 12-bit immediate. */
8858 shift
= ((insn
& 0x04000000) >> 23) | ((insn
& 0x7000) >> 12);
8859 imm
= (insn
& 0xff);
8862 /* Nothing to do. */
8864 case 1: /* 00XY00XY */
8867 case 2: /* XY00XY00 */
8871 case 3: /* XYXYXYXY */
8875 default: /* Rotated constant. */
8876 shift
= (shift
<< 1) | (imm
>> 7);
8878 imm
= imm
<< (32 - shift
);
8882 tmp2
= tcg_temp_new_i32();
8883 tcg_gen_movi_i32(tmp2
, imm
);
8884 rn
= (insn
>> 16) & 0xf;
8886 tmp
= tcg_temp_new_i32();
8887 tcg_gen_movi_i32(tmp
, 0);
8889 tmp
= load_reg(s
, rn
);
8891 op
= (insn
>> 21) & 0xf;
8892 if (gen_thumb2_data_op(s
, op
, (insn
& (1 << 20)) != 0,
8893 shifter_out
, tmp
, tmp2
))
8895 tcg_temp_free_i32(tmp2
);
8896 rd
= (insn
>> 8) & 0xf;
8898 store_reg(s
, rd
, tmp
);
8900 tcg_temp_free_i32(tmp
);
8905 case 12: /* Load/store single data item. */
8910 if ((insn
& 0x01100000) == 0x01000000) {
8911 if (disas_neon_ls_insn(env
, s
, insn
))
8915 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
8917 if (!(insn
& (1 << 20))) {
8921 /* Byte or halfword load space with dest == r15 : memory hints.
8922 * Catch them early so we don't emit pointless addressing code.
8923 * This space is a mix of:
8924 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8925 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8927 * unallocated hints, which must be treated as NOPs
8928 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8929 * which is easiest for the decoding logic
8930 * Some space which must UNDEF
8932 int op1
= (insn
>> 23) & 3;
8933 int op2
= (insn
>> 6) & 0x3f;
8938 /* UNPREDICTABLE, unallocated hint or
8939 * PLD/PLDW/PLI (literal)
8944 return 0; /* PLD/PLDW/PLI or unallocated hint */
8946 if ((op2
== 0) || ((op2
& 0x3c) == 0x30)) {
8947 return 0; /* PLD/PLDW/PLI or unallocated hint */
8949 /* UNDEF space, or an UNPREDICTABLE */
8955 addr
= tcg_temp_new_i32();
8957 /* s->pc has already been incremented by 4. */
8958 imm
= s
->pc
& 0xfffffffc;
8959 if (insn
& (1 << 23))
8960 imm
+= insn
& 0xfff;
8962 imm
-= insn
& 0xfff;
8963 tcg_gen_movi_i32(addr
, imm
);
8965 addr
= load_reg(s
, rn
);
8966 if (insn
& (1 << 23)) {
8967 /* Positive offset. */
8969 tcg_gen_addi_i32(addr
, addr
, imm
);
8972 switch ((insn
>> 8) & 0xf) {
8973 case 0x0: /* Shifted Register. */
8974 shift
= (insn
>> 4) & 0xf;
8976 tcg_temp_free_i32(addr
);
8979 tmp
= load_reg(s
, rm
);
8981 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8982 tcg_gen_add_i32(addr
, addr
, tmp
);
8983 tcg_temp_free_i32(tmp
);
8985 case 0xc: /* Negative offset. */
8986 tcg_gen_addi_i32(addr
, addr
, -imm
);
8988 case 0xe: /* User privilege. */
8989 tcg_gen_addi_i32(addr
, addr
, imm
);
8992 case 0x9: /* Post-decrement. */
8995 case 0xb: /* Post-increment. */
8999 case 0xd: /* Pre-decrement. */
9002 case 0xf: /* Pre-increment. */
9003 tcg_gen_addi_i32(addr
, addr
, imm
);
9007 tcg_temp_free_i32(addr
);
9012 if (insn
& (1 << 20)) {
9015 case 0: tmp
= gen_ld8u(addr
, user
); break;
9016 case 4: tmp
= gen_ld8s(addr
, user
); break;
9017 case 1: tmp
= gen_ld16u(addr
, user
); break;
9018 case 5: tmp
= gen_ld16s(addr
, user
); break;
9019 case 2: tmp
= gen_ld32(addr
, user
); break;
9021 tcg_temp_free_i32(addr
);
9027 store_reg(s
, rs
, tmp
);
9031 tmp
= load_reg(s
, rs
);
9033 case 0: gen_st8(tmp
, addr
, user
); break;
9034 case 1: gen_st16(tmp
, addr
, user
); break;
9035 case 2: gen_st32(tmp
, addr
, user
); break;
9037 tcg_temp_free_i32(addr
);
9042 tcg_gen_addi_i32(addr
, addr
, imm
);
9044 store_reg(s
, rn
, addr
);
9046 tcg_temp_free_i32(addr
);
9058 static void disas_thumb_insn(CPUARMState
*env
, DisasContext
*s
)
9060 uint32_t val
, insn
, op
, rm
, rn
, rd
, shift
, cond
;
9067 if (s
->condexec_mask
) {
9068 cond
= s
->condexec_cond
;
9069 if (cond
!= 0x0e) { /* Skip conditional when condition is AL. */
9070 s
->condlabel
= gen_new_label();
9071 gen_test_cc(cond
^ 1, s
->condlabel
);
9076 insn
= arm_lduw_code(env
, s
->pc
, s
->bswap_code
);
9079 switch (insn
>> 12) {
9083 op
= (insn
>> 11) & 3;
9086 rn
= (insn
>> 3) & 7;
9087 tmp
= load_reg(s
, rn
);
9088 if (insn
& (1 << 10)) {
9090 tmp2
= tcg_temp_new_i32();
9091 tcg_gen_movi_i32(tmp2
, (insn
>> 6) & 7);
9094 rm
= (insn
>> 6) & 7;
9095 tmp2
= load_reg(s
, rm
);
9097 if (insn
& (1 << 9)) {
9098 if (s
->condexec_mask
)
9099 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9101 gen_sub_CC(tmp
, tmp
, tmp2
);
9103 if (s
->condexec_mask
)
9104 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9106 gen_add_CC(tmp
, tmp
, tmp2
);
9108 tcg_temp_free_i32(tmp2
);
9109 store_reg(s
, rd
, tmp
);
9111 /* shift immediate */
9112 rm
= (insn
>> 3) & 7;
9113 shift
= (insn
>> 6) & 0x1f;
9114 tmp
= load_reg(s
, rm
);
9115 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
9116 if (!s
->condexec_mask
)
9118 store_reg(s
, rd
, tmp
);
9122 /* arithmetic large immediate */
9123 op
= (insn
>> 11) & 3;
9124 rd
= (insn
>> 8) & 0x7;
9125 if (op
== 0) { /* mov */
9126 tmp
= tcg_temp_new_i32();
9127 tcg_gen_movi_i32(tmp
, insn
& 0xff);
9128 if (!s
->condexec_mask
)
9130 store_reg(s
, rd
, tmp
);
9132 tmp
= load_reg(s
, rd
);
9133 tmp2
= tcg_temp_new_i32();
9134 tcg_gen_movi_i32(tmp2
, insn
& 0xff);
9137 gen_sub_CC(tmp
, tmp
, tmp2
);
9138 tcg_temp_free_i32(tmp
);
9139 tcg_temp_free_i32(tmp2
);
9142 if (s
->condexec_mask
)
9143 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9145 gen_add_CC(tmp
, tmp
, tmp2
);
9146 tcg_temp_free_i32(tmp2
);
9147 store_reg(s
, rd
, tmp
);
9150 if (s
->condexec_mask
)
9151 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9153 gen_sub_CC(tmp
, tmp
, tmp2
);
9154 tcg_temp_free_i32(tmp2
);
9155 store_reg(s
, rd
, tmp
);
9161 if (insn
& (1 << 11)) {
9162 rd
= (insn
>> 8) & 7;
9163 /* load pc-relative. Bit 1 of PC is ignored. */
9164 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
9165 val
&= ~(uint32_t)2;
9166 addr
= tcg_temp_new_i32();
9167 tcg_gen_movi_i32(addr
, val
);
9168 tmp
= gen_ld32(addr
, IS_USER(s
));
9169 tcg_temp_free_i32(addr
);
9170 store_reg(s
, rd
, tmp
);
9173 if (insn
& (1 << 10)) {
9174 /* data processing extended or blx */
9175 rd
= (insn
& 7) | ((insn
>> 4) & 8);
9176 rm
= (insn
>> 3) & 0xf;
9177 op
= (insn
>> 8) & 3;
9180 tmp
= load_reg(s
, rd
);
9181 tmp2
= load_reg(s
, rm
);
9182 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9183 tcg_temp_free_i32(tmp2
);
9184 store_reg(s
, rd
, tmp
);
9187 tmp
= load_reg(s
, rd
);
9188 tmp2
= load_reg(s
, rm
);
9189 gen_sub_CC(tmp
, tmp
, tmp2
);
9190 tcg_temp_free_i32(tmp2
);
9191 tcg_temp_free_i32(tmp
);
9193 case 2: /* mov/cpy */
9194 tmp
= load_reg(s
, rm
);
9195 store_reg(s
, rd
, tmp
);
9197 case 3:/* branch [and link] exchange thumb register */
9198 tmp
= load_reg(s
, rm
);
9199 if (insn
& (1 << 7)) {
9201 val
= (uint32_t)s
->pc
| 1;
9202 tmp2
= tcg_temp_new_i32();
9203 tcg_gen_movi_i32(tmp2
, val
);
9204 store_reg(s
, 14, tmp2
);
9206 /* already thumb, no need to check */
9213 /* data processing register */
9215 rm
= (insn
>> 3) & 7;
9216 op
= (insn
>> 6) & 0xf;
9217 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
9218 /* the shift/rotate ops want the operands backwards */
9227 if (op
== 9) { /* neg */
9228 tmp
= tcg_temp_new_i32();
9229 tcg_gen_movi_i32(tmp
, 0);
9230 } else if (op
!= 0xf) { /* mvn doesn't read its first operand */
9231 tmp
= load_reg(s
, rd
);
9236 tmp2
= load_reg(s
, rm
);
9239 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
9240 if (!s
->condexec_mask
)
9244 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
9245 if (!s
->condexec_mask
)
9249 if (s
->condexec_mask
) {
9250 gen_shl(tmp2
, tmp2
, tmp
);
9252 gen_helper_shl_cc(tmp2
, cpu_env
, tmp2
, tmp
);
9257 if (s
->condexec_mask
) {
9258 gen_shr(tmp2
, tmp2
, tmp
);
9260 gen_helper_shr_cc(tmp2
, cpu_env
, tmp2
, tmp
);
9265 if (s
->condexec_mask
) {
9266 gen_sar(tmp2
, tmp2
, tmp
);
9268 gen_helper_sar_cc(tmp2
, cpu_env
, tmp2
, tmp
);
9273 if (s
->condexec_mask
) {
9276 gen_adc_CC(tmp
, tmp
, tmp2
);
9280 if (s
->condexec_mask
) {
9281 gen_sub_carry(tmp
, tmp
, tmp2
);
9283 gen_sbc_CC(tmp
, tmp
, tmp2
);
9287 if (s
->condexec_mask
) {
9288 tcg_gen_andi_i32(tmp
, tmp
, 0x1f);
9289 tcg_gen_rotr_i32(tmp2
, tmp2
, tmp
);
9291 gen_helper_ror_cc(tmp2
, cpu_env
, tmp2
, tmp
);
9296 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
9301 if (s
->condexec_mask
)
9302 tcg_gen_neg_i32(tmp
, tmp2
);
9304 gen_sub_CC(tmp
, tmp
, tmp2
);
9307 gen_sub_CC(tmp
, tmp
, tmp2
);
9311 gen_add_CC(tmp
, tmp
, tmp2
);
9315 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
9316 if (!s
->condexec_mask
)
9320 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
9321 if (!s
->condexec_mask
)
9325 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
9326 if (!s
->condexec_mask
)
9330 tcg_gen_not_i32(tmp2
, tmp2
);
9331 if (!s
->condexec_mask
)
9339 store_reg(s
, rm
, tmp2
);
9341 tcg_temp_free_i32(tmp
);
9343 store_reg(s
, rd
, tmp
);
9344 tcg_temp_free_i32(tmp2
);
9347 tcg_temp_free_i32(tmp
);
9348 tcg_temp_free_i32(tmp2
);
9353 /* load/store register offset. */
9355 rn
= (insn
>> 3) & 7;
9356 rm
= (insn
>> 6) & 7;
9357 op
= (insn
>> 9) & 7;
9358 addr
= load_reg(s
, rn
);
9359 tmp
= load_reg(s
, rm
);
9360 tcg_gen_add_i32(addr
, addr
, tmp
);
9361 tcg_temp_free_i32(tmp
);
9363 if (op
< 3) /* store */
9364 tmp
= load_reg(s
, rd
);
9368 gen_st32(tmp
, addr
, IS_USER(s
));
9371 gen_st16(tmp
, addr
, IS_USER(s
));
9374 gen_st8(tmp
, addr
, IS_USER(s
));
9377 tmp
= gen_ld8s(addr
, IS_USER(s
));
9380 tmp
= gen_ld32(addr
, IS_USER(s
));
9383 tmp
= gen_ld16u(addr
, IS_USER(s
));
9386 tmp
= gen_ld8u(addr
, IS_USER(s
));
9389 tmp
= gen_ld16s(addr
, IS_USER(s
));
9392 if (op
>= 3) /* load */
9393 store_reg(s
, rd
, tmp
);
9394 tcg_temp_free_i32(addr
);
9398 /* load/store word immediate offset */
9400 rn
= (insn
>> 3) & 7;
9401 addr
= load_reg(s
, rn
);
9402 val
= (insn
>> 4) & 0x7c;
9403 tcg_gen_addi_i32(addr
, addr
, val
);
9405 if (insn
& (1 << 11)) {
9407 tmp
= gen_ld32(addr
, IS_USER(s
));
9408 store_reg(s
, rd
, tmp
);
9411 tmp
= load_reg(s
, rd
);
9412 gen_st32(tmp
, addr
, IS_USER(s
));
9414 tcg_temp_free_i32(addr
);
9418 /* load/store byte immediate offset */
9420 rn
= (insn
>> 3) & 7;
9421 addr
= load_reg(s
, rn
);
9422 val
= (insn
>> 6) & 0x1f;
9423 tcg_gen_addi_i32(addr
, addr
, val
);
9425 if (insn
& (1 << 11)) {
9427 tmp
= gen_ld8u(addr
, IS_USER(s
));
9428 store_reg(s
, rd
, tmp
);
9431 tmp
= load_reg(s
, rd
);
9432 gen_st8(tmp
, addr
, IS_USER(s
));
9434 tcg_temp_free_i32(addr
);
9438 /* load/store halfword immediate offset */
9440 rn
= (insn
>> 3) & 7;
9441 addr
= load_reg(s
, rn
);
9442 val
= (insn
>> 5) & 0x3e;
9443 tcg_gen_addi_i32(addr
, addr
, val
);
9445 if (insn
& (1 << 11)) {
9447 tmp
= gen_ld16u(addr
, IS_USER(s
));
9448 store_reg(s
, rd
, tmp
);
9451 tmp
= load_reg(s
, rd
);
9452 gen_st16(tmp
, addr
, IS_USER(s
));
9454 tcg_temp_free_i32(addr
);
9458 /* load/store from stack */
9459 rd
= (insn
>> 8) & 7;
9460 addr
= load_reg(s
, 13);
9461 val
= (insn
& 0xff) * 4;
9462 tcg_gen_addi_i32(addr
, addr
, val
);
9464 if (insn
& (1 << 11)) {
9466 tmp
= gen_ld32(addr
, IS_USER(s
));
9467 store_reg(s
, rd
, tmp
);
9470 tmp
= load_reg(s
, rd
);
9471 gen_st32(tmp
, addr
, IS_USER(s
));
9473 tcg_temp_free_i32(addr
);
9477 /* add to high reg */
9478 rd
= (insn
>> 8) & 7;
9479 if (insn
& (1 << 11)) {
9481 tmp
= load_reg(s
, 13);
9483 /* PC. bit 1 is ignored. */
9484 tmp
= tcg_temp_new_i32();
9485 tcg_gen_movi_i32(tmp
, (s
->pc
+ 2) & ~(uint32_t)2);
9487 val
= (insn
& 0xff) * 4;
9488 tcg_gen_addi_i32(tmp
, tmp
, val
);
9489 store_reg(s
, rd
, tmp
);
9494 op
= (insn
>> 8) & 0xf;
9497 /* adjust stack pointer */
9498 tmp
= load_reg(s
, 13);
9499 val
= (insn
& 0x7f) * 4;
9500 if (insn
& (1 << 7))
9501 val
= -(int32_t)val
;
9502 tcg_gen_addi_i32(tmp
, tmp
, val
);
9503 store_reg(s
, 13, tmp
);
9506 case 2: /* sign/zero extend. */
9509 rm
= (insn
>> 3) & 7;
9510 tmp
= load_reg(s
, rm
);
9511 switch ((insn
>> 6) & 3) {
9512 case 0: gen_sxth(tmp
); break;
9513 case 1: gen_sxtb(tmp
); break;
9514 case 2: gen_uxth(tmp
); break;
9515 case 3: gen_uxtb(tmp
); break;
9517 store_reg(s
, rd
, tmp
);
9519 case 4: case 5: case 0xc: case 0xd:
9521 addr
= load_reg(s
, 13);
9522 if (insn
& (1 << 8))
9526 for (i
= 0; i
< 8; i
++) {
9527 if (insn
& (1 << i
))
9530 if ((insn
& (1 << 11)) == 0) {
9531 tcg_gen_addi_i32(addr
, addr
, -offset
);
9533 for (i
= 0; i
< 8; i
++) {
9534 if (insn
& (1 << i
)) {
9535 if (insn
& (1 << 11)) {
9537 tmp
= gen_ld32(addr
, IS_USER(s
));
9538 store_reg(s
, i
, tmp
);
9541 tmp
= load_reg(s
, i
);
9542 gen_st32(tmp
, addr
, IS_USER(s
));
9544 /* advance to the next address. */
9545 tcg_gen_addi_i32(addr
, addr
, 4);
9549 if (insn
& (1 << 8)) {
9550 if (insn
& (1 << 11)) {
9552 tmp
= gen_ld32(addr
, IS_USER(s
));
9553 /* don't set the pc until the rest of the instruction
9557 tmp
= load_reg(s
, 14);
9558 gen_st32(tmp
, addr
, IS_USER(s
));
9560 tcg_gen_addi_i32(addr
, addr
, 4);
9562 if ((insn
& (1 << 11)) == 0) {
9563 tcg_gen_addi_i32(addr
, addr
, -offset
);
9565 /* write back the new stack pointer */
9566 store_reg(s
, 13, addr
);
9567 /* set the new PC value */
9568 if ((insn
& 0x0900) == 0x0900) {
9569 store_reg_from_load(env
, s
, 15, tmp
);
9573 case 1: case 3: case 9: case 11: /* czb */
9575 tmp
= load_reg(s
, rm
);
9576 s
->condlabel
= gen_new_label();
9578 if (insn
& (1 << 11))
9579 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, s
->condlabel
);
9581 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, s
->condlabel
);
9582 tcg_temp_free_i32(tmp
);
9583 offset
= ((insn
& 0xf8) >> 2) | (insn
& 0x200) >> 3;
9584 val
= (uint32_t)s
->pc
+ 2;
9589 case 15: /* IT, nop-hint. */
9590 if ((insn
& 0xf) == 0) {
9591 gen_nop_hint(s
, (insn
>> 4) & 0xf);
9595 s
->condexec_cond
= (insn
>> 4) & 0xe;
9596 s
->condexec_mask
= insn
& 0x1f;
9597 /* No actual code generated for this insn, just setup state. */
9600 case 0xe: /* bkpt */
9602 gen_exception_insn(s
, 2, EXCP_BKPT
);
9607 rn
= (insn
>> 3) & 0x7;
9609 tmp
= load_reg(s
, rn
);
9610 switch ((insn
>> 6) & 3) {
9611 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
9612 case 1: gen_rev16(tmp
); break;
9613 case 3: gen_revsh(tmp
); break;
9614 default: goto illegal_op
;
9616 store_reg(s
, rd
, tmp
);
9620 switch ((insn
>> 5) & 7) {
9624 if (((insn
>> 3) & 1) != s
->bswap_code
) {
9625 /* Dynamic endianness switching not implemented. */
9636 tmp
= tcg_const_i32((insn
& (1 << 4)) != 0);
9639 addr
= tcg_const_i32(19);
9640 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
9641 tcg_temp_free_i32(addr
);
9645 addr
= tcg_const_i32(16);
9646 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
9647 tcg_temp_free_i32(addr
);
9649 tcg_temp_free_i32(tmp
);
9652 if (insn
& (1 << 4)) {
9653 shift
= CPSR_A
| CPSR_I
| CPSR_F
;
9657 gen_set_psr_im(s
, ((insn
& 7) << 6), 0, shift
);
9672 /* load/store multiple */
9674 TCGV_UNUSED(loaded_var
);
9675 rn
= (insn
>> 8) & 0x7;
9676 addr
= load_reg(s
, rn
);
9677 for (i
= 0; i
< 8; i
++) {
9678 if (insn
& (1 << i
)) {
9679 if (insn
& (1 << 11)) {
9681 tmp
= gen_ld32(addr
, IS_USER(s
));
9685 store_reg(s
, i
, tmp
);
9689 tmp
= load_reg(s
, i
);
9690 gen_st32(tmp
, addr
, IS_USER(s
));
9692 /* advance to the next address */
9693 tcg_gen_addi_i32(addr
, addr
, 4);
9696 if ((insn
& (1 << rn
)) == 0) {
9697 /* base reg not in list: base register writeback */
9698 store_reg(s
, rn
, addr
);
9700 /* base reg in list: if load, complete it now */
9701 if (insn
& (1 << 11)) {
9702 store_reg(s
, rn
, loaded_var
);
9704 tcg_temp_free_i32(addr
);
9709 /* conditional branch or swi */
9710 cond
= (insn
>> 8) & 0xf;
9716 gen_set_pc_im(s
->pc
);
9717 s
->is_jmp
= DISAS_SWI
;
9720 /* generate a conditional jump to next instruction */
9721 s
->condlabel
= gen_new_label();
9722 gen_test_cc(cond
^ 1, s
->condlabel
);
9725 /* jump to the offset */
9726 val
= (uint32_t)s
->pc
+ 2;
9727 offset
= ((int32_t)insn
<< 24) >> 24;
9733 if (insn
& (1 << 11)) {
9734 if (disas_thumb2_insn(env
, s
, insn
))
9738 /* unconditional branch */
9739 val
= (uint32_t)s
->pc
;
9740 offset
= ((int32_t)insn
<< 21) >> 21;
9741 val
+= (offset
<< 1) + 2;
9746 if (disas_thumb2_insn(env
, s
, insn
))
9752 gen_exception_insn(s
, 4, EXCP_UDEF
);
9756 gen_exception_insn(s
, 2, EXCP_UDEF
);
9759 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9760 basic block 'tb'. If search_pc is TRUE, also generate PC
9761 information for each intermediate instruction. */
9762 static inline void gen_intermediate_code_internal(CPUARMState
*env
,
9763 TranslationBlock
*tb
,
9766 DisasContext dc1
, *dc
= &dc1
;
9768 uint16_t *gen_opc_end
;
9770 target_ulong pc_start
;
9771 uint32_t next_page_start
;
9775 /* generate intermediate code */
9780 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
9782 dc
->is_jmp
= DISAS_NEXT
;
9784 dc
->singlestep_enabled
= env
->singlestep_enabled
;
9786 dc
->thumb
= ARM_TBFLAG_THUMB(tb
->flags
);
9787 dc
->bswap_code
= ARM_TBFLAG_BSWAP_CODE(tb
->flags
);
9788 dc
->condexec_mask
= (ARM_TBFLAG_CONDEXEC(tb
->flags
) & 0xf) << 1;
9789 dc
->condexec_cond
= ARM_TBFLAG_CONDEXEC(tb
->flags
) >> 4;
9790 #if !defined(CONFIG_USER_ONLY)
9791 dc
->user
= (ARM_TBFLAG_PRIV(tb
->flags
) == 0);
9793 dc
->vfp_enabled
= ARM_TBFLAG_VFPEN(tb
->flags
);
9794 dc
->vec_len
= ARM_TBFLAG_VECLEN(tb
->flags
);
9795 dc
->vec_stride
= ARM_TBFLAG_VECSTRIDE(tb
->flags
);
9796 cpu_F0s
= tcg_temp_new_i32();
9797 cpu_F1s
= tcg_temp_new_i32();
9798 cpu_F0d
= tcg_temp_new_i64();
9799 cpu_F1d
= tcg_temp_new_i64();
9802 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9803 cpu_M0
= tcg_temp_new_i64();
9804 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
9807 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
9809 max_insns
= CF_COUNT_MASK
;
9813 tcg_clear_temp_count();
9815 /* A note on handling of the condexec (IT) bits:
9817 * We want to avoid the overhead of having to write the updated condexec
9818 * bits back to the CPUARMState for every instruction in an IT block. So:
9819 * (1) if the condexec bits are not already zero then we write
9820 * zero back into the CPUARMState now. This avoids complications trying
9821 * to do it at the end of the block. (For example if we don't do this
9822 * it's hard to identify whether we can safely skip writing condexec
9823 * at the end of the TB, which we definitely want to do for the case
9824 * where a TB doesn't do anything with the IT state at all.)
9825 * (2) if we are going to leave the TB then we call gen_set_condexec()
9826 * which will write the correct value into CPUARMState if zero is wrong.
9827 * This is done both for leaving the TB at the end, and for leaving
9828 * it because of an exception we know will happen, which is done in
9829 * gen_exception_insn(). The latter is necessary because we need to
9830 * leave the TB with the PC/IT state just prior to execution of the
9831 * instruction which caused the exception.
9832 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9833 * then the CPUARMState will be wrong and we need to reset it.
9834 * This is handled in the same way as restoration of the
9835 * PC in these situations: we will be called again with search_pc=1
9836 * and generate a mapping of the condexec bits for each PC in
9837 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9838 * this to restore the condexec bits.
9840 * Note that there are no instructions which can read the condexec
9841 * bits, and none which can write non-static values to them, so
9842 * we don't need to care about whether CPUARMState is correct in the
9846 /* Reset the conditional execution bits immediately. This avoids
9847 complications trying to do it at the end of the block. */
9848 if (dc
->condexec_mask
|| dc
->condexec_cond
)
9850 TCGv tmp
= tcg_temp_new_i32();
9851 tcg_gen_movi_i32(tmp
, 0);
9852 store_cpu_field(tmp
, condexec_bits
);
9855 #ifdef CONFIG_USER_ONLY
9856 /* Intercept jump to the magic kernel page. */
9857 if (dc
->pc
>= 0xffff0000) {
9858 /* We always get here via a jump, so know we are not in a
9859 conditional execution block. */
9860 gen_exception(EXCP_KERNEL_TRAP
);
9861 dc
->is_jmp
= DISAS_UPDATE
;
9865 if (dc
->pc
>= 0xfffffff0 && IS_M(env
)) {
9866 /* We always get here via a jump, so know we are not in a
9867 conditional execution block. */
9868 gen_exception(EXCP_EXCEPTION_EXIT
);
9869 dc
->is_jmp
= DISAS_UPDATE
;
9874 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
9875 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
9876 if (bp
->pc
== dc
->pc
) {
9877 gen_exception_insn(dc
, 0, EXCP_DEBUG
);
9878 /* Advance PC so that clearing the breakpoint will
9879 invalidate this TB. */
9881 goto done_generating
;
9887 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
9891 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
9893 tcg_ctx
.gen_opc_pc
[lj
] = dc
->pc
;
9894 gen_opc_condexec_bits
[lj
] = (dc
->condexec_cond
<< 4) | (dc
->condexec_mask
>> 1);
9895 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
9896 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
9899 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
9902 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
9903 tcg_gen_debug_insn_start(dc
->pc
);
9907 disas_thumb_insn(env
, dc
);
9908 if (dc
->condexec_mask
) {
9909 dc
->condexec_cond
= (dc
->condexec_cond
& 0xe)
9910 | ((dc
->condexec_mask
>> 4) & 1);
9911 dc
->condexec_mask
= (dc
->condexec_mask
<< 1) & 0x1f;
9912 if (dc
->condexec_mask
== 0) {
9913 dc
->condexec_cond
= 0;
9917 disas_arm_insn(env
, dc
);
9920 if (dc
->condjmp
&& !dc
->is_jmp
) {
9921 gen_set_label(dc
->condlabel
);
9925 if (tcg_check_temp_count()) {
9926 fprintf(stderr
, "TCG temporary leak before %08x\n", dc
->pc
);
9929 /* Translation stops when a conditional branch is encountered.
9930 * Otherwise the subsequent code could get translated several times.
9931 * Also stop translation when a page boundary is reached. This
9932 * ensures prefetch aborts occur at the right place. */
9934 } while (!dc
->is_jmp
&& tcg_ctx
.gen_opc_ptr
< gen_opc_end
&&
9935 !env
->singlestep_enabled
&&
9937 dc
->pc
< next_page_start
&&
9938 num_insns
< max_insns
);
9940 if (tb
->cflags
& CF_LAST_IO
) {
9942 /* FIXME: This can theoretically happen with self-modifying
9944 cpu_abort(env
, "IO on conditional branch instruction");
9949 /* At this stage dc->condjmp will only be set when the skipped
9950 instruction was a conditional branch or trap, and the PC has
9951 already been written. */
9952 if (unlikely(env
->singlestep_enabled
)) {
9953 /* Make sure the pc is updated, and raise a debug exception. */
9955 gen_set_condexec(dc
);
9956 if (dc
->is_jmp
== DISAS_SWI
) {
9957 gen_exception(EXCP_SWI
);
9959 gen_exception(EXCP_DEBUG
);
9961 gen_set_label(dc
->condlabel
);
9963 if (dc
->condjmp
|| !dc
->is_jmp
) {
9964 gen_set_pc_im(dc
->pc
);
9967 gen_set_condexec(dc
);
9968 if (dc
->is_jmp
== DISAS_SWI
&& !dc
->condjmp
) {
9969 gen_exception(EXCP_SWI
);
9971 /* FIXME: Single stepping a WFI insn will not halt
9973 gen_exception(EXCP_DEBUG
);
9976 /* While branches must always occur at the end of an IT block,
9977 there are a few other things that can cause us to terminate
9978 the TB in the middle of an IT block:
9979 - Exception generating instructions (bkpt, swi, undefined).
9981 - Hardware watchpoints.
9982 Hardware breakpoints have already been handled and skip this code.
9984 gen_set_condexec(dc
);
9985 switch(dc
->is_jmp
) {
9987 gen_goto_tb(dc
, 1, dc
->pc
);
9992 /* indicate that the hash table must be used to find the next TB */
9996 /* nothing more to generate */
9999 gen_helper_wfi(cpu_env
);
10002 gen_exception(EXCP_SWI
);
10006 gen_set_label(dc
->condlabel
);
10007 gen_set_condexec(dc
);
10008 gen_goto_tb(dc
, 1, dc
->pc
);
10014 gen_icount_end(tb
, num_insns
);
10015 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
10018 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
10019 qemu_log("----------------\n");
10020 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
10021 log_target_disas(env
, pc_start
, dc
->pc
- pc_start
,
10022 dc
->thumb
| (dc
->bswap_code
<< 1));
10027 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
10030 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
10032 tb
->size
= dc
->pc
- pc_start
;
10033 tb
->icount
= num_insns
;
10037 void gen_intermediate_code(CPUARMState
*env
, TranslationBlock
*tb
)
10039 gen_intermediate_code_internal(env
, tb
, 0);
10042 void gen_intermediate_code_pc(CPUARMState
*env
, TranslationBlock
*tb
)
10044 gen_intermediate_code_internal(env
, tb
, 1);
/* Human-readable names for the 16 possible CPSR mode field values (CPSR[3:0]
 * with the M[4] bit masked off); "???" marks encodings that are not valid
 * ARM processor modes.  Indexed by (psr & 0xf) in cpu_dump_state(). */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
10052 void cpu_dump_state(CPUARMState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
10058 for(i
=0;i
<16;i
++) {
10059 cpu_fprintf(f
, "R%02d=%08x", i
, env
->regs
[i
]);
10061 cpu_fprintf(f
, "\n");
10063 cpu_fprintf(f
, " ");
10065 psr
= cpsr_read(env
);
10066 cpu_fprintf(f
, "PSR=%08x %c%c%c%c %c %s%d\n",
10068 psr
& (1 << 31) ? 'N' : '-',
10069 psr
& (1 << 30) ? 'Z' : '-',
10070 psr
& (1 << 29) ? 'C' : '-',
10071 psr
& (1 << 28) ? 'V' : '-',
10072 psr
& CPSR_T
? 'T' : 'A',
10073 cpu_mode_names
[psr
& 0xf], (psr
& 0x10) ? 32 : 26);
10075 if (flags
& CPU_DUMP_FPU
) {
10076 int numvfpregs
= 0;
10077 if (arm_feature(env
, ARM_FEATURE_VFP
)) {
10080 if (arm_feature(env
, ARM_FEATURE_VFP3
)) {
10083 for (i
= 0; i
< numvfpregs
; i
++) {
10084 uint64_t v
= float64_val(env
->vfp
.regs
[i
]);
10085 cpu_fprintf(f
, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64
"\n",
10086 i
* 2, (uint32_t)v
,
10087 i
* 2 + 1, (uint32_t)(v
>> 32),
10090 cpu_fprintf(f
, "FPSCR: %08x\n", (int)env
->vfp
.xregs
[ARM_VFP_FPSCR
]);
10094 void restore_state_to_opc(CPUARMState
*env
, TranslationBlock
*tb
, int pc_pos
)
10096 env
->regs
[15] = tcg_ctx
.gen_opc_pc
[pc_pos
];
10097 env
->condexec_bits
= gen_opc_condexec_bits
[pc_pos
];