 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
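
/* Note: ARCH() is only usable inside the instruction decoders, which have
   an env in scope and an illegal_op label that raises the UNDEF exception
   for features the emulated core lacks. */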

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
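
/* The xor/sub pair above is the usual branch-free sign extension: after
   masking to width bits, xoring with the sign bit and then subtracting it
   leaves non-negative values unchanged and propagates the sign bit upward
   for negative ones. */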

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Return (b << 32) + a.  Mark inputs as dead.  */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a.  Mark inputs as dead.  */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
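
/* Masking bit 15 out of both addends keeps the low halfword's carry from
   rippling into the high halfword; the saved xor term then restores the
   correct bit 15 of each 16-bit sum. */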

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
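
/* shifter_out_im() copies bit `shift` of var into the carry flag; callers
   pick the bit that the ARM barrel shifter would shift out last. */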

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
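
/* gen_test_cc() below relies on the flag representation used throughout
   this file: ZF holds a value that is zero iff Z is set, the sign bits of
   NF and VF hold N and V, and CF holds 0 or 1. */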

static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}

static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}

static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}

static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}

static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}

static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}

static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
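
/* In the gen_ld / gen_st helpers above, `index` is the MMU mode used for
   the access; callers pass IS_USER(s) so that memory accesses made while
   translating user-mode code go through the user-mode MMU index. */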

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    if (dp)                                                           \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
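
/* NEON double registers overlay the VFP register file, so a 32-bit piece
   of a NEON register is addressed as a single-precision VFP register:
   pass n = 0 for the least significant word of register `reg`. */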

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)
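
/* Bit 20 of a coprocessor instruction distinguishes register reads
   (MRC/MRRC, bit set) from writes (MCR/MCRR); the iwMMXt and cp15
   decoders below test ARM_CP_RW_BIT for this. */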

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_SIZE(name) \
IWMMXT_OP(name##b) \
IWMMXT_OP(name##w) \
IWMMXT_OP(name##l)

#define IWMMXT_OP_1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_SIZE(unpackl)
IWMMXT_OP_SIZE(unpackh)

IWMMXT_OP_1(unpacklub)
IWMMXT_OP_1(unpackluw)
IWMMXT_OP_1(unpacklul)
IWMMXT_OP_1(unpackhub)
IWMMXT_OP_1(unpackhuw)
IWMMXT_OP_1(unpackhul)
IWMMXT_OP_1(unpacklsb)
IWMMXT_OP_1(unpacklsw)
IWMMXT_OP_1(unpacklsl)
IWMMXT_OP_1(unpackhsb)
IWMMXT_OP_1(unpackhsw)
IWMMXT_OP_1(unpackhsl)

IWMMXT_OP_SIZE(cmpeq)
IWMMXT_OP_SIZE(cmpgtu)
IWMMXT_OP_SIZE(cmpgts)

IWMMXT_OP_SIZE(mins)
IWMMXT_OP_SIZE(minu)
IWMMXT_OP_SIZE(maxs)
IWMMXT_OP_SIZE(maxu)

IWMMXT_OP_SIZE(subn)
IWMMXT_OP_SIZE(addn)
IWMMXT_OP_SIZE(subu)
IWMMXT_OP_SIZE(addu)
IWMMXT_OP_SIZE(subs)
IWMMXT_OP_SIZE(adds)

IWMMXT_OP(avgb0)
IWMMXT_OP(avgb1)
IWMMXT_OP(avgw0)
IWMMXT_OP(avgw1)

IWMMXT_OP(packuw)
IWMMXT_OP(packul)
IWMMXT_OP(packuq)
IWMMXT_OP(packsw)
IWMMXT_OP(packsl)
IWMMXT_OP(packsq)
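
/* The wCon control register tracks pending updates: as encoded in the two
   helpers below, bit 1 (MUP) records a data-register update and bit 0
   (CUP) a control-register update. */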

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
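
/* In the shift helper above, bit 8 of the instruction selects whether the
   shift amount comes from one of the wCGR0..wCGR3 control registers or
   from the low half of a wRn data register; the result is then clamped
   with `mask` to the range the operation accepts. */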

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {         /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                            /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {          /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {     /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                    /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {     /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {                    /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {          /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {     /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {                    /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {     /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {                    /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                 /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                 /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                 /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                 /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                 /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                 /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                     /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                               /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                               /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}

/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                               /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                               /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                               /* MIABB */
        case 0xd:                               /* MIABT */
        case 0xe:                               /* MIATB */
        case 0xf:                               /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {             /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}

/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;

    if (insn & ARM_CP_RW_BIT) {
        if (!env->cp[cp].cp_read)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(insn);
        gen_helper_get_cp(tmp, cpu_env, tmp2);
        tcg_temp_free(tmp2);
        store_reg(s, rd, tmp);
    } else {
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        tmp2 = tcg_const_i32(insn);
        gen_helper_set_cp(cpu_env, tmp2, tmp);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
    }
    return 0;
}

static int cp15_user_ok(uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
                || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}

static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    TCGv tmp;
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            return 0;
        }
        store_reg(s, rd, tmp);
    } else {
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            tcg_temp_free_i32(tmp);
            return 0;
        }
    }
    return 1;
}
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }

    /* Pre-v7 versions of the architecture implemented WFI via coprocessor
     * instructions rather than a separate instruction.
     */
    if ((insn & 0x0fff0fff) == 0x0e070f90) {
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    }

    if ((insn & 0x0fff0fff) == 0x0e070f58) {
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise fall through to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
    }

    rd = (insn >> 12) & 0xf;

    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
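
/* Worked example for the macros above: with bits [15:12] == 0x5 and bit 22
   set, VFP_DREG_D on a VFP3 core yields d21 (0x5 | 0x10), while VFP_SREG_D
   treats bit 22 as the low register bit and yields s11 ((0x5 << 1) | 1).
   On a non-VFP3 core VFP_DREG instead rejects the encoding, since d16-d31
   do not exist there. */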
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
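
/* The dup helpers above replicate a sub-word value across a 32-bit lane,
   e.g. gen_neon_dup_u8 turns 0x000000ab into 0xabababab: extract the byte,
   OR in a copy shifted left by 8, then OR in a copy of that 16-bit pair
   shifted left by 16. */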
static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv tmp;
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xff);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
                    /* VCVT from int is always from S reg regardless of dp bit.
                     * VCVT with immediate frac_bits has same format as SREG_M
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20: case 21: case 22: case 23:
                case 28: case 29: case 30: case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = tcg_temp_new_i32();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = tcg_temp_new_i32();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, 16 - rm);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, 32 - rm);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, 16 - rm);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, 32 - rm);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, 16 - rm);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, 32 - rm);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, 16 - rm);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, 32 - rm);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }
                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
                    /* VCVT double to int: always integer result. */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if (s->thumb && rn == 15) {
                /* PC-relative.  */
                addr = tcg_temp_new_i32();
                tcg_gen_movi_i32(addr, s->pc & ~2);
            } else {
                addr = load_reg(s, rn);
            }
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
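
/* gen_goto_tb chains translated blocks: when the destination stays in the
   same guest page as this TB, a direct jump slot can be patched, and the
   value passed to tcg_gen_exit_tb carries the TB pointer with the slot
   index n in its low bits.  Exiting with 0 (the cross-page path) requests
   no chaining. */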
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
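
/* gen_mulxy implements the SMULxy-style operand selection: x and y pick the
   top (1) or bottom (0) signed halfword of each operand.  E.g. for SMULTB,
   x == 1 takes bits [31:16] of t0 via the arithmetic shift and y == 0
   sign-extends bits [15:0] of t1 before the 32x32 multiply. */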
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V4T))
        mask &= ~CPSR_T;
    if (!arm_feature(env, ARM_FEATURE_V5))
        mask &= ~CPSR_Q; /* V5TE in reality */
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
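
/* Example of the flags decoding above: "msr cpsr_fc, rX" sets flags to
   0b1001, giving an initial mask of 0xff0000ff (flag byte plus control
   byte); the feature tests then drop any bits, such as CPSR_Q or CPSR_GE,
   that the emulated core does not implement. */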
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
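
/* Usage sketch for the macros above: with size == 1 and u == 0 in scope,
   GEN_NEON_INTEGER_OP(hadd) switches on ((1 << 1) | 0) == 2 and emits
   gen_helper_neon_hadd_s16(tmp, tmp, tmp2).  The _ENV variant differs only
   in passing cpu_env, for helpers that need CPU state (e.g. to set the
   saturation flag). */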
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (size == 3 || (!q && size == 2)) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (size == 3 || (!q && size == 2)) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
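
/* The helper above (and its 16-bit sibling below) performs VTRN's 2x2
   transpose of adjacent element pairs within a 32-bit chunk.  For bytes:
   t0 = 0x44332211, t1 = 0x88776655 becomes t0 = 0x33771155 and
   t1 = 0x44882266. */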
static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}


static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op, nregs, interleave, spacing, stride, size, reg, pass, load, shift, n;
    TCGv addr, tmp, tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            tmp2 = tcg_temp_new_i32();
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp2, tmp);
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                }
                                tcg_temp_free_i32(tmp);
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
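
/* The identity used above: per bit, dest = (t & c) | (f & ~c), i.e. bits
   set in c select from t and clear bits select from f.  VBSL, VBIT and
   VBIF all reduce to this with different operand orderings, as the logic
   op cases in disas_neon_data_insn show. */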
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
    default: abort();
    }
}
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
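
/* Each entry above is a bitmap of valid size fields: bit n set means
   size == n is legal, so 0x7 allows the 8/16/32-bit forms, 0xf also allows
   the 64-bit (size == 3) forms, and the float ops use 0x5 because size
   bit 1 selects the operation rather than the element width. */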
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int op, q, rd, rn, rm, size, shift, pass, count, pairwise, u, n;
    uint32_t imm, mask;
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        if (size == 3 && op != NEON_3R_LOGIC) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VQADD:
                    if (u) {
                        gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VQSUB:
                    if (u) {
                        gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VADD_VSUB:
                    if (u) {
                        tcg_gen_sub_i64(CPU_V001);
                    } else {
                        tcg_gen_add_i64(CPU_V001);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case NEON_3R_VSHL:
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
            }
            break;
        case NEON_3R_VPADD:
            if (u) {
                return 1;
            }
            /* Fall through */
        case NEON_3R_VPMAX:
        case NEON_3R_VPMIN:
            pairwise = 1;
            break;
        case NEON_3R_FLOAT_ARITH:
            pairwise = (u && size < 2); /* if VPADD (float) */
            break;
        case NEON_3R_FLOAT_MINMAX:
            pairwise = u; /* if VPMIN/VPMAX (float) */
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u && size) {
                /* no encoding for U=0 C=1x */
                return 1;
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (!u) {
                return 1;
            }
            break;
        case NEON_3R_VRECPS_VRSQRTS:
            if (u) {
                return 1;
            }
            break;
        case NEON_3R_VMUL:
            if (u && (size != 0)) {
                /* UNDEF on invalid size for polynomial subcase */
                return 1;
            }
            break;
        default:
            break;
        }

        if (pairwise && q) {
            /* All the pairwise insns UNDEF if Q is set */
            return 1;
        }

        for (pass = 0; pass < (q ? 4 : 2); pass++) {

        if (pairwise) {
            /* Pairwise.  */
            if (pass < 1) {
                tmp = neon_load_reg(rn, 0);
                tmp2 = neon_load_reg(rn, 1);
            } else {
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
            }
        } else {
            /* Elementwise.  */
            tmp = neon_load_reg(rn, pass);
            tmp2 = neon_load_reg(rm, pass);
        }
        switch (op) {
        case NEON_3R_VHADD:
            GEN_NEON_INTEGER_OP(hadd);
            break;
        case NEON_3R_VQADD:
            GEN_NEON_INTEGER_OP(qadd);
            break;
        case NEON_3R_VRHADD:
            GEN_NEON_INTEGER_OP(rhadd);
            break;
        case NEON_3R_LOGIC: /* Logic ops.  */
            switch ((u << 2) | size) {
            case 0: /* VAND */
                tcg_gen_and_i32(tmp, tmp, tmp2);
                break;
            case 1: /* VBIC */
                tcg_gen_andc_i32(tmp, tmp, tmp2);
                break;
            case 2: /* VORR */
                tcg_gen_or_i32(tmp, tmp, tmp2);
                break;
            case 3: /* VORN */
                tcg_gen_orc_i32(tmp, tmp, tmp2);
                break;
            case 4: /* VEOR */
                tcg_gen_xor_i32(tmp, tmp, tmp2);
                break;
            case 5: /* VBSL */
                tmp3 = neon_load_reg(rd, pass);
                gen_neon_bsl(tmp, tmp, tmp2, tmp3);
                tcg_temp_free_i32(tmp3);
                break;
            case 6: /* VBIT */
                tmp3 = neon_load_reg(rd, pass);
                gen_neon_bsl(tmp, tmp, tmp3, tmp2);
                tcg_temp_free_i32(tmp3);
                break;
            case 7: /* VBIF */
                tmp3 = neon_load_reg(rd, pass);
                gen_neon_bsl(tmp, tmp3, tmp, tmp2);
                tcg_temp_free_i32(tmp3);
                break;
            }
            break;
        case NEON_3R_VHSUB:
            GEN_NEON_INTEGER_OP(hsub);
            break;
        case NEON_3R_VQSUB:
            GEN_NEON_INTEGER_OP(qsub);
            break;
        case NEON_3R_VCGT:
            GEN_NEON_INTEGER_OP(cgt);
            break;
        case NEON_3R_VCGE:
            GEN_NEON_INTEGER_OP(cge);
            break;
        case NEON_3R_VSHL:
            GEN_NEON_INTEGER_OP(shl);
            break;
        case NEON_3R_VQSHL:
            GEN_NEON_INTEGER_OP(qshl);
            break;
        case NEON_3R_VRSHL:
            GEN_NEON_INTEGER_OP(rshl);
            break;
        case NEON_3R_VQRSHL:
            GEN_NEON_INTEGER_OP(qrshl);
            break;
        case NEON_3R_VMAX:
            GEN_NEON_INTEGER_OP(max);
            break;
        case NEON_3R_VMIN:
            GEN_NEON_INTEGER_OP(min);
            break;
        case NEON_3R_VABD:
            GEN_NEON_INTEGER_OP(abd);
            break;
        case NEON_3R_VABA:
            GEN_NEON_INTEGER_OP(abd);
            tcg_temp_free_i32(tmp2);
            tmp2 = neon_load_reg(rd, pass);
            gen_neon_add(size, tmp, tmp2);
            break;
        case NEON_3R_VADD_VSUB:
            if (!u) { /* VADD */
                gen_neon_add(size, tmp, tmp2);
            } else { /* VSUB */
                switch (size) {
                case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
            }
            break;
        case NEON_3R_VTST_VCEQ:
            if (!u) { /* VTST */
                switch (size) {
                case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
                case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
                default: abort();
                }
            } else { /* VCEQ */
                switch (size) {
                case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                default: abort();
                }
            }
            break;
        case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
            switch (size) {
            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
            default: abort();
            }
            tcg_temp_free_i32(tmp2);
            tmp2 = neon_load_reg(rd, pass);
            if (u) { /* VMLS */
                gen_neon_rsb(size, tmp, tmp2);
            } else { /* VMLA */
                gen_neon_add(size, tmp, tmp2);
            }
            break;
        case NEON_3R_VMUL:
            if (u) { /* polynomial */
                gen_helper_neon_mul_p8(tmp, tmp, tmp2);
            } else { /* Integer */
                switch (size) {
                case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
            }
            break;
        case NEON_3R_VPMAX:
            GEN_NEON_INTEGER_OP(pmax);
            break;
        case NEON_3R_VPMIN:
            GEN_NEON_INTEGER_OP(pmin);
            break;
        case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high.  */
            if (!u) { /* VQDMULH */
                switch (size) {
                case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
                case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
                default: abort();
                }
            } else { /* VQRDMULH */
                switch (size) {
                case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
                case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
                default: abort();
                }
            }
            break;
        case NEON_3R_VPADD:
            switch (size) {
            case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
            case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
            case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
            default: abort();
            }
            break;
        case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic.  */
            switch ((u << 2) | size) {
            case 0: /* VADD */
                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                break;
            case 2: /* VSUB */
                gen_helper_neon_sub_f32(tmp, tmp, tmp2);
                break;
            case 4: /* VPADD */
                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                break;
            case 6: /* VABD */
                gen_helper_neon_abd_f32(tmp, tmp, tmp2);
                break;
            default:
                abort();
            }
            break;
        case NEON_3R_FLOAT_MULTIPLY:
            gen_helper_neon_mul_f32(tmp, tmp, tmp2);
            if (!u) {
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                if (size == 0) {
                    gen_helper_neon_add_f32(tmp, tmp, tmp2);
                } else {
                    gen_helper_neon_sub_f32(tmp, tmp2, tmp);
                }
            }
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u) {
                gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
            } else {
                if (size == 0)
                    gen_helper_neon_cge_f32(tmp, tmp, tmp2);
                else
                    gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (size == 0)
                gen_helper_neon_acge_f32(tmp, tmp, tmp2);
            else
                gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
            break;
        case NEON_3R_FLOAT_MINMAX:
            if (size == 0)
                gen_helper_neon_max_f32(tmp, tmp, tmp2);
            else
                gen_helper_neon_min_f32(tmp, tmp, tmp2);
            break;
        case NEON_3R_VRECPS_VRSQRTS:
            if (size == 0)
                gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
            else
                gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
            break;
        default:
            abort();
        }
        tcg_temp_free_i32(tmp2);

        /* Save the result.  For elementwise operations we can put it
           straight into the destination register.  For pairwise operations
           we have to be careful to avoid clobbering the source operands. */
        if (pairwise && rd == rm) {
            neon_store_scratch(pass, tmp);
        } else {
            neon_store_reg(rd, pass, tmp);
        }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift.  */
                if (op > 7) {
                    return 1;
                }
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                if (q && ((rd | rm) & 1)) {
                    return 1;
                }
                if (!u && (op == 4 || op == 6)) {
                    return 1;
                }
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4)
                    shift = shift - (1 << (size + 3));
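                /* E.g. an 8-bit VSHR #3 has a decoded shift field of
                   8 - 3 == 5; subtracting the element width
                   (1 << (size + 3)) gives -3, so the variable-shift
                   helpers used below see right shifts as negative left
                   shifts. */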
                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }
                switch (size) {
                case 0:
                    imm = (uint8_t) shift;
                    imm |= imm << 8;
                    imm |= imm << 16;
                    break;
                case 1:
                    imm = (uint16_t) shift;
                    imm |= imm << 16;
                    break;
                case 2:
                case 3:
                    imm = shift;
                    break;
                default:
                    abort();
                }

                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 0:  /* VSHR */
                        case 1:  /* VSRA */
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 4: /* VSRI */
                        case 5: /* VSHL, VSLI */
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHLU */
                            gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHL */
                            if (u) {
                                gen_helper_neon_qshl_u64(cpu_V0,
                                                         cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_qshl_s64(cpu_V0,
                                                         cpu_V0, cpu_V1);
                            }
                            break;
                        }
                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            neon_load_reg64(cpu_V1, rd + pass);
                            uint64_t mask;
                            if (shift < -63 || shift > 63) {
                                mask = 0;
                            } else {
                                if (op == 4) {
                                    mask = 0xffffffffffffffffull >> -shift;
                                } else {
                                    mask = 0xffffffffffffffffull << shift;
                                }
                            }
                            tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
                            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1.  */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 0:  /* VSHR */
                        case 1:  /* VSRA */
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 4: /* VSRI */
                        case 5: /* VSHL, VSLI */
                            switch (size) {
                            case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                            break;
                        case 6: /* VQSHLU */
                            switch (size) {
                            case 0:
                                gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
                                break;
                            case 2:
                                gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
                                break;
                            default:
                                abort();
                            }
                            break;
                        case 7: /* VQSHL */
                            GEN_NEON_INTEGER_OP(qshl);
                            break;
                        }
                        tcg_temp_free_i32(tmp2);

                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            switch (size) {
                            case 0:
                                if (op == 4)
                                    mask = 0xff >> -shift;
                                else
                                    mask = (uint8_t)(0xff << shift);
                                mask |= mask << 8;
                                mask |= mask << 16;
                                break;
                            case 1:
                                if (op == 4)
                                    mask = 0xffff >> -shift;
                                else
                                    mask = (uint16_t)(0xffff << shift);
                                mask |= mask << 16;
                                break;
                            case 2:
                                if (shift < -31 || shift > 31) {
                                    mask = 0;
                                } else {
                                    if (op == 4)
                                        mask = 0xffffffffu >> -shift;
                                    else
                                        mask = 0xffffffffu << shift;
                                }
                                break;
                            default:
                                abort();
                            }
                            tmp2 = neon_load_reg(rd, pass);
                            tcg_gen_andi_i32(tmp, tmp, mask);
                            tcg_gen_andi_i32(tmp2, tmp2, ~mask);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                int input_unsigned = (op == 8) ? !u : u;
                if (rm & 1) {
                    return 1;
                }
                shift = shift - (1 << (size + 3));
                size++;
                if (size == 3) {
                    tmp64 = tcg_const_i64(shift);
                    neon_load_reg64(cpu_V0, rm);
                    neon_load_reg64(cpu_V1, rm + 1);
                    for (pass = 0; pass < 2; pass++) {
                        TCGv_i64 in;
                        if (pass == 0) {
                            in = cpu_V0;
                        } else {
                            in = cpu_V1;
                        }
                        if (q) {
                            if (input_unsigned) {
                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
                            }
                        } else {
                            if (input_unsigned) {
                                gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
                            }
                        }
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (size == 1) {
                        imm = (uint16_t)shift;
                        imm |= imm << 16;
                    } else {
                        /* size == 2 */
                        imm = (uint32_t)shift;
                    }
                    tmp2 = tcg_const_i32(imm);
                    tmp4 = neon_load_reg(rm + 1, 0);
                    tmp5 = neon_load_reg(rm + 1, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rm, 0);
                        } else {
                            tmp = tmp4;
                        }
                        gen_neon_shift_narrow(size, tmp, tmp2, q,
                                              input_unsigned);
                        if (pass == 0) {
                            tmp3 = neon_load_reg(rm, 1);
                        } else {
                            tmp3 = tmp5;
                        }
                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                              input_unsigned);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(tmp3);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i32(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL, VMOVL */
                if (q || (rd & 1)) {
                    return 1;
                }
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow
                         * input.  */
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else if (size == 1) {
                                imm = 0xffff >> (16 - shift);
                            } else {
                                /* size == 2 */
                                imm = 0xffffffff >> (32 - shift);
                            }
                            if (size < 2) {
                                imm64 = imm | (((uint64_t)imm) << 32);
                            } else {
                                imm64 = imm;
                            }
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point.  */
                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
                    return 1;
                }
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (!(op & 1)) {
                        if (u)
                            gen_vfp_ulto(0, shift);
                        else
                            gen_vfp_slto(0, shift);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift);
                        else
                            gen_vfp_tosl(0, shift);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            int invert;

            op = (insn >> 8) & 0xf;
            /* One register and immediate.  */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                break;
            case 15:
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert)
                imm = ~imm;

            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                if (op & 1 && op < 12) {
                    tmp = neon_load_reg(rd, pass);
                    if (invert) {
                        /* The immediate value has already been inverted, so
                           BIC becomes AND.  */
                        tcg_gen_andi_i32(tmp, tmp, imm);
                    } else {
                        tcg_gen_ori_i32(tmp, tmp, imm);
                    }
                } else {
                    /* VMOV, VMVN.  */
                    tmp = tcg_temp_new_i32();
                    if (op == 14 && invert) {
                        uint32_t val;
                        val = 0;
                        for (n = 0; n < 4; n++) {
                            if (imm & (1 << (n + (pass & 1) * 4)))
                                val |= 0xff << (n * 8);
                        }
                        tcg_gen_movi_i32(tmp, val);
                    } else {
                        tcg_gen_movi_i32(tmp, imm);
                    }
                }
                neon_store_reg(rd, pass, tmp);
            }
        }
    } else { /* (insn & 0x00800010 == 0x00800000) */
        if (size != 3) {
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths.  */
                int src1_wide;
                int src2_wide;
                int prewiden;
                /* prewiden, src1_wide, src2_wide */
                static const int neon_3reg_wide[16][3] = {
                    {1, 0, 0}, /* VADDL */
                    {1, 1, 0}, /* VADDW */
                    {1, 0, 0}, /* VSUBL */
                    {1, 1, 0}, /* VSUBW */
                    {0, 1, 1}, /* VADDHN */
                    {0, 0, 0}, /* VABAL */
                    {0, 1, 1}, /* VSUBHN */
                    {0, 0, 0}, /* VABDL */
                    {0, 0, 0}, /* VMLAL */
                    {0, 0, 0}, /* VQDMLAL */
                    {0, 0, 0}, /* VMLSL */
                    {0, 0, 0}, /* VQDMLSL */
                    {0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0}, /* VQDMULL */
                    {0, 0, 0} /* Polynomial VMULL */
                };

                prewiden = neon_3reg_wide[op][0];
                src1_wide = neon_3reg_wide[op][1];
                src2_wide = neon_3reg_wide[op][2];

                if (size == 0 && (op == 9 || op == 11 || op == 13))
                    return 1;
                /* Avoid overlapping operands.  Wide source operands are
                   always aligned so will never overlap with wide
                   destinations in problematic ways.  */
                if (rd == rm && !src2_wide) {
                    tmp = neon_load_reg(rm, 1);
                    neon_store_scratch(2, tmp);
                } else if (rd == rn && !src1_wide) {
                    tmp = neon_load_reg(rn, 1);
                    neon_store_scratch(2, tmp);
                }
                for (pass = 0; pass < 2; pass++) {
                    if (src1_wide) {
                        neon_load_reg64(cpu_V0, rn + pass);
                    } else {
                        if (pass == 1 && rd == rn) {
                            tmp = neon_load_scratch(2);
                        } else {
                            tmp = neon_load_reg(rn, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V0, tmp, size, u);
                        }
                    }
                    if (src2_wide) {
                        neon_load_reg64(cpu_V1, rm + pass);
                    } else {
                        if (pass == 1 && rd == rm) {
                            tmp2 = neon_load_scratch(2);
                        } else {
                            tmp2 = neon_load_reg(rm, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V1, tmp2, size, u);
                        }
                    }
5233 gen_neon_addl(size
);
5235 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5236 gen_neon_subl(size
);
5238 case 5: case 7: /* VABAL, VABDL */
5239 switch ((size
<< 1) | u
) {
5241 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
5244 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
5247 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
5250 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
5253 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
5256 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
5260 tcg_temp_free_i32(tmp2
);
5261 tcg_temp_free_i32(tmp
);
5263 case 8: case 9: case 10: case 11: case 12: case 13:
5264 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5265 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5267 case 14: /* Polynomial VMULL */
5268 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
5269 tcg_temp_free_i32(tmp2
);
5270 tcg_temp_free_i32(tmp
);
5272 default: /* 15 is RESERVED. */
                    if (op == 13) {
                        /* VQDMULL */
                        gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 5 || (op >= 8 && op <= 11)) {
                        /* Accumulate.  */
                        neon_load_reg64(cpu_V1, rd + pass);
                        switch (op) {
                        case 10: /* VMLSL */
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 5: case 8: /* VABAL, VMLAL */
                            gen_neon_addl(size);
                            break;
                        case 9: case 11: /* VQDMLAL, VQDMLSL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 11) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 4 || op == 6) {
                        /* Narrowing operation.  */
                        tmp = tcg_temp_new_i32();
                        if (!u) {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        } else {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        }
                        if (pass == 0) {
                            tmp3 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp3);
                            neon_store_reg(rd, 1, tmp);
                        }
                    } else {
                        /* Write back the result.  */
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                }
            } else {
                /* Two registers and a scalar.  */
                switch (op) {
                case 0: /* Integer VMLA scalar */
                case 1: /* Float VMLA scalar */
                case 4: /* Integer VMLS scalar */
                case 5: /* Floating point VMLS scalar */
                case 8: /* Integer VMUL scalar */
                case 9: /* Floating point VMUL scalar */
                case 12: /* VQDMULH scalar */
                case 13: /* VQRDMULH scalar */
                    tmp = neon_get_scalar(size, rm);
                    neon_store_scratch(0, tmp);
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
                        tmp = neon_load_scratch(0);
                        tmp2 = neon_load_reg(rn, pass);
                        if (op == 12) {
                            if (size == 1) {
                                gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
                            } else {
                                gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
                            }
                        } else if (op == 13) {
                            if (size == 1) {
                                gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
                            } else {
                                gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
                            }
                        } else if (op & 1) {
                            gen_helper_neon_mul_f32(tmp, tmp, tmp2);
                        } else {
                            switch (size) {
                            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                        }
                        tcg_temp_free_i32(tmp2);
                        if (op < 8) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            switch (op) {
                            case 0:
                                gen_neon_add(size, tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                                break;
                            case 4:
                                gen_neon_rsb(size, tmp, tmp2);
                                break;
                            case 5:
                                gen_helper_neon_sub_f32(tmp, tmp2, tmp);
                                break;
                            default:
                                abort();
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                    break;
                case 2: /* VMLAL scalar */
                case 3: /* VQDMLAL scalar */
                case 6: /* VMLSL scalar */
                case 7: /* VQDMLSL scalar */
                case 10: /* VMULL scalar */
                case 11: /* VQDMULL scalar */
                    if (size == 0 && (op == 3 || op == 7 || op == 11))
                        return 1;

                    tmp2 = neon_get_scalar(size, rm);
                    /* We need a copy of tmp2 because gen_neon_mull
                     * deletes it during pass 0.  */
                    tmp4 = tcg_temp_new_i32();
                    tcg_gen_mov_i32(tmp4, tmp2);
                    tmp3 = neon_load_reg(rn, 1);

                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rn, 0);
                        } else {
                            tmp = tmp3;
                            tmp2 = tmp4;
                        }
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        if (op != 11) {
                            neon_load_reg64(cpu_V1, rd + pass);
                        }
                        switch (op) {
                        case 6:
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 2:
                            gen_neon_addl(size);
                            break;
                        case 3: case 7:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 7) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        case 10:
                            /* no-op */
                            break;
                        case 11:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                default: /* 14 and 15 are RESERVED */
                    return 1;
                }
            }
5466 } else { /* size == 3 */
5469 imm
= (insn
>> 8) & 0xf;
5475 neon_load_reg64(cpu_V0
, rn
);
5477 neon_load_reg64(cpu_V1
, rn
+ 1);
5479 } else if (imm
== 8) {
5480 neon_load_reg64(cpu_V0
, rn
+ 1);
5482 neon_load_reg64(cpu_V1
, rm
);
5485 tmp64
= tcg_temp_new_i64();
5487 neon_load_reg64(cpu_V0
, rn
);
5488 neon_load_reg64(tmp64
, rn
+ 1);
5490 neon_load_reg64(cpu_V0
, rn
+ 1);
5491 neon_load_reg64(tmp64
, rm
);
5493 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5494 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5495 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5497 neon_load_reg64(cpu_V1
, rm
);
5499 neon_load_reg64(cpu_V1
, rm
+ 1);
5502 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5503 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5504 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5505 tcg_temp_free_i64(tmp64
);
5508 neon_load_reg64(cpu_V0
, rn
);
5509 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5510 neon_load_reg64(cpu_V1
, rm
);
5511 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5512 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5514 neon_store_reg64(cpu_V0
, rd
);
5516 neon_store_reg64(cpu_V1
, rd
+ 1);
5518 } else if ((insn
& (1 << 11)) == 0) {
5519 /* Two register misc. */
5520 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5521 size
= (insn
>> 18) & 3;
5523 case 0: /* VREV64 */
5526 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5527 tmp
= neon_load_reg(rm
, pass
* 2);
5528 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
5530 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5531 case 1: gen_swap_half(tmp
); break;
5532 case 2: /* no-op */ break;
5535 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
5537 neon_store_reg(rd
, pass
* 2, tmp2
);
5540 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
5541 case 1: gen_swap_half(tmp2
); break;
5544 neon_store_reg(rd
, pass
* 2, tmp2
);
5548 case 4: case 5: /* VPADDL */
5549 case 12: case 13: /* VPADAL */
5552 for (pass
= 0; pass
< q
+ 1; pass
++) {
5553 tmp
= neon_load_reg(rm
, pass
* 2);
5554 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5555 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5556 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5558 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5559 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5560 case 2: tcg_gen_add_i64(CPU_V001
); break;
5565 neon_load_reg64(cpu_V1
, rd
+ pass
);
5566 gen_neon_addl(size
);
5568 neon_store_reg64(cpu_V0
, rd
+ pass
);
5574 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
5575 tmp
= neon_load_reg(rm
, n
);
5576 tmp2
= neon_load_reg(rd
, n
+ 1);
5577 neon_store_reg(rm
, n
, tmp2
);
5578 neon_store_reg(rd
, n
+ 1, tmp
);
5585 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
5590 if (gen_neon_zip(rd
, rm
, size
, q
)) {
5594 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5598 for (pass
= 0; pass
< 2; pass
++) {
5599 neon_load_reg64(cpu_V0
, rm
+ pass
);
5600 tmp
= tcg_temp_new_i32();
5601 gen_neon_narrow_op(op
== 36, q
, size
, tmp
, cpu_V0
);
5605 neon_store_reg(rd
, 0, tmp2
);
5606 neon_store_reg(rd
, 1, tmp
);
5610 case 38: /* VSHLL */
5613 tmp
= neon_load_reg(rm
, 0);
5614 tmp2
= neon_load_reg(rm
, 1);
5615 for (pass
= 0; pass
< 2; pass
++) {
5618 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
5619 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
5620 neon_store_reg64(cpu_V0
, rd
+ pass
);
5623 case 44: /* VCVT.F16.F32 */
5624 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
5626 tmp
= tcg_temp_new_i32();
5627 tmp2
= tcg_temp_new_i32();
5628 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
5629 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5630 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
5631 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5632 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5633 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5634 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
5635 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5636 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
5637 neon_store_reg(rd
, 0, tmp2
);
5638 tmp2
= tcg_temp_new_i32();
5639 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5640 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5641 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5642 neon_store_reg(rd
, 1, tmp2
);
5643 tcg_temp_free_i32(tmp
);
5645 case 46: /* VCVT.F32.F16 */
5646 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
))
5648 tmp3
= tcg_temp_new_i32();
5649 tmp
= neon_load_reg(rm
, 0);
5650 tmp2
= neon_load_reg(rm
, 1);
5651 tcg_gen_ext16u_i32(tmp3
, tmp
);
5652 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5653 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
5654 tcg_gen_shri_i32(tmp3
, tmp
, 16);
5655 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5656 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
5657 tcg_temp_free_i32(tmp
);
5658 tcg_gen_ext16u_i32(tmp3
, tmp2
);
5659 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5660 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
5661 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
5662 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5663 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
5664 tcg_temp_free_i32(tmp2
);
5665 tcg_temp_free_i32(tmp3
);
5669 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5670 if (op
== 30 || op
== 31 || op
>= 58) {
5671 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
5672 neon_reg_offset(rm
, pass
));
5675 tmp
= neon_load_reg(rm
, pass
);
5678 case 1: /* VREV32 */
5680 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5681 case 1: gen_swap_half(tmp
); break;
5685 case 2: /* VREV16 */
5692 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
5693 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
5694 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
5700 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
5701 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
5702 case 2: gen_helper_clz(tmp
, tmp
); break;
5709 gen_helper_neon_cnt_u8(tmp
, tmp
);
5714 tcg_gen_not_i32(tmp
, tmp
);
5716 case 14: /* VQABS */
5718 case 0: gen_helper_neon_qabs_s8(tmp
, tmp
); break;
5719 case 1: gen_helper_neon_qabs_s16(tmp
, tmp
); break;
5720 case 2: gen_helper_neon_qabs_s32(tmp
, tmp
); break;
5724 case 15: /* VQNEG */
5726 case 0: gen_helper_neon_qneg_s8(tmp
, tmp
); break;
5727 case 1: gen_helper_neon_qneg_s16(tmp
, tmp
); break;
5728 case 2: gen_helper_neon_qneg_s32(tmp
, tmp
); break;
5732 case 16: case 19: /* VCGT #0, VCLE #0 */
5733 tmp2
= tcg_const_i32(0);
5735 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
5736 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
5737 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
5740 tcg_temp_free(tmp2
);
5742 tcg_gen_not_i32(tmp
, tmp
);
5744 case 17: case 20: /* VCGE #0, VCLT #0 */
5745 tmp2
= tcg_const_i32(0);
5747 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
5748 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
5749 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
5752 tcg_temp_free(tmp2
);
5754 tcg_gen_not_i32(tmp
, tmp
);
5756 case 18: /* VCEQ #0 */
5757 tmp2
= tcg_const_i32(0);
5759 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
5760 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
5761 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
5764 tcg_temp_free(tmp2
);
5768 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
5769 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
5770 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
5777 tmp2
= tcg_const_i32(0);
5778 gen_neon_rsb(size
, tmp
, tmp2
);
5779 tcg_temp_free(tmp2
);
5781 case 24: /* Float VCGT #0 */
5782 tmp2
= tcg_const_i32(0);
5783 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
);
5784 tcg_temp_free(tmp2
);
5786 case 25: /* Float VCGE #0 */
5787 tmp2
= tcg_const_i32(0);
5788 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
);
5789 tcg_temp_free(tmp2
);
5791 case 26: /* Float VCEQ #0 */
5792 tmp2
= tcg_const_i32(0);
5793 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
);
5794 tcg_temp_free(tmp2
);
5796 case 27: /* Float VCLE #0 */
5797 tmp2
= tcg_const_i32(0);
5798 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
);
5799 tcg_temp_free(tmp2
);
5801 case 28: /* Float VCLT #0 */
5802 tmp2
= tcg_const_i32(0);
5803 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
);
5804 tcg_temp_free(tmp2
);
5806 case 30: /* Float VABS */
5809 case 31: /* Float VNEG */
5813 tmp2
= neon_load_reg(rd
, pass
);
5814 neon_store_reg(rm
, pass
, tmp2
);
5817 tmp2
= neon_load_reg(rd
, pass
);
5819 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
5820 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
5824 neon_store_reg(rm
, pass
, tmp2
);
5826 case 56: /* Integer VRECPE */
5827 gen_helper_recpe_u32(tmp
, tmp
, cpu_env
);
5829 case 57: /* Integer VRSQRTE */
5830 gen_helper_rsqrte_u32(tmp
, tmp
, cpu_env
);
5832 case 58: /* Float VRECPE */
5833 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5835 case 59: /* Float VRSQRTE */
5836 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5838 case 60: /* VCVT.F32.S32 */
5841 case 61: /* VCVT.F32.U32 */
5844 case 62: /* VCVT.S32.F32 */
5847 case 63: /* VCVT.U32.F32 */
5851 /* Reserved: 21, 29, 39-56 */
5854 if (op
== 30 || op
== 31 || op
>= 58) {
5855 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
5856 neon_reg_offset(rd
, pass
));
5858 neon_store_reg(rd
, pass
, tmp
);
5863 } else if ((insn
& (1 << 10)) == 0) {
5865 int n
= ((insn
>> 5) & 0x18) + 8;
5866 if (insn
& (1 << 6)) {
5867 tmp
= neon_load_reg(rd
, 0);
5869 tmp
= tcg_temp_new_i32();
5870 tcg_gen_movi_i32(tmp
, 0);
5872 tmp2
= neon_load_reg(rm
, 0);
5873 tmp4
= tcg_const_i32(rn
);
5874 tmp5
= tcg_const_i32(n
);
5875 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, tmp4
, tmp5
);
5876 tcg_temp_free_i32(tmp
);
5877 if (insn
& (1 << 6)) {
5878 tmp
= neon_load_reg(rd
, 1);
5880 tmp
= tcg_temp_new_i32();
5881 tcg_gen_movi_i32(tmp
, 0);
5883 tmp3
= neon_load_reg(rm
, 1);
5884 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, tmp4
, tmp5
);
5885 tcg_temp_free_i32(tmp5
);
5886 tcg_temp_free_i32(tmp4
);
5887 neon_store_reg(rd
, 0, tmp2
);
5888 neon_store_reg(rd
, 1, tmp3
);
5889 tcg_temp_free_i32(tmp
);
5890 } else if ((insn
& 0x380) == 0) {
5892 if (insn
& (1 << 19)) {
5893 tmp
= neon_load_reg(rm
, 1);
5895 tmp
= neon_load_reg(rm
, 0);
5897 if (insn
& (1 << 16)) {
5898 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
5899 } else if (insn
& (1 << 17)) {
5900 if ((insn
>> 18) & 1)
5901 gen_neon_dup_high16(tmp
);
5903 gen_neon_dup_low16(tmp
);
5905 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5906 tmp2
= tcg_temp_new_i32();
5907 tcg_gen_mov_i32(tmp2
, tmp
);
5908 neon_store_reg(rd
, pass
, tmp2
);
5910 tcg_temp_free_i32(tmp
);
5919 static int disas_cp14_read(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
5921 int crn
= (insn
>> 16) & 0xf;
5922 int crm
= insn
& 0xf;
5923 int op1
= (insn
>> 21) & 7;
5924 int op2
= (insn
>> 5) & 7;
5925 int rt
= (insn
>> 12) & 0xf;
5928 /* Minimal set of debug registers, since we don't support debug */
5929 if (op1
== 0 && crn
== 0 && op2
== 0) {
5932 /* DBGDIDR: just RAZ. In particular this means the
5933 * "debug architecture version" bits will read as
5934 * a reserved value, which should cause Linux to
5935 * not try to use the debug hardware.
5937 tmp
= tcg_const_i32(0);
5938 store_reg(s
, rt
, tmp
);
5942 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
5943 * don't implement memory mapped debug components
5945 if (ENABLE_ARCH_7
) {
5946 tmp
= tcg_const_i32(0);
5947 store_reg(s
, rt
, tmp
);
5956 if (arm_feature(env
, ARM_FEATURE_THUMB2EE
)) {
5957 if (op1
== 6 && crn
== 0 && crm
== 0 && op2
== 0) {
5961 tmp
= load_cpu_field(teecr
);
5962 store_reg(s
, rt
, tmp
);
5965 if (op1
== 6 && crn
== 1 && crm
== 0 && op2
== 0) {
5967 if (IS_USER(s
) && (env
->teecr
& 1))
5969 tmp
= load_cpu_field(teehbr
);
5970 store_reg(s
, rt
, tmp
);
5974 fprintf(stderr
, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5975 op1
, crn
, crm
, op2
);
5979 static int disas_cp14_write(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
5981 int crn
= (insn
>> 16) & 0xf;
5982 int crm
= insn
& 0xf;
5983 int op1
= (insn
>> 21) & 7;
5984 int op2
= (insn
>> 5) & 7;
5985 int rt
= (insn
>> 12) & 0xf;
5988 if (arm_feature(env
, ARM_FEATURE_THUMB2EE
)) {
5989 if (op1
== 6 && crn
== 0 && crm
== 0 && op2
== 0) {
5993 tmp
= load_reg(s
, rt
);
5994 gen_helper_set_teecr(cpu_env
, tmp
);
5995 tcg_temp_free_i32(tmp
);
5998 if (op1
== 6 && crn
== 1 && crm
== 0 && op2
== 0) {
6000 if (IS_USER(s
) && (env
->teecr
& 1))
6002 tmp
= load_reg(s
, rt
);
6003 store_cpu_field(tmp
, teehbr
);
6007 fprintf(stderr
, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6008 op1
, crn
, crm
, op2
);
6012 static int disas_coproc_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
6016 cpnum
= (insn
>> 8) & 0xf;
6017 if (arm_feature(env
, ARM_FEATURE_XSCALE
)
6018 && ((env
->cp15
.c15_cpar
^ 0x3fff) & (1 << cpnum
)))
6024 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6025 return disas_iwmmxt_insn(env
, s
, insn
);
6026 } else if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
6027 return disas_dsp_insn(env
, s
, insn
);
6032 return disas_vfp_insn (env
, s
, insn
);
6034 /* Coprocessors 7-15 are architecturally reserved by ARM.
6035 Unfortunately Intel decided to ignore this. */
6036 if (arm_feature(env
, ARM_FEATURE_XSCALE
))
6038 if (insn
& (1 << 20))
6039 return disas_cp14_read(env
, s
, insn
);
6041 return disas_cp14_write(env
, s
, insn
);
6043 return disas_cp15_insn (env
, s
, insn
);
6046 /* Unknown coprocessor. See if the board has hooked it. */
6047 return disas_cp_insn (env
, s
, insn
);
6052 /* Store a 64-bit value to a register pair. Clobbers val. */
6053 static void gen_storeq_reg(DisasContext
*s
, int rlow
, int rhigh
, TCGv_i64 val
)
6056 tmp
= tcg_temp_new_i32();
6057 tcg_gen_trunc_i64_i32(tmp
, val
);
6058 store_reg(s
, rlow
, tmp
);
6059 tmp
= tcg_temp_new_i32();
6060 tcg_gen_shri_i64(val
, val
, 32);
6061 tcg_gen_trunc_i64_i32(tmp
, val
);
6062 store_reg(s
, rhigh
, tmp
);
6065 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6066 static void gen_addq_lo(DisasContext
*s
, TCGv_i64 val
, int rlow
)
6071 /* Load value and extend to 64 bits. */
6072 tmp
= tcg_temp_new_i64();
6073 tmp2
= load_reg(s
, rlow
);
6074 tcg_gen_extu_i32_i64(tmp
, tmp2
);
6075 tcg_temp_free_i32(tmp2
);
6076 tcg_gen_add_i64(val
, val
, tmp
);
6077 tcg_temp_free_i64(tmp
);
6080 /* load and add a 64-bit value from a register pair. */
6081 static void gen_addq(DisasContext
*s
, TCGv_i64 val
, int rlow
, int rhigh
)
6087 /* Load 64-bit value rd:rn. */
6088 tmpl
= load_reg(s
, rlow
);
6089 tmph
= load_reg(s
, rhigh
);
6090 tmp
= tcg_temp_new_i64();
6091 tcg_gen_concat_i32_i64(tmp
, tmpl
, tmph
);
6092 tcg_temp_free_i32(tmpl
);
6093 tcg_temp_free_i32(tmph
);
6094 tcg_gen_add_i64(val
, val
, tmp
);
6095 tcg_temp_free_i64(tmp
);
6098 /* Set N and Z flags from a 64-bit value. */
6099 static void gen_logicq_cc(TCGv_i64 val
)
6101 TCGv tmp
= tcg_temp_new_i32();
6102 gen_helper_logicq_cc(tmp
, val
);
6104 tcg_temp_free_i32(tmp
);
6107 /* Load/Store exclusive instructions are implemented by remembering
6108 the value/address loaded, and seeing if these are the same
6109 when the store is performed. This should be is sufficient to implement
6110 the architecturally mandated semantics, and avoids having to monitor
6113 In system emulation mode only one CPU will be running at once, so
6114 this sequence is effectively atomic. In user emulation mode we
6115 throw an exception and handle the atomic operation elsewhere. */
6116 static void gen_load_exclusive(DisasContext
*s
, int rt
, int rt2
,
6117 TCGv addr
, int size
)
6123 tmp
= gen_ld8u(addr
, IS_USER(s
));
6126 tmp
= gen_ld16u(addr
, IS_USER(s
));
6130 tmp
= gen_ld32(addr
, IS_USER(s
));
6135 tcg_gen_mov_i32(cpu_exclusive_val
, tmp
);
6136 store_reg(s
, rt
, tmp
);
6138 TCGv tmp2
= tcg_temp_new_i32();
6139 tcg_gen_addi_i32(tmp2
, addr
, 4);
6140 tmp
= gen_ld32(tmp2
, IS_USER(s
));
6141 tcg_temp_free_i32(tmp2
);
6142 tcg_gen_mov_i32(cpu_exclusive_high
, tmp
);
6143 store_reg(s
, rt2
, tmp
);
6145 tcg_gen_mov_i32(cpu_exclusive_addr
, addr
);
6148 static void gen_clrex(DisasContext
*s
)
6150 tcg_gen_movi_i32(cpu_exclusive_addr
, -1);
6153 #ifdef CONFIG_USER_ONLY
6154 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
6155 TCGv addr
, int size
)
6157 tcg_gen_mov_i32(cpu_exclusive_test
, addr
);
6158 tcg_gen_movi_i32(cpu_exclusive_info
,
6159 size
| (rd
<< 4) | (rt
<< 8) | (rt2
<< 12));
6160 gen_exception_insn(s
, 4, EXCP_STREX
);
6163 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
6164 TCGv addr
, int size
)
6170 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6176 fail_label
= gen_new_label();
6177 done_label
= gen_new_label();
6178 tcg_gen_brcond_i32(TCG_COND_NE
, addr
, cpu_exclusive_addr
, fail_label
);
6181 tmp
= gen_ld8u(addr
, IS_USER(s
));
6184 tmp
= gen_ld16u(addr
, IS_USER(s
));
6188 tmp
= gen_ld32(addr
, IS_USER(s
));
6193 tcg_gen_brcond_i32(TCG_COND_NE
, tmp
, cpu_exclusive_val
, fail_label
);
6194 tcg_temp_free_i32(tmp
);
6196 TCGv tmp2
= tcg_temp_new_i32();
6197 tcg_gen_addi_i32(tmp2
, addr
, 4);
6198 tmp
= gen_ld32(tmp2
, IS_USER(s
));
6199 tcg_temp_free_i32(tmp2
);
6200 tcg_gen_brcond_i32(TCG_COND_NE
, tmp
, cpu_exclusive_high
, fail_label
);
6201 tcg_temp_free_i32(tmp
);
6203 tmp
= load_reg(s
, rt
);
6206 gen_st8(tmp
, addr
, IS_USER(s
));
6209 gen_st16(tmp
, addr
, IS_USER(s
));
6213 gen_st32(tmp
, addr
, IS_USER(s
));
6219 tcg_gen_addi_i32(addr
, addr
, 4);
6220 tmp
= load_reg(s
, rt2
);
6221 gen_st32(tmp
, addr
, IS_USER(s
));
6223 tcg_gen_movi_i32(cpu_R
[rd
], 0);
6224 tcg_gen_br(done_label
);
6225 gen_set_label(fail_label
);
6226 tcg_gen_movi_i32(cpu_R
[rd
], 1);
6227 gen_set_label(done_label
);
6228 tcg_gen_movi_i32(cpu_exclusive_addr
, -1);
6232 static void disas_arm_insn(CPUState
* env
, DisasContext
*s
)
6234 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
6241 insn
= ldl_code(s
->pc
);
6244 /* M variants do not implement ARM mode. */
6249 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6250 * choose to UNDEF. In ARMv5 and above the space is used
6251 * for miscellaneous unconditional instructions.
6255 /* Unconditional instructions. */
6256 if (((insn
>> 25) & 7) == 1) {
6257 /* NEON Data processing. */
6258 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6261 if (disas_neon_data_insn(env
, s
, insn
))
6265 if ((insn
& 0x0f100000) == 0x04000000) {
6266 /* NEON load/store. */
6267 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6270 if (disas_neon_ls_insn(env
, s
, insn
))
6274 if (((insn
& 0x0f30f000) == 0x0510f000) ||
6275 ((insn
& 0x0f30f010) == 0x0710f000)) {
6276 if ((insn
& (1 << 22)) == 0) {
6278 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6282 /* Otherwise PLD; v5TE+ */
6286 if (((insn
& 0x0f70f000) == 0x0450f000) ||
6287 ((insn
& 0x0f70f010) == 0x0650f000)) {
6289 return; /* PLI; V7 */
6291 if (((insn
& 0x0f700000) == 0x04100000) ||
6292 ((insn
& 0x0f700010) == 0x06100000)) {
6293 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6296 return; /* v7MP: Unallocated memory hint: must NOP */
6299 if ((insn
& 0x0ffffdff) == 0x01010000) {
6302 if (insn
& (1 << 9)) {
6303 /* BE8 mode not implemented. */
6307 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
6308 switch ((insn
>> 4) & 0xf) {
6317 /* We don't emulate caches so these are a no-op. */
6322 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
6328 op1
= (insn
& 0x1f);
6329 addr
= tcg_temp_new_i32();
6330 tmp
= tcg_const_i32(op1
);
6331 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
6332 tcg_temp_free_i32(tmp
);
6333 i
= (insn
>> 23) & 3;
6335 case 0: offset
= -4; break; /* DA */
6336 case 1: offset
= 0; break; /* IA */
6337 case 2: offset
= -8; break; /* DB */
6338 case 3: offset
= 4; break; /* IB */
6342 tcg_gen_addi_i32(addr
, addr
, offset
);
6343 tmp
= load_reg(s
, 14);
6344 gen_st32(tmp
, addr
, 0);
6345 tmp
= load_cpu_field(spsr
);
6346 tcg_gen_addi_i32(addr
, addr
, 4);
6347 gen_st32(tmp
, addr
, 0);
6348 if (insn
& (1 << 21)) {
6349 /* Base writeback. */
6351 case 0: offset
= -8; break;
6352 case 1: offset
= 4; break;
6353 case 2: offset
= -4; break;
6354 case 3: offset
= 0; break;
6358 tcg_gen_addi_i32(addr
, addr
, offset
);
6359 tmp
= tcg_const_i32(op1
);
6360 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
6361 tcg_temp_free_i32(tmp
);
6362 tcg_temp_free_i32(addr
);
6364 tcg_temp_free_i32(addr
);
6367 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
6373 rn
= (insn
>> 16) & 0xf;
6374 addr
= load_reg(s
, rn
);
6375 i
= (insn
>> 23) & 3;
6377 case 0: offset
= -4; break; /* DA */
6378 case 1: offset
= 0; break; /* IA */
6379 case 2: offset
= -8; break; /* DB */
6380 case 3: offset
= 4; break; /* IB */
6384 tcg_gen_addi_i32(addr
, addr
, offset
);
6385 /* Load PC into tmp and CPSR into tmp2. */
6386 tmp
= gen_ld32(addr
, 0);
6387 tcg_gen_addi_i32(addr
, addr
, 4);
6388 tmp2
= gen_ld32(addr
, 0);
6389 if (insn
& (1 << 21)) {
6390 /* Base writeback. */
6392 case 0: offset
= -8; break;
6393 case 1: offset
= 4; break;
6394 case 2: offset
= -4; break;
6395 case 3: offset
= 0; break;
6399 tcg_gen_addi_i32(addr
, addr
, offset
);
6400 store_reg(s
, rn
, addr
);
6402 tcg_temp_free_i32(addr
);
6404 gen_rfe(s
, tmp
, tmp2
);
6406 } else if ((insn
& 0x0e000000) == 0x0a000000) {
6407 /* branch link and change to thumb (blx <offset>) */
6410 val
= (uint32_t)s
->pc
;
6411 tmp
= tcg_temp_new_i32();
6412 tcg_gen_movi_i32(tmp
, val
);
6413 store_reg(s
, 14, tmp
);
6414 /* Sign-extend the 24-bit offset */
6415 offset
= (((int32_t)insn
) << 8) >> 8;
6416 /* offset * 4 + bit24 * 2 + (thumb bit) */
6417 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
6418 /* pipeline offset */
6420 /* protected by ARCH(5); above, near the start of uncond block */
6423 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
6424 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6425 /* iWMMXt register transfer. */
6426 if (env
->cp15
.c15_cpar
& (1 << 1))
6427 if (!disas_iwmmxt_insn(env
, s
, insn
))
6430 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
6431 /* Coprocessor double register transfer. */
6433 } else if ((insn
& 0x0f000010) == 0x0e000010) {
6434 /* Additional coprocessor register transfer. */
6435 } else if ((insn
& 0x0ff10020) == 0x01000000) {
6438 /* cps (privileged) */
6442 if (insn
& (1 << 19)) {
6443 if (insn
& (1 << 8))
6445 if (insn
& (1 << 7))
6447 if (insn
& (1 << 6))
6449 if (insn
& (1 << 18))
6452 if (insn
& (1 << 17)) {
6454 val
|= (insn
& 0x1f);
6457 gen_set_psr_im(s
, mask
, 0, val
);
6464 /* if not always execute, we generate a conditional jump to
6466 s
->condlabel
= gen_new_label();
6467 gen_test_cc(cond
^ 1, s
->condlabel
);
6470 if ((insn
& 0x0f900000) == 0x03000000) {
6471 if ((insn
& (1 << 21)) == 0) {
6473 rd
= (insn
>> 12) & 0xf;
6474 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
6475 if ((insn
& (1 << 22)) == 0) {
6477 tmp
= tcg_temp_new_i32();
6478 tcg_gen_movi_i32(tmp
, val
);
6481 tmp
= load_reg(s
, rd
);
6482 tcg_gen_ext16u_i32(tmp
, tmp
);
6483 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
6485 store_reg(s
, rd
, tmp
);
6487 if (((insn
>> 12) & 0xf) != 0xf)
6489 if (((insn
>> 16) & 0xf) == 0) {
6490 gen_nop_hint(s
, insn
& 0xff);
6492 /* CPSR = immediate */
6494 shift
= ((insn
>> 8) & 0xf) * 2;
6496 val
= (val
>> shift
) | (val
<< (32 - shift
));
6497 i
= ((insn
& (1 << 22)) != 0);
6498 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
6502 } else if ((insn
& 0x0f900000) == 0x01000000
6503 && (insn
& 0x00000090) != 0x00000090) {
6504 /* miscellaneous instructions */
6505 op1
= (insn
>> 21) & 3;
6506 sh
= (insn
>> 4) & 0xf;
6509 case 0x0: /* move program status register */
6512 tmp
= load_reg(s
, rm
);
6513 i
= ((op1
& 2) != 0);
6514 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
6518 rd
= (insn
>> 12) & 0xf;
6522 tmp
= load_cpu_field(spsr
);
6524 tmp
= tcg_temp_new_i32();
6525 gen_helper_cpsr_read(tmp
);
6527 store_reg(s
, rd
, tmp
);
6532 /* branch/exchange thumb (bx). */
6534 tmp
= load_reg(s
, rm
);
6536 } else if (op1
== 3) {
6539 rd
= (insn
>> 12) & 0xf;
6540 tmp
= load_reg(s
, rm
);
6541 gen_helper_clz(tmp
, tmp
);
6542 store_reg(s
, rd
, tmp
);
6550 /* Trivial implementation equivalent to bx. */
6551 tmp
= load_reg(s
, rm
);
6562 /* branch link/exchange thumb (blx) */
6563 tmp
= load_reg(s
, rm
);
6564 tmp2
= tcg_temp_new_i32();
6565 tcg_gen_movi_i32(tmp2
, s
->pc
);
6566 store_reg(s
, 14, tmp2
);
6569 case 0x5: /* saturating add/subtract */
6571 rd
= (insn
>> 12) & 0xf;
6572 rn
= (insn
>> 16) & 0xf;
6573 tmp
= load_reg(s
, rm
);
6574 tmp2
= load_reg(s
, rn
);
6576 gen_helper_double_saturate(tmp2
, tmp2
);
6578 gen_helper_sub_saturate(tmp
, tmp
, tmp2
);
6580 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
6581 tcg_temp_free_i32(tmp2
);
6582 store_reg(s
, rd
, tmp
);
6585 /* SMC instruction (op1 == 3)
6586 and undefined instructions (op1 == 0 || op1 == 2)
6593 gen_exception_insn(s
, 4, EXCP_BKPT
);
6595 case 0x8: /* signed multiply */
6600 rs
= (insn
>> 8) & 0xf;
6601 rn
= (insn
>> 12) & 0xf;
6602 rd
= (insn
>> 16) & 0xf;
6604 /* (32 * 16) >> 16 */
6605 tmp
= load_reg(s
, rm
);
6606 tmp2
= load_reg(s
, rs
);
6608 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
6611 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6612 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
6613 tmp
= tcg_temp_new_i32();
6614 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6615 tcg_temp_free_i64(tmp64
);
6616 if ((sh
& 2) == 0) {
6617 tmp2
= load_reg(s
, rn
);
6618 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6619 tcg_temp_free_i32(tmp2
);
6621 store_reg(s
, rd
, tmp
);
6624 tmp
= load_reg(s
, rm
);
6625 tmp2
= load_reg(s
, rs
);
6626 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6627 tcg_temp_free_i32(tmp2
);
6629 tmp64
= tcg_temp_new_i64();
6630 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6631 tcg_temp_free_i32(tmp
);
6632 gen_addq(s
, tmp64
, rn
, rd
);
6633 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6634 tcg_temp_free_i64(tmp64
);
6637 tmp2
= load_reg(s
, rn
);
6638 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6639 tcg_temp_free_i32(tmp2
);
6641 store_reg(s
, rd
, tmp
);
6648 } else if (((insn
& 0x0e000000) == 0 &&
6649 (insn
& 0x00000090) != 0x90) ||
6650 ((insn
& 0x0e000000) == (1 << 25))) {
6651 int set_cc
, logic_cc
, shiftop
;
6653 op1
= (insn
>> 21) & 0xf;
6654 set_cc
= (insn
>> 20) & 1;
6655 logic_cc
= table_logic_cc
[op1
] & set_cc
;
6657 /* data processing instruction */
6658 if (insn
& (1 << 25)) {
6659 /* immediate operand */
6661 shift
= ((insn
>> 8) & 0xf) * 2;
6663 val
= (val
>> shift
) | (val
<< (32 - shift
));
6665 tmp2
= tcg_temp_new_i32();
6666 tcg_gen_movi_i32(tmp2
, val
);
6667 if (logic_cc
&& shift
) {
6668 gen_set_CF_bit31(tmp2
);
6673 tmp2
= load_reg(s
, rm
);
6674 shiftop
= (insn
>> 5) & 3;
6675 if (!(insn
& (1 << 4))) {
6676 shift
= (insn
>> 7) & 0x1f;
6677 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
6679 rs
= (insn
>> 8) & 0xf;
6680 tmp
= load_reg(s
, rs
);
6681 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
6684 if (op1
!= 0x0f && op1
!= 0x0d) {
6685 rn
= (insn
>> 16) & 0xf;
6686 tmp
= load_reg(s
, rn
);
6690 rd
= (insn
>> 12) & 0xf;
6693 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6697 store_reg_bx(env
, s
, rd
, tmp
);
6700 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6704 store_reg_bx(env
, s
, rd
, tmp
);
6707 if (set_cc
&& rd
== 15) {
6708 /* SUBS r15, ... is used for exception return. */
6712 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6713 gen_exception_return(s
, tmp
);
6716 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6718 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6720 store_reg_bx(env
, s
, rd
, tmp
);
6725 gen_helper_sub_cc(tmp
, tmp2
, tmp
);
6727 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6729 store_reg_bx(env
, s
, rd
, tmp
);
6733 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6735 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6737 store_reg_bx(env
, s
, rd
, tmp
);
6741 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
6743 gen_add_carry(tmp
, tmp
, tmp2
);
6745 store_reg_bx(env
, s
, rd
, tmp
);
6749 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
6751 gen_sub_carry(tmp
, tmp
, tmp2
);
6753 store_reg_bx(env
, s
, rd
, tmp
);
6757 gen_helper_sbc_cc(tmp
, tmp2
, tmp
);
6759 gen_sub_carry(tmp
, tmp2
, tmp
);
6761 store_reg_bx(env
, s
, rd
, tmp
);
6765 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6768 tcg_temp_free_i32(tmp
);
6772 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6775 tcg_temp_free_i32(tmp
);
6779 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6781 tcg_temp_free_i32(tmp
);
6785 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6787 tcg_temp_free_i32(tmp
);
6790 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6794 store_reg_bx(env
, s
, rd
, tmp
);
6797 if (logic_cc
&& rd
== 15) {
6798 /* MOVS r15, ... is used for exception return. */
6802 gen_exception_return(s
, tmp2
);
6807 store_reg_bx(env
, s
, rd
, tmp2
);
6811 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
6815 store_reg_bx(env
, s
, rd
, tmp
);
6819 tcg_gen_not_i32(tmp2
, tmp2
);
6823 store_reg_bx(env
, s
, rd
, tmp2
);
6826 if (op1
!= 0x0f && op1
!= 0x0d) {
6827 tcg_temp_free_i32(tmp2
);
6830 /* other instructions */
6831 op1
= (insn
>> 24) & 0xf;
6835 /* multiplies, extra load/stores */
6836 sh
= (insn
>> 5) & 3;
6839 rd
= (insn
>> 16) & 0xf;
6840 rn
= (insn
>> 12) & 0xf;
6841 rs
= (insn
>> 8) & 0xf;
6843 op1
= (insn
>> 20) & 0xf;
6845 case 0: case 1: case 2: case 3: case 6:
6847 tmp
= load_reg(s
, rs
);
6848 tmp2
= load_reg(s
, rm
);
6849 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
6850 tcg_temp_free_i32(tmp2
);
6851 if (insn
& (1 << 22)) {
6852 /* Subtract (mls) */
6854 tmp2
= load_reg(s
, rn
);
6855 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6856 tcg_temp_free_i32(tmp2
);
6857 } else if (insn
& (1 << 21)) {
6859 tmp2
= load_reg(s
, rn
);
6860 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6861 tcg_temp_free_i32(tmp2
);
6863 if (insn
& (1 << 20))
6865 store_reg(s
, rd
, tmp
);
6868 /* 64 bit mul double accumulate (UMAAL) */
6870 tmp
= load_reg(s
, rs
);
6871 tmp2
= load_reg(s
, rm
);
6872 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
6873 gen_addq_lo(s
, tmp64
, rn
);
6874 gen_addq_lo(s
, tmp64
, rd
);
6875 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6876 tcg_temp_free_i64(tmp64
);
6878 case 8: case 9: case 10: case 11:
6879 case 12: case 13: case 14: case 15:
6880 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6881 tmp
= load_reg(s
, rs
);
6882 tmp2
= load_reg(s
, rm
);
6883 if (insn
& (1 << 22)) {
6884 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6886 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
6888 if (insn
& (1 << 21)) { /* mult accumulate */
6889 gen_addq(s
, tmp64
, rn
, rd
);
6891 if (insn
& (1 << 20)) {
6892 gen_logicq_cc(tmp64
);
6894 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6895 tcg_temp_free_i64(tmp64
);
6901 rn
= (insn
>> 16) & 0xf;
6902 rd
= (insn
>> 12) & 0xf;
6903 if (insn
& (1 << 23)) {
6904 /* load/store exclusive */
6905 op1
= (insn
>> 21) & 0x3;
6910 addr
= tcg_temp_local_new_i32();
6911 load_reg_var(s
, addr
, rn
);
6912 if (insn
& (1 << 20)) {
6915 gen_load_exclusive(s
, rd
, 15, addr
, 2);
6917 case 1: /* ldrexd */
6918 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
6920 case 2: /* ldrexb */
6921 gen_load_exclusive(s
, rd
, 15, addr
, 0);
6923 case 3: /* ldrexh */
6924 gen_load_exclusive(s
, rd
, 15, addr
, 1);
6933 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
6935 case 1: /* strexd */
6936 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
6938 case 2: /* strexb */
6939 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
6941 case 3: /* strexh */
6942 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
6948 tcg_temp_free(addr
);
6950 /* SWP instruction */
6953 /* ??? This is not really atomic. However we know
6954 we never have multiple CPUs running in parallel,
6955 so it is good enough. */
6956 addr
= load_reg(s
, rn
);
6957 tmp
= load_reg(s
, rm
);
6958 if (insn
& (1 << 22)) {
6959 tmp2
= gen_ld8u(addr
, IS_USER(s
));
6960 gen_st8(tmp
, addr
, IS_USER(s
));
6962 tmp2
= gen_ld32(addr
, IS_USER(s
));
6963 gen_st32(tmp
, addr
, IS_USER(s
));
6965 tcg_temp_free_i32(addr
);
6966 store_reg(s
, rd
, tmp2
);
6972 /* Misc load/store */
6973 rn
= (insn
>> 16) & 0xf;
6974 rd
= (insn
>> 12) & 0xf;
6975 addr
= load_reg(s
, rn
);
6976 if (insn
& (1 << 24))
6977 gen_add_datah_offset(s
, insn
, 0, addr
);
6979 if (insn
& (1 << 20)) {
6983 tmp
= gen_ld16u(addr
, IS_USER(s
));
6986 tmp
= gen_ld8s(addr
, IS_USER(s
));
6990 tmp
= gen_ld16s(addr
, IS_USER(s
));
6994 } else if (sh
& 2) {
6999 tmp
= load_reg(s
, rd
);
7000 gen_st32(tmp
, addr
, IS_USER(s
));
7001 tcg_gen_addi_i32(addr
, addr
, 4);
7002 tmp
= load_reg(s
, rd
+ 1);
7003 gen_st32(tmp
, addr
, IS_USER(s
));
7007 tmp
= gen_ld32(addr
, IS_USER(s
));
7008 store_reg(s
, rd
, tmp
);
7009 tcg_gen_addi_i32(addr
, addr
, 4);
7010 tmp
= gen_ld32(addr
, IS_USER(s
));
7014 address_offset
= -4;
7017 tmp
= load_reg(s
, rd
);
7018 gen_st16(tmp
, addr
, IS_USER(s
));
7021 /* Perform base writeback before the loaded value to
7022 ensure correct behavior with overlapping index registers.
7023 ldrd with base writeback is is undefined if the
7024 destination and index registers overlap. */
7025 if (!(insn
& (1 << 24))) {
7026 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
7027 store_reg(s
, rn
, addr
);
7028 } else if (insn
& (1 << 21)) {
7030 tcg_gen_addi_i32(addr
, addr
, address_offset
);
7031 store_reg(s
, rn
, addr
);
7033 tcg_temp_free_i32(addr
);
7036 /* Complete the load. */
7037 store_reg(s
, rd
, tmp
);
7046 if (insn
& (1 << 4)) {
7048 /* Armv6 Media instructions. */
7050 rn
= (insn
>> 16) & 0xf;
7051 rd
= (insn
>> 12) & 0xf;
7052 rs
= (insn
>> 8) & 0xf;
7053 switch ((insn
>> 23) & 3) {
7054 case 0: /* Parallel add/subtract. */
7055 op1
= (insn
>> 20) & 7;
7056 tmp
= load_reg(s
, rn
);
7057 tmp2
= load_reg(s
, rm
);
7058 sh
= (insn
>> 5) & 7;
7059 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
7061 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
7062 tcg_temp_free_i32(tmp2
);
7063 store_reg(s
, rd
, tmp
);
7066 if ((insn
& 0x00700020) == 0) {
7067 /* Halfword pack. */
7068 tmp
= load_reg(s
, rn
);
7069 tmp2
= load_reg(s
, rm
);
7070 shift
= (insn
>> 7) & 0x1f;
7071 if (insn
& (1 << 6)) {
7075 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
7076 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
7077 tcg_gen_ext16u_i32(tmp2
, tmp2
);
7081 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
7082 tcg_gen_ext16u_i32(tmp
, tmp
);
7083 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
7085 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7086 tcg_temp_free_i32(tmp2
);
7087 store_reg(s
, rd
, tmp
);
7088 } else if ((insn
& 0x00200020) == 0x00200000) {
7090 tmp
= load_reg(s
, rm
);
7091 shift
= (insn
>> 7) & 0x1f;
7092 if (insn
& (1 << 6)) {
7095 tcg_gen_sari_i32(tmp
, tmp
, shift
);
7097 tcg_gen_shli_i32(tmp
, tmp
, shift
);
7099 sh
= (insn
>> 16) & 0x1f;
7100 tmp2
= tcg_const_i32(sh
);
7101 if (insn
& (1 << 22))
7102 gen_helper_usat(tmp
, tmp
, tmp2
);
7104 gen_helper_ssat(tmp
, tmp
, tmp2
);
7105 tcg_temp_free_i32(tmp2
);
7106 store_reg(s
, rd
, tmp
);
7107 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
7109 tmp
= load_reg(s
, rm
);
7110 sh
= (insn
>> 16) & 0x1f;
7111 tmp2
= tcg_const_i32(sh
);
7112 if (insn
& (1 << 22))
7113 gen_helper_usat16(tmp
, tmp
, tmp2
);
7115 gen_helper_ssat16(tmp
, tmp
, tmp2
);
7116 tcg_temp_free_i32(tmp2
);
7117 store_reg(s
, rd
, tmp
);
7118 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
7120 tmp
= load_reg(s
, rn
);
7121 tmp2
= load_reg(s
, rm
);
7122 tmp3
= tcg_temp_new_i32();
7123 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
7124 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7125 tcg_temp_free_i32(tmp3
);
7126 tcg_temp_free_i32(tmp2
);
7127 store_reg(s
, rd
, tmp
);
7128 } else if ((insn
& 0x000003e0) == 0x00000060) {
7129 tmp
= load_reg(s
, rm
);
7130 shift
= (insn
>> 10) & 3;
7131 /* ??? In many cases it's not neccessary to do a
7132 rotate, a shift is sufficient. */
7134 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7135 op1
= (insn
>> 20) & 7;
7137 case 0: gen_sxtb16(tmp
); break;
7138 case 2: gen_sxtb(tmp
); break;
7139 case 3: gen_sxth(tmp
); break;
7140 case 4: gen_uxtb16(tmp
); break;
7141 case 6: gen_uxtb(tmp
); break;
7142 case 7: gen_uxth(tmp
); break;
7143 default: goto illegal_op
;
7146 tmp2
= load_reg(s
, rn
);
7147 if ((op1
& 3) == 0) {
7148 gen_add16(tmp
, tmp2
);
7150 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7151 tcg_temp_free_i32(tmp2
);
7154 store_reg(s
, rd
, tmp
);
7155 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
7157 tmp
= load_reg(s
, rm
);
7158 if (insn
& (1 << 22)) {
7159 if (insn
& (1 << 7)) {
7163 gen_helper_rbit(tmp
, tmp
);
7166 if (insn
& (1 << 7))
7169 tcg_gen_bswap32_i32(tmp
, tmp
);
7171 store_reg(s
, rd
, tmp
);
7176 case 2: /* Multiplies (Type 3). */
7177 tmp
= load_reg(s
, rm
);
7178 tmp2
= load_reg(s
, rs
);
7179 if (insn
& (1 << 20)) {
7180 /* Signed multiply most significant [accumulate].
7181 (SMMUL, SMMLA, SMMLS) */
7182 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7185 tmp
= load_reg(s
, rd
);
7186 if (insn
& (1 << 6)) {
7187 tmp64
= gen_subq_msw(tmp64
, tmp
);
7189 tmp64
= gen_addq_msw(tmp64
, tmp
);
7192 if (insn
& (1 << 5)) {
7193 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
7195 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7196 tmp
= tcg_temp_new_i32();
7197 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7198 tcg_temp_free_i64(tmp64
);
7199 store_reg(s
, rn
, tmp
);
7201 if (insn
& (1 << 5))
7202 gen_swap_half(tmp2
);
7203 gen_smul_dual(tmp
, tmp2
);
7204 if (insn
& (1 << 6)) {
7205 /* This subtraction cannot overflow. */
7206 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7208 /* This addition cannot overflow 32 bits;
7209 * however it may overflow considered as a signed
7210 * operation, in which case we must set the Q flag.
7212 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7214 tcg_temp_free_i32(tmp2
);
7215 if (insn
& (1 << 22)) {
7216 /* smlald, smlsld */
7217 tmp64
= tcg_temp_new_i64();
7218 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7219 tcg_temp_free_i32(tmp
);
7220 gen_addq(s
, tmp64
, rd
, rn
);
7221 gen_storeq_reg(s
, rd
, rn
, tmp64
);
7222 tcg_temp_free_i64(tmp64
);
7224 /* smuad, smusd, smlad, smlsd */
7227 tmp2
= load_reg(s
, rd
);
7228 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7229 tcg_temp_free_i32(tmp2
);
7231 store_reg(s
, rn
, tmp
);
7236 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
7238 case 0: /* Unsigned sum of absolute differences. */
7240 tmp
= load_reg(s
, rm
);
7241 tmp2
= load_reg(s
, rs
);
7242 gen_helper_usad8(tmp
, tmp
, tmp2
);
7243 tcg_temp_free_i32(tmp2
);
7245 tmp2
= load_reg(s
, rd
);
7246 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7247 tcg_temp_free_i32(tmp2
);
7249 store_reg(s
, rn
, tmp
);
7251 case 0x20: case 0x24: case 0x28: case 0x2c:
7252 /* Bitfield insert/clear. */
7254 shift
= (insn
>> 7) & 0x1f;
7255 i
= (insn
>> 16) & 0x1f;
7258 tmp
= tcg_temp_new_i32();
7259 tcg_gen_movi_i32(tmp
, 0);
7261 tmp
= load_reg(s
, rm
);
7264 tmp2
= load_reg(s
, rd
);
7265 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << i
) - 1);
7266 tcg_temp_free_i32(tmp2
);
7268 store_reg(s
, rd
, tmp
);
7270 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7271 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7273 tmp
= load_reg(s
, rm
);
7274 shift
= (insn
>> 7) & 0x1f;
7275 i
= ((insn
>> 16) & 0x1f) + 1;
7280 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
7282 gen_sbfx(tmp
, shift
, i
);
7285 store_reg(s
, rd
, tmp
);
7295 /* Check for undefined extension instructions
7296 * per the ARM Bible IE:
7297 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7299 sh
= (0xf << 20) | (0xf << 4);
7300 if (op1
== 0x7 && ((insn
& sh
) == sh
))
7304 /* load/store byte/word */
7305 rn
= (insn
>> 16) & 0xf;
7306 rd
= (insn
>> 12) & 0xf;
7307 tmp2
= load_reg(s
, rn
);
7308 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
7309 if (insn
& (1 << 24))
7310 gen_add_data_offset(s
, insn
, tmp2
);
7311 if (insn
& (1 << 20)) {
7313 if (insn
& (1 << 22)) {
7314 tmp
= gen_ld8u(tmp2
, i
);
7316 tmp
= gen_ld32(tmp2
, i
);
7320 tmp
= load_reg(s
, rd
);
7321 if (insn
& (1 << 22))
7322 gen_st8(tmp
, tmp2
, i
);
7324 gen_st32(tmp
, tmp2
, i
);
7326 if (!(insn
& (1 << 24))) {
7327 gen_add_data_offset(s
, insn
, tmp2
);
7328 store_reg(s
, rn
, tmp2
);
7329 } else if (insn
& (1 << 21)) {
7330 store_reg(s
, rn
, tmp2
);
7332 tcg_temp_free_i32(tmp2
);
7334 if (insn
& (1 << 20)) {
7335 /* Complete the load. */
7336 store_reg_from_load(env
, s
, rd
, tmp
);
7342 int j
, n
, user
, loaded_base
;
7344 /* load/store multiple words */
7345 /* XXX: store correct base if write back */
7347 if (insn
& (1 << 22)) {
7349 goto illegal_op
; /* only usable in supervisor mode */
7351 if ((insn
& (1 << 15)) == 0)
7354 rn
= (insn
>> 16) & 0xf;
7355 addr
= load_reg(s
, rn
);
7357 /* compute total size */
7359 TCGV_UNUSED(loaded_var
);
7362 if (insn
& (1 << i
))
7365 /* XXX: test invalid n == 0 case ? */
7366 if (insn
& (1 << 23)) {
7367 if (insn
& (1 << 24)) {
7369 tcg_gen_addi_i32(addr
, addr
, 4);
7371 /* post increment */
7374 if (insn
& (1 << 24)) {
7376 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7378 /* post decrement */
7380 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7385 if (insn
& (1 << i
)) {
7386 if (insn
& (1 << 20)) {
7388 tmp
= gen_ld32(addr
, IS_USER(s
));
7390 tmp2
= tcg_const_i32(i
);
7391 gen_helper_set_user_reg(tmp2
, tmp
);
7392 tcg_temp_free_i32(tmp2
);
7393 tcg_temp_free_i32(tmp
);
7394 } else if (i
== rn
) {
7398 store_reg_from_load(env
, s
, i
, tmp
);
7403 /* special case: r15 = PC + 8 */
7404 val
= (long)s
->pc
+ 4;
7405 tmp
= tcg_temp_new_i32();
7406 tcg_gen_movi_i32(tmp
, val
);
7408 tmp
= tcg_temp_new_i32();
7409 tmp2
= tcg_const_i32(i
);
7410 gen_helper_get_user_reg(tmp
, tmp2
);
7411 tcg_temp_free_i32(tmp2
);
7413 tmp
= load_reg(s
, i
);
7415 gen_st32(tmp
, addr
, IS_USER(s
));
7418 /* no need to add after the last transfer */
7420 tcg_gen_addi_i32(addr
, addr
, 4);
7423 if (insn
& (1 << 21)) {
7425 if (insn
& (1 << 23)) {
7426 if (insn
& (1 << 24)) {
7429 /* post increment */
7430 tcg_gen_addi_i32(addr
, addr
, 4);
7433 if (insn
& (1 << 24)) {
7436 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7438 /* post decrement */
7439 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7442 store_reg(s
, rn
, addr
);
7444 tcg_temp_free_i32(addr
);
7447 store_reg(s
, rn
, loaded_var
);
7449 if ((insn
& (1 << 22)) && !user
) {
7450 /* Restore CPSR from SPSR. */
7451 tmp
= load_cpu_field(spsr
);
7452 gen_set_cpsr(tmp
, 0xffffffff);
7453 tcg_temp_free_i32(tmp
);
7454 s
->is_jmp
= DISAS_UPDATE
;
7463 /* branch (and link) */
7464 val
= (int32_t)s
->pc
;
7465 if (insn
& (1 << 24)) {
7466 tmp
= tcg_temp_new_i32();
7467 tcg_gen_movi_i32(tmp
, val
);
7468 store_reg(s
, 14, tmp
);
7470 offset
= (((int32_t)insn
<< 8) >> 8);
7471 val
+= (offset
<< 2) + 4;
7479 if (disas_coproc_insn(env
, s
, insn
))
7484 gen_set_pc_im(s
->pc
);
7485 s
->is_jmp
= DISAS_SWI
;
7489 gen_exception_insn(s
, 4, EXCP_UDEF
);
7495 /* Return true if this is a Thumb-2 logical op. */
7497 thumb2_logic_op(int op
)
7502 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7503 then set condition code flags based on the result of the operation.
7504 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7505 to the high bit of T1.
7506 Returns zero if the opcode is valid. */
7509 gen_thumb2_data_op(DisasContext
*s
, int op
, int conds
, uint32_t shifter_out
, TCGv t0
, TCGv t1
)
7516 tcg_gen_and_i32(t0
, t0
, t1
);
7520 tcg_gen_andc_i32(t0
, t0
, t1
);
7524 tcg_gen_or_i32(t0
, t0
, t1
);
7528 tcg_gen_orc_i32(t0
, t0
, t1
);
7532 tcg_gen_xor_i32(t0
, t0
, t1
);
7537 gen_helper_add_cc(t0
, t0
, t1
);
7539 tcg_gen_add_i32(t0
, t0
, t1
);
7543 gen_helper_adc_cc(t0
, t0
, t1
);
7549 gen_helper_sbc_cc(t0
, t0
, t1
);
7551 gen_sub_carry(t0
, t0
, t1
);
7555 gen_helper_sub_cc(t0
, t0
, t1
);
7557 tcg_gen_sub_i32(t0
, t0
, t1
);
7561 gen_helper_sub_cc(t0
, t1
, t0
);
7563 tcg_gen_sub_i32(t0
, t1
, t0
);
7565 default: /* 5, 6, 7, 9, 12, 15. */
7571 gen_set_CF_bit31(t1
);
7576 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7578 static int disas_thumb2_insn(CPUState
*env
, DisasContext
*s
, uint16_t insn_hw1
)
7580 uint32_t insn
, imm
, shift
, offset
;
7581 uint32_t rd
, rn
, rm
, rs
;
7592 if (!(arm_feature(env
, ARM_FEATURE_THUMB2
)
7593 || arm_feature (env
, ARM_FEATURE_M
))) {
7594 /* Thumb-1 cores may need to treat bl and blx as a pair of
7595 16-bit instructions to get correct prefetch abort behavior. */
7597 if ((insn
& (1 << 12)) == 0) {
7599 /* Second half of blx. */
7600 offset
= ((insn
& 0x7ff) << 1);
7601 tmp
= load_reg(s
, 14);
7602 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7603 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
7605 tmp2
= tcg_temp_new_i32();
7606 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7607 store_reg(s
, 14, tmp2
);
7611 if (insn
& (1 << 11)) {
7612 /* Second half of bl. */
7613 offset
= ((insn
& 0x7ff) << 1) | 1;
7614 tmp
= load_reg(s
, 14);
7615 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7617 tmp2
= tcg_temp_new_i32();
7618 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7619 store_reg(s
, 14, tmp2
);
7623 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
7624 /* Instruction spans a page boundary. Implement it as two
7625 16-bit instructions in case the second half causes an
7627 offset
= ((int32_t)insn
<< 21) >> 9;
7628 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
7631 /* Fall through to 32-bit decode. */
7634 insn
= lduw_code(s
->pc
);
7636 insn
|= (uint32_t)insn_hw1
<< 16;
7638 if ((insn
& 0xf800e800) != 0xf000e800) {
7642 rn
= (insn
>> 16) & 0xf;
7643 rs
= (insn
>> 12) & 0xf;
7644 rd
= (insn
>> 8) & 0xf;
7646 switch ((insn
>> 25) & 0xf) {
7647 case 0: case 1: case 2: case 3:
7648 /* 16-bit instructions. Should never happen. */
7651 if (insn
& (1 << 22)) {
7652 /* Other load/store, table branch. */
7653 if (insn
& 0x01200000) {
7654 /* Load/store doubleword. */
7656 addr
= tcg_temp_new_i32();
7657 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
7659 addr
= load_reg(s
, rn
);
7661 offset
= (insn
& 0xff) * 4;
7662 if ((insn
& (1 << 23)) == 0)
7664 if (insn
& (1 << 24)) {
7665 tcg_gen_addi_i32(addr
, addr
, offset
);
7668 if (insn
& (1 << 20)) {
7670 tmp
= gen_ld32(addr
, IS_USER(s
));
7671 store_reg(s
, rs
, tmp
);
7672 tcg_gen_addi_i32(addr
, addr
, 4);
7673 tmp
= gen_ld32(addr
, IS_USER(s
));
7674 store_reg(s
, rd
, tmp
);
7677 tmp
= load_reg(s
, rs
);
7678 gen_st32(tmp
, addr
, IS_USER(s
));
7679 tcg_gen_addi_i32(addr
, addr
, 4);
7680 tmp
= load_reg(s
, rd
);
7681 gen_st32(tmp
, addr
, IS_USER(s
));
7683 if (insn
& (1 << 21)) {
7684 /* Base writeback. */
7687 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
7688 store_reg(s
, rn
, addr
);
7690 tcg_temp_free_i32(addr
);
7692 } else if ((insn
& (1 << 23)) == 0) {
7693 /* Load/store exclusive word. */
7694 addr
= tcg_temp_local_new();
7695 load_reg_var(s
, addr
, rn
);
7696 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
7697 if (insn
& (1 << 20)) {
7698 gen_load_exclusive(s
, rs
, 15, addr
, 2);
7700 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
7702 tcg_temp_free(addr
);
7703 } else if ((insn
& (1 << 6)) == 0) {
7706 addr
= tcg_temp_new_i32();
7707 tcg_gen_movi_i32(addr
, s
->pc
);
7709 addr
= load_reg(s
, rn
);
7711 tmp
= load_reg(s
, rm
);
7712 tcg_gen_add_i32(addr
, addr
, tmp
);
7713 if (insn
& (1 << 4)) {
7715 tcg_gen_add_i32(addr
, addr
, tmp
);
7716 tcg_temp_free_i32(tmp
);
7717 tmp
= gen_ld16u(addr
, IS_USER(s
));
7719 tcg_temp_free_i32(tmp
);
7720 tmp
= gen_ld8u(addr
, IS_USER(s
));
7722 tcg_temp_free_i32(addr
);
7723 tcg_gen_shli_i32(tmp
, tmp
, 1);
7724 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
7725 store_reg(s
, 15, tmp
);
7727 /* Load/store exclusive byte/halfword/doubleword. */
7729 op
= (insn
>> 4) & 0x3;
7733 addr
= tcg_temp_local_new();
7734 load_reg_var(s
, addr
, rn
);
7735 if (insn
& (1 << 20)) {
7736 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
7738 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
7740 tcg_temp_free(addr
);
7743 /* Load/store multiple, RFE, SRS. */
7744 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
7745 /* Not available in user mode. */
7748 if (insn
& (1 << 20)) {
7750 addr
= load_reg(s
, rn
);
7751 if ((insn
& (1 << 24)) == 0)
7752 tcg_gen_addi_i32(addr
, addr
, -8);
7753 /* Load PC into tmp and CPSR into tmp2. */
7754 tmp
= gen_ld32(addr
, 0);
7755 tcg_gen_addi_i32(addr
, addr
, 4);
7756 tmp2
= gen_ld32(addr
, 0);
7757 if (insn
& (1 << 21)) {
7758 /* Base writeback. */
7759 if (insn
& (1 << 24)) {
7760 tcg_gen_addi_i32(addr
, addr
, 4);
7762 tcg_gen_addi_i32(addr
, addr
, -4);
7764 store_reg(s
, rn
, addr
);
7766 tcg_temp_free_i32(addr
);
7768 gen_rfe(s
, tmp
, tmp2
);
7772 addr
= tcg_temp_new_i32();
7773 tmp
= tcg_const_i32(op
);
7774 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
7775 tcg_temp_free_i32(tmp
);
7776 if ((insn
& (1 << 24)) == 0) {
7777 tcg_gen_addi_i32(addr
, addr
, -8);
7779 tmp
= load_reg(s
, 14);
7780 gen_st32(tmp
, addr
, 0);
7781 tcg_gen_addi_i32(addr
, addr
, 4);
7782 tmp
= tcg_temp_new_i32();
7783 gen_helper_cpsr_read(tmp
);
7784 gen_st32(tmp
, addr
, 0);
7785 if (insn
& (1 << 21)) {
7786 if ((insn
& (1 << 24)) == 0) {
7787 tcg_gen_addi_i32(addr
, addr
, -4);
7789 tcg_gen_addi_i32(addr
, addr
, 4);
7791 tmp
= tcg_const_i32(op
);
7792 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
7793 tcg_temp_free_i32(tmp
);
7795 tcg_temp_free_i32(addr
);
7800 /* Load/store multiple. */
7801 addr
= load_reg(s
, rn
);
7803 for (i
= 0; i
< 16; i
++) {
7804 if (insn
& (1 << i
))
7807 if (insn
& (1 << 24)) {
7808 tcg_gen_addi_i32(addr
, addr
, -offset
);
7811 for (i
= 0; i
< 16; i
++) {
7812 if ((insn
& (1 << i
)) == 0)
7814 if (insn
& (1 << 20)) {
7816 tmp
= gen_ld32(addr
, IS_USER(s
));
7820 store_reg(s
, i
, tmp
);
7824 tmp
= load_reg(s
, i
);
7825 gen_st32(tmp
, addr
, IS_USER(s
));
7827 tcg_gen_addi_i32(addr
, addr
, 4);
7829 if (insn
& (1 << 21)) {
7830 /* Base register writeback. */
7831 if (insn
& (1 << 24)) {
7832 tcg_gen_addi_i32(addr
, addr
, -offset
);
7834 /* Fault if writeback register is in register list. */
7835 if (insn
& (1 << rn
))
7837 store_reg(s
, rn
, addr
);
7839 tcg_temp_free_i32(addr
);
7846 op
= (insn
>> 21) & 0xf;
7848 /* Halfword pack. */
7849 tmp
= load_reg(s
, rn
);
7850 tmp2
= load_reg(s
, rm
);
7851 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
7852 if (insn
& (1 << 5)) {
7856 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
7857 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
7858 tcg_gen_ext16u_i32(tmp2
, tmp2
);
7862 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
7863 tcg_gen_ext16u_i32(tmp
, tmp
);
7864 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
7866 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7867 tcg_temp_free_i32(tmp2
);
7868 store_reg(s
, rd
, tmp
);
7870 /* Data processing register constant shift. */
7872 tmp
= tcg_temp_new_i32();
7873 tcg_gen_movi_i32(tmp
, 0);
7875 tmp
= load_reg(s
, rn
);
7877 tmp2
= load_reg(s
, rm
);
7879 shiftop
= (insn
>> 4) & 3;
7880 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
7881 conds
= (insn
& (1 << 20)) != 0;
7882 logic_cc
= (conds
&& thumb2_logic_op(op
));
7883 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
7884 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
7886 tcg_temp_free_i32(tmp2
);
7888 store_reg(s
, rd
, tmp
);
7890 tcg_temp_free_i32(tmp
);
7894 case 13: /* Misc data processing. */
7895 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
7896 if (op
< 4 && (insn
& 0xf000) != 0xf000)
7899 case 0: /* Register controlled shift. */
7900 tmp
= load_reg(s
, rn
);
7901 tmp2
= load_reg(s
, rm
);
7902 if ((insn
& 0x70) != 0)
7904 op
= (insn
>> 21) & 3;
7905 logic_cc
= (insn
& (1 << 20)) != 0;
7906 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
7909 store_reg_bx(env
, s
, rd
, tmp
);
7911 case 1: /* Sign/zero extend. */
7912 tmp
= load_reg(s
, rm
);
7913 shift
= (insn
>> 4) & 3;
7914 /* ??? In many cases it's not neccessary to do a
7915 rotate, a shift is sufficient. */
7917 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7918 op
= (insn
>> 20) & 7;
7920 case 0: gen_sxth(tmp
); break;
7921 case 1: gen_uxth(tmp
); break;
7922 case 2: gen_sxtb16(tmp
); break;
7923 case 3: gen_uxtb16(tmp
); break;
7924 case 4: gen_sxtb(tmp
); break;
7925 case 5: gen_uxtb(tmp
); break;
7926 default: goto illegal_op
;
7929 tmp2
= load_reg(s
, rn
);
7930 if ((op
>> 1) == 1) {
7931 gen_add16(tmp
, tmp2
);
7933 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7934 tcg_temp_free_i32(tmp2
);
7937 store_reg(s
, rd
, tmp
);
7939 case 2: /* SIMD add/subtract. */
7940 op
= (insn
>> 20) & 7;
7941 shift
= (insn
>> 4) & 7;
7942 if ((op
& 3) == 3 || (shift
& 3) == 3)
7944 tmp
= load_reg(s
, rn
);
7945 tmp2
= load_reg(s
, rm
);
7946 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
7947 tcg_temp_free_i32(tmp2
);
7948 store_reg(s
, rd
, tmp
);
7950 case 3: /* Other data processing. */
7951 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
7953 /* Saturating add/subtract. */
7954 tmp
= load_reg(s
, rn
);
7955 tmp2
= load_reg(s
, rm
);
7957 gen_helper_double_saturate(tmp
, tmp
);
7959 gen_helper_sub_saturate(tmp
, tmp2
, tmp
);
7961 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
7962 tcg_temp_free_i32(tmp2
);
7964 tmp
= load_reg(s
, rn
);
7966 case 0x0a: /* rbit */
7967 gen_helper_rbit(tmp
, tmp
);
7969 case 0x08: /* rev */
7970 tcg_gen_bswap32_i32(tmp
, tmp
);
7972 case 0x09: /* rev16 */
7975 case 0x0b: /* revsh */
7978 case 0x10: /* sel */
7979 tmp2
= load_reg(s
, rm
);
7980 tmp3
= tcg_temp_new_i32();
7981 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
7982 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7983 tcg_temp_free_i32(tmp3
);
7984 tcg_temp_free_i32(tmp2
);
7986 case 0x18: /* clz */
7987 gen_helper_clz(tmp
, tmp
);
7993 store_reg(s
, rd
, tmp
);
7995 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7996 op
= (insn
>> 4) & 0xf;
7997 tmp
= load_reg(s
, rn
);
7998 tmp2
= load_reg(s
, rm
);
7999 switch ((insn
>> 20) & 7) {
8000 case 0: /* 32 x 32 -> 32 */
8001 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
8002 tcg_temp_free_i32(tmp2
);
8004 tmp2
= load_reg(s
, rs
);
8006 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8008 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8009 tcg_temp_free_i32(tmp2
);
8012 case 1: /* 16 x 16 -> 32 */
8013 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
8014 tcg_temp_free_i32(tmp2
);
8016 tmp2
= load_reg(s
, rs
);
8017 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8018 tcg_temp_free_i32(tmp2
);
8021 case 2: /* Dual multiply add. */
8022 case 4: /* Dual multiply subtract. */
8024 gen_swap_half(tmp2
);
8025 gen_smul_dual(tmp
, tmp2
);
8026 if (insn
& (1 << 22)) {
8027 /* This subtraction cannot overflow. */
8028 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8030 /* This addition cannot overflow 32 bits;
8031 * however it may overflow considered as a signed
8032 * operation, in which case we must set the Q flag.
8034 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8036 tcg_temp_free_i32(tmp2
);
8039 tmp2
= load_reg(s
, rs
);
8040 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8041 tcg_temp_free_i32(tmp2
);
8044 case 3: /* 32 * 16 -> 32msb */
8046 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
8049 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8050 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
8051 tmp
= tcg_temp_new_i32();
8052 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
8053 tcg_temp_free_i64(tmp64
);
8056 tmp2
= load_reg(s
, rs
);
8057 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8058 tcg_temp_free_i32(tmp2
);
8061 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8062 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8064 tmp
= load_reg(s
, rs
);
8065 if (insn
& (1 << 20)) {
8066 tmp64
= gen_addq_msw(tmp64
, tmp
);
8068 tmp64
= gen_subq_msw(tmp64
, tmp
);
8071 if (insn
& (1 << 4)) {
8072 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
8074 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
8075 tmp
= tcg_temp_new_i32();
8076 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
8077 tcg_temp_free_i64(tmp64
);
8079 case 7: /* Unsigned sum of absolute differences. */
8080 gen_helper_usad8(tmp
, tmp
, tmp2
);
8081 tcg_temp_free_i32(tmp2
);
8083 tmp2
= load_reg(s
, rs
);
8084 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8085 tcg_temp_free_i32(tmp2
);
8089 store_reg(s
, rd
, tmp
);
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!arm_feature(env, ARM_FEATURE_DIV))
                    goto illegal_op;
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        tcg_temp_free_i32(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            }
            break;
        }
        break;
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn(env, s, insn))
                goto illegal_op;
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw2[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
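                /* Worked example (illustrative): the encoding f000 f800,
                 * i.e. insn = 0xf000f800, has S = 0, imm10 = 0, J1 = J2 = 1
                 * and imm11 = 0, so all four steps above leave offset = 0
                 * and the branch-and-link below targets s->pc, the next
                 * instruction.
                 */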
                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    goto illegal_op; /* not implemented.  */
                } else {
                    op = (insn >> 20) & 7;
                    switch (op) {
                    case 0: /* msr cpsr.  */
                        if (IS_M(env)) {
                            tmp = load_reg(s, rn);
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_msr(cpu_env, addr, tmp);
                            tcg_temp_free_i32(addr);
                            tcg_temp_free_i32(tmp);
                            break;
                        }
                        /* fall through */
                    case 1: /* msr spsr.  */
                        if (IS_M(env))
                            goto illegal_op;
                        tmp = load_reg(s, rn);
                        if (gen_set_psr(s,
                                msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                                op == 1, tmp))
                            goto illegal_op;
                        break;
                    case 2: /* cps, nop-hint.  */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                        }
                        /* Implemented as NOP in user mode.  */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_set_psr_im(s, offset, 0, imm);
                        }
                        break;
                    case 3: /* Special control operations.  */
                        ARCH(7);
                        op = (insn >> 4) & 0xf;
                        switch (op) {
                        case 2: /* clrex */
                            gen_clrex(s);
                            break;
                        case 4: /* dsb */
                        case 5: /* dmb */
                        case 6: /* isb */
                            /* These execute as NOPs.  */
                            break;
                        default:
                            goto illegal_op;
                        }
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.  */
                        tmp = load_reg(s, rn);
                        gen_bx(s, tmp);
                        break;
                    case 5: /* Exception return.  */
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        if (rn != 14 || rd != 15) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rn);
                        tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                        gen_exception_return(s, tmp);
                        break;
                    case 6: /* mrs cpsr.  */
                        tmp = tcg_temp_new_i32();
                        if (IS_M(env)) {
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_mrs(tmp, cpu_env, addr);
                            tcg_temp_free_i32(addr);
                        } else {
                            gen_helper_cpsr_read(tmp);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 7: /* mrs spsr.  */
                        /* Not accessible in user mode.  */
                        if (IS_USER(s) || IS_M(env))
                            goto illegal_op;
                        tmp = load_cpu_field(spsr);
                        store_reg(s, rd, tmp);
                        break;
                    }
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;
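                /* The assembled offset carries its sign in bits [31:20]
                 * and magnitude in bits [19:1], giving the architectural
                 * +/-1MB range of B<cond>.W. For instance (illustrative),
                 * an encoding with all offset fields zero branches to
                 * s->pc, i.e. the following instruction.
                 */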
                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            }
        } else {
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_sbfx(tmp, shift, imm);
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_ubfx(tmp, shift, (1u << imm) - 1);
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
                            tcg_temp_free_i32(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_usat16(tmp, tmp, tmp2);
                            else
                                gen_helper_usat(tmp, tmp, tmp2);
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_ssat16(tmp, tmp, tmp2);
                            else
                                gen_helper_ssat(tmp, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                        }
                    }
                    store_reg(s, rd, tmp);
                }
            } else {
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm <<= 8;
                    imm |= imm << 16;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
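                    /* Examples (illustrative) of the expansions above for
                     * imm8 = 0x55: case 1 gives 0x00550055, case 2 gives
                     * 0x55005500, case 3 gives 0x55555555. The rotated
                     * form ORs in bit 7 and rotates 1:imm8[6:0] right by
                     * i:imm3:imm8[7] places.
                     */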
                    shifter_out = 1;
                    break;
                }
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
        }
        break;
    case 12: /* Load/store single data item.  */
    {
        int postinc = 0;
        int writeback = 0;
        int user;
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            break;
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (rs == 15) {
            if (!(insn & (1 << 20))) {
                goto illegal_op;
            }
            if (op != 2) {
                /* Byte or halfword load space with dest == r15 : memory hints.
                 * Catch them early so we don't emit pointless addressing code.
                 * This space is a mix of:
                 *  PLD/PLDW/PLI,  which we implement as NOPs (note that unlike
                 *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
                 *     cores)
                 *  unallocated hints, which must be treated as NOPs
                 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
                 *     which is easiest for the decoding logic
                 *  Some space which must UNDEF
                 */
                int op1 = (insn >> 23) & 3;
                int op2 = (insn >> 6) & 0x3f;
                if (op & 2) {
                    goto illegal_op;
                }
                if (rn == 15) {
                    /* UNPREDICTABLE or unallocated hint */
                    return 0;
                }
                if (op1 & 1) {
                    return 0; /* PLD* or unallocated hint */
                }
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return 0; /* PLD* or unallocated hint */
                }
                /* UNDEF space, or an UNPREDICTABLE */
                return 1;
            }
        }
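        /* Decode note (illustrative): e.g. a PLD with a positive 12-bit
         * immediate has bit 23 set, so op1 is odd and the "op1 & 1" test
         * above NOPs it; the op2 == 0 and (op2 & 0x3c) == 0x30 tests
         * catch the register-offset and 8-bit immediate hint forms.
         */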
        user = IS_USER(s);
        if (rn == 15) {
            addr = tcg_temp_new_i32();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                imm = insn & 0xff;
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3) {
                        tcg_temp_free_i32(addr);
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    break;
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    user = 1;
                    break;
                case 0x9: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xb: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 0xd: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xf: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    writeback = 1;
                    break;
                default:
                    tcg_temp_free_i32(addr);
                    goto illegal_op;
                }
            }
        }
        if (insn & (1 << 20)) {
            /* Load.  */
            switch (op) {
            case 0: tmp = gen_ld8u(addr, user); break;
            case 4: tmp = gen_ld8s(addr, user); break;
            case 1: tmp = gen_ld16u(addr, user); break;
            case 5: tmp = gen_ld16s(addr, user); break;
            case 2: tmp = gen_ld32(addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            if (rs == 15) {
                gen_bx(s, tmp);
            } else {
                store_reg(s, rs, tmp);
            }
        } else {
            /* Store.  */
            tmp = load_reg(s, rs);
            switch (op) {
            case 0: gen_st8(tmp, addr, user); break;
            case 1: gen_st16(tmp, addr, user); break;
            case 2: gen_st32(tmp, addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
        }
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
    }
        break;
    default:
        goto illegal_op;
    }
    return 0;
illegal_op:
    return 1;
}
static void disas_thumb_insn(CPUState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = lduw_code(s->pc);
    s->pc += 2;
    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
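            /* Worked example (illustrative): for ldr r0, [pc, #8] at
             * address 0x1000, s->pc is already 0x1002 here, so
             * val = 0x1002 + 2 + 8 = 0x100c, i.e. Align(PC, 4) + imm,
             * with bit 1 of the base cleared as the comment says.
             */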
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    ARCH(5);
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                /* already thumb, no need to check */
                gen_bx(s, tmp);
                break;
            }
            break;
        }
        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED(tmp);
        }
        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc(tmp, tmp2);
            else
                gen_helper_adc_cc(tmp, tmp, tmp2);
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
            else
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_helper_sub_cc(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_helper_sub_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_helper_add_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;

    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        tcg_temp_free_i32(addr);
        break;
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;
        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(env, s, 15, tmp);
            }
            break;
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
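            /* Illustrative: offset is i:imm5:'0', a forward-only
             * displacement of 0..126 bytes; e.g. an imm5 field of 2
             * yields offset 4, so the branch targets insn address + 4 + 4.
             */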
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;
        case 0xe: /* bkpt */
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;

        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                tcg_temp_free_i32(tmp);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;
                gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback.  */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
        break;

    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;
        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
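        /* Worked example (illustrative): insn 0xe7fe has imm11 = 0x7fe,
         * which sign-extends to -2, so val = s->pc + (-2 << 1) + 2 is the
         * address of the branch itself: the classic branch-to-self idiom.
         */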
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF);
    return;
undef:
    gen_exception_insn(s, 2, EXCP_UDEF);
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. gen_pc_load() then uses this to restore
     * the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUState is correct in the
     * middle of a TB.
     */
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] =
                (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
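            /* Round-trip note (illustrative): this packing is the inverse
             * of the decode at TB start, where condexec_mask is
             * (ARM_TBFLAG_CONDEXEC(flags) & 0xf) << 1 and condexec_cond is
             * ARM_TBFLAG_CONDEXEC(flags) >> 4, so gen_pc_load() can hand
             * the stored value straight back to env->condexec_bits.
             */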
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
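            /* IT state advance (illustrative example): for "IT NE"
             * (firstcond = 1, mask = 0x8) the IT insn stores cond = 0,
             * mask = 0x18; the update below then yields cond = 1 (NE) and
             * mask = 0x10 for the next insn, and after that insn the mask
             * shifts to 0 and cond is reset: a one-instruction block.
             */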
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);
    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
}
*env
, TranslationBlock
*tb
,
9650 unsigned long searched_pc
, int pc_pos
, void *puc
)
9652 env
->regs
[15] = gen_opc_pc
[pc_pos
];
9653 env
->condexec_bits
= gen_opc_condexec_bits
[pc_pos
];