 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;
static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"
static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
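
/* Note: when gen_smul_dual() returns, a holds the signed product of the
   two low halfwords and b holds the signed product of the two high
   halfwords; the dual-multiply instructions (SMUAD/SMUSD and friends)
   then add or subtract this pair as required. */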
/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
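
/* Worked example for gen_rev16(), with var = 0xAABBCCDD:
     tmp = (var >> 8) & 0x00ff00ff  -> 0x00AA00CC
     var = (var << 8) & 0xff00ff00  -> 0xBB00DD00
     var | tmp                      -> 0xBBAADDCC
   i.e. the bytes are swapped within each halfword. */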
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
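
/* The xori/subi pair above is the usual branch-free sign extension:
   after masking to `width` bits, (x ^ signbit) - signbit subtracts
   2*signbit from values with the sign bit set and leaves the rest
   unchanged.  For width == 8: (0x80 ^ 0x80) - 0x80 = -128, while
   (0x7f ^ 0x80) - 0x80 = 127. */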
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
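
/* Equivalent C: dest = (base & ~(mask << shift)) | ((val & mask) << shift),
   the conventional read-modify-write bitfield insert. */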
/* Return (b << 32) + a.  Mark inputs as dead.  */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* Return (b << 32) - a.  Mark inputs as dead.  */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
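
/* Forcing bit 15 of both addends to zero keeps a carry out of the low
   halfword from rippling into the high halfword, so the two halves add
   independently; the saved xor then reconstructs bit 15 of the low
   result (a15 ^ b15 ^ carry-in) without generating a cross-half carry. */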
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
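
/* Flag representation: NF and VF keep their flag in bit 31, CF is 0 or 1,
   and ZF holds a value that is zero exactly when the Z flag is set.
   Storing the result to both NF and ZF is therefore enough here, and it
   is why gen_test_cc() below can use plain signed compares against 0. */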
/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}
/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}
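
/* This is the ARM SBC definition: subtract-with-carry computes
   Rd = Rn - Op2 - NOT(C) = T0 - T1 + CF - 1, because ARM treats the
   carry flag as an inverted borrow on subtraction. */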
/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
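
/* The shift == 0 special cases mirror the immediate-shift encoding:
   LSR #0 and ASR #0 in the instruction actually mean a shift by 32,
   and ROR #0 means RRX, a one-bit rotate through carry -- hence the
   path above that shifts the old CF into bit 31. */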
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }

static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }

static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}
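
/* Conditions that AND two flags (hi, gt) branch to a local "inv" label
   when the first test fails, so the branch to `label` is skipped; OR
   conditions (ls, le) just branch to `label` on either flag. */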
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* tst */
    1, /* teq */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
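
/* Interworking: bit 0 of a BX-style target selects the instruction set,
   so the PC is written with bit 0 forced clear and bit 0 itself becomes
   the new Thumb state. */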
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
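
/* In the load/store helpers above, `index` is the MMU index given to the
   tcg_gen_qemu_* ops; callers pass IS_USER(s) so that code translated
   for user mode uses the user address space. */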
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = insn & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
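
/* Neon arithmetic uses the architecture's "standard FPSCR value"
   (flush-to-zero, default NaN), so it gets a float_status block separate
   from the one the FPSCR controls; the `neon` argument selects which
   one the helper receives. */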
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}
static inline long vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
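
/* Standard coprocessor addressing: bit 24 selects pre- vs post-indexed,
   bit 23 add vs subtract, and bit 21 requests base writeback; the
   remaining form (neither pre-indexed nor writeback) is only accepted
   with the U bit (bit 23) set. */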
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;
    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                 /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                    /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }
        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                            /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {                            /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {                            /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {                            /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                         /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                         /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                         /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                         /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                         /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                         /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                             /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8)
                tcg_gen_ext8s_i32(tmp, tmp);
            else
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8)
                tcg_gen_ext16s_i32(tmp, tmp);
            else
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }
    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;
    TCGv tmp, tmp2;

    if (insn & ARM_CP_RW_BIT) {
        if (!env->cp[cp].cp_read)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(insn);
        gen_helper_get_cp(tmp, cpu_env, tmp2);
        tcg_temp_free(tmp2);
        store_reg(s, rd, tmp);
    } else {
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        tmp2 = tcg_const_i32(insn);
        gen_helper_set_cp(cpu_env, tmp2, tmp);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
    }
    return 0;
}
static int cp15_user_ok(CPUState *env, uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
        /* Performance monitor registers fall into three categories:
         *  (a) always UNDEF in usermode
         *  (b) UNDEF only if PMUSERENR.EN is 0
         *  (c) always read OK and UNDEF on write (PMUSERENR only)
         */
        if ((cpm == 12 && (op < 6)) ||
            (cpm == 13 && (op < 3))) {
            return env->cp15.c9_pmuserenr;
        } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
            /* PMUSERENR, read only */
            return 1;
        }
        return 0;
    }

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
                || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}
static int cp15_tls_load_store(CPUState *env, DisasContext *s,
                               uint32_t insn, uint32_t rd)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
    TCGv tmp;

    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            return 0;
        }
        store_reg(s, rd, tmp);
    } else {
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            tcg_temp_free_i32(tmp);
            return 0;
        }
    }
    return 1;
}
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(env, insn)) {
        return 1;
    }

    /* Pre-v7 versions of the architecture implemented WFI via coprocessor
     * instructions rather than a separate instruction.
     */
    if ((insn & 0x0fff0fff) == 0x0e070f90) {
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    }

    if ((insn & 0x0fff0fff) == 0x0e070f58) {
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise fall through to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
    }

    rd = (insn >> 12) & 0xf;

    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            goto illegal_op; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
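/* For example, with insn bits [15:12] = 0b0101 and bit 22 = 1,
 * VFP_SREG_D(insn) yields (0b0101 << 1) | 1 = 11, i.e. s11, while on a
 * VFP3 core VFP_DREG_D(reg, insn) yields (1 << 4) | 0b0101 = 21, i.e. d21.
 * Pre-VFP3 cores only have d0..d15, so a set "small" bit takes the
 * goto illegal_op path above.
 */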
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
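/* As an illustration, gen_neon_dup_u8 with var = 0x000000ab and shift = 0
 * goes 0x000000ab -> or with 0x0000ab00 -> 0x0000abab -> or with
 * 0xabab0000 -> 0xabababab, i.e. the low byte replicated into all four
 * byte lanes; gen_neon_dup_low16/high16 do the same at halfword width.
 */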
static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv tmp;
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
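/* E.g. a byte-sized VLD1 "to all lanes" (size == 0) becomes a single
 * gen_ld8u followed by gen_neon_dup_u8, so one memory access fills a
 * whole 32-bit pass with four copies of the loaded element.
 */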
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xff);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
                    /* VCVT from int is always from S reg regardless of dp bit.
                     * VCVT with immediate frac_bits has same format as SREG_M
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = tcg_temp_new_i32();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32 */
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                            return 1;
                        tmp = tcg_temp_new_i32();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
                    /* VCVT double to int: always integer result. */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (delta_m == 0) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if (s->thumb && rn == 15) {
                /* PC-relative: the base is the word-aligned PC.  */
                addr = tcg_temp_new_i32();
                tcg_gen_movi_i32(addr, s->pc & ~2);
            } else {
                addr = load_reg(s, rn);
            }
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
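/* The chaining slot index n distinguishes the two possible successors of a
 * TB; a conditional branch can use gen_goto_tb(s, 0, ...) for one path and
 * gen_goto_tb(s, 1, ...) for the other. Chaining is only attempted when the
 * destination lies on the same guest page as the TB, which is what the
 * TARGET_PAGE_MASK comparison above checks.
 */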
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
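/* E.g. SMULTB (x = 1, y = 0) with t0 = 0x00030002 and t1 = 0x00050004:
 * t0 is arithmetically shifted right by 16 giving 3, t1 is sign-extended
 * from its low halfword giving 4, and the 32-bit product 12 ends up in t0.
 */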
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V4T))
        mask &= ~CPSR_T;
    if (!arm_feature(env, ARM_FEATURE_V5))
        mask &= ~CPSR_Q; /* V5TE in reality*/
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
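/* For instance "msr cpsr_fs, r0" sets flags bits f (31:24) and s (23:16),
 * so the bitmap tests above start from mask = 0xffff0000; the feature
 * checks then strip any bits the emulated core does not implement
 * (e.g. GE/E before v6) and, in user mode, everything outside CPSR_USER.
 */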
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}

#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
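/* These macros expand per element size and signedness; e.g. with size == 1
 * and u == 0, GEN_NEON_INTEGER_OP(hadd) becomes
 *     gen_helper_neon_hadd_s16(tmp, tmp, tmp2);
 * and an unallocated size/u combination falls into the default case, making
 * the enclosing decoder return 1 (UNDEF).
 */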
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
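/* I.e. dest = (t & c) | (f & ~c); with c = 0xf0f0f0f0, t = 0xaaaaaaaa and
 * f = 0x55555555 the result is 0xa5a5a5a5: each set bit of c selects the
 * corresponding bit from t, each clear bit selects it from f.
 */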
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
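/* The 32x32->64 cases (size == 2) have no dedicated Neon helper and are
 * built from the generic gen_muls_i64_i32/gen_mulu_i64_i32 instead; e.g.
 * VMULL.S32 q0, d1, d2 takes the signed path via gen_muls_i64_i32.
 */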
static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
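/* Each entry is a bitmap of the element sizes an op accepts; the decoder
 * below tests (neon_3r_sizes[op] & (1 << size)). E.g. VQDMULH/VQRDMULH is
 * 0x6, allowing halfword (size 1) and word (size 2) elements only, so a
 * byte-sized encoding UNDEFs.
 */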
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            op >= NEON_2RM_VRECPE_F);
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4528 /* Translate a NEON data processing instruction. Return nonzero if the
4529 instruction is invalid.
4530 We process data in a mixture of 32-bit and 64-bit chunks.
4531 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4533 static int disas_neon_data_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
4545 TCGv tmp
, tmp2
, tmp3
, tmp4
, tmp5
;
4548 if (!s
->vfp_enabled
)
4550 q
= (insn
& (1 << 6)) != 0;
4551 u
= (insn
>> 24) & 1;
4552 VFP_DREG_D(rd
, insn
);
4553 VFP_DREG_N(rn
, insn
);
4554 VFP_DREG_M(rm
, insn
);
4555 size
= (insn
>> 20) & 3;
4556 if ((insn
& (1 << 23)) == 0) {
4557 /* Three register same length. */
4558 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4559 /* Catch invalid op and bad size combinations: UNDEF */
4560 if ((neon_3r_sizes
[op
] & (1 << size
)) == 0) {
4563 /* All insns of this form UNDEF for either this condition or the
4564 * superset of cases "Q==1"; we catch the latter later.
4566 if (q
&& ((rd
| rn
| rm
) & 1)) {
4569 if (size
== 3 && op
!= NEON_3R_LOGIC
) {
4570 /* 64-bit element instructions. */
4571 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4572 neon_load_reg64(cpu_V0
, rn
+ pass
);
4573 neon_load_reg64(cpu_V1
, rm
+ pass
);
4577 gen_helper_neon_qadd_u64(cpu_V0
, cpu_env
,
4580 gen_helper_neon_qadd_s64(cpu_V0
, cpu_env
,
4586 gen_helper_neon_qsub_u64(cpu_V0
, cpu_env
,
4589 gen_helper_neon_qsub_s64(cpu_V0
, cpu_env
,
4595 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4597 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4602 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4605 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
4611 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4613 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4616 case NEON_3R_VQRSHL
:
4618 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
4621 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
4625 case NEON_3R_VADD_VSUB
:
4627 tcg_gen_sub_i64(CPU_V001
);
4629 tcg_gen_add_i64(CPU_V001
);
4635 neon_store_reg64(cpu_V0
, rd
+ pass
);
4644 case NEON_3R_VQRSHL
:
4647 /* Shift instruction operands are reversed. */
4662 case NEON_3R_FLOAT_ARITH
:
4663 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
4665 case NEON_3R_FLOAT_MINMAX
:
4666 pairwise
= u
; /* if VPMIN/VPMAX (float) */
4668 case NEON_3R_FLOAT_CMP
:
4670 /* no encoding for U=0 C=1x */
4674 case NEON_3R_FLOAT_ACMP
:
4679 case NEON_3R_VRECPS_VRSQRTS
:
4685 if (u
&& (size
!= 0)) {
4686 /* UNDEF on invalid size for polynomial subcase */
4694 if (pairwise
&& q
) {
4695 /* All the pairwise insns UNDEF if Q is set */
4699 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4704 tmp
= neon_load_reg(rn
, 0);
4705 tmp2
= neon_load_reg(rn
, 1);
4707 tmp
= neon_load_reg(rm
, 0);
4708 tmp2
= neon_load_reg(rm
, 1);
4712 tmp
= neon_load_reg(rn
, pass
);
4713 tmp2
= neon_load_reg(rm
, pass
);
4717 GEN_NEON_INTEGER_OP(hadd
);
4720 GEN_NEON_INTEGER_OP_ENV(qadd
);
4722 case NEON_3R_VRHADD
:
4723 GEN_NEON_INTEGER_OP(rhadd
);
4725 case NEON_3R_LOGIC
: /* Logic ops. */
4726 switch ((u
<< 2) | size
) {
4728 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
4731 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
4734 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4737 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
4740 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
4743 tmp3
= neon_load_reg(rd
, pass
);
4744 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
4745 tcg_temp_free_i32(tmp3
);
4748 tmp3
= neon_load_reg(rd
, pass
);
4749 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
4750 tcg_temp_free_i32(tmp3
);
4753 tmp3
= neon_load_reg(rd
, pass
);
4754 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
4755 tcg_temp_free_i32(tmp3
);
4760 GEN_NEON_INTEGER_OP(hsub
);
4763 GEN_NEON_INTEGER_OP_ENV(qsub
);
4766 GEN_NEON_INTEGER_OP(cgt
);
4769 GEN_NEON_INTEGER_OP(cge
);
4772 GEN_NEON_INTEGER_OP(shl
);
4775 GEN_NEON_INTEGER_OP_ENV(qshl
);
4778 GEN_NEON_INTEGER_OP(rshl
);
4780 case NEON_3R_VQRSHL
:
4781 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4784 GEN_NEON_INTEGER_OP(max
);
4787 GEN_NEON_INTEGER_OP(min
);
4790 GEN_NEON_INTEGER_OP(abd
);
4793 GEN_NEON_INTEGER_OP(abd
);
4794 tcg_temp_free_i32(tmp2
);
4795 tmp2
= neon_load_reg(rd
, pass
);
4796 gen_neon_add(size
, tmp
, tmp2
);
4798 case NEON_3R_VADD_VSUB
:
4799 if (!u
) { /* VADD */
4800 gen_neon_add(size
, tmp
, tmp2
);
4803 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
4804 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
4805 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
4810 case NEON_3R_VTST_VCEQ
:
4811 if (!u
) { /* VTST */
4813 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
4814 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
4815 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
4820 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
4821 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
4822 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
4827 case NEON_3R_VML
: /* VMLA, VMLAL, VMLS,VMLSL */
4829 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4830 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4831 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4834 tcg_temp_free_i32(tmp2
);
4835 tmp2
= neon_load_reg(rd
, pass
);
4837 gen_neon_rsb(size
, tmp
, tmp2
);
4839 gen_neon_add(size
, tmp
, tmp2
);
4843 if (u
) { /* polynomial */
4844 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
4845 } else { /* Integer */
4847 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4848 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4849 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4855 GEN_NEON_INTEGER_OP(pmax
);
4858 GEN_NEON_INTEGER_OP(pmin
);
4860 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
4861 if (!u
) { /* VQDMULH */
4864 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4867 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4871 } else { /* VQRDMULH */
4874 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4877 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4885 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
4886 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
4887 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
4891 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
4893 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4894 switch ((u
<< 2) | size
) {
4897 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
4900 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
4903 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
4908 tcg_temp_free_ptr(fpstatus
);
4911 case NEON_3R_FLOAT_MULTIPLY
:
4913 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4914 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
4916 tcg_temp_free_i32(tmp2
);
4917 tmp2
= neon_load_reg(rd
, pass
);
4919 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
4921 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
4924 tcg_temp_free_ptr(fpstatus
);
4927 case NEON_3R_FLOAT_CMP
:
4929 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4931 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
4934 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
4936 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
4939 tcg_temp_free_ptr(fpstatus
);
4942 case NEON_3R_FLOAT_ACMP
:
4944 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4946 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
4948 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
4950 tcg_temp_free_ptr(fpstatus
);
4953 case NEON_3R_FLOAT_MINMAX
:
4955 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4957 gen_helper_neon_max_f32(tmp
, tmp
, tmp2
, fpstatus
);
4959 gen_helper_neon_min_f32(tmp
, tmp
, tmp2
, fpstatus
);
4961 tcg_temp_free_ptr(fpstatus
);
4964 case NEON_3R_VRECPS_VRSQRTS
:
4966 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
4968 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
4973 tcg_temp_free_i32(tmp2
);
4975 /* Save the result. For elementwise operations we can put it
4976 straight into the destination register. For pairwise operations
4977 we have to be careful to avoid clobbering the source operands. */
4978 if (pairwise
&& rd
== rm
) {
4979 neon_store_scratch(pass
, tmp
);
4981 neon_store_reg(rd
, pass
, tmp
);
4985 if (pairwise
&& rd
== rm
) {
4986 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4987 tmp
= neon_load_scratch(pass
);
4988 neon_store_reg(rd
, pass
, tmp
);
4991 /* End of 3 register same size operations. */
4992 } else if (insn
& (1 << 4)) {
4993 if ((insn
& 0x00380080) != 0) {
4994 /* Two registers and shift. */
4995 op
= (insn
>> 8) & 0xf;
4996 if (insn
& (1 << 7)) {
5004 while ((insn
& (1 << (size
+ 19))) == 0)
5007 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
5008 /* To avoid excessive dumplication of ops we implement shift
5009 by immediate using the variable shift operations. */
5011 /* Shift by immediate:
5012 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5013 if (q
&& ((rd
| rm
) & 1)) {
5016 if (!u
&& (op
== 4 || op
== 6)) {
5019 /* Right shifts are encoded as N - shift, where N is the
5020 element size in bits. */
5022 shift
= shift
- (1 << (size
+ 3));
5030 imm
= (uint8_t) shift
;
5035 imm
= (uint16_t) shift
;
5046 for (pass
= 0; pass
< count
; pass
++) {
5048 neon_load_reg64(cpu_V0
, rm
+ pass
);
5049 tcg_gen_movi_i64(cpu_V1
, imm
);
5054 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5056 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5061 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5063 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5066 case 5: /* VSHL, VSLI */
5067 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5069 case 6: /* VQSHLU */
5070 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
5075 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5078 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5083 if (op
== 1 || op
== 3) {
5085 neon_load_reg64(cpu_V1
, rd
+ pass
);
5086 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5087 } else if (op
== 4 || (op
== 5 && u
)) {
5089 neon_load_reg64(cpu_V1
, rd
+ pass
);
5091 if (shift
< -63 || shift
> 63) {
5095 mask
= 0xffffffffffffffffull
>> -shift
;
5097 mask
= 0xffffffffffffffffull
<< shift
;
5100 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
5101 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5103 neon_store_reg64(cpu_V0
, rd
+ pass
);
5104 } else { /* size < 3 */
5105 /* Operands in T0 and T1. */
5106 tmp
= neon_load_reg(rm
, pass
);
5107 tmp2
= tcg_temp_new_i32();
5108 tcg_gen_movi_i32(tmp2
, imm
);
5112 GEN_NEON_INTEGER_OP(shl
);
5116 GEN_NEON_INTEGER_OP(rshl
);
5119 case 5: /* VSHL, VSLI */
5121 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
5122 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
5123 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
5127 case 6: /* VQSHLU */
5130 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
5134 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
5138 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
5146 GEN_NEON_INTEGER_OP_ENV(qshl
);
5149 tcg_temp_free_i32(tmp2
);
5151 if (op
== 1 || op
== 3) {
5153 tmp2
= neon_load_reg(rd
, pass
);
5154 gen_neon_add(size
, tmp
, tmp2
);
5155 tcg_temp_free_i32(tmp2
);
5156 } else if (op
== 4 || (op
== 5 && u
)) {
5161 mask
= 0xff >> -shift
;
5163 mask
= (uint8_t)(0xff << shift
);
5169 mask
= 0xffff >> -shift
;
5171 mask
= (uint16_t)(0xffff << shift
);
5175 if (shift
< -31 || shift
> 31) {
5179 mask
= 0xffffffffu
>> -shift
;
5181 mask
= 0xffffffffu
<< shift
;
5187 tmp2
= neon_load_reg(rd
, pass
);
5188 tcg_gen_andi_i32(tmp
, tmp
, mask
);
5189 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
5190 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5191 tcg_temp_free_i32(tmp2
);
5193 neon_store_reg(rd
, pass
, tmp
);
5196 } else if (op
< 10) {
5197 /* Shift by immediate and narrow:
5198 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5199 int input_unsigned
= (op
== 8) ? !u
: u
;
5203 shift
= shift
- (1 << (size
+ 3));
5206 tmp64
= tcg_const_i64(shift
);
5207 neon_load_reg64(cpu_V0
, rm
);
5208 neon_load_reg64(cpu_V1
, rm
+ 1);
5209 for (pass
= 0; pass
< 2; pass
++) {
5217 if (input_unsigned
) {
5218 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
5220 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
5223 if (input_unsigned
) {
5224 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
5226 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
5229 tmp
= tcg_temp_new_i32();
5230 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5231 neon_store_reg(rd
, pass
, tmp
);
5233 tcg_temp_free_i64(tmp64
);
5236 imm
= (uint16_t)shift
;
5240 imm
= (uint32_t)shift
;
5242 tmp2
= tcg_const_i32(imm
);
5243 tmp4
= neon_load_reg(rm
+ 1, 0);
5244 tmp5
= neon_load_reg(rm
+ 1, 1);
5245 for (pass
= 0; pass
< 2; pass
++) {
5247 tmp
= neon_load_reg(rm
, 0);
5251 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
5254 tmp3
= neon_load_reg(rm
, 1);
5258 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
5260 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
5261 tcg_temp_free_i32(tmp
);
5262 tcg_temp_free_i32(tmp3
);
5263 tmp
= tcg_temp_new_i32();
5264 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5265 neon_store_reg(rd
, pass
, tmp
);
5267 tcg_temp_free_i32(tmp2
);
5269 } else if (op
== 10) {
5271 if (q
|| (rd
& 1)) {
5274 tmp
= neon_load_reg(rm
, 0);
5275 tmp2
= neon_load_reg(rm
, 1);
5276 for (pass
= 0; pass
< 2; pass
++) {
5280 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5283 /* The shift is less than the width of the source
5284 type, so we can just shift the whole register. */
5285 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
5286 /* Widen the result of shift: we need to clear
5287 * the potential overflow bits resulting from
5288 * left bits of the narrow input appearing as
5289 * right bits of left the neighbour narrow
5291 if (size
< 2 || !u
) {
5294 imm
= (0xffu
>> (8 - shift
));
5296 } else if (size
== 1) {
5297 imm
= 0xffff >> (16 - shift
);
5300 imm
= 0xffffffff >> (32 - shift
);
5303 imm64
= imm
| (((uint64_t)imm
) << 32);
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point.  */
                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
                    return 1;
                }
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                        gen_vfp_ulto(0, shift, 1);
                        gen_vfp_slto(0, shift, 1);
                        gen_vfp_toul(0, shift, 1);
                        gen_vfp_tosl(0, shift, 1);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
        } else { /* (insn & 0x00380080) == 0 */
            if (q && (rd & 1)) {
                return 1;
            }
            op = (insn >> 8) & 0xf;
            /* One register and immediate.  */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
            /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
             * We choose not to special-case this and will behave as if a
             * valid constant encoding of 0 had been given.
             */
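            /* The cases below expand the eight encoded immediate bits into
             * the 64-bit vector constant (the ARM ARM's AdvSIMDExpandImm
             * patterns); for the inverted forms the expanded value is
             * complemented before it is used.  */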
                imm = (imm << 8) | (imm << 24);
                imm = (imm << 8) | 0xff;
                imm = (imm << 16) | 0xffff;
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                if (op & 1 && op < 12) {
                    tmp = neon_load_reg(rd, pass);
                        /* The immediate value has already been inverted, so
                           BIC becomes AND.  */
                        tcg_gen_andi_i32(tmp, tmp, imm);
                        tcg_gen_ori_i32(tmp, tmp, imm);
                } else {
                    tmp = tcg_temp_new_i32();
                    if (op == 14 && invert) {
                        val = 0;
                        for (n = 0; n < 4; n++) {
                            if (imm & (1 << (n + (pass & 1) * 4)))
                                val |= 0xff << (n * 8);
                        }
                        tcg_gen_movi_i32(tmp, val);
                    } else {
                        tcg_gen_movi_i32(tmp, imm);
                    }
                }
                neon_store_reg(rd, pass, tmp);
            }
    } else { /* (insn & 0x00800010 == 0x00800000) */
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths.  */
                /* undefreq: bit 0 : UNDEF if size != 0
                 *           bit 1 : UNDEF if size == 0
                 *           bit 2 : UNDEF if U == 1
                 * Note that [1:0] set implies 'always UNDEF'
                 */
                /* prewiden, src1_wide, src2_wide, undefreq */
                static const int neon_3reg_wide[16][4] = {
                    {1, 0, 0, 0}, /* VADDL */
                    {1, 1, 0, 0}, /* VADDW */
                    {1, 0, 0, 0}, /* VSUBL */
                    {1, 1, 0, 0}, /* VSUBW */
                    {0, 1, 1, 0}, /* VADDHN */
                    {0, 0, 0, 0}, /* VABAL */
                    {0, 1, 1, 0}, /* VSUBHN */
                    {0, 0, 0, 0}, /* VABDL */
                    {0, 0, 0, 0}, /* VMLAL */
                    {0, 0, 0, 6}, /* VQDMLAL */
                    {0, 0, 0, 0}, /* VMLSL */
                    {0, 0, 0, 6}, /* VQDMLSL */
                    {0, 0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0, 2}, /* VQDMULL */
                    {0, 0, 0, 5}, /* Polynomial VMULL */
                    {0, 0, 0, 3}, /* Reserved: always UNDEF */
                };

                prewiden = neon_3reg_wide[op][0];
                src1_wide = neon_3reg_wide[op][1];
                src2_wide = neon_3reg_wide[op][2];
                undefreq = neon_3reg_wide[op][3];

                if (((undefreq & 1) && (size != 0)) ||
                    ((undefreq & 2) && (size == 0)) ||
                    ((undefreq & 4) && u)) {
                    return 1;
                }
                if ((src1_wide && (rn & 1)) ||
                    (src2_wide && (rm & 1)) ||
                    (!src2_wide && (rd & 1))) {
                    return 1;
                }
                /* Avoid overlapping operands.  Wide source operands are
                   always aligned so will never overlap with wide
                   destinations in problematic ways.  */
                if (rd == rm && !src2_wide) {
                    tmp = neon_load_reg(rm, 1);
                    neon_store_scratch(2, tmp);
                } else if (rd == rn && !src1_wide) {
                    tmp = neon_load_reg(rn, 1);
                    neon_store_scratch(2, tmp);
                }
                for (pass = 0; pass < 2; pass++) {
                        neon_load_reg64(cpu_V0, rn + pass);
                        if (pass == 1 && rd == rn) {
                            tmp = neon_load_scratch(2);
                        } else {
                            tmp = neon_load_reg(rn, pass);
                        }
                            gen_neon_widen(cpu_V0, tmp, size, u);
                        neon_load_reg64(cpu_V1, rm + pass);
                        if (pass == 1 && rd == rm) {
                            tmp2 = neon_load_scratch(2);
                        } else {
                            tmp2 = neon_load_reg(rm, pass);
                        }
                            gen_neon_widen(cpu_V1, tmp2, size, u);
                    case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
                        gen_neon_addl(size);
                        break;
                    case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
                        gen_neon_subl(size);
                        break;
                    case 5: case 7: /* VABAL, VABDL */
                        switch ((size << 1) | u) {
                        case 0: gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2); break;
                        case 1: gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2); break;
                        case 2: gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2); break;
                        case 3: gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2); break;
                        case 4: gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2); break;
                        case 5: gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2); break;
                        }
                        tcg_temp_free_i32(tmp2);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 8: case 9: case 10: case 11: case 12: case 13:
                        /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        break;
                    case 14: /* Polynomial VMULL */
                        gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        tcg_temp_free_i32(tmp);
                        break;
                    default: /* 15 is RESERVED: caught earlier  */
                        abort();
                    }
                        gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 5 || (op >= 8 && op <= 11)) {
                        neon_load_reg64(cpu_V1, rd + pass);
                        case 10: /* VMLSL */
                            gen_neon_negl(cpu_V0, size);
                        case 5: case 8: /* VABAL, VMLAL */
                            gen_neon_addl(size);
                            break;
                        case 9: case 11: /* VQDMLAL, VQDMLSL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                                gen_neon_negl(cpu_V0, size);
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 4 || op == 6) {
                        /* Narrowing operation.  */
                        tmp = tcg_temp_new_i32();
                                gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
                                gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
                                gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
                                tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                        neon_store_reg(rd, 0, tmp3);
                        neon_store_reg(rd, 1, tmp);
                    } else {
                        /* Write back the result.  */
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
            } else {
                /* Two registers and a scalar. NB that for ops of this form
                 * the ARM ARM labels bit 24 as Q, but it is in our variable
                 * 'u', not 'q'.
                 */
                case 1: /* Float VMLA scalar */
                case 5: /* Floating point VMLS scalar */
                case 9: /* Floating point VMUL scalar */
                case 0: /* Integer VMLA scalar */
                case 4: /* Integer VMLS scalar */
                case 8: /* Integer VMUL scalar */
                case 12: /* VQDMULH scalar */
                case 13: /* VQRDMULH scalar */
                    if (u && ((rd | rn) & 1)) {
                        return 1;
                    }
                    tmp = neon_get_scalar(size, rm);
                    neon_store_scratch(0, tmp);
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
                        tmp = neon_load_scratch(0);
                        tmp2 = neon_load_reg(rn, pass);
                                gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                                gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        } else if (op == 13) {
                                gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                                gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        } else if (op & 1) {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                        tcg_temp_free_i32(tmp2);
                            tmp2 = neon_load_reg(rd, pass);
                                gen_neon_add(size, tmp, tmp2);
                            {
                                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                                gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                                tcg_temp_free_ptr(fpstatus);
                            }
                                gen_neon_rsb(size, tmp, tmp2);
                            {
                                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                                gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                                tcg_temp_free_ptr(fpstatus);
                            }
                            tcg_temp_free_i32(tmp2);
                        neon_store_reg(rd, pass, tmp);
                    }
                    break;
                case 3: /* VQDMLAL scalar */
                case 7: /* VQDMLSL scalar */
                case 11: /* VQDMULL scalar */
                case 2: /* VMLAL scalar */
                case 6: /* VMLSL scalar */
                case 10: /* VMULL scalar */
                    tmp2 = neon_get_scalar(size, rm);
                    /* We need a copy of tmp2 because gen_neon_mull
                     * deletes it during pass 0.  */
                    tmp4 = tcg_temp_new_i32();
                    tcg_gen_mov_i32(tmp4, tmp2);
                    tmp3 = neon_load_reg(rn, 1);
                    for (pass = 0; pass < 2; pass++) {
                            tmp = neon_load_reg(rn, 0);
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                            neon_load_reg64(cpu_V1, rd + pass);
                                gen_neon_negl(cpu_V0, size);
                                gen_neon_addl(size);
                                gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                                    gen_neon_negl(cpu_V0, size);
                                gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                                gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                default: /* 14 and 15 are RESERVED */
                    return 1;
                }
        } else { /* size == 3 */
                imm = (insn >> 8) & 0xf;
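                /* VEXT: the result is the concatenation Vm:Vn (Vn in the
                 * least significant position) shifted right by imm bytes,
                 * keeping the bottom 8 (or 16, for Q) bytes; the 64-bit
                 * shift/or sequences below implement that extraction.  */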
                if (q && ((rd | rn | rm) & 1)) {
                    return 1;
                }
                if (imm == 0) {
                    neon_load_reg64(cpu_V0, rn);
                        neon_load_reg64(cpu_V1, rn + 1);
                } else if (imm == 8) {
                    neon_load_reg64(cpu_V0, rn + 1);
                        neon_load_reg64(cpu_V1, rm);
                } else if (q) {
                    tmp64 = tcg_temp_new_i64();
                    if (imm < 8) {
                        neon_load_reg64(cpu_V0, rn);
                        neon_load_reg64(tmp64, rn + 1);
                    } else {
                        neon_load_reg64(cpu_V0, rn + 1);
                        neon_load_reg64(tmp64, rm);
                    }
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
                    tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                    if (imm < 8) {
                        neon_load_reg64(cpu_V1, rm);
                    } else {
                        neon_load_reg64(cpu_V1, rm + 1);
                        imm -= 8;
                    }
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
                    tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
                    tcg_temp_free_i64(tmp64);
                } else {
                    neon_load_reg64(cpu_V0, rn);
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
                    neon_load_reg64(cpu_V1, rm);
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                }
                neon_store_reg64(cpu_V0, rd);
                    neon_store_reg64(cpu_V1, rd + 1);
        } else if ((insn & (1 << 11)) == 0) {
            /* Two register misc.  */
            op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
            size = (insn >> 18) & 3;
            /* UNDEF for unknown op values and bad op-size combinations */
            if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
                return 1;
            }
            if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
                q && ((rm | rd) & 1)) {
                return 1;
            }
            switch (op) {
            case NEON_2RM_VREV64:
                for (pass = 0; pass < (q ? 2 : 1); pass++) {
                    tmp = neon_load_reg(rm, pass * 2);
                    tmp2 = neon_load_reg(rm, pass * 2 + 1);
                    case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                    case 1: gen_swap_half(tmp); break;
                    case 2: /* no-op */ break;
                    neon_store_reg(rd, pass * 2 + 1, tmp);
                        neon_store_reg(rd, pass * 2, tmp2);
                        case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
                        case 1: gen_swap_half(tmp2); break;
                        neon_store_reg(rd, pass * 2, tmp2);
                }
                break;
            case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
            case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
                for (pass = 0; pass < q + 1; pass++) {
                    tmp = neon_load_reg(rm, pass * 2);
                    gen_neon_widen(cpu_V0, tmp, size, op & 1);
                    tmp = neon_load_reg(rm, pass * 2 + 1);
                    gen_neon_widen(cpu_V1, tmp, size, op & 1);
                    switch (size) {
                    case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
                    case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
                    case 2: tcg_gen_add_i64(CPU_V001); break;
                    }
                    if (op >= NEON_2RM_VPADAL) {
                        neon_load_reg64(cpu_V1, rd + pass);
                        gen_neon_addl(size);
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
                break;
                for (n = 0; n < (q ? 4 : 2); n += 2) {
                    tmp = neon_load_reg(rm, n);
                    tmp2 = neon_load_reg(rd, n + 1);
                    neon_store_reg(rm, n, tmp2);
                    neon_store_reg(rd, n + 1, tmp);
                }
                if (gen_neon_unzip(rd, rm, size, q)) {
                    return 1;
                }
                if (gen_neon_zip(rd, rm, size, q)) {
                    return 1;
                }
            case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
                /* also VQMOVUN; op field and mnemonics don't line up */
                for (pass = 0; pass < 2; pass++) {
                    neon_load_reg64(cpu_V0, rm + pass);
                    tmp = tcg_temp_new_i32();
                    gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
                                       tmp, cpu_V0);
                }
                neon_store_reg(rd, 0, tmp2);
                neon_store_reg(rd, 1, tmp);
                break;
            case NEON_2RM_VSHLL:
                if (q || (rd & 1)) {
                    return 1;
                }
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    gen_neon_widen(cpu_V0, tmp, size, 1);
                    tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
                    neon_store_reg64(cpu_V0, rd + pass);
                }
                break;
            case NEON_2RM_VCVT_F16_F32:
                if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
                    q || (rm & 1)) {
                    return 1;
                }
                tmp = tcg_temp_new_i32();
                tmp2 = tcg_temp_new_i32();
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
                gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
                gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                tcg_gen_shli_i32(tmp2, tmp2, 16);
                tcg_gen_or_i32(tmp2, tmp2, tmp);
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
                gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
                neon_store_reg(rd, 0, tmp2);
                tmp2 = tcg_temp_new_i32();
                gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                tcg_gen_shli_i32(tmp2, tmp2, 16);
                tcg_gen_or_i32(tmp2, tmp2, tmp);
                neon_store_reg(rd, 1, tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case NEON_2RM_VCVT_F32_F16:
                if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
                    q || (rd & 1)) {
                    return 1;
                }
                tmp3 = tcg_temp_new_i32();
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                tcg_gen_ext16u_i32(tmp3, tmp);
                gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_shri_i32(tmp3, tmp, 16);
                gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
                tcg_temp_free_i32(tmp);
                tcg_gen_ext16u_i32(tmp3, tmp2);
                gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
                tcg_gen_shri_i32(tmp3, tmp2, 16);
                gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp3);
                break;
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    if (neon_2rm_is_float_op(op)) {
                        tcg_gen_ld_f32(cpu_F0s, cpu_env,
                                       neon_reg_offset(rm, pass));
                    } else {
                        tmp = neon_load_reg(rm, pass);
                    }
                    switch (op) {
                    case NEON_2RM_VREV32:
                        case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                        case 1: gen_swap_half(tmp); break;
                        break;
                    case NEON_2RM_VREV16:
                        break;
                        case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
                        case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
                        case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
                        case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
                        case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
                        case 2: gen_helper_clz(tmp, tmp); break;
                        gen_helper_neon_cnt_u8(tmp, tmp);
                        tcg_gen_not_i32(tmp, tmp);
                    case NEON_2RM_VQABS:
                            gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
                            gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
                            gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
                        break;
                    case NEON_2RM_VQNEG:
                            gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
                            gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
                            gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
                        break;
                    case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
                        tmp2 = tcg_const_i32(0);
                        case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
                        case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
                        case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
                        tcg_temp_free(tmp2);
                        if (op == NEON_2RM_VCLE0) {
                            tcg_gen_not_i32(tmp, tmp);
                        }
                        break;
                    case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
                        tmp2 = tcg_const_i32(0);
                        case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
                        case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
                        case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
                        tcg_temp_free(tmp2);
                        if (op == NEON_2RM_VCLT0) {
                            tcg_gen_not_i32(tmp, tmp);
                        }
                        break;
                    case NEON_2RM_VCEQ0:
                        tmp2 = tcg_const_i32(0);
                        case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                        case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                        case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                        tcg_temp_free(tmp2);
                        break;
                        case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
                        case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
                        case 2: tcg_gen_abs_i32(tmp, tmp); break;
                        tmp2 = tcg_const_i32(0);
                        gen_neon_rsb(size, tmp, tmp2);
                        tcg_temp_free(tmp2);
                    case NEON_2RM_VCGT0_F:
                    {
                        TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                        tmp2 = tcg_const_i32(0);
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                        tcg_temp_free(tmp2);
                        tcg_temp_free_ptr(fpstatus);
                        break;
                    }
                    case NEON_2RM_VCGE0_F:
                    {
                        TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                        tmp2 = tcg_const_i32(0);
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                        tcg_temp_free(tmp2);
                        tcg_temp_free_ptr(fpstatus);
                        break;
                    }
                    case NEON_2RM_VCEQ0_F:
                    {
                        TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                        tmp2 = tcg_const_i32(0);
                        gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                        tcg_temp_free(tmp2);
                        tcg_temp_free_ptr(fpstatus);
                        break;
                    }
                    case NEON_2RM_VCLE0_F:
                    {
                        TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                        tmp2 = tcg_const_i32(0);
                        gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
                        tcg_temp_free(tmp2);
                        tcg_temp_free_ptr(fpstatus);
                        break;
                    }
                    case NEON_2RM_VCLT0_F:
                    {
                        TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                        tmp2 = tcg_const_i32(0);
                        gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
                        tcg_temp_free(tmp2);
                        tcg_temp_free_ptr(fpstatus);
                        break;
                    }
                    case NEON_2RM_VABS_F:
                        break;
                    case NEON_2RM_VNEG_F:
                        break;
                        tmp2 = neon_load_reg(rd, pass);
                        neon_store_reg(rm, pass, tmp2);
                        tmp2 = neon_load_reg(rd, pass);
                        case 0: gen_neon_trn_u8(tmp, tmp2); break;
                        case 1: gen_neon_trn_u16(tmp, tmp2); break;
                        neon_store_reg(rm, pass, tmp2);
                    case NEON_2RM_VRECPE:
                        gen_helper_recpe_u32(tmp, tmp, cpu_env);
                        break;
                    case NEON_2RM_VRSQRTE:
                        gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
                        break;
                    case NEON_2RM_VRECPE_F:
                        gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
                        break;
                    case NEON_2RM_VRSQRTE_F:
                        gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
                        break;
                    case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
                        break;
                    case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
                        break;
                    case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
                        gen_vfp_tosiz(0, 1);
                        break;
                    case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
                        gen_vfp_touiz(0, 1);
                        break;
                    default:
                        /* Reserved op values were caught by the
                         * neon_2rm_sizes[] check earlier.
                         */
                        abort();
                    }
                    if (neon_2rm_is_float_op(op)) {
                        tcg_gen_st_f32(cpu_F0s, cpu_env,
                                       neon_reg_offset(rd, pass));
                    } else {
                        neon_store_reg(rd, pass, tmp);
                    }
                }
            }
        } else if ((insn & (1 << 10)) == 0) {
            /* VTBL, VTBX.  */
            int n = ((insn >> 8) & 3) + 1;
            if ((rn + n) > 32) {
                /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
                 * helper function running off the end of the register file.
                 */
                return 1;
            }
            if (insn & (1 << 6)) {
                tmp = neon_load_reg(rd, 0);
            } else {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            }
            tmp2 = neon_load_reg(rm, 0);
            tmp4 = tcg_const_i32(rn);
            tmp5 = tcg_const_i32(n);
            gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
            tcg_temp_free_i32(tmp);
            if (insn & (1 << 6)) {
                tmp = neon_load_reg(rd, 1);
            } else {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            }
            tmp3 = neon_load_reg(rm, 1);
            gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
            tcg_temp_free_i32(tmp5);
            tcg_temp_free_i32(tmp4);
            neon_store_reg(rd, 0, tmp2);
            neon_store_reg(rd, 1, tmp3);
            tcg_temp_free_i32(tmp);
        } else if ((insn & 0x380) == 0) {
            /* VDUP */
            if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
                return 1;
            }
            if (insn & (1 << 19)) {
                tmp = neon_load_reg(rm, 1);
            } else {
                tmp = neon_load_reg(rm, 0);
            }
            if (insn & (1 << 16)) {
                gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
            } else if (insn & (1 << 17)) {
                if ((insn >> 18) & 1)
                    gen_neon_dup_high16(tmp);
                else
                    gen_neon_dup_low16(tmp);
            }
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp2 = tcg_temp_new_i32();
                tcg_gen_mov_i32(tmp2, tmp);
                neon_store_reg(rd, pass, tmp2);
            }
            tcg_temp_free_i32(tmp);

static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    /* Minimal set of debug registers, since we don't support debug */
    if (op1 == 0 && crn == 0 && op2 == 0) {
            /* DBGDIDR: just RAZ. In particular this means the
             * "debug architecture version" bits will read as
             * a reserved value, which should cause Linux to
             * not try to use the debug hardware.
             */
            tmp = tcg_const_i32(0);
            store_reg(s, rt, tmp);
            return 0;
            /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
             * don't implement memory mapped debug components
             */
            if (ENABLE_ARCH_7) {
                tmp = tcg_const_i32(0);
                store_reg(s, rt, tmp);
                return 0;
            }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            tmp = load_cpu_field(teecr);
            store_reg(s, rt, tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_cpu_field(teehbr);
            store_reg(s, rt, tmp);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}

static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            tmp = load_reg(s, rt);
            gen_helper_set_teecr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_reg(s, rt);
            store_cpu_field(tmp, teehbr);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}

static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int cpnum;

    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
            && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;

        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(env, s, insn);
        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(env, s, insn);
        }
        return disas_vfp_insn (env, s, insn);
        /* Coprocessors 7-15 are architecturally reserved by ARM.
           Unfortunately Intel decided to ignore this.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
        if (insn & (1 << 20))
            return disas_cp14_read(env, s, insn);
        else
            return disas_cp14_write(env, s, insn);
        return disas_cp15_insn (env, s, insn);
        /* Unknown coprocessor.  See if the board has hooked it.  */
        return disas_cp_insn (env, s, insn);
}

/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}

/* load a 32-bit value from a register and perform a 64-bit accumulate.  */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* load and add a 64-bit value from a register pair.  */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv tmpl;
    TCGv tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* Set N and Z flags from a 64-bit value.  */
static void gen_logicq_cc(TCGv_i64 val)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_logicq_cc(tmp, val);
    gen_logic_CC(tmp);
    tcg_temp_free_i32(tmp);
}

/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

        tmp = gen_ld8u(addr, IS_USER(s));
        tmp = gen_ld16u(addr, IS_USER(s));
        tmp = gen_ld32(addr, IS_USER(s));
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}

static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
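    /* -1 marks "no outstanding exclusive access"; it is assumed never to
     * compare equal to the address recorded by a real (aligned)
     * load-exclusive, so a later store-exclusive must fail.  */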
}

#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
        tmp = gen_ld8u(addr, IS_USER(s));
        tmp = gen_ld16u(addr, IS_USER(s));
        tmp = gen_ld32(addr, IS_USER(s));
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    tmp = load_reg(s, rt);
        gen_st8(tmp, addr, IS_USER(s));
        gen_st16(tmp, addr, IS_USER(s));
        gen_st32(tmp, addr, IS_USER(s));
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
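    /* Whether the store-exclusive succeeded or failed, the exclusive
     * monitor is cleared once the attempt completes.  */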
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif
static void disas_arm_insn(CPUState * env, DisasContext *s)
{
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;

    insn = ldl_code(s->pc);

    /* M variants do not implement ARM mode.  */
        /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
         * choose to UNDEF. In ARMv5 and above the space is used
         * for miscellaneous unconditional instructions.
         */
        /* Unconditional instructions.  */
        if (((insn >> 25) & 7) == 1) {
            /* NEON Data processing.  */
            if (!arm_feature(env, ARM_FEATURE_NEON))
                goto illegal_op;
            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
        }
        if ((insn & 0x0f100000) == 0x04000000) {
            /* NEON load/store.  */
            if (!arm_feature(env, ARM_FEATURE_NEON))
                goto illegal_op;
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
        }
        if (((insn & 0x0f30f000) == 0x0510f000) ||
            ((insn & 0x0f30f010) == 0x0710f000)) {
            if ((insn & (1 << 22)) == 0) {
                if (!arm_feature(env, ARM_FEATURE_V7MP)) {
                    goto illegal_op;
                }
            }
            /* Otherwise PLD; v5TE+ */
        }
        if (((insn & 0x0f70f000) == 0x0450f000) ||
            ((insn & 0x0f70f010) == 0x0650f000)) {
            return; /* PLI; V7 */
        }
        if (((insn & 0x0f700000) == 0x04100000) ||
            ((insn & 0x0f700010) == 0x06100000)) {
            if (!arm_feature(env, ARM_FEATURE_V7MP)) {
                goto illegal_op;
            }
            return; /* v7MP: Unallocated memory hint: must NOP */
        }
        if ((insn & 0x0ffffdff) == 0x01010000) {
            if (insn & (1 << 9)) {
                /* BE8 mode not implemented.  */
                goto illegal_op;
            }
        } else if ((insn & 0x0fffff00) == 0x057ff000) {
            switch ((insn >> 4) & 0xf) {
                /* We don't emulate caches so these are a no-op.  */
            }
        } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
            op1 = (insn & 0x1f);
            addr = tcg_temp_new_i32();
            tmp = tcg_const_i32(op1);
            gen_helper_get_r13_banked(addr, cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            i = (insn >> 23) & 3;
            switch (i) {
            case 0: offset = -4; break; /* DA */
            case 1: offset = 0; break; /* IA */
            case 2: offset = -8; break; /* DB */
            case 3: offset = 4; break; /* IB */
            }
            tcg_gen_addi_i32(addr, addr, offset);
            tmp = load_reg(s, 14);
            gen_st32(tmp, addr, 0);
            tmp = load_cpu_field(spsr);
            tcg_gen_addi_i32(addr, addr, 4);
            gen_st32(tmp, addr, 0);
            if (insn & (1 << 21)) {
                /* Base writeback.  */
                switch (i) {
                case 0: offset = -8; break;
                case 1: offset = 4; break;
                case 2: offset = -4; break;
                case 3: offset = 0; break;
                }
                tcg_gen_addi_i32(addr, addr, offset);
                tmp = tcg_const_i32(op1);
                gen_helper_set_r13_banked(cpu_env, tmp, addr);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
            } else {
                tcg_temp_free_i32(addr);
            }
        } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
            rn = (insn >> 16) & 0xf;
            addr = load_reg(s, rn);
            i = (insn >> 23) & 3;
            switch (i) {
            case 0: offset = -4; break; /* DA */
            case 1: offset = 0; break; /* IA */
            case 2: offset = -8; break; /* DB */
            case 3: offset = 4; break; /* IB */
            }
            tcg_gen_addi_i32(addr, addr, offset);
            /* Load PC into tmp and CPSR into tmp2.  */
            tmp = gen_ld32(addr, 0);
            tcg_gen_addi_i32(addr, addr, 4);
            tmp2 = gen_ld32(addr, 0);
            if (insn & (1 << 21)) {
                /* Base writeback.  */
                switch (i) {
                case 0: offset = -8; break;
                case 1: offset = 4; break;
                case 2: offset = -4; break;
                case 3: offset = 0; break;
                }
                tcg_gen_addi_i32(addr, addr, offset);
                store_reg(s, rn, addr);
            } else {
                tcg_temp_free_i32(addr);
            }
            gen_rfe(s, tmp, tmp2);
        } else if ((insn & 0x0e000000) == 0x0a000000) {
            /* branch link and change to thumb (blx <offset>) */
            val = (uint32_t)s->pc;
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, val);
            store_reg(s, 14, tmp);
            /* Sign-extend the 24-bit offset */
            offset = (((int32_t)insn) << 8) >> 8;
            /* offset * 4 + bit24 * 2 + (thumb bit) */
            val += (offset << 2) | ((insn >> 23) & 2) | 1;
            /* pipeline offset */
            /* protected by ARCH(5); above, near the start of uncond block */
        } else if ((insn & 0x0e000f00) == 0x0c000100) {
            if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
                /* iWMMXt register transfer.  */
                if (env->cp15.c15_cpar & (1 << 1))
                    if (!disas_iwmmxt_insn(env, s, insn))
                        return;
            }
        } else if ((insn & 0x0fe00000) == 0x0c400000) {
            /* Coprocessor double register transfer.  */
        } else if ((insn & 0x0f000010) == 0x0e000010) {
            /* Additional coprocessor register transfer.  */
        } else if ((insn & 0x0ff10020) == 0x01000000) {
            /* cps (privileged) */
            if (insn & (1 << 19)) {
                if (insn & (1 << 8))
                    mask |= CPSR_A;
                if (insn & (1 << 7))
                    mask |= CPSR_I;
                if (insn & (1 << 6))
                    mask |= CPSR_F;
                if (insn & (1 << 18))
                    val |= mask;
            }
            if (insn & (1 << 17)) {
                val |= (insn & 0x1f);
            }
            gen_set_psr_im(s, mask, 0, val);
        }
        /* if not always execute, we generate a conditional jump to
           the next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
    if ((insn & 0x0f900000) == 0x03000000) {
        if ((insn & (1 << 21)) == 0) {
            rd = (insn >> 12) & 0xf;
            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
            if ((insn & (1 << 22)) == 0) {
                /* MOVW */
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, val);
            } else {
                /* MOVT */
                tmp = load_reg(s, rd);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_ori_i32(tmp, tmp, val << 16);
            }
            store_reg(s, rd, tmp);
        } else {
            if (((insn >> 12) & 0xf) != 0xf)
                goto illegal_op;
            if (((insn >> 16) & 0xf) == 0) {
                gen_nop_hint(s, insn & 0xff);
            } else {
                /* CPSR = immediate */
                shift = ((insn >> 8) & 0xf) * 2;
                val = (val >> shift) | (val << (32 - shift));
                i = ((insn & (1 << 22)) != 0);
                if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
                    goto illegal_op;
            }
        }
    } else if ((insn & 0x0f900000) == 0x01000000
               && (insn & 0x00000090) != 0x00000090) {
        /* miscellaneous instructions */
        op1 = (insn >> 21) & 3;
        sh = (insn >> 4) & 0xf;
        case 0x0: /* move program status register */
                tmp = load_reg(s, rm);
                i = ((op1 & 2) != 0);
                if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
                    goto illegal_op;
                rd = (insn >> 12) & 0xf;
                    tmp = load_cpu_field(spsr);
                    tmp = tcg_temp_new_i32();
                    gen_helper_cpsr_read(tmp);
                store_reg(s, rd, tmp);
            break;
                /* branch/exchange thumb (bx).  */
                tmp = load_reg(s, rm);
                gen_bx(s, tmp);
            } else if (op1 == 3) {
                rd = (insn >> 12) & 0xf;
                tmp = load_reg(s, rm);
                gen_helper_clz(tmp, tmp);
                store_reg(s, rd, tmp);
            }
            break;
                /* Trivial implementation equivalent to bx.  */
                tmp = load_reg(s, rm);
                gen_bx(s, tmp);
            break;
            /* branch link/exchange thumb (blx) */
            tmp = load_reg(s, rm);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        case 0x5: /* saturating add/subtract */
            rd = (insn >> 12) & 0xf;
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rn);
            if (op1 & 2)
                gen_helper_double_saturate(tmp2, tmp2);
            if (op1 & 1)
                gen_helper_sub_saturate(tmp, tmp, tmp2);
            else
                gen_helper_add_saturate(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
            /* SMC instruction (op1 == 3)
               and undefined instructions (op1 == 0 || op1 == 2)
               will trap */
            gen_exception_insn(s, 4, EXCP_BKPT);
            break;
        case 0x8: /* signed multiply */
            rs = (insn >> 8) & 0xf;
            rn = (insn >> 12) & 0xf;
            rd = (insn >> 16) & 0xf;
            if (op1 == 1) {
                /* (32 * 16) >> 16 */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                if (sh & 4)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if ((sh & 2) == 0) {
                    tmp2 = load_reg(s, rn);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                store_reg(s, rd, tmp);
            } else {
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
                tcg_temp_free_i32(tmp2);
                if (op1 == 2) {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ext_i32_i64(tmp64, tmp);
                    tcg_temp_free_i32(tmp);
                    gen_addq(s, tmp64, rn, rd);
                    gen_storeq_reg(s, rn, rd, tmp64);
                    tcg_temp_free_i64(tmp64);
                } else {
                        tmp2 = load_reg(s, rn);
                        gen_helper_add_setq(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                }
            }
            break;
    } else if (((insn & 0x0e000000) == 0 &&
                (insn & 0x00000090) != 0x90) ||
               ((insn & 0x0e000000) == (1 << 25))) {
        int set_cc, logic_cc, shiftop;

        op1 = (insn >> 21) & 0xf;
        set_cc = (insn >> 20) & 1;
        logic_cc = table_logic_cc[op1] & set_cc;

        /* data processing instruction */
        if (insn & (1 << 25)) {
            /* immediate operand */
            shift = ((insn >> 8) & 0xf) * 2;
            val = (val >> shift) | (val << (32 - shift));
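            /* An ARM data-processing immediate is an 8-bit value rotated
             * right by twice the 4-bit rotate field; the shift/or pair
             * above performs that rotation at translation time.  */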
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, val);
            if (logic_cc && shift) {
                gen_set_CF_bit31(tmp2);
            }
        } else {
            tmp2 = load_reg(s, rm);
            shiftop = (insn >> 5) & 3;
            if (!(insn & (1 << 4))) {
                shift = (insn >> 7) & 0x1f;
                gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            } else {
                rs = (insn >> 8) & 0xf;
                tmp = load_reg(s, rs);
                gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
            }
        }
        if (op1 != 0x0f && op1 != 0x0d) {
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rn);
        }
        rd = (insn >> 12) & 0xf;
            tcg_gen_and_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            if (set_cc && rd == 15) {
                /* SUBS r15, ... is used for exception return.  */
                gen_helper_sub_cc(tmp, tmp, tmp2);
                gen_exception_return(s, tmp);
            } else {
                if (set_cc)
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                else
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                store_reg_bx(env, s, rd, tmp);
            }
            if (set_cc)
                gen_helper_sub_cc(tmp, tmp2, tmp);
            else
                tcg_gen_sub_i32(tmp, tmp2, tmp);
            store_reg_bx(env, s, rd, tmp);
            if (set_cc)
                gen_helper_add_cc(tmp, tmp, tmp2);
            else
                tcg_gen_add_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            if (set_cc)
                gen_helper_adc_cc(tmp, tmp, tmp2);
            else
                gen_add_carry(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            if (set_cc)
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            else
                gen_sub_carry(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            if (set_cc)
                gen_helper_sbc_cc(tmp, tmp2, tmp);
            else
                gen_sub_carry(tmp, tmp2, tmp);
            store_reg_bx(env, s, rd, tmp);
            if (set_cc) {
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp);
            if (set_cc) {
                tcg_gen_xor_i32(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp);
            if (set_cc) {
                gen_helper_sub_cc(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp);
            if (set_cc) {
                gen_helper_add_cc(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            if (logic_cc && rd == 15) {
                /* MOVS r15, ... is used for exception return.  */
                gen_exception_return(s, tmp2);
            } else {
                store_reg_bx(env, s, rd, tmp2);
            }
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
            tcg_gen_not_i32(tmp2, tmp2);
            store_reg_bx(env, s, rd, tmp2);
        if (op1 != 0x0f && op1 != 0x0d) {
            tcg_temp_free_i32(tmp2);
        }
    } else {
        /* other instructions */
        op1 = (insn >> 24) & 0xf;
            /* multiplies, extra load/stores */
            sh = (insn >> 5) & 3;
                rd = (insn >> 16) & 0xf;
                rn = (insn >> 12) & 0xf;
                rs = (insn >> 8) & 0xf;
                op1 = (insn >> 20) & 0xf;
                case 0: case 1: case 2: case 3: case 6:
                    tmp = load_reg(s, rs);
                    tmp2 = load_reg(s, rm);
                    tcg_gen_mul_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    if (insn & (1 << 22)) {
                        /* Subtract (mls) */
                        tmp2 = load_reg(s, rn);
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                        tcg_temp_free_i32(tmp2);
                    } else if (insn & (1 << 21)) {
                        tmp2 = load_reg(s, rn);
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                    }
                    if (insn & (1 << 20))
                        gen_logic_CC(tmp);
                    store_reg(s, rd, tmp);
                    break;
                    /* 64 bit mul double accumulate (UMAAL) */
                    tmp = load_reg(s, rs);
                    tmp2 = load_reg(s, rm);
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                    gen_addq_lo(s, tmp64, rn);
                    gen_addq_lo(s, tmp64, rd);
                    gen_storeq_reg(s, rn, rd, tmp64);
                    tcg_temp_free_i64(tmp64);
                    break;
                case 8: case 9: case 10: case 11:
                case 12: case 13: case 14: case 15:
                    /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
                    tmp = load_reg(s, rs);
                    tmp2 = load_reg(s, rm);
                    if (insn & (1 << 22)) {
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    } else {
                        tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                    }
                    if (insn & (1 << 21)) { /* mult accumulate */
                        gen_addq(s, tmp64, rn, rd);
                    }
                    if (insn & (1 << 20)) {
                        gen_logicq_cc(tmp64);
                    }
                    gen_storeq_reg(s, rn, rd, tmp64);
                    tcg_temp_free_i64(tmp64);
                    break;
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                if (insn & (1 << 23)) {
                    /* load/store exclusive */
                    op1 = (insn >> 21) & 0x3;
                    addr = tcg_temp_local_new_i32();
                    load_reg_var(s, addr, rn);
                    if (insn & (1 << 20)) {
                            gen_load_exclusive(s, rd, 15, addr, 2);
                        case 1: /* ldrexd */
                            gen_load_exclusive(s, rd, rd + 1, addr, 3);
                            break;
                        case 2: /* ldrexb */
                            gen_load_exclusive(s, rd, 15, addr, 0);
                            break;
                        case 3: /* ldrexh */
                            gen_load_exclusive(s, rd, 15, addr, 1);
                            break;
                    } else {
                            gen_store_exclusive(s, rd, rm, 15, addr, 2);
                        case 1: /* strexd */
                            gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
                            break;
                        case 2: /* strexb */
                            gen_store_exclusive(s, rd, rm, 15, addr, 0);
                            break;
                        case 3: /* strexh */
                            gen_store_exclusive(s, rd, rm, 15, addr, 1);
                            break;
                    }
                    tcg_temp_free(addr);
                } else {
                    /* SWP instruction */
                    /* ??? This is not really atomic.  However we know
                       we never have multiple CPUs running in parallel,
                       so it is good enough.  */
                    addr = load_reg(s, rn);
                    tmp = load_reg(s, rm);
                    if (insn & (1 << 22)) {
                        tmp2 = gen_ld8u(addr, IS_USER(s));
                        gen_st8(tmp, addr, IS_USER(s));
                    } else {
                        tmp2 = gen_ld32(addr, IS_USER(s));
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    tcg_temp_free_i32(addr);
                    store_reg(s, rd, tmp2);
                }
            } else {
                /* Misc load/store */
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                addr = load_reg(s, rn);
                if (insn & (1 << 24))
                    gen_add_datah_offset(s, insn, 0, addr);
                if (insn & (1 << 20)) {
                        tmp = gen_ld16u(addr, IS_USER(s));
                        tmp = gen_ld8s(addr, IS_USER(s));
                        tmp = gen_ld16s(addr, IS_USER(s));
                } else if (sh & 2) {
                        tmp = load_reg(s, rd);
                        gen_st32(tmp, addr, IS_USER(s));
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = load_reg(s, rd + 1);
                        gen_st32(tmp, addr, IS_USER(s));
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, rd, tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = gen_ld32(addr, IS_USER(s));
                    address_offset = -4;
                } else {
                    tmp = load_reg(s, rd);
                    gen_st16(tmp, addr, IS_USER(s));
                }
                /* Perform base writeback before the loaded value to
                   ensure correct behavior with overlapping index registers.
                   ldrd with base writeback is undefined if the
                   destination and index registers overlap.  */
                if (!(insn & (1 << 24))) {
                    gen_add_datah_offset(s, insn, address_offset, addr);
                    store_reg(s, rn, addr);
                } else if (insn & (1 << 21)) {
                    tcg_gen_addi_i32(addr, addr, address_offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                /* Complete the load.  */
                store_reg(s, rd, tmp);
            }
            if (insn & (1 << 4)) {
                /* ARMv6 Media instructions.  */
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                rs = (insn >> 8) & 0xf;
                switch ((insn >> 23) & 3) {
                case 0: /* Parallel add/subtract.  */
                    op1 = (insn >> 20) & 7;
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    sh = (insn >> 5) & 7;
                    if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                        goto illegal_op;
                    gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                    break;
                case 1:
                    if ((insn & 0x00700020) == 0) {
                        /* Halfword pack.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            tcg_gen_sari_i32(tmp2, tmp2, shift);
                            tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                            tcg_gen_ext16u_i32(tmp2, tmp2);
                        } else {
                            tcg_gen_shli_i32(tmp2, tmp2, shift);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        }
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00200020) == 0x00200000) {
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            tcg_gen_sari_i32(tmp, tmp, shift);
                        } else {
                            tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat(tmp, tmp, tmp2);
                        else
                            gen_helper_ssat(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00300fe0) == 0x00200f20) {
                        tmp = load_reg(s, rm);
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat16(tmp, tmp, tmp2);
                        else
                            gen_helper_ssat16(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        tmp3 = tcg_temp_new_i32();
                        tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
                        gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                        tcg_temp_free_i32(tmp3);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x000003e0) == 0x00000060) {
                        tmp = load_reg(s, rm);
                        shift = (insn >> 10) & 3;
                        /* ??? In many cases it's not necessary to do a
                           rotate, a shift is sufficient.  */
                        tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                        op1 = (insn >> 20) & 7;
                        switch (op1) {
                        case 0: gen_sxtb16(tmp); break;
                        case 2: gen_sxtb(tmp); break;
                        case 3: gen_sxth(tmp); break;
                        case 4: gen_uxtb16(tmp); break;
                        case 6: gen_uxtb(tmp); break;
                        case 7: gen_uxth(tmp); break;
                        default: goto illegal_op;
                        }
                        tmp2 = load_reg(s, rn);
                        if ((op1 & 3) == 0) {
                            gen_add16(tmp, tmp2);
                        } else {
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                        tmp = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            if (insn & (1 << 7)) {
                                gen_revsh(tmp);
                            } else {
                                gen_helper_rbit(tmp, tmp);
                            }
                        } else {
                            if (insn & (1 << 7))
                                gen_rev16(tmp);
                            else
                                tcg_gen_bswap32_i32(tmp, tmp);
                        }
                        store_reg(s, rd, tmp);
                    }
                    break;
                case 2: /* Multiplies (Type 3).  */
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        /* Signed multiply most significant [accumulate].
                           (SMMUL, SMMLA, SMMLS) */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                            tmp = load_reg(s, rd);
                            if (insn & (1 << 6)) {
                                tmp64 = gen_subq_msw(tmp64, tmp);
                            } else {
                                tmp64 = gen_addq_msw(tmp64, tmp);
                            }
                        if (insn & (1 << 5)) {
                            tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                        }
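                        /* Adding 0x80000000 before discarding the low 32
                         * bits rounds the result to nearest (the 'R'
                         * forms, e.g. SMMULR/SMMLAR).  */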
                        tcg_gen_shri_i64(tmp64, tmp64, 32);
                        tmp = tcg_temp_new_i32();
                        tcg_gen_trunc_i64_i32(tmp, tmp64);
                        tcg_temp_free_i64(tmp64);
                        store_reg(s, rn, tmp);
                    } else {
                        if (insn & (1 << 5))
                            gen_swap_half(tmp2);
                        gen_smul_dual(tmp, tmp2);
                        if (insn & (1 << 6)) {
                            /* This subtraction cannot overflow.  */
                            tcg_gen_sub_i32(tmp, tmp, tmp2);
                        } else {
                            /* This addition cannot overflow 32 bits;
                             * however it may overflow considered as a signed
                             * operation, in which case we must set the Q flag.
                             */
                            gen_helper_add_setq(tmp, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        if (insn & (1 << 22)) {
                            /* smlald, smlsld */
                            tmp64 = tcg_temp_new_i64();
                            tcg_gen_ext_i32_i64(tmp64, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_addq(s, tmp64, rd, rn);
                            gen_storeq_reg(s, rd, rn, tmp64);
                            tcg_temp_free_i64(tmp64);
                        } else {
                            /* smuad, smusd, smlad, smlsd */
                                tmp2 = load_reg(s, rd);
                                gen_helper_add_setq(tmp, tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            store_reg(s, rn, tmp);
                        }
                    }
                    break;
                case 3:
                    op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
                    switch (op1) {
                    case 0: /* Unsigned sum of absolute differences.  */
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        gen_helper_usad8(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                            tmp2 = load_reg(s, rd);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        store_reg(s, rn, tmp);
                        break;
                    case 0x20: case 0x24: case 0x28: case 0x2c:
                        /* Bitfield insert/clear.  */
                        shift = (insn >> 7) & 0x1f;
                        i = (insn >> 16) & 0x1f;
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, 0);
                            tmp = load_reg(s, rm);
                            tmp2 = load_reg(s, rd);
                            gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
                            tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                        break;
                    case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                    case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        i = ((insn >> 16) & 0x1f) + 1;
                        if (insn & (1 << 22))
                            gen_ubfx(tmp, shift, (1u << i) - 1);
                        else
                            gen_sbfx(tmp, shift, i);
                        store_reg(s, rd, tmp);
                        break;
                    }
                    break;
                }
            } else {
                /* Check for undefined extension instructions
                 * per the ARM Bible IE:
                 * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
                 */
                sh = (0xf << 20) | (0xf << 4);
                if (op1 == 0x7 && ((insn & sh) == sh))
                    goto illegal_op;
                /* load/store byte/word */
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                tmp2 = load_reg(s, rn);
                i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
                if (insn & (1 << 24))
                    gen_add_data_offset(s, insn, tmp2);
                if (insn & (1 << 20)) {
                    /* load */
                    if (insn & (1 << 22)) {
                        tmp = gen_ld8u(tmp2, i);
                    } else {
                        tmp = gen_ld32(tmp2, i);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 22))
                        gen_st8(tmp, tmp2, i);
                    else
                        gen_st32(tmp, tmp2, i);
                }
                if (!(insn & (1 << 24))) {
                    gen_add_data_offset(s, insn, tmp2);
                    store_reg(s, rn, tmp2);
                } else if (insn & (1 << 21)) {
                    store_reg(s, rn, tmp2);
                } else {
                    tcg_temp_free_i32(tmp2);
                }
                if (insn & (1 << 20)) {
                    /* Complete the load.  */
                    store_reg_from_load(env, s, rd, tmp);
                }
            }
            break;
            {
                int j, n, user, loaded_base;
                /* load/store multiple words */
                /* XXX: store correct base if write back */
                if (insn & (1 << 22)) {
                    if (IS_USER(s))
                        goto illegal_op; /* only usable in supervisor mode */
                    if ((insn & (1 << 15)) == 0)
                        user = 1;
                }
                rn = (insn >> 16) & 0xf;
                addr = load_reg(s, rn);

                /* compute total size */
                n = 0;
                TCGV_UNUSED(loaded_var);
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        n++;
                }
                /* XXX: test invalid n == 0 case ? */
                if (insn & (1 << 23)) {
                    if (insn & (1 << 24)) {
                        /* pre increment */
                        tcg_gen_addi_i32(addr, addr, 4);
                    } else {
                        /* post increment */
                    }
                } else {
                    if (insn & (1 << 24)) {
                        /* pre decrement */
                        tcg_gen_addi_i32(addr, addr, -(n * 4));
                    } else {
                        /* post decrement */
                        tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                    }
                }
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i)) {
                        if (insn & (1 << 20)) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            if (user) {
                                tmp2 = tcg_const_i32(i);
                                gen_helper_set_user_reg(tmp2, tmp);
                                tcg_temp_free_i32(tmp2);
                                tcg_temp_free_i32(tmp);
                            } else if (i == rn) {
                                loaded_var = tmp;
                                loaded_base = 1;
                            } else {
                                store_reg_from_load(env, s, i, tmp);
                            }
                        } else {
                            if (i == 15) {
                                /* special case: r15 = PC + 8 */
                                val = (long)s->pc + 4;
                                tmp = tcg_temp_new_i32();
                                tcg_gen_movi_i32(tmp, val);
                            } else if (user) {
                                tmp = tcg_temp_new_i32();
                                tmp2 = tcg_const_i32(i);
                                gen_helper_get_user_reg(tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            } else {
                                tmp = load_reg(s, i);
                            }
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        /* no need to add after the last transfer */
                        tcg_gen_addi_i32(addr, addr, 4);
                    }
                }
                if (insn & (1 << 21)) {
                    if (insn & (1 << 23)) {
                        if (insn & (1 << 24)) {
                            /* pre increment */
                        } else {
                            /* post increment */
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                    } else {
                        if (insn & (1 << 24)) {
                            /* pre decrement */
                            tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                        } else {
                            /* post decrement */
                            tcg_gen_addi_i32(addr, addr, -(n * 4));
                        }
                    }
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if ((insn & (1 << 22)) && !user) {
                    /* Restore CPSR from SPSR.  */
                    tmp = load_cpu_field(spsr);
                    gen_set_cpsr(tmp, 0xffffffff);
                    tcg_temp_free_i32(tmp);
                    s->is_jmp = DISAS_UPDATE;
                }
            }
            break;
            {
                /* branch (and link) */
                val = (int32_t)s->pc;
                if (insn & (1 << 24)) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, val);
                    store_reg(s, 14, tmp);
                }
                offset = (((int32_t)insn << 8) >> 8);
                val += (offset << 2) + 4;
            }
            break;
            if (disas_coproc_insn(env, s, insn))
                goto illegal_op;
            break;
        case 0xf:
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        default:
        illegal_op:
            gen_exception_insn(s, 4, EXCP_UDEF);
            break;
        }
    }
}

/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
{
        tcg_gen_and_i32(t0, t0, t1);
        tcg_gen_andc_i32(t0, t0, t1);
        tcg_gen_or_i32(t0, t0, t1);
        tcg_gen_orc_i32(t0, t0, t1);
        tcg_gen_xor_i32(t0, t0, t1);
        if (conds)
            gen_helper_add_cc(t0, t0, t1);
        else
            tcg_gen_add_i32(t0, t0, t1);
        gen_helper_adc_cc(t0, t0, t1);
        if (conds)
            gen_helper_sbc_cc(t0, t0, t1);
        else
            gen_sub_carry(t0, t0, t1);
        if (conds)
            gen_helper_sub_cc(t0, t0, t1);
        else
            tcg_gen_sub_i32(t0, t0, t1);
        if (conds)
            gen_helper_sub_cc(t0, t1, t0);
        else
            tcg_gen_sub_i32(t0, t1, t0);
    default: /* 5, 6, 7, 9, 12, 15. */
        return 1;
        gen_set_CF_bit31(t1);
}

/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal.  */
static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;

    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature (env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior.  */
        if ((insn & (1 << 12)) == 0) {
            /* Second half of blx.  */
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
        }
        if (insn & (1 << 11)) {
            /* Second half of bl.  */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
        }
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary.  Implement it as two
               16-bit instructions in case the second half causes an
               exception.  */
            offset = ((int32_t)insn << 21) >> 9;
            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
        }
        /* Fall through to 32-bit decode.  */
    }

    insn = lduw_code(s->pc);
    insn |= (uint32_t)insn_hw1 << 16;

    if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }
    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        abort();
        if (insn & (1 << 22)) {
            /* Other load/store, table branch.  */
            if (insn & 0x01200000) {
                /* Load/store doubleword.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~3);
                } else {
                    addr = load_reg(s, rn);
                }
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (insn & (1 << 20)) {
                    /* ldrd */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rd, tmp);
                } else {
                    /* strd */
                    tmp = load_reg(s, rs);
                    gen_st32(tmp, addr, IS_USER(s));
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                if (insn & (1 << 21)) {
                    /* Base writeback.  */
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            } else if ((insn & (1 << 23)) == 0) {
                /* Load/store exclusive word.  */
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                } else {
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                }
                tcg_temp_free(addr);
            } else if ((insn & (1 << 6)) == 0) {
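                /* Table Branch (TBB/TBH): load a byte or halfword offset
                 * from the table at [Rn + Rm], double it and add it to
                 * the PC.  */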
8066 addr
= tcg_temp_new_i32();
8067 tcg_gen_movi_i32(addr
, s
->pc
);
8069 addr
= load_reg(s
, rn
);
8071 tmp
= load_reg(s
, rm
);
8072 tcg_gen_add_i32(addr
, addr
, tmp
);
8073 if (insn
& (1 << 4)) {
8075 tcg_gen_add_i32(addr
, addr
, tmp
);
8076 tcg_temp_free_i32(tmp
);
8077 tmp
= gen_ld16u(addr
, IS_USER(s
));
8079 tcg_temp_free_i32(tmp
);
8080 tmp
= gen_ld8u(addr
, IS_USER(s
));
8082 tcg_temp_free_i32(addr
);
8083 tcg_gen_shli_i32(tmp
, tmp
, 1);
8084 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
8085 store_reg(s
, 15, tmp
);
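                /* TBB/TBH sketch: the table at [Rn] (or at the PC for a
                 * literal table) holds unsigned byte or halfword entries,
                 * each a halfword count from the PC of the table branch.
                 * Doubling the loaded entry and adding s->pc, which already
                 * points past the instruction, yields the target; storing
                 * it to r15 is what actually ends the block with a jump.
                 */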
            } else {
                /* Load/store exclusive byte/halfword/doubleword.  */
                ARCH(7);
                op = (insn >> 4) & 0x3;
                if (op == 2) {
                    goto illegal_op;
                }
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                } else {
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                }
                tcg_temp_free(addr);
            }
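            /* Note on the exclusive helpers used above (a sketch of their
             * contract, not a new definition): gen_load_exclusive records
             * the monitored address and loaded value in the
             * cpu_exclusive_addr/cpu_exclusive_val globals, and
             * gen_store_exclusive emits a compare-and-store sequence that
             * only performs the store, and writes 0 to the status register,
             * if the monitor still matches; otherwise it writes 1.  The op
             * argument selects the access size.
             */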
        } else {
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* Not available in user mode.  */
                if (IS_USER(s))
                    goto illegal_op;
                if (insn & (1 << 20)) {
                    /* rfe */
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2.  */
                    tmp = gen_ld32(addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = gen_ld32(addr, 0);
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, -4);
                        }
                        store_reg(s, rn, addr);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                    gen_rfe(s, tmp, tmp2);
                } else {
                    /* srs */
                    op = (insn & 0x1f);
                    addr = tcg_temp_new_i32();
                    tmp = tcg_const_i32(op);
                    gen_helper_get_r13_banked(addr, cpu_env, tmp);
                    tcg_temp_free_i32(tmp);
                    if ((insn & (1 << 24)) == 0) {
                        tcg_gen_addi_i32(addr, addr, -8);
                    }
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    gen_helper_cpsr_read(tmp);
                    gen_st32(tmp, addr, 0);
                    if (insn & (1 << 21)) {
                        if ((insn & (1 << 24)) == 0) {
                            tcg_gen_addi_i32(addr, addr, -4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                        tmp = tcg_const_i32(op);
                        gen_helper_set_r13_banked(cpu_env, tmp, addr);
                        tcg_temp_free_i32(tmp);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                }
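            /* Reading aid: SRS pushes LR and the current CPSR to the stack
             * of the mode named by the instruction's low five bits (hence
             * the get/set_r13_banked helpers), and RFE is its counterpart,
             * reloading PC and CPSR from a pair of words at [Rn].  The
             * +/-8 and +/-4 adjustments above implement the usual
             * increment/decrement, before/after addressing variants.
             */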
            } else {
                int i, loaded_base = 0;
                TCGv loaded_var;
                /* Load/store multiple.  */
                addr = load_reg(s, rn);
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                }

                TCGV_UNUSED(loaded_var);
                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                        continue;
                    if (insn & (1 << 20)) {
                        /* Load.  */
                        tmp = gen_ld32(addr, IS_USER(s));
                        if (i == 15) {
                            gen_bx(s, tmp);
                        } else if (i == rn) {
                            loaded_var = tmp;
                            loaded_base = 1;
                        } else {
                            store_reg(s, i, tmp);
                        }
                    } else {
                        /* Store.  */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    tcg_gen_addi_i32(addr, addr, 4);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if (insn & (1 << 21)) {
                    /* Base register writeback.  */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    }
                    /* Fault if writeback register is in register list.  */
                    if (insn & (1 << rn))
                        goto illegal_op;
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    case 5:
        op = (insn >> 21) & 0xf;
        if (op == 6) {
            /* Halfword pack.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
            if (insn & (1 << 5)) {
                /* pkhtb */
                if (shift == 0)
                    shift = 31;
                tcg_gen_sari_i32(tmp2, tmp2, shift);
                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                tcg_gen_ext16u_i32(tmp2, tmp2);
            } else {
                /* pkhbt */
                if (shift)
                    tcg_gen_shli_i32(tmp2, tmp2, shift);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
            }
            tcg_gen_or_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* Data processing register constant shift.  */
            if (rn == 15) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            } else {
                tmp = load_reg(s, rn);
            }
            tmp2 = load_reg(s, rm);

            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
                goto illegal_op;
            tcg_temp_free_i32(tmp2);
            if (rd != 15) {
                store_reg(s, rd, tmp);
            } else {
                tcg_temp_free_i32(tmp);
            }
        }
        break;
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            if (logic_cc)
                gen_logic_CC(tmp);
            store_reg_bx(env, s, rd, tmp);
            break;
        case 1: /* Sign/zero extend.  */
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
            if (shift != 0)
                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default: goto illegal_op;
            }
            if (rn != 15) {
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 2: /* SIMD add/subtract.  */
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            if (op < 4) {
                /* Saturating add/subtract.  */
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if (op & 1)
                    gen_helper_double_saturate(tmp, tmp);
                if (op & 2)
                    gen_helper_sub_saturate(tmp, tmp2, tmp);
                else
                    gen_helper_add_saturate(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                tmp = load_reg(s, rn);
                switch (op) {
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                    break;
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                    break;
                case 0x09: /* rev16 */
                    gen_rev16(tmp);
                    break;
                case 0x0b: /* revsh */
                    gen_revsh(tmp);
                    break;
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    break;
                case 0x18: /* clz */
                    gen_helper_clz(tmp, tmp);
                    break;
                default:
                    goto illegal_op;
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (op)
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    else
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                if (op)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (insn & (1 << 22)) {
                    /* This subtraction cannot overflow. */
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
                    gen_helper_add_setq(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 3: /* 32 * 16 -> 32msb */
                if (op)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                if (rs != 15) {
                    tmp = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    } else {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                    }
                }
                if (insn & (1 << 4)) {
                    tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                }
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                break;
            case 7: /* Unsigned sum of absolute differences.  */
                gen_helper_usad8(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!arm_feature(env, ARM_FEATURE_DIV))
                    goto illegal_op;
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        tcg_temp_free_i32(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            }
            break;
        }
        break;
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn (env, s, insn))
                goto illegal_op;
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;

                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
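                /* Offset assembly sketch for the encoding decoded above:
                 * the sign bit S is hw1 bit 10, and the architectural
                 * I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S) terms come from
                 * hw2 bits 13 and 11; the two XOR lines implement exactly
                 * that, since the initial sign extension already filled
                 * offset bits 23:22 with copies of S.  E.g. with S = 0 and
                 * J1 = J2 = 1 (a short forward branch) both XOR terms are
                 * zero, and the offset is just the two 11-bit halves
                 * concatenated and shifted left one bit.
                 */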
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    goto illegal_op; /* not implemented.  */
                } else {
                    op = (insn >> 20) & 7;
                    switch (op) {
                    case 0: /* msr cpsr.  */
                        if (IS_M(env)) {
                            tmp = load_reg(s, rn);
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_msr(cpu_env, addr, tmp);
                            tcg_temp_free_i32(addr);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        }
                        /* fall through */
                    case 1: /* msr spsr.  */
                        if (IS_M(env))
                            goto illegal_op;
                        tmp = load_reg(s, rn);
                        if (gen_set_psr(s,
                              msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                              op == 1, tmp))
                            goto illegal_op;
                        break;
                    case 2: /* cps, nop-hint.  */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                        }
                        /* Implemented as NOP in user mode.  */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_set_psr_im(s, offset, 0, imm);
                        }
                        break;
                    case 3: /* Special control operations.  */
                        ARCH(7);
                        op = (insn >> 4) & 0xf;
                        switch (op) {
                        case 2: /* clrex */
                            gen_clrex(s);
                            break;
                        case 4: case 5: case 6: /* dsb, dmb, isb */
                            /* These execute as NOPs.  */
                            break;
                        default:
                            goto illegal_op;
                        }
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.  */
                        tmp = load_reg(s, rn);
                        gen_bx(s, tmp);
                        break;
                    case 5: /* Exception return.  */
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        if (rn != 14 || rd != 15) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rn);
                        tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                        gen_exception_return(s, tmp);
                        break;
                    case 6: /* mrs cpsr.  */
                        tmp = tcg_temp_new_i32();
                        if (IS_M(env)) {
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_mrs(tmp, cpu_env, addr);
                            tcg_temp_free_i32(addr);
                        } else {
                            gen_helper_cpsr_read(tmp);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 7: /* mrs spsr.  */
                        /* Not accessible in user mode.  */
                        if (IS_USER(s) || IS_M(env))
                            goto illegal_op;
                        tmp = load_cpu_field(spsr);
                        store_reg(s, rd, tmp);
                        break;
                    }
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            }
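            /* Note: the condition test is emitted inverted (op ^ 1), so the
             * generated code branches to s->condlabel when the condition
             * fails and the straight-line path holds the taken-branch code;
             * the label is resolved by the main translation loop.  A worked
             * offset example: insn[10:0] = 0x100 with all the other offset
             * fields zero gives offset = 0x200, a forward branch of 512
             * bytes.
             */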
        } else {
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_sbfx(tmp, shift, imm);
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_ubfx(tmp, shift, (1u << imm) - 1);
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
                            tcg_temp_free_i32(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_usat16(tmp, tmp, tmp2);
                            else
                                gen_helper_usat(tmp, tmp, tmp2);
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_ssat16(tmp, tmp, tmp2);
                            else
                                gen_helper_ssat(tmp, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                        }
                    }
                    store_reg(s, rd, tmp);
                }
            } else {
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm <<= 8;
                    imm |= imm << 16;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
        }
        break;
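        /* Modified-immediate sketch: the decoded "shift" field selects how
         * the 8-bit payload XY is expanded.  For instance, with shift = 1
         * and XY = 0x42 the constant becomes 0x00420042; for a rotated
         * constant, bit 7 is forced set and the value rotated into place,
         * with shifter_out asking gen_thumb2_data_op (via gen_set_CF_bit31)
         * to derive the carry flag from bit 31 when the operation is a
         * flag-setting logical one.
         */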
    case 12: /* Load/store single data item.  */
        {
        int postinc = 0;
        int writeback = 0;
        int user;
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            break;
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (rs == 15) {
            if (!(insn & (1 << 20))) {
                goto illegal_op;
            }
            if (op != 2) {
                /* Byte or halfword load space with dest == r15 : memory hints.
                 * Catch them early so we don't emit pointless addressing code.
                 * This space is a mix of:
                 *  PLD/PLDW/PLI,  which we implement as NOPs (note that unlike
                 *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
                 *     cores)
                 *  unallocated hints, which must be treated as NOPs
                 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
                 *     which is easiest for the decoding logic
                 *  Some space which must UNDEF
                 */
                int op1 = (insn >> 23) & 3;
                int op2 = (insn >> 6) & 0x3f;
                if (op & 2) {
                    goto illegal_op;
                }
                if (rn == 15) {
                    /* UNPREDICTABLE or unallocated hint */
                    return 0;
                }
                if (op1 & 1) {
                    return 0; /* PLD* or unallocated hint */
                }
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return 0; /* PLD* or unallocated hint */
                }
                /* UNDEF space, or an UNPREDICTABLE */
                return 1;
            }
        }
        user = IS_USER(s);
        if (rn == 15) {
            addr = tcg_temp_new_i32();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                imm = insn & 0xff;
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3) {
                        tcg_temp_free_i32(addr);
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    break;
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    user = 1;
                    break;
                case 0x9: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xb: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 0xd: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xf: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    writeback = 1;
                    break;
                default:
                    tcg_temp_free_i32(addr);
                    goto illegal_op;
                }
            }
        }
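        /* Addressing-mode summary for the switch above (a reading aid, not
         * new behavior): sub-opcode 0x0 indexes by a register shifted left
         * 0-3 bits; 0xc and 0xe apply a plain negative/positive byte
         * offset, with 0xe additionally forcing an unprivileged access;
         * 0x9/0xb defer the offset until after the access (post-indexed
         * with writeback); 0xd/0xf apply it up front and write the new
         * base back (pre-indexed).
         */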
        if (insn & (1 << 20)) {
            /* Load.  */
            switch (op) {
            case 0: tmp = gen_ld8u(addr, user); break;
            case 4: tmp = gen_ld8s(addr, user); break;
            case 1: tmp = gen_ld16u(addr, user); break;
            case 5: tmp = gen_ld16s(addr, user); break;
            case 2: tmp = gen_ld32(addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            if (rs == 15) {
                gen_bx(s, tmp);
            } else {
                store_reg(s, rs, tmp);
            }
        } else {
            /* Store.  */
            tmp = load_reg(s, rs);
            switch (op) {
            case 0: gen_st8(tmp, addr, user); break;
            case 1: gen_st16(tmp, addr, user); break;
            case 2: gen_st32(tmp, addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
        }
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
        }
        break;
    default:
        goto illegal_op;
    }
    return 0;
illegal_op:
    return 1;
}
static void disas_thumb_insn(CPUState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = lduw_code(s->pc);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
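        /* The "& ~2" above implements the word-aligned PC base required by
         * Thumb LDR (literal).  Illustration: s->pc here is the insn
         * address + 2, so for an instruction at 0x1000 the base is
         * (0x1002 + 2) & ~2 = 0x1004, and for one at 0x1002 it is
         * (0x1004 + 2) & ~2 = 0x1004 as well; both halfword slots of a
         * word share the same literal pool base.
         */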
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                /* already thumb, no need to check */
                gen_bx(s, tmp);
                break;
            }
            break;
        }
        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc(tmp, tmp2);
            else
                gen_helper_adc_cc(tmp, tmp, tmp2);
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
            else
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_helper_sub_cc(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_helper_sub_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_helper_add_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;
    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        tcg_temp_free_i32(addr);
        break;
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;
    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;
        case 2: /* sign/zero extend.  */
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(env, s, 15, tmp);
            }
            break;
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
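            /* CBZ/CBNZ note: as with ordinary conditional branches, the
             * test is emitted inverted.  CBNZ (bit 11 set) branches to
             * condlabel when the register IS zero, so the fall-through
             * gen_jmp is only reached when it is non-zero, and vice versa
             * for CBZ.
             */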
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;
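            /* IT state sketch: the IT instruction carries the base
             * condition in bits [7:4] and the then/else mask in bits [3:0].
             * condexec_cond keeps the condition with its low bit dropped
             * (it is re-derived per slot), while condexec_mask folds that
             * low bit in with the mask; the mask is then shifted left one
             * bit per executed instruction by the advance code in
             * gen_intermediate_code_internal until it reaches zero.
             */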
        case 0xe: /* bkpt */
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;
        case 0xa: /* rev */
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: /* cps */
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                /* PRIMASK */
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                /* FAULTMASK */
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                tcg_temp_free_i32(tmp);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;
                gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
            }
            break;
        default:
            goto undef;
        }
        break;
    case 12:
    {
        /* load/store multiple */
        TCGv loaded_var;
        TCGV_UNUSED(loaded_var);
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;
    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF);
    return;
undef:
    gen_exception_insn(s, 2, EXCP_UDEF);
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction.  */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;
    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUState is correct in the
     * middle of a TB.
     */
    /* Reset the conditional execution bits immediately.  This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc->pc);
        }
        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
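        /* Sketch of the IT advance above: condexec_mask holds up to five
         * bits, the then/else pattern plus a trailing marker.  After each
         * instruction the mask is shifted left within 5 bits and bit 4 is
         * copied into the low bit of the condition, e.g. 0b01101 ->
         * 0b11010 -> 0b10100 -> 0b01000 -> 0b10000 -> 0; once the mask
         * reaches zero the IT block is over and condexec_cond is cleared.
         * (The concrete mask value here is only illustrative.)
         */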
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);
    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}