]>
git.proxmox.com Git - qemu.git/blob - target-arm/translate.c
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
37 #define ENABLE_ARCH_5J 0
38 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
43 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
45 /* internal defines */
46 typedef struct DisasContext
{
49 /* Nonzero if this instruction has been conditionally skipped. */
51 /* The label that will be jumped to when the instruction is skipped. */
53 /* Thumb-2 condtional execution bits. */
56 struct TranslationBlock
*tb
;
57 int singlestep_enabled
;
59 #if !defined(CONFIG_USER_ONLY)
64 #if defined(CONFIG_USER_ONLY)
67 #define IS_USER(s) (s->user)
70 /* These instructions trap after executing, so defer them until after the
71 conditional executions state has been updated. */
75 static TCGv_ptr cpu_env
;
76 /* We reuse the same 64-bit temporaries for efficiency. */
77 static TCGv_i64 cpu_V0
, cpu_V1
, cpu_M0
;
78 static TCGv_i32 cpu_R
[16];
80 /* FIXME: These should be removed. */
82 static TCGv cpu_F0s
, cpu_F1s
;
83 static TCGv_i64 cpu_F0d
, cpu_F1d
;
85 #define ICOUNT_TEMP cpu_T[0]
86 #include "gen-icount.h"
88 static const char *regnames
[] =
89 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
90 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
92 /* initialize TCG globals. */
93 void arm_translate_init(void)
97 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
99 cpu_T
[0] = tcg_global_reg_new_i32(TCG_AREG1
, "T0");
100 cpu_T
[1] = tcg_global_reg_new_i32(TCG_AREG2
, "T1");
102 for (i
= 0; i
< 16; i
++) {
103 cpu_R
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
104 offsetof(CPUState
, regs
[i
]),
112 static int num_temps
;
114 /* Allocate a temporary variable. */
115 static TCGv_i32
new_tmp(void)
118 return tcg_temp_new_i32();
121 /* Release a temporary variable. */
122 static void dead_tmp(TCGv tmp
)
128 static inline TCGv
load_cpu_offset(int offset
)
130 TCGv tmp
= new_tmp();
131 tcg_gen_ld_i32(tmp
, cpu_env
, offset
);
135 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
137 static inline void store_cpu_offset(TCGv var
, int offset
)
139 tcg_gen_st_i32(var
, cpu_env
, offset
);
143 #define store_cpu_field(var, name) \
144 store_cpu_offset(var, offsetof(CPUState, name))
146 /* Set a variable to the value of a CPU register. */
147 static void load_reg_var(DisasContext
*s
, TCGv var
, int reg
)
151 /* normaly, since we updated PC, we need only to add one insn */
153 addr
= (long)s
->pc
+ 2;
155 addr
= (long)s
->pc
+ 4;
156 tcg_gen_movi_i32(var
, addr
);
158 tcg_gen_mov_i32(var
, cpu_R
[reg
]);
162 /* Create a new temporary and set it to the value of a CPU register. */
163 static inline TCGv
load_reg(DisasContext
*s
, int reg
)
165 TCGv tmp
= new_tmp();
166 load_reg_var(s
, tmp
, reg
);
170 /* Set a CPU register. The source must be a temporary and will be
172 static void store_reg(DisasContext
*s
, int reg
, TCGv var
)
175 tcg_gen_andi_i32(var
, var
, ~1);
176 s
->is_jmp
= DISAS_JUMP
;
178 tcg_gen_mov_i32(cpu_R
[reg
], var
);
183 /* Basic operations. */
184 #define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
185 #define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
186 #define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
188 #define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
189 #define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
190 #define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
191 #define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
193 #define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
194 #define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
195 #define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
196 #define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
197 #define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
199 #define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
200 #define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
201 #define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
202 #define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
203 #define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
204 #define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
205 #define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
207 #define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
208 #define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
210 /* Value extensions. */
211 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
212 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
213 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
214 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
216 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
217 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
219 #define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
221 #define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
222 /* Set NZCV flags from the high 4 bits of var. */
223 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
225 static void gen_exception(int excp
)
227 TCGv tmp
= new_tmp();
228 tcg_gen_movi_i32(tmp
, excp
);
229 gen_helper_exception(tmp
);
233 static void gen_smul_dual(TCGv a
, TCGv b
)
235 TCGv tmp1
= new_tmp();
236 TCGv tmp2
= new_tmp();
237 tcg_gen_ext16s_i32(tmp1
, a
);
238 tcg_gen_ext16s_i32(tmp2
, b
);
239 tcg_gen_mul_i32(tmp1
, tmp1
, tmp2
);
241 tcg_gen_sari_i32(a
, a
, 16);
242 tcg_gen_sari_i32(b
, b
, 16);
243 tcg_gen_mul_i32(b
, b
, a
);
244 tcg_gen_mov_i32(a
, tmp1
);
248 /* Byteswap each halfword. */
249 static void gen_rev16(TCGv var
)
251 TCGv tmp
= new_tmp();
252 tcg_gen_shri_i32(tmp
, var
, 8);
253 tcg_gen_andi_i32(tmp
, tmp
, 0x00ff00ff);
254 tcg_gen_shli_i32(var
, var
, 8);
255 tcg_gen_andi_i32(var
, var
, 0xff00ff00);
256 tcg_gen_or_i32(var
, var
, tmp
);
260 /* Byteswap low halfword and sign extend. */
261 static void gen_revsh(TCGv var
)
263 TCGv tmp
= new_tmp();
264 tcg_gen_shri_i32(tmp
, var
, 8);
265 tcg_gen_andi_i32(tmp
, tmp
, 0x00ff);
266 tcg_gen_shli_i32(var
, var
, 8);
267 tcg_gen_ext8s_i32(var
, var
);
268 tcg_gen_or_i32(var
, var
, tmp
);
272 /* Unsigned bitfield extract. */
273 static void gen_ubfx(TCGv var
, int shift
, uint32_t mask
)
276 tcg_gen_shri_i32(var
, var
, shift
);
277 tcg_gen_andi_i32(var
, var
, mask
);
280 /* Signed bitfield extract. */
281 static void gen_sbfx(TCGv var
, int shift
, int width
)
286 tcg_gen_sari_i32(var
, var
, shift
);
287 if (shift
+ width
< 32) {
288 signbit
= 1u << (width
- 1);
289 tcg_gen_andi_i32(var
, var
, (1u << width
) - 1);
290 tcg_gen_xori_i32(var
, var
, signbit
);
291 tcg_gen_subi_i32(var
, var
, signbit
);
295 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
296 static void gen_bfi(TCGv dest
, TCGv base
, TCGv val
, int shift
, uint32_t mask
)
298 tcg_gen_andi_i32(val
, val
, mask
);
299 tcg_gen_shli_i32(val
, val
, shift
);
300 tcg_gen_andi_i32(base
, base
, ~(mask
<< shift
));
301 tcg_gen_or_i32(dest
, base
, val
);
304 /* Round the top 32 bits of a 64-bit value. */
305 static void gen_roundqd(TCGv a
, TCGv b
)
307 tcg_gen_shri_i32(a
, a
, 31);
308 tcg_gen_add_i32(a
, a
, b
);
311 /* FIXME: Most targets have native widening multiplication.
312 It would be good to use that instead of a full wide multiply. */
313 /* 32x32->64 multiply. Marks inputs as dead. */
314 static TCGv_i64
gen_mulu_i64_i32(TCGv a
, TCGv b
)
316 TCGv_i64 tmp1
= tcg_temp_new_i64();
317 TCGv_i64 tmp2
= tcg_temp_new_i64();
319 tcg_gen_extu_i32_i64(tmp1
, a
);
321 tcg_gen_extu_i32_i64(tmp2
, b
);
323 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
327 static TCGv_i64
gen_muls_i64_i32(TCGv a
, TCGv b
)
329 TCGv_i64 tmp1
= tcg_temp_new_i64();
330 TCGv_i64 tmp2
= tcg_temp_new_i64();
332 tcg_gen_ext_i32_i64(tmp1
, a
);
334 tcg_gen_ext_i32_i64(tmp2
, b
);
336 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
340 /* Unsigned 32x32->64 multiply. */
341 static void gen_op_mull_T0_T1(void)
343 TCGv_i64 tmp1
= tcg_temp_new_i64();
344 TCGv_i64 tmp2
= tcg_temp_new_i64();
346 tcg_gen_extu_i32_i64(tmp1
, cpu_T
[0]);
347 tcg_gen_extu_i32_i64(tmp2
, cpu_T
[1]);
348 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
349 tcg_gen_trunc_i64_i32(cpu_T
[0], tmp1
);
350 tcg_gen_shri_i64(tmp1
, tmp1
, 32);
351 tcg_gen_trunc_i64_i32(cpu_T
[1], tmp1
);
354 /* Signed 32x32->64 multiply. */
355 static void gen_imull(TCGv a
, TCGv b
)
357 TCGv_i64 tmp1
= tcg_temp_new_i64();
358 TCGv_i64 tmp2
= tcg_temp_new_i64();
360 tcg_gen_ext_i32_i64(tmp1
, a
);
361 tcg_gen_ext_i32_i64(tmp2
, b
);
362 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
363 tcg_gen_trunc_i64_i32(a
, tmp1
);
364 tcg_gen_shri_i64(tmp1
, tmp1
, 32);
365 tcg_gen_trunc_i64_i32(b
, tmp1
);
368 /* Swap low and high halfwords. */
369 static void gen_swap_half(TCGv var
)
371 TCGv tmp
= new_tmp();
372 tcg_gen_shri_i32(tmp
, var
, 16);
373 tcg_gen_shli_i32(var
, var
, 16);
374 tcg_gen_or_i32(var
, var
, tmp
);
378 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
379 tmp = (t0 ^ t1) & 0x8000;
382 t0 = (t0 + t1) ^ tmp;
385 static void gen_add16(TCGv t0
, TCGv t1
)
387 TCGv tmp
= new_tmp();
388 tcg_gen_xor_i32(tmp
, t0
, t1
);
389 tcg_gen_andi_i32(tmp
, tmp
, 0x8000);
390 tcg_gen_andi_i32(t0
, t0
, ~0x8000);
391 tcg_gen_andi_i32(t1
, t1
, ~0x8000);
392 tcg_gen_add_i32(t0
, t0
, t1
);
393 tcg_gen_xor_i32(t0
, t0
, tmp
);
398 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
400 /* Set CF to the top bit of var. */
401 static void gen_set_CF_bit31(TCGv var
)
403 TCGv tmp
= new_tmp();
404 tcg_gen_shri_i32(tmp
, var
, 31);
409 /* Set N and Z flags from var. */
410 static inline void gen_logic_CC(TCGv var
)
412 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUState
, NF
));
413 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUState
, ZF
));
417 static void gen_adc_T0_T1(void)
421 tmp
= load_cpu_field(CF
);
422 tcg_gen_add_i32(cpu_T
[0], cpu_T
[0], tmp
);
426 /* dest = T0 + T1 + CF. */
427 static void gen_add_carry(TCGv dest
, TCGv t0
, TCGv t1
)
430 tcg_gen_add_i32(dest
, t0
, t1
);
431 tmp
= load_cpu_field(CF
);
432 tcg_gen_add_i32(dest
, dest
, tmp
);
436 /* dest = T0 - T1 + CF - 1. */
437 static void gen_sub_carry(TCGv dest
, TCGv t0
, TCGv t1
)
440 tcg_gen_sub_i32(dest
, t0
, t1
);
441 tmp
= load_cpu_field(CF
);
442 tcg_gen_add_i32(dest
, dest
, tmp
);
443 tcg_gen_subi_i32(dest
, dest
, 1);
447 #define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
448 #define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
450 /* T0 &= ~T1. Clobbers T1. */
451 /* FIXME: Implement bic natively. */
452 static inline void tcg_gen_bic_i32(TCGv dest
, TCGv t0
, TCGv t1
)
454 TCGv tmp
= new_tmp();
455 tcg_gen_not_i32(tmp
, t1
);
456 tcg_gen_and_i32(dest
, t0
, tmp
);
459 static inline void gen_op_bicl_T0_T1(void)
465 /* FIXME: Implement this natively. */
466 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
468 /* FIXME: Implement this natively. */
469 static void tcg_gen_rori_i32(TCGv t0
, TCGv t1
, int i
)
477 tcg_gen_shri_i32(tmp
, t1
, i
);
478 tcg_gen_shli_i32(t1
, t1
, 32 - i
);
479 tcg_gen_or_i32(t0
, t1
, tmp
);
483 static void shifter_out_im(TCGv var
, int shift
)
485 TCGv tmp
= new_tmp();
487 tcg_gen_andi_i32(tmp
, var
, 1);
489 tcg_gen_shri_i32(tmp
, var
, shift
);
491 tcg_gen_andi_i32(tmp
, tmp
, 1);
497 /* Shift by immediate. Includes special handling for shift == 0. */
498 static inline void gen_arm_shift_im(TCGv var
, int shiftop
, int shift
, int flags
)
504 shifter_out_im(var
, 32 - shift
);
505 tcg_gen_shli_i32(var
, var
, shift
);
511 tcg_gen_shri_i32(var
, var
, 31);
514 tcg_gen_movi_i32(var
, 0);
517 shifter_out_im(var
, shift
- 1);
518 tcg_gen_shri_i32(var
, var
, shift
);
525 shifter_out_im(var
, shift
- 1);
528 tcg_gen_sari_i32(var
, var
, shift
);
530 case 3: /* ROR/RRX */
533 shifter_out_im(var
, shift
- 1);
534 tcg_gen_rori_i32(var
, var
, shift
); break;
536 TCGv tmp
= load_cpu_field(CF
);
538 shifter_out_im(var
, 0);
539 tcg_gen_shri_i32(var
, var
, 1);
540 tcg_gen_shli_i32(tmp
, tmp
, 31);
541 tcg_gen_or_i32(var
, var
, tmp
);
547 static inline void gen_arm_shift_reg(TCGv var
, int shiftop
,
548 TCGv shift
, int flags
)
552 case 0: gen_helper_shl_cc(var
, var
, shift
); break;
553 case 1: gen_helper_shr_cc(var
, var
, shift
); break;
554 case 2: gen_helper_sar_cc(var
, var
, shift
); break;
555 case 3: gen_helper_ror_cc(var
, var
, shift
); break;
559 case 0: gen_helper_shl(var
, var
, shift
); break;
560 case 1: gen_helper_shr(var
, var
, shift
); break;
561 case 2: gen_helper_sar(var
, var
, shift
); break;
562 case 3: gen_helper_ror(var
, var
, shift
); break;
568 #define PAS_OP(pfx) \
570 case 0: gen_pas_helper(glue(pfx,add16)); break; \
571 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
572 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
573 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
574 case 4: gen_pas_helper(glue(pfx,add8)); break; \
575 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
577 static void gen_arm_parallel_addsub(int op1
, int op2
, TCGv a
, TCGv b
)
582 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
584 tmp
= tcg_temp_new_ptr();
585 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
589 tmp
= tcg_temp_new_ptr();
590 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
593 #undef gen_pas_helper
594 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
607 #undef gen_pas_helper
612 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
613 #define PAS_OP(pfx) \
615 case 0: gen_pas_helper(glue(pfx,add8)); break; \
616 case 1: gen_pas_helper(glue(pfx,add16)); break; \
617 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
618 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
619 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
620 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
622 static void gen_thumb2_parallel_addsub(int op1
, int op2
, TCGv a
, TCGv b
)
627 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
629 tmp
= tcg_temp_new_ptr();
630 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
634 tmp
= tcg_temp_new_ptr();
635 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
638 #undef gen_pas_helper
639 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
652 #undef gen_pas_helper
657 static void gen_test_cc(int cc
, int label
)
665 tmp
= load_cpu_field(ZF
);
666 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
669 tmp
= load_cpu_field(ZF
);
670 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
673 tmp
= load_cpu_field(CF
);
674 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
677 tmp
= load_cpu_field(CF
);
678 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
681 tmp
= load_cpu_field(NF
);
682 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
685 tmp
= load_cpu_field(NF
);
686 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
689 tmp
= load_cpu_field(VF
);
690 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
693 tmp
= load_cpu_field(VF
);
694 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
696 case 8: /* hi: C && !Z */
697 inv
= gen_new_label();
698 tmp
= load_cpu_field(CF
);
699 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, inv
);
701 tmp
= load_cpu_field(ZF
);
702 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
705 case 9: /* ls: !C || Z */
706 tmp
= load_cpu_field(CF
);
707 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
709 tmp
= load_cpu_field(ZF
);
710 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
712 case 10: /* ge: N == V -> N ^ V == 0 */
713 tmp
= load_cpu_field(VF
);
714 tmp2
= load_cpu_field(NF
);
715 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
717 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
719 case 11: /* lt: N != V -> N ^ V != 0 */
720 tmp
= load_cpu_field(VF
);
721 tmp2
= load_cpu_field(NF
);
722 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
724 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
726 case 12: /* gt: !Z && N == V */
727 inv
= gen_new_label();
728 tmp
= load_cpu_field(ZF
);
729 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, inv
);
731 tmp
= load_cpu_field(VF
);
732 tmp2
= load_cpu_field(NF
);
733 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
735 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
738 case 13: /* le: Z || N != V */
739 tmp
= load_cpu_field(ZF
);
740 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
742 tmp
= load_cpu_field(VF
);
743 tmp2
= load_cpu_field(NF
);
744 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
746 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
749 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
755 static const uint8_t table_logic_cc
[16] = {
774 /* Set PC and Thumb state from an immediate address. */
775 static inline void gen_bx_im(DisasContext
*s
, uint32_t addr
)
779 s
->is_jmp
= DISAS_UPDATE
;
780 if (s
->thumb
!= (addr
& 1)) {
782 tcg_gen_movi_i32(tmp
, addr
& 1);
783 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUState
, thumb
));
786 tcg_gen_movi_i32(cpu_R
[15], addr
& ~1);
789 /* Set PC and Thumb state from var. var is marked as dead. */
790 static inline void gen_bx(DisasContext
*s
, TCGv var
)
792 s
->is_jmp
= DISAS_UPDATE
;
793 tcg_gen_andi_i32(cpu_R
[15], var
, ~1);
794 tcg_gen_andi_i32(var
, var
, 1);
795 store_cpu_field(var
, thumb
);
798 /* Variant of store_reg which uses branch&exchange logic when storing
799 to r15 in ARM architecture v7 and above. The source must be a temporary
800 and will be marked as dead. */
801 static inline void store_reg_bx(CPUState
*env
, DisasContext
*s
,
804 if (reg
== 15 && ENABLE_ARCH_7
) {
807 store_reg(s
, reg
, var
);
811 static inline TCGv
gen_ld8s(TCGv addr
, int index
)
813 TCGv tmp
= new_tmp();
814 tcg_gen_qemu_ld8s(tmp
, addr
, index
);
817 static inline TCGv
gen_ld8u(TCGv addr
, int index
)
819 TCGv tmp
= new_tmp();
820 tcg_gen_qemu_ld8u(tmp
, addr
, index
);
823 static inline TCGv
gen_ld16s(TCGv addr
, int index
)
825 TCGv tmp
= new_tmp();
826 tcg_gen_qemu_ld16s(tmp
, addr
, index
);
829 static inline TCGv
gen_ld16u(TCGv addr
, int index
)
831 TCGv tmp
= new_tmp();
832 tcg_gen_qemu_ld16u(tmp
, addr
, index
);
835 static inline TCGv
gen_ld32(TCGv addr
, int index
)
837 TCGv tmp
= new_tmp();
838 tcg_gen_qemu_ld32u(tmp
, addr
, index
);
841 static inline void gen_st8(TCGv val
, TCGv addr
, int index
)
843 tcg_gen_qemu_st8(val
, addr
, index
);
846 static inline void gen_st16(TCGv val
, TCGv addr
, int index
)
848 tcg_gen_qemu_st16(val
, addr
, index
);
851 static inline void gen_st32(TCGv val
, TCGv addr
, int index
)
853 tcg_gen_qemu_st32(val
, addr
, index
);
857 static inline void gen_movl_T0_reg(DisasContext
*s
, int reg
)
859 load_reg_var(s
, cpu_T
[0], reg
);
862 static inline void gen_movl_T1_reg(DisasContext
*s
, int reg
)
864 load_reg_var(s
, cpu_T
[1], reg
);
867 static inline void gen_set_pc_im(uint32_t val
)
869 tcg_gen_movi_i32(cpu_R
[15], val
);
872 static inline void gen_movl_reg_TN(DisasContext
*s
, int reg
, int t
)
877 tcg_gen_andi_i32(tmp
, cpu_T
[t
], ~1);
881 tcg_gen_mov_i32(cpu_R
[reg
], tmp
);
884 s
->is_jmp
= DISAS_JUMP
;
888 static inline void gen_movl_reg_T0(DisasContext
*s
, int reg
)
890 gen_movl_reg_TN(s
, reg
, 0);
893 static inline void gen_movl_reg_T1(DisasContext
*s
, int reg
)
895 gen_movl_reg_TN(s
, reg
, 1);
898 /* Force a TB lookup after an instruction that changes the CPU state. */
899 static inline void gen_lookup_tb(DisasContext
*s
)
901 gen_op_movl_T0_im(s
->pc
);
902 gen_movl_reg_T0(s
, 15);
903 s
->is_jmp
= DISAS_UPDATE
;
906 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
909 int val
, rm
, shift
, shiftop
;
912 if (!(insn
& (1 << 25))) {
915 if (!(insn
& (1 << 23)))
918 tcg_gen_addi_i32(var
, var
, val
);
922 shift
= (insn
>> 7) & 0x1f;
923 shiftop
= (insn
>> 5) & 3;
924 offset
= load_reg(s
, rm
);
925 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
926 if (!(insn
& (1 << 23)))
927 tcg_gen_sub_i32(var
, var
, offset
);
929 tcg_gen_add_i32(var
, var
, offset
);
934 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
940 if (insn
& (1 << 22)) {
942 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
943 if (!(insn
& (1 << 23)))
947 tcg_gen_addi_i32(var
, var
, val
);
951 tcg_gen_addi_i32(var
, var
, extra
);
953 offset
= load_reg(s
, rm
);
954 if (!(insn
& (1 << 23)))
955 tcg_gen_sub_i32(var
, var
, offset
);
957 tcg_gen_add_i32(var
, var
, offset
);
962 #define VFP_OP2(name) \
963 static inline void gen_vfp_##name(int dp) \
966 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
968 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
978 static inline void gen_vfp_abs(int dp
)
981 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
983 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
986 static inline void gen_vfp_neg(int dp
)
989 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
991 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
994 static inline void gen_vfp_sqrt(int dp
)
997 gen_helper_vfp_sqrtd(cpu_F0d
, cpu_F0d
, cpu_env
);
999 gen_helper_vfp_sqrts(cpu_F0s
, cpu_F0s
, cpu_env
);
1002 static inline void gen_vfp_cmp(int dp
)
1005 gen_helper_vfp_cmpd(cpu_F0d
, cpu_F1d
, cpu_env
);
1007 gen_helper_vfp_cmps(cpu_F0s
, cpu_F1s
, cpu_env
);
1010 static inline void gen_vfp_cmpe(int dp
)
1013 gen_helper_vfp_cmped(cpu_F0d
, cpu_F1d
, cpu_env
);
1015 gen_helper_vfp_cmpes(cpu_F0s
, cpu_F1s
, cpu_env
);
1018 static inline void gen_vfp_F1_ld0(int dp
)
1021 tcg_gen_movi_i64(cpu_F1d
, 0);
1023 tcg_gen_movi_i32(cpu_F1s
, 0);
1026 static inline void gen_vfp_uito(int dp
)
1029 gen_helper_vfp_uitod(cpu_F0d
, cpu_F0s
, cpu_env
);
1031 gen_helper_vfp_uitos(cpu_F0s
, cpu_F0s
, cpu_env
);
1034 static inline void gen_vfp_sito(int dp
)
1037 gen_helper_vfp_sitod(cpu_F0d
, cpu_F0s
, cpu_env
);
1039 gen_helper_vfp_sitos(cpu_F0s
, cpu_F0s
, cpu_env
);
1042 static inline void gen_vfp_toui(int dp
)
1045 gen_helper_vfp_touid(cpu_F0s
, cpu_F0d
, cpu_env
);
1047 gen_helper_vfp_touis(cpu_F0s
, cpu_F0s
, cpu_env
);
1050 static inline void gen_vfp_touiz(int dp
)
1053 gen_helper_vfp_touizd(cpu_F0s
, cpu_F0d
, cpu_env
);
1055 gen_helper_vfp_touizs(cpu_F0s
, cpu_F0s
, cpu_env
);
1058 static inline void gen_vfp_tosi(int dp
)
1061 gen_helper_vfp_tosid(cpu_F0s
, cpu_F0d
, cpu_env
);
1063 gen_helper_vfp_tosis(cpu_F0s
, cpu_F0s
, cpu_env
);
1066 static inline void gen_vfp_tosiz(int dp
)
1069 gen_helper_vfp_tosizd(cpu_F0s
, cpu_F0d
, cpu_env
);
1071 gen_helper_vfp_tosizs(cpu_F0s
, cpu_F0s
, cpu_env
);
1074 #define VFP_GEN_FIX(name) \
1075 static inline void gen_vfp_##name(int dp, int shift) \
1078 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1080 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
1092 static inline void gen_vfp_ld(DisasContext
*s
, int dp
)
1095 tcg_gen_qemu_ld64(cpu_F0d
, cpu_T
[1], IS_USER(s
));
1097 tcg_gen_qemu_ld32u(cpu_F0s
, cpu_T
[1], IS_USER(s
));
1100 static inline void gen_vfp_st(DisasContext
*s
, int dp
)
1103 tcg_gen_qemu_st64(cpu_F0d
, cpu_T
[1], IS_USER(s
));
1105 tcg_gen_qemu_st32(cpu_F0s
, cpu_T
[1], IS_USER(s
));
1109 vfp_reg_offset (int dp
, int reg
)
1112 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
1114 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1115 + offsetof(CPU_DoubleU
, l
.upper
);
1117 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1118 + offsetof(CPU_DoubleU
, l
.lower
);
1122 /* Return the offset of a 32-bit piece of a NEON register.
1123 zero is the least significant end of the register. */
1125 neon_reg_offset (int reg
, int n
)
1129 return vfp_reg_offset(0, sreg
);
1132 /* FIXME: Remove these. */
1133 #define neon_T0 cpu_T[0]
1134 #define neon_T1 cpu_T[1]
1135 #define NEON_GET_REG(T, reg, n) \
1136 tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1137 #define NEON_SET_REG(T, reg, n) \
1138 tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1140 static TCGv
neon_load_reg(int reg
, int pass
)
1142 TCGv tmp
= new_tmp();
1143 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1147 static void neon_store_reg(int reg
, int pass
, TCGv var
)
1149 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1153 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1155 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1158 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1160 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1163 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1164 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1165 #define tcg_gen_st_f32 tcg_gen_st_i32
1166 #define tcg_gen_st_f64 tcg_gen_st_i64
1168 static inline void gen_mov_F0_vreg(int dp
, int reg
)
1171 tcg_gen_ld_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1173 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1176 static inline void gen_mov_F1_vreg(int dp
, int reg
)
1179 tcg_gen_ld_f64(cpu_F1d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1181 tcg_gen_ld_f32(cpu_F1s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1184 static inline void gen_mov_vreg_F0(int dp
, int reg
)
1187 tcg_gen_st_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1189 tcg_gen_st_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1192 #define ARM_CP_RW_BIT (1 << 20)
1194 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1196 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1199 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1201 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1204 static inline void gen_op_iwmmxt_movl_wCx_T0(int reg
)
1206 tcg_gen_st_i32(cpu_T
[0], cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1209 static inline void gen_op_iwmmxt_movl_T0_wCx(int reg
)
1211 tcg_gen_ld_i32(cpu_T
[0], cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1214 static inline void gen_op_iwmmxt_movl_T1_wCx(int reg
)
1216 tcg_gen_ld_i32(cpu_T
[1], cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1219 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1221 iwmmxt_store_reg(cpu_M0
, rn
);
1224 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1226 iwmmxt_load_reg(cpu_M0
, rn
);
1229 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1231 iwmmxt_load_reg(cpu_V1
, rn
);
1232 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1235 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1237 iwmmxt_load_reg(cpu_V1
, rn
);
1238 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1241 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1243 iwmmxt_load_reg(cpu_V1
, rn
);
1244 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1247 #define IWMMXT_OP(name) \
1248 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1250 iwmmxt_load_reg(cpu_V1, rn); \
1251 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1254 #define IWMMXT_OP_ENV(name) \
1255 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1257 iwmmxt_load_reg(cpu_V1, rn); \
1258 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1261 #define IWMMXT_OP_ENV_SIZE(name) \
1262 IWMMXT_OP_ENV(name##b) \
1263 IWMMXT_OP_ENV(name##w) \
1264 IWMMXT_OP_ENV(name##l)
1266 #define IWMMXT_OP_ENV1(name) \
1267 static inline void gen_op_iwmmxt_##name##_M0(void) \
1269 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1283 IWMMXT_OP_ENV_SIZE(unpackl
)
1284 IWMMXT_OP_ENV_SIZE(unpackh
)
1286 IWMMXT_OP_ENV1(unpacklub
)
1287 IWMMXT_OP_ENV1(unpackluw
)
1288 IWMMXT_OP_ENV1(unpacklul
)
1289 IWMMXT_OP_ENV1(unpackhub
)
1290 IWMMXT_OP_ENV1(unpackhuw
)
1291 IWMMXT_OP_ENV1(unpackhul
)
1292 IWMMXT_OP_ENV1(unpacklsb
)
1293 IWMMXT_OP_ENV1(unpacklsw
)
1294 IWMMXT_OP_ENV1(unpacklsl
)
1295 IWMMXT_OP_ENV1(unpackhsb
)
1296 IWMMXT_OP_ENV1(unpackhsw
)
1297 IWMMXT_OP_ENV1(unpackhsl
)
1299 IWMMXT_OP_ENV_SIZE(cmpeq
)
1300 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1301 IWMMXT_OP_ENV_SIZE(cmpgts
)
1303 IWMMXT_OP_ENV_SIZE(mins
)
1304 IWMMXT_OP_ENV_SIZE(minu
)
1305 IWMMXT_OP_ENV_SIZE(maxs
)
1306 IWMMXT_OP_ENV_SIZE(maxu
)
1308 IWMMXT_OP_ENV_SIZE(subn
)
1309 IWMMXT_OP_ENV_SIZE(addn
)
1310 IWMMXT_OP_ENV_SIZE(subu
)
1311 IWMMXT_OP_ENV_SIZE(addu
)
1312 IWMMXT_OP_ENV_SIZE(subs
)
1313 IWMMXT_OP_ENV_SIZE(adds
)
1315 IWMMXT_OP_ENV(avgb0
)
1316 IWMMXT_OP_ENV(avgb1
)
1317 IWMMXT_OP_ENV(avgw0
)
1318 IWMMXT_OP_ENV(avgw1
)
1322 IWMMXT_OP_ENV(packuw
)
1323 IWMMXT_OP_ENV(packul
)
1324 IWMMXT_OP_ENV(packuq
)
1325 IWMMXT_OP_ENV(packsw
)
1326 IWMMXT_OP_ENV(packsl
)
1327 IWMMXT_OP_ENV(packsq
)
1329 static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
1331 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, cpu_T
[0], cpu_T
[1]);
1334 static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
1336 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, cpu_T
[0], cpu_T
[1]);
1339 static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
1341 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, cpu_T
[0], cpu_T
[1]);
1344 static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn
)
1346 iwmmxt_load_reg(cpu_V1
, rn
);
1347 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, cpu_T
[0]);
1350 static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift
)
1352 TCGv tmp
= tcg_const_i32(shift
);
1353 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, cpu_T
[0], cpu_T
[1], tmp
);
1356 static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift
)
1358 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, shift
);
1359 tcg_gen_trunc_i64_i32(cpu_T
[0], cpu_M0
);
1360 tcg_gen_ext8s_i32(cpu_T
[0], cpu_T
[0]);
1363 static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift
)
1365 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, shift
);
1366 tcg_gen_trunc_i64_i32(cpu_T
[0], cpu_M0
);
1367 tcg_gen_ext16s_i32(cpu_T
[0], cpu_T
[0]);
1370 static inline void gen_op_iwmmxt_extru_T0_M0(int shift
, uint32_t mask
)
1372 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, shift
);
1373 tcg_gen_trunc_i64_i32(cpu_T
[0], cpu_M0
);
1375 tcg_gen_andi_i32(cpu_T
[0], cpu_T
[0], mask
);
1378 static void gen_op_iwmmxt_set_mup(void)
1381 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1382 tcg_gen_ori_i32(tmp
, tmp
, 2);
1383 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1386 static void gen_op_iwmmxt_set_cup(void)
1389 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1390 tcg_gen_ori_i32(tmp
, tmp
, 1);
1391 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1394 static void gen_op_iwmmxt_setpsr_nz(void)
1396 TCGv tmp
= new_tmp();
1397 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1398 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1401 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1403 iwmmxt_load_reg(cpu_V1
, rn
);
1404 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1405 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1409 static void gen_iwmmxt_movl_T0_T1_wRn(int rn
)
1411 iwmmxt_load_reg(cpu_V0
, rn
);
1412 tcg_gen_trunc_i64_i32(cpu_T
[0], cpu_V0
);
1413 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1414 tcg_gen_trunc_i64_i32(cpu_T
[1], cpu_V0
);
1417 static void gen_iwmmxt_movl_wRn_T0_T1(int rn
)
1419 tcg_gen_concat_i32_i64(cpu_V0
, cpu_T
[0], cpu_T
[1]);
1420 iwmmxt_store_reg(cpu_V0
, rn
);
1423 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
)
1428 rd
= (insn
>> 16) & 0xf;
1429 gen_movl_T1_reg(s
, rd
);
1431 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1432 if (insn
& (1 << 24)) {
1434 if (insn
& (1 << 23))
1435 gen_op_addl_T1_im(offset
);
1437 gen_op_addl_T1_im(-offset
);
1439 if (insn
& (1 << 21))
1440 gen_movl_reg_T1(s
, rd
);
1441 } else if (insn
& (1 << 21)) {
1443 if (insn
& (1 << 23))
1444 gen_op_movl_T0_im(offset
);
1446 gen_op_movl_T0_im(- offset
);
1447 gen_op_addl_T0_T1();
1448 gen_movl_reg_T0(s
, rd
);
1449 } else if (!(insn
& (1 << 23)))
1454 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
)
1456 int rd
= (insn
>> 0) & 0xf;
1458 if (insn
& (1 << 8))
1459 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
)
1462 gen_op_iwmmxt_movl_T0_wCx(rd
);
1464 gen_iwmmxt_movl_T0_T1_wRn(rd
);
1466 gen_op_movl_T1_im(mask
);
1467 gen_op_andl_T0_T1();
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
1473 static int disas_iwmmxt_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1476 int rdhi
, rdlo
, rd0
, rd1
, i
;
1479 if ((insn
& 0x0e000e00) == 0x0c000000) {
1480 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1482 rdlo
= (insn
>> 12) & 0xf;
1483 rdhi
= (insn
>> 16) & 0xf;
1484 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1485 gen_iwmmxt_movl_T0_T1_wRn(wrd
);
1486 gen_movl_reg_T0(s
, rdlo
);
1487 gen_movl_reg_T1(s
, rdhi
);
1488 } else { /* TMCRR */
1489 gen_movl_T0_reg(s
, rdlo
);
1490 gen_movl_T1_reg(s
, rdhi
);
1491 gen_iwmmxt_movl_wRn_T0_T1(wrd
);
1492 gen_op_iwmmxt_set_mup();
1497 wrd
= (insn
>> 12) & 0xf;
1498 if (gen_iwmmxt_address(s
, insn
))
1500 if (insn
& ARM_CP_RW_BIT
) {
1501 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1502 tmp
= gen_ld32(cpu_T
[1], IS_USER(s
));
1503 tcg_gen_mov_i32(cpu_T
[0], tmp
);
1505 gen_op_iwmmxt_movl_wCx_T0(wrd
);
1508 if (insn
& (1 << 8)) {
1509 if (insn
& (1 << 22)) { /* WLDRD */
1510 tcg_gen_qemu_ld64(cpu_M0
, cpu_T
[1], IS_USER(s
));
1512 } else { /* WLDRW wRd */
1513 tmp
= gen_ld32(cpu_T
[1], IS_USER(s
));
1516 if (insn
& (1 << 22)) { /* WLDRH */
1517 tmp
= gen_ld16u(cpu_T
[1], IS_USER(s
));
1518 } else { /* WLDRB */
1519 tmp
= gen_ld8u(cpu_T
[1], IS_USER(s
));
1523 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1526 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1529 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1530 gen_op_iwmmxt_movl_T0_wCx(wrd
);
1532 tcg_gen_mov_i32(tmp
, cpu_T
[0]);
1533 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
1535 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1537 if (insn
& (1 << 8)) {
1538 if (insn
& (1 << 22)) { /* WSTRD */
1540 tcg_gen_qemu_st64(cpu_M0
, cpu_T
[1], IS_USER(s
));
1541 } else { /* WSTRW wRd */
1542 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1543 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
1546 if (insn
& (1 << 22)) { /* WSTRH */
1547 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1548 gen_st16(tmp
, cpu_T
[1], IS_USER(s
));
1549 } else { /* WSTRB */
1550 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1551 gen_st8(tmp
, cpu_T
[1], IS_USER(s
));
1559 if ((insn
& 0x0f000000) != 0x0e000000)
1562 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1563 case 0x000: /* WOR */
1564 wrd
= (insn
>> 12) & 0xf;
1565 rd0
= (insn
>> 0) & 0xf;
1566 rd1
= (insn
>> 16) & 0xf;
1567 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1568 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1569 gen_op_iwmmxt_setpsr_nz();
1570 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1571 gen_op_iwmmxt_set_mup();
1572 gen_op_iwmmxt_set_cup();
1574 case 0x011: /* TMCR */
1577 rd
= (insn
>> 12) & 0xf;
1578 wrd
= (insn
>> 16) & 0xf;
1580 case ARM_IWMMXT_wCID
:
1581 case ARM_IWMMXT_wCASF
:
1583 case ARM_IWMMXT_wCon
:
1584 gen_op_iwmmxt_set_cup();
1586 case ARM_IWMMXT_wCSSF
:
1587 gen_op_iwmmxt_movl_T0_wCx(wrd
);
1588 gen_movl_T1_reg(s
, rd
);
1589 gen_op_bicl_T0_T1();
1590 gen_op_iwmmxt_movl_wCx_T0(wrd
);
1592 case ARM_IWMMXT_wCGR0
:
1593 case ARM_IWMMXT_wCGR1
:
1594 case ARM_IWMMXT_wCGR2
:
1595 case ARM_IWMMXT_wCGR3
:
1596 gen_op_iwmmxt_set_cup();
1597 gen_movl_reg_T0(s
, rd
);
1598 gen_op_iwmmxt_movl_wCx_T0(wrd
);
1604 case 0x100: /* WXOR */
1605 wrd
= (insn
>> 12) & 0xf;
1606 rd0
= (insn
>> 0) & 0xf;
1607 rd1
= (insn
>> 16) & 0xf;
1608 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1609 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1610 gen_op_iwmmxt_setpsr_nz();
1611 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1612 gen_op_iwmmxt_set_mup();
1613 gen_op_iwmmxt_set_cup();
1615 case 0x111: /* TMRC */
1618 rd
= (insn
>> 12) & 0xf;
1619 wrd
= (insn
>> 16) & 0xf;
1620 gen_op_iwmmxt_movl_T0_wCx(wrd
);
1621 gen_movl_reg_T0(s
, rd
);
1623 case 0x300: /* WANDN */
1624 wrd
= (insn
>> 12) & 0xf;
1625 rd0
= (insn
>> 0) & 0xf;
1626 rd1
= (insn
>> 16) & 0xf;
1627 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1628 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1629 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1630 gen_op_iwmmxt_setpsr_nz();
1631 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1632 gen_op_iwmmxt_set_mup();
1633 gen_op_iwmmxt_set_cup();
1635 case 0x200: /* WAND */
1636 wrd
= (insn
>> 12) & 0xf;
1637 rd0
= (insn
>> 0) & 0xf;
1638 rd1
= (insn
>> 16) & 0xf;
1639 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1640 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1641 gen_op_iwmmxt_setpsr_nz();
1642 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1643 gen_op_iwmmxt_set_mup();
1644 gen_op_iwmmxt_set_cup();
1646 case 0x810: case 0xa10: /* WMADD */
1647 wrd
= (insn
>> 12) & 0xf;
1648 rd0
= (insn
>> 0) & 0xf;
1649 rd1
= (insn
>> 16) & 0xf;
1650 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1651 if (insn
& (1 << 21))
1652 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1654 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1655 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1656 gen_op_iwmmxt_set_mup();
1658 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1659 wrd
= (insn
>> 12) & 0xf;
1660 rd0
= (insn
>> 16) & 0xf;
1661 rd1
= (insn
>> 0) & 0xf;
1662 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1663 switch ((insn
>> 22) & 3) {
1665 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1668 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1671 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1676 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1677 gen_op_iwmmxt_set_mup();
1678 gen_op_iwmmxt_set_cup();
1680 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1681 wrd
= (insn
>> 12) & 0xf;
1682 rd0
= (insn
>> 16) & 0xf;
1683 rd1
= (insn
>> 0) & 0xf;
1684 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1685 switch ((insn
>> 22) & 3) {
1687 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1690 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1693 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1698 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1699 gen_op_iwmmxt_set_mup();
1700 gen_op_iwmmxt_set_cup();
1702 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1703 wrd
= (insn
>> 12) & 0xf;
1704 rd0
= (insn
>> 16) & 0xf;
1705 rd1
= (insn
>> 0) & 0xf;
1706 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1707 if (insn
& (1 << 22))
1708 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1710 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1711 if (!(insn
& (1 << 20)))
1712 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1713 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1714 gen_op_iwmmxt_set_mup();
1716 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1717 wrd
= (insn
>> 12) & 0xf;
1718 rd0
= (insn
>> 16) & 0xf;
1719 rd1
= (insn
>> 0) & 0xf;
1720 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1721 if (insn
& (1 << 21)) {
1722 if (insn
& (1 << 20))
1723 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1725 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1727 if (insn
& (1 << 20))
1728 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1730 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1732 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1733 gen_op_iwmmxt_set_mup();
1735 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1736 wrd
= (insn
>> 12) & 0xf;
1737 rd0
= (insn
>> 16) & 0xf;
1738 rd1
= (insn
>> 0) & 0xf;
1739 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1740 if (insn
& (1 << 21))
1741 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1743 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1744 if (!(insn
& (1 << 20))) {
1745 iwmmxt_load_reg(cpu_V1
, wrd
);
1746 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1748 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1749 gen_op_iwmmxt_set_mup();
1751 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1752 wrd
= (insn
>> 12) & 0xf;
1753 rd0
= (insn
>> 16) & 0xf;
1754 rd1
= (insn
>> 0) & 0xf;
1755 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1756 switch ((insn
>> 22) & 3) {
1758 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1761 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1764 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1769 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1770 gen_op_iwmmxt_set_mup();
1771 gen_op_iwmmxt_set_cup();
1773 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1774 wrd
= (insn
>> 12) & 0xf;
1775 rd0
= (insn
>> 16) & 0xf;
1776 rd1
= (insn
>> 0) & 0xf;
1777 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1778 if (insn
& (1 << 22)) {
1779 if (insn
& (1 << 20))
1780 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1782 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1784 if (insn
& (1 << 20))
1785 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1787 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1789 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1790 gen_op_iwmmxt_set_mup();
1791 gen_op_iwmmxt_set_cup();
1793 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1794 wrd
= (insn
>> 12) & 0xf;
1795 rd0
= (insn
>> 16) & 0xf;
1796 rd1
= (insn
>> 0) & 0xf;
1797 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1798 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1799 gen_op_movl_T1_im(7);
1800 gen_op_andl_T0_T1();
1801 gen_op_iwmmxt_align_M0_T0_wRn(rd1
);
1802 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1803 gen_op_iwmmxt_set_mup();
1805 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1806 rd
= (insn
>> 12) & 0xf;
1807 wrd
= (insn
>> 16) & 0xf;
1808 gen_movl_T0_reg(s
, rd
);
1809 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1810 switch ((insn
>> 6) & 3) {
1812 gen_op_movl_T1_im(0xff);
1813 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 7) << 3);
1816 gen_op_movl_T1_im(0xffff);
1817 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 3) << 4);
1820 gen_op_movl_T1_im(0xffffffff);
1821 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 1) << 5);
1826 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1827 gen_op_iwmmxt_set_mup();
1829 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1830 rd
= (insn
>> 12) & 0xf;
1831 wrd
= (insn
>> 16) & 0xf;
1834 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1835 switch ((insn
>> 22) & 3) {
1838 gen_op_iwmmxt_extrsb_T0_M0((insn
& 7) << 3);
1840 gen_op_iwmmxt_extru_T0_M0((insn
& 7) << 3, 0xff);
1845 gen_op_iwmmxt_extrsw_T0_M0((insn
& 3) << 4);
1847 gen_op_iwmmxt_extru_T0_M0((insn
& 3) << 4, 0xffff);
1851 gen_op_iwmmxt_extru_T0_M0((insn
& 1) << 5, ~0u);
1856 gen_movl_reg_T0(s
, rd
);
1858 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1859 if ((insn
& 0x000ff008) != 0x0003f000)
1861 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
1862 switch ((insn
>> 22) & 3) {
1864 gen_op_shrl_T1_im(((insn
& 7) << 2) + 0);
1867 gen_op_shrl_T1_im(((insn
& 3) << 3) + 4);
1870 gen_op_shrl_T1_im(((insn
& 1) << 4) + 12);
1875 gen_op_shll_T1_im(28);
1876 gen_set_nzcv(cpu_T
[1]);
1878 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1879 rd
= (insn
>> 12) & 0xf;
1880 wrd
= (insn
>> 16) & 0xf;
1881 gen_movl_T0_reg(s
, rd
);
1882 switch ((insn
>> 6) & 3) {
1884 gen_helper_iwmmxt_bcstb(cpu_M0
, cpu_T
[0]);
1887 gen_helper_iwmmxt_bcstw(cpu_M0
, cpu_T
[0]);
1890 gen_helper_iwmmxt_bcstl(cpu_M0
, cpu_T
[0]);
1895 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1896 gen_op_iwmmxt_set_mup();
1898 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1899 if ((insn
& 0x000ff00f) != 0x0003f000)
1901 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
1902 switch ((insn
>> 22) & 3) {
1904 for (i
= 0; i
< 7; i
++) {
1905 gen_op_shll_T1_im(4);
1906 gen_op_andl_T0_T1();
1910 for (i
= 0; i
< 3; i
++) {
1911 gen_op_shll_T1_im(8);
1912 gen_op_andl_T0_T1();
1916 gen_op_shll_T1_im(16);
1917 gen_op_andl_T0_T1();
1922 gen_set_nzcv(cpu_T
[0]);
1924 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1925 wrd
= (insn
>> 12) & 0xf;
1926 rd0
= (insn
>> 16) & 0xf;
1927 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1928 switch ((insn
>> 22) & 3) {
1930 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
1933 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
1936 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
1941 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1942 gen_op_iwmmxt_set_mup();
1944 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1945 if ((insn
& 0x000ff00f) != 0x0003f000)
1947 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
1948 switch ((insn
>> 22) & 3) {
1950 for (i
= 0; i
< 7; i
++) {
1951 gen_op_shll_T1_im(4);
1956 for (i
= 0; i
< 3; i
++) {
1957 gen_op_shll_T1_im(8);
1962 gen_op_shll_T1_im(16);
1968 gen_set_nzcv(cpu_T
[0]);
1970 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1971 rd
= (insn
>> 12) & 0xf;
1972 rd0
= (insn
>> 16) & 0xf;
1973 if ((insn
& 0xf) != 0)
1975 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1976 switch ((insn
>> 22) & 3) {
1978 gen_helper_iwmmxt_msbb(cpu_T
[0], cpu_M0
);
1981 gen_helper_iwmmxt_msbw(cpu_T
[0], cpu_M0
);
1984 gen_helper_iwmmxt_msbl(cpu_T
[0], cpu_M0
);
1989 gen_movl_reg_T0(s
, rd
);
1991 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1992 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1993 wrd
= (insn
>> 12) & 0xf;
1994 rd0
= (insn
>> 16) & 0xf;
1995 rd1
= (insn
>> 0) & 0xf;
1996 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1997 switch ((insn
>> 22) & 3) {
1999 if (insn
& (1 << 21))
2000 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
2002 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
2005 if (insn
& (1 << 21))
2006 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
2008 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
2011 if (insn
& (1 << 21))
2012 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2014 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2019 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2020 gen_op_iwmmxt_set_mup();
2021 gen_op_iwmmxt_set_cup();
2023 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2024 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2025 wrd
= (insn
>> 12) & 0xf;
2026 rd0
= (insn
>> 16) & 0xf;
2027 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2028 switch ((insn
>> 22) & 3) {
2030 if (insn
& (1 << 21))
2031 gen_op_iwmmxt_unpacklsb_M0();
2033 gen_op_iwmmxt_unpacklub_M0();
2036 if (insn
& (1 << 21))
2037 gen_op_iwmmxt_unpacklsw_M0();
2039 gen_op_iwmmxt_unpackluw_M0();
2042 if (insn
& (1 << 21))
2043 gen_op_iwmmxt_unpacklsl_M0();
2045 gen_op_iwmmxt_unpacklul_M0();
2050 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2051 gen_op_iwmmxt_set_mup();
2052 gen_op_iwmmxt_set_cup();
2054 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2055 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2056 wrd
= (insn
>> 12) & 0xf;
2057 rd0
= (insn
>> 16) & 0xf;
2058 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2059 switch ((insn
>> 22) & 3) {
2061 if (insn
& (1 << 21))
2062 gen_op_iwmmxt_unpackhsb_M0();
2064 gen_op_iwmmxt_unpackhub_M0();
2067 if (insn
& (1 << 21))
2068 gen_op_iwmmxt_unpackhsw_M0();
2070 gen_op_iwmmxt_unpackhuw_M0();
2073 if (insn
& (1 << 21))
2074 gen_op_iwmmxt_unpackhsl_M0();
2076 gen_op_iwmmxt_unpackhul_M0();
2081 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2082 gen_op_iwmmxt_set_mup();
2083 gen_op_iwmmxt_set_cup();
2085 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2086 case 0x214: case 0x614: case 0xa14: case 0xe14:
2087 wrd
= (insn
>> 12) & 0xf;
2088 rd0
= (insn
>> 16) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2090 if (gen_iwmmxt_shift(insn
, 0xff))
2092 switch ((insn
>> 22) & 3) {
2096 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2099 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2102 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2105 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2106 gen_op_iwmmxt_set_mup();
2107 gen_op_iwmmxt_set_cup();
2109 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2110 case 0x014: case 0x414: case 0x814: case 0xc14:
2111 wrd
= (insn
>> 12) & 0xf;
2112 rd0
= (insn
>> 16) & 0xf;
2113 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2114 if (gen_iwmmxt_shift(insn
, 0xff))
2116 switch ((insn
>> 22) & 3) {
2120 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2123 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2126 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2129 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2130 gen_op_iwmmxt_set_mup();
2131 gen_op_iwmmxt_set_cup();
2133 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2134 case 0x114: case 0x514: case 0x914: case 0xd14:
2135 wrd
= (insn
>> 12) & 0xf;
2136 rd0
= (insn
>> 16) & 0xf;
2137 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2138 if (gen_iwmmxt_shift(insn
, 0xff))
2140 switch ((insn
>> 22) & 3) {
2144 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2147 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2150 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2153 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2154 gen_op_iwmmxt_set_mup();
2155 gen_op_iwmmxt_set_cup();
2157 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2158 case 0x314: case 0x714: case 0xb14: case 0xf14:
2159 wrd
= (insn
>> 12) & 0xf;
2160 rd0
= (insn
>> 16) & 0xf;
2161 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2162 switch ((insn
>> 22) & 3) {
2166 if (gen_iwmmxt_shift(insn
, 0xf))
2168 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2171 if (gen_iwmmxt_shift(insn
, 0x1f))
2173 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2176 if (gen_iwmmxt_shift(insn
, 0x3f))
2178 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2181 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2182 gen_op_iwmmxt_set_mup();
2183 gen_op_iwmmxt_set_cup();
2185 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2186 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2187 wrd
= (insn
>> 12) & 0xf;
2188 rd0
= (insn
>> 16) & 0xf;
2189 rd1
= (insn
>> 0) & 0xf;
2190 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2191 switch ((insn
>> 22) & 3) {
2193 if (insn
& (1 << 21))
2194 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2196 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2199 if (insn
& (1 << 21))
2200 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2202 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2205 if (insn
& (1 << 21))
2206 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2208 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2213 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2214 gen_op_iwmmxt_set_mup();
2216 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2217 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2218 wrd
= (insn
>> 12) & 0xf;
2219 rd0
= (insn
>> 16) & 0xf;
2220 rd1
= (insn
>> 0) & 0xf;
2221 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2222 switch ((insn
>> 22) & 3) {
2224 if (insn
& (1 << 21))
2225 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2227 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2230 if (insn
& (1 << 21))
2231 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2233 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2236 if (insn
& (1 << 21))
2237 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2239 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2244 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2245 gen_op_iwmmxt_set_mup();
2247 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2248 case 0x402: case 0x502: case 0x602: case 0x702:
2249 wrd
= (insn
>> 12) & 0xf;
2250 rd0
= (insn
>> 16) & 0xf;
2251 rd1
= (insn
>> 0) & 0xf;
2252 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2253 gen_op_movl_T0_im((insn
>> 20) & 3);
2254 gen_op_iwmmxt_align_M0_T0_wRn(rd1
);
2255 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2256 gen_op_iwmmxt_set_mup();
2258 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2259 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2260 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2261 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2262 wrd
= (insn
>> 12) & 0xf;
2263 rd0
= (insn
>> 16) & 0xf;
2264 rd1
= (insn
>> 0) & 0xf;
2265 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2266 switch ((insn
>> 20) & 0xf) {
2268 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2271 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2274 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2277 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2280 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2283 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2286 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2289 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2292 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2297 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2298 gen_op_iwmmxt_set_mup();
2299 gen_op_iwmmxt_set_cup();
2301 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2302 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2303 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2304 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2305 wrd
= (insn
>> 12) & 0xf;
2306 rd0
= (insn
>> 16) & 0xf;
2307 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2308 gen_op_movl_T0_im(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2309 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2310 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2311 gen_op_iwmmxt_set_mup();
2312 gen_op_iwmmxt_set_cup();
2314 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2315 case 0x418: case 0x518: case 0x618: case 0x718:
2316 case 0x818: case 0x918: case 0xa18: case 0xb18:
2317 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2318 wrd
= (insn
>> 12) & 0xf;
2319 rd0
= (insn
>> 16) & 0xf;
2320 rd1
= (insn
>> 0) & 0xf;
2321 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2322 switch ((insn
>> 20) & 0xf) {
2324 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2327 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2330 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2333 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2336 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2339 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2342 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2345 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2348 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2353 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2354 gen_op_iwmmxt_set_mup();
2355 gen_op_iwmmxt_set_cup();
2357 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2358 case 0x408: case 0x508: case 0x608: case 0x708:
2359 case 0x808: case 0x908: case 0xa08: case 0xb08:
2360 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2361 wrd
= (insn
>> 12) & 0xf;
2362 rd0
= (insn
>> 16) & 0xf;
2363 rd1
= (insn
>> 0) & 0xf;
2364 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2365 if (!(insn
& (1 << 20)))
2367 switch ((insn
>> 22) & 3) {
2371 if (insn
& (1 << 21))
2372 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2374 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2377 if (insn
& (1 << 21))
2378 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2380 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2383 if (insn
& (1 << 21))
2384 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2386 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2389 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2390 gen_op_iwmmxt_set_mup();
2391 gen_op_iwmmxt_set_cup();
2393 case 0x201: case 0x203: case 0x205: case 0x207:
2394 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2395 case 0x211: case 0x213: case 0x215: case 0x217:
2396 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2397 wrd
= (insn
>> 5) & 0xf;
2398 rd0
= (insn
>> 12) & 0xf;
2399 rd1
= (insn
>> 0) & 0xf;
2400 if (rd0
== 0xf || rd1
== 0xf)
2402 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2403 switch ((insn
>> 16) & 0xf) {
2404 case 0x0: /* TMIA */
2405 gen_movl_T0_reg(s
, rd0
);
2406 gen_movl_T1_reg(s
, rd1
);
2407 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2409 case 0x8: /* TMIAPH */
2410 gen_movl_T0_reg(s
, rd0
);
2411 gen_movl_T1_reg(s
, rd1
);
2412 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2414 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2415 gen_movl_T1_reg(s
, rd0
);
2416 if (insn
& (1 << 16))
2417 gen_op_shrl_T1_im(16);
2418 gen_op_movl_T0_T1();
2419 gen_movl_T1_reg(s
, rd1
);
2420 if (insn
& (1 << 17))
2421 gen_op_shrl_T1_im(16);
2422 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2427 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2428 gen_op_iwmmxt_set_mup();
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error
   occurred (i.e. an undefined instruction).  */
2439 static int disas_dsp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2441 int acc
, rd0
, rd1
, rdhi
, rdlo
;
2443 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2444 /* Multiply with Internal Accumulate Format */
2445 rd0
= (insn
>> 12) & 0xf;
2447 acc
= (insn
>> 5) & 7;
2452 switch ((insn
>> 16) & 0xf) {
2454 gen_movl_T0_reg(s
, rd0
);
2455 gen_movl_T1_reg(s
, rd1
);
2456 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2458 case 0x8: /* MIAPH */
2459 gen_movl_T0_reg(s
, rd0
);
2460 gen_movl_T1_reg(s
, rd1
);
2461 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2463 case 0xc: /* MIABB */
2464 case 0xd: /* MIABT */
2465 case 0xe: /* MIATB */
2466 case 0xf: /* MIATT */
2467 gen_movl_T1_reg(s
, rd0
);
2468 if (insn
& (1 << 16))
2469 gen_op_shrl_T1_im(16);
2470 gen_op_movl_T0_T1();
2471 gen_movl_T1_reg(s
, rd1
);
2472 if (insn
& (1 << 17))
2473 gen_op_shrl_T1_im(16);
2474 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2480 gen_op_iwmmxt_movq_wRn_M0(acc
);
2484 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2485 /* Internal Accumulator Access Format */
2486 rdhi
= (insn
>> 16) & 0xf;
2487 rdlo
= (insn
>> 12) & 0xf;
2493 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2494 gen_iwmmxt_movl_T0_T1_wRn(acc
);
2495 gen_movl_reg_T0(s
, rdlo
);
2496 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2497 gen_op_andl_T0_T1();
2498 gen_movl_reg_T0(s
, rdhi
);
2500 gen_movl_T0_reg(s
, rdlo
);
2501 gen_movl_T1_reg(s
, rdhi
);
2502 gen_iwmmxt_movl_wRn_T0_T1(acc
);
2510 /* Disassemble system coprocessor instruction. Return nonzero if
2511 instruction is not defined. */
2512 static int disas_cp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2515 uint32_t rd
= (insn
>> 12) & 0xf;
2516 uint32_t cp
= (insn
>> 8) & 0xf;
2521 if (insn
& ARM_CP_RW_BIT
) {
2522 if (!env
->cp
[cp
].cp_read
)
2524 gen_set_pc_im(s
->pc
);
2526 gen_helper_get_cp(tmp
, cpu_env
, tcg_const_i32(insn
));
2527 store_reg(s
, rd
, tmp
);
2529 if (!env
->cp
[cp
].cp_write
)
2531 gen_set_pc_im(s
->pc
);
2532 tmp
= load_reg(s
, rd
);
2533 gen_helper_set_cp(cpu_env
, tcg_const_i32(insn
), tmp
);
2539 static int cp15_user_ok(uint32_t insn
)
2541 int cpn
= (insn
>> 16) & 0xf;
2542 int cpm
= insn
& 0xf;
2543 int op
= ((insn
>> 5) & 7) | ((insn
>> 18) & 0x38);
2545 if (cpn
== 13 && cpm
== 0) {
2547 if (op
== 2 || (op
== 3 && (insn
& ARM_CP_RW_BIT
)))
2551 /* ISB, DSB, DMB. */
2552 if ((cpm
== 5 && op
== 4)
2553 || (cpm
== 10 && (op
== 4 || op
== 5)))
2559 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2560 instruction is not defined. */
2561 static int disas_cp15_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2566 /* M profile cores use memory mapped registers instead of cp15. */
2567 if (arm_feature(env
, ARM_FEATURE_M
))
2570 if ((insn
& (1 << 25)) == 0) {
2571 if (insn
& (1 << 20)) {
2575 /* mcrr. Used for block cache operations, so implement as no-op. */
2578 if ((insn
& (1 << 4)) == 0) {
2582 if (IS_USER(s
) && !cp15_user_ok(insn
)) {
2585 if ((insn
& 0x0fff0fff) == 0x0e070f90
2586 || (insn
& 0x0fff0fff) == 0x0e070f58) {
2587 /* Wait for interrupt. */
2588 gen_set_pc_im(s
->pc
);
2589 s
->is_jmp
= DISAS_WFI
;
2592 rd
= (insn
>> 12) & 0xf;
2593 if (insn
& ARM_CP_RW_BIT
) {
2595 gen_helper_get_cp15(tmp
, cpu_env
, tcg_const_i32(insn
));
2596 /* If the destination register is r15 then sets condition codes. */
2598 store_reg(s
, rd
, tmp
);
2602 tmp
= load_reg(s
, rd
);
2603 gen_helper_set_cp15(cpu_env
, tcg_const_i32(insn
), tmp
);
2605 /* Normally we would always end the TB here, but Linux
2606 * arch/arm/mach-pxa/sleep.S expects two instructions following
2607 * an MMU enable to execute from cache. Imitate this behaviour. */
2608 if (!arm_feature(env
, ARM_FEATURE_XSCALE
) ||
2609 (insn
& 0x0fff0fff) != 0x0e010f10)
/* Shift X right by N; a negative N shifts left instead.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Single-precision register number: 4-bit field at BIGBIT forms bits 4:1,
   the bit at SMALLBIT supplies bit 0.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Double-precision register number: with VFP3 the bit at SMALLBIT becomes
   bit 4 of a 5-bit register number; pre-VFP3 that bit must be zero, and
   the expansion executes "return 1" (undefined insn) when it is set.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2635 /* Move between integer and VFP cores. */
2636 static TCGv
gen_vfp_mrs(void)
2638 TCGv tmp
= new_tmp();
2639 tcg_gen_mov_i32(tmp
, cpu_F0s
);
2643 static void gen_vfp_msr(TCGv tmp
)
2645 tcg_gen_mov_i32(cpu_F0s
, tmp
);
2650 vfp_enabled(CPUState
* env
)
2652 return ((env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30)) != 0);
2655 static void gen_neon_dup_u8(TCGv var
, int shift
)
2657 TCGv tmp
= new_tmp();
2659 tcg_gen_shri_i32(var
, var
, shift
);
2660 tcg_gen_ext8u_i32(var
, var
);
2661 tcg_gen_shli_i32(tmp
, var
, 8);
2662 tcg_gen_or_i32(var
, var
, tmp
);
2663 tcg_gen_shli_i32(tmp
, var
, 16);
2664 tcg_gen_or_i32(var
, var
, tmp
);
2668 static void gen_neon_dup_low16(TCGv var
)
2670 TCGv tmp
= new_tmp();
2671 tcg_gen_ext16u_i32(var
, var
);
2672 tcg_gen_shli_i32(tmp
, var
, 16);
2673 tcg_gen_or_i32(var
, var
, tmp
);
2677 static void gen_neon_dup_high16(TCGv var
)
2679 TCGv tmp
= new_tmp();
2680 tcg_gen_andi_i32(var
, var
, 0xffff0000);
2681 tcg_gen_shri_i32(tmp
, var
, 16);
2682 tcg_gen_or_i32(var
, var
, tmp
);
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
2688 static int disas_vfp_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
2690 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
2695 if (!arm_feature(env
, ARM_FEATURE_VFP
))
2698 if (!vfp_enabled(env
)) {
2699 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2700 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
2702 rn
= (insn
>> 16) & 0xf;
2703 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
2704 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
)
2707 dp
= ((insn
& 0xf00) == 0xb00);
2708 switch ((insn
>> 24) & 0xf) {
2710 if (insn
& (1 << 4)) {
2711 /* single register transfer */
2712 rd
= (insn
>> 12) & 0xf;
2717 VFP_DREG_N(rn
, insn
);
2720 if (insn
& 0x00c00060
2721 && !arm_feature(env
, ARM_FEATURE_NEON
))
2724 pass
= (insn
>> 21) & 1;
2725 if (insn
& (1 << 22)) {
2727 offset
= ((insn
>> 5) & 3) * 8;
2728 } else if (insn
& (1 << 5)) {
2730 offset
= (insn
& (1 << 6)) ? 16 : 0;
2735 if (insn
& ARM_CP_RW_BIT
) {
2737 tmp
= neon_load_reg(rn
, pass
);
2741 tcg_gen_shri_i32(tmp
, tmp
, offset
);
2742 if (insn
& (1 << 23))
2748 if (insn
& (1 << 23)) {
2750 tcg_gen_shri_i32(tmp
, tmp
, 16);
2756 tcg_gen_sari_i32(tmp
, tmp
, 16);
2765 store_reg(s
, rd
, tmp
);
2768 tmp
= load_reg(s
, rd
);
2769 if (insn
& (1 << 23)) {
2772 gen_neon_dup_u8(tmp
, 0);
2773 } else if (size
== 1) {
2774 gen_neon_dup_low16(tmp
);
2776 for (n
= 0; n
<= pass
* 2; n
++) {
2778 tcg_gen_mov_i32(tmp2
, tmp
);
2779 neon_store_reg(rn
, n
, tmp2
);
2781 neon_store_reg(rn
, n
, tmp
);
2786 tmp2
= neon_load_reg(rn
, pass
);
2787 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xff);
2791 tmp2
= neon_load_reg(rn
, pass
);
2792 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xffff);
2798 neon_store_reg(rn
, pass
, tmp
);
2802 if ((insn
& 0x6f) != 0x00)
2804 rn
= VFP_SREG_N(insn
);
2805 if (insn
& ARM_CP_RW_BIT
) {
2807 if (insn
& (1 << 21)) {
2808 /* system register */
2813 /* VFP2 allows access to FSID from userspace.
2814 VFP3 restricts all id registers to privileged
2817 && arm_feature(env
, ARM_FEATURE_VFP3
))
2819 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2824 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2826 case ARM_VFP_FPINST
:
2827 case ARM_VFP_FPINST2
:
2828 /* Not present in VFP3. */
2830 || arm_feature(env
, ARM_FEATURE_VFP3
))
2832 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2836 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
2837 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
2840 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
2846 || !arm_feature(env
, ARM_FEATURE_VFP3
))
2848 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2854 gen_mov_F0_vreg(0, rn
);
2855 tmp
= gen_vfp_mrs();
2858 /* Set the 4 flag bits in the CPSR. */
2862 store_reg(s
, rd
, tmp
);
2866 tmp
= load_reg(s
, rd
);
2867 if (insn
& (1 << 21)) {
2869 /* system register */
2874 /* Writes are ignored. */
2877 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
2884 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2887 case ARM_VFP_FPINST
:
2888 case ARM_VFP_FPINST2
:
2889 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2896 gen_mov_vreg_F0(0, rn
);
2901 /* data processing */
2902 /* The opcode is in bits 23, 21, 20 and 6. */
2903 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
2907 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
2909 /* rn is register number */
2910 VFP_DREG_N(rn
, insn
);
2913 if (op
== 15 && (rn
== 15 || rn
> 17)) {
2914 /* Integer or single precision destination. */
2915 rd
= VFP_SREG_D(insn
);
2917 VFP_DREG_D(rd
, insn
);
2920 if (op
== 15 && (rn
== 16 || rn
== 17)) {
2921 /* Integer source. */
2922 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
2924 VFP_DREG_M(rm
, insn
);
2927 rn
= VFP_SREG_N(insn
);
2928 if (op
== 15 && rn
== 15) {
2929 /* Double precision destination. */
2930 VFP_DREG_D(rd
, insn
);
2932 rd
= VFP_SREG_D(insn
);
2934 rm
= VFP_SREG_M(insn
);
2937 veclen
= env
->vfp
.vec_len
;
2938 if (op
== 15 && rn
> 3)
2941 /* Shut up compiler warnings. */
2952 /* Figure out what type of vector operation this is. */
2953 if ((rd
& bank_mask
) == 0) {
2958 delta_d
= (env
->vfp
.vec_stride
>> 1) + 1;
2960 delta_d
= env
->vfp
.vec_stride
+ 1;
2962 if ((rm
& bank_mask
) == 0) {
2963 /* mixed scalar/vector */
2972 /* Load the initial operands. */
2977 /* Integer source */
2978 gen_mov_F0_vreg(0, rm
);
2983 gen_mov_F0_vreg(dp
, rd
);
2984 gen_mov_F1_vreg(dp
, rm
);
2988 /* Compare with zero */
2989 gen_mov_F0_vreg(dp
, rd
);
3000 /* Source and destination the same. */
3001 gen_mov_F0_vreg(dp
, rd
);
3004 /* One source operand. */
3005 gen_mov_F0_vreg(dp
, rm
);
3009 /* Two source operands. */
3010 gen_mov_F0_vreg(dp
, rn
);
3011 gen_mov_F1_vreg(dp
, rm
);
3015 /* Perform the calculation. */
3017 case 0: /* mac: fd + (fn * fm) */
3019 gen_mov_F1_vreg(dp
, rd
);
3022 case 1: /* nmac: fd - (fn * fm) */
3025 gen_mov_F1_vreg(dp
, rd
);
3028 case 2: /* msc: -fd + (fn * fm) */
3030 gen_mov_F1_vreg(dp
, rd
);
3033 case 3: /* nmsc: -fd - (fn * fm) */
3036 gen_mov_F1_vreg(dp
, rd
);
3039 case 4: /* mul: fn * fm */
3042 case 5: /* nmul: -(fn * fm) */
3046 case 6: /* add: fn + fm */
3049 case 7: /* sub: fn - fm */
3052 case 8: /* div: fn / fm */
3055 case 14: /* fconst */
3056 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3059 n
= (insn
<< 12) & 0x80000000;
3060 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3067 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3074 tcg_gen_movi_i32(cpu_F0s
, n
);
3077 case 15: /* extension space */
3100 case 11: /* cmpez */
3104 case 15: /* single<->double conversion */
3106 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3108 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3110 case 16: /* fuito */
3113 case 17: /* fsito */
3116 case 20: /* fshto */
3117 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3119 gen_vfp_shto(dp
, 16 - rm
);
3121 case 21: /* fslto */
3122 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3124 gen_vfp_slto(dp
, 32 - rm
);
3126 case 22: /* fuhto */
3127 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3129 gen_vfp_uhto(dp
, 16 - rm
);
3131 case 23: /* fulto */
3132 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3134 gen_vfp_ulto(dp
, 32 - rm
);
3136 case 24: /* ftoui */
3139 case 25: /* ftouiz */
3142 case 26: /* ftosi */
3145 case 27: /* ftosiz */
3148 case 28: /* ftosh */
3149 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3151 gen_vfp_tosh(dp
, 16 - rm
);
3153 case 29: /* ftosl */
3154 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3156 gen_vfp_tosl(dp
, 32 - rm
);
3158 case 30: /* ftouh */
3159 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3161 gen_vfp_touh(dp
, 16 - rm
);
3163 case 31: /* ftoul */
3164 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3166 gen_vfp_toul(dp
, 32 - rm
);
3168 default: /* undefined */
3169 printf ("rn:%d\n", rn
);
3173 default: /* undefined */
3174 printf ("op:%d\n", op
);
3178 /* Write back the result. */
3179 if (op
== 15 && (rn
>= 8 && rn
<= 11))
3180 ; /* Comparison, do nothing. */
3181 else if (op
== 15 && rn
> 17)
3182 /* Integer result. */
3183 gen_mov_vreg_F0(0, rd
);
3184 else if (op
== 15 && rn
== 15)
3186 gen_mov_vreg_F0(!dp
, rd
);
3188 gen_mov_vreg_F0(dp
, rd
);
3190 /* break out of the loop if we have finished */
3194 if (op
== 15 && delta_m
== 0) {
3195 /* single source one-many */
3197 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3199 gen_mov_vreg_F0(dp
, rd
);
3203 /* Setup the next operands. */
3205 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3209 /* One source operand. */
3210 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3212 gen_mov_F0_vreg(dp
, rm
);
3214 /* Two source operands. */
3215 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3217 gen_mov_F0_vreg(dp
, rn
);
3219 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3221 gen_mov_F1_vreg(dp
, rm
);
3229 if (dp
&& (insn
& 0x03e00000) == 0x00400000) {
3230 /* two-register transfer */
3231 rn
= (insn
>> 16) & 0xf;
3232 rd
= (insn
>> 12) & 0xf;
3234 VFP_DREG_M(rm
, insn
);
3236 rm
= VFP_SREG_M(insn
);
3239 if (insn
& ARM_CP_RW_BIT
) {
3242 gen_mov_F0_vreg(0, rm
* 2);
3243 tmp
= gen_vfp_mrs();
3244 store_reg(s
, rd
, tmp
);
3245 gen_mov_F0_vreg(0, rm
* 2 + 1);
3246 tmp
= gen_vfp_mrs();
3247 store_reg(s
, rn
, tmp
);
3249 gen_mov_F0_vreg(0, rm
);
3250 tmp
= gen_vfp_mrs();
3251 store_reg(s
, rn
, tmp
);
3252 gen_mov_F0_vreg(0, rm
+ 1);
3253 tmp
= gen_vfp_mrs();
3254 store_reg(s
, rd
, tmp
);
3259 tmp
= load_reg(s
, rd
);
3261 gen_mov_vreg_F0(0, rm
* 2);
3262 tmp
= load_reg(s
, rn
);
3264 gen_mov_vreg_F0(0, rm
* 2 + 1);
3266 tmp
= load_reg(s
, rn
);
3268 gen_mov_vreg_F0(0, rm
);
3269 tmp
= load_reg(s
, rd
);
3271 gen_mov_vreg_F0(0, rm
+ 1);
3276 rn
= (insn
>> 16) & 0xf;
3278 VFP_DREG_D(rd
, insn
);
3280 rd
= VFP_SREG_D(insn
);
3281 if (s
->thumb
&& rn
== 15) {
3282 gen_op_movl_T1_im(s
->pc
& ~2);
3284 gen_movl_T1_reg(s
, rn
);
3286 if ((insn
& 0x01200000) == 0x01000000) {
3287 /* Single load/store */
3288 offset
= (insn
& 0xff) << 2;
3289 if ((insn
& (1 << 23)) == 0)
3291 gen_op_addl_T1_im(offset
);
3292 if (insn
& (1 << 20)) {
3294 gen_mov_vreg_F0(dp
, rd
);
3296 gen_mov_F0_vreg(dp
, rd
);
3300 /* load/store multiple */
3302 n
= (insn
>> 1) & 0x7f;
3306 if (insn
& (1 << 24)) /* pre-decrement */
3307 gen_op_addl_T1_im(-((insn
& 0xff) << 2));
3313 for (i
= 0; i
< n
; i
++) {
3314 if (insn
& ARM_CP_RW_BIT
) {
3317 gen_mov_vreg_F0(dp
, rd
+ i
);
3320 gen_mov_F0_vreg(dp
, rd
+ i
);
3323 gen_op_addl_T1_im(offset
);
3325 if (insn
& (1 << 21)) {
3327 if (insn
& (1 << 24))
3328 offset
= -offset
* n
;
3329 else if (dp
&& (insn
& 1))
3335 gen_op_addl_T1_im(offset
);
3336 gen_movl_reg_T1(s
, rn
);
3342 /* Should never happen. */
3348 static inline void gen_goto_tb(DisasContext
*s
, int n
, uint32_t dest
)
3350 TranslationBlock
*tb
;
3353 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
3355 gen_set_pc_im(dest
);
3356 tcg_gen_exit_tb((long)tb
+ n
);
3358 gen_set_pc_im(dest
);
3363 static inline void gen_jmp (DisasContext
*s
, uint32_t dest
)
3365 if (unlikely(s
->singlestep_enabled
)) {
3366 /* An indirect jump so that we still trigger the debug exception. */
3371 gen_goto_tb(s
, 0, dest
);
3372 s
->is_jmp
= DISAS_TB_JUMP
;
3376 static inline void gen_mulxy(TCGv t0
, TCGv t1
, int x
, int y
)
3379 tcg_gen_sari_i32(t0
, t0
, 16);
3383 tcg_gen_sari_i32(t1
, t1
, 16);
3386 tcg_gen_mul_i32(t0
, t0
, t1
);
3389 /* Return the mask of PSR bits set by a MSR instruction. */
3390 static uint32_t msr_mask(CPUState
*env
, DisasContext
*s
, int flags
, int spsr
) {
3394 if (flags
& (1 << 0))
3396 if (flags
& (1 << 1))
3398 if (flags
& (1 << 2))
3400 if (flags
& (1 << 3))
3403 /* Mask out undefined bits. */
3404 mask
&= ~CPSR_RESERVED
;
3405 if (!arm_feature(env
, ARM_FEATURE_V6
))
3406 mask
&= ~(CPSR_E
| CPSR_GE
);
3407 if (!arm_feature(env
, ARM_FEATURE_THUMB2
))
3409 /* Mask out execution state bits. */
3412 /* Mask out privileged bits. */
3418 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3419 static int gen_set_psr(DisasContext
*s
, uint32_t mask
, int spsr
, TCGv t0
)
3423 /* ??? This is also undefined in system mode. */
3427 tmp
= load_cpu_field(spsr
);
3428 tcg_gen_andi_i32(tmp
, tmp
, ~mask
);
3429 tcg_gen_andi_i32(t0
, t0
, mask
);
3430 tcg_gen_or_i32(tmp
, tmp
, t0
);
3431 store_cpu_field(tmp
, spsr
);
3433 gen_set_cpsr(t0
, mask
);
3440 /* Returns nonzero if access to the PSR is not permitted. */
3441 static int gen_set_psr_im(DisasContext
*s
, uint32_t mask
, int spsr
, uint32_t val
)
3445 tcg_gen_movi_i32(tmp
, val
);
3446 return gen_set_psr(s
, mask
, spsr
, tmp
);
3449 /* Generate an old-style exception return. Marks pc as dead. */
3450 static void gen_exception_return(DisasContext
*s
, TCGv pc
)
3453 store_reg(s
, 15, pc
);
3454 tmp
= load_cpu_field(spsr
);
3455 gen_set_cpsr(tmp
, 0xffffffff);
3457 s
->is_jmp
= DISAS_UPDATE
;
3460 /* Generate a v6 exception return. Marks both values as dead. */
3461 static void gen_rfe(DisasContext
*s
, TCGv pc
, TCGv cpsr
)
3463 gen_set_cpsr(cpsr
, 0xffffffff);
3465 store_reg(s
, 15, pc
);
3466 s
->is_jmp
= DISAS_UPDATE
;
3470 gen_set_condexec (DisasContext
*s
)
3472 if (s
->condexec_mask
) {
3473 uint32_t val
= (s
->condexec_cond
<< 4) | (s
->condexec_mask
>> 1);
3474 TCGv tmp
= new_tmp();
3475 tcg_gen_movi_i32(tmp
, val
);
3476 store_cpu_field(tmp
, condexec_bits
);
3480 static void gen_nop_hint(DisasContext
*s
, int val
)
3484 gen_set_pc_im(s
->pc
);
3485 s
->is_jmp
= DISAS_WFI
;
3489 /* TODO: Implement SEV and WFE. May help SMP performance. */
3495 /* These macros help make the code more readable when migrating from the
3496 old dyngen helpers. They should probably be removed when
3497 T0/T1 are removed. */
3498 #define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
3499 #define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
3501 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3503 static inline int gen_neon_add(int size
)
3506 case 0: gen_helper_neon_add_u8(CPU_T001
); break;
3507 case 1: gen_helper_neon_add_u16(CPU_T001
); break;
3508 case 2: gen_op_addl_T0_T1(); break;
3514 static inline void gen_neon_rsb(int size
)
3517 case 0: gen_helper_neon_sub_u8(cpu_T
[0], cpu_T
[1], cpu_T
[0]); break;
3518 case 1: gen_helper_neon_sub_u16(cpu_T
[0], cpu_T
[1], cpu_T
[0]); break;
3519 case 2: gen_op_rsbl_T0_T1(); break;
3524 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3525 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3526 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3527 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3528 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3530 /* FIXME: This is wrong. They set the wrong overflow bit. */
3531 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3532 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3533 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3534 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3536 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3537 switch ((size << 1) | u) { \
3539 gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3542 gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3545 gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3548 gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3551 gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3554 gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3556 default: return 1; \
3559 #define GEN_NEON_INTEGER_OP(name) do { \
3560 switch ((size << 1) | u) { \
3562 gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3565 gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3568 gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3571 gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3574 gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3577 gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3579 default: return 1; \
3583 gen_neon_movl_scratch_T0(int scratch
)
3587 offset
= offsetof(CPUARMState
, vfp
.scratch
[scratch
]);
3588 tcg_gen_st_i32(cpu_T
[0], cpu_env
, offset
);
3592 gen_neon_movl_scratch_T1(int scratch
)
3596 offset
= offsetof(CPUARMState
, vfp
.scratch
[scratch
]);
3597 tcg_gen_st_i32(cpu_T
[1], cpu_env
, offset
);
3601 gen_neon_movl_T0_scratch(int scratch
)
3605 offset
= offsetof(CPUARMState
, vfp
.scratch
[scratch
]);
3606 tcg_gen_ld_i32(cpu_T
[0], cpu_env
, offset
);
3610 gen_neon_movl_T1_scratch(int scratch
)
3614 offset
= offsetof(CPUARMState
, vfp
.scratch
[scratch
]);
3615 tcg_gen_ld_i32(cpu_T
[1], cpu_env
, offset
);
3618 static inline void gen_neon_get_scalar(int size
, int reg
)
3621 NEON_GET_REG(T0
, reg
>> 1, reg
& 1);
3623 NEON_GET_REG(T0
, reg
>> 2, (reg
>> 1) & 1);
3625 gen_neon_dup_low16(cpu_T
[0]);
3627 gen_neon_dup_high16(cpu_T
[0]);
3631 static void gen_neon_unzip(int reg
, int q
, int tmp
, int size
)
3635 for (n
= 0; n
< q
+ 1; n
+= 2) {
3636 NEON_GET_REG(T0
, reg
, n
);
3637 NEON_GET_REG(T0
, reg
, n
+ n
);
3639 case 0: gen_helper_neon_unzip_u8(); break;
3640 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
3641 case 2: /* no-op */; break;
3644 gen_neon_movl_scratch_T0(tmp
+ n
);
3645 gen_neon_movl_scratch_T1(tmp
+ n
+ 1);
3653 } neon_ls_element_type
[11] = {
3667 /* Translate a NEON load/store element instruction. Return nonzero if the
3668 instruction is invalid. */
3669 static int disas_neon_ls_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
3685 if (!vfp_enabled(env
))
3687 VFP_DREG_D(rd
, insn
);
3688 rn
= (insn
>> 16) & 0xf;
3690 load
= (insn
& (1 << 21)) != 0;
3691 if ((insn
& (1 << 23)) == 0) {
3692 /* Load store all elements. */
3693 op
= (insn
>> 8) & 0xf;
3694 size
= (insn
>> 6) & 3;
3695 if (op
> 10 || size
== 3)
3697 nregs
= neon_ls_element_type
[op
].nregs
;
3698 interleave
= neon_ls_element_type
[op
].interleave
;
3699 gen_movl_T1_reg(s
, rn
);
3700 stride
= (1 << size
) * interleave
;
3701 for (reg
= 0; reg
< nregs
; reg
++) {
3702 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
3703 gen_movl_T1_reg(s
, rn
);
3704 gen_op_addl_T1_im((1 << size
) * reg
);
3705 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
3706 gen_movl_T1_reg(s
, rn
);
3707 gen_op_addl_T1_im(1 << size
);
3709 for (pass
= 0; pass
< 2; pass
++) {
3712 tmp
= gen_ld32(cpu_T
[1], IS_USER(s
));
3713 neon_store_reg(rd
, pass
, tmp
);
3715 tmp
= neon_load_reg(rd
, pass
);
3716 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
3718 gen_op_addl_T1_im(stride
);
3719 } else if (size
== 1) {
3721 tmp
= gen_ld16u(cpu_T
[1], IS_USER(s
));
3722 gen_op_addl_T1_im(stride
);
3723 tmp2
= gen_ld16u(cpu_T
[1], IS_USER(s
));
3724 gen_op_addl_T1_im(stride
);
3725 gen_bfi(tmp
, tmp
, tmp2
, 16, 0xffff);
3727 neon_store_reg(rd
, pass
, tmp
);
3729 tmp
= neon_load_reg(rd
, pass
);
3731 tcg_gen_shri_i32(tmp2
, tmp
, 16);
3732 gen_st16(tmp
, cpu_T
[1], IS_USER(s
));
3733 gen_op_addl_T1_im(stride
);
3734 gen_st16(tmp2
, cpu_T
[1], IS_USER(s
));
3735 gen_op_addl_T1_im(stride
);
3737 } else /* size == 0 */ {
3740 for (n
= 0; n
< 4; n
++) {
3741 tmp
= gen_ld8u(cpu_T
[1], IS_USER(s
));
3742 gen_op_addl_T1_im(stride
);
3746 gen_bfi(tmp2
, tmp2
, tmp
, n
* 8, 0xff);
3750 neon_store_reg(rd
, pass
, tmp2
);
3752 tmp2
= neon_load_reg(rd
, pass
);
3753 for (n
= 0; n
< 4; n
++) {
3756 tcg_gen_mov_i32(tmp
, tmp2
);
3758 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
3760 gen_st8(tmp
, cpu_T
[1], IS_USER(s
));
3761 gen_op_addl_T1_im(stride
);
3767 rd
+= neon_ls_element_type
[op
].spacing
;
3771 size
= (insn
>> 10) & 3;
3773 /* Load single element to all lanes. */
3776 size
= (insn
>> 6) & 3;
3777 nregs
= ((insn
>> 8) & 3) + 1;
3778 stride
= (insn
& (1 << 5)) ? 2 : 1;
3779 gen_movl_T1_reg(s
, rn
);
3780 for (reg
= 0; reg
< nregs
; reg
++) {
3783 tmp
= gen_ld8u(cpu_T
[1], IS_USER(s
));
3784 gen_neon_dup_u8(tmp
, 0);
3787 tmp
= gen_ld16u(cpu_T
[1], IS_USER(s
));
3788 gen_neon_dup_low16(tmp
);
3791 tmp
= gen_ld32(cpu_T
[0], IS_USER(s
));
3795 default: /* Avoid compiler warnings. */
3798 gen_op_addl_T1_im(1 << size
);
3800 tcg_gen_mov_i32(tmp2
, tmp
);
3801 neon_store_reg(rd
, 0, tmp2
);
3802 neon_store_reg(rd
, 1, tmp
);
3805 stride
= (1 << size
) * nregs
;
3807 /* Single element. */
3808 pass
= (insn
>> 7) & 1;
3811 shift
= ((insn
>> 5) & 3) * 8;
3815 shift
= ((insn
>> 6) & 1) * 16;
3816 stride
= (insn
& (1 << 5)) ? 2 : 1;
3820 stride
= (insn
& (1 << 6)) ? 2 : 1;
3825 nregs
= ((insn
>> 8) & 3) + 1;
3826 gen_movl_T1_reg(s
, rn
);
3827 for (reg
= 0; reg
< nregs
; reg
++) {
3831 tmp
= gen_ld8u(cpu_T
[1], IS_USER(s
));
3834 tmp
= gen_ld16u(cpu_T
[1], IS_USER(s
));
3837 tmp
= gen_ld32(cpu_T
[1], IS_USER(s
));
3839 default: /* Avoid compiler warnings. */
3843 tmp2
= neon_load_reg(rd
, pass
);
3844 gen_bfi(tmp
, tmp2
, tmp
, shift
, size
? 0xffff : 0xff);
3847 neon_store_reg(rd
, pass
, tmp
);
3848 } else { /* Store */
3849 tmp
= neon_load_reg(rd
, pass
);
3851 tcg_gen_shri_i32(tmp
, tmp
, shift
);
3854 gen_st8(tmp
, cpu_T
[1], IS_USER(s
));
3857 gen_st16(tmp
, cpu_T
[1], IS_USER(s
));
3860 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
3865 gen_op_addl_T1_im(1 << size
);
3867 stride
= nregs
* (1 << size
);
3873 base
= load_reg(s
, rn
);
3875 tcg_gen_addi_i32(base
, base
, stride
);
3878 index
= load_reg(s
, rm
);
3879 tcg_gen_add_i32(base
, base
, index
);
3882 store_reg(s
, rn
, base
);
3887 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3888 static void gen_neon_bsl(TCGv dest
, TCGv t
, TCGv f
, TCGv c
)
3890 tcg_gen_and_i32(t
, t
, c
);
3891 tcg_gen_bic_i32(f
, f
, c
);
3892 tcg_gen_or_i32(dest
, t
, f
);
3895 static inline void gen_neon_narrow(int size
, TCGv dest
, TCGv_i64 src
)
3898 case 0: gen_helper_neon_narrow_u8(dest
, src
); break;
3899 case 1: gen_helper_neon_narrow_u16(dest
, src
); break;
3900 case 2: tcg_gen_trunc_i64_i32(dest
, src
); break;
3905 static inline void gen_neon_narrow_sats(int size
, TCGv dest
, TCGv_i64 src
)
3908 case 0: gen_helper_neon_narrow_sat_s8(dest
, cpu_env
, src
); break;
3909 case 1: gen_helper_neon_narrow_sat_s16(dest
, cpu_env
, src
); break;
3910 case 2: gen_helper_neon_narrow_sat_s32(dest
, cpu_env
, src
); break;
3915 static inline void gen_neon_narrow_satu(int size
, TCGv dest
, TCGv_i64 src
)
3918 case 0: gen_helper_neon_narrow_sat_u8(dest
, cpu_env
, src
); break;
3919 case 1: gen_helper_neon_narrow_sat_u16(dest
, cpu_env
, src
); break;
3920 case 2: gen_helper_neon_narrow_sat_u32(dest
, cpu_env
, src
); break;
3925 static inline void gen_neon_shift_narrow(int size
, TCGv var
, TCGv shift
,
3931 case 1: gen_helper_neon_rshl_u16(var
, var
, shift
); break;
3932 case 2: gen_helper_neon_rshl_u32(var
, var
, shift
); break;
3937 case 1: gen_helper_neon_rshl_s16(var
, var
, shift
); break;
3938 case 2: gen_helper_neon_rshl_s32(var
, var
, shift
); break;
3945 case 1: gen_helper_neon_rshl_u16(var
, var
, shift
); break;
3946 case 2: gen_helper_neon_rshl_u32(var
, var
, shift
); break;
3951 case 1: gen_helper_neon_shl_s16(var
, var
, shift
); break;
3952 case 2: gen_helper_neon_shl_s32(var
, var
, shift
); break;
3959 static inline void gen_neon_widen(TCGv_i64 dest
, TCGv src
, int size
, int u
)
3963 case 0: gen_helper_neon_widen_u8(dest
, src
); break;
3964 case 1: gen_helper_neon_widen_u16(dest
, src
); break;
3965 case 2: tcg_gen_extu_i32_i64(dest
, src
); break;
3970 case 0: gen_helper_neon_widen_s8(dest
, src
); break;
3971 case 1: gen_helper_neon_widen_s16(dest
, src
); break;
3972 case 2: tcg_gen_ext_i32_i64(dest
, src
); break;
3979 static inline void gen_neon_addl(int size
)
3982 case 0: gen_helper_neon_addl_u16(CPU_V001
); break;
3983 case 1: gen_helper_neon_addl_u32(CPU_V001
); break;
3984 case 2: tcg_gen_add_i64(CPU_V001
); break;
3989 static inline void gen_neon_subl(int size
)
3992 case 0: gen_helper_neon_subl_u16(CPU_V001
); break;
3993 case 1: gen_helper_neon_subl_u32(CPU_V001
); break;
3994 case 2: tcg_gen_sub_i64(CPU_V001
); break;
3999 static inline void gen_neon_negl(TCGv_i64 var
, int size
)
4002 case 0: gen_helper_neon_negl_u16(var
, var
); break;
4003 case 1: gen_helper_neon_negl_u32(var
, var
); break;
4004 case 2: gen_helper_neon_negl_u64(var
, var
); break;
4009 static inline void gen_neon_addl_saturate(TCGv_i64 op0
, TCGv_i64 op1
, int size
)
4012 case 1: gen_helper_neon_addl_saturate_s32(op0
, cpu_env
, op0
, op1
); break;
4013 case 2: gen_helper_neon_addl_saturate_s64(op0
, cpu_env
, op0
, op1
); break;
4018 static inline void gen_neon_mull(TCGv_i64 dest
, TCGv a
, TCGv b
, int size
, int u
)
4022 switch ((size
<< 1) | u
) {
4023 case 0: gen_helper_neon_mull_s8(dest
, a
, b
); break;
4024 case 1: gen_helper_neon_mull_u8(dest
, a
, b
); break;
4025 case 2: gen_helper_neon_mull_s16(dest
, a
, b
); break;
4026 case 3: gen_helper_neon_mull_u16(dest
, a
, b
); break;
4028 tmp
= gen_muls_i64_i32(a
, b
);
4029 tcg_gen_mov_i64(dest
, tmp
);
4032 tmp
= gen_mulu_i64_i32(a
, b
);
4033 tcg_gen_mov_i64(dest
, tmp
);
4043 /* Translate a NEON data processing instruction. Return nonzero if the
4044 instruction is invalid.
4045 We process data in a mixture of 32-bit and 64-bit chunks.
4046 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4048 static int disas_neon_data_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
4066 if (!vfp_enabled(env
))
4068 q
= (insn
& (1 << 6)) != 0;
4069 u
= (insn
>> 24) & 1;
4070 VFP_DREG_D(rd
, insn
);
4071 VFP_DREG_N(rn
, insn
);
4072 VFP_DREG_M(rm
, insn
);
4073 size
= (insn
>> 20) & 3;
4074 if ((insn
& (1 << 23)) == 0) {
4075 /* Three register same length. */
4076 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4077 if (size
== 3 && (op
== 1 || op
== 5 || op
== 8 || op
== 9
4078 || op
== 10 || op
== 11 || op
== 16)) {
4079 /* 64-bit element instructions. */
4080 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4081 neon_load_reg64(cpu_V0
, rn
+ pass
);
4082 neon_load_reg64(cpu_V1
, rm
+ pass
);
4086 gen_helper_neon_add_saturate_u64(CPU_V001
);
4088 gen_helper_neon_add_saturate_s64(CPU_V001
);
4093 gen_helper_neon_sub_saturate_u64(CPU_V001
);
4095 gen_helper_neon_sub_saturate_s64(CPU_V001
);
4100 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4102 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4107 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4110 gen_helper_neon_qshl_s64(cpu_V1
, cpu_env
,
4114 case 10: /* VRSHL */
4116 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4118 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4121 case 11: /* VQRSHL */
4123 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
4126 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
4132 tcg_gen_sub_i64(CPU_V001
);
4134 tcg_gen_add_i64(CPU_V001
);
4140 neon_store_reg64(cpu_V0
, rd
+ pass
);
4147 case 10: /* VRSHL */
4148 case 11: /* VQRSHL */
4151 /* Shift instruction operands are reversed. */
4158 case 20: /* VPMAX */
4159 case 21: /* VPMIN */
4160 case 23: /* VPADD */
4163 case 26: /* VPADD (float) */
4164 pairwise
= (u
&& size
< 2);
4166 case 30: /* VPMIN/VPMAX (float) */
4173 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4182 NEON_GET_REG(T0
, rn
, n
);
4183 NEON_GET_REG(T1
, rn
, n
+ 1);
4185 NEON_GET_REG(T0
, rm
, n
);
4186 NEON_GET_REG(T1
, rm
, n
+ 1);
4190 NEON_GET_REG(T0
, rn
, pass
);
4191 NEON_GET_REG(T1
, rm
, pass
);
4195 GEN_NEON_INTEGER_OP(hadd
);
4198 GEN_NEON_INTEGER_OP_ENV(qadd
);
4200 case 2: /* VRHADD */
4201 GEN_NEON_INTEGER_OP(rhadd
);
4203 case 3: /* Logic ops. */
4204 switch ((u
<< 2) | size
) {
4206 gen_op_andl_T0_T1();
4209 gen_op_bicl_T0_T1();
4219 gen_op_xorl_T0_T1();
4222 tmp
= neon_load_reg(rd
, pass
);
4223 gen_neon_bsl(cpu_T
[0], cpu_T
[0], cpu_T
[1], tmp
);
4227 tmp
= neon_load_reg(rd
, pass
);
4228 gen_neon_bsl(cpu_T
[0], cpu_T
[0], tmp
, cpu_T
[1]);
4232 tmp
= neon_load_reg(rd
, pass
);
4233 gen_neon_bsl(cpu_T
[0], tmp
, cpu_T
[0], cpu_T
[1]);
4239 GEN_NEON_INTEGER_OP(hsub
);
4242 GEN_NEON_INTEGER_OP_ENV(qsub
);
4245 GEN_NEON_INTEGER_OP(cgt
);
4248 GEN_NEON_INTEGER_OP(cge
);
4251 GEN_NEON_INTEGER_OP(shl
);
4254 GEN_NEON_INTEGER_OP_ENV(qshl
);
4256 case 10: /* VRSHL */
4257 GEN_NEON_INTEGER_OP(rshl
);
4259 case 11: /* VQRSHL */
4260 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4263 GEN_NEON_INTEGER_OP(max
);
4266 GEN_NEON_INTEGER_OP(min
);
4269 GEN_NEON_INTEGER_OP(abd
);
4272 GEN_NEON_INTEGER_OP(abd
);
4273 NEON_GET_REG(T1
, rd
, pass
);
4277 if (!u
) { /* VADD */
4278 if (gen_neon_add(size
))
4282 case 0: gen_helper_neon_sub_u8(CPU_T001
); break;
4283 case 1: gen_helper_neon_sub_u16(CPU_T001
); break;
4284 case 2: gen_op_subl_T0_T1(); break;
4290 if (!u
) { /* VTST */
4292 case 0: gen_helper_neon_tst_u8(CPU_T001
); break;
4293 case 1: gen_helper_neon_tst_u16(CPU_T001
); break;
4294 case 2: gen_helper_neon_tst_u32(CPU_T001
); break;
4299 case 0: gen_helper_neon_ceq_u8(CPU_T001
); break;
4300 case 1: gen_helper_neon_ceq_u16(CPU_T001
); break;
4301 case 2: gen_helper_neon_ceq_u32(CPU_T001
); break;
4306 case 18: /* Multiply. */
4308 case 0: gen_helper_neon_mul_u8(CPU_T001
); break;
4309 case 1: gen_helper_neon_mul_u16(CPU_T001
); break;
4310 case 2: gen_op_mul_T0_T1(); break;
4313 NEON_GET_REG(T1
, rd
, pass
);
4321 if (u
) { /* polynomial */
4322 gen_helper_neon_mul_p8(CPU_T001
);
4323 } else { /* Integer */
4325 case 0: gen_helper_neon_mul_u8(CPU_T001
); break;
4326 case 1: gen_helper_neon_mul_u16(CPU_T001
); break;
4327 case 2: gen_op_mul_T0_T1(); break;
4332 case 20: /* VPMAX */
4333 GEN_NEON_INTEGER_OP(pmax
);
4335 case 21: /* VPMIN */
4336 GEN_NEON_INTEGER_OP(pmin
);
4338 case 22: /* Hultiply high. */
4339 if (!u
) { /* VQDMULH */
4341 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01
); break;
4342 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01
); break;
4345 } else { /* VQRDHMUL */
4347 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01
); break;
4348 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01
); break;
4353 case 23: /* VPADD */
4357 case 0: gen_helper_neon_padd_u8(CPU_T001
); break;
4358 case 1: gen_helper_neon_padd_u16(CPU_T001
); break;
4359 case 2: gen_op_addl_T0_T1(); break;
4363 case 26: /* Floating point arithnetic. */
4364 switch ((u
<< 2) | size
) {
4366 gen_helper_neon_add_f32(CPU_T001
);
4369 gen_helper_neon_sub_f32(CPU_T001
);
4372 gen_helper_neon_add_f32(CPU_T001
);
4375 gen_helper_neon_abd_f32(CPU_T001
);
4381 case 27: /* Float multiply. */
4382 gen_helper_neon_mul_f32(CPU_T001
);
4384 NEON_GET_REG(T1
, rd
, pass
);
4386 gen_helper_neon_add_f32(CPU_T001
);
4388 gen_helper_neon_sub_f32(cpu_T
[0], cpu_T
[1], cpu_T
[0]);
4392 case 28: /* Float compare. */
4394 gen_helper_neon_ceq_f32(CPU_T001
);
4397 gen_helper_neon_cge_f32(CPU_T001
);
4399 gen_helper_neon_cgt_f32(CPU_T001
);
4402 case 29: /* Float compare absolute. */
4406 gen_helper_neon_acge_f32(CPU_T001
);
4408 gen_helper_neon_acgt_f32(CPU_T001
);
4410 case 30: /* Float min/max. */
4412 gen_helper_neon_max_f32(CPU_T001
);
4414 gen_helper_neon_min_f32(CPU_T001
);
4418 gen_helper_recps_f32(cpu_T
[0], cpu_T
[0], cpu_T
[1], cpu_env
);
4420 gen_helper_rsqrts_f32(cpu_T
[0], cpu_T
[0], cpu_T
[1], cpu_env
);
4425 /* Save the result. For elementwise operations we can put it
4426 straight into the destination register. For pairwise operations
4427 we have to be careful to avoid clobbering the source operands. */
4428 if (pairwise
&& rd
== rm
) {
4429 gen_neon_movl_scratch_T0(pass
);
4431 NEON_SET_REG(T0
, rd
, pass
);
4435 if (pairwise
&& rd
== rm
) {
4436 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4437 gen_neon_movl_T0_scratch(pass
);
4438 NEON_SET_REG(T0
, rd
, pass
);
4441 /* End of 3 register same size operations. */
4442 } else if (insn
& (1 << 4)) {
4443 if ((insn
& 0x00380080) != 0) {
4444 /* Two registers and shift. */
4445 op
= (insn
>> 8) & 0xf;
4446 if (insn
& (1 << 7)) {
4451 while ((insn
& (1 << (size
+ 19))) == 0)
4454 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
4455 /* To avoid excessive dumplication of ops we implement shift
4456 by immediate using the variable shift operations. */
4458 /* Shift by immediate:
4459 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4460 /* Right shifts are encoded as N - shift, where N is the
4461 element size in bits. */
4463 shift
= shift
- (1 << (size
+ 3));
4471 imm
= (uint8_t) shift
;
4476 imm
= (uint16_t) shift
;
4487 for (pass
= 0; pass
< count
; pass
++) {
4489 neon_load_reg64(cpu_V0
, rm
+ pass
);
4490 tcg_gen_movi_i64(cpu_V1
, imm
);
4495 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4497 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4502 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4504 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4509 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4511 case 5: /* VSHL, VSLI */
4512 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4516 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4518 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4520 case 7: /* VQSHLU */
4521 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4524 if (op
== 1 || op
== 3) {
4526 neon_load_reg64(cpu_V0
, rd
+ pass
);
4527 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
4528 } else if (op
== 4 || (op
== 5 && u
)) {
4530 cpu_abort(env
, "VS[LR]I.64 not implemented");
4532 neon_store_reg64(cpu_V0
, rd
+ pass
);
4533 } else { /* size < 3 */
4534 /* Operands in T0 and T1. */
4535 gen_op_movl_T1_im(imm
);
4536 NEON_GET_REG(T0
, rm
, pass
);
4540 GEN_NEON_INTEGER_OP(shl
);
4544 GEN_NEON_INTEGER_OP(rshl
);
4549 GEN_NEON_INTEGER_OP(shl
);
4551 case 5: /* VSHL, VSLI */
4553 case 0: gen_helper_neon_shl_u8(CPU_T001
); break;
4554 case 1: gen_helper_neon_shl_u16(CPU_T001
); break;
4555 case 2: gen_helper_neon_shl_u32(CPU_T001
); break;
4560 GEN_NEON_INTEGER_OP_ENV(qshl
);
4562 case 7: /* VQSHLU */
4564 case 0: gen_helper_neon_qshl_u8(CPU_T0E01
); break;
4565 case 1: gen_helper_neon_qshl_u16(CPU_T0E01
); break;
4566 case 2: gen_helper_neon_qshl_u32(CPU_T0E01
); break;
4572 if (op
== 1 || op
== 3) {
4574 NEON_GET_REG(T1
, rd
, pass
);
4576 } else if (op
== 4 || (op
== 5 && u
)) {
4581 imm
= 0xff >> -shift
;
4583 imm
= (uint8_t)(0xff << shift
);
4589 imm
= 0xffff >> -shift
;
4591 imm
= (uint16_t)(0xffff << shift
);
4596 imm
= 0xffffffffu
>> -shift
;
4598 imm
= 0xffffffffu
<< shift
;
4603 tmp
= neon_load_reg(rd
, pass
);
4604 tcg_gen_andi_i32(cpu_T
[0], cpu_T
[0], imm
);
4605 tcg_gen_andi_i32(tmp
, tmp
, ~imm
);
4606 tcg_gen_or_i32(cpu_T
[0], cpu_T
[0], tmp
);
4608 NEON_SET_REG(T0
, rd
, pass
);
4611 } else if (op
< 10) {
4612 /* Shift by immediate and narrow:
4613 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4614 shift
= shift
- (1 << (size
+ 3));
4618 imm
= (uint16_t)shift
;
4620 tmp2
= tcg_const_i32(imm
);
4621 TCGV_UNUSED_I64(tmp64
);
4624 imm
= (uint32_t)shift
;
4625 tmp2
= tcg_const_i32(imm
);
4626 TCGV_UNUSED_I64(tmp64
);
4629 tmp64
= tcg_const_i64(shift
);
4636 for (pass
= 0; pass
< 2; pass
++) {
4638 neon_load_reg64(cpu_V0
, rm
+ pass
);
4641 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, tmp64
);
4643 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, tmp64
);
4646 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, tmp64
);
4648 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, tmp64
);
4651 tmp
= neon_load_reg(rm
+ pass
, 0);
4652 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
, u
);
4653 tmp3
= neon_load_reg(rm
+ pass
, 1);
4654 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
, u
);
4655 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
4660 if (op
== 8 && !u
) {
4661 gen_neon_narrow(size
- 1, tmp
, cpu_V0
);
4664 gen_neon_narrow_sats(size
- 1, tmp
, cpu_V0
);
4666 gen_neon_narrow_satu(size
- 1, tmp
, cpu_V0
);
4671 neon_store_reg(rd
, 0, tmp2
);
4672 neon_store_reg(rd
, 1, tmp
);
4675 } else if (op
== 10) {
4679 tmp
= neon_load_reg(rm
, 0);
4680 tmp2
= neon_load_reg(rm
, 1);
4681 for (pass
= 0; pass
< 2; pass
++) {
4685 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
4688 /* The shift is less than the width of the source
4689 type, so we can just shift the whole register. */
4690 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
4691 if (size
< 2 || !u
) {
4694 imm
= (0xffu
>> (8 - shift
));
4697 imm
= 0xffff >> (16 - shift
);
4699 imm64
= imm
| (((uint64_t)imm
) << 32);
4700 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, imm64
);
4703 neon_store_reg64(cpu_V0
, rd
+ pass
);
4705 } else if (op
== 15 || op
== 16) {
4706 /* VCVT fixed-point. */
4707 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4708 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
4711 gen_vfp_ulto(0, shift
);
4713 gen_vfp_slto(0, shift
);
4716 gen_vfp_toul(0, shift
);
4718 gen_vfp_tosl(0, shift
);
4720 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
4725 } else { /* (insn & 0x00380080) == 0 */
4728 op
= (insn
>> 8) & 0xf;
4729 /* One register and immediate. */
4730 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
4731 invert
= (insn
& (1 << 5)) != 0;
4749 imm
= (imm
<< 8) | (imm
<< 24);
4752 imm
= (imm
< 8) | 0xff;
4755 imm
= (imm
<< 16) | 0xffff;
4758 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
4763 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
4764 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
4770 if (op
!= 14 || !invert
)
4771 gen_op_movl_T1_im(imm
);
4773 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4774 if (op
& 1 && op
< 12) {
4775 tmp
= neon_load_reg(rd
, pass
);
4777 /* The immediate value has already been inverted, so
4779 tcg_gen_andi_i32(tmp
, tmp
, imm
);
4781 tcg_gen_ori_i32(tmp
, tmp
, imm
);
4786 if (op
== 14 && invert
) {
4789 for (n
= 0; n
< 4; n
++) {
4790 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
4791 val
|= 0xff << (n
* 8);
4793 tcg_gen_movi_i32(tmp
, val
);
4795 tcg_gen_movi_i32(tmp
, imm
);
4798 neon_store_reg(rd
, pass
, tmp
);
4801 } else { /* (insn & 0x00800010 == 0x00800000) */
4803 op
= (insn
>> 8) & 0xf;
4804 if ((insn
& (1 << 6)) == 0) {
4805 /* Three registers of different lengths. */
4809 /* prewiden, src1_wide, src2_wide */
4810 static const int neon_3reg_wide
[16][3] = {
4811 {1, 0, 0}, /* VADDL */
4812 {1, 1, 0}, /* VADDW */
4813 {1, 0, 0}, /* VSUBL */
4814 {1, 1, 0}, /* VSUBW */
4815 {0, 1, 1}, /* VADDHN */
4816 {0, 0, 0}, /* VABAL */
4817 {0, 1, 1}, /* VSUBHN */
4818 {0, 0, 0}, /* VABDL */
4819 {0, 0, 0}, /* VMLAL */
4820 {0, 0, 0}, /* VQDMLAL */
4821 {0, 0, 0}, /* VMLSL */
4822 {0, 0, 0}, /* VQDMLSL */
4823 {0, 0, 0}, /* Integer VMULL */
4824 {0, 0, 0}, /* VQDMULL */
4825 {0, 0, 0} /* Polynomial VMULL */
4828 prewiden
= neon_3reg_wide
[op
][0];
4829 src1_wide
= neon_3reg_wide
[op
][1];
4830 src2_wide
= neon_3reg_wide
[op
][2];
4832 if (size
== 0 && (op
== 9 || op
== 11 || op
== 13))
4835 /* Avoid overlapping operands. Wide source operands are
4836 always aligned so will never overlap with wide
4837 destinations in problematic ways. */
4838 if (rd
== rm
&& !src2_wide
) {
4839 NEON_GET_REG(T0
, rm
, 1);
4840 gen_neon_movl_scratch_T0(2);
4841 } else if (rd
== rn
&& !src1_wide
) {
4842 NEON_GET_REG(T0
, rn
, 1);
4843 gen_neon_movl_scratch_T0(2);
4846 for (pass
= 0; pass
< 2; pass
++) {
4848 neon_load_reg64(cpu_V0
, rn
+ pass
);
4851 if (pass
== 1 && rd
== rn
) {
4852 gen_neon_movl_T0_scratch(2);
4854 tcg_gen_mov_i32(tmp
, cpu_T
[0]);
4856 tmp
= neon_load_reg(rn
, pass
);
4859 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
4863 neon_load_reg64(cpu_V1
, rm
+ pass
);
4866 if (pass
== 1 && rd
== rm
) {
4867 gen_neon_movl_T0_scratch(2);
4869 tcg_gen_mov_i32(tmp2
, cpu_T
[0]);
4871 tmp2
= neon_load_reg(rm
, pass
);
4874 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
4878 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4879 gen_neon_addl(size
);
4881 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
4882 gen_neon_subl(size
);
4884 case 5: case 7: /* VABAL, VABDL */
4885 switch ((size
<< 1) | u
) {
4887 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
4890 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
4893 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
4896 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
4899 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
4902 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
4909 case 8: case 9: case 10: case 11: case 12: case 13:
4910 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4911 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
4913 case 14: /* Polynomial VMULL */
4914 cpu_abort(env
, "Polynomial VMULL not implemented");
4916 default: /* 15 is RESERVED. */
4919 if (op
== 5 || op
== 13 || (op
>= 8 && op
<= 11)) {
4921 if (op
== 10 || op
== 11) {
4922 gen_neon_negl(cpu_V0
, size
);
4926 neon_load_reg64(cpu_V1
, rd
+ pass
);
4930 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4931 gen_neon_addl(size
);
4933 case 9: case 11: /* VQDMLAL, VQDMLSL */
4934 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
4935 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
4938 case 13: /* VQDMULL */
4939 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
4944 neon_store_reg64(cpu_V0
, rd
+ pass
);
4945 } else if (op
== 4 || op
== 6) {
4946 /* Narrowing operation. */
4951 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
4954 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
4957 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
4958 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
4965 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
4968 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
4971 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
4972 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
4973 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
4981 neon_store_reg(rd
, 0, tmp3
);
4982 neon_store_reg(rd
, 1, tmp
);
4985 /* Write back the result. */
4986 neon_store_reg64(cpu_V0
, rd
+ pass
);
4990 /* Two registers and a scalar. */
4992 case 0: /* Integer VMLA scalar */
4993 case 1: /* Float VMLA scalar */
4994 case 4: /* Integer VMLS scalar */
4995 case 5: /* Floating point VMLS scalar */
4996 case 8: /* Integer VMUL scalar */
4997 case 9: /* Floating point VMUL scalar */
4998 case 12: /* VQDMULH scalar */
4999 case 13: /* VQRDMULH scalar */
5000 gen_neon_get_scalar(size
, rm
);
5001 gen_neon_movl_scratch_T0(0);
5002 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
5004 gen_neon_movl_T0_scratch(0);
5005 NEON_GET_REG(T1
, rn
, pass
);
5008 gen_helper_neon_qdmulh_s16(CPU_T0E01
);
5010 gen_helper_neon_qdmulh_s32(CPU_T0E01
);
5012 } else if (op
== 13) {
5014 gen_helper_neon_qrdmulh_s16(CPU_T0E01
);
5016 gen_helper_neon_qrdmulh_s32(CPU_T0E01
);
5018 } else if (op
& 1) {
5019 gen_helper_neon_mul_f32(CPU_T001
);
5022 case 0: gen_helper_neon_mul_u8(CPU_T001
); break;
5023 case 1: gen_helper_neon_mul_u16(CPU_T001
); break;
5024 case 2: gen_op_mul_T0_T1(); break;
5030 NEON_GET_REG(T1
, rd
, pass
);
5036 gen_helper_neon_add_f32(CPU_T001
);
5042 gen_helper_neon_sub_f32(cpu_T
[0], cpu_T
[1], cpu_T
[0]);
5048 NEON_SET_REG(T0
, rd
, pass
);
5051 case 2: /* VMLAL sclar */
5052 case 3: /* VQDMLAL scalar */
5053 case 6: /* VMLSL scalar */
5054 case 7: /* VQDMLSL scalar */
5055 case 10: /* VMULL scalar */
5056 case 11: /* VQDMULL scalar */
5057 if (size
== 0 && (op
== 3 || op
== 7 || op
== 11))
5060 gen_neon_get_scalar(size
, rm
);
5061 NEON_GET_REG(T1
, rn
, 1);
5063 for (pass
= 0; pass
< 2; pass
++) {
5065 tmp
= neon_load_reg(rn
, 0);
5068 tcg_gen_mov_i32(tmp
, cpu_T
[1]);
5071 tcg_gen_mov_i32(tmp2
, cpu_T
[0]);
5072 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5073 if (op
== 6 || op
== 7) {
5074 gen_neon_negl(cpu_V0
, size
);
5077 neon_load_reg64(cpu_V1
, rd
+ pass
);
5081 gen_neon_addl(size
);
5084 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5085 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5091 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5096 neon_store_reg64(cpu_V0
, rd
+ pass
);
5099 default: /* 14 and 15 are RESERVED */
5103 } else { /* size == 3 */
5106 imm
= (insn
>> 8) & 0xf;
5113 neon_load_reg64(cpu_V0
, rn
);
5115 neon_load_reg64(cpu_V1
, rn
+ 1);
5117 } else if (imm
== 8) {
5118 neon_load_reg64(cpu_V0
, rn
+ 1);
5120 neon_load_reg64(cpu_V1
, rm
);
5123 tmp64
= tcg_temp_new_i64();
5125 neon_load_reg64(cpu_V0
, rn
);
5126 neon_load_reg64(tmp64
, rn
+ 1);
5128 neon_load_reg64(cpu_V0
, rn
+ 1);
5129 neon_load_reg64(tmp64
, rm
);
5131 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5132 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5133 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5135 neon_load_reg64(cpu_V1
, rm
);
5137 neon_load_reg64(cpu_V1
, rm
+ 1);
5140 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5141 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5142 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5145 neon_load_reg64(cpu_V0
, rn
);
5146 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5147 neon_load_reg64(cpu_V1
, rm
);
5148 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5149 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5151 neon_store_reg64(cpu_V0
, rd
);
5153 neon_store_reg64(cpu_V1
, rd
+ 1);
5155 } else if ((insn
& (1 << 11)) == 0) {
5156 /* Two register misc. */
5157 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5158 size
= (insn
>> 18) & 3;
5160 case 0: /* VREV64 */
5163 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5164 NEON_GET_REG(T0
, rm
, pass
* 2);
5165 NEON_GET_REG(T1
, rm
, pass
* 2 + 1);
5167 case 0: tcg_gen_bswap32_i32(cpu_T
[0], cpu_T
[0]); break;
5168 case 1: gen_swap_half(cpu_T
[0]); break;
5169 case 2: /* no-op */ break;
5172 NEON_SET_REG(T0
, rd
, pass
* 2 + 1);
5174 NEON_SET_REG(T1
, rd
, pass
* 2);
5176 gen_op_movl_T0_T1();
5178 case 0: tcg_gen_bswap32_i32(cpu_T
[0], cpu_T
[0]); break;
5179 case 1: gen_swap_half(cpu_T
[0]); break;
5182 NEON_SET_REG(T0
, rd
, pass
* 2);
5186 case 4: case 5: /* VPADDL */
5187 case 12: case 13: /* VPADAL */
5190 for (pass
= 0; pass
< q
+ 1; pass
++) {
5191 tmp
= neon_load_reg(rm
, pass
* 2);
5192 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5193 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5194 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5196 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5197 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5198 case 2: tcg_gen_add_i64(CPU_V001
); break;
5203 neon_load_reg64(cpu_V1
, rd
+ pass
);
5204 gen_neon_addl(size
);
5206 neon_store_reg64(cpu_V0
, rd
+ pass
);
5211 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
5212 NEON_GET_REG(T0
, rm
, n
);
5213 NEON_GET_REG(T1
, rd
, n
+ 1);
5214 NEON_SET_REG(T1
, rm
, n
);
5215 NEON_SET_REG(T0
, rd
, n
+ 1);
5223 Rd A3 A2 A1 A0 B2 B0 A2 A0
5224 Rm B3 B2 B1 B0 B3 B1 A3 A1
5228 gen_neon_unzip(rd
, q
, 0, size
);
5229 gen_neon_unzip(rm
, q
, 4, size
);
5231 static int unzip_order_q
[8] =
5232 {0, 2, 4, 6, 1, 3, 5, 7};
5233 for (n
= 0; n
< 8; n
++) {
5234 int reg
= (n
< 4) ? rd
: rm
;
5235 gen_neon_movl_T0_scratch(unzip_order_q
[n
]);
5236 NEON_SET_REG(T0
, reg
, n
% 4);
5239 static int unzip_order
[4] =
5241 for (n
= 0; n
< 4; n
++) {
5242 int reg
= (n
< 2) ? rd
: rm
;
5243 gen_neon_movl_T0_scratch(unzip_order
[n
]);
5244 NEON_SET_REG(T0
, reg
, n
% 2);
5250 Rd A3 A2 A1 A0 B1 A1 B0 A0
5251 Rm B3 B2 B1 B0 B3 A3 B2 A2
5255 count
= (q
? 4 : 2);
5256 for (n
= 0; n
< count
; n
++) {
5257 NEON_GET_REG(T0
, rd
, n
);
5258 NEON_GET_REG(T1
, rd
, n
);
5260 case 0: gen_helper_neon_zip_u8(); break;
5261 case 1: gen_helper_neon_zip_u16(); break;
5262 case 2: /* no-op */; break;
5265 gen_neon_movl_scratch_T0(n
* 2);
5266 gen_neon_movl_scratch_T1(n
* 2 + 1);
5268 for (n
= 0; n
< count
* 2; n
++) {
5269 int reg
= (n
< count
) ? rd
: rm
;
5270 gen_neon_movl_T0_scratch(n
);
5271 NEON_SET_REG(T0
, reg
, n
% count
);
5274 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5278 for (pass
= 0; pass
< 2; pass
++) {
5279 neon_load_reg64(cpu_V0
, rm
+ pass
);
5281 if (op
== 36 && q
== 0) {
5282 gen_neon_narrow(size
, tmp
, cpu_V0
);
5284 gen_neon_narrow_satu(size
, tmp
, cpu_V0
);
5286 gen_neon_narrow_sats(size
, tmp
, cpu_V0
);
5291 neon_store_reg(rd
, 0, tmp2
);
5292 neon_store_reg(rd
, 1, tmp
);
5296 case 38: /* VSHLL */
5299 tmp
= neon_load_reg(rm
, 0);
5300 tmp2
= neon_load_reg(rm
, 1);
5301 for (pass
= 0; pass
< 2; pass
++) {
5304 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
5305 neon_store_reg64(cpu_V0
, rd
+ pass
);
5310 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5311 if (op
== 30 || op
== 31 || op
>= 58) {
5312 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
5313 neon_reg_offset(rm
, pass
));
5315 NEON_GET_REG(T0
, rm
, pass
);
5318 case 1: /* VREV32 */
5320 case 0: tcg_gen_bswap32_i32(cpu_T
[0], cpu_T
[0]); break;
5321 case 1: gen_swap_half(cpu_T
[0]); break;
5325 case 2: /* VREV16 */
5328 gen_rev16(cpu_T
[0]);
5332 case 0: gen_helper_neon_cls_s8(cpu_T
[0], cpu_T
[0]); break;
5333 case 1: gen_helper_neon_cls_s16(cpu_T
[0], cpu_T
[0]); break;
5334 case 2: gen_helper_neon_cls_s32(cpu_T
[0], cpu_T
[0]); break;
5340 case 0: gen_helper_neon_clz_u8(cpu_T
[0], cpu_T
[0]); break;
5341 case 1: gen_helper_neon_clz_u16(cpu_T
[0], cpu_T
[0]); break;
5342 case 2: gen_helper_clz(cpu_T
[0], cpu_T
[0]); break;
5349 gen_helper_neon_cnt_u8(cpu_T
[0], cpu_T
[0]);
5356 case 14: /* VQABS */
5358 case 0: gen_helper_neon_qabs_s8(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5359 case 1: gen_helper_neon_qabs_s16(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5360 case 2: gen_helper_neon_qabs_s32(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5364 case 15: /* VQNEG */
5366 case 0: gen_helper_neon_qneg_s8(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5367 case 1: gen_helper_neon_qneg_s16(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5368 case 2: gen_helper_neon_qneg_s32(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5372 case 16: case 19: /* VCGT #0, VCLE #0 */
5373 gen_op_movl_T1_im(0);
5375 case 0: gen_helper_neon_cgt_s8(CPU_T001
); break;
5376 case 1: gen_helper_neon_cgt_s16(CPU_T001
); break;
5377 case 2: gen_helper_neon_cgt_s32(CPU_T001
); break;
5383 case 17: case 20: /* VCGE #0, VCLT #0 */
5384 gen_op_movl_T1_im(0);
5386 case 0: gen_helper_neon_cge_s8(CPU_T001
); break;
5387 case 1: gen_helper_neon_cge_s16(CPU_T001
); break;
5388 case 2: gen_helper_neon_cge_s32(CPU_T001
); break;
5394 case 18: /* VCEQ #0 */
5395 gen_op_movl_T1_im(0);
5397 case 0: gen_helper_neon_ceq_u8(CPU_T001
); break;
5398 case 1: gen_helper_neon_ceq_u16(CPU_T001
); break;
5399 case 2: gen_helper_neon_ceq_u32(CPU_T001
); break;
5405 case 0: gen_helper_neon_abs_s8(cpu_T
[0], cpu_T
[0]); break;
5406 case 1: gen_helper_neon_abs_s16(cpu_T
[0], cpu_T
[0]); break;
5407 case 2: tcg_gen_abs_i32(cpu_T
[0], cpu_T
[0]); break;
5412 gen_op_movl_T1_im(0);
5417 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5418 gen_op_movl_T1_im(0);
5419 gen_helper_neon_cgt_f32(CPU_T001
);
5423 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5424 gen_op_movl_T1_im(0);
5425 gen_helper_neon_cge_f32(CPU_T001
);
5429 case 26: /* Float VCEQ #0 */
5430 gen_op_movl_T1_im(0);
5431 gen_helper_neon_ceq_f32(CPU_T001
);
5433 case 30: /* Float VABS */
5436 case 31: /* Float VNEG */
5440 NEON_GET_REG(T1
, rd
, pass
);
5441 NEON_SET_REG(T1
, rm
, pass
);
5444 NEON_GET_REG(T1
, rd
, pass
);
5446 case 0: gen_helper_neon_trn_u8(); break;
5447 case 1: gen_helper_neon_trn_u16(); break;
5451 NEON_SET_REG(T1
, rm
, pass
);
5453 case 56: /* Integer VRECPE */
5454 gen_helper_recpe_u32(cpu_T
[0], cpu_T
[0], cpu_env
);
5456 case 57: /* Integer VRSQRTE */
5457 gen_helper_rsqrte_u32(cpu_T
[0], cpu_T
[0], cpu_env
);
5459 case 58: /* Float VRECPE */
5460 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5462 case 59: /* Float VRSQRTE */
5463 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5465 case 60: /* VCVT.F32.S32 */
5468 case 61: /* VCVT.F32.U32 */
5471 case 62: /* VCVT.S32.F32 */
5474 case 63: /* VCVT.U32.F32 */
5478 /* Reserved: 21, 29, 39-56 */
5481 if (op
== 30 || op
== 31 || op
>= 58) {
5482 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
5483 neon_reg_offset(rd
, pass
));
5485 NEON_SET_REG(T0
, rd
, pass
);
5490 } else if ((insn
& (1 << 10)) == 0) {
5492 n
= ((insn
>> 5) & 0x18) + 8;
5493 if (insn
& (1 << 6)) {
5494 tmp
= neon_load_reg(rd
, 0);
5497 tcg_gen_movi_i32(tmp
, 0);
5499 tmp2
= neon_load_reg(rm
, 0);
5500 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, tcg_const_i32(rn
),
5503 if (insn
& (1 << 6)) {
5504 tmp
= neon_load_reg(rd
, 1);
5507 tcg_gen_movi_i32(tmp
, 0);
5509 tmp3
= neon_load_reg(rm
, 1);
5510 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, tcg_const_i32(rn
),
5512 neon_store_reg(rd
, 0, tmp2
);
5513 neon_store_reg(rd
, 1, tmp3
);
5515 } else if ((insn
& 0x380) == 0) {
5517 if (insn
& (1 << 19)) {
5518 NEON_SET_REG(T0
, rm
, 1);
5520 NEON_SET_REG(T0
, rm
, 0);
5522 if (insn
& (1 << 16)) {
5523 gen_neon_dup_u8(cpu_T
[0], ((insn
>> 17) & 3) * 8);
5524 } else if (insn
& (1 << 17)) {
5525 if ((insn
>> 18) & 1)
5526 gen_neon_dup_high16(cpu_T
[0]);
5528 gen_neon_dup_low16(cpu_T
[0]);
5530 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5531 NEON_SET_REG(T0
, rd
, pass
);
5541 static int disas_cp14_read(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
5543 int crn
= (insn
>> 16) & 0xf;
5544 int crm
= insn
& 0xf;
5545 int op1
= (insn
>> 21) & 7;
5546 int op2
= (insn
>> 5) & 7;
5547 int rt
= (insn
>> 12) & 0xf;
5550 if (arm_feature(env
, ARM_FEATURE_THUMB2EE
)) {
5551 if (op1
== 6 && crn
== 0 && crm
== 0 && op2
== 0) {
5555 tmp
= load_cpu_field(teecr
);
5556 store_reg(s
, rt
, tmp
);
5559 if (op1
== 6 && crn
== 1 && crm
== 0 && op2
== 0) {
5561 if (IS_USER(s
) && (env
->teecr
& 1))
5563 tmp
= load_cpu_field(teehbr
);
5564 store_reg(s
, rt
, tmp
);
5568 fprintf(stderr
, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5569 op1
, crn
, crm
, op2
);
5573 static int disas_cp14_write(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
5575 int crn
= (insn
>> 16) & 0xf;
5576 int crm
= insn
& 0xf;
5577 int op1
= (insn
>> 21) & 7;
5578 int op2
= (insn
>> 5) & 7;
5579 int rt
= (insn
>> 12) & 0xf;
5582 if (arm_feature(env
, ARM_FEATURE_THUMB2EE
)) {
5583 if (op1
== 6 && crn
== 0 && crm
== 0 && op2
== 0) {
5587 tmp
= load_reg(s
, rt
);
5588 gen_helper_set_teecr(cpu_env
, tmp
);
5592 if (op1
== 6 && crn
== 1 && crm
== 0 && op2
== 0) {
5594 if (IS_USER(s
) && (env
->teecr
& 1))
5596 tmp
= load_reg(s
, rt
);
5597 store_cpu_field(tmp
, teehbr
);
5601 fprintf(stderr
, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5602 op1
, crn
, crm
, op2
);
5606 static int disas_coproc_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
5610 cpnum
= (insn
>> 8) & 0xf;
5611 if (arm_feature(env
, ARM_FEATURE_XSCALE
)
5612 && ((env
->cp15
.c15_cpar
^ 0x3fff) & (1 << cpnum
)))
5618 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
5619 return disas_iwmmxt_insn(env
, s
, insn
);
5620 } else if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
5621 return disas_dsp_insn(env
, s
, insn
);
5626 return disas_vfp_insn (env
, s
, insn
);
5628 /* Coprocessors 7-15 are architecturally reserved by ARM.
5629 Unfortunately Intel decided to ignore this. */
5630 if (arm_feature(env
, ARM_FEATURE_XSCALE
))
5632 if (insn
& (1 << 20))
5633 return disas_cp14_read(env
, s
, insn
);
5635 return disas_cp14_write(env
, s
, insn
);
5637 return disas_cp15_insn (env
, s
, insn
);
5640 /* Unknown coprocessor. See if the board has hooked it. */
5641 return disas_cp_insn (env
, s
, insn
);
5646 /* Store a 64-bit value to a register pair. Clobbers val. */
5647 static void gen_storeq_reg(DisasContext
*s
, int rlow
, int rhigh
, TCGv_i64 val
)
5651 tcg_gen_trunc_i64_i32(tmp
, val
);
5652 store_reg(s
, rlow
, tmp
);
5654 tcg_gen_shri_i64(val
, val
, 32);
5655 tcg_gen_trunc_i64_i32(tmp
, val
);
5656 store_reg(s
, rhigh
, tmp
);
5659 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5660 static void gen_addq_lo(DisasContext
*s
, TCGv_i64 val
, int rlow
)
5665 /* Load value and extend to 64 bits. */
5666 tmp
= tcg_temp_new_i64();
5667 tmp2
= load_reg(s
, rlow
);
5668 tcg_gen_extu_i32_i64(tmp
, tmp2
);
5670 tcg_gen_add_i64(val
, val
, tmp
);
5673 /* load and add a 64-bit value from a register pair. */
5674 static void gen_addq(DisasContext
*s
, TCGv_i64 val
, int rlow
, int rhigh
)
5680 /* Load 64-bit value rd:rn. */
5681 tmpl
= load_reg(s
, rlow
);
5682 tmph
= load_reg(s
, rhigh
);
5683 tmp
= tcg_temp_new_i64();
5684 tcg_gen_concat_i32_i64(tmp
, tmpl
, tmph
);
5687 tcg_gen_add_i64(val
, val
, tmp
);
5690 /* Set N and Z flags from a 64-bit value. */
5691 static void gen_logicq_cc(TCGv_i64 val
)
5693 TCGv tmp
= new_tmp();
5694 gen_helper_logicq_cc(tmp
, val
);
5699 static void disas_arm_insn(CPUState
* env
, DisasContext
*s
)
5701 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
5708 insn
= ldl_code(s
->pc
);
5711 /* M variants do not implement ARM mode. */
5716 /* Unconditional instructions. */
5717 if (((insn
>> 25) & 7) == 1) {
5718 /* NEON Data processing. */
5719 if (!arm_feature(env
, ARM_FEATURE_NEON
))
5722 if (disas_neon_data_insn(env
, s
, insn
))
5726 if ((insn
& 0x0f100000) == 0x04000000) {
5727 /* NEON load/store. */
5728 if (!arm_feature(env
, ARM_FEATURE_NEON
))
5731 if (disas_neon_ls_insn(env
, s
, insn
))
5735 if ((insn
& 0x0d70f000) == 0x0550f000)
5737 else if ((insn
& 0x0ffffdff) == 0x01010000) {
5740 if (insn
& (1 << 9)) {
5741 /* BE8 mode not implemented. */
5745 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
5746 switch ((insn
>> 4) & 0xf) {
5749 gen_helper_clrex(cpu_env
);
5755 /* We don't emulate caches so these are a no-op. */
5760 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
5766 op1
= (insn
& 0x1f);
5767 if (op1
== (env
->uncached_cpsr
& CPSR_M
)) {
5768 addr
= load_reg(s
, 13);
5771 gen_helper_get_r13_banked(addr
, cpu_env
, tcg_const_i32(op1
));
5773 i
= (insn
>> 23) & 3;
5775 case 0: offset
= -4; break; /* DA */
5776 case 1: offset
= 0; break; /* IA */
5777 case 2: offset
= -8; break; /* DB */
5778 case 3: offset
= 4; break; /* IB */
5782 tcg_gen_addi_i32(addr
, addr
, offset
);
5783 tmp
= load_reg(s
, 14);
5784 gen_st32(tmp
, addr
, 0);
5785 tmp
= load_cpu_field(spsr
);
5786 tcg_gen_addi_i32(addr
, addr
, 4);
5787 gen_st32(tmp
, addr
, 0);
5788 if (insn
& (1 << 21)) {
5789 /* Base writeback. */
5791 case 0: offset
= -8; break;
5792 case 1: offset
= 4; break;
5793 case 2: offset
= -4; break;
5794 case 3: offset
= 0; break;
5798 tcg_gen_addi_i32(addr
, addr
, offset
);
5799 if (op1
== (env
->uncached_cpsr
& CPSR_M
)) {
5800 store_reg(s
, 13, addr
);
5802 gen_helper_set_r13_banked(cpu_env
, tcg_const_i32(op1
), addr
);
5808 } else if ((insn
& 0x0e5fffe0) == 0x081d0a00) {
5814 rn
= (insn
>> 16) & 0xf;
5815 addr
= load_reg(s
, rn
);
5816 i
= (insn
>> 23) & 3;
5818 case 0: offset
= -4; break; /* DA */
5819 case 1: offset
= 0; break; /* IA */
5820 case 2: offset
= -8; break; /* DB */
5821 case 3: offset
= 4; break; /* IB */
5825 tcg_gen_addi_i32(addr
, addr
, offset
);
5826 /* Load PC into tmp and CPSR into tmp2. */
5827 tmp
= gen_ld32(addr
, 0);
5828 tcg_gen_addi_i32(addr
, addr
, 4);
5829 tmp2
= gen_ld32(addr
, 0);
5830 if (insn
& (1 << 21)) {
5831 /* Base writeback. */
5833 case 0: offset
= -8; break;
5834 case 1: offset
= 4; break;
5835 case 2: offset
= -4; break;
5836 case 3: offset
= 0; break;
5840 tcg_gen_addi_i32(addr
, addr
, offset
);
5841 store_reg(s
, rn
, addr
);
5845 gen_rfe(s
, tmp
, tmp2
);
5847 } else if ((insn
& 0x0e000000) == 0x0a000000) {
5848 /* branch link and change to thumb (blx <offset>) */
5851 val
= (uint32_t)s
->pc
;
5853 tcg_gen_movi_i32(tmp
, val
);
5854 store_reg(s
, 14, tmp
);
5855 /* Sign-extend the 24-bit offset */
5856 offset
= (((int32_t)insn
) << 8) >> 8;
5857 /* offset * 4 + bit24 * 2 + (thumb bit) */
5858 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
5859 /* pipeline offset */
5863 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
5864 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
5865 /* iWMMXt register transfer. */
5866 if (env
->cp15
.c15_cpar
& (1 << 1))
5867 if (!disas_iwmmxt_insn(env
, s
, insn
))
5870 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
5871 /* Coprocessor double register transfer. */
5872 } else if ((insn
& 0x0f000010) == 0x0e000010) {
5873 /* Additional coprocessor register transfer. */
5874 } else if ((insn
& 0x0ff10020) == 0x01000000) {
5877 /* cps (privileged) */
5881 if (insn
& (1 << 19)) {
5882 if (insn
& (1 << 8))
5884 if (insn
& (1 << 7))
5886 if (insn
& (1 << 6))
5888 if (insn
& (1 << 18))
5891 if (insn
& (1 << 17)) {
5893 val
|= (insn
& 0x1f);
5896 gen_set_psr_im(s
, mask
, 0, val
);
5903 /* if not always execute, we generate a conditional jump to
5905 s
->condlabel
= gen_new_label();
5906 gen_test_cc(cond
^ 1, s
->condlabel
);
5909 if ((insn
& 0x0f900000) == 0x03000000) {
5910 if ((insn
& (1 << 21)) == 0) {
5912 rd
= (insn
>> 12) & 0xf;
5913 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
5914 if ((insn
& (1 << 22)) == 0) {
5917 tcg_gen_movi_i32(tmp
, val
);
5920 tmp
= load_reg(s
, rd
);
5921 tcg_gen_ext16u_i32(tmp
, tmp
);
5922 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
5924 store_reg(s
, rd
, tmp
);
5926 if (((insn
>> 12) & 0xf) != 0xf)
5928 if (((insn
>> 16) & 0xf) == 0) {
5929 gen_nop_hint(s
, insn
& 0xff);
5931 /* CPSR = immediate */
5933 shift
= ((insn
>> 8) & 0xf) * 2;
5935 val
= (val
>> shift
) | (val
<< (32 - shift
));
5936 i
= ((insn
& (1 << 22)) != 0);
5937 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
5941 } else if ((insn
& 0x0f900000) == 0x01000000
5942 && (insn
& 0x00000090) != 0x00000090) {
5943 /* miscellaneous instructions */
5944 op1
= (insn
>> 21) & 3;
5945 sh
= (insn
>> 4) & 0xf;
5948 case 0x0: /* move program status register */
5951 tmp
= load_reg(s
, rm
);
5952 i
= ((op1
& 2) != 0);
5953 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
5957 rd
= (insn
>> 12) & 0xf;
5961 tmp
= load_cpu_field(spsr
);
5964 gen_helper_cpsr_read(tmp
);
5966 store_reg(s
, rd
, tmp
);
5971 /* branch/exchange thumb (bx). */
5972 tmp
= load_reg(s
, rm
);
5974 } else if (op1
== 3) {
5976 rd
= (insn
>> 12) & 0xf;
5977 tmp
= load_reg(s
, rm
);
5978 gen_helper_clz(tmp
, tmp
);
5979 store_reg(s
, rd
, tmp
);
5987 /* Trivial implementation equivalent to bx. */
5988 tmp
= load_reg(s
, rm
);
5998 /* branch link/exchange thumb (blx) */
5999 tmp
= load_reg(s
, rm
);
6001 tcg_gen_movi_i32(tmp2
, s
->pc
);
6002 store_reg(s
, 14, tmp2
);
6005 case 0x5: /* saturating add/subtract */
6006 rd
= (insn
>> 12) & 0xf;
6007 rn
= (insn
>> 16) & 0xf;
6008 tmp
= load_reg(s
, rm
);
6009 tmp2
= load_reg(s
, rn
);
6011 gen_helper_double_saturate(tmp2
, tmp2
);
6013 gen_helper_sub_saturate(tmp
, tmp
, tmp2
);
6015 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
6017 store_reg(s
, rd
, tmp
);
6020 gen_set_condexec(s
);
6021 gen_set_pc_im(s
->pc
- 4);
6022 gen_exception(EXCP_BKPT
);
6023 s
->is_jmp
= DISAS_JUMP
;
6025 case 0x8: /* signed multiply */
6029 rs
= (insn
>> 8) & 0xf;
6030 rn
= (insn
>> 12) & 0xf;
6031 rd
= (insn
>> 16) & 0xf;
6033 /* (32 * 16) >> 16 */
6034 tmp
= load_reg(s
, rm
);
6035 tmp2
= load_reg(s
, rs
);
6037 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
6040 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6041 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
6043 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6044 if ((sh
& 2) == 0) {
6045 tmp2
= load_reg(s
, rn
);
6046 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6049 store_reg(s
, rd
, tmp
);
6052 tmp
= load_reg(s
, rm
);
6053 tmp2
= load_reg(s
, rs
);
6054 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6057 tmp64
= tcg_temp_new_i64();
6058 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6060 gen_addq(s
, tmp64
, rn
, rd
);
6061 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6064 tmp2
= load_reg(s
, rn
);
6065 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6068 store_reg(s
, rd
, tmp
);
6075 } else if (((insn
& 0x0e000000) == 0 &&
6076 (insn
& 0x00000090) != 0x90) ||
6077 ((insn
& 0x0e000000) == (1 << 25))) {
6078 int set_cc
, logic_cc
, shiftop
;
6080 op1
= (insn
>> 21) & 0xf;
6081 set_cc
= (insn
>> 20) & 1;
6082 logic_cc
= table_logic_cc
[op1
] & set_cc
;
6084 /* data processing instruction */
6085 if (insn
& (1 << 25)) {
6086 /* immediate operand */
6088 shift
= ((insn
>> 8) & 0xf) * 2;
6090 val
= (val
>> shift
) | (val
<< (32 - shift
));
6093 tcg_gen_movi_i32(tmp2
, val
);
6094 if (logic_cc
&& shift
) {
6095 gen_set_CF_bit31(tmp2
);
6100 tmp2
= load_reg(s
, rm
);
6101 shiftop
= (insn
>> 5) & 3;
6102 if (!(insn
& (1 << 4))) {
6103 shift
= (insn
>> 7) & 0x1f;
6104 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
6106 rs
= (insn
>> 8) & 0xf;
6107 tmp
= load_reg(s
, rs
);
6108 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
6111 if (op1
!= 0x0f && op1
!= 0x0d) {
6112 rn
= (insn
>> 16) & 0xf;
6113 tmp
= load_reg(s
, rn
);
6117 rd
= (insn
>> 12) & 0xf;
6120 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6124 store_reg_bx(env
, s
, rd
, tmp
);
6127 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6131 store_reg_bx(env
, s
, rd
, tmp
);
6134 if (set_cc
&& rd
== 15) {
6135 /* SUBS r15, ... is used for exception return. */
6139 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6140 gen_exception_return(s
, tmp
);
6143 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6145 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6147 store_reg_bx(env
, s
, rd
, tmp
);
6152 gen_helper_sub_cc(tmp
, tmp2
, tmp
);
6154 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6156 store_reg_bx(env
, s
, rd
, tmp
);
6160 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6162 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6164 store_reg_bx(env
, s
, rd
, tmp
);
6168 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
6170 gen_add_carry(tmp
, tmp
, tmp2
);
6172 store_reg_bx(env
, s
, rd
, tmp
);
6176 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
6178 gen_sub_carry(tmp
, tmp
, tmp2
);
6180 store_reg_bx(env
, s
, rd
, tmp
);
6184 gen_helper_sbc_cc(tmp
, tmp2
, tmp
);
6186 gen_sub_carry(tmp
, tmp2
, tmp
);
6188 store_reg_bx(env
, s
, rd
, tmp
);
6192 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
6199 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
6206 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
6212 gen_helper_add_cc(tmp
, tmp
, tmp2
);
6217 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6221 store_reg_bx(env
, s
, rd
, tmp
);
6224 if (logic_cc
&& rd
== 15) {
6225 /* MOVS r15, ... is used for exception return. */
6229 gen_exception_return(s
, tmp2
);
6234 store_reg_bx(env
, s
, rd
, tmp2
);
6238 tcg_gen_bic_i32(tmp
, tmp
, tmp2
);
6242 store_reg_bx(env
, s
, rd
, tmp
);
6246 tcg_gen_not_i32(tmp2
, tmp2
);
6250 store_reg_bx(env
, s
, rd
, tmp2
);
6253 if (op1
!= 0x0f && op1
!= 0x0d) {
6257 /* other instructions */
6258 op1
= (insn
>> 24) & 0xf;
6262 /* multiplies, extra load/stores */
6263 sh
= (insn
>> 5) & 3;
6266 rd
= (insn
>> 16) & 0xf;
6267 rn
= (insn
>> 12) & 0xf;
6268 rs
= (insn
>> 8) & 0xf;
6270 op1
= (insn
>> 20) & 0xf;
6272 case 0: case 1: case 2: case 3: case 6:
6274 tmp
= load_reg(s
, rs
);
6275 tmp2
= load_reg(s
, rm
);
6276 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
6278 if (insn
& (1 << 22)) {
6279 /* Subtract (mls) */
6281 tmp2
= load_reg(s
, rn
);
6282 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6284 } else if (insn
& (1 << 21)) {
6286 tmp2
= load_reg(s
, rn
);
6287 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6290 if (insn
& (1 << 20))
6292 store_reg(s
, rd
, tmp
);
6296 tmp
= load_reg(s
, rs
);
6297 tmp2
= load_reg(s
, rm
);
6298 if (insn
& (1 << 22))
6299 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6301 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
6302 if (insn
& (1 << 21)) /* mult accumulate */
6303 gen_addq(s
, tmp64
, rn
, rd
);
6304 if (!(insn
& (1 << 23))) { /* double accumulate */
6306 gen_addq_lo(s
, tmp64
, rn
);
6307 gen_addq_lo(s
, tmp64
, rd
);
6309 if (insn
& (1 << 20))
6310 gen_logicq_cc(tmp64
);
6311 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6315 rn
= (insn
>> 16) & 0xf;
6316 rd
= (insn
>> 12) & 0xf;
6317 if (insn
& (1 << 23)) {
6318 /* load/store exclusive */
6319 op1
= (insn
>> 21) & 0x3;
6324 gen_movl_T1_reg(s
, rn
);
6326 if (insn
& (1 << 20)) {
6327 gen_helper_mark_exclusive(cpu_env
, cpu_T
[1]);
6330 tmp
= gen_ld32(addr
, IS_USER(s
));
6332 case 1: /* ldrexd */
6333 tmp
= gen_ld32(addr
, IS_USER(s
));
6334 store_reg(s
, rd
, tmp
);
6335 tcg_gen_addi_i32(addr
, addr
, 4);
6336 tmp
= gen_ld32(addr
, IS_USER(s
));
6339 case 2: /* ldrexb */
6340 tmp
= gen_ld8u(addr
, IS_USER(s
));
6342 case 3: /* ldrexh */
6343 tmp
= gen_ld16u(addr
, IS_USER(s
));
6348 store_reg(s
, rd
, tmp
);
6350 int label
= gen_new_label();
6352 gen_helper_test_exclusive(cpu_T
[0], cpu_env
, addr
);
6353 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_T
[0],
6355 tmp
= load_reg(s
,rm
);
6358 gen_st32(tmp
, addr
, IS_USER(s
));
6360 case 1: /* strexd */
6361 gen_st32(tmp
, addr
, IS_USER(s
));
6362 tcg_gen_addi_i32(addr
, addr
, 4);
6363 tmp
= load_reg(s
, rm
+ 1);
6364 gen_st32(tmp
, addr
, IS_USER(s
));
6366 case 2: /* strexb */
6367 gen_st8(tmp
, addr
, IS_USER(s
));
6369 case 3: /* strexh */
6370 gen_st16(tmp
, addr
, IS_USER(s
));
6375 gen_set_label(label
);
6376 gen_movl_reg_T0(s
, rd
);
6379 /* SWP instruction */
6382 /* ??? This is not really atomic. However we know
6383 we never have multiple CPUs running in parallel,
6384 so it is good enough. */
6385 addr
= load_reg(s
, rn
);
6386 tmp
= load_reg(s
, rm
);
6387 if (insn
& (1 << 22)) {
6388 tmp2
= gen_ld8u(addr
, IS_USER(s
));
6389 gen_st8(tmp
, addr
, IS_USER(s
));
6391 tmp2
= gen_ld32(addr
, IS_USER(s
));
6392 gen_st32(tmp
, addr
, IS_USER(s
));
6395 store_reg(s
, rd
, tmp2
);
6401 /* Misc load/store */
6402 rn
= (insn
>> 16) & 0xf;
6403 rd
= (insn
>> 12) & 0xf;
6404 addr
= load_reg(s
, rn
);
6405 if (insn
& (1 << 24))
6406 gen_add_datah_offset(s
, insn
, 0, addr
);
6408 if (insn
& (1 << 20)) {
6412 tmp
= gen_ld16u(addr
, IS_USER(s
));
6415 tmp
= gen_ld8s(addr
, IS_USER(s
));
6419 tmp
= gen_ld16s(addr
, IS_USER(s
));
6423 } else if (sh
& 2) {
6427 tmp
= load_reg(s
, rd
);
6428 gen_st32(tmp
, addr
, IS_USER(s
));
6429 tcg_gen_addi_i32(addr
, addr
, 4);
6430 tmp
= load_reg(s
, rd
+ 1);
6431 gen_st32(tmp
, addr
, IS_USER(s
));
6435 tmp
= gen_ld32(addr
, IS_USER(s
));
6436 store_reg(s
, rd
, tmp
);
6437 tcg_gen_addi_i32(addr
, addr
, 4);
6438 tmp
= gen_ld32(addr
, IS_USER(s
));
6442 address_offset
= -4;
6445 tmp
= load_reg(s
, rd
);
6446 gen_st16(tmp
, addr
, IS_USER(s
));
6449 /* Perform base writeback before the loaded value to
6450 ensure correct behavior with overlapping index registers.
6451 ldrd with base writeback is is undefined if the
6452 destination and index registers overlap. */
6453 if (!(insn
& (1 << 24))) {
6454 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
6455 store_reg(s
, rn
, addr
);
6456 } else if (insn
& (1 << 21)) {
6458 tcg_gen_addi_i32(addr
, addr
, address_offset
);
6459 store_reg(s
, rn
, addr
);
6464 /* Complete the load. */
6465 store_reg(s
, rd
, tmp
);
6474 if (insn
& (1 << 4)) {
6476 /* Armv6 Media instructions. */
6478 rn
= (insn
>> 16) & 0xf;
6479 rd
= (insn
>> 12) & 0xf;
6480 rs
= (insn
>> 8) & 0xf;
6481 switch ((insn
>> 23) & 3) {
6482 case 0: /* Parallel add/subtract. */
6483 op1
= (insn
>> 20) & 7;
6484 tmp
= load_reg(s
, rn
);
6485 tmp2
= load_reg(s
, rm
);
6486 sh
= (insn
>> 5) & 7;
6487 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
6489 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
6491 store_reg(s
, rd
, tmp
);
6494 if ((insn
& 0x00700020) == 0) {
6495 /* Halfword pack. */
6496 tmp
= load_reg(s
, rn
);
6497 tmp2
= load_reg(s
, rm
);
6498 shift
= (insn
>> 7) & 0x1f;
6499 if (insn
& (1 << 6)) {
6503 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
6504 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
6505 tcg_gen_ext16u_i32(tmp2
, tmp2
);
6509 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
6510 tcg_gen_ext16u_i32(tmp
, tmp
);
6511 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
6513 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6515 store_reg(s
, rd
, tmp
);
6516 } else if ((insn
& 0x00200020) == 0x00200000) {
6518 tmp
= load_reg(s
, rm
);
6519 shift
= (insn
>> 7) & 0x1f;
6520 if (insn
& (1 << 6)) {
6523 tcg_gen_sari_i32(tmp
, tmp
, shift
);
6525 tcg_gen_shli_i32(tmp
, tmp
, shift
);
6527 sh
= (insn
>> 16) & 0x1f;
6529 if (insn
& (1 << 22))
6530 gen_helper_usat(tmp
, tmp
, tcg_const_i32(sh
));
6532 gen_helper_ssat(tmp
, tmp
, tcg_const_i32(sh
));
6534 store_reg(s
, rd
, tmp
);
6535 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
6537 tmp
= load_reg(s
, rm
);
6538 sh
= (insn
>> 16) & 0x1f;
6540 if (insn
& (1 << 22))
6541 gen_helper_usat16(tmp
, tmp
, tcg_const_i32(sh
));
6543 gen_helper_ssat16(tmp
, tmp
, tcg_const_i32(sh
));
6545 store_reg(s
, rd
, tmp
);
6546 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
6548 tmp
= load_reg(s
, rn
);
6549 tmp2
= load_reg(s
, rm
);
6551 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
6552 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
6555 store_reg(s
, rd
, tmp
);
6556 } else if ((insn
& 0x000003e0) == 0x00000060) {
6557 tmp
= load_reg(s
, rm
);
6558 shift
= (insn
>> 10) & 3;
6559 /* ??? In many cases it's not neccessary to do a
6560 rotate, a shift is sufficient. */
6562 tcg_gen_rori_i32(tmp
, tmp
, shift
* 8);
6563 op1
= (insn
>> 20) & 7;
6565 case 0: gen_sxtb16(tmp
); break;
6566 case 2: gen_sxtb(tmp
); break;
6567 case 3: gen_sxth(tmp
); break;
6568 case 4: gen_uxtb16(tmp
); break;
6569 case 6: gen_uxtb(tmp
); break;
6570 case 7: gen_uxth(tmp
); break;
6571 default: goto illegal_op
;
6574 tmp2
= load_reg(s
, rn
);
6575 if ((op1
& 3) == 0) {
6576 gen_add16(tmp
, tmp2
);
6578 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6582 store_reg(s
, rd
, tmp
);
6583 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
6585 tmp
= load_reg(s
, rm
);
6586 if (insn
& (1 << 22)) {
6587 if (insn
& (1 << 7)) {
6591 gen_helper_rbit(tmp
, tmp
);
6594 if (insn
& (1 << 7))
6597 tcg_gen_bswap32_i32(tmp
, tmp
);
6599 store_reg(s
, rd
, tmp
);
6604 case 2: /* Multiplies (Type 3). */
6605 tmp
= load_reg(s
, rm
);
6606 tmp2
= load_reg(s
, rs
);
6607 if (insn
& (1 << 20)) {
6608 /* Signed multiply most significant [accumulate]. */
6609 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6610 if (insn
& (1 << 5))
6611 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
6612 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
6614 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6616 tmp2
= load_reg(s
, rd
);
6617 if (insn
& (1 << 6)) {
6618 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6620 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6624 store_reg(s
, rn
, tmp
);
6626 if (insn
& (1 << 5))
6627 gen_swap_half(tmp2
);
6628 gen_smul_dual(tmp
, tmp2
);
6629 /* This addition cannot overflow. */
6630 if (insn
& (1 << 6)) {
6631 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6633 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6636 if (insn
& (1 << 22)) {
6637 /* smlald, smlsld */
6638 tmp64
= tcg_temp_new_i64();
6639 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6641 gen_addq(s
, tmp64
, rd
, rn
);
6642 gen_storeq_reg(s
, rd
, rn
, tmp64
);
6644 /* smuad, smusd, smlad, smlsd */
6647 tmp2
= load_reg(s
, rd
);
6648 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6651 store_reg(s
, rn
, tmp
);
6656 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
6658 case 0: /* Unsigned sum of absolute differences. */
6660 tmp
= load_reg(s
, rm
);
6661 tmp2
= load_reg(s
, rs
);
6662 gen_helper_usad8(tmp
, tmp
, tmp2
);
6665 tmp2
= load_reg(s
, rd
);
6666 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6669 store_reg(s
, rn
, tmp
);
6671 case 0x20: case 0x24: case 0x28: case 0x2c:
6672 /* Bitfield insert/clear. */
6674 shift
= (insn
>> 7) & 0x1f;
6675 i
= (insn
>> 16) & 0x1f;
6679 tcg_gen_movi_i32(tmp
, 0);
6681 tmp
= load_reg(s
, rm
);
6684 tmp2
= load_reg(s
, rd
);
6685 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << i
) - 1);
6688 store_reg(s
, rd
, tmp
);
6690 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6691 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
6693 tmp
= load_reg(s
, rm
);
6694 shift
= (insn
>> 7) & 0x1f;
6695 i
= ((insn
>> 16) & 0x1f) + 1;
6700 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
6702 gen_sbfx(tmp
, shift
, i
);
6705 store_reg(s
, rd
, tmp
);
6715 /* Check for undefined extension instructions
6716 * per the ARM Bible IE:
6717 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6719 sh
= (0xf << 20) | (0xf << 4);
6720 if (op1
== 0x7 && ((insn
& sh
) == sh
))
6724 /* load/store byte/word */
6725 rn
= (insn
>> 16) & 0xf;
6726 rd
= (insn
>> 12) & 0xf;
6727 tmp2
= load_reg(s
, rn
);
6728 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
6729 if (insn
& (1 << 24))
6730 gen_add_data_offset(s
, insn
, tmp2
);
6731 if (insn
& (1 << 20)) {
6733 if (insn
& (1 << 22)) {
6734 tmp
= gen_ld8u(tmp2
, i
);
6736 tmp
= gen_ld32(tmp2
, i
);
6740 tmp
= load_reg(s
, rd
);
6741 if (insn
& (1 << 22))
6742 gen_st8(tmp
, tmp2
, i
);
6744 gen_st32(tmp
, tmp2
, i
);
6746 if (!(insn
& (1 << 24))) {
6747 gen_add_data_offset(s
, insn
, tmp2
);
6748 store_reg(s
, rn
, tmp2
);
6749 } else if (insn
& (1 << 21)) {
6750 store_reg(s
, rn
, tmp2
);
6754 if (insn
& (1 << 20)) {
6755 /* Complete the load. */
6759 store_reg(s
, rd
, tmp
);
6765 int j
, n
, user
, loaded_base
;
6767 /* load/store multiple words */
6768 /* XXX: store correct base if write back */
6770 if (insn
& (1 << 22)) {
6772 goto illegal_op
; /* only usable in supervisor mode */
6774 if ((insn
& (1 << 15)) == 0)
6777 rn
= (insn
>> 16) & 0xf;
6778 addr
= load_reg(s
, rn
);
6780 /* compute total size */
6782 TCGV_UNUSED(loaded_var
);
6785 if (insn
& (1 << i
))
6788 /* XXX: test invalid n == 0 case ? */
6789 if (insn
& (1 << 23)) {
6790 if (insn
& (1 << 24)) {
6792 tcg_gen_addi_i32(addr
, addr
, 4);
6794 /* post increment */
6797 if (insn
& (1 << 24)) {
6799 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
6801 /* post decrement */
6803 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
6808 if (insn
& (1 << i
)) {
6809 if (insn
& (1 << 20)) {
6811 tmp
= gen_ld32(addr
, IS_USER(s
));
6815 gen_helper_set_user_reg(tcg_const_i32(i
), tmp
);
6817 } else if (i
== rn
) {
6821 store_reg(s
, i
, tmp
);
6826 /* special case: r15 = PC + 8 */
6827 val
= (long)s
->pc
+ 4;
6829 tcg_gen_movi_i32(tmp
, val
);
6832 gen_helper_get_user_reg(tmp
, tcg_const_i32(i
));
6834 tmp
= load_reg(s
, i
);
6836 gen_st32(tmp
, addr
, IS_USER(s
));
6839 /* no need to add after the last transfer */
6841 tcg_gen_addi_i32(addr
, addr
, 4);
6844 if (insn
& (1 << 21)) {
6846 if (insn
& (1 << 23)) {
6847 if (insn
& (1 << 24)) {
6850 /* post increment */
6851 tcg_gen_addi_i32(addr
, addr
, 4);
6854 if (insn
& (1 << 24)) {
6857 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
6859 /* post decrement */
6860 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
6863 store_reg(s
, rn
, addr
);
6868 store_reg(s
, rn
, loaded_var
);
6870 if ((insn
& (1 << 22)) && !user
) {
6871 /* Restore CPSR from SPSR. */
6872 tmp
= load_cpu_field(spsr
);
6873 gen_set_cpsr(tmp
, 0xffffffff);
6875 s
->is_jmp
= DISAS_UPDATE
;
6884 /* branch (and link) */
6885 val
= (int32_t)s
->pc
;
6886 if (insn
& (1 << 24)) {
6888 tcg_gen_movi_i32(tmp
, val
);
6889 store_reg(s
, 14, tmp
);
6891 offset
= (((int32_t)insn
<< 8) >> 8);
6892 val
+= (offset
<< 2) + 4;
6900 if (disas_coproc_insn(env
, s
, insn
))
6905 gen_set_pc_im(s
->pc
);
6906 s
->is_jmp
= DISAS_SWI
;
6910 gen_set_condexec(s
);
6911 gen_set_pc_im(s
->pc
- 4);
6912 gen_exception(EXCP_UDEF
);
6913 s
->is_jmp
= DISAS_JUMP
;
6919 /* Return true if this is a Thumb-2 logical op. */
6921 thumb2_logic_op(int op
)
6926 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6927 then set condition code flags based on the result of the operation.
6928 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6929 to the high bit of T1.
6930 Returns zero if the opcode is valid. */
6933 gen_thumb2_data_op(DisasContext
*s
, int op
, int conds
, uint32_t shifter_out
)
6940 gen_op_andl_T0_T1();
6944 gen_op_bicl_T0_T1();
6957 gen_op_xorl_T0_T1();
6962 gen_op_addl_T0_T1_cc();
6964 gen_op_addl_T0_T1();
6968 gen_op_adcl_T0_T1_cc();
6974 gen_op_sbcl_T0_T1_cc();
6980 gen_op_subl_T0_T1_cc();
6982 gen_op_subl_T0_T1();
6986 gen_op_rsbl_T0_T1_cc();
6988 gen_op_rsbl_T0_T1();
6990 default: /* 5, 6, 7, 9, 12, 15. */
6994 gen_op_logic_T0_cc();
6996 gen_set_CF_bit31(cpu_T
[1]);
7001 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7003 static int disas_thumb2_insn(CPUState
*env
, DisasContext
*s
, uint16_t insn_hw1
)
7005 uint32_t insn
, imm
, shift
, offset
;
7006 uint32_t rd
, rn
, rm
, rs
;
7017 if (!(arm_feature(env
, ARM_FEATURE_THUMB2
)
7018 || arm_feature (env
, ARM_FEATURE_M
))) {
7019 /* Thumb-1 cores may need to treat bl and blx as a pair of
7020 16-bit instructions to get correct prefetch abort behavior. */
7022 if ((insn
& (1 << 12)) == 0) {
7023 /* Second half of blx. */
7024 offset
= ((insn
& 0x7ff) << 1);
7025 tmp
= load_reg(s
, 14);
7026 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7027 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
7030 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7031 store_reg(s
, 14, tmp2
);
7035 if (insn
& (1 << 11)) {
7036 /* Second half of bl. */
7037 offset
= ((insn
& 0x7ff) << 1) | 1;
7038 tmp
= load_reg(s
, 14);
7039 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7042 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7043 store_reg(s
, 14, tmp2
);
7047 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
7048 /* Instruction spans a page boundary. Implement it as two
7049 16-bit instructions in case the second half causes an
7051 offset
= ((int32_t)insn
<< 21) >> 9;
7052 gen_op_movl_T0_im(s
->pc
+ 2 + offset
);
7053 gen_movl_reg_T0(s
, 14);
7056 /* Fall through to 32-bit decode. */
7059 insn
= lduw_code(s
->pc
);
7061 insn
|= (uint32_t)insn_hw1
<< 16;
7063 if ((insn
& 0xf800e800) != 0xf000e800) {
7067 rn
= (insn
>> 16) & 0xf;
7068 rs
= (insn
>> 12) & 0xf;
7069 rd
= (insn
>> 8) & 0xf;
7071 switch ((insn
>> 25) & 0xf) {
7072 case 0: case 1: case 2: case 3:
7073 /* 16-bit instructions. Should never happen. */
7076 if (insn
& (1 << 22)) {
7077 /* Other load/store, table branch. */
7078 if (insn
& 0x01200000) {
7079 /* Load/store doubleword. */
7082 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
7084 addr
= load_reg(s
, rn
);
7086 offset
= (insn
& 0xff) * 4;
7087 if ((insn
& (1 << 23)) == 0)
7089 if (insn
& (1 << 24)) {
7090 tcg_gen_addi_i32(addr
, addr
, offset
);
7093 if (insn
& (1 << 20)) {
7095 tmp
= gen_ld32(addr
, IS_USER(s
));
7096 store_reg(s
, rs
, tmp
);
7097 tcg_gen_addi_i32(addr
, addr
, 4);
7098 tmp
= gen_ld32(addr
, IS_USER(s
));
7099 store_reg(s
, rd
, tmp
);
7102 tmp
= load_reg(s
, rs
);
7103 gen_st32(tmp
, addr
, IS_USER(s
));
7104 tcg_gen_addi_i32(addr
, addr
, 4);
7105 tmp
= load_reg(s
, rd
);
7106 gen_st32(tmp
, addr
, IS_USER(s
));
7108 if (insn
& (1 << 21)) {
7109 /* Base writeback. */
7112 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
7113 store_reg(s
, rn
, addr
);
7117 } else if ((insn
& (1 << 23)) == 0) {
7118 /* Load/store exclusive word. */
7119 gen_movl_T1_reg(s
, rn
);
7121 if (insn
& (1 << 20)) {
7122 gen_helper_mark_exclusive(cpu_env
, cpu_T
[1]);
7123 tmp
= gen_ld32(addr
, IS_USER(s
));
7124 store_reg(s
, rd
, tmp
);
7126 int label
= gen_new_label();
7127 gen_helper_test_exclusive(cpu_T
[0], cpu_env
, addr
);
7128 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_T
[0],
7130 tmp
= load_reg(s
, rs
);
7131 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
7132 gen_set_label(label
);
7133 gen_movl_reg_T0(s
, rd
);
7135 } else if ((insn
& (1 << 6)) == 0) {
7139 tcg_gen_movi_i32(addr
, s
->pc
);
7141 addr
= load_reg(s
, rn
);
7143 tmp
= load_reg(s
, rm
);
7144 tcg_gen_add_i32(addr
, addr
, tmp
);
7145 if (insn
& (1 << 4)) {
7147 tcg_gen_add_i32(addr
, addr
, tmp
);
7149 tmp
= gen_ld16u(addr
, IS_USER(s
));
7152 tmp
= gen_ld8u(addr
, IS_USER(s
));
7155 tcg_gen_shli_i32(tmp
, tmp
, 1);
7156 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
7157 store_reg(s
, 15, tmp
);
7159 /* Load/store exclusive byte/halfword/doubleword. */
7160 /* ??? These are not really atomic. However we know
7161 we never have multiple CPUs running in parallel,
7162 so it is good enough. */
7163 op
= (insn
>> 4) & 0x3;
7164 /* Must use a global reg for the address because we have
7165 a conditional branch in the store instruction. */
7166 gen_movl_T1_reg(s
, rn
);
7168 if (insn
& (1 << 20)) {
7169 gen_helper_mark_exclusive(cpu_env
, addr
);
7172 tmp
= gen_ld8u(addr
, IS_USER(s
));
7175 tmp
= gen_ld16u(addr
, IS_USER(s
));
7178 tmp
= gen_ld32(addr
, IS_USER(s
));
7179 tcg_gen_addi_i32(addr
, addr
, 4);
7180 tmp2
= gen_ld32(addr
, IS_USER(s
));
7181 store_reg(s
, rd
, tmp2
);
7186 store_reg(s
, rs
, tmp
);
7188 int label
= gen_new_label();
7189 /* Must use a global that is not killed by the branch. */
7190 gen_helper_test_exclusive(cpu_T
[0], cpu_env
, addr
);
7191 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_T
[0], 0, label
);
7192 tmp
= load_reg(s
, rs
);
7195 gen_st8(tmp
, addr
, IS_USER(s
));
7198 gen_st16(tmp
, addr
, IS_USER(s
));
7201 gen_st32(tmp
, addr
, IS_USER(s
));
7202 tcg_gen_addi_i32(addr
, addr
, 4);
7203 tmp
= load_reg(s
, rd
);
7204 gen_st32(tmp
, addr
, IS_USER(s
));
7209 gen_set_label(label
);
7210 gen_movl_reg_T0(s
, rm
);
7214 /* Load/store multiple, RFE, SRS. */
7215 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
7216 /* Not available in user mode. */
7219 if (insn
& (1 << 20)) {
7221 addr
= load_reg(s
, rn
);
7222 if ((insn
& (1 << 24)) == 0)
7223 tcg_gen_addi_i32(addr
, addr
, -8);
7224 /* Load PC into tmp and CPSR into tmp2. */
7225 tmp
= gen_ld32(addr
, 0);
7226 tcg_gen_addi_i32(addr
, addr
, 4);
7227 tmp2
= gen_ld32(addr
, 0);
7228 if (insn
& (1 << 21)) {
7229 /* Base writeback. */
7230 if (insn
& (1 << 24)) {
7231 tcg_gen_addi_i32(addr
, addr
, 4);
7233 tcg_gen_addi_i32(addr
, addr
, -4);
7235 store_reg(s
, rn
, addr
);
7239 gen_rfe(s
, tmp
, tmp2
);
7243 if (op
== (env
->uncached_cpsr
& CPSR_M
)) {
7244 addr
= load_reg(s
, 13);
7247 gen_helper_get_r13_banked(addr
, cpu_env
, tcg_const_i32(op
));
7249 if ((insn
& (1 << 24)) == 0) {
7250 tcg_gen_addi_i32(addr
, addr
, -8);
7252 tmp
= load_reg(s
, 14);
7253 gen_st32(tmp
, addr
, 0);
7254 tcg_gen_addi_i32(addr
, addr
, 4);
7256 gen_helper_cpsr_read(tmp
);
7257 gen_st32(tmp
, addr
, 0);
7258 if (insn
& (1 << 21)) {
7259 if ((insn
& (1 << 24)) == 0) {
7260 tcg_gen_addi_i32(addr
, addr
, -4);
7262 tcg_gen_addi_i32(addr
, addr
, 4);
7264 if (op
== (env
->uncached_cpsr
& CPSR_M
)) {
7265 store_reg(s
, 13, addr
);
7267 gen_helper_set_r13_banked(cpu_env
,
7268 tcg_const_i32(op
), addr
);
7276 /* Load/store multiple. */
7277 addr
= load_reg(s
, rn
);
7279 for (i
= 0; i
< 16; i
++) {
7280 if (insn
& (1 << i
))
7283 if (insn
& (1 << 24)) {
7284 tcg_gen_addi_i32(addr
, addr
, -offset
);
7287 for (i
= 0; i
< 16; i
++) {
7288 if ((insn
& (1 << i
)) == 0)
7290 if (insn
& (1 << 20)) {
7292 tmp
= gen_ld32(addr
, IS_USER(s
));
7296 store_reg(s
, i
, tmp
);
7300 tmp
= load_reg(s
, i
);
7301 gen_st32(tmp
, addr
, IS_USER(s
));
7303 tcg_gen_addi_i32(addr
, addr
, 4);
7305 if (insn
& (1 << 21)) {
7306 /* Base register writeback. */
7307 if (insn
& (1 << 24)) {
7308 tcg_gen_addi_i32(addr
, addr
, -offset
);
7310 /* Fault if writeback register is in register list. */
7311 if (insn
& (1 << rn
))
7313 store_reg(s
, rn
, addr
);
7320 case 5: /* Data processing register constant shift. */
7322 gen_op_movl_T0_im(0);
7324 gen_movl_T0_reg(s
, rn
);
7325 gen_movl_T1_reg(s
, rm
);
7326 op
= (insn
>> 21) & 0xf;
7327 shiftop
= (insn
>> 4) & 3;
7328 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
7329 conds
= (insn
& (1 << 20)) != 0;
7330 logic_cc
= (conds
&& thumb2_logic_op(op
));
7331 gen_arm_shift_im(cpu_T
[1], shiftop
, shift
, logic_cc
);
7332 if (gen_thumb2_data_op(s
, op
, conds
, 0))
7335 gen_movl_reg_T0(s
, rd
);
7337 case 13: /* Misc data processing. */
7338 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
7339 if (op
< 4 && (insn
& 0xf000) != 0xf000)
7342 case 0: /* Register controlled shift. */
7343 tmp
= load_reg(s
, rn
);
7344 tmp2
= load_reg(s
, rm
);
7345 if ((insn
& 0x70) != 0)
7347 op
= (insn
>> 21) & 3;
7348 logic_cc
= (insn
& (1 << 20)) != 0;
7349 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
7352 store_reg_bx(env
, s
, rd
, tmp
);
7354 case 1: /* Sign/zero extend. */
7355 tmp
= load_reg(s
, rm
);
7356 shift
= (insn
>> 4) & 3;
7357 /* ??? In many cases it's not neccessary to do a
7358 rotate, a shift is sufficient. */
7360 tcg_gen_rori_i32(tmp
, tmp
, shift
* 8);
7361 op
= (insn
>> 20) & 7;
7363 case 0: gen_sxth(tmp
); break;
7364 case 1: gen_uxth(tmp
); break;
7365 case 2: gen_sxtb16(tmp
); break;
7366 case 3: gen_uxtb16(tmp
); break;
7367 case 4: gen_sxtb(tmp
); break;
7368 case 5: gen_uxtb(tmp
); break;
7369 default: goto illegal_op
;
7372 tmp2
= load_reg(s
, rn
);
7373 if ((op
>> 1) == 1) {
7374 gen_add16(tmp
, tmp2
);
7376 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7380 store_reg(s
, rd
, tmp
);
7382 case 2: /* SIMD add/subtract. */
7383 op
= (insn
>> 20) & 7;
7384 shift
= (insn
>> 4) & 7;
7385 if ((op
& 3) == 3 || (shift
& 3) == 3)
7387 tmp
= load_reg(s
, rn
);
7388 tmp2
= load_reg(s
, rm
);
7389 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
7391 store_reg(s
, rd
, tmp
);
7393 case 3: /* Other data processing. */
7394 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
7396 /* Saturating add/subtract. */
7397 tmp
= load_reg(s
, rn
);
7398 tmp2
= load_reg(s
, rm
);
7400 gen_helper_double_saturate(tmp
, tmp
);
7402 gen_helper_sub_saturate(tmp
, tmp2
, tmp
);
7404 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
7407 tmp
= load_reg(s
, rn
);
7409 case 0x0a: /* rbit */
7410 gen_helper_rbit(tmp
, tmp
);
7412 case 0x08: /* rev */
7413 tcg_gen_bswap32_i32(tmp
, tmp
);
7415 case 0x09: /* rev16 */
7418 case 0x0b: /* revsh */
7421 case 0x10: /* sel */
7422 tmp2
= load_reg(s
, rm
);
7424 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
7425 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7429 case 0x18: /* clz */
7430 gen_helper_clz(tmp
, tmp
);
7436 store_reg(s
, rd
, tmp
);
7438 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7439 op
= (insn
>> 4) & 0xf;
7440 tmp
= load_reg(s
, rn
);
7441 tmp2
= load_reg(s
, rm
);
7442 switch ((insn
>> 20) & 7) {
7443 case 0: /* 32 x 32 -> 32 */
7444 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7447 tmp2
= load_reg(s
, rs
);
7449 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7451 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7455 case 1: /* 16 x 16 -> 32 */
7456 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7459 tmp2
= load_reg(s
, rs
);
7460 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7464 case 2: /* Dual multiply add. */
7465 case 4: /* Dual multiply subtract. */
7467 gen_swap_half(tmp2
);
7468 gen_smul_dual(tmp
, tmp2
);
7469 /* This addition cannot overflow. */
7470 if (insn
& (1 << 22)) {
7471 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7473 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7478 tmp2
= load_reg(s
, rs
);
7479 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7483 case 3: /* 32 * 16 -> 32msb */
7485 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
7488 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7489 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
7491 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7494 tmp2
= load_reg(s
, rs
);
7495 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7499 case 5: case 6: /* 32 * 32 -> 32msb */
7500 gen_imull(tmp
, tmp2
);
7501 if (insn
& (1 << 5)) {
7502 gen_roundqd(tmp
, tmp2
);
7509 tmp2
= load_reg(s
, rs
);
7510 if (insn
& (1 << 21)) {
7511 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7513 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7518 case 7: /* Unsigned sum of absolute differences. */
7519 gen_helper_usad8(tmp
, tmp
, tmp2
);
7522 tmp2
= load_reg(s
, rs
);
7523 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7528 store_reg(s
, rd
, tmp
);
7530 case 6: case 7: /* 64-bit multiply, Divide. */
7531 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
7532 tmp
= load_reg(s
, rn
);
7533 tmp2
= load_reg(s
, rm
);
7534 if ((op
& 0x50) == 0x10) {
7536 if (!arm_feature(env
, ARM_FEATURE_DIV
))
7539 gen_helper_udiv(tmp
, tmp
, tmp2
);
7541 gen_helper_sdiv(tmp
, tmp
, tmp2
);
7543 store_reg(s
, rd
, tmp
);
7544 } else if ((op
& 0xe) == 0xc) {
7545 /* Dual multiply accumulate long. */
7547 gen_swap_half(tmp2
);
7548 gen_smul_dual(tmp
, tmp2
);
7550 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7552 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7556 tmp64
= tcg_temp_new_i64();
7557 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7559 gen_addq(s
, tmp64
, rs
, rd
);
7560 gen_storeq_reg(s
, rs
, rd
, tmp64
);
7563 /* Unsigned 64-bit multiply */
7564 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7568 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7570 tmp64
= tcg_temp_new_i64();
7571 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7574 /* Signed 64-bit multiply */
7575 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7580 gen_addq_lo(s
, tmp64
, rs
);
7581 gen_addq_lo(s
, tmp64
, rd
);
7582 } else if (op
& 0x40) {
7583 /* 64-bit accumulate. */
7584 gen_addq(s
, tmp64
, rs
, rd
);
7586 gen_storeq_reg(s
, rs
, rd
, tmp64
);
7591 case 6: case 7: case 14: case 15:
7593 if (((insn
>> 24) & 3) == 3) {
7594 /* Translate into the equivalent ARM encoding. */
7595 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4);
7596 if (disas_neon_data_insn(env
, s
, insn
))
7599 if (insn
& (1 << 28))
7601 if (disas_coproc_insn (env
, s
, insn
))
7605 case 8: case 9: case 10: case 11:
7606 if (insn
& (1 << 15)) {
7607 /* Branches, misc control. */
7608 if (insn
& 0x5000) {
7609 /* Unconditional branch. */
7610 /* signextend(hw1[10:0]) -> offset[:12]. */
7611 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
7612 /* hw1[10:0] -> offset[11:1]. */
7613 offset
|= (insn
& 0x7ff) << 1;
7614 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7615 offset[24:22] already have the same value because of the
7616 sign extension above. */
7617 offset
^= ((~insn
) & (1 << 13)) << 10;
7618 offset
^= ((~insn
) & (1 << 11)) << 11;
7620 if (insn
& (1 << 14)) {
7621 /* Branch and link. */
7622 gen_op_movl_T1_im(s
->pc
| 1);
7623 gen_movl_reg_T1(s
, 14);
7627 if (insn
& (1 << 12)) {
7632 offset
&= ~(uint32_t)2;
7633 gen_bx_im(s
, offset
);
7635 } else if (((insn
>> 23) & 7) == 7) {
7637 if (insn
& (1 << 13))
7640 if (insn
& (1 << 26)) {
7641 /* Secure monitor call (v6Z) */
7642 goto illegal_op
; /* not implemented. */
7644 op
= (insn
>> 20) & 7;
7646 case 0: /* msr cpsr. */
7648 tmp
= load_reg(s
, rn
);
7649 addr
= tcg_const_i32(insn
& 0xff);
7650 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
7655 case 1: /* msr spsr. */
7658 tmp
= load_reg(s
, rn
);
7660 msr_mask(env
, s
, (insn
>> 8) & 0xf, op
== 1),
7664 case 2: /* cps, nop-hint. */
7665 if (((insn
>> 8) & 7) == 0) {
7666 gen_nop_hint(s
, insn
& 0xff);
7668 /* Implemented as NOP in user mode. */
7673 if (insn
& (1 << 10)) {
7674 if (insn
& (1 << 7))
7676 if (insn
& (1 << 6))
7678 if (insn
& (1 << 5))
7680 if (insn
& (1 << 9))
7681 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
7683 if (insn
& (1 << 8)) {
7685 imm
|= (insn
& 0x1f);
7688 gen_set_psr_im(s
, offset
, 0, imm
);
7691 case 3: /* Special control operations. */
7692 op
= (insn
>> 4) & 0xf;
7695 gen_helper_clrex(cpu_env
);
7700 /* These execute as NOPs. */
7708 /* Trivial implementation equivalent to bx. */
7709 tmp
= load_reg(s
, rn
);
7712 case 5: /* Exception return. */
7713 /* Unpredictable in user mode. */
7715 case 6: /* mrs cpsr. */
7718 addr
= tcg_const_i32(insn
& 0xff);
7719 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
7721 gen_helper_cpsr_read(tmp
);
7723 store_reg(s
, rd
, tmp
);
7725 case 7: /* mrs spsr. */
7726 /* Not accessible in user mode. */
7727 if (IS_USER(s
) || IS_M(env
))
7729 tmp
= load_cpu_field(spsr
);
7730 store_reg(s
, rd
, tmp
);
7735 /* Conditional branch. */
7736 op
= (insn
>> 22) & 0xf;
7737 /* Generate a conditional jump to next instruction. */
7738 s
->condlabel
= gen_new_label();
7739 gen_test_cc(op
^ 1, s
->condlabel
);
7742 /* offset[11:1] = insn[10:0] */
7743 offset
= (insn
& 0x7ff) << 1;
7744 /* offset[17:12] = insn[21:16]. */
7745 offset
|= (insn
& 0x003f0000) >> 4;
7746 /* offset[31:20] = insn[26]. */
7747 offset
|= ((int32_t)((insn
<< 5) & 0x80000000)) >> 11;
7748 /* offset[18] = insn[13]. */
7749 offset
|= (insn
& (1 << 13)) << 5;
7750 /* offset[19] = insn[11]. */
7751 offset
|= (insn
& (1 << 11)) << 8;
7753 /* jump to the offset */
7754 gen_jmp(s
, s
->pc
+ offset
);
7757 /* Data processing immediate. */
7758 if (insn
& (1 << 25)) {
7759 if (insn
& (1 << 24)) {
7760 if (insn
& (1 << 20))
7762 /* Bitfield/Saturate. */
7763 op
= (insn
>> 21) & 7;
7765 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
7768 tcg_gen_movi_i32(tmp
, 0);
7770 tmp
= load_reg(s
, rn
);
7773 case 2: /* Signed bitfield extract. */
7775 if (shift
+ imm
> 32)
7778 gen_sbfx(tmp
, shift
, imm
);
7780 case 6: /* Unsigned bitfield extract. */
7782 if (shift
+ imm
> 32)
7785 gen_ubfx(tmp
, shift
, (1u << imm
) - 1);
7787 case 3: /* Bitfield insert/clear. */
7790 imm
= imm
+ 1 - shift
;
7792 tmp2
= load_reg(s
, rd
);
7793 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << imm
) - 1);
7799 default: /* Saturate. */
7802 tcg_gen_sari_i32(tmp
, tmp
, shift
);
7804 tcg_gen_shli_i32(tmp
, tmp
, shift
);
7806 tmp2
= tcg_const_i32(imm
);
7809 if ((op
& 1) && shift
== 0)
7810 gen_helper_usat16(tmp
, tmp
, tmp2
);
7812 gen_helper_usat(tmp
, tmp
, tmp2
);
7815 if ((op
& 1) && shift
== 0)
7816 gen_helper_ssat16(tmp
, tmp
, tmp2
);
7818 gen_helper_ssat(tmp
, tmp
, tmp2
);
7822 store_reg(s
, rd
, tmp
);
7824 imm
= ((insn
& 0x04000000) >> 15)
7825 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
7826 if (insn
& (1 << 22)) {
7827 /* 16-bit immediate. */
7828 imm
|= (insn
>> 4) & 0xf000;
7829 if (insn
& (1 << 23)) {
7831 tmp
= load_reg(s
, rd
);
7832 tcg_gen_ext16u_i32(tmp
, tmp
);
7833 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
7837 tcg_gen_movi_i32(tmp
, imm
);
7840 /* Add/sub 12-bit immediate. */
7842 offset
= s
->pc
& ~(uint32_t)3;
7843 if (insn
& (1 << 23))
7848 tcg_gen_movi_i32(tmp
, offset
);
7850 tmp
= load_reg(s
, rn
);
7851 if (insn
& (1 << 23))
7852 tcg_gen_subi_i32(tmp
, tmp
, imm
);
7854 tcg_gen_addi_i32(tmp
, tmp
, imm
);
7857 store_reg(s
, rd
, tmp
);
7860 int shifter_out
= 0;
7861 /* modified 12-bit immediate. */
7862 shift
= ((insn
& 0x04000000) >> 23) | ((insn
& 0x7000) >> 12);
7863 imm
= (insn
& 0xff);
7866 /* Nothing to do. */
7868 case 1: /* 00XY00XY */
7871 case 2: /* XY00XY00 */
7875 case 3: /* XYXYXYXY */
7879 default: /* Rotated constant. */
7880 shift
= (shift
<< 1) | (imm
>> 7);
7882 imm
= imm
<< (32 - shift
);
7886 gen_op_movl_T1_im(imm
);
7887 rn
= (insn
>> 16) & 0xf;
7889 gen_op_movl_T0_im(0);
7891 gen_movl_T0_reg(s
, rn
);
7892 op
= (insn
>> 21) & 0xf;
7893 if (gen_thumb2_data_op(s
, op
, (insn
& (1 << 20)) != 0,
7896 rd
= (insn
>> 8) & 0xf;
7898 gen_movl_reg_T0(s
, rd
);
7903 case 12: /* Load/store single data item. */
7908 if ((insn
& 0x01100000) == 0x01000000) {
7909 if (disas_neon_ls_insn(env
, s
, insn
))
7917 /* s->pc has already been incremented by 4. */
7918 imm
= s
->pc
& 0xfffffffc;
7919 if (insn
& (1 << 23))
7920 imm
+= insn
& 0xfff;
7922 imm
-= insn
& 0xfff;
7923 tcg_gen_movi_i32(addr
, imm
);
7925 addr
= load_reg(s
, rn
);
7926 if (insn
& (1 << 23)) {
7927 /* Positive offset. */
7929 tcg_gen_addi_i32(addr
, addr
, imm
);
7931 op
= (insn
>> 8) & 7;
7934 case 0: case 8: /* Shifted Register. */
7935 shift
= (insn
>> 4) & 0xf;
7938 tmp
= load_reg(s
, rm
);
7940 tcg_gen_shli_i32(tmp
, tmp
, shift
);
7941 tcg_gen_add_i32(addr
, addr
, tmp
);
7944 case 4: /* Negative offset. */
7945 tcg_gen_addi_i32(addr
, addr
, -imm
);
7947 case 6: /* User privilege. */
7948 tcg_gen_addi_i32(addr
, addr
, imm
);
7951 case 1: /* Post-decrement. */
7954 case 3: /* Post-increment. */
7958 case 5: /* Pre-decrement. */
7961 case 7: /* Pre-increment. */
7962 tcg_gen_addi_i32(addr
, addr
, imm
);
7970 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
7971 if (insn
& (1 << 20)) {
7973 if (rs
== 15 && op
!= 2) {
7976 /* Memory hint. Implemented as NOP. */
7979 case 0: tmp
= gen_ld8u(addr
, user
); break;
7980 case 4: tmp
= gen_ld8s(addr
, user
); break;
7981 case 1: tmp
= gen_ld16u(addr
, user
); break;
7982 case 5: tmp
= gen_ld16s(addr
, user
); break;
7983 case 2: tmp
= gen_ld32(addr
, user
); break;
7984 default: goto illegal_op
;
7989 store_reg(s
, rs
, tmp
);
7996 tmp
= load_reg(s
, rs
);
7998 case 0: gen_st8(tmp
, addr
, user
); break;
7999 case 1: gen_st16(tmp
, addr
, user
); break;
8000 case 2: gen_st32(tmp
, addr
, user
); break;
8001 default: goto illegal_op
;
8005 tcg_gen_addi_i32(addr
, addr
, imm
);
8007 store_reg(s
, rn
, addr
);
8021 static void disas_thumb_insn(CPUState
*env
, DisasContext
*s
)
8023 uint32_t val
, insn
, op
, rm
, rn
, rd
, shift
, cond
;
8030 if (s
->condexec_mask
) {
8031 cond
= s
->condexec_cond
;
8032 s
->condlabel
= gen_new_label();
8033 gen_test_cc(cond
^ 1, s
->condlabel
);
8037 insn
= lduw_code(s
->pc
);
8040 switch (insn
>> 12) {
8043 op
= (insn
>> 11) & 3;
8046 rn
= (insn
>> 3) & 7;
8047 gen_movl_T0_reg(s
, rn
);
8048 if (insn
& (1 << 10)) {
8050 gen_op_movl_T1_im((insn
>> 6) & 7);
8053 rm
= (insn
>> 6) & 7;
8054 gen_movl_T1_reg(s
, rm
);
8056 if (insn
& (1 << 9)) {
8057 if (s
->condexec_mask
)
8058 gen_op_subl_T0_T1();
8060 gen_op_subl_T0_T1_cc();
8062 if (s
->condexec_mask
)
8063 gen_op_addl_T0_T1();
8065 gen_op_addl_T0_T1_cc();
8067 gen_movl_reg_T0(s
, rd
);
8069 /* shift immediate */
8070 rm
= (insn
>> 3) & 7;
8071 shift
= (insn
>> 6) & 0x1f;
8072 tmp
= load_reg(s
, rm
);
8073 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
8074 if (!s
->condexec_mask
)
8076 store_reg(s
, rd
, tmp
);
8080 /* arithmetic large immediate */
8081 op
= (insn
>> 11) & 3;
8082 rd
= (insn
>> 8) & 0x7;
8084 gen_op_movl_T0_im(insn
& 0xff);
8086 gen_movl_T0_reg(s
, rd
);
8087 gen_op_movl_T1_im(insn
& 0xff);
8091 if (!s
->condexec_mask
)
8092 gen_op_logic_T0_cc();
8095 gen_op_subl_T0_T1_cc();
8098 if (s
->condexec_mask
)
8099 gen_op_addl_T0_T1();
8101 gen_op_addl_T0_T1_cc();
8104 if (s
->condexec_mask
)
8105 gen_op_subl_T0_T1();
8107 gen_op_subl_T0_T1_cc();
8111 gen_movl_reg_T0(s
, rd
);
8114 if (insn
& (1 << 11)) {
8115 rd
= (insn
>> 8) & 7;
8116 /* load pc-relative. Bit 1 of PC is ignored. */
8117 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
8118 val
&= ~(uint32_t)2;
8120 tcg_gen_movi_i32(addr
, val
);
8121 tmp
= gen_ld32(addr
, IS_USER(s
));
8123 store_reg(s
, rd
, tmp
);
8126 if (insn
& (1 << 10)) {
8127 /* data processing extended or blx */
8128 rd
= (insn
& 7) | ((insn
>> 4) & 8);
8129 rm
= (insn
>> 3) & 0xf;
8130 op
= (insn
>> 8) & 3;
8133 gen_movl_T0_reg(s
, rd
);
8134 gen_movl_T1_reg(s
, rm
);
8135 gen_op_addl_T0_T1();
8136 gen_movl_reg_T0(s
, rd
);
8139 gen_movl_T0_reg(s
, rd
);
8140 gen_movl_T1_reg(s
, rm
);
8141 gen_op_subl_T0_T1_cc();
8143 case 2: /* mov/cpy */
8144 gen_movl_T0_reg(s
, rm
);
8145 gen_movl_reg_T0(s
, rd
);
8147 case 3:/* branch [and link] exchange thumb register */
8148 tmp
= load_reg(s
, rm
);
8149 if (insn
& (1 << 7)) {
8150 val
= (uint32_t)s
->pc
| 1;
8152 tcg_gen_movi_i32(tmp2
, val
);
8153 store_reg(s
, 14, tmp2
);
8161 /* data processing register */
8163 rm
= (insn
>> 3) & 7;
8164 op
= (insn
>> 6) & 0xf;
8165 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
8166 /* the shift/rotate ops want the operands backwards */
8175 if (op
== 9) /* neg */
8176 gen_op_movl_T0_im(0);
8177 else if (op
!= 0xf) /* mvn doesn't read its first operand */
8178 gen_movl_T0_reg(s
, rd
);
8180 gen_movl_T1_reg(s
, rm
);
8183 gen_op_andl_T0_T1();
8184 if (!s
->condexec_mask
)
8185 gen_op_logic_T0_cc();
8188 gen_op_xorl_T0_T1();
8189 if (!s
->condexec_mask
)
8190 gen_op_logic_T0_cc();
8193 if (s
->condexec_mask
) {
8194 gen_helper_shl(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8196 gen_helper_shl_cc(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8197 gen_op_logic_T1_cc();
8201 if (s
->condexec_mask
) {
8202 gen_helper_shr(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8204 gen_helper_shr_cc(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8205 gen_op_logic_T1_cc();
8209 if (s
->condexec_mask
) {
8210 gen_helper_sar(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8212 gen_helper_sar_cc(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8213 gen_op_logic_T1_cc();
8217 if (s
->condexec_mask
)
8220 gen_op_adcl_T0_T1_cc();
8223 if (s
->condexec_mask
)
8226 gen_op_sbcl_T0_T1_cc();
8229 if (s
->condexec_mask
) {
8230 gen_helper_ror(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8232 gen_helper_ror_cc(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8233 gen_op_logic_T1_cc();
8237 gen_op_andl_T0_T1();
8238 gen_op_logic_T0_cc();
8242 if (s
->condexec_mask
)
8243 tcg_gen_neg_i32(cpu_T
[0], cpu_T
[1]);
8245 gen_op_subl_T0_T1_cc();
8248 gen_op_subl_T0_T1_cc();
8252 gen_op_addl_T0_T1_cc();
8257 if (!s
->condexec_mask
)
8258 gen_op_logic_T0_cc();
8261 gen_op_mull_T0_T1();
8262 if (!s
->condexec_mask
)
8263 gen_op_logic_T0_cc();
8266 gen_op_bicl_T0_T1();
8267 if (!s
->condexec_mask
)
8268 gen_op_logic_T0_cc();
8272 if (!s
->condexec_mask
)
8273 gen_op_logic_T1_cc();
8280 gen_movl_reg_T1(s
, rm
);
8282 gen_movl_reg_T0(s
, rd
);
8287 /* load/store register offset. */
8289 rn
= (insn
>> 3) & 7;
8290 rm
= (insn
>> 6) & 7;
8291 op
= (insn
>> 9) & 7;
8292 addr
= load_reg(s
, rn
);
8293 tmp
= load_reg(s
, rm
);
8294 tcg_gen_add_i32(addr
, addr
, tmp
);
8297 if (op
< 3) /* store */
8298 tmp
= load_reg(s
, rd
);
8302 gen_st32(tmp
, addr
, IS_USER(s
));
8305 gen_st16(tmp
, addr
, IS_USER(s
));
8308 gen_st8(tmp
, addr
, IS_USER(s
));
8311 tmp
= gen_ld8s(addr
, IS_USER(s
));
8314 tmp
= gen_ld32(addr
, IS_USER(s
));
8317 tmp
= gen_ld16u(addr
, IS_USER(s
));
8320 tmp
= gen_ld8u(addr
, IS_USER(s
));
8323 tmp
= gen_ld16s(addr
, IS_USER(s
));
8326 if (op
>= 3) /* load */
8327 store_reg(s
, rd
, tmp
);
8332 /* load/store word immediate offset */
8334 rn
= (insn
>> 3) & 7;
8335 addr
= load_reg(s
, rn
);
8336 val
= (insn
>> 4) & 0x7c;
8337 tcg_gen_addi_i32(addr
, addr
, val
);
8339 if (insn
& (1 << 11)) {
8341 tmp
= gen_ld32(addr
, IS_USER(s
));
8342 store_reg(s
, rd
, tmp
);
8345 tmp
= load_reg(s
, rd
);
8346 gen_st32(tmp
, addr
, IS_USER(s
));
8352 /* load/store byte immediate offset */
8354 rn
= (insn
>> 3) & 7;
8355 addr
= load_reg(s
, rn
);
8356 val
= (insn
>> 6) & 0x1f;
8357 tcg_gen_addi_i32(addr
, addr
, val
);
8359 if (insn
& (1 << 11)) {
8361 tmp
= gen_ld8u(addr
, IS_USER(s
));
8362 store_reg(s
, rd
, tmp
);
8365 tmp
= load_reg(s
, rd
);
8366 gen_st8(tmp
, addr
, IS_USER(s
));
8372 /* load/store halfword immediate offset */
8374 rn
= (insn
>> 3) & 7;
8375 addr
= load_reg(s
, rn
);
8376 val
= (insn
>> 5) & 0x3e;
8377 tcg_gen_addi_i32(addr
, addr
, val
);
8379 if (insn
& (1 << 11)) {
8381 tmp
= gen_ld16u(addr
, IS_USER(s
));
8382 store_reg(s
, rd
, tmp
);
8385 tmp
= load_reg(s
, rd
);
8386 gen_st16(tmp
, addr
, IS_USER(s
));
8392 /* load/store from stack */
8393 rd
= (insn
>> 8) & 7;
8394 addr
= load_reg(s
, 13);
8395 val
= (insn
& 0xff) * 4;
8396 tcg_gen_addi_i32(addr
, addr
, val
);
8398 if (insn
& (1 << 11)) {
8400 tmp
= gen_ld32(addr
, IS_USER(s
));
8401 store_reg(s
, rd
, tmp
);
8404 tmp
= load_reg(s
, rd
);
8405 gen_st32(tmp
, addr
, IS_USER(s
));
8411 /* add to high reg */
8412 rd
= (insn
>> 8) & 7;
8413 if (insn
& (1 << 11)) {
8415 tmp
= load_reg(s
, 13);
8417 /* PC. bit 1 is ignored. */
8419 tcg_gen_movi_i32(tmp
, (s
->pc
+ 2) & ~(uint32_t)2);
8421 val
= (insn
& 0xff) * 4;
8422 tcg_gen_addi_i32(tmp
, tmp
, val
);
8423 store_reg(s
, rd
, tmp
);
8428 op
= (insn
>> 8) & 0xf;
8431 /* adjust stack pointer */
8432 tmp
= load_reg(s
, 13);
8433 val
= (insn
& 0x7f) * 4;
8434 if (insn
& (1 << 7))
8435 val
= -(int32_t)val
;
8436 tcg_gen_addi_i32(tmp
, tmp
, val
);
8437 store_reg(s
, 13, tmp
);
8440 case 2: /* sign/zero extend. */
8443 rm
= (insn
>> 3) & 7;
8444 tmp
= load_reg(s
, rm
);
8445 switch ((insn
>> 6) & 3) {
8446 case 0: gen_sxth(tmp
); break;
8447 case 1: gen_sxtb(tmp
); break;
8448 case 2: gen_uxth(tmp
); break;
8449 case 3: gen_uxtb(tmp
); break;
8451 store_reg(s
, rd
, tmp
);
8453 case 4: case 5: case 0xc: case 0xd:
8455 addr
= load_reg(s
, 13);
8456 if (insn
& (1 << 8))
8460 for (i
= 0; i
< 8; i
++) {
8461 if (insn
& (1 << i
))
8464 if ((insn
& (1 << 11)) == 0) {
8465 tcg_gen_addi_i32(addr
, addr
, -offset
);
8467 for (i
= 0; i
< 8; i
++) {
8468 if (insn
& (1 << i
)) {
8469 if (insn
& (1 << 11)) {
8471 tmp
= gen_ld32(addr
, IS_USER(s
));
8472 store_reg(s
, i
, tmp
);
8475 tmp
= load_reg(s
, i
);
8476 gen_st32(tmp
, addr
, IS_USER(s
));
8478 /* advance to the next address. */
8479 tcg_gen_addi_i32(addr
, addr
, 4);
8483 if (insn
& (1 << 8)) {
8484 if (insn
& (1 << 11)) {
8486 tmp
= gen_ld32(addr
, IS_USER(s
));
8487 /* don't set the pc until the rest of the instruction
8491 tmp
= load_reg(s
, 14);
8492 gen_st32(tmp
, addr
, IS_USER(s
));
8494 tcg_gen_addi_i32(addr
, addr
, 4);
8496 if ((insn
& (1 << 11)) == 0) {
8497 tcg_gen_addi_i32(addr
, addr
, -offset
);
8499 /* write back the new stack pointer */
8500 store_reg(s
, 13, addr
);
8501 /* set the new PC value */
8502 if ((insn
& 0x0900) == 0x0900)
8506 case 1: case 3: case 9: case 11: /* czb */
8508 tmp
= load_reg(s
, rm
);
8509 s
->condlabel
= gen_new_label();
8511 if (insn
& (1 << 11))
8512 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, s
->condlabel
);
8514 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, s
->condlabel
);
8516 offset
= ((insn
& 0xf8) >> 2) | (insn
& 0x200) >> 3;
8517 val
= (uint32_t)s
->pc
+ 2;
8522 case 15: /* IT, nop-hint. */
8523 if ((insn
& 0xf) == 0) {
8524 gen_nop_hint(s
, (insn
>> 4) & 0xf);
8528 s
->condexec_cond
= (insn
>> 4) & 0xe;
8529 s
->condexec_mask
= insn
& 0x1f;
8530 /* No actual code generated for this insn, just setup state. */
8533 case 0xe: /* bkpt */
8534 gen_set_condexec(s
);
8535 gen_set_pc_im(s
->pc
- 2);
8536 gen_exception(EXCP_BKPT
);
8537 s
->is_jmp
= DISAS_JUMP
;
8542 rn
= (insn
>> 3) & 0x7;
8544 tmp
= load_reg(s
, rn
);
8545 switch ((insn
>> 6) & 3) {
8546 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
8547 case 1: gen_rev16(tmp
); break;
8548 case 3: gen_revsh(tmp
); break;
8549 default: goto illegal_op
;
8551 store_reg(s
, rd
, tmp
);
8559 tmp
= tcg_const_i32((insn
& (1 << 4)) != 0);
8562 addr
= tcg_const_i32(16);
8563 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8567 addr
= tcg_const_i32(17);
8568 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8572 if (insn
& (1 << 4))
8573 shift
= CPSR_A
| CPSR_I
| CPSR_F
;
8576 gen_set_psr_im(s
, shift
, 0, ((insn
& 7) << 6) & shift
);
8586 /* load/store multiple */
8587 rn
= (insn
>> 8) & 0x7;
8588 addr
= load_reg(s
, rn
);
8589 for (i
= 0; i
< 8; i
++) {
8590 if (insn
& (1 << i
)) {
8591 if (insn
& (1 << 11)) {
8593 tmp
= gen_ld32(addr
, IS_USER(s
));
8594 store_reg(s
, i
, tmp
);
8597 tmp
= load_reg(s
, i
);
8598 gen_st32(tmp
, addr
, IS_USER(s
));
8600 /* advance to the next address */
8601 tcg_gen_addi_i32(addr
, addr
, 4);
8604 /* Base register writeback. */
8605 if ((insn
& (1 << rn
)) == 0) {
8606 store_reg(s
, rn
, addr
);
8613 /* conditional branch or swi */
8614 cond
= (insn
>> 8) & 0xf;
8620 gen_set_condexec(s
);
8621 gen_set_pc_im(s
->pc
);
8622 s
->is_jmp
= DISAS_SWI
;
8625 /* generate a conditional jump to next instruction */
8626 s
->condlabel
= gen_new_label();
8627 gen_test_cc(cond
^ 1, s
->condlabel
);
8630 /* jump to the offset */
8631 val
= (uint32_t)s
->pc
+ 2;
8632 offset
= ((int32_t)insn
<< 24) >> 24;
8638 if (insn
& (1 << 11)) {
8639 if (disas_thumb2_insn(env
, s
, insn
))
8643 /* unconditional branch */
8644 val
= (uint32_t)s
->pc
;
8645 offset
= ((int32_t)insn
<< 21) >> 21;
8646 val
+= (offset
<< 1) + 2;
8651 if (disas_thumb2_insn(env
, s
, insn
))
8657 gen_set_condexec(s
);
8658 gen_set_pc_im(s
->pc
- 4);
8659 gen_exception(EXCP_UDEF
);
8660 s
->is_jmp
= DISAS_JUMP
;
8664 gen_set_condexec(s
);
8665 gen_set_pc_im(s
->pc
- 2);
8666 gen_exception(EXCP_UDEF
);
8667 s
->is_jmp
= DISAS_JUMP
;
8670 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8671 basic block 'tb'. If search_pc is TRUE, also generate PC
8672 information for each intermediate instruction. */
8673 static inline void gen_intermediate_code_internal(CPUState
*env
,
8674 TranslationBlock
*tb
,
8677 DisasContext dc1
, *dc
= &dc1
;
8679 uint16_t *gen_opc_end
;
8681 target_ulong pc_start
;
8682 uint32_t next_page_start
;
8686 /* generate intermediate code */
8693 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
8695 dc
->is_jmp
= DISAS_NEXT
;
8697 dc
->singlestep_enabled
= env
->singlestep_enabled
;
8699 dc
->thumb
= env
->thumb
;
8700 dc
->condexec_mask
= (env
->condexec_bits
& 0xf) << 1;
8701 dc
->condexec_cond
= env
->condexec_bits
>> 4;
8702 #if !defined(CONFIG_USER_ONLY)
8704 dc
->user
= ((env
->v7m
.exception
== 0) && (env
->v7m
.control
& 1));
8706 dc
->user
= (env
->uncached_cpsr
& 0x1f) == ARM_CPU_MODE_USR
;
8709 cpu_F0s
= tcg_temp_new_i32();
8710 cpu_F1s
= tcg_temp_new_i32();
8711 cpu_F0d
= tcg_temp_new_i64();
8712 cpu_F1d
= tcg_temp_new_i64();
8715 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8716 cpu_M0
= tcg_temp_new_i64();
8717 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
8720 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
8722 max_insns
= CF_COUNT_MASK
;
8725 /* Reset the conditional execution bits immediately. This avoids
8726 complications trying to do it at the end of the block. */
8727 if (env
->condexec_bits
)
8729 TCGv tmp
= new_tmp();
8730 tcg_gen_movi_i32(tmp
, 0);
8731 store_cpu_field(tmp
, condexec_bits
);
8734 #ifdef CONFIG_USER_ONLY
8735 /* Intercept jump to the magic kernel page. */
8736 if (dc
->pc
>= 0xffff0000) {
8737 /* We always get here via a jump, so know we are not in a
8738 conditional execution block. */
8739 gen_exception(EXCP_KERNEL_TRAP
);
8740 dc
->is_jmp
= DISAS_UPDATE
;
8744 if (dc
->pc
>= 0xfffffff0 && IS_M(env
)) {
8745 /* We always get here via a jump, so know we are not in a
8746 conditional execution block. */
8747 gen_exception(EXCP_EXCEPTION_EXIT
);
8748 dc
->is_jmp
= DISAS_UPDATE
;
8753 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
8754 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
8755 if (bp
->pc
== dc
->pc
) {
8756 gen_set_condexec(dc
);
8757 gen_set_pc_im(dc
->pc
);
8758 gen_exception(EXCP_DEBUG
);
8759 dc
->is_jmp
= DISAS_JUMP
;
8760 /* Advance PC so that clearing the breakpoint will
8761 invalidate this TB. */
8763 goto done_generating
;
8769 j
= gen_opc_ptr
- gen_opc_buf
;
8773 gen_opc_instr_start
[lj
++] = 0;
8775 gen_opc_pc
[lj
] = dc
->pc
;
8776 gen_opc_instr_start
[lj
] = 1;
8777 gen_opc_icount
[lj
] = num_insns
;
8780 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
8784 disas_thumb_insn(env
, dc
);
8785 if (dc
->condexec_mask
) {
8786 dc
->condexec_cond
= (dc
->condexec_cond
& 0xe)
8787 | ((dc
->condexec_mask
>> 4) & 1);
8788 dc
->condexec_mask
= (dc
->condexec_mask
<< 1) & 0x1f;
8789 if (dc
->condexec_mask
== 0) {
8790 dc
->condexec_cond
= 0;
8794 disas_arm_insn(env
, dc
);
8797 fprintf(stderr
, "Internal resource leak before %08x\n", dc
->pc
);
8801 if (dc
->condjmp
&& !dc
->is_jmp
) {
8802 gen_set_label(dc
->condlabel
);
8805 /* Translation stops when a conditional branch is encountered.
8806 * Otherwise the subsequent code could get translated several times.
8807 * Also stop translation when a page boundary is reached. This
8808 * ensures prefetch aborts occur at the right place. */
8810 } while (!dc
->is_jmp
&& gen_opc_ptr
< gen_opc_end
&&
8811 !env
->singlestep_enabled
&&
8813 dc
->pc
< next_page_start
&&
8814 num_insns
< max_insns
);
8816 if (tb
->cflags
& CF_LAST_IO
) {
8818 /* FIXME: This can theoretically happen with self-modifying
8820 cpu_abort(env
, "IO on conditional branch instruction");
8825 /* At this stage dc->condjmp will only be set when the skipped
8826 instruction was a conditional branch or trap, and the PC has
8827 already been written. */
8828 if (unlikely(env
->singlestep_enabled
)) {
8829 /* Make sure the pc is updated, and raise a debug exception. */
8831 gen_set_condexec(dc
);
8832 if (dc
->is_jmp
== DISAS_SWI
) {
8833 gen_exception(EXCP_SWI
);
8835 gen_exception(EXCP_DEBUG
);
8837 gen_set_label(dc
->condlabel
);
8839 if (dc
->condjmp
|| !dc
->is_jmp
) {
8840 gen_set_pc_im(dc
->pc
);
8843 gen_set_condexec(dc
);
8844 if (dc
->is_jmp
== DISAS_SWI
&& !dc
->condjmp
) {
8845 gen_exception(EXCP_SWI
);
8847 /* FIXME: Single stepping a WFI insn will not halt
8849 gen_exception(EXCP_DEBUG
);
8852 /* While branches must always occur at the end of an IT block,
8853 there are a few other things that can cause us to terminate
8854 the TB in the middel of an IT block:
8855 - Exception generating instructions (bkpt, swi, undefined).
8857 - Hardware watchpoints.
8858 Hardware breakpoints have already been handled and skip this code.
8860 gen_set_condexec(dc
);
8861 switch(dc
->is_jmp
) {
8863 gen_goto_tb(dc
, 1, dc
->pc
);
8868 /* indicate that the hash table must be used to find the next TB */
8872 /* nothing more to generate */
8878 gen_exception(EXCP_SWI
);
8882 gen_set_label(dc
->condlabel
);
8883 gen_set_condexec(dc
);
8884 gen_goto_tb(dc
, 1, dc
->pc
);
8890 gen_icount_end(tb
, num_insns
);
8891 *gen_opc_ptr
= INDEX_op_end
;
8894 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
8895 qemu_log("----------------\n");
8896 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
8897 log_target_disas(pc_start
, dc
->pc
- pc_start
, env
->thumb
);
8902 j
= gen_opc_ptr
- gen_opc_buf
;
8905 gen_opc_instr_start
[lj
++] = 0;
8907 tb
->size
= dc
->pc
- pc_start
;
8908 tb
->icount
= num_insns
;
8912 void gen_intermediate_code(CPUState
*env
, TranslationBlock
*tb
)
8914 gen_intermediate_code_internal(env
, tb
, 0);
8917 void gen_intermediate_code_pc(CPUState
*env
, TranslationBlock
*tb
)
8919 gen_intermediate_code_internal(env
, tb
, 1);
8922 static const char *cpu_mode_names
[16] = {
8923 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8924 "???", "???", "???", "und", "???", "???", "???", "sys"
8927 void cpu_dump_state(CPUState
*env
, FILE *f
,
8928 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...),
8938 /* ??? This assumes float64 and double have the same layout.
8939 Oh well, it's only debug dumps. */
8948 cpu_fprintf(f
, "R%02d=%08x", i
, env
->regs
[i
]);
8950 cpu_fprintf(f
, "\n");
8952 cpu_fprintf(f
, " ");
8954 psr
= cpsr_read(env
);
8955 cpu_fprintf(f
, "PSR=%08x %c%c%c%c %c %s%d\n",
8957 psr
& (1 << 31) ? 'N' : '-',
8958 psr
& (1 << 30) ? 'Z' : '-',
8959 psr
& (1 << 29) ? 'C' : '-',
8960 psr
& (1 << 28) ? 'V' : '-',
8961 psr
& CPSR_T
? 'T' : 'A',
8962 cpu_mode_names
[psr
& 0xf], (psr
& 0x10) ? 32 : 26);
8965 for (i
= 0; i
< 16; i
++) {
8966 d
.d
= env
->vfp
.regs
[i
];
8970 cpu_fprintf(f
, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
8971 i
* 2, (int)s0
.i
, s0
.s
,
8972 i
* 2 + 1, (int)s1
.i
, s1
.s
,
8973 i
, (int)(uint32_t)d
.l
.upper
, (int)(uint32_t)d
.l
.lower
,
8976 cpu_fprintf(f
, "FPSCR: %08x\n", (int)env
->vfp
.xregs
[ARM_VFP_FPSCR
]);
8980 void gen_pc_load(CPUState
*env
, TranslationBlock
*tb
,
8981 unsigned long searched_pc
, int pc_pos
, void *puc
)
8983 env
->regs
[15] = gen_opc_pc
[pc_pos
];