/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
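
/* Commentary added during cleanup (not in the original source): ARCH(x)
   assumes an `illegal_op` label in the decoder function from which it is
   used; a failed feature check branches there to raise an undefined
   instruction exception. */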
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
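
/* Commentary added during cleanup (not in the original source): after the
   mask, var holds the raw width-bit field.  The xor/sub pair sign-extends
   it: if the sign bit is clear, (v ^ signbit) - signbit == v; if it is set,
   the subtraction borrows through the upper bits.  E.g. width = 4,
   v = 0b1010: (0b1010 ^ 0b1000) - 0b1000 = 2 - 8 = -6, the correct signed
   value. */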
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
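
/* Commentary added during cleanup (not in the original source): clearing
   bit 15 of both operands guarantees that the low-halfword addition can
   never carry into the high halfword.  The xor with the saved term then
   restores bit 15 of the result to t0^t1^carry-in, i.e. the correct sum
   bit, while the carry that would have crossed lanes is discarded. */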
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}
/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}
/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
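
/* Commentary added during cleanup (not in the original source): with a
   register-specified shift the amount is only known at run time, so the
   work is done by helpers; the *_cc variants also compute the shifter
   carry-out as a side effect, which is why the flag-setting and
   non-flag-setting paths diverge here. */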
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}
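
/* Commentary added during cleanup (not in the original source): the flag
   storage convention can be read off the cases above.  env->ZF holds a
   value that is zero exactly when Z is set, NF and VF keep their flag in
   bit 31, and CF holds 0 or 1.  Hence "eq" is a compare of ZF with zero,
   and the signed conditions need only an xor of NF and VF. */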
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = insn & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}
static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}
static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
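
/* Commentary added during cleanup (not in the original source): the VFP
   register file is stored as an array of 64-bit values, so single
   precision register 2n lives in the lower half of double register n and
   register 2n+1 in the upper half; the CPU_DoubleU offsets select the
   correct word independently of host endianness. */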
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT   (1 << 20)
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
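
/* Commentary added during cleanup (not in the original source): each
   IWMMXT_OP_ENV_SIZE(name) invocation below expands to three generator
   functions, e.g. IWMMXT_OP_ENV_SIZE(adds) defines
   gen_op_iwmmxt_addsb_M0_wRn(), gen_op_iwmmxt_addsw_M0_wRn() and
   gen_op_iwmmxt_addsl_M0_wRn() for the 8-, 16- and 32-bit element sizes. */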
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1299 TCGv tmp
= tcg_temp_new_i32();
1300 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1301 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
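
/* Commentary added during cleanup (not in the original source): the three
   branches above implement the coprocessor addressing modes: pre-indexed
   (bit 24, with write-back if bit 21 is also set), post-indexed (bit 21
   without bit 24), and the plain offset form, for which only the "add"
   variant (bit 23 set) is a valid encoding; hence the final return 1. */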
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0: /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8: /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
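/* Note on the MRA path above: the XScale DSP accumulator is 40 bits wide,
 * so after splitting it into rdlo/rdhi the high word is masked with
 * (1 << (40 - 32)) - 1 == 0xff, keeping only accumulator bits [39:32].
 */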
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;

    if (insn & ARM_CP_RW_BIT) {
        if (!env->cp[cp].cp_read)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(insn);
        gen_helper_get_cp(tmp, cpu_env, tmp2);
        tcg_temp_free(tmp2);
        store_reg(s, rd, tmp);
    } else {
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        tmp2 = tcg_const_i32(insn);
        gen_helper_set_cp(cpu_env, tmp2, tmp);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
    }
    return 0;
}
static int cp15_user_ok(CPUState *env, uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
        /* Performance monitor registers fall into three categories:
         *  (a) always UNDEF in usermode
         *  (b) UNDEF only if PMUSERENR.EN is 0
         *  (c) always read OK and UNDEF on write (PMUSERENR only)
         */
        if ((cpm == 12 && (op < 6)) ||
            (cpm == 13 && (op < 3))) {
            return env->cp15.c9_pmuserenr;
        } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
            /* PMUSERENR, read only */
            return 1;
        }
        return 0;
    }

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    return 0;
}
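/* In cp15_user_ok() above, "op" packs the coprocessor opcode fields as
 * (opc1 << 3) | opc2: opc2 comes from insn[7:5] and opc1 from insn[23:21],
 * so e.g. the c13,c0 TLS registers are op values 2..4 with opc1 == 0.
 */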
static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
    TCGv tmp;

    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            return 0;
        }
        store_reg(s, rd, tmp);
    } else {
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            tcg_temp_free_i32(tmp);
            return 0;
        }
    }
    return 1;
}
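/* Return convention used above: cp15_tls_load_store() returns 1 when it
 * fully handled a TLS register access and 0 when the access was not a TLS
 * access, in which case the caller falls back to the generic cp15 helpers.
 */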
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }

    /* We special case a number of cp15 instructions which were used
     * for things which are real instructions in ARMv7. This allows
     * them to work in linux-user mode which doesn't provide functional
     * get_cp15/set_cp15 helpers, and is more efficient anyway.
     */
    switch ((insn & 0x0fff0fff)) {
    case 0x0e070f90:
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (IS_USER(s)) {
            return 1;
        }
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    case 0x0e070f58:
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise continue to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
        break;
    case 0x0e070f3d:
        /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */
        if (arm_feature(env, ARM_FEATURE_V6)) {
            return IS_USER(s) ? 1 : 0;
        }
        break;
    case 0x0e070f95: /* 0,c7,c5,4 : ISB */
    case 0x0e070f9a: /* 0,c7,c10,4: DSB */
    case 0x0e070fba: /* 0,c7,c10,5: DMB */
        /* Barriers in both v6 and v7 */
        if (arm_feature(env, ARM_FEATURE_V6)) {
            return 0;
        }
        break;
    default:
        break;
    }

    if (IS_USER(s) && !cp15_user_ok(env, insn)) {
        return 1;
    }

    rd = (insn >> 12) & 0xf;

    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
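/* Worked example of the decoding above: for a single-precision destination,
 * VFP_SREG_D(insn) builds Sd = (insn[15:12] << 1) | insn[22], so bits
 * [15:12] = 0x3 with bit 22 set names S7.  The VFP_DREG variants instead
 * use the extra bit as register bit 4 (D16-D31) when VFP3 is present, and
 * UNDEF when it is set on a VFP2-only core.
 */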
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
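/* For example, gen_neon_dup_u8 turns 0x000000ab into 0xabababab: the byte
 * is zero-extended, OR-ed with itself shifted left by 8, and the resulting
 * halfword is then OR-ed with itself shifted left by 16.
 */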
static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv tmp;
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xff);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
                    /* VCVT from int is always from S reg regardless of dp bit.
                     * VCVT with immediate frac_bits has same format as SREG_M
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;
            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension,
                     * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
                     */
                    if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32 */
                        tmp = tcg_temp_new_i32();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32 */
                        tmp = tcg_temp_new_i32();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }
                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
                    /* VCVT double to int: always integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
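/* A sketch of why the TB pointer is combined with "n" above: the value
 * returned by tcg_gen_exit_tb() carries the jump-slot index in its low
 * bits, telling the execution loop which of the (up to two) goto_tb slots
 * of the exiting TB to patch.  That is what makes direct block chaining
 * possible when the branch target stays on the same guest page.
 */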
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
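/* gen_mulxy() implements the SMULxy-style 16x16->32 multiply: x and y
 * select the top (arithmetic shift right by 16) or bottom (sign-extend)
 * halfword of each operand before the 32-bit multiply, so e.g. SMULTB
 * takes the top half of t0 and the bottom half of t1.
 */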
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V4T))
        mask &= ~CPSR_T;
    if (!arm_feature(env, ARM_FEATURE_V5))
        mask &= ~CPSR_Q; /* V5TE in reality*/
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
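/* The flags argument above mirrors the MSR field mask: bit 0 selects the
 * control byte (mask 0xff), bit 1 the extension byte (0xff00), bit 2 the
 * status byte (0xff0000) and bit 3 the flags byte (0xff000000).
 */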
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}
static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
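/* For instance, GEN_NEON_INTEGER_OP(hadd) expands to a switch on
 * (size << 1) | u that calls one of gen_helper_neon_hadd_{s8,u8,s16,u16,
 * s32,u32}(tmp, tmp, tmp2), so a single macro invocation covers all six
 * signed/unsigned element widths.
 */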
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
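/* Post-indexed writeback, as reconstructed above: rm == 15 means no
 * writeback, rm == 13 adds the fixed transfer size ("stride") to rn, and
 * any other rm adds that register's value, matching the usual NEON
 * load/store addressing rules.
 */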
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
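/* The select above computes dest = (t & c) | (f & ~c) bitwise: each result
 * bit comes from t where the corresponding bit of c is 1 and from f where
 * it is 0, which is exactly the VBSL/VBIT/VBIF primitive.
 */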
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            op >= NEON_2RM_VRECPE_F);
}
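/* This relies on the NEON_2RM_* values being ordered so that everything
 * from NEON_2RM_VRECPE_F (58) upwards goes through the single-precision
 * float path: VRECPE.F32, VRSQRTE.F32 and the four VCVT forms.
 */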
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
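/* Example of reading the table: [NEON_2RM_VREV64] = 0x7 allows sizes 0, 1
 * and 2 (bits 0-2 set), the float-only entries use 0x4 (size 2 only), and
 * VREV16 is byte-only with 0x1 (size 0).
 */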
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    uint32_t imm, mask;
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        if (size == 3 && op != NEON_3R_LOGIC) {
            /* 64-bit element instructions. */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VQADD:
                    if (u) {
                        gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VQSUB:
                    if (u) {
                        gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VADD_VSUB:
                    if (u) {
                        tcg_gen_sub_i64(CPU_V001);
                    } else {
                        tcg_gen_add_i64(CPU_V001);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
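        /* In the 64-bit shift cases above the helper arguments look swapped
         * on purpose: the value shifted is Vm (cpu_V1) and the shift count
         * is taken from Vn (cpu_V0), since NEON register-shift instructions
         * compute Vd = Vm shifted by Vn.
         */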
        pairwise = 0;
        switch (op) {
        case NEON_3R_VSHL:
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
            }
            break;
        case NEON_3R_VPADD:
            if (u) {
                return 1;
            }
            /* Fall through */
        case NEON_3R_VPMAX:
        case NEON_3R_VPMIN:
            pairwise = 1;
            break;
        case NEON_3R_FLOAT_ARITH:
            pairwise = (u && size < 2); /* if VPADD (float) */
            break;
        case NEON_3R_FLOAT_MINMAX:
            pairwise = u; /* if VPMIN/VPMAX (float) */
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u && size) {
                /* no encoding for U=0 C=1x */
                return 1;
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (!u) {
                return 1;
            }
            break;
        case NEON_3R_VRECPS_VRSQRTS:
            if (u) {
                return 1;
            }
            break;
        case NEON_3R_VMUL:
            if (u && (size != 0)) {
                /* UNDEF on invalid size for polynomial subcase */
                return 1;
            }
            break;
        default:
            break;
        }

        if (pairwise && q) {
            /* All the pairwise insns UNDEF if Q is set */
            return 1;
        }

        for (pass = 0; pass < (q ? 4 : 2); pass++) {
        if (pairwise) {
            /* Pairwise.  */
            if (pass < 1) {
                tmp = neon_load_reg(rn, 0);
                tmp2 = neon_load_reg(rn, 1);
            } else {
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
            }
        } else {
            /* Elementwise.  */
            tmp = neon_load_reg(rn, pass);
            tmp2 = neon_load_reg(rm, pass);
        }
        switch (op) {
4763 case NEON_3R_VRHADD
:
4764 GEN_NEON_INTEGER_OP(rhadd
);
4766 case NEON_3R_LOGIC
: /* Logic ops. */
4767 switch ((u
<< 2) | size
) {
4769 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
4772 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
4775 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4778 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
4781 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
4784 tmp3
= neon_load_reg(rd
, pass
);
4785 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
4786 tcg_temp_free_i32(tmp3
);
4789 tmp3
= neon_load_reg(rd
, pass
);
4790 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
4791 tcg_temp_free_i32(tmp3
);
4794 tmp3
= neon_load_reg(rd
, pass
);
4795 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
4796 tcg_temp_free_i32(tmp3
);
4801 GEN_NEON_INTEGER_OP(hsub
);
4804 GEN_NEON_INTEGER_OP_ENV(qsub
);
4807 GEN_NEON_INTEGER_OP(cgt
);
4810 GEN_NEON_INTEGER_OP(cge
);
4813 GEN_NEON_INTEGER_OP(shl
);
4816 GEN_NEON_INTEGER_OP_ENV(qshl
);
4819 GEN_NEON_INTEGER_OP(rshl
);
4821 case NEON_3R_VQRSHL
:
4822 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4825 GEN_NEON_INTEGER_OP(max
);
4828 GEN_NEON_INTEGER_OP(min
);
4831 GEN_NEON_INTEGER_OP(abd
);
4834 GEN_NEON_INTEGER_OP(abd
);
4835 tcg_temp_free_i32(tmp2
);
4836 tmp2
= neon_load_reg(rd
, pass
);
4837 gen_neon_add(size
, tmp
, tmp2
);
4839 case NEON_3R_VADD_VSUB
:
4840 if (!u
) { /* VADD */
4841 gen_neon_add(size
, tmp
, tmp2
);
4844 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
4845 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
4846 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
4851 case NEON_3R_VTST_VCEQ
:
4852 if (!u
) { /* VTST */
4854 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
4855 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
4856 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
4861 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
4862 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
4863 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
4868 case NEON_3R_VML
: /* VMLA, VMLAL, VMLS,VMLSL */
4870 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4871 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4872 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4875 tcg_temp_free_i32(tmp2
);
4876 tmp2
= neon_load_reg(rd
, pass
);
4878 gen_neon_rsb(size
, tmp
, tmp2
);
4880 gen_neon_add(size
, tmp
, tmp2
);
4884 if (u
) { /* polynomial */
4885 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
4886 } else { /* Integer */
4888 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4889 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4890 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4896 GEN_NEON_INTEGER_OP(pmax
);
4899 GEN_NEON_INTEGER_OP(pmin
);
4901 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
4902 if (!u
) { /* VQDMULH */
4905 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4908 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4912 } else { /* VQRDMULH */
4915 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4918 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4926 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
4927 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
4928 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
        case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
        {
            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
            switch ((u << 2) | size) {
            case 0: /* VADD */
                gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                break;
            case 2: /* VSUB */
                gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
                break;
            case 4: /* VPADD */
                gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                break;
            case 6: /* VABD */
                gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
                break;
            default:
                abort();
            }
            tcg_temp_free_ptr(fpstatus);
            break;
        }
        case NEON_3R_FLOAT_MULTIPLY:
        {
            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
            gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
            if (!u) {
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                if (size == 0) {
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                }
            }
            tcg_temp_free_ptr(fpstatus);
            break;
        }
        case NEON_3R_FLOAT_CMP:
        {
            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
            if (!u) {
                gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
            } else {
                if (size == 0) {
                    gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                }
            }
            tcg_temp_free_ptr(fpstatus);
            break;
        }
        case NEON_3R_FLOAT_ACMP:
        {
            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
            if (size == 0) {
                gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
            } else {
                gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
            }
            tcg_temp_free_ptr(fpstatus);
            break;
        }
        case NEON_3R_FLOAT_MINMAX:
        {
            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
            if (size == 0) {
                gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
            } else {
                gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
            }
            tcg_temp_free_ptr(fpstatus);
            break;
        }
        case NEON_3R_VRECPS_VRSQRTS:
            if (size == 0)
                gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
            else
                gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
            break;
        default:
            abort();
        }
        tcg_temp_free_i32(tmp2);

        /* Save the result.  For elementwise operations we can put it
           straight into the destination register.  For pairwise operations
           we have to be careful to avoid clobbering the source operands.  */
        if (pairwise && rd == rm) {
            neon_store_scratch(pass, tmp);
        } else {
            neon_store_reg(rd, pass, tmp);
        }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift. */
                if (op > 7) {
                    return 1;
                }
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                if (q && ((rd | rm) & 1)) {
                    return 1;
                }
                if (!u && (op == 4 || op == 6)) {
                    return 1;
                }
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4)
                    shift = shift - (1 << (size + 3));
                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }
                switch (size) {
                case 0:
                    imm = (uint8_t) shift;
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                case 1:
                    imm = (uint16_t) shift;
                    imm |= imm << 16;
                    break;
                case 2:
                case 3:
                    imm = shift;
                    break;
                default:
                    abort();
                }

                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 0:  /* VSHR */
                        case 1:  /* VSRA */
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 4: /* VSRI */
                        case 5: /* VSHL, VSLI */
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHLU */
                            gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
                                                      cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHL */
                            if (u) {
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            }
                            break;
                        }
                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            uint64_t mask64;
                            neon_load_reg64(cpu_V1, rd + pass);
                            if (shift < -63 || shift > 63) {
                                mask64 = 0;
                            } else {
                                if (op == 4) {
                                    mask64 = 0xffffffffffffffffull >> -shift;
                                } else {
                                    mask64 = 0xffffffffffffffffull << shift;
                                }
                            }
                            tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask64);
                            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1.  */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 0:  /* VSHR */
                        case 1:  /* VSRA */
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 4: /* VSRI */
                        case 5: /* VSHL, VSLI */
                            switch (size) {
                            case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                            break;
                        case 6: /* VQSHLU */
                            switch (size) {
                            case 0:
                                gen_helper_neon_qshlu_s8(tmp, cpu_env,
                                                         tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_qshlu_s16(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            case 2:
                                gen_helper_neon_qshlu_s32(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            default:
                                abort();
                            }
                            break;
                        case 7: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        }
                        tcg_temp_free_i32(tmp2);

                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            switch (size) {
                            case 0:
                                if (op == 4)
                                    mask = 0xff >> -shift;
                                else
                                    mask = (uint8_t)(0xff << shift);
                                mask |= mask << 8;
                                mask |= mask << 16;
                                break;
                            case 1:
                                if (op == 4)
                                    mask = 0xffff >> -shift;
                                else
                                    mask = (uint16_t)(0xffff << shift);
                                mask |= mask << 16;
                                break;
                            case 2:
                                if (shift < -31 || shift > 31) {
                                    mask = 0;
                                } else {
                                    if (op == 4)
                                        mask = 0xffffffffu >> -shift;
                                    else
                                        mask = 0xffffffffu << shift;
                                }
                                break;
                            default:
                                abort();
                            }
                            tmp2 = neon_load_reg(rd, pass);
                            tcg_gen_andi_i32(tmp, tmp, mask);
                            tcg_gen_andi_i32(tmp2, tmp2, ~mask);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                int input_unsigned = (op == 8) ? !u : u;
                if (rm & 1) {
                    return 1;
                }
                shift = shift - (1 << (size + 3));
                size++;
                if (size == 3) {
                    tmp64 = tcg_const_i64(shift);
                    neon_load_reg64(cpu_V0, rm);
                    neon_load_reg64(cpu_V1, rm + 1);
                    for (pass = 0; pass < 2; pass++) {
                        TCGv_i64 in;
                        if (pass == 0) {
                            in = cpu_V0;
                        } else {
                            in = cpu_V1;
                        }
                        if (q) {
                            if (input_unsigned) {
                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
                            }
                        } else {
                            if (input_unsigned) {
                                gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
                            }
                        }
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (size == 1) {
                        imm = (uint16_t)shift;
                        imm |= imm << 16;
                    } else {
                        /* size == 2 */
                        imm = (uint32_t)shift;
                    }
                    tmp2 = tcg_const_i32(imm);
                    tmp4 = neon_load_reg(rm + 1, 0);
                    tmp5 = neon_load_reg(rm + 1, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rm, 0);
                        } else {
                            tmp = tmp4;
                        }
                        gen_neon_shift_narrow(size, tmp, tmp2, q,
                                              input_unsigned);
                        if (pass == 0) {
                            tmp3 = neon_load_reg(rm, 1);
                        } else {
                            tmp3 = tmp5;
                        }
                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                              input_unsigned);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(tmp3);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i32(tmp2);
                }
5310 } else if (op
== 10) {
5312 if (q
|| (rd
& 1)) {
5315 tmp
= neon_load_reg(rm
, 0);
5316 tmp2
= neon_load_reg(rm
, 1);
5317 for (pass
= 0; pass
< 2; pass
++) {
5321 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5324 /* The shift is less than the width of the source
5325 type, so we can just shift the whole register. */
5326 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
                /* Widen the result of shift: we need to clear
                 * the potential overflow bits resulting from
                 * left bits of the narrow input appearing as
                 * right bits of the left neighbour narrow
                 * input.
                 */
                if (size < 2 || !u) {
                    imm = (0xffu >> (8 - shift));
                } else if (size == 1) {
                    imm = 0xffff >> (16 - shift);
                    imm = 0xffffffff >> (32 - shift);
                imm64 = imm | (((uint64_t)imm) << 32);
                tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                neon_store_reg64(cpu_V0, rd + pass);
        } else if (op >= 14) {
            /* VCVT fixed-point.  */
            if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
            /* We have already masked out the must-be-1 top bit of imm6,
             * hence this 32-shift where the ARM ARM has 64-imm6.
             */
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                gen_vfp_ulto(0, shift, 1);
                gen_vfp_slto(0, shift, 1);
                gen_vfp_toul(0, shift, 1);
                gen_vfp_tosl(0, shift, 1);
                tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
    } else { /* (insn & 0x00380080) == 0 */
        if (q && (rd & 1)) {
        op = (insn >> 8) & 0xf;
        /* One register and immediate.  */
        imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
        invert = (insn & (1 << 5)) != 0;
        /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
         * We choose to not special-case this and will behave as if a
         * valid constant encoding of 0 had been given.
         */
        imm = (imm << 8) | (imm << 24);
        imm = (imm << 8) | 0xff;
        imm = (imm << 16) | 0xffff;
        imm |= (imm << 8) | (imm << 16) | (imm << 24);
        imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
              | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
        for (pass = 0; pass < (q ? 4 : 2); pass++) {
            if (op & 1 && op < 12) {
                tmp = neon_load_reg(rd, pass);
                /* The immediate value has already been inverted, so
                   BIC becomes AND.  */
                tcg_gen_andi_i32(tmp, tmp, imm);
                tcg_gen_ori_i32(tmp, tmp, imm);
                tmp = tcg_temp_new_i32();
                if (op == 14 && invert) {
                    for (n = 0; n < 4; n++) {
                        if (imm & (1 << (n + (pass & 1) * 4)))
                            val |= 0xff << (n * 8);
                    tcg_gen_movi_i32(tmp, val);
                    tcg_gen_movi_i32(tmp, imm);
            neon_store_reg(rd, pass, tmp);
    } else { /* (insn & 0x00800010 == 0x00800000) */
        op = (insn >> 8) & 0xf;
        if ((insn & (1 << 6)) == 0) {
            /* Three registers of different lengths.  */
            /* undefreq: bit 0 : UNDEF if size != 0
             * bit 1 : UNDEF if size == 0
             * bit 2 : UNDEF if U == 1
             * Note that [1:0] set implies 'always UNDEF'
             */
            /* prewiden, src1_wide, src2_wide, undefreq */
            static const int neon_3reg_wide[16][4] = {
                {1, 0, 0, 0}, /* VADDL */
                {1, 1, 0, 0}, /* VADDW */
                {1, 0, 0, 0}, /* VSUBL */
                {1, 1, 0, 0}, /* VSUBW */
                {0, 1, 1, 0}, /* VADDHN */
                {0, 0, 0, 0}, /* VABAL */
                {0, 1, 1, 0}, /* VSUBHN */
                {0, 0, 0, 0}, /* VABDL */
                {0, 0, 0, 0}, /* VMLAL */
                {0, 0, 0, 6}, /* VQDMLAL */
                {0, 0, 0, 0}, /* VMLSL */
                {0, 0, 0, 6}, /* VQDMLSL */
                {0, 0, 0, 0}, /* Integer VMULL */
                {0, 0, 0, 2}, /* VQDMULL */
                {0, 0, 0, 5}, /* Polynomial VMULL */
                {0, 0, 0, 3}, /* Reserved: always UNDEF */
            };
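            /* Reading the undefreq flags, as an example (consistent with the
             * checks below): VQDMULL has undefreq == 2, so it UNDEFs when
             * size == 0; polynomial VMULL has undefreq == 5 (bits 0 and 2),
             * so it UNDEFs when size != 0 or when U == 1.
             */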
            prewiden = neon_3reg_wide[op][0];
            src1_wide = neon_3reg_wide[op][1];
            src2_wide = neon_3reg_wide[op][2];
            undefreq = neon_3reg_wide[op][3];

            if (((undefreq & 1) && (size != 0)) ||
                ((undefreq & 2) && (size == 0)) ||
                ((undefreq & 4) && u)) {
            if ((src1_wide && (rn & 1)) ||
                (src2_wide && (rm & 1)) ||
                (!src2_wide && (rd & 1))) {

            /* Avoid overlapping operands.  Wide source operands are
               always aligned so will never overlap with wide
               destinations in problematic ways.  */
            if (rd == rm && !src2_wide) {
                tmp = neon_load_reg(rm, 1);
                neon_store_scratch(2, tmp);
            } else if (rd == rn && !src1_wide) {
                tmp = neon_load_reg(rn, 1);
                neon_store_scratch(2, tmp);
            for (pass = 0; pass < 2; pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                if (pass == 1 && rd == rn) {
                    tmp = neon_load_scratch(2);
                    tmp = neon_load_reg(rn, pass);
                gen_neon_widen(cpu_V0, tmp, size, u);
                neon_load_reg64(cpu_V1, rm + pass);
                if (pass == 1 && rd == rm) {
                    tmp2 = neon_load_scratch(2);
                    tmp2 = neon_load_reg(rm, pass);
                gen_neon_widen(cpu_V1, tmp2, size, u);
                case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
                    gen_neon_addl(size);
                case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
                    gen_neon_subl(size);
                case 5: case 7: /* VABAL, VABDL */
                    switch ((size << 1) | u) {
                        gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
                        gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
                        gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
                        gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
                        gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
                        gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    tcg_temp_free_i32(tmp);
                case 8: case 9: case 10: case 11: case 12: case 13:
                    /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                    gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                case 14: /* Polynomial VMULL */
                    gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    tcg_temp_free_i32(tmp);
                default: /* 15 is RESERVED: caught earlier */
                    gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                    neon_store_reg64(cpu_V0, rd + pass);
                } else if (op == 5 || (op >= 8 && op <= 11)) {
                    neon_load_reg64(cpu_V1, rd + pass);
                    case 10: /* VMLSL */
                        gen_neon_negl(cpu_V0, size);
                    case 5: case 8: /* VABAL, VMLAL */
                        gen_neon_addl(size);
                    case 9: case 11: /* VQDMLAL, VQDMLSL */
                        gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                        gen_neon_negl(cpu_V0, size);
                        gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                    neon_store_reg64(cpu_V0, rd + pass);
                } else if (op == 4 || op == 6) {
                    /* Narrowing operation.  */
                    tmp = tcg_temp_new_i32();
                    gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
                    gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                    tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                    gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
                    gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
                    tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                    tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                    neon_store_reg(rd, 0, tmp3);
                    neon_store_reg(rd, 1, tmp);
                    /* Write back the result.  */
                    neon_store_reg64(cpu_V0, rd + pass);
            /* Two registers and a scalar.  NB that for ops of this form
             * the ARM ARM labels bit 24 as Q, but it is in our variable
             * 'u', not 'q'.
             */
            case 1: /* Float VMLA scalar */
            case 5: /* Floating point VMLS scalar */
            case 9: /* Floating point VMUL scalar */
            case 0: /* Integer VMLA scalar */
            case 4: /* Integer VMLS scalar */
            case 8: /* Integer VMUL scalar */
            case 12: /* VQDMULH scalar */
            case 13: /* VQRDMULH scalar */
                if (u && ((rd | rn) & 1)) {
                tmp = neon_get_scalar(size, rm);
                neon_store_scratch(0, tmp);
                for (pass = 0; pass < (u ? 4 : 2); pass++) {
                    tmp = neon_load_scratch(0);
                    tmp2 = neon_load_reg(rn, pass);
                    gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                    gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                    } else if (op == 13) {
                    gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                    gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                    } else if (op & 1) {
                        TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                        gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                        tcg_temp_free_ptr(fpstatus);
                    case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                    case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                    tcg_temp_free_i32(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    gen_neon_add(size, tmp, tmp2);
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    tcg_temp_free_ptr(fpstatus);
                    gen_neon_rsb(size, tmp, tmp2);
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                    tcg_temp_free_ptr(fpstatus);
                    tcg_temp_free_i32(tmp2);
                    neon_store_reg(rd, pass, tmp);
            case 3: /* VQDMLAL scalar */
            case 7: /* VQDMLSL scalar */
            case 11: /* VQDMULL scalar */
            case 2: /* VMLAL scalar */
            case 6: /* VMLSL scalar */
            case 10: /* VMULL scalar */
                tmp2 = neon_get_scalar(size, rm);
                /* We need a copy of tmp2 because gen_neon_mull
                 * deletes it during pass 0.  */
                tmp4 = tcg_temp_new_i32();
                tcg_gen_mov_i32(tmp4, tmp2);
                tmp3 = neon_load_reg(rn, 1);
                for (pass = 0; pass < 2; pass++) {
                    tmp = neon_load_reg(rn, 0);
                    gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                    neon_load_reg64(cpu_V1, rd + pass);
                    gen_neon_negl(cpu_V0, size);
                    gen_neon_addl(size);
                    gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                    gen_neon_negl(cpu_V0, size);
                    gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                    gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                    neon_store_reg64(cpu_V0, rd + pass);
            default: /* 14 and 15 are RESERVED */
    } else { /* size == 3 */
        imm = (insn >> 8) & 0xf;
        if (q && ((rd | rn | rm) & 1)) {
        neon_load_reg64(cpu_V0, rn);
        neon_load_reg64(cpu_V1, rn + 1);
        } else if (imm == 8) {
            neon_load_reg64(cpu_V0, rn + 1);
            neon_load_reg64(cpu_V1, rm);
            tmp64 = tcg_temp_new_i64();
            neon_load_reg64(cpu_V0, rn);
            neon_load_reg64(tmp64, rn + 1);
            neon_load_reg64(cpu_V0, rn + 1);
            neon_load_reg64(tmp64, rm);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
            tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
            neon_load_reg64(cpu_V1, rm);
            neon_load_reg64(cpu_V1, rm + 1);
            tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
            tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
            tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
            tcg_temp_free_i64(tmp64);
            neon_load_reg64(cpu_V0, rn);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
            neon_load_reg64(cpu_V1, rm);
            tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
        neon_store_reg64(cpu_V0, rd);
        neon_store_reg64(cpu_V1, rd + 1);
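        /* VEXT illustration (consistent with the shift/or sequence above):
         * the result is the byte-wise concatenation Vm:Vn shifted right by
         * imm * 8, so with imm == 3 result byte i comes from byte (i + 3)
         * of the pair: the low bytes from Vn and the remainder from Vm.
         */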
    } else if ((insn & (1 << 11)) == 0) {
        /* Two register misc.  */
        op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
        size = (insn >> 18) & 3;
        /* UNDEF for unknown op values and bad op-size combinations */
        if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
        if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
            q && ((rm | rd) & 1)) {
        case NEON_2RM_VREV64:
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                tmp = neon_load_reg(rm, pass * 2);
                tmp2 = neon_load_reg(rm, pass * 2 + 1);
                case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                case 1: gen_swap_half(tmp); break;
                case 2: /* no-op */ break;
                neon_store_reg(rd, pass * 2 + 1, tmp);
                neon_store_reg(rd, pass * 2, tmp2);
                case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
                case 1: gen_swap_half(tmp2); break;
                neon_store_reg(rd, pass * 2, tmp2);
        case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
        case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
            for (pass = 0; pass < q + 1; pass++) {
                tmp = neon_load_reg(rm, pass * 2);
                gen_neon_widen(cpu_V0, tmp, size, op & 1);
                tmp = neon_load_reg(rm, pass * 2 + 1);
                gen_neon_widen(cpu_V1, tmp, size, op & 1);
                case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
                case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
                case 2: tcg_gen_add_i64(CPU_V001); break;
                if (op >= NEON_2RM_VPADAL) {
                    neon_load_reg64(cpu_V1, rd + pass);
                    gen_neon_addl(size);
                neon_store_reg64(cpu_V0, rd + pass);
            for (n = 0; n < (q ? 4 : 2); n += 2) {
                tmp = neon_load_reg(rm, n);
                tmp2 = neon_load_reg(rd, n + 1);
                neon_store_reg(rm, n, tmp2);
                neon_store_reg(rd, n + 1, tmp);
            if (gen_neon_unzip(rd, rm, size, q)) {
            if (gen_neon_zip(rd, rm, size, q)) {
        case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
            /* also VQMOVUN; op field and mnemonics don't line up */
            for (pass = 0; pass < 2; pass++) {
                neon_load_reg64(cpu_V0, rm + pass);
                tmp = tcg_temp_new_i32();
                gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
                neon_store_reg(rd, 0, tmp2);
                neon_store_reg(rd, 1, tmp);
        case NEON_2RM_VSHLL:
            if (q || (rd & 1)) {
            tmp = neon_load_reg(rm, 0);
            tmp2 = neon_load_reg(rm, 1);
            for (pass = 0; pass < 2; pass++) {
                gen_neon_widen(cpu_V0, tmp, size, 1);
                tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
                neon_store_reg64(cpu_V0, rd + pass);
        case NEON_2RM_VCVT_F16_F32:
            if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
            tmp = tcg_temp_new_i32();
            tmp2 = tcg_temp_new_i32();
            tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
            gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
            tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
            gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp2, tmp2, tmp);
            tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
            gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
            tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
            neon_store_reg(rd, 0, tmp2);
            tmp2 = tcg_temp_new_i32();
            gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp2, tmp2, tmp);
            neon_store_reg(rd, 1, tmp2);
            tcg_temp_free_i32(tmp);
        case NEON_2RM_VCVT_F32_F16:
            if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
            tmp3 = tcg_temp_new_i32();
            tmp = neon_load_reg(rm, 0);
            tmp2 = neon_load_reg(rm, 1);
            tcg_gen_ext16u_i32(tmp3, tmp);
            gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
            tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
            tcg_gen_shri_i32(tmp3, tmp, 16);
            gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
            tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
            tcg_temp_free_i32(tmp);
            tcg_gen_ext16u_i32(tmp3, tmp2);
            gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
            tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
            tcg_gen_shri_i32(tmp3, tmp2, 16);
            gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
            tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp3);
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                if (neon_2rm_is_float_op(op)) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env,
                                   neon_reg_offset(rm, pass));
                    tmp = neon_load_reg(rm, pass);
                case NEON_2RM_VREV32:
                    case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                    case 1: gen_swap_half(tmp); break;
                case NEON_2RM_VREV16:
                    case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
                    case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
                    case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
                    case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
                    case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
                    case 2: gen_helper_clz(tmp, tmp); break;
                    gen_helper_neon_cnt_u8(tmp, tmp);
                    tcg_gen_not_i32(tmp, tmp);
                case NEON_2RM_VQABS:
                    gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
                    gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
                    gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
                case NEON_2RM_VQNEG:
                    gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
                    gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
                    gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
                case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
                    tmp2 = tcg_const_i32(0);
                    case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
                    tcg_temp_free(tmp2);
                    if (op == NEON_2RM_VCLE0) {
                        tcg_gen_not_i32(tmp, tmp);
                case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
                    tmp2 = tcg_const_i32(0);
                    case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
                    tcg_temp_free(tmp2);
                    if (op == NEON_2RM_VCLT0) {
                        tcg_gen_not_i32(tmp, tmp);
                case NEON_2RM_VCEQ0:
                    tmp2 = tcg_const_i32(0);
                    case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                    tcg_temp_free(tmp2);
                    case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
                    case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
                    case 2: tcg_gen_abs_i32(tmp, tmp); break;
                    tmp2 = tcg_const_i32(0);
                    gen_neon_rsb(size, tmp, tmp2);
                    tcg_temp_free(tmp2);
                case NEON_2RM_VCGT0_F:
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    tmp2 = tcg_const_i32(0);
                    gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                    tcg_temp_free(tmp2);
                    tcg_temp_free_ptr(fpstatus);
                case NEON_2RM_VCGE0_F:
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    tmp2 = tcg_const_i32(0);
                    gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                    tcg_temp_free(tmp2);
                    tcg_temp_free_ptr(fpstatus);
                case NEON_2RM_VCEQ0_F:
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    tmp2 = tcg_const_i32(0);
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                    tcg_temp_free(tmp2);
                    tcg_temp_free_ptr(fpstatus);
                case NEON_2RM_VCLE0_F:
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    tmp2 = tcg_const_i32(0);
                    gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
                    tcg_temp_free(tmp2);
                    tcg_temp_free_ptr(fpstatus);
                case NEON_2RM_VCLT0_F:
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    tmp2 = tcg_const_i32(0);
                    gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
                    tcg_temp_free(tmp2);
                    tcg_temp_free_ptr(fpstatus);
                case NEON_2RM_VABS_F:
                case NEON_2RM_VNEG_F:
                    tmp2 = neon_load_reg(rd, pass);
                    neon_store_reg(rm, pass, tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    case 0: gen_neon_trn_u8(tmp, tmp2); break;
                    case 1: gen_neon_trn_u16(tmp, tmp2); break;
                    neon_store_reg(rm, pass, tmp2);
                case NEON_2RM_VRECPE:
                    gen_helper_recpe_u32(tmp, tmp, cpu_env);
                case NEON_2RM_VRSQRTE:
                    gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
                case NEON_2RM_VRECPE_F:
                    gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
                case NEON_2RM_VRSQRTE_F:
                    gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
                case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
                case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
                case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
                    gen_vfp_tosiz(0, 1);
                case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
                    gen_vfp_touiz(0, 1);
                    /* Reserved op values were caught by the
                     * neon_2rm_sizes[] check earlier.
                     */
                if (neon_2rm_is_float_op(op)) {
                    tcg_gen_st_f32(cpu_F0s, cpu_env,
                                   neon_reg_offset(rd, pass));
                    neon_store_reg(rd, pass, tmp);
    } else if ((insn & (1 << 10)) == 0) {
        int n = ((insn >> 8) & 3) + 1;
        if ((rn + n) > 32) {
            /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
             * helper function running off the end of the register file.
             */
        if (insn & (1 << 6)) {
            tmp = neon_load_reg(rd, 0);
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        tmp2 = neon_load_reg(rm, 0);
        tmp4 = tcg_const_i32(rn);
        tmp5 = tcg_const_i32(n);
        gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
        tcg_temp_free_i32(tmp);
        if (insn & (1 << 6)) {
            tmp = neon_load_reg(rd, 1);
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        tmp3 = neon_load_reg(rm, 1);
        gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
        tcg_temp_free_i32(tmp5);
        tcg_temp_free_i32(tmp4);
        neon_store_reg(rd, 0, tmp2);
        neon_store_reg(rd, 1, tmp3);
        tcg_temp_free_i32(tmp);
    } else if ((insn & 0x380) == 0) {
        if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
        if (insn & (1 << 19)) {
            tmp = neon_load_reg(rm, 1);
            tmp = neon_load_reg(rm, 0);
        if (insn & (1 << 16)) {
            gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
        } else if (insn & (1 << 17)) {
            if ((insn >> 18) & 1)
                gen_neon_dup_high16(tmp);
                gen_neon_dup_low16(tmp);
        for (pass = 0; pass < (q ? 4 : 2); pass++) {
            tmp2 = tcg_temp_new_i32();
            tcg_gen_mov_i32(tmp2, tmp);
            neon_store_reg(rd, pass, tmp2);
        tcg_temp_free_i32(tmp);
static int disas_cp14_read(CPUState *env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;

    /* Minimal set of debug registers, since we don't support debug */
    if (op1 == 0 && crn == 0 && op2 == 0) {
        /* DBGDIDR: just RAZ. In particular this means the
         * "debug architecture version" bits will read as
         * a reserved value, which should cause Linux to
         * not try to use the debug hardware.
         */
        tmp = tcg_const_i32(0);
        store_reg(s, rt, tmp);
        /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
         * don't implement memory mapped debug components
         */
        if (ENABLE_ARCH_7) {
            tmp = tcg_const_i32(0);
            store_reg(s, rt, tmp);
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            tmp = load_cpu_field(teecr);
            store_reg(s, rt, tmp);
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            if (IS_USER(s) && (env->teecr & 1))
            tmp = load_cpu_field(teehbr);
            store_reg(s, rt, tmp);
    fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
static int disas_cp14_write(CPUState *env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            tmp = load_reg(s, rt);
            gen_helper_set_teecr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            if (IS_USER(s) && (env->teecr & 1))
            tmp = load_reg(s, rt);
            store_cpu_field(tmp, teehbr);
    fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
static int disas_coproc_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        return disas_iwmmxt_insn(env, s, insn);
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        return disas_dsp_insn(env, s, insn);
    return disas_vfp_insn(env, s, insn);
    /* Coprocessors 7-15 are architecturally reserved by ARM.
       Unfortunately Intel decided to ignore this.  */
    if (arm_feature(env, ARM_FEATURE_XSCALE))
    if (insn & (1 << 20))
        return disas_cp14_read(env, s, insn);
        return disas_cp14_write(env, s, insn);
    return disas_cp15_insn(env, s, insn);

    /* Unknown coprocessor.  See if the board has hooked it.  */
    return disas_cp_insn(env, s, insn);
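/* Coprocessor dispatch at a glance (summarising the branches above): cp0/cp1
 * go to the iwMMXt or XScale DSP decoders, cp10/cp11 to the VFP decoder,
 * cp14 to the debug/ThumbEE handlers, cp15 to the system-control decoder,
 * and anything else is offered to a board-hooked coprocessor via
 * disas_cp_insn().
 */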
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);

/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);

/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);

/* Set N and Z flags from a 64-bit value.  */
static void gen_logicq_cc(TCGv_i64 val)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_logicq_cc(tmp, val);
    tcg_temp_free_i32(tmp);
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
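/* Illustrative flow, a sketch rather than generated code: ldrex records the
 * address and loaded value in cpu_exclusive_addr/cpu_exclusive_val; strex
 * re-checks both and only then performs the store, writing 0 on success or
 * 1 on failure to Rd.  Under CONFIG_USER_ONLY the strex instead raises
 * EXCP_STREX, and the fault path performs the compare-and-store atomically.
 */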
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    tmp = gen_ld8u(addr, IS_USER(s));
    tmp = gen_ld16u(addr, IS_USER(s));
    tmp = gen_ld32(addr, IS_USER(s));
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_addi_i32(tmp2, addr, 4);
    tmp = gen_ld32(tmp2, IS_USER(s));
    tcg_temp_free_i32(tmp2);
    tcg_gen_mov_i32(cpu_exclusive_high, tmp);
    store_reg(s, rt2, tmp);
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);

static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);

#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    tmp = gen_ld8u(addr, IS_USER(s));
    tmp = gen_ld16u(addr, IS_USER(s));
    tmp = gen_ld32(addr, IS_USER(s));
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_addi_i32(tmp2, addr, 4);
    tmp = gen_ld32(tmp2, IS_USER(s));
    tcg_temp_free_i32(tmp2);
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
    tcg_temp_free_i32(tmp);
    tmp = load_reg(s, rt);
    gen_st8(tmp, addr, IS_USER(s));
    gen_st16(tmp, addr, IS_USER(s));
    gen_st32(tmp, addr, IS_USER(s));
    tcg_gen_addi_i32(addr, addr, 4);
    tmp = load_reg(s, rt2);
    gen_st32(tmp, addr, IS_USER(s));
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);

static void disas_arm_insn(CPUState *env, DisasContext *s)
{
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;

    insn = ldl_code(s->pc);

    /* M variants do not implement ARM mode.  */
    /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
     * choose to UNDEF. In ARMv5 and above the space is used
     * for miscellaneous unconditional instructions.
     */
    /* Unconditional instructions.  */
    if (((insn >> 25) & 7) == 1) {
        /* NEON Data processing.  */
        if (!arm_feature(env, ARM_FEATURE_NEON))
        if (disas_neon_data_insn(env, s, insn))
    if ((insn & 0x0f100000) == 0x04000000) {
        /* NEON load/store.  */
        if (!arm_feature(env, ARM_FEATURE_NEON))
        if (disas_neon_ls_insn(env, s, insn))
    if (((insn & 0x0f30f000) == 0x0510f000) ||
        ((insn & 0x0f30f010) == 0x0710f000)) {
        if ((insn & (1 << 22)) == 0) {
            if (!arm_feature(env, ARM_FEATURE_V7MP)) {
        /* Otherwise PLD; v5TE+ */
    if (((insn & 0x0f70f000) == 0x0450f000) ||
        ((insn & 0x0f70f010) == 0x0650f000)) {
        return; /* PLI; V7 */
    if (((insn & 0x0f700000) == 0x04100000) ||
        ((insn & 0x0f700010) == 0x06100000)) {
        if (!arm_feature(env, ARM_FEATURE_V7MP)) {
        return; /* v7MP: Unallocated memory hint: must NOP */
    if ((insn & 0x0ffffdff) == 0x01010000) {
        if (insn & (1 << 9)) {
            /* BE8 mode not implemented.  */
    } else if ((insn & 0x0fffff00) == 0x057ff000) {
        switch ((insn >> 4) & 0xf) {
        /* We don't emulate caches so these are a no-op.  */
    } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
        op1 = (insn & 0x1f);
        addr = tcg_temp_new_i32();
        tmp = tcg_const_i32(op1);
        gen_helper_get_r13_banked(addr, cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        i = (insn >> 23) & 3;
        case 0: offset = -4; break; /* DA */
        case 1: offset = 0; break; /* IA */
        case 2: offset = -8; break; /* DB */
        case 3: offset = 4; break; /* IB */
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = load_reg(s, 14);
        gen_st32(tmp, addr, 0);
        tmp = load_cpu_field(spsr);
        tcg_gen_addi_i32(addr, addr, 4);
        gen_st32(tmp, addr, 0);
        if (insn & (1 << 21)) {
            /* Base writeback.  */
            case 0: offset = -8; break;
            case 1: offset = 4; break;
            case 2: offset = -4; break;
            case 3: offset = 0; break;
            tcg_gen_addi_i32(addr, addr, offset);
            tmp = tcg_const_i32(op1);
            gen_helper_set_r13_banked(cpu_env, tmp, addr);
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(addr);
        tcg_temp_free_i32(addr);
    } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
        rn = (insn >> 16) & 0xf;
        addr = load_reg(s, rn);
        i = (insn >> 23) & 3;
        case 0: offset = -4; break; /* DA */
        case 1: offset = 0; break; /* IA */
        case 2: offset = -8; break; /* DB */
        case 3: offset = 4; break; /* IB */
        tcg_gen_addi_i32(addr, addr, offset);
        /* Load PC into tmp and CPSR into tmp2.  */
        tmp = gen_ld32(addr, 0);
        tcg_gen_addi_i32(addr, addr, 4);
        tmp2 = gen_ld32(addr, 0);
        if (insn & (1 << 21)) {
            /* Base writeback.  */
            case 0: offset = -8; break;
            case 1: offset = 4; break;
            case 2: offset = -4; break;
            case 3: offset = 0; break;
            tcg_gen_addi_i32(addr, addr, offset);
            store_reg(s, rn, addr);
        tcg_temp_free_i32(addr);
        gen_rfe(s, tmp, tmp2);
    } else if ((insn & 0x0e000000) == 0x0a000000) {
        /* branch link and change to thumb (blx <offset>) */
        val = (uint32_t)s->pc;
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 14, tmp);
        /* Sign-extend the 24-bit offset */
        offset = (((int32_t)insn) << 8) >> 8;
        /* offset * 4 + bit24 * 2 + (thumb bit) */
        val += (offset << 2) | ((insn >> 23) & 2) | 1;
        /* pipeline offset */
        /* protected by ARCH(5); above, near the start of uncond block */
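        /* Worked example (using the arithmetic above, and assuming the usual
         * 4-byte pipeline adjustment that follows): imm24 == 1 with bit 24
         * clear branches to the BLX insn's own address + 8 + 4, with bit 0
         * set so execution continues in Thumb state; bit 24 contributes the
         * extra halfword needed for 2-byte-aligned targets.
         */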
    } else if ((insn & 0x0e000f00) == 0x0c000100) {
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            /* iWMMXt register transfer.  */
            if (env->cp15.c15_cpar & (1 << 1))
                if (!disas_iwmmxt_insn(env, s, insn))
    } else if ((insn & 0x0fe00000) == 0x0c400000) {
        /* Coprocessor double register transfer.  */
    } else if ((insn & 0x0f000010) == 0x0e000010) {
        /* Additional coprocessor register transfer.  */
    } else if ((insn & 0x0ff10020) == 0x01000000) {
        /* cps (privileged) */
        if (insn & (1 << 19)) {
            if (insn & (1 << 8))
            if (insn & (1 << 7))
            if (insn & (1 << 6))
            if (insn & (1 << 18))
        if (insn & (1 << 17)) {
            val |= (insn & 0x1f);
        gen_set_psr_im(s, mask, 0, val);
    /* if not always execute, we generate a conditional jump to
       the next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
    if ((insn & 0x0f900000) == 0x03000000) {
        if ((insn & (1 << 21)) == 0) {
            rd = (insn >> 12) & 0xf;
            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
            if ((insn & (1 << 22)) == 0) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, val);
                tmp = load_reg(s, rd);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_ori_i32(tmp, tmp, val << 16);
            store_reg(s, rd, tmp);
            if (((insn >> 12) & 0xf) != 0xf)
            if (((insn >> 16) & 0xf) == 0) {
                gen_nop_hint(s, insn & 0xff);
                /* CPSR = immediate */
                shift = ((insn >> 8) & 0xf) * 2;
                val = (val >> shift) | (val << (32 - shift));
                i = ((insn & (1 << 22)) != 0);
                if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
    } else if ((insn & 0x0f900000) == 0x01000000
               && (insn & 0x00000090) != 0x00000090) {
        /* miscellaneous instructions */
        op1 = (insn >> 21) & 3;
        sh = (insn >> 4) & 0xf;
        case 0x0: /* move program status register */
            tmp = load_reg(s, rm);
            i = ((op1 & 2) != 0);
            if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
            rd = (insn >> 12) & 0xf;
            tmp = load_cpu_field(spsr);
            tmp = tcg_temp_new_i32();
            gen_helper_cpsr_read(tmp);
            store_reg(s, rd, tmp);
            /* branch/exchange thumb (bx).  */
            tmp = load_reg(s, rm);
            } else if (op1 == 3) {
            rd = (insn >> 12) & 0xf;
            tmp = load_reg(s, rm);
            gen_helper_clz(tmp, tmp);
            store_reg(s, rd, tmp);
            /* Trivial implementation equivalent to bx.  */
            tmp = load_reg(s, rm);
            /* branch link/exchange thumb (blx) */
            tmp = load_reg(s, rm);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc);
            store_reg(s, 14, tmp2);
        case 0x5: /* saturating add/subtract */
            rd = (insn >> 12) & 0xf;
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rn);
            gen_helper_double_saturate(tmp2, tmp2);
            gen_helper_sub_saturate(tmp, tmp, tmp2);
            gen_helper_add_saturate(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            /* SMC instruction (op1 == 3)
               and undefined instructions (op1 == 0 || op1 == 2)
               will trap */
            gen_exception_insn(s, 4, EXCP_BKPT);
        case 0x8: /* signed multiply */
            rs = (insn >> 8) & 0xf;
            rn = (insn >> 12) & 0xf;
            rd = (insn >> 16) & 0xf;
            /* (32 * 16) >> 16 */
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rs);
            tcg_gen_sari_i32(tmp2, tmp2, 16);
            tmp64 = gen_muls_i64_i32(tmp, tmp2);
            tcg_gen_shri_i64(tmp64, tmp64, 16);
            tmp = tcg_temp_new_i32();
            tcg_gen_trunc_i64_i32(tmp, tmp64);
            tcg_temp_free_i64(tmp64);
            if ((sh & 2) == 0) {
                tmp2 = load_reg(s, rn);
                gen_helper_add_setq(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rs);
            gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
            tcg_temp_free_i32(tmp2);
            tmp64 = tcg_temp_new_i64();
            tcg_gen_ext_i32_i64(tmp64, tmp);
            tcg_temp_free_i32(tmp);
            gen_addq(s, tmp64, rn, rd);
            gen_storeq_reg(s, rn, rd, tmp64);
            tcg_temp_free_i64(tmp64);
            tmp2 = load_reg(s, rn);
            gen_helper_add_setq(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
    } else if (((insn & 0x0e000000) == 0 &&
                (insn & 0x00000090) != 0x90) ||
               ((insn & 0x0e000000) == (1 << 25))) {
        int set_cc, logic_cc, shiftop;

        op1 = (insn >> 21) & 0xf;
        set_cc = (insn >> 20) & 1;
        logic_cc = table_logic_cc[op1] & set_cc;

        /* data processing instruction */
        if (insn & (1 << 25)) {
            /* immediate operand */
            shift = ((insn >> 8) & 0xf) * 2;
            val = (val >> shift) | (val << (32 - shift));
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, val);
            if (logic_cc && shift) {
                gen_set_CF_bit31(tmp2);
            tmp2 = load_reg(s, rm);
            shiftop = (insn >> 5) & 3;
            if (!(insn & (1 << 4))) {
                shift = (insn >> 7) & 0x1f;
                gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
                rs = (insn >> 8) & 0xf;
                tmp = load_reg(s, rs);
                gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
        if (op1 != 0x0f && op1 != 0x0d) {
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rn);
        rd = (insn >> 12) & 0xf;
        tcg_gen_and_i32(tmp, tmp, tmp2);
        store_reg_bx(env, s, rd, tmp);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        store_reg_bx(env, s, rd, tmp);
        if (set_cc && rd == 15) {
            /* SUBS r15, ... is used for exception return.  */
            gen_helper_sub_cc(tmp, tmp, tmp2);
            gen_exception_return(s, tmp);
            gen_helper_sub_cc(tmp, tmp, tmp2);
            tcg_gen_sub_i32(tmp, tmp, tmp2);
            store_reg_bx(env, s, rd, tmp);
        gen_helper_sub_cc(tmp, tmp2, tmp);
        tcg_gen_sub_i32(tmp, tmp2, tmp);
        store_reg_bx(env, s, rd, tmp);
        gen_helper_add_cc(tmp, tmp, tmp2);
        tcg_gen_add_i32(tmp, tmp, tmp2);
        store_reg_bx(env, s, rd, tmp);
        gen_helper_adc_cc(tmp, tmp, tmp2);
        gen_add_carry(tmp, tmp, tmp2);
        store_reg_bx(env, s, rd, tmp);
        gen_helper_sbc_cc(tmp, tmp, tmp2);
        gen_sub_carry(tmp, tmp, tmp2);
        store_reg_bx(env, s, rd, tmp);
        gen_helper_sbc_cc(tmp, tmp2, tmp);
        gen_sub_carry(tmp, tmp2, tmp);
        store_reg_bx(env, s, rd, tmp);
        tcg_gen_and_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp);
        gen_helper_sub_cc(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp);
        gen_helper_add_cc(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        store_reg_bx(env, s, rd, tmp);
        if (logic_cc && rd == 15) {
            /* MOVS r15, ... is used for exception return.  */
            gen_exception_return(s, tmp2);
            store_reg_bx(env, s, rd, tmp2);
        tcg_gen_andc_i32(tmp, tmp, tmp2);
        store_reg_bx(env, s, rd, tmp);
        tcg_gen_not_i32(tmp2, tmp2);
        store_reg_bx(env, s, rd, tmp2);
        if (op1 != 0x0f && op1 != 0x0d) {
            tcg_temp_free_i32(tmp2);
        /* other instructions */
        op1 = (insn >> 24) & 0xf;
        /* multiplies, extra load/stores */
        sh = (insn >> 5) & 3;
        rd = (insn >> 16) & 0xf;
        rn = (insn >> 12) & 0xf;
        rs = (insn >> 8) & 0xf;
        op1 = (insn >> 20) & 0xf;
        case 0: case 1: case 2: case 3: case 6:
            tmp = load_reg(s, rs);
            tmp2 = load_reg(s, rm);
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            if (insn & (1 << 22)) {
                /* Subtract (mls) */
                tmp2 = load_reg(s, rn);
                tcg_gen_sub_i32(tmp, tmp2, tmp);
                tcg_temp_free_i32(tmp2);
            } else if (insn & (1 << 21)) {
                tmp2 = load_reg(s, rn);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            if (insn & (1 << 20))
            store_reg(s, rd, tmp);
            /* 64 bit mul double accumulate (UMAAL) */
            tmp = load_reg(s, rs);
            tmp2 = load_reg(s, rm);
            tmp64 = gen_mulu_i64_i32(tmp, tmp2);
            gen_addq_lo(s, tmp64, rn);
            gen_addq_lo(s, tmp64, rd);
            gen_storeq_reg(s, rn, rd, tmp64);
            tcg_temp_free_i64(tmp64);
        case 8: case 9: case 10: case 11:
        case 12: case 13: case 14: case 15:
            /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
            tmp = load_reg(s, rs);
            tmp2 = load_reg(s, rm);
            if (insn & (1 << 22)) {
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tmp64 = gen_mulu_i64_i32(tmp, tmp2);
            if (insn & (1 << 21)) { /* mult accumulate */
                gen_addq(s, tmp64, rn, rd);
            if (insn & (1 << 20)) {
                gen_logicq_cc(tmp64);
            gen_storeq_reg(s, rn, rd, tmp64);
            tcg_temp_free_i64(tmp64);
        rn = (insn >> 16) & 0xf;
        rd = (insn >> 12) & 0xf;
        if (insn & (1 << 23)) {
            /* load/store exclusive */
            op1 = (insn >> 21) & 0x3;
            addr = tcg_temp_local_new_i32();
            load_reg_var(s, addr, rn);
            if (insn & (1 << 20)) {
                gen_load_exclusive(s, rd, 15, addr, 2);
                case 1: /* ldrexd */
                    gen_load_exclusive(s, rd, rd + 1, addr, 3);
                case 2: /* ldrexb */
                    gen_load_exclusive(s, rd, 15, addr, 0);
                case 3: /* ldrexh */
                    gen_load_exclusive(s, rd, 15, addr, 1);
                gen_store_exclusive(s, rd, rm, 15, addr, 2);
                case 1: /* strexd */
                    gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
                case 2: /* strexb */
                    gen_store_exclusive(s, rd, rm, 15, addr, 0);
                case 3: /* strexh */
                    gen_store_exclusive(s, rd, rm, 15, addr, 1);
            tcg_temp_free(addr);
            /* SWP instruction */
            /* ??? This is not really atomic.  However we know
               we never have multiple CPUs running in parallel,
               so it is good enough.  */
            addr = load_reg(s, rn);
            tmp = load_reg(s, rm);
            if (insn & (1 << 22)) {
                tmp2 = gen_ld8u(addr, IS_USER(s));
                gen_st8(tmp, addr, IS_USER(s));
                tmp2 = gen_ld32(addr, IS_USER(s));
                gen_st32(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp2);
        /* Misc load/store */
        rn = (insn >> 16) & 0xf;
        rd = (insn >> 12) & 0xf;
        addr = load_reg(s, rn);
        if (insn & (1 << 24))
            gen_add_datah_offset(s, insn, 0, addr);
        if (insn & (1 << 20)) {
            tmp = gen_ld16u(addr, IS_USER(s));
            tmp = gen_ld8s(addr, IS_USER(s));
            tmp = gen_ld16s(addr, IS_USER(s));
        } else if (sh & 2) {
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
            tcg_gen_addi_i32(addr, addr, 4);
            tmp = load_reg(s, rd + 1);
            gen_st32(tmp, addr, IS_USER(s));
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
            tcg_gen_addi_i32(addr, addr, 4);
            tmp = gen_ld32(addr, IS_USER(s));
            address_offset = -4;
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        /* Perform base writeback before the loaded value to
           ensure correct behavior with overlapping index registers.
           ldrd with base writeback is undefined if the
           destination and index registers overlap.  */
        if (!(insn & (1 << 24))) {
            gen_add_datah_offset(s, insn, address_offset, addr);
            store_reg(s, rn, addr);
        } else if (insn & (1 << 21)) {
            tcg_gen_addi_i32(addr, addr, address_offset);
            store_reg(s, rn, addr);
            tcg_temp_free_i32(addr);
        /* Complete the load.  */
        store_reg(s, rd, tmp);
        if (insn & (1 << 4)) {
            /* Armv6 Media instructions.  */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            rs = (insn >> 8) & 0xf;
            switch ((insn >> 23) & 3) {
            case 0: /* Parallel add/subtract.  */
                op1 = (insn >> 20) & 7;
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                sh = (insn >> 5) & 7;
                if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                if ((insn & 0x00700020) == 0) {
                    /* Halfword pack.  */
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    if (insn & (1 << 6)) {
                        tcg_gen_sari_i32(tmp2, tmp2, shift);
                        tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_shli_i32(tmp2, tmp2, shift);
                        tcg_gen_ext16u_i32(tmp, tmp);
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                    tcg_gen_or_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x00200020) == 0x00200000) {
                    tmp = load_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    if (insn & (1 << 6)) {
                        tcg_gen_sari_i32(tmp, tmp, shift);
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    sh = (insn >> 16) & 0x1f;
                    tmp2 = tcg_const_i32(sh);
                    if (insn & (1 << 22))
                        gen_helper_usat(tmp, tmp, tmp2);
                        gen_helper_ssat(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x00300fe0) == 0x00200f20) {
                    tmp = load_reg(s, rm);
                    sh = (insn >> 16) & 0x1f;
                    tmp2 = tcg_const_i32(sh);
                    if (insn & (1 << 22))
                        gen_helper_usat16(tmp, tmp, tmp2);
                        gen_helper_ssat16(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x000003e0) == 0x00000060) {
                    tmp = load_reg(s, rm);
                    shift = (insn >> 10) & 3;
                    /* ??? In many cases it's not necessary to do a
                       rotate, a shift is sufficient.  */
                    tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                    op1 = (insn >> 20) & 7;
                    case 0: gen_sxtb16(tmp); break;
                    case 2: gen_sxtb(tmp); break;
                    case 3: gen_sxth(tmp); break;
                    case 4: gen_uxtb16(tmp); break;
                    case 6: gen_uxtb(tmp); break;
                    case 7: gen_uxth(tmp); break;
                    default: goto illegal_op;
                    tmp2 = load_reg(s, rn);
                    if ((op1 & 3) == 0) {
                        gen_add16(tmp, tmp2);
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                    tmp = load_reg(s, rm);
                    if (insn & (1 << 22)) {
                        if (insn & (1 << 7)) {
                        gen_helper_rbit(tmp, tmp);
                        if (insn & (1 << 7))
                        tcg_gen_bswap32_i32(tmp, tmp);
                    store_reg(s, rd, tmp);
            case 2: /* Multiplies (Type 3).  */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                if (insn & (1 << 20)) {
                    /* Signed multiply most significant [accumulate].
                       (SMMUL, SMMLA, SMMLS) */
                    tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 6)) {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    if (insn & (1 << 5)) {
                        tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                    tcg_gen_shri_i64(tmp64, tmp64, 32);
                    tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, tmp64);
                    tcg_temp_free_i64(tmp64);
                    store_reg(s, rn, tmp);
                    if (insn & (1 << 5))
                        gen_swap_half(tmp2);
                    gen_smul_dual(tmp, tmp2);
                    if (insn & (1 << 6)) {
                        /* This subtraction cannot overflow. */
                        tcg_gen_sub_i32(tmp, tmp, tmp2);
                        /* This addition cannot overflow 32 bits;
                         * however it may overflow considered as a signed
                         * operation, in which case we must set the Q flag.
                         */
                        gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    if (insn & (1 << 22)) {
                        /* smlald, smlsld */
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                        gen_addq(s, tmp64, rd, rn);
                        gen_storeq_reg(s, rd, rn, tmp64);
                        tcg_temp_free_i64(tmp64);
                        /* smuad, smusd, smlad, smlsd */
                        tmp2 = load_reg(s, rd);
                        gen_helper_add_setq(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rn, tmp);
                op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
                case 0: /* Unsigned sum of absolute differences.  */
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    gen_helper_usad8(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    tmp2 = load_reg(s, rd);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rn, tmp);
                case 0x20: case 0x24: case 0x28: case 0x2c:
                    /* Bitfield insert/clear.  */
                    shift = (insn >> 7) & 0x1f;
                    i = (insn >> 16) & 0x1f;
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rd);
                    gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                    tmp = load_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    i = ((insn >> 16) & 0x1f) + 1;
                    gen_ubfx(tmp, shift, (1u << i) - 1);
                    gen_sbfx(tmp, shift, i);
                    store_reg(s, rd, tmp);
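                    /* Decode example (hypothetical operands, following the
                     * field extraction above): UBFX r1, r2, #4, #8 has
                     * lsb == 4 and widthm1 == 7, so shift == 4, i == 8 and
                     * the extraction mask is (1u << 8) - 1 == 0xff.
                     */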
            /* Check for undefined extension instructions
             * per the ARM Bible IE:
             * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
             */
            sh = (0xf << 20) | (0xf << 4);
            if (op1 == 0x7 && ((insn & sh) == sh))
        /* load/store byte/word */
        rn = (insn >> 16) & 0xf;
        rd = (insn >> 12) & 0xf;
        tmp2 = load_reg(s, rn);
        i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
        if (insn & (1 << 24))
            gen_add_data_offset(s, insn, tmp2);
        if (insn & (1 << 20)) {
            if (insn & (1 << 22)) {
                tmp = gen_ld8u(tmp2, i);
                tmp = gen_ld32(tmp2, i);
            tmp = load_reg(s, rd);
            if (insn & (1 << 22))
                gen_st8(tmp, tmp2, i);
                gen_st32(tmp, tmp2, i);
        if (!(insn & (1 << 24))) {
            gen_add_data_offset(s, insn, tmp2);
            store_reg(s, rn, tmp2);
        } else if (insn & (1 << 21)) {
            store_reg(s, rn, tmp2);
            tcg_temp_free_i32(tmp2);
        if (insn & (1 << 20)) {
            /* Complete the load.  */
            store_reg_from_load(env, s, rd, tmp);
        int j, n, user, loaded_base;
        /* load/store multiple words */
        /* XXX: store correct base if write back */
        if (insn & (1 << 22)) {
            goto illegal_op; /* only usable in supervisor mode */
        if ((insn & (1 << 15)) == 0)
        rn = (insn >> 16) & 0xf;
        addr = load_reg(s, rn);

        /* compute total size */
        TCGV_UNUSED(loaded_var);
        if (insn & (1 << i))
        /* XXX: test invalid n == 0 case ? */
        if (insn & (1 << 23)) {
            if (insn & (1 << 24)) {
                tcg_gen_addi_i32(addr, addr, 4);
                /* post increment */
            if (insn & (1 << 24)) {
                tcg_gen_addi_i32(addr, addr, -(n * 4));
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
        if (insn & (1 << i)) {
            if (insn & (1 << 20)) {
                tmp = gen_ld32(addr, IS_USER(s));
                tmp2 = tcg_const_i32(i);
                gen_helper_set_user_reg(tmp2, tmp);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
            } else if (i == rn) {
                store_reg_from_load(env, s, i, tmp);
                /* special case: r15 = PC + 8 */
                val = (long)s->pc + 4;
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, val);
                tmp = tcg_temp_new_i32();
                tmp2 = tcg_const_i32(i);
                gen_helper_get_user_reg(tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tmp = load_reg(s, i);
                gen_st32(tmp, addr, IS_USER(s));
            /* no need to add after the last transfer */
            tcg_gen_addi_i32(addr, addr, 4);
        if (insn & (1 << 21)) {
            if (insn & (1 << 23)) {
                if (insn & (1 << 24)) {
                    /* post increment */
                    tcg_gen_addi_i32(addr, addr, 4);
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                    /* post decrement */
                    tcg_gen_addi_i32(addr, addr, -(n * 4));
            store_reg(s, rn, addr);
            tcg_temp_free_i32(addr);
        store_reg(s, rn, loaded_var);
        if ((insn & (1 << 22)) && !user) {
            /* Restore CPSR from SPSR.  */
            tmp = load_cpu_field(spsr);
            gen_set_cpsr(tmp, 0xffffffff);
            tcg_temp_free_i32(tmp);
            s->is_jmp = DISAS_UPDATE;
        /* branch (and link) */
        val = (int32_t)s->pc;
        if (insn & (1 << 24)) {
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, val);
            store_reg(s, 14, tmp);
        offset = (((int32_t)insn << 8) >> 8);
        val += (offset << 2) + 4;
        if (disas_coproc_insn(env, s, insn))
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_SWI;
        gen_exception_insn(s, 4, EXCP_UDEF);
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
{
    tcg_gen_and_i32(t0, t0, t1);
    tcg_gen_andc_i32(t0, t0, t1);
    tcg_gen_or_i32(t0, t0, t1);
    tcg_gen_orc_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, t1);
    gen_helper_add_cc(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, t1);
    gen_helper_adc_cc(t0, t0, t1);
    gen_helper_sbc_cc(t0, t0, t1);
    gen_sub_carry(t0, t0, t1);
    gen_helper_sub_cc(t0, t0, t1);
    tcg_gen_sub_i32(t0, t0, t1);
    gen_helper_sub_cc(t0, t1, t0);
    tcg_gen_sub_i32(t0, t1, t0);
    default: /* 5, 6, 7, 9, 12, 15. */
    gen_set_CF_bit31(t1);
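/* Opcode map implied by the cases above (the Thumb-2 data-processing op
 * field): 0 AND, 1 BIC, 2 ORR, 3 ORN, 4 EOR, 8 ADD, 10 ADC, 11 SBC,
 * 13 SUB, 14 RSB; values 5, 6, 7, 9, 12 and 15 fall to the default case.
 */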
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal.  */
static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;

    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature(env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior.  */
        if ((insn & (1 << 12)) == 0) {
            /* Second half of blx.  */
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
        if (insn & (1 << 11)) {
            /* Second half of bl.  */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary.  Implement it as two
               16-bit instructions in case the second half causes an
               exception.  */
8028 offset
= ((int32_t)insn
<< 21) >> 9;
8029 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
8032 /* Fall through to 32-bit decode. */
8035 insn
= lduw_code(s
->pc
);
8037 insn
|= (uint32_t)insn_hw1
<< 16;
    if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }

    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    rm = insn & 0xf;
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        abort();
    case 4:
        if (insn & (1 << 22)) {
            /* Other load/store, table branch.  */
            if (insn & 0x01200000) {
                /* Load/store doubleword.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~3);
                } else {
                    addr = load_reg(s, rn);
                }
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, offset);
                    offset = 0;
                }
                if (insn & (1 << 20)) {
                    /* ldrd */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rd, tmp);
                } else {
                    /* strd */
                    tmp = load_reg(s, rs);
                    gen_st32(tmp, addr, IS_USER(s));
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                if (insn & (1 << 21)) {
                    /* Base writeback.  */
                    if (rn == 15)
                        goto illegal_op;
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            } else if ((insn & (1 << 23)) == 0) {
                /* Load/store exclusive word.  */
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                } else {
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                }
                tcg_temp_free(addr);
            } else if ((insn & (1 << 6)) == 0) {
                /* Table Branch.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc);
                } else {
                    addr = load_reg(s, rn);
                }
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    /* tbh */
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    tmp = gen_ld16u(addr, IS_USER(s));
                } else { /* tbb */
                    tcg_temp_free_i32(tmp);
                    tmp = gen_ld8u(addr, IS_USER(s));
                }
                tcg_temp_free_i32(addr);
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, s->pc);
                store_reg(s, 15, tmp);
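                /* Illustrative note (not in the original source): this is
                 * the tbb/tbh pair.  The table base is rn (or the pc for a
                 * literal table), rm is the zero-based index, and each
                 * entry is an unsigned byte (tbb) or halfword (tbh, hence
                 * addr is bumped by rm a second time above).  The entry is
                 * a forward distance in halfwords, so the final write to
                 * r15 computes s->pc + 2 * entry.
                 */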
            } else {
                /* Load/store exclusive byte/halfword/doubleword.  */
                ARCH(7);
                op = (insn >> 4) & 0x3;
                if (op == 2) {
                    goto illegal_op;
                }
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                } else {
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                }
                tcg_temp_free(addr);
            }
        } else {
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* Not available in user mode.  */
                if (IS_USER(s))
                    goto illegal_op;
                if (insn & (1 << 20)) {
                    /* rfe */
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2.  */
                    tmp = gen_ld32(addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = gen_ld32(addr, 0);
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, -4);
                        }
                        store_reg(s, rn, addr);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                    gen_rfe(s, tmp, tmp2);
                } else {
                    /* srs */
                    op = (insn & 0x1f);
                    addr = tcg_temp_new_i32();
                    tmp = tcg_const_i32(op);
                    gen_helper_get_r13_banked(addr, cpu_env, tmp);
                    tcg_temp_free_i32(tmp);
                    if ((insn & (1 << 24)) == 0) {
                        tcg_gen_addi_i32(addr, addr, -8);
                    }
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    gen_helper_cpsr_read(tmp);
                    gen_st32(tmp, addr, 0);
                    if (insn & (1 << 21)) {
                        if ((insn & (1 << 24)) == 0) {
                            tcg_gen_addi_i32(addr, addr, -4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                        tmp = tcg_const_i32(op);
                        gen_helper_set_r13_banked(cpu_env, tmp, addr);
                        tcg_temp_free_i32(tmp);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                }
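                /* Illustrative note (not in the original source): rfe above
                 * pops the return pc and saved status word and hands them
                 * to gen_rfe(), while srs stores r14 and the status read by
                 * gen_helper_cpsr_read() to the stack of the mode encoded
                 * in the low five instruction bits - hence the banked-r13
                 * helpers instead of a plain r13 access.
                 */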
            } else {
                int i, loaded_base = 0;
                TCGv loaded_var;
                /* Load/store multiple.  */
                addr = load_reg(s, rn);
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                }

                TCGV_UNUSED(loaded_var);
                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                        continue;
                    if (insn & (1 << 20)) {
                        /* Load.  */
                        tmp = gen_ld32(addr, IS_USER(s));
                        if (i == 15) {
                            gen_bx(s, tmp);
                        } else if (i == rn) {
                            loaded_var = tmp;
                            loaded_base = 1;
                        } else {
                            store_reg(s, i, tmp);
                        }
                    } else {
                        /* Store.  */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    tcg_gen_addi_i32(addr, addr, 4);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if (insn & (1 << 21)) {
                    /* Base register writeback.  */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    }
                    /* Fault if writeback register is in register list.  */
                    if (insn & (1 << rn))
                        goto illegal_op;
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    case 5:
        op = (insn >> 21) & 0xf;
        if (op == 6) {
            /* Halfword pack.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
            if (insn & (1 << 5)) {
                /* pkhtb */
                if (shift == 0)
                    shift = 31;
                tcg_gen_sari_i32(tmp2, tmp2, shift);
                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                tcg_gen_ext16u_i32(tmp2, tmp2);
            } else {
                /* pkhbt */
                if (shift)
                    tcg_gen_shli_i32(tmp2, tmp2, shift);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
            }
            tcg_gen_or_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* Data processing register constant shift.  */
            if (rn == 15) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            } else {
                tmp = load_reg(s, rn);
            }
            tmp2 = load_reg(s, rm);

            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
                goto illegal_op;
            tcg_temp_free_i32(tmp2);
            if (rd != 15) {
                store_reg(s, rd, tmp);
            } else {
                tcg_temp_free_i32(tmp);
            }
        }
        break;
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            if (logic_cc)
                gen_logic_CC(tmp);
            store_reg_bx(env, s, rd, tmp);
            break;
        case 1: /* Sign/zero extend.  */
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
            if (shift != 0)
                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default: goto illegal_op;
            }
            if (rn != 15) {
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 2: /* SIMD add/subtract.  */
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            if (op < 4) {
                /* Saturating add/subtract.  */
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if (op & 1)
                    gen_helper_double_saturate(tmp, tmp);
                if (op & 2)
                    gen_helper_sub_saturate(tmp, tmp2, tmp);
                else
                    gen_helper_add_saturate(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                tmp = load_reg(s, rn);
                switch (op) {
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                    break;
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                    break;
                case 0x09: /* rev16 */
                    gen_rev16(tmp);
                    break;
                case 0x0b: /* revsh */
                    gen_revsh(tmp);
                    break;
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    break;
                case 0x18: /* clz */
                    gen_helper_clz(tmp, tmp);
                    break;
                default:
                    goto illegal_op;
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (op)
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    else
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                if (op)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (insn & (1 << 22)) {
                    /* This subtraction cannot overflow. */
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
                    gen_helper_add_setq(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 3: /* 32 * 16 -> 32msb */
                if (op)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                if (rs != 15) {
                    tmp = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    } else {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                    }
                }
                if (insn & (1 << 4)) {
                    tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                }
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                break;
            case 7: /* Unsigned sum of absolute differences.  */
                gen_helper_usad8(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!arm_feature(env, ARM_FEATURE_DIV))
                    goto illegal_op;
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        tcg_temp_free_i32(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            }
            break;
        }
        break;
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn (env, s, insn))
                goto illegal_op;
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;

                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
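                /* Worked example (illustrative, not in the original
                 * source): for "b.w" with S = 0, imm10 = 0, J1 = J2 = 1
                 * and imm11 = 2, the first assignment leaves
                 * offset[31:12] = 0, the OR adds 2 << 1 = 4, and neither
                 * XOR fires because ~insn has bits 13 and 11 clear; the
                 * jump target is s->pc + 4.  The two XORs implement
                 * I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S) from the
                 * architecture manual.
                 */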
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    goto illegal_op; /* not implemented.  */
                } else {
                    op = (insn >> 20) & 7;
                    switch (op) {
                    case 0: /* msr cpsr.  */
                        if (IS_M(env)) {
                            tmp = load_reg(s, rn);
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_msr(cpu_env, addr, tmp);
                            tcg_temp_free_i32(addr);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        }
                        /* fall through */
                    case 1: /* msr spsr.  */
                        if (IS_M(env))
                            goto illegal_op;
                        tmp = load_reg(s, rn);
                        if (gen_set_psr(s,
                              msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                              op == 1, tmp))
                            goto illegal_op;
                        break;
                    case 2: /* cps, nop-hint.  */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                        }
                        /* Implemented as NOP in user mode.  */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_set_psr_im(s, offset, 0, imm);
                        }
                        break;
                    case 3: /* Special control operations.  */
                        ARCH(7);
                        op = (insn >> 4) & 0xf;
                        switch (op) {
                        case 2: /* clrex */
                            gen_clrex(s);
                            break;
                        case 4: /* dsb */
                        case 5: /* dmb */
                        case 6: /* isb */
                            /* These execute as NOPs.  */
                            break;
                        default:
                            goto illegal_op;
                        }
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.  */
                        tmp = load_reg(s, rn);
                        gen_bx(s, tmp);
                        break;
                    case 5: /* Exception return.  */
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        if (rn != 14 || rd != 15) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rn);
                        tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                        gen_exception_return(s, tmp);
                        break;
                    case 6: /* mrs cpsr.  */
                        tmp = tcg_temp_new_i32();
                        if (IS_M(env)) {
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_mrs(tmp, cpu_env, addr);
                            tcg_temp_free_i32(addr);
                        } else {
                            gen_helper_cpsr_read(tmp);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 7: /* mrs spsr.  */
                        /* Not accessible in user mode.  */
                        if (IS_USER(s) || IS_M(env))
                            goto illegal_op;
                        tmp = load_cpu_field(spsr);
                        store_reg(s, rd, tmp);
                        break;
                    }
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            }
        } else {
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_sbfx(tmp, shift, imm);
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_ubfx(tmp, shift, (1u << imm) - 1);
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
                            tcg_temp_free_i32(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_usat16(tmp, tmp, tmp2);
                            else
                                gen_helper_usat(tmp, tmp, tmp2);
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_ssat16(tmp, tmp, tmp2);
                            else
                                gen_helper_ssat(tmp, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                        }
                    }
                    store_reg(s, rd, tmp);
                }
            } else {
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm <<= 8;
                    imm |= imm << 16;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
        }
        break;
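        /* Worked examples (illustrative, not in the original source) for
         * the modified immediate decode above, with imm8 = 0xab:
         *   shift 0 -> 0x000000ab    shift 1 -> 0x00ab00ab
         *   shift 2 -> 0xab00ab00    shift 3 -> 0xabababab
         * For shift >= 4 the constant is (imm8 | 0x80) rotated right by
         * shift:imm8[7], e.g. shift = 4 with imm8 = 0x60 yields
         * 0xe0 ror 8 = 0xe0000000, with shifter_out making the rotate
         * update the carry flag.
         */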
    case 12: /* Load/store single data item.  */
        {
        int postinc = 0;
        int writeback = 0;
        int user;

        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            break;
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (rs == 15) {
            if (!(insn & (1 << 20))) {
                goto illegal_op;
            }
            if (op != 2) {
                /* Byte or halfword load space with dest == r15 : memory hints.
                 * Catch them early so we don't emit pointless addressing code.
                 * This space is a mix of:
                 *  PLD/PLDW/PLI, which we implement as NOPs (note that unlike
                 *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
                 *     cores)
                 *  unallocated hints, which must be treated as NOPs
                 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
                 *     which is easiest for the decoding logic
                 *  Some space which must UNDEF
                 */
                int op1 = (insn >> 23) & 3;
                int op2 = (insn >> 6) & 0x3f;
                if (op & 2) {
                    goto illegal_op;
                }
                if (rn == 15) {
                    /* UNPREDICTABLE or unallocated hint */
                    return 0;
                }
                if (op1 & 1) {
                    return 0; /* PLD* or unallocated hint */
                }
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return 0; /* PLD* or unallocated hint */
                }
                /* UNDEF space, or an UNPREDICTABLE */
                return 1;
            }
        }
        user = IS_USER(s);
        if (rn == 15) {
            addr = tcg_temp_new_i32();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                imm = insn & 0xff;
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3) {
                        tcg_temp_free_i32(addr);
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    break;
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    user = 1;
                    break;
                case 0x9: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xb: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 0xd: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xf: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    writeback = 1;
                    break;
                default:
                    tcg_temp_free_i32(addr);
                    goto illegal_op;
                }
            }
        }
        if (insn & (1 << 20)) {
            /* Load.  */
            switch (op) {
            case 0: tmp = gen_ld8u(addr, user); break;
            case 4: tmp = gen_ld8s(addr, user); break;
            case 1: tmp = gen_ld16u(addr, user); break;
            case 5: tmp = gen_ld16s(addr, user); break;
            case 2: tmp = gen_ld32(addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            if (rs == 15) {
                gen_bx(s, tmp);
            } else {
                store_reg(s, rs, tmp);
            }
        } else {
            /* Store.  */
            tmp = load_reg(s, rs);
            switch (op) {
            case 0: gen_st8(tmp, addr, user); break;
            case 1: gen_st16(tmp, addr, user); break;
            case 2: gen_st32(tmp, addr, user); break;
            default:
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
        }
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
        }
        break;
    default:
        goto illegal_op;
    }
    return 0;
illegal_op:
    return 1;
}
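
/* Illustrative summary (not in the original source) of the T32 addressing
 * subcodes handled in the load/store single data item decoder above when
 * bit 23 is clear: 0x0 register plus shifted register, 0xc negative
 * immediate, 0xe user-privilege (ldrt/strt style), 0x9/0xb post-indexed
 * and 0xd/0xf pre-indexed with writeback.  A post-indexed
 * "ldr r0, [r1], #4" therefore loads from the unmodified r1 and only
 * afterwards adds the immediate via the postinc path.
 */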
static void disas_thumb_insn(CPUState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL.  */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = lduw_code(s->pc);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3: /* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    ARCH(5);
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                /* already thumb, no need to check */
                gen_bx(s, tmp);
                break;
            }
            break;
        }
        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc(tmp, tmp2);
            else
                gen_helper_adc_cc(tmp, tmp, tmp2);
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
            else
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_helper_sub_cc(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_helper_sub_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_helper_add_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;
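        /* Illustrative reference (not in the original source): the 4-bit
         * op field of this format selects, in order: and, eor, lsl, lsr,
         * asr, adc, sbc, ror, tst, neg, cmp, cmn, orr, mul, bic, mvn -
         * which is why ops 2, 3, 4 and 7 swap their operands above (the
         * shift amount arrives in the first register) and why tst/cmp/cmn
         * set rd to the dummy value 16 so no result is written back.
         */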
    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        tcg_temp_free_i32(addr);
        break;
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;
    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;
        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(env, s, 15, tmp);
            } else {
                tcg_temp_free_i32(tmp);
            }
            break;
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                tcg_temp_free_i32(tmp);
                gen_lookup_tb(s);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;
                gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
            }
            break;

        default:
            goto undef;
        }
        break;
    case 12:
    {
        /* load/store multiple */
        TCGv loaded_var;
        TCGV_UNUSED(loaded_var);
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF);
    return;
undef:
    gen_exception_insn(s, 2, EXCP_UDEF);
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction.  */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUState for every instruction in an IT block.  So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUState now.  This avoids complications trying
     * to do it at the end of the block.  (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn().  The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[].  restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUState is correct in the
     * middle of a TB.
     */
    /* Reset the conditional execution bits immediately.  This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] =
                (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
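
        /* Illustrative note (not in the original source): the IT-state
         * advance above keeps a five-bit shift register in condexec_mask.
         * Each iteration shifts it left and moves the next then/else bit
         * into bit 0 of condexec_cond; once the mask drains to zero the
         * block is over and the condition is cleared.  The advance also
         * runs once for the IT instruction itself, so a maximal
         * four-instruction block takes five shifts to drain.
         */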
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
    uint32_t psr;

    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
}
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}