qemu.git / target-arm / translate.c (blob)
Commit: target-arm: Don't decode RFE or SRS on M profile cores
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "disas/disas.h"
29 #include "tcg-op.h"
30 #include "qemu/log.h"
31
32 #include "helper.h"
33 #define GEN_HELPER 1
34 #include "helper.h"
35
36 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38 /* currently all emulated v5 cores are also v5TE, so don't bother with a separate check */
39 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
40 #define ENABLE_ARCH_5J 0
41 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
45
46 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
47
48 /* internal defines */
49 typedef struct DisasContext {
50 target_ulong pc;
51 int is_jmp;
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
56 /* Thumb-2 conditional execution bits. */
57 int condexec_mask;
58 int condexec_cond;
59 struct TranslationBlock *tb;
60 int singlestep_enabled;
61 int thumb;
62 int bswap_code;
63 #if !defined(CONFIG_USER_ONLY)
64 int user;
65 #endif
66 int vfp_enabled;
67 int vec_len;
68 int vec_stride;
69 } DisasContext;
70
71 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
72
73 #if defined(CONFIG_USER_ONLY)
74 #define IS_USER(s) 1
75 #else
76 #define IS_USER(s) (s->user)
77 #endif
78
79 /* These instructions trap after executing, so defer them until after the
80 conditional execution state has been updated. */
81 #define DISAS_WFI 4
82 #define DISAS_SWI 5
83
84 static TCGv_ptr cpu_env;
85 /* We reuse the same 64-bit temporaries for efficiency. */
86 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
87 static TCGv_i32 cpu_R[16];
88 static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
89 static TCGv_i32 cpu_exclusive_addr;
90 static TCGv_i32 cpu_exclusive_val;
91 static TCGv_i32 cpu_exclusive_high;
92 #ifdef CONFIG_USER_ONLY
93 static TCGv_i32 cpu_exclusive_test;
94 static TCGv_i32 cpu_exclusive_info;
95 #endif
96
97 /* FIXME: These should be removed. */
98 static TCGv cpu_F0s, cpu_F1s;
99 static TCGv_i64 cpu_F0d, cpu_F1d;
100
101 #include "exec/gen-icount.h"
102
103 static const char *regnames[] =
104 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
105 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
106
107 /* initialize TCG globals. */
108 void arm_translate_init(void)
109 {
110 int i;
111
112 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
113
114 for (i = 0; i < 16; i++) {
115 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
116 offsetof(CPUARMState, regs[i]),
117 regnames[i]);
118 }
119 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
120 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
121 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
122 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
123
124 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
126 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUARMState, exclusive_val), "exclusive_val");
128 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
129 offsetof(CPUARMState, exclusive_high), "exclusive_high");
130 #ifdef CONFIG_USER_ONLY
131 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
132 offsetof(CPUARMState, exclusive_test), "exclusive_test");
133 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
134 offsetof(CPUARMState, exclusive_info), "exclusive_info");
135 #endif
136
137 #define GEN_HELPER 2
138 #include "helper.h"
139 }
140
141 static inline TCGv load_cpu_offset(int offset)
142 {
143 TCGv tmp = tcg_temp_new_i32();
144 tcg_gen_ld_i32(tmp, cpu_env, offset);
145 return tmp;
146 }
147
148 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
149
150 static inline void store_cpu_offset(TCGv var, int offset)
151 {
152 tcg_gen_st_i32(var, cpu_env, offset);
153 tcg_temp_free_i32(var);
154 }
155
156 #define store_cpu_field(var, name) \
157 store_cpu_offset(var, offsetof(CPUARMState, name))
158
159 /* Set a variable to the value of a CPU register. */
160 static void load_reg_var(DisasContext *s, TCGv var, int reg)
161 {
162 if (reg == 15) {
163 uint32_t addr;
164 /* Normally, since we have already advanced the PC past this insn, we only need to add one more insn length. */
165 if (s->thumb)
166 addr = (long)s->pc + 2;
167 else
168 addr = (long)s->pc + 4;
169 tcg_gen_movi_i32(var, addr);
170 } else {
171 tcg_gen_mov_i32(var, cpu_R[reg]);
172 }
173 }
174
175 /* Create a new temporary and set it to the value of a CPU register. */
176 static inline TCGv load_reg(DisasContext *s, int reg)
177 {
178 TCGv tmp = tcg_temp_new_i32();
179 load_reg_var(s, tmp, reg);
180 return tmp;
181 }
182
183 /* Set a CPU register. The source must be a temporary and will be
184 marked as dead. */
185 static void store_reg(DisasContext *s, int reg, TCGv var)
186 {
187 if (reg == 15) {
188 tcg_gen_andi_i32(var, var, ~1);
189 s->is_jmp = DISAS_JUMP;
190 }
191 tcg_gen_mov_i32(cpu_R[reg], var);
192 tcg_temp_free_i32(var);
193 }
194
195 /* Value extensions. */
196 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
197 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
198 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
199 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
200
201 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
202 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
203
204
205 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
206 {
207 TCGv tmp_mask = tcg_const_i32(mask);
208 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
209 tcg_temp_free_i32(tmp_mask);
210 }
211 /* Set NZCV flags from the high 4 bits of var. */
212 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
213
214 static void gen_exception(int excp)
215 {
216 TCGv tmp = tcg_temp_new_i32();
217 tcg_gen_movi_i32(tmp, excp);
218 gen_helper_exception(cpu_env, tmp);
219 tcg_temp_free_i32(tmp);
220 }
221
222 static void gen_smul_dual(TCGv a, TCGv b)
223 {
224 TCGv tmp1 = tcg_temp_new_i32();
225 TCGv tmp2 = tcg_temp_new_i32();
226 tcg_gen_ext16s_i32(tmp1, a);
227 tcg_gen_ext16s_i32(tmp2, b);
228 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
229 tcg_temp_free_i32(tmp2);
230 tcg_gen_sari_i32(a, a, 16);
231 tcg_gen_sari_i32(b, b, 16);
232 tcg_gen_mul_i32(b, b, a);
233 tcg_gen_mov_i32(a, tmp1);
234 tcg_temp_free_i32(tmp1);
235 }
236
237 /* Byteswap each halfword. */
238 static void gen_rev16(TCGv var)
239 {
240 TCGv tmp = tcg_temp_new_i32();
241 tcg_gen_shri_i32(tmp, var, 8);
242 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
243 tcg_gen_shli_i32(var, var, 8);
244 tcg_gen_andi_i32(var, var, 0xff00ff00);
245 tcg_gen_or_i32(var, var, tmp);
246 tcg_temp_free_i32(tmp);
247 }
248
249 /* Byteswap low halfword and sign extend. */
250 static void gen_revsh(TCGv var)
251 {
252 tcg_gen_ext16u_i32(var, var);
253 tcg_gen_bswap16_i32(var, var);
254 tcg_gen_ext16s_i32(var, var);
255 }
256
257 /* Unsigned bitfield extract. */
258 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
259 {
260 if (shift)
261 tcg_gen_shri_i32(var, var, shift);
262 tcg_gen_andi_i32(var, var, mask);
263 }
264
265 /* Signed bitfield extract. */
266 static void gen_sbfx(TCGv var, int shift, int width)
267 {
268 uint32_t signbit;
269
270 if (shift)
271 tcg_gen_sari_i32(var, var, shift);
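/* If the field does not reach bit 31, the arithmetic shift above has not
   sign-extended it: mask to the field width and then sign-extend with the
   (x ^ signbit) - signbit trick.  */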
272 if (shift + width < 32) {
273 signbit = 1u << (width - 1);
274 tcg_gen_andi_i32(var, var, (1u << width) - 1);
275 tcg_gen_xori_i32(var, var, signbit);
276 tcg_gen_subi_i32(var, var, signbit);
277 }
278 }
279
280 /* Return (b << 32) + a.  Mark inputs as dead. */
281 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
282 {
283 TCGv_i64 tmp64 = tcg_temp_new_i64();
284
285 tcg_gen_extu_i32_i64(tmp64, b);
286 tcg_temp_free_i32(b);
287 tcg_gen_shli_i64(tmp64, tmp64, 32);
288 tcg_gen_add_i64(a, tmp64, a);
289
290 tcg_temp_free_i64(tmp64);
291 return a;
292 }
293
294 /* Return (b << 32) - a. Mark inputs as dead. */
295 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
296 {
297 TCGv_i64 tmp64 = tcg_temp_new_i64();
298
299 tcg_gen_extu_i32_i64(tmp64, b);
300 tcg_temp_free_i32(b);
301 tcg_gen_shli_i64(tmp64, tmp64, 32);
302 tcg_gen_sub_i64(a, tmp64, a);
303
304 tcg_temp_free_i64(tmp64);
305 return a;
306 }
307
308 /* 32x32->64 multiply. Marks inputs as dead. */
309 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
310 {
311 TCGv lo = tcg_temp_new_i32();
312 TCGv hi = tcg_temp_new_i32();
313 TCGv_i64 ret;
314
315 tcg_gen_mulu2_i32(lo, hi, a, b);
316 tcg_temp_free_i32(a);
317 tcg_temp_free_i32(b);
318
319 ret = tcg_temp_new_i64();
320 tcg_gen_concat_i32_i64(ret, lo, hi);
321 tcg_temp_free(lo);
322 tcg_temp_free(hi);
323
324 return ret;
325 }
326
327 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
328 {
329 TCGv lo = tcg_temp_new_i32();
330 TCGv hi = tcg_temp_new_i32();
331 TCGv_i64 ret;
332
333 tcg_gen_muls2_i32(lo, hi, a, b);
334 tcg_temp_free_i32(a);
335 tcg_temp_free_i32(b);
336
337 ret = tcg_temp_new_i64();
338 tcg_gen_concat_i32_i64(ret, lo, hi);
339 tcg_temp_free(lo);
340 tcg_temp_free(hi);
341
342 return ret;
343 }
344
345 /* Swap low and high halfwords. */
346 static void gen_swap_half(TCGv var)
347 {
348 TCGv tmp = tcg_temp_new_i32();
349 tcg_gen_shri_i32(tmp, var, 16);
350 tcg_gen_shli_i32(var, var, 16);
351 tcg_gen_or_i32(var, var, tmp);
352 tcg_temp_free_i32(tmp);
353 }
354
355 /* Dual 16-bit add.  The result is placed in t0; t1 is marked as dead.
356 tmp = (t0 ^ t1) & 0x8000;
357 t0 &= ~0x8000;
358 t1 &= ~0x8000;
359 t0 = (t0 + t1) ^ tmp;
360 */
361
362 static void gen_add16(TCGv t0, TCGv t1)
363 {
364 TCGv tmp = tcg_temp_new_i32();
365 tcg_gen_xor_i32(tmp, t0, t1);
366 tcg_gen_andi_i32(tmp, tmp, 0x8000);
367 tcg_gen_andi_i32(t0, t0, ~0x8000);
368 tcg_gen_andi_i32(t1, t1, ~0x8000);
369 tcg_gen_add_i32(t0, t0, t1);
370 tcg_gen_xor_i32(t0, t0, tmp);
371 tcg_temp_free_i32(tmp);
372 tcg_temp_free_i32(t1);
373 }
374
375 /* Set CF to the top bit of var. */
376 static void gen_set_CF_bit31(TCGv var)
377 {
378 tcg_gen_shri_i32(cpu_CF, var, 31);
379 }
380
381 /* Set N and Z flags from var. */
382 static inline void gen_logic_CC(TCGv var)
383 {
384 tcg_gen_mov_i32(cpu_NF, var);
385 tcg_gen_mov_i32(cpu_ZF, var);
386 }
387
388 /* T0 += T1 + CF. */
389 static void gen_adc(TCGv t0, TCGv t1)
390 {
391 tcg_gen_add_i32(t0, t0, t1);
392 tcg_gen_add_i32(t0, t0, cpu_CF);
393 }
394
395 /* dest = T0 + T1 + CF. */
396 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
397 {
398 tcg_gen_add_i32(dest, t0, t1);
399 tcg_gen_add_i32(dest, dest, cpu_CF);
400 }
401
402 /* dest = T0 - T1 + CF - 1. */
403 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
404 {
405 tcg_gen_sub_i32(dest, t0, t1);
406 tcg_gen_add_i32(dest, dest, cpu_CF);
407 tcg_gen_subi_i32(dest, dest, 1);
408 }
409
410 /* dest = T0 + T1. Compute C, N, V and Z flags */
411 static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1)
412 {
413 TCGv tmp = tcg_temp_new_i32();
414 tcg_gen_movi_i32(tmp, 0);
415 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
416 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
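/* Overflow iff the operands had the same sign and the result's sign
   differs: V = (result ^ t0) & ~(t0 ^ t1), taken from bit 31.  */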
417 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
418 tcg_gen_xor_i32(tmp, t0, t1);
419 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
420 tcg_temp_free_i32(tmp);
421 tcg_gen_mov_i32(dest, cpu_NF);
422 }
423
424 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
425 static void gen_adc_CC(TCGv dest, TCGv t0, TCGv t1)
426 {
427 TCGv tmp = tcg_temp_new_i32();
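/* Compute t0 + t1 + CF together with the carry out: with a native add2 op
   this is two double-word additions; otherwise do the sum in 64 bits and
   split it into NF (low half) and CF (the carry in bit 32).  */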
428 if (TCG_TARGET_HAS_add2_i32) {
429 tcg_gen_movi_i32(tmp, 0);
430 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
431 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
432 } else {
433 TCGv_i64 q0 = tcg_temp_new_i64();
434 TCGv_i64 q1 = tcg_temp_new_i64();
435 tcg_gen_extu_i32_i64(q0, t0);
436 tcg_gen_extu_i32_i64(q1, t1);
437 tcg_gen_add_i64(q0, q0, q1);
438 tcg_gen_extu_i32_i64(q1, cpu_CF);
439 tcg_gen_add_i64(q0, q0, q1);
440 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
441 tcg_temp_free_i64(q0);
442 tcg_temp_free_i64(q1);
443 }
444 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
445 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
446 tcg_gen_xor_i32(tmp, t0, t1);
447 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
448 tcg_temp_free_i32(tmp);
449 tcg_gen_mov_i32(dest, cpu_NF);
450 }
451
452 /* dest = T0 - T1. Compute C, N, V and Z flags */
453 static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1)
454 {
455 TCGv tmp;
456 tcg_gen_sub_i32(cpu_NF, t0, t1);
457 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
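/* ARM subtraction sets C to NOT borrow, i.e. C = 1 when t0 >= t1 unsigned.  */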
458 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
459 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
460 tmp = tcg_temp_new_i32();
461 tcg_gen_xor_i32(tmp, t0, t1);
462 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
463 tcg_temp_free_i32(tmp);
464 tcg_gen_mov_i32(dest, cpu_NF);
465 }
466
467 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
468 static void gen_sbc_CC(TCGv dest, TCGv t0, TCGv t1)
469 {
470 TCGv tmp = tcg_temp_new_i32();
471 tcg_gen_not_i32(tmp, t1);
472 gen_adc_CC(dest, t0, tmp);
473 tcg_temp_free(tmp);
474 }
475
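/* Variable shift by register: only the low 8 bits of t1 are used, and a
   shift by 32 or more yields 0 (the movcond below substitutes a zero
   source operand in that case).  */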
476 #define GEN_SHIFT(name) \
477 static void gen_##name(TCGv dest, TCGv t0, TCGv t1) \
478 { \
479 TCGv tmp1, tmp2, tmp3; \
480 tmp1 = tcg_temp_new_i32(); \
481 tcg_gen_andi_i32(tmp1, t1, 0xff); \
482 tmp2 = tcg_const_i32(0); \
483 tmp3 = tcg_const_i32(0x1f); \
484 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
485 tcg_temp_free_i32(tmp3); \
486 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
487 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
488 tcg_temp_free_i32(tmp2); \
489 tcg_temp_free_i32(tmp1); \
490 }
491 GEN_SHIFT(shl)
492 GEN_SHIFT(shr)
493 #undef GEN_SHIFT
494
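/* Variable arithmetic shift right: shift amounts of 32 or more are clamped
   to 31, which fills the result with the sign bit.  */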
495 static void gen_sar(TCGv dest, TCGv t0, TCGv t1)
496 {
497 TCGv tmp1, tmp2;
498 tmp1 = tcg_temp_new_i32();
499 tcg_gen_andi_i32(tmp1, t1, 0xff);
500 tmp2 = tcg_const_i32(0x1f);
501 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
502 tcg_temp_free_i32(tmp2);
503 tcg_gen_sar_i32(dest, t0, tmp1);
504 tcg_temp_free_i32(tmp1);
505 }
506
507 static void tcg_gen_abs_i32(TCGv dest, TCGv src)
508 {
509 TCGv c0 = tcg_const_i32(0);
510 TCGv tmp = tcg_temp_new_i32();
511 tcg_gen_neg_i32(tmp, src);
512 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
513 tcg_temp_free_i32(c0);
514 tcg_temp_free_i32(tmp);
515 }
516
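/* Set CF to bit <shift> of var, i.e. the shifter carry-out of an
   immediate shift.  */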
517 static void shifter_out_im(TCGv var, int shift)
518 {
519 if (shift == 0) {
520 tcg_gen_andi_i32(cpu_CF, var, 1);
521 } else {
522 tcg_gen_shri_i32(cpu_CF, var, shift);
523 if (shift != 31) {
524 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
525 }
526 }
527 }
528
529 /* Shift by immediate. Includes special handling for shift == 0. */
530 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
531 {
532 switch (shiftop) {
533 case 0: /* LSL */
534 if (shift != 0) {
535 if (flags)
536 shifter_out_im(var, 32 - shift);
537 tcg_gen_shli_i32(var, var, shift);
538 }
539 break;
540 case 1: /* LSR */
541 if (shift == 0) {
542 if (flags) {
543 tcg_gen_shri_i32(cpu_CF, var, 31);
544 }
545 tcg_gen_movi_i32(var, 0);
546 } else {
547 if (flags)
548 shifter_out_im(var, shift - 1);
549 tcg_gen_shri_i32(var, var, shift);
550 }
551 break;
552 case 2: /* ASR */
553 if (shift == 0)
554 shift = 32;
555 if (flags)
556 shifter_out_im(var, shift - 1);
557 if (shift == 32)
558 shift = 31;
559 tcg_gen_sari_i32(var, var, shift);
560 break;
561 case 3: /* ROR/RRX */
562 if (shift != 0) {
563 if (flags)
564 shifter_out_im(var, shift - 1);
565 tcg_gen_rotri_i32(var, var, shift); break;
566 } else {
567 TCGv tmp = tcg_temp_new_i32();
568 tcg_gen_shli_i32(tmp, cpu_CF, 31);
569 if (flags)
570 shifter_out_im(var, 0);
571 tcg_gen_shri_i32(var, var, 1);
572 tcg_gen_or_i32(var, var, tmp);
573 tcg_temp_free_i32(tmp);
574 }
575 }
576 }
577
578 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
579 TCGv shift, int flags)
580 {
581 if (flags) {
582 switch (shiftop) {
583 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
584 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
585 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
586 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
587 }
588 } else {
589 switch (shiftop) {
590 case 0:
591 gen_shl(var, var, shift);
592 break;
593 case 1:
594 gen_shr(var, var, shift);
595 break;
596 case 2:
597 gen_sar(var, var, shift);
598 break;
599 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
600 tcg_gen_rotr_i32(var, var, shift); break;
601 }
602 }
603 tcg_temp_free_i32(shift);
604 }
605
606 #define PAS_OP(pfx) \
607 switch (op2) { \
608 case 0: gen_pas_helper(glue(pfx,add16)); break; \
609 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
610 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
611 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
612 case 4: gen_pas_helper(glue(pfx,add8)); break; \
613 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
614 }
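/* op1 selects the variant (signed or unsigned modular, saturating, or
   halving) and op2 the individual add/sub operation; the modular signed
   and unsigned forms also update the GE flags, which is why they are
   passed a pointer into CPUARMState.  */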
615 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
616 {
617 TCGv_ptr tmp;
618
619 switch (op1) {
620 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
621 case 1:
622 tmp = tcg_temp_new_ptr();
623 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
624 PAS_OP(s)
625 tcg_temp_free_ptr(tmp);
626 break;
627 case 5:
628 tmp = tcg_temp_new_ptr();
629 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
630 PAS_OP(u)
631 tcg_temp_free_ptr(tmp);
632 break;
633 #undef gen_pas_helper
634 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
635 case 2:
636 PAS_OP(q);
637 break;
638 case 3:
639 PAS_OP(sh);
640 break;
641 case 6:
642 PAS_OP(uq);
643 break;
644 case 7:
645 PAS_OP(uh);
646 break;
647 #undef gen_pas_helper
648 }
649 }
650 #undef PAS_OP
651
652 /* For unknown reasons the ARM and Thumb-2 encodings of these parallel add/sub ops are arbitrarily different. */
653 #define PAS_OP(pfx) \
654 switch (op1) { \
655 case 0: gen_pas_helper(glue(pfx,add8)); break; \
656 case 1: gen_pas_helper(glue(pfx,add16)); break; \
657 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
658 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
659 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
660 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
661 }
662 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
663 {
664 TCGv_ptr tmp;
665
666 switch (op2) {
667 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
668 case 0:
669 tmp = tcg_temp_new_ptr();
670 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
671 PAS_OP(s)
672 tcg_temp_free_ptr(tmp);
673 break;
674 case 4:
675 tmp = tcg_temp_new_ptr();
676 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
677 PAS_OP(u)
678 tcg_temp_free_ptr(tmp);
679 break;
680 #undef gen_pas_helper
681 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
682 case 1:
683 PAS_OP(q);
684 break;
685 case 2:
686 PAS_OP(sh);
687 break;
688 case 5:
689 PAS_OP(uq);
690 break;
691 case 6:
692 PAS_OP(uh);
693 break;
694 #undef gen_pas_helper
695 }
696 }
697 #undef PAS_OP
698
699 static void gen_test_cc(int cc, int label)
700 {
701 TCGv tmp;
702 int inv;
703
704 switch (cc) {
705 case 0: /* eq: Z */
706 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
707 break;
708 case 1: /* ne: !Z */
709 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
710 break;
711 case 2: /* cs: C */
712 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
713 break;
714 case 3: /* cc: !C */
715 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
716 break;
717 case 4: /* mi: N */
718 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
719 break;
720 case 5: /* pl: !N */
721 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
722 break;
723 case 6: /* vs: V */
724 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
725 break;
726 case 7: /* vc: !V */
727 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
728 break;
729 case 8: /* hi: C && !Z */
730 inv = gen_new_label();
731 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
732 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
733 gen_set_label(inv);
734 break;
735 case 9: /* ls: !C || Z */
736 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
737 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
738 break;
739 case 10: /* ge: N == V -> N ^ V == 0 */
740 tmp = tcg_temp_new_i32();
741 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
742 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
743 tcg_temp_free_i32(tmp);
744 break;
745 case 11: /* lt: N != V -> N ^ V != 0 */
746 tmp = tcg_temp_new_i32();
747 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
748 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
749 tcg_temp_free_i32(tmp);
750 break;
751 case 12: /* gt: !Z && N == V */
752 inv = gen_new_label();
753 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
754 tmp = tcg_temp_new_i32();
755 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
756 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
757 tcg_temp_free_i32(tmp);
758 gen_set_label(inv);
759 break;
760 case 13: /* le: Z || N != V */
761 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
762 tmp = tcg_temp_new_i32();
763 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
764 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
765 tcg_temp_free_i32(tmp);
766 break;
767 default:
768 fprintf(stderr, "Bad condition code 0x%x\n", cc);
769 abort();
770 }
771 }
772
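/* 1 for each data-processing opcode that is a logical operation, i.e. one
   whose S form sets N and Z from the result and takes C from the shifter
   rather than from an adder.  */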
773 static const uint8_t table_logic_cc[16] = {
774 1, /* and */
775 1, /* xor */
776 0, /* sub */
777 0, /* rsb */
778 0, /* add */
779 0, /* adc */
780 0, /* sbc */
781 0, /* rsc */
782 1, /* tst */
783 1, /* teq */
784 0, /* cmp */
785 0, /* cmn */
786 1, /* orr */
787 1, /* mov */
788 1, /* bic */
789 1, /* mvn */
790 };
791
792 /* Set PC and Thumb state from an immediate address. */
793 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
794 {
795 TCGv tmp;
796
797 s->is_jmp = DISAS_UPDATE;
798 if (s->thumb != (addr & 1)) {
799 tmp = tcg_temp_new_i32();
800 tcg_gen_movi_i32(tmp, addr & 1);
801 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
802 tcg_temp_free_i32(tmp);
803 }
804 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
805 }
806
807 /* Set PC and Thumb state from var. var is marked as dead. */
808 static inline void gen_bx(DisasContext *s, TCGv var)
809 {
810 s->is_jmp = DISAS_UPDATE;
811 tcg_gen_andi_i32(cpu_R[15], var, ~1);
812 tcg_gen_andi_i32(var, var, 1);
813 store_cpu_field(var, thumb);
814 }
815
816 /* Variant of store_reg which uses branch&exchange logic when storing
817 to r15 in ARM architecture v7 and above. The source must be a temporary
818 and will be marked as dead. */
819 static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
820 int reg, TCGv var)
821 {
822 if (reg == 15 && ENABLE_ARCH_7) {
823 gen_bx(s, var);
824 } else {
825 store_reg(s, reg, var);
826 }
827 }
828
829 /* Variant of store_reg which uses branch&exchange logic when storing
830 * to r15 in ARM architecture v5T and above. This is used for storing
831 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
832 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
833 static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
834 int reg, TCGv var)
835 {
836 if (reg == 15 && ENABLE_ARCH_5) {
837 gen_bx(s, var);
838 } else {
839 store_reg(s, reg, var);
840 }
841 }
842
843 static inline TCGv gen_ld8s(TCGv addr, int index)
844 {
845 TCGv tmp = tcg_temp_new_i32();
846 tcg_gen_qemu_ld8s(tmp, addr, index);
847 return tmp;
848 }
849 static inline TCGv gen_ld8u(TCGv addr, int index)
850 {
851 TCGv tmp = tcg_temp_new_i32();
852 tcg_gen_qemu_ld8u(tmp, addr, index);
853 return tmp;
854 }
855 static inline TCGv gen_ld16s(TCGv addr, int index)
856 {
857 TCGv tmp = tcg_temp_new_i32();
858 tcg_gen_qemu_ld16s(tmp, addr, index);
859 return tmp;
860 }
861 static inline TCGv gen_ld16u(TCGv addr, int index)
862 {
863 TCGv tmp = tcg_temp_new_i32();
864 tcg_gen_qemu_ld16u(tmp, addr, index);
865 return tmp;
866 }
867 static inline TCGv gen_ld32(TCGv addr, int index)
868 {
869 TCGv tmp = tcg_temp_new_i32();
870 tcg_gen_qemu_ld32u(tmp, addr, index);
871 return tmp;
872 }
873 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
874 {
875 TCGv_i64 tmp = tcg_temp_new_i64();
876 tcg_gen_qemu_ld64(tmp, addr, index);
877 return tmp;
878 }
879 static inline void gen_st8(TCGv val, TCGv addr, int index)
880 {
881 tcg_gen_qemu_st8(val, addr, index);
882 tcg_temp_free_i32(val);
883 }
884 static inline void gen_st16(TCGv val, TCGv addr, int index)
885 {
886 tcg_gen_qemu_st16(val, addr, index);
887 tcg_temp_free_i32(val);
888 }
889 static inline void gen_st32(TCGv val, TCGv addr, int index)
890 {
891 tcg_gen_qemu_st32(val, addr, index);
892 tcg_temp_free_i32(val);
893 }
894 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
895 {
896 tcg_gen_qemu_st64(val, addr, index);
897 tcg_temp_free_i64(val);
898 }
899
900 static inline void gen_set_pc_im(uint32_t val)
901 {
902 tcg_gen_movi_i32(cpu_R[15], val);
903 }
904
905 /* Force a TB lookup after an instruction that changes the CPU state. */
906 static inline void gen_lookup_tb(DisasContext *s)
907 {
908 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
909 s->is_jmp = DISAS_UPDATE;
910 }
911
912 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
913 TCGv var)
914 {
915 int val, rm, shift, shiftop;
916 TCGv offset;
917
918 if (!(insn & (1 << 25))) {
919 /* immediate */
920 val = insn & 0xfff;
921 if (!(insn & (1 << 23)))
922 val = -val;
923 if (val != 0)
924 tcg_gen_addi_i32(var, var, val);
925 } else {
926 /* shift/register */
927 rm = (insn) & 0xf;
928 shift = (insn >> 7) & 0x1f;
929 shiftop = (insn >> 5) & 3;
930 offset = load_reg(s, rm);
931 gen_arm_shift_im(offset, shiftop, shift, 0);
932 if (!(insn & (1 << 23)))
933 tcg_gen_sub_i32(var, var, offset);
934 else
935 tcg_gen_add_i32(var, var, offset);
936 tcg_temp_free_i32(offset);
937 }
938 }
939
940 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
941 int extra, TCGv var)
942 {
943 int val, rm;
944 TCGv offset;
945
946 if (insn & (1 << 22)) {
947 /* immediate */
948 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
949 if (!(insn & (1 << 23)))
950 val = -val;
951 val += extra;
952 if (val != 0)
953 tcg_gen_addi_i32(var, var, val);
954 } else {
955 /* register */
956 if (extra)
957 tcg_gen_addi_i32(var, var, extra);
958 rm = (insn) & 0xf;
959 offset = load_reg(s, rm);
960 if (!(insn & (1 << 23)))
961 tcg_gen_sub_i32(var, var, offset);
962 else
963 tcg_gen_add_i32(var, var, offset);
964 tcg_temp_free_i32(offset);
965 }
966 }
967
968 static TCGv_ptr get_fpstatus_ptr(int neon)
969 {
970 TCGv_ptr statusptr = tcg_temp_new_ptr();
971 int offset;
972 if (neon) {
973 offset = offsetof(CPUARMState, vfp.standard_fp_status);
974 } else {
975 offset = offsetof(CPUARMState, vfp.fp_status);
976 }
977 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
978 return statusptr;
979 }
980
981 #define VFP_OP2(name) \
982 static inline void gen_vfp_##name(int dp) \
983 { \
984 TCGv_ptr fpst = get_fpstatus_ptr(0); \
985 if (dp) { \
986 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
987 } else { \
988 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
989 } \
990 tcg_temp_free_ptr(fpst); \
991 }
992
993 VFP_OP2(add)
994 VFP_OP2(sub)
995 VFP_OP2(mul)
996 VFP_OP2(div)
997
998 #undef VFP_OP2
999
1000 static inline void gen_vfp_F1_mul(int dp)
1001 {
1002 /* Like gen_vfp_mul() but put result in F1 */
1003 TCGv_ptr fpst = get_fpstatus_ptr(0);
1004 if (dp) {
1005 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1006 } else {
1007 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1008 }
1009 tcg_temp_free_ptr(fpst);
1010 }
1011
1012 static inline void gen_vfp_F1_neg(int dp)
1013 {
1014 /* Like gen_vfp_neg() but put result in F1 */
1015 if (dp) {
1016 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1017 } else {
1018 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1019 }
1020 }
1021
1022 static inline void gen_vfp_abs(int dp)
1023 {
1024 if (dp)
1025 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1026 else
1027 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1028 }
1029
1030 static inline void gen_vfp_neg(int dp)
1031 {
1032 if (dp)
1033 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1034 else
1035 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1036 }
1037
1038 static inline void gen_vfp_sqrt(int dp)
1039 {
1040 if (dp)
1041 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1042 else
1043 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1044 }
1045
1046 static inline void gen_vfp_cmp(int dp)
1047 {
1048 if (dp)
1049 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1050 else
1051 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1052 }
1053
1054 static inline void gen_vfp_cmpe(int dp)
1055 {
1056 if (dp)
1057 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1058 else
1059 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1060 }
1061
1062 static inline void gen_vfp_F1_ld0(int dp)
1063 {
1064 if (dp)
1065 tcg_gen_movi_i64(cpu_F1d, 0);
1066 else
1067 tcg_gen_movi_i32(cpu_F1s, 0);
1068 }
1069
1070 #define VFP_GEN_ITOF(name) \
1071 static inline void gen_vfp_##name(int dp, int neon) \
1072 { \
1073 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1074 if (dp) { \
1075 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1076 } else { \
1077 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1078 } \
1079 tcg_temp_free_ptr(statusptr); \
1080 }
1081
1082 VFP_GEN_ITOF(uito)
1083 VFP_GEN_ITOF(sito)
1084 #undef VFP_GEN_ITOF
1085
1086 #define VFP_GEN_FTOI(name) \
1087 static inline void gen_vfp_##name(int dp, int neon) \
1088 { \
1089 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1090 if (dp) { \
1091 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1092 } else { \
1093 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1094 } \
1095 tcg_temp_free_ptr(statusptr); \
1096 }
1097
1098 VFP_GEN_FTOI(toui)
1099 VFP_GEN_FTOI(touiz)
1100 VFP_GEN_FTOI(tosi)
1101 VFP_GEN_FTOI(tosiz)
1102 #undef VFP_GEN_FTOI
1103
1104 #define VFP_GEN_FIX(name) \
1105 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1106 { \
1107 TCGv tmp_shift = tcg_const_i32(shift); \
1108 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1109 if (dp) { \
1110 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1111 } else { \
1112 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1113 } \
1114 tcg_temp_free_i32(tmp_shift); \
1115 tcg_temp_free_ptr(statusptr); \
1116 }
1117 VFP_GEN_FIX(tosh)
1118 VFP_GEN_FIX(tosl)
1119 VFP_GEN_FIX(touh)
1120 VFP_GEN_FIX(toul)
1121 VFP_GEN_FIX(shto)
1122 VFP_GEN_FIX(slto)
1123 VFP_GEN_FIX(uhto)
1124 VFP_GEN_FIX(ulto)
1125 #undef VFP_GEN_FIX
1126
1127 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1128 {
1129 if (dp)
1130 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1131 else
1132 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1133 }
1134
1135 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1136 {
1137 if (dp)
1138 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1139 else
1140 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1141 }
1142
1143 static inline long
1144 vfp_reg_offset (int dp, int reg)
1145 {
1146 if (dp)
1147 return offsetof(CPUARMState, vfp.regs[reg]);
1148 else if (reg & 1) {
1149 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1150 + offsetof(CPU_DoubleU, l.upper);
1151 } else {
1152 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1153 + offsetof(CPU_DoubleU, l.lower);
1154 }
1155 }
1156
1157 /* Return the offset of a 32-bit piece of a NEON register.
1158 zero is the least significant end of the register. */
1159 static inline long
1160 neon_reg_offset (int reg, int n)
1161 {
1162 int sreg;
1163 sreg = reg * 2 + n;
1164 return vfp_reg_offset(0, sreg);
1165 }
1166
1167 static TCGv neon_load_reg(int reg, int pass)
1168 {
1169 TCGv tmp = tcg_temp_new_i32();
1170 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1171 return tmp;
1172 }
1173
1174 static void neon_store_reg(int reg, int pass, TCGv var)
1175 {
1176 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1177 tcg_temp_free_i32(var);
1178 }
1179
1180 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1181 {
1182 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1183 }
1184
1185 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1186 {
1187 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1188 }
1189
1190 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1191 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1192 #define tcg_gen_st_f32 tcg_gen_st_i32
1193 #define tcg_gen_st_f64 tcg_gen_st_i64
1194
1195 static inline void gen_mov_F0_vreg(int dp, int reg)
1196 {
1197 if (dp)
1198 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1199 else
1200 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1201 }
1202
1203 static inline void gen_mov_F1_vreg(int dp, int reg)
1204 {
1205 if (dp)
1206 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1207 else
1208 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1209 }
1210
1211 static inline void gen_mov_vreg_F0(int dp, int reg)
1212 {
1213 if (dp)
1214 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1215 else
1216 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1217 }
1218
1219 #define ARM_CP_RW_BIT (1 << 20)
1220
1221 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1222 {
1223 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1224 }
1225
1226 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1227 {
1228 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1229 }
1230
1231 static inline TCGv iwmmxt_load_creg(int reg)
1232 {
1233 TCGv var = tcg_temp_new_i32();
1234 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1235 return var;
1236 }
1237
1238 static inline void iwmmxt_store_creg(int reg, TCGv var)
1239 {
1240 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1241 tcg_temp_free_i32(var);
1242 }
1243
1244 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1245 {
1246 iwmmxt_store_reg(cpu_M0, rn);
1247 }
1248
1249 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1250 {
1251 iwmmxt_load_reg(cpu_M0, rn);
1252 }
1253
1254 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1255 {
1256 iwmmxt_load_reg(cpu_V1, rn);
1257 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1258 }
1259
1260 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1261 {
1262 iwmmxt_load_reg(cpu_V1, rn);
1263 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1264 }
1265
1266 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1267 {
1268 iwmmxt_load_reg(cpu_V1, rn);
1269 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1270 }
1271
1272 #define IWMMXT_OP(name) \
1273 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1274 { \
1275 iwmmxt_load_reg(cpu_V1, rn); \
1276 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1277 }
1278
1279 #define IWMMXT_OP_ENV(name) \
1280 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1281 { \
1282 iwmmxt_load_reg(cpu_V1, rn); \
1283 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1284 }
1285
1286 #define IWMMXT_OP_ENV_SIZE(name) \
1287 IWMMXT_OP_ENV(name##b) \
1288 IWMMXT_OP_ENV(name##w) \
1289 IWMMXT_OP_ENV(name##l)
1290
1291 #define IWMMXT_OP_ENV1(name) \
1292 static inline void gen_op_iwmmxt_##name##_M0(void) \
1293 { \
1294 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1295 }
1296
1297 IWMMXT_OP(maddsq)
1298 IWMMXT_OP(madduq)
1299 IWMMXT_OP(sadb)
1300 IWMMXT_OP(sadw)
1301 IWMMXT_OP(mulslw)
1302 IWMMXT_OP(mulshw)
1303 IWMMXT_OP(mululw)
1304 IWMMXT_OP(muluhw)
1305 IWMMXT_OP(macsw)
1306 IWMMXT_OP(macuw)
1307
1308 IWMMXT_OP_ENV_SIZE(unpackl)
1309 IWMMXT_OP_ENV_SIZE(unpackh)
1310
1311 IWMMXT_OP_ENV1(unpacklub)
1312 IWMMXT_OP_ENV1(unpackluw)
1313 IWMMXT_OP_ENV1(unpacklul)
1314 IWMMXT_OP_ENV1(unpackhub)
1315 IWMMXT_OP_ENV1(unpackhuw)
1316 IWMMXT_OP_ENV1(unpackhul)
1317 IWMMXT_OP_ENV1(unpacklsb)
1318 IWMMXT_OP_ENV1(unpacklsw)
1319 IWMMXT_OP_ENV1(unpacklsl)
1320 IWMMXT_OP_ENV1(unpackhsb)
1321 IWMMXT_OP_ENV1(unpackhsw)
1322 IWMMXT_OP_ENV1(unpackhsl)
1323
1324 IWMMXT_OP_ENV_SIZE(cmpeq)
1325 IWMMXT_OP_ENV_SIZE(cmpgtu)
1326 IWMMXT_OP_ENV_SIZE(cmpgts)
1327
1328 IWMMXT_OP_ENV_SIZE(mins)
1329 IWMMXT_OP_ENV_SIZE(minu)
1330 IWMMXT_OP_ENV_SIZE(maxs)
1331 IWMMXT_OP_ENV_SIZE(maxu)
1332
1333 IWMMXT_OP_ENV_SIZE(subn)
1334 IWMMXT_OP_ENV_SIZE(addn)
1335 IWMMXT_OP_ENV_SIZE(subu)
1336 IWMMXT_OP_ENV_SIZE(addu)
1337 IWMMXT_OP_ENV_SIZE(subs)
1338 IWMMXT_OP_ENV_SIZE(adds)
1339
1340 IWMMXT_OP_ENV(avgb0)
1341 IWMMXT_OP_ENV(avgb1)
1342 IWMMXT_OP_ENV(avgw0)
1343 IWMMXT_OP_ENV(avgw1)
1344
1345 IWMMXT_OP(msadb)
1346
1347 IWMMXT_OP_ENV(packuw)
1348 IWMMXT_OP_ENV(packul)
1349 IWMMXT_OP_ENV(packuq)
1350 IWMMXT_OP_ENV(packsw)
1351 IWMMXT_OP_ENV(packsl)
1352 IWMMXT_OP_ENV(packsq)
1353
1354 static void gen_op_iwmmxt_set_mup(void)
1355 {
1356 TCGv tmp;
1357 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1358 tcg_gen_ori_i32(tmp, tmp, 2);
1359 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1360 }
1361
1362 static void gen_op_iwmmxt_set_cup(void)
1363 {
1364 TCGv tmp;
1365 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1366 tcg_gen_ori_i32(tmp, tmp, 1);
1367 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1368 }
1369
1370 static void gen_op_iwmmxt_setpsr_nz(void)
1371 {
1372 TCGv tmp = tcg_temp_new_i32();
1373 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1374 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1375 }
1376
1377 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1378 {
1379 iwmmxt_load_reg(cpu_V1, rn);
1380 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1381 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1382 }
1383
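/* Compute the transfer address for an iwMMXt load/store (pre- or
   post-indexed, with optional base writeback) into dest.  Returns nonzero
   for an unsupported addressing mode.  */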
1384 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1385 {
1386 int rd;
1387 uint32_t offset;
1388 TCGv tmp;
1389
1390 rd = (insn >> 16) & 0xf;
1391 tmp = load_reg(s, rd);
1392
1393 offset = (insn & 0xff) << ((insn >> 7) & 2);
1394 if (insn & (1 << 24)) {
1395 /* Pre indexed */
1396 if (insn & (1 << 23))
1397 tcg_gen_addi_i32(tmp, tmp, offset);
1398 else
1399 tcg_gen_addi_i32(tmp, tmp, -offset);
1400 tcg_gen_mov_i32(dest, tmp);
1401 if (insn & (1 << 21))
1402 store_reg(s, rd, tmp);
1403 else
1404 tcg_temp_free_i32(tmp);
1405 } else if (insn & (1 << 21)) {
1406 /* Post indexed */
1407 tcg_gen_mov_i32(dest, tmp);
1408 if (insn & (1 << 23))
1409 tcg_gen_addi_i32(tmp, tmp, offset);
1410 else
1411 tcg_gen_addi_i32(tmp, tmp, -offset);
1412 store_reg(s, rd, tmp);
1413 } else if (!(insn & (1 << 23)))
1414 return 1;
1415 return 0;
1416 }
1417
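/* Fetch an iwMMXt shift amount into dest: from control register
   wCGR0..wCGR3 when bit 8 of the insn is set, otherwise from the low
   32 bits of wRd; the value is masked with 'mask'.  Returns nonzero for
   an invalid register.  */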
1418 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1419 {
1420 int rd = (insn >> 0) & 0xf;
1421 TCGv tmp;
1422
1423 if (insn & (1 << 8)) {
1424 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1425 return 1;
1426 } else {
1427 tmp = iwmmxt_load_creg(rd);
1428 }
1429 } else {
1430 tmp = tcg_temp_new_i32();
1431 iwmmxt_load_reg(cpu_V0, rd);
1432 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1433 }
1434 tcg_gen_andi_i32(tmp, tmp, mask);
1435 tcg_gen_mov_i32(dest, tmp);
1436 tcg_temp_free_i32(tmp);
1437 return 0;
1438 }
1439
1440 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1441 (i.e. an undefined instruction). */
1442 static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
1443 {
1444 int rd, wrd;
1445 int rdhi, rdlo, rd0, rd1, i;
1446 TCGv addr;
1447 TCGv tmp, tmp2, tmp3;
1448
1449 if ((insn & 0x0e000e00) == 0x0c000000) {
1450 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1451 wrd = insn & 0xf;
1452 rdlo = (insn >> 12) & 0xf;
1453 rdhi = (insn >> 16) & 0xf;
1454 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1455 iwmmxt_load_reg(cpu_V0, wrd);
1456 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1457 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1458 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1459 } else { /* TMCRR */
1460 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1461 iwmmxt_store_reg(cpu_V0, wrd);
1462 gen_op_iwmmxt_set_mup();
1463 }
1464 return 0;
1465 }
1466
1467 wrd = (insn >> 12) & 0xf;
1468 addr = tcg_temp_new_i32();
1469 if (gen_iwmmxt_address(s, insn, addr)) {
1470 tcg_temp_free_i32(addr);
1471 return 1;
1472 }
1473 if (insn & ARM_CP_RW_BIT) {
1474 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1475 tmp = tcg_temp_new_i32();
1476 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1477 iwmmxt_store_creg(wrd, tmp);
1478 } else {
1479 i = 1;
1480 if (insn & (1 << 8)) {
1481 if (insn & (1 << 22)) { /* WLDRD */
1482 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1483 i = 0;
1484 } else { /* WLDRW wRd */
1485 tmp = gen_ld32(addr, IS_USER(s));
1486 }
1487 } else {
1488 if (insn & (1 << 22)) { /* WLDRH */
1489 tmp = gen_ld16u(addr, IS_USER(s));
1490 } else { /* WLDRB */
1491 tmp = gen_ld8u(addr, IS_USER(s));
1492 }
1493 }
1494 if (i) {
1495 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1496 tcg_temp_free_i32(tmp);
1497 }
1498 gen_op_iwmmxt_movq_wRn_M0(wrd);
1499 }
1500 } else {
1501 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1502 tmp = iwmmxt_load_creg(wrd);
1503 gen_st32(tmp, addr, IS_USER(s));
1504 } else {
1505 gen_op_iwmmxt_movq_M0_wRn(wrd);
1506 tmp = tcg_temp_new_i32();
1507 if (insn & (1 << 8)) {
1508 if (insn & (1 << 22)) { /* WSTRD */
1509 tcg_temp_free_i32(tmp);
1510 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1511 } else { /* WSTRW wRd */
1512 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1513 gen_st32(tmp, addr, IS_USER(s));
1514 }
1515 } else {
1516 if (insn & (1 << 22)) { /* WSTRH */
1517 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1518 gen_st16(tmp, addr, IS_USER(s));
1519 } else { /* WSTRB */
1520 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1521 gen_st8(tmp, addr, IS_USER(s));
1522 }
1523 }
1524 }
1525 }
1526 tcg_temp_free_i32(addr);
1527 return 0;
1528 }
1529
1530 if ((insn & 0x0f000000) != 0x0e000000)
1531 return 1;
1532
1533 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1534 case 0x000: /* WOR */
1535 wrd = (insn >> 12) & 0xf;
1536 rd0 = (insn >> 0) & 0xf;
1537 rd1 = (insn >> 16) & 0xf;
1538 gen_op_iwmmxt_movq_M0_wRn(rd0);
1539 gen_op_iwmmxt_orq_M0_wRn(rd1);
1540 gen_op_iwmmxt_setpsr_nz();
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1544 break;
1545 case 0x011: /* TMCR */
1546 if (insn & 0xf)
1547 return 1;
1548 rd = (insn >> 12) & 0xf;
1549 wrd = (insn >> 16) & 0xf;
1550 switch (wrd) {
1551 case ARM_IWMMXT_wCID:
1552 case ARM_IWMMXT_wCASF:
1553 break;
1554 case ARM_IWMMXT_wCon:
1555 gen_op_iwmmxt_set_cup();
1556 /* Fall through. */
1557 case ARM_IWMMXT_wCSSF:
1558 tmp = iwmmxt_load_creg(wrd);
1559 tmp2 = load_reg(s, rd);
1560 tcg_gen_andc_i32(tmp, tmp, tmp2);
1561 tcg_temp_free_i32(tmp2);
1562 iwmmxt_store_creg(wrd, tmp);
1563 break;
1564 case ARM_IWMMXT_wCGR0:
1565 case ARM_IWMMXT_wCGR1:
1566 case ARM_IWMMXT_wCGR2:
1567 case ARM_IWMMXT_wCGR3:
1568 gen_op_iwmmxt_set_cup();
1569 tmp = load_reg(s, rd);
1570 iwmmxt_store_creg(wrd, tmp);
1571 break;
1572 default:
1573 return 1;
1574 }
1575 break;
1576 case 0x100: /* WXOR */
1577 wrd = (insn >> 12) & 0xf;
1578 rd0 = (insn >> 0) & 0xf;
1579 rd1 = (insn >> 16) & 0xf;
1580 gen_op_iwmmxt_movq_M0_wRn(rd0);
1581 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1582 gen_op_iwmmxt_setpsr_nz();
1583 gen_op_iwmmxt_movq_wRn_M0(wrd);
1584 gen_op_iwmmxt_set_mup();
1585 gen_op_iwmmxt_set_cup();
1586 break;
1587 case 0x111: /* TMRC */
1588 if (insn & 0xf)
1589 return 1;
1590 rd = (insn >> 12) & 0xf;
1591 wrd = (insn >> 16) & 0xf;
1592 tmp = iwmmxt_load_creg(wrd);
1593 store_reg(s, rd, tmp);
1594 break;
1595 case 0x300: /* WANDN */
1596 wrd = (insn >> 12) & 0xf;
1597 rd0 = (insn >> 0) & 0xf;
1598 rd1 = (insn >> 16) & 0xf;
1599 gen_op_iwmmxt_movq_M0_wRn(rd0);
1600 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1601 gen_op_iwmmxt_andq_M0_wRn(rd1);
1602 gen_op_iwmmxt_setpsr_nz();
1603 gen_op_iwmmxt_movq_wRn_M0(wrd);
1604 gen_op_iwmmxt_set_mup();
1605 gen_op_iwmmxt_set_cup();
1606 break;
1607 case 0x200: /* WAND */
1608 wrd = (insn >> 12) & 0xf;
1609 rd0 = (insn >> 0) & 0xf;
1610 rd1 = (insn >> 16) & 0xf;
1611 gen_op_iwmmxt_movq_M0_wRn(rd0);
1612 gen_op_iwmmxt_andq_M0_wRn(rd1);
1613 gen_op_iwmmxt_setpsr_nz();
1614 gen_op_iwmmxt_movq_wRn_M0(wrd);
1615 gen_op_iwmmxt_set_mup();
1616 gen_op_iwmmxt_set_cup();
1617 break;
1618 case 0x810: case 0xa10: /* WMADD */
1619 wrd = (insn >> 12) & 0xf;
1620 rd0 = (insn >> 0) & 0xf;
1621 rd1 = (insn >> 16) & 0xf;
1622 gen_op_iwmmxt_movq_M0_wRn(rd0);
1623 if (insn & (1 << 21))
1624 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1625 else
1626 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1627 gen_op_iwmmxt_movq_wRn_M0(wrd);
1628 gen_op_iwmmxt_set_mup();
1629 break;
1630 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1631 wrd = (insn >> 12) & 0xf;
1632 rd0 = (insn >> 16) & 0xf;
1633 rd1 = (insn >> 0) & 0xf;
1634 gen_op_iwmmxt_movq_M0_wRn(rd0);
1635 switch ((insn >> 22) & 3) {
1636 case 0:
1637 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1638 break;
1639 case 1:
1640 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1641 break;
1642 case 2:
1643 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1644 break;
1645 case 3:
1646 return 1;
1647 }
1648 gen_op_iwmmxt_movq_wRn_M0(wrd);
1649 gen_op_iwmmxt_set_mup();
1650 gen_op_iwmmxt_set_cup();
1651 break;
1652 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1653 wrd = (insn >> 12) & 0xf;
1654 rd0 = (insn >> 16) & 0xf;
1655 rd1 = (insn >> 0) & 0xf;
1656 gen_op_iwmmxt_movq_M0_wRn(rd0);
1657 switch ((insn >> 22) & 3) {
1658 case 0:
1659 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1660 break;
1661 case 1:
1662 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1663 break;
1664 case 2:
1665 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1666 break;
1667 case 3:
1668 return 1;
1669 }
1670 gen_op_iwmmxt_movq_wRn_M0(wrd);
1671 gen_op_iwmmxt_set_mup();
1672 gen_op_iwmmxt_set_cup();
1673 break;
1674 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1675 wrd = (insn >> 12) & 0xf;
1676 rd0 = (insn >> 16) & 0xf;
1677 rd1 = (insn >> 0) & 0xf;
1678 gen_op_iwmmxt_movq_M0_wRn(rd0);
1679 if (insn & (1 << 22))
1680 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1681 else
1682 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1683 if (!(insn & (1 << 20)))
1684 gen_op_iwmmxt_addl_M0_wRn(wrd);
1685 gen_op_iwmmxt_movq_wRn_M0(wrd);
1686 gen_op_iwmmxt_set_mup();
1687 break;
1688 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1689 wrd = (insn >> 12) & 0xf;
1690 rd0 = (insn >> 16) & 0xf;
1691 rd1 = (insn >> 0) & 0xf;
1692 gen_op_iwmmxt_movq_M0_wRn(rd0);
1693 if (insn & (1 << 21)) {
1694 if (insn & (1 << 20))
1695 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1696 else
1697 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1698 } else {
1699 if (insn & (1 << 20))
1700 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1701 else
1702 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1703 }
1704 gen_op_iwmmxt_movq_wRn_M0(wrd);
1705 gen_op_iwmmxt_set_mup();
1706 break;
1707 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1708 wrd = (insn >> 12) & 0xf;
1709 rd0 = (insn >> 16) & 0xf;
1710 rd1 = (insn >> 0) & 0xf;
1711 gen_op_iwmmxt_movq_M0_wRn(rd0);
1712 if (insn & (1 << 21))
1713 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1714 else
1715 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1716 if (!(insn & (1 << 20))) {
1717 iwmmxt_load_reg(cpu_V1, wrd);
1718 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1719 }
1720 gen_op_iwmmxt_movq_wRn_M0(wrd);
1721 gen_op_iwmmxt_set_mup();
1722 break;
1723 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1724 wrd = (insn >> 12) & 0xf;
1725 rd0 = (insn >> 16) & 0xf;
1726 rd1 = (insn >> 0) & 0xf;
1727 gen_op_iwmmxt_movq_M0_wRn(rd0);
1728 switch ((insn >> 22) & 3) {
1729 case 0:
1730 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1731 break;
1732 case 1:
1733 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1734 break;
1735 case 2:
1736 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1737 break;
1738 case 3:
1739 return 1;
1740 }
1741 gen_op_iwmmxt_movq_wRn_M0(wrd);
1742 gen_op_iwmmxt_set_mup();
1743 gen_op_iwmmxt_set_cup();
1744 break;
1745 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1746 wrd = (insn >> 12) & 0xf;
1747 rd0 = (insn >> 16) & 0xf;
1748 rd1 = (insn >> 0) & 0xf;
1749 gen_op_iwmmxt_movq_M0_wRn(rd0);
1750 if (insn & (1 << 22)) {
1751 if (insn & (1 << 20))
1752 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1753 else
1754 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1755 } else {
1756 if (insn & (1 << 20))
1757 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1758 else
1759 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1760 }
1761 gen_op_iwmmxt_movq_wRn_M0(wrd);
1762 gen_op_iwmmxt_set_mup();
1763 gen_op_iwmmxt_set_cup();
1764 break;
1765 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1766 wrd = (insn >> 12) & 0xf;
1767 rd0 = (insn >> 16) & 0xf;
1768 rd1 = (insn >> 0) & 0xf;
1769 gen_op_iwmmxt_movq_M0_wRn(rd0);
1770 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1771 tcg_gen_andi_i32(tmp, tmp, 7);
1772 iwmmxt_load_reg(cpu_V1, rd1);
1773 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1774 tcg_temp_free_i32(tmp);
1775 gen_op_iwmmxt_movq_wRn_M0(wrd);
1776 gen_op_iwmmxt_set_mup();
1777 break;
1778 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1779 if (((insn >> 6) & 3) == 3)
1780 return 1;
1781 rd = (insn >> 12) & 0xf;
1782 wrd = (insn >> 16) & 0xf;
1783 tmp = load_reg(s, rd);
1784 gen_op_iwmmxt_movq_M0_wRn(wrd);
1785 switch ((insn >> 6) & 3) {
1786 case 0:
1787 tmp2 = tcg_const_i32(0xff);
1788 tmp3 = tcg_const_i32((insn & 7) << 3);
1789 break;
1790 case 1:
1791 tmp2 = tcg_const_i32(0xffff);
1792 tmp3 = tcg_const_i32((insn & 3) << 4);
1793 break;
1794 case 2:
1795 tmp2 = tcg_const_i32(0xffffffff);
1796 tmp3 = tcg_const_i32((insn & 1) << 5);
1797 break;
1798 default:
1799 TCGV_UNUSED(tmp2);
1800 TCGV_UNUSED(tmp3);
1801 }
1802 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1803 tcg_temp_free(tmp3);
1804 tcg_temp_free(tmp2);
1805 tcg_temp_free_i32(tmp);
1806 gen_op_iwmmxt_movq_wRn_M0(wrd);
1807 gen_op_iwmmxt_set_mup();
1808 break;
1809 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1810 rd = (insn >> 12) & 0xf;
1811 wrd = (insn >> 16) & 0xf;
1812 if (rd == 15 || ((insn >> 22) & 3) == 3)
1813 return 1;
1814 gen_op_iwmmxt_movq_M0_wRn(wrd);
1815 tmp = tcg_temp_new_i32();
1816 switch ((insn >> 22) & 3) {
1817 case 0:
1818 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1819 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1820 if (insn & 8) {
1821 tcg_gen_ext8s_i32(tmp, tmp);
1822 } else {
1823 tcg_gen_andi_i32(tmp, tmp, 0xff);
1824 }
1825 break;
1826 case 1:
1827 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1828 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1829 if (insn & 8) {
1830 tcg_gen_ext16s_i32(tmp, tmp);
1831 } else {
1832 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1833 }
1834 break;
1835 case 2:
1836 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1837 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1838 break;
1839 }
1840 store_reg(s, rd, tmp);
1841 break;
1842 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1843 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1844 return 1;
1845 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1846 switch ((insn >> 22) & 3) {
1847 case 0:
1848 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1849 break;
1850 case 1:
1851 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1852 break;
1853 case 2:
1854 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1855 break;
1856 }
1857 tcg_gen_shli_i32(tmp, tmp, 28);
1858 gen_set_nzcv(tmp);
1859 tcg_temp_free_i32(tmp);
1860 break;
1861 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1862 if (((insn >> 6) & 3) == 3)
1863 return 1;
1864 rd = (insn >> 12) & 0xf;
1865 wrd = (insn >> 16) & 0xf;
1866 tmp = load_reg(s, rd);
1867 switch ((insn >> 6) & 3) {
1868 case 0:
1869 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1870 break;
1871 case 1:
1872 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1873 break;
1874 case 2:
1875 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1876 break;
1877 }
1878 tcg_temp_free_i32(tmp);
1879 gen_op_iwmmxt_movq_wRn_M0(wrd);
1880 gen_op_iwmmxt_set_mup();
1881 break;
1882 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1883 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1884 return 1;
1885 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1886 tmp2 = tcg_temp_new_i32();
1887 tcg_gen_mov_i32(tmp2, tmp);
1888 switch ((insn >> 22) & 3) {
1889 case 0:
1890 for (i = 0; i < 7; i ++) {
1891 tcg_gen_shli_i32(tmp2, tmp2, 4);
1892 tcg_gen_and_i32(tmp, tmp, tmp2);
1893 }
1894 break;
1895 case 1:
1896 for (i = 0; i < 3; i ++) {
1897 tcg_gen_shli_i32(tmp2, tmp2, 8);
1898 tcg_gen_and_i32(tmp, tmp, tmp2);
1899 }
1900 break;
1901 case 2:
1902 tcg_gen_shli_i32(tmp2, tmp2, 16);
1903 tcg_gen_and_i32(tmp, tmp, tmp2);
1904 break;
1905 }
1906 gen_set_nzcv(tmp);
1907 tcg_temp_free_i32(tmp2);
1908 tcg_temp_free_i32(tmp);
1909 break;
1910 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1911 wrd = (insn >> 12) & 0xf;
1912 rd0 = (insn >> 16) & 0xf;
1913 gen_op_iwmmxt_movq_M0_wRn(rd0);
1914 switch ((insn >> 22) & 3) {
1915 case 0:
1916 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1917 break;
1918 case 1:
1919 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1920 break;
1921 case 2:
1922 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1923 break;
1924 case 3:
1925 return 1;
1926 }
1927 gen_op_iwmmxt_movq_wRn_M0(wrd);
1928 gen_op_iwmmxt_set_mup();
1929 break;
1930 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1931 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1932 return 1;
1933 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1934 tmp2 = tcg_temp_new_i32();
1935 tcg_gen_mov_i32(tmp2, tmp);
1936 switch ((insn >> 22) & 3) {
1937 case 0:
1938 for (i = 0; i < 7; i ++) {
1939 tcg_gen_shli_i32(tmp2, tmp2, 4);
1940 tcg_gen_or_i32(tmp, tmp, tmp2);
1941 }
1942 break;
1943 case 1:
1944 for (i = 0; i < 3; i ++) {
1945 tcg_gen_shli_i32(tmp2, tmp2, 8);
1946 tcg_gen_or_i32(tmp, tmp, tmp2);
1947 }
1948 break;
1949 case 2:
1950 tcg_gen_shli_i32(tmp2, tmp2, 16);
1951 tcg_gen_or_i32(tmp, tmp, tmp2);
1952 break;
1953 }
1954 gen_set_nzcv(tmp);
1955 tcg_temp_free_i32(tmp2);
1956 tcg_temp_free_i32(tmp);
1957 break;
1958 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1959 rd = (insn >> 12) & 0xf;
1960 rd0 = (insn >> 16) & 0xf;
1961 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1962 return 1;
1963 gen_op_iwmmxt_movq_M0_wRn(rd0);
1964 tmp = tcg_temp_new_i32();
1965 switch ((insn >> 22) & 3) {
1966 case 0:
1967 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1968 break;
1969 case 1:
1970 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1971 break;
1972 case 2:
1973 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1974 break;
1975 }
1976 store_reg(s, rd, tmp);
1977 break;
1978 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1979 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1980 wrd = (insn >> 12) & 0xf;
1981 rd0 = (insn >> 16) & 0xf;
1982 rd1 = (insn >> 0) & 0xf;
1983 gen_op_iwmmxt_movq_M0_wRn(rd0);
1984 switch ((insn >> 22) & 3) {
1985 case 0:
1986 if (insn & (1 << 21))
1987 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1988 else
1989 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1990 break;
1991 case 1:
1992 if (insn & (1 << 21))
1993 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1994 else
1995 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1996 break;
1997 case 2:
1998 if (insn & (1 << 21))
1999 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2000 else
2001 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2002 break;
2003 case 3:
2004 return 1;
2005 }
2006 gen_op_iwmmxt_movq_wRn_M0(wrd);
2007 gen_op_iwmmxt_set_mup();
2008 gen_op_iwmmxt_set_cup();
2009 break;
2010 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2011 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2012 wrd = (insn >> 12) & 0xf;
2013 rd0 = (insn >> 16) & 0xf;
2014 gen_op_iwmmxt_movq_M0_wRn(rd0);
2015 switch ((insn >> 22) & 3) {
2016 case 0:
2017 if (insn & (1 << 21))
2018 gen_op_iwmmxt_unpacklsb_M0();
2019 else
2020 gen_op_iwmmxt_unpacklub_M0();
2021 break;
2022 case 1:
2023 if (insn & (1 << 21))
2024 gen_op_iwmmxt_unpacklsw_M0();
2025 else
2026 gen_op_iwmmxt_unpackluw_M0();
2027 break;
2028 case 2:
2029 if (insn & (1 << 21))
2030 gen_op_iwmmxt_unpacklsl_M0();
2031 else
2032 gen_op_iwmmxt_unpacklul_M0();
2033 break;
2034 case 3:
2035 return 1;
2036 }
2037 gen_op_iwmmxt_movq_wRn_M0(wrd);
2038 gen_op_iwmmxt_set_mup();
2039 gen_op_iwmmxt_set_cup();
2040 break;
2041 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2042 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2043 wrd = (insn >> 12) & 0xf;
2044 rd0 = (insn >> 16) & 0xf;
2045 gen_op_iwmmxt_movq_M0_wRn(rd0);
2046 switch ((insn >> 22) & 3) {
2047 case 0:
2048 if (insn & (1 << 21))
2049 gen_op_iwmmxt_unpackhsb_M0();
2050 else
2051 gen_op_iwmmxt_unpackhub_M0();
2052 break;
2053 case 1:
2054 if (insn & (1 << 21))
2055 gen_op_iwmmxt_unpackhsw_M0();
2056 else
2057 gen_op_iwmmxt_unpackhuw_M0();
2058 break;
2059 case 2:
2060 if (insn & (1 << 21))
2061 gen_op_iwmmxt_unpackhsl_M0();
2062 else
2063 gen_op_iwmmxt_unpackhul_M0();
2064 break;
2065 case 3:
2066 return 1;
2067 }
2068 gen_op_iwmmxt_movq_wRn_M0(wrd);
2069 gen_op_iwmmxt_set_mup();
2070 gen_op_iwmmxt_set_cup();
2071 break;
2072 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2073 case 0x214: case 0x614: case 0xa14: case 0xe14:
2074 if (((insn >> 22) & 3) == 0)
2075 return 1;
2076 wrd = (insn >> 12) & 0xf;
2077 rd0 = (insn >> 16) & 0xf;
2078 gen_op_iwmmxt_movq_M0_wRn(rd0);
2079 tmp = tcg_temp_new_i32();
2080 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2081 tcg_temp_free_i32(tmp);
2082 return 1;
2083 }
2084 switch ((insn >> 22) & 3) {
2085 case 1:
2086 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2087 break;
2088 case 2:
2089 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2090 break;
2091 case 3:
2092 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2093 break;
2094 }
2095 tcg_temp_free_i32(tmp);
2096 gen_op_iwmmxt_movq_wRn_M0(wrd);
2097 gen_op_iwmmxt_set_mup();
2098 gen_op_iwmmxt_set_cup();
2099 break;
2100 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2101 case 0x014: case 0x414: case 0x814: case 0xc14:
2102 if (((insn >> 22) & 3) == 0)
2103 return 1;
2104 wrd = (insn >> 12) & 0xf;
2105 rd0 = (insn >> 16) & 0xf;
2106 gen_op_iwmmxt_movq_M0_wRn(rd0);
2107 tmp = tcg_temp_new_i32();
2108 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2109 tcg_temp_free_i32(tmp);
2110 return 1;
2111 }
2112 switch ((insn >> 22) & 3) {
2113 case 1:
2114 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2115 break;
2116 case 2:
2117 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2118 break;
2119 case 3:
2120 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2121 break;
2122 }
2123 tcg_temp_free_i32(tmp);
2124 gen_op_iwmmxt_movq_wRn_M0(wrd);
2125 gen_op_iwmmxt_set_mup();
2126 gen_op_iwmmxt_set_cup();
2127 break;
2128 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2129 case 0x114: case 0x514: case 0x914: case 0xd14:
2130 if (((insn >> 22) & 3) == 0)
2131 return 1;
2132 wrd = (insn >> 12) & 0xf;
2133 rd0 = (insn >> 16) & 0xf;
2134 gen_op_iwmmxt_movq_M0_wRn(rd0);
2135 tmp = tcg_temp_new_i32();
2136 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2137 tcg_temp_free_i32(tmp);
2138 return 1;
2139 }
2140 switch ((insn >> 22) & 3) {
2141 case 1:
2142 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2143 break;
2144 case 2:
2145 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2146 break;
2147 case 3:
2148 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2149 break;
2150 }
2151 tcg_temp_free_i32(tmp);
2152 gen_op_iwmmxt_movq_wRn_M0(wrd);
2153 gen_op_iwmmxt_set_mup();
2154 gen_op_iwmmxt_set_cup();
2155 break;
2156 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2157 case 0x314: case 0x714: case 0xb14: case 0xf14:
2158 if (((insn >> 22) & 3) == 0)
2159 return 1;
2160 wrd = (insn >> 12) & 0xf;
2161 rd0 = (insn >> 16) & 0xf;
2162 gen_op_iwmmxt_movq_M0_wRn(rd0);
2163 tmp = tcg_temp_new_i32();
2164 switch ((insn >> 22) & 3) {
2165 case 1:
2166 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2167 tcg_temp_free_i32(tmp);
2168 return 1;
2169 }
2170 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2171 break;
2172 case 2:
2173 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2174 tcg_temp_free_i32(tmp);
2175 return 1;
2176 }
2177 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2178 break;
2179 case 3:
2180 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2181 tcg_temp_free_i32(tmp);
2182 return 1;
2183 }
2184 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2185 break;
2186 }
2187 tcg_temp_free_i32(tmp);
2188 gen_op_iwmmxt_movq_wRn_M0(wrd);
2189 gen_op_iwmmxt_set_mup();
2190 gen_op_iwmmxt_set_cup();
2191 break;
2192 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2193 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2194 wrd = (insn >> 12) & 0xf;
2195 rd0 = (insn >> 16) & 0xf;
2196 rd1 = (insn >> 0) & 0xf;
2197 gen_op_iwmmxt_movq_M0_wRn(rd0);
2198 switch ((insn >> 22) & 3) {
2199 case 0:
2200 if (insn & (1 << 21))
2201 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2202 else
2203 gen_op_iwmmxt_minub_M0_wRn(rd1);
2204 break;
2205 case 1:
2206 if (insn & (1 << 21))
2207 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2208 else
2209 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2210 break;
2211 case 2:
2212 if (insn & (1 << 21))
2213 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2214 else
2215 gen_op_iwmmxt_minul_M0_wRn(rd1);
2216 break;
2217 case 3:
2218 return 1;
2219 }
2220 gen_op_iwmmxt_movq_wRn_M0(wrd);
2221 gen_op_iwmmxt_set_mup();
2222 break;
2223 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2224 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2225 wrd = (insn >> 12) & 0xf;
2226 rd0 = (insn >> 16) & 0xf;
2227 rd1 = (insn >> 0) & 0xf;
2228 gen_op_iwmmxt_movq_M0_wRn(rd0);
2229 switch ((insn >> 22) & 3) {
2230 case 0:
2231 if (insn & (1 << 21))
2232 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2233 else
2234 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2235 break;
2236 case 1:
2237 if (insn & (1 << 21))
2238 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2239 else
2240 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2241 break;
2242 case 2:
2243 if (insn & (1 << 21))
2244 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2245 else
2246 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2247 break;
2248 case 3:
2249 return 1;
2250 }
2251 gen_op_iwmmxt_movq_wRn_M0(wrd);
2252 gen_op_iwmmxt_set_mup();
2253 break;
2254 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2255 case 0x402: case 0x502: case 0x602: case 0x702:
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 rd1 = (insn >> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0);
2260 tmp = tcg_const_i32((insn >> 20) & 3);
2261 iwmmxt_load_reg(cpu_V1, rd1);
2262 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2263 tcg_temp_free(tmp);
2264 gen_op_iwmmxt_movq_wRn_M0(wrd);
2265 gen_op_iwmmxt_set_mup();
2266 break;
2267 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2268 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2269 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2270 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2271 wrd = (insn >> 12) & 0xf;
2272 rd0 = (insn >> 16) & 0xf;
2273 rd1 = (insn >> 0) & 0xf;
2274 gen_op_iwmmxt_movq_M0_wRn(rd0);
2275 switch ((insn >> 20) & 0xf) {
2276 case 0x0:
2277 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2278 break;
2279 case 0x1:
2280 gen_op_iwmmxt_subub_M0_wRn(rd1);
2281 break;
2282 case 0x3:
2283 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2284 break;
2285 case 0x4:
2286 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2287 break;
2288 case 0x5:
2289 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2290 break;
2291 case 0x7:
2292 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2293 break;
2294 case 0x8:
2295 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2296 break;
2297 case 0x9:
2298 gen_op_iwmmxt_subul_M0_wRn(rd1);
2299 break;
2300 case 0xb:
2301 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2302 break;
2303 default:
2304 return 1;
2305 }
2306 gen_op_iwmmxt_movq_wRn_M0(wrd);
2307 gen_op_iwmmxt_set_mup();
2308 gen_op_iwmmxt_set_cup();
2309 break;
2310 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2311 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2312 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2313 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2314 wrd = (insn >> 12) & 0xf;
2315 rd0 = (insn >> 16) & 0xf;
2316 gen_op_iwmmxt_movq_M0_wRn(rd0);
2317 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2318 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2319 tcg_temp_free(tmp);
2320 gen_op_iwmmxt_movq_wRn_M0(wrd);
2321 gen_op_iwmmxt_set_mup();
2322 gen_op_iwmmxt_set_cup();
2323 break;
2324 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2325 case 0x418: case 0x518: case 0x618: case 0x718:
2326 case 0x818: case 0x918: case 0xa18: case 0xb18:
2327 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2328 wrd = (insn >> 12) & 0xf;
2329 rd0 = (insn >> 16) & 0xf;
2330 rd1 = (insn >> 0) & 0xf;
2331 gen_op_iwmmxt_movq_M0_wRn(rd0);
2332 switch ((insn >> 20) & 0xf) {
2333 case 0x0:
2334 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2335 break;
2336 case 0x1:
2337 gen_op_iwmmxt_addub_M0_wRn(rd1);
2338 break;
2339 case 0x3:
2340 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2341 break;
2342 case 0x4:
2343 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2344 break;
2345 case 0x5:
2346 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2347 break;
2348 case 0x7:
2349 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2350 break;
2351 case 0x8:
2352 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2353 break;
2354 case 0x9:
2355 gen_op_iwmmxt_addul_M0_wRn(rd1);
2356 break;
2357 case 0xb:
2358 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2359 break;
2360 default:
2361 return 1;
2362 }
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2366 break;
2367 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2368 case 0x408: case 0x508: case 0x608: case 0x708:
2369 case 0x808: case 0x908: case 0xa08: case 0xb08:
2370 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2371 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2372 return 1;
2373 wrd = (insn >> 12) & 0xf;
2374 rd0 = (insn >> 16) & 0xf;
2375 rd1 = (insn >> 0) & 0xf;
2376 gen_op_iwmmxt_movq_M0_wRn(rd0);
2377 switch ((insn >> 22) & 3) {
2378 case 1:
2379 if (insn & (1 << 21))
2380 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2381 else
2382 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2383 break;
2384 case 2:
2385 if (insn & (1 << 21))
2386 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2387 else
2388 gen_op_iwmmxt_packul_M0_wRn(rd1);
2389 break;
2390 case 3:
2391 if (insn & (1 << 21))
2392 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2393 else
2394 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2395 break;
2396 }
2397 gen_op_iwmmxt_movq_wRn_M0(wrd);
2398 gen_op_iwmmxt_set_mup();
2399 gen_op_iwmmxt_set_cup();
2400 break;
2401 case 0x201: case 0x203: case 0x205: case 0x207:
2402 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2403 case 0x211: case 0x213: case 0x215: case 0x217:
2404 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2405 wrd = (insn >> 5) & 0xf;
2406 rd0 = (insn >> 12) & 0xf;
2407 rd1 = (insn >> 0) & 0xf;
2408 if (rd0 == 0xf || rd1 == 0xf)
2409 return 1;
2410 gen_op_iwmmxt_movq_M0_wRn(wrd);
2411 tmp = load_reg(s, rd0);
2412 tmp2 = load_reg(s, rd1);
2413 switch ((insn >> 16) & 0xf) {
2414 case 0x0: /* TMIA */
2415 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2416 break;
2417 case 0x8: /* TMIAPH */
2418 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2419 break;
2420 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2421 if (insn & (1 << 16))
2422 tcg_gen_shri_i32(tmp, tmp, 16);
2423 if (insn & (1 << 17))
2424 tcg_gen_shri_i32(tmp2, tmp2, 16);
2425 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2426 break;
2427 default:
2428 tcg_temp_free_i32(tmp2);
2429 tcg_temp_free_i32(tmp);
2430 return 1;
2431 }
2432 tcg_temp_free_i32(tmp2);
2433 tcg_temp_free_i32(tmp);
2434 gen_op_iwmmxt_movq_wRn_M0(wrd);
2435 gen_op_iwmmxt_set_mup();
2436 break;
2437 default:
2438 return 1;
2439 }
2440
2441 return 0;
2442 }
2443
2444 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2445    (i.e. an undefined instruction). */
2446 static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2447 {
2448 int acc, rd0, rd1, rdhi, rdlo;
2449 TCGv tmp, tmp2;
2450
2451 if ((insn & 0x0ff00f10) == 0x0e200010) {
2452 /* Multiply with Internal Accumulate Format */
2453 rd0 = (insn >> 12) & 0xf;
2454 rd1 = insn & 0xf;
2455 acc = (insn >> 5) & 7;
2456
2457 if (acc != 0)
2458 return 1;
2459
2460 tmp = load_reg(s, rd0);
2461 tmp2 = load_reg(s, rd1);
2462 switch ((insn >> 16) & 0xf) {
2463 case 0x0: /* MIA */
2464 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2465 break;
2466 case 0x8: /* MIAPH */
2467 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2468 break;
2469 case 0xc: /* MIABB */
2470 case 0xd: /* MIABT */
2471 case 0xe: /* MIATB */
2472 case 0xf: /* MIATT */
2473 if (insn & (1 << 16))
2474 tcg_gen_shri_i32(tmp, tmp, 16);
2475 if (insn & (1 << 17))
2476 tcg_gen_shri_i32(tmp2, tmp2, 16);
2477 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2478 break;
2479 default:
2480 return 1;
2481 }
2482 tcg_temp_free_i32(tmp2);
2483 tcg_temp_free_i32(tmp);
2484
2485 gen_op_iwmmxt_movq_wRn_M0(acc);
2486 return 0;
2487 }
2488
2489 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2490 /* Internal Accumulator Access Format */
2491 rdhi = (insn >> 16) & 0xf;
2492 rdlo = (insn >> 12) & 0xf;
2493 acc = insn & 7;
2494
2495 if (acc != 0)
2496 return 1;
2497
2498 if (insn & ARM_CP_RW_BIT) { /* MRA */
2499 iwmmxt_load_reg(cpu_V0, acc);
2500 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2501 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2502 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2503 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2504 } else { /* MAR */
2505 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2506 iwmmxt_store_reg(cpu_V0, acc);
2507 }
2508 return 0;
2509 }
2510
2511 return 1;
2512 }
2513
2514 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2515 #define VFP_SREG(insn, bigbit, smallbit) \
2516 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2517 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2518 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2519 reg = (((insn) >> (bigbit)) & 0x0f) \
2520 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2521 } else { \
2522 if (insn & (1 << (smallbit))) \
2523 return 1; \
2524 reg = ((insn) >> (bigbit)) & 0x0f; \
2525 }} while (0)
2526
2527 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2528 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2529 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2530 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2531 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2532 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
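/* In effect these macros recover the 5-bit VFP register number from its
 * split encoding: for single-precision the 4-bit field supplies the upper
 * bits and the extra bit is the LSB (e.g. field 0b0101 with the extra bit
 * set is S11); for double-precision the extra bit becomes bit 4, giving
 * D0..D31 on VFP3, while on pre-VFP3 cores a set extra bit would name a
 * register beyond D15 and so UNDEFs.
 */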
2533
2534 /* Move between integer and VFP cores. */
2535 static TCGv gen_vfp_mrs(void)
2536 {
2537 TCGv tmp = tcg_temp_new_i32();
2538 tcg_gen_mov_i32(tmp, cpu_F0s);
2539 return tmp;
2540 }
2541
2542 static void gen_vfp_msr(TCGv tmp)
2543 {
2544 tcg_gen_mov_i32(cpu_F0s, tmp);
2545 tcg_temp_free_i32(tmp);
2546 }
2547
2548 static void gen_neon_dup_u8(TCGv var, int shift)
2549 {
2550 TCGv tmp = tcg_temp_new_i32();
2551 if (shift)
2552 tcg_gen_shri_i32(var, var, shift);
2553 tcg_gen_ext8u_i32(var, var);
2554 tcg_gen_shli_i32(tmp, var, 8);
2555 tcg_gen_or_i32(var, var, tmp);
2556 tcg_gen_shli_i32(tmp, var, 16);
2557 tcg_gen_or_i32(var, var, tmp);
2558 tcg_temp_free_i32(tmp);
2559 }
2560
2561 static void gen_neon_dup_low16(TCGv var)
2562 {
2563 TCGv tmp = tcg_temp_new_i32();
2564 tcg_gen_ext16u_i32(var, var);
2565 tcg_gen_shli_i32(tmp, var, 16);
2566 tcg_gen_or_i32(var, var, tmp);
2567 tcg_temp_free_i32(tmp);
2568 }
2569
2570 static void gen_neon_dup_high16(TCGv var)
2571 {
2572 TCGv tmp = tcg_temp_new_i32();
2573 tcg_gen_andi_i32(var, var, 0xffff0000);
2574 tcg_gen_shri_i32(tmp, var, 16);
2575 tcg_gen_or_i32(var, var, tmp);
2576 tcg_temp_free_i32(tmp);
2577 }
2578
2579 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2580 {
2581 /* Load a single Neon element and replicate into a 32-bit TCG reg */
2582 TCGv tmp;
2583 switch (size) {
2584 case 0:
2585 tmp = gen_ld8u(addr, IS_USER(s));
2586 gen_neon_dup_u8(tmp, 0);
2587 break;
2588 case 1:
2589 tmp = gen_ld16u(addr, IS_USER(s));
2590 gen_neon_dup_low16(tmp);
2591 break;
2592 case 2:
2593 tmp = gen_ld32(addr, IS_USER(s));
2594 break;
2595 default: /* Avoid compiler warnings. */
2596 abort();
2597 }
2598 return tmp;
2599 }
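/* gen_load_and_replicate() backs the "load single element to all lanes"
 * (VLD1..VLD4 to-all-lanes) path further down: it loads one 8, 16 or 32 bit
 * element and smears it across a full 32-bit value so the caller can simply
 * store that word into both halves of each destination D register.
 */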
2600
2601 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2602    (i.e. an undefined instruction). */
2603 static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2604 {
2605 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2606 int dp, veclen;
2607 TCGv addr;
2608 TCGv tmp;
2609 TCGv tmp2;
2610
2611 if (!arm_feature(env, ARM_FEATURE_VFP))
2612 return 1;
2613
2614 if (!s->vfp_enabled) {
2615 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2616 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2617 return 1;
2618 rn = (insn >> 16) & 0xf;
2619 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2620 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2621 return 1;
2622 }
2623 dp = ((insn & 0xf00) == 0xb00);
2624 switch ((insn >> 24) & 0xf) {
2625 case 0xe:
2626 if (insn & (1 << 4)) {
2627 /* single register transfer */
2628 rd = (insn >> 12) & 0xf;
2629 if (dp) {
2630 int size;
2631 int pass;
2632
2633 VFP_DREG_N(rn, insn);
2634 if (insn & 0xf)
2635 return 1;
2636 if (insn & 0x00c00060
2637 && !arm_feature(env, ARM_FEATURE_NEON))
2638 return 1;
2639
2640 pass = (insn >> 21) & 1;
2641 if (insn & (1 << 22)) {
2642 size = 0;
2643 offset = ((insn >> 5) & 3) * 8;
2644 } else if (insn & (1 << 5)) {
2645 size = 1;
2646 offset = (insn & (1 << 6)) ? 16 : 0;
2647 } else {
2648 size = 2;
2649 offset = 0;
2650 }
2651 if (insn & ARM_CP_RW_BIT) {
2652 /* vfp->arm */
2653 tmp = neon_load_reg(rn, pass);
2654 switch (size) {
2655 case 0:
2656 if (offset)
2657 tcg_gen_shri_i32(tmp, tmp, offset);
2658 if (insn & (1 << 23))
2659 gen_uxtb(tmp);
2660 else
2661 gen_sxtb(tmp);
2662 break;
2663 case 1:
2664 if (insn & (1 << 23)) {
2665 if (offset) {
2666 tcg_gen_shri_i32(tmp, tmp, 16);
2667 } else {
2668 gen_uxth(tmp);
2669 }
2670 } else {
2671 if (offset) {
2672 tcg_gen_sari_i32(tmp, tmp, 16);
2673 } else {
2674 gen_sxth(tmp);
2675 }
2676 }
2677 break;
2678 case 2:
2679 break;
2680 }
2681 store_reg(s, rd, tmp);
2682 } else {
2683 /* arm->vfp */
2684 tmp = load_reg(s, rd);
2685 if (insn & (1 << 23)) {
2686 /* VDUP */
2687 if (size == 0) {
2688 gen_neon_dup_u8(tmp, 0);
2689 } else if (size == 1) {
2690 gen_neon_dup_low16(tmp);
2691 }
2692 for (n = 0; n <= pass * 2; n++) {
2693 tmp2 = tcg_temp_new_i32();
2694 tcg_gen_mov_i32(tmp2, tmp);
2695 neon_store_reg(rn, n, tmp2);
2696 }
2697 neon_store_reg(rn, n, tmp);
2698 } else {
2699 /* VMOV */
2700 switch (size) {
2701 case 0:
2702 tmp2 = neon_load_reg(rn, pass);
2703 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
2704 tcg_temp_free_i32(tmp2);
2705 break;
2706 case 1:
2707 tmp2 = neon_load_reg(rn, pass);
2708 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
2709 tcg_temp_free_i32(tmp2);
2710 break;
2711 case 2:
2712 break;
2713 }
2714 neon_store_reg(rn, pass, tmp);
2715 }
2716 }
2717 } else { /* !dp */
2718 if ((insn & 0x6f) != 0x00)
2719 return 1;
2720 rn = VFP_SREG_N(insn);
2721 if (insn & ARM_CP_RW_BIT) {
2722 /* vfp->arm */
2723 if (insn & (1 << 21)) {
2724 /* system register */
2725 rn >>= 1;
2726
2727 switch (rn) {
2728 case ARM_VFP_FPSID:
2729 /* VFP2 allows access to FPSID from userspace.
2730 VFP3 restricts all id registers to privileged
2731 accesses. */
2732 if (IS_USER(s)
2733 && arm_feature(env, ARM_FEATURE_VFP3))
2734 return 1;
2735 tmp = load_cpu_field(vfp.xregs[rn]);
2736 break;
2737 case ARM_VFP_FPEXC:
2738 if (IS_USER(s))
2739 return 1;
2740 tmp = load_cpu_field(vfp.xregs[rn]);
2741 break;
2742 case ARM_VFP_FPINST:
2743 case ARM_VFP_FPINST2:
2744 /* Not present in VFP3. */
2745 if (IS_USER(s)
2746 || arm_feature(env, ARM_FEATURE_VFP3))
2747 return 1;
2748 tmp = load_cpu_field(vfp.xregs[rn]);
2749 break;
2750 case ARM_VFP_FPSCR:
2751 if (rd == 15) {
2752 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2753 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2754 } else {
2755 tmp = tcg_temp_new_i32();
2756 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2757 }
2758 break;
2759 case ARM_VFP_MVFR0:
2760 case ARM_VFP_MVFR1:
2761 if (IS_USER(s)
2762 || !arm_feature(env, ARM_FEATURE_MVFR))
2763 return 1;
2764 tmp = load_cpu_field(vfp.xregs[rn]);
2765 break;
2766 default:
2767 return 1;
2768 }
2769 } else {
2770 gen_mov_F0_vreg(0, rn);
2771 tmp = gen_vfp_mrs();
2772 }
2773 if (rd == 15) {
2774 /* Set the 4 flag bits in the CPSR. */
2775 gen_set_nzcv(tmp);
2776 tcg_temp_free_i32(tmp);
2777 } else {
2778 store_reg(s, rd, tmp);
2779 }
2780 } else {
2781 /* arm->vfp */
2782 if (insn & (1 << 21)) {
2783 rn >>= 1;
2784 /* system register */
2785 switch (rn) {
2786 case ARM_VFP_FPSID:
2787 case ARM_VFP_MVFR0:
2788 case ARM_VFP_MVFR1:
2789 /* Writes are ignored. */
2790 break;
2791 case ARM_VFP_FPSCR:
2792 tmp = load_reg(s, rd);
2793 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2794 tcg_temp_free_i32(tmp);
2795 gen_lookup_tb(s);
2796 break;
2797 case ARM_VFP_FPEXC:
2798 if (IS_USER(s))
2799 return 1;
2800 /* TODO: VFP subarchitecture support.
2801 * For now, keep the EN bit only */
2802 tmp = load_reg(s, rd);
2803 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2804 store_cpu_field(tmp, vfp.xregs[rn]);
2805 gen_lookup_tb(s);
2806 break;
2807 case ARM_VFP_FPINST:
2808 case ARM_VFP_FPINST2:
2809 tmp = load_reg(s, rd);
2810 store_cpu_field(tmp, vfp.xregs[rn]);
2811 break;
2812 default:
2813 return 1;
2814 }
2815 } else {
2816 tmp = load_reg(s, rd);
2817 gen_vfp_msr(tmp);
2818 gen_mov_vreg_F0(0, rn);
2819 }
2820 }
2821 }
2822 } else {
2823 /* data processing */
2824 /* The opcode is in bits 23, 21, 20 and 6. */
2825 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2826 if (dp) {
2827 if (op == 15) {
2828 /* rn is opcode */
2829 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2830 } else {
2831 /* rn is register number */
2832 VFP_DREG_N(rn, insn);
2833 }
2834
2835 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2836 /* Integer or single precision destination. */
2837 rd = VFP_SREG_D(insn);
2838 } else {
2839 VFP_DREG_D(rd, insn);
2840 }
2841 if (op == 15 &&
2842 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2843 /* VCVT from int is always from S reg regardless of dp bit.
2844 * VCVT with immediate frac_bits has same format as SREG_M
2845 */
2846 rm = VFP_SREG_M(insn);
2847 } else {
2848 VFP_DREG_M(rm, insn);
2849 }
2850 } else {
2851 rn = VFP_SREG_N(insn);
2852 if (op == 15 && rn == 15) {
2853 /* Double precision destination. */
2854 VFP_DREG_D(rd, insn);
2855 } else {
2856 rd = VFP_SREG_D(insn);
2857 }
2858 /* NB that we implicitly rely on the encoding for the frac_bits
2859 * in VCVT of fixed to float being the same as that of an SREG_M
2860 */
2861 rm = VFP_SREG_M(insn);
2862 }
2863
2864 veclen = s->vec_len;
2865 if (op == 15 && rn > 3)
2866 veclen = 0;
2867
2868 /* Shut up compiler warnings. */
2869 delta_m = 0;
2870 delta_d = 0;
2871 bank_mask = 0;
2872
2873 if (veclen > 0) {
2874 if (dp)
2875 bank_mask = 0xc;
2876 else
2877 bank_mask = 0x18;
2878
2879 /* Figure out what type of vector operation this is. */
2880 if ((rd & bank_mask) == 0) {
2881 /* scalar */
2882 veclen = 0;
2883 } else {
2884 if (dp)
2885 delta_d = (s->vec_stride >> 1) + 1;
2886 else
2887 delta_d = s->vec_stride + 1;
2888
2889 if ((rm & bank_mask) == 0) {
2890 /* mixed scalar/vector */
2891 delta_m = 0;
2892 } else {
2893 /* vector */
2894 delta_m = delta_d;
2895 }
2896 }
2897 }
2898
2899 /* Load the initial operands. */
2900 if (op == 15) {
2901 switch (rn) {
2902 case 16:
2903 case 17:
2904 /* Integer source */
2905 gen_mov_F0_vreg(0, rm);
2906 break;
2907 case 8:
2908 case 9:
2909 /* Compare */
2910 gen_mov_F0_vreg(dp, rd);
2911 gen_mov_F1_vreg(dp, rm);
2912 break;
2913 case 10:
2914 case 11:
2915 /* Compare with zero */
2916 gen_mov_F0_vreg(dp, rd);
2917 gen_vfp_F1_ld0(dp);
2918 break;
2919 case 20:
2920 case 21:
2921 case 22:
2922 case 23:
2923 case 28:
2924 case 29:
2925 case 30:
2926 case 31:
2927 /* Source and destination the same. */
2928 gen_mov_F0_vreg(dp, rd);
2929 break;
2930 case 4:
2931 case 5:
2932 case 6:
2933 case 7:
2934 /* VCVTB, VCVTT: only present with the halfprec extension,
2935 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2936 */
2937 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2938 return 1;
2939 }
2940 /* Otherwise fall through */
2941 default:
2942 /* One source operand. */
2943 gen_mov_F0_vreg(dp, rm);
2944 break;
2945 }
2946 } else {
2947 /* Two source operands. */
2948 gen_mov_F0_vreg(dp, rn);
2949 gen_mov_F1_vreg(dp, rm);
2950 }
2951
2952 for (;;) {
2953 /* Perform the calculation. */
2954 switch (op) {
2955 case 0: /* VMLA: fd + (fn * fm) */
2956 /* Note that order of inputs to the add matters for NaNs */
2957 gen_vfp_F1_mul(dp);
2958 gen_mov_F0_vreg(dp, rd);
2959 gen_vfp_add(dp);
2960 break;
2961 case 1: /* VMLS: fd + -(fn * fm) */
2962 gen_vfp_mul(dp);
2963 gen_vfp_F1_neg(dp);
2964 gen_mov_F0_vreg(dp, rd);
2965 gen_vfp_add(dp);
2966 break;
2967 case 2: /* VNMLS: -fd + (fn * fm) */
2968 /* Note that it isn't valid to replace (-A + B) with (B - A)
2969 * or similar plausible looking simplifications
2970 * because this will give wrong results for NaNs.
2971 */
2972 gen_vfp_F1_mul(dp);
2973 gen_mov_F0_vreg(dp, rd);
2974 gen_vfp_neg(dp);
2975 gen_vfp_add(dp);
2976 break;
2977 case 3: /* VNMLA: -fd + -(fn * fm) */
2978 gen_vfp_mul(dp);
2979 gen_vfp_F1_neg(dp);
2980 gen_mov_F0_vreg(dp, rd);
2981 gen_vfp_neg(dp);
2982 gen_vfp_add(dp);
2983 break;
2984 case 4: /* mul: fn * fm */
2985 gen_vfp_mul(dp);
2986 break;
2987 case 5: /* nmul: -(fn * fm) */
2988 gen_vfp_mul(dp);
2989 gen_vfp_neg(dp);
2990 break;
2991 case 6: /* add: fn + fm */
2992 gen_vfp_add(dp);
2993 break;
2994 case 7: /* sub: fn - fm */
2995 gen_vfp_sub(dp);
2996 break;
2997 case 8: /* div: fn / fm */
2998 gen_vfp_div(dp);
2999 break;
3000 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3001 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3002 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3003 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3004 /* These are fused multiply-add, and must be done as one
3005 * floating point operation with no rounding between the
3006 * multiplication and addition steps.
3007 * NB that doing the negations here as separate steps is
3008  * correct: an input NaN should come out with its sign bit
3009  * flipped if it is a negated input.
3010 */
3011 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3012 return 1;
3013 }
3014 if (dp) {
3015 TCGv_ptr fpst;
3016 TCGv_i64 frd;
3017 if (op & 1) {
3018 /* VFNMS, VFMS */
3019 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3020 }
3021 frd = tcg_temp_new_i64();
3022 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3023 if (op & 2) {
3024 /* VFNMA, VFNMS */
3025 gen_helper_vfp_negd(frd, frd);
3026 }
3027 fpst = get_fpstatus_ptr(0);
3028 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3029 cpu_F1d, frd, fpst);
3030 tcg_temp_free_ptr(fpst);
3031 tcg_temp_free_i64(frd);
3032 } else {
3033 TCGv_ptr fpst;
3034 TCGv_i32 frd;
3035 if (op & 1) {
3036 /* VFNMS, VFMS */
3037 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3038 }
3039 frd = tcg_temp_new_i32();
3040 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3041 if (op & 2) {
3042 gen_helper_vfp_negs(frd, frd);
3043 }
3044 fpst = get_fpstatus_ptr(0);
3045 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3046 cpu_F1s, frd, fpst);
3047 tcg_temp_free_ptr(fpst);
3048 tcg_temp_free_i32(frd);
3049 }
3050 break;
3051 case 14: /* fconst */
3052 if (!arm_feature(env, ARM_FEATURE_VFP3))
3053 return 1;
3054
3055 n = (insn << 12) & 0x80000000;
3056 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3057 if (dp) {
3058 if (i & 0x40)
3059 i |= 0x3f80;
3060 else
3061 i |= 0x4000;
3062 n |= i << 16;
3063 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3064 } else {
3065 if (i & 0x40)
3066 i |= 0x780;
3067 else
3068 i |= 0x800;
3069 n |= i << 19;
3070 tcg_gen_movi_i32(cpu_F0s, n);
3071 }
3072 break;
3073 case 15: /* extension space */
3074 switch (rn) {
3075 case 0: /* cpy */
3076 /* no-op */
3077 break;
3078 case 1: /* abs */
3079 gen_vfp_abs(dp);
3080 break;
3081 case 2: /* neg */
3082 gen_vfp_neg(dp);
3083 break;
3084 case 3: /* sqrt */
3085 gen_vfp_sqrt(dp);
3086 break;
3087 case 4: /* vcvtb.f32.f16 */
3088 tmp = gen_vfp_mrs();
3089 tcg_gen_ext16u_i32(tmp, tmp);
3090 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3091 tcg_temp_free_i32(tmp);
3092 break;
3093 case 5: /* vcvtt.f32.f16 */
3094 tmp = gen_vfp_mrs();
3095 tcg_gen_shri_i32(tmp, tmp, 16);
3096 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3097 tcg_temp_free_i32(tmp);
3098 break;
3099 case 6: /* vcvtb.f16.f32 */
3100 tmp = tcg_temp_new_i32();
3101 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3102 gen_mov_F0_vreg(0, rd);
3103 tmp2 = gen_vfp_mrs();
3104 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3105 tcg_gen_or_i32(tmp, tmp, tmp2);
3106 tcg_temp_free_i32(tmp2);
3107 gen_vfp_msr(tmp);
3108 break;
3109 case 7: /* vcvtt.f16.f32 */
3110 tmp = tcg_temp_new_i32();
3111 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3112 tcg_gen_shli_i32(tmp, tmp, 16);
3113 gen_mov_F0_vreg(0, rd);
3114 tmp2 = gen_vfp_mrs();
3115 tcg_gen_ext16u_i32(tmp2, tmp2);
3116 tcg_gen_or_i32(tmp, tmp, tmp2);
3117 tcg_temp_free_i32(tmp2);
3118 gen_vfp_msr(tmp);
3119 break;
3120 case 8: /* cmp */
3121 gen_vfp_cmp(dp);
3122 break;
3123 case 9: /* cmpe */
3124 gen_vfp_cmpe(dp);
3125 break;
3126 case 10: /* cmpz */
3127 gen_vfp_cmp(dp);
3128 break;
3129 case 11: /* cmpez */
3130 gen_vfp_F1_ld0(dp);
3131 gen_vfp_cmpe(dp);
3132 break;
3133 case 15: /* single<->double conversion */
3134 if (dp)
3135 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3136 else
3137 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3138 break;
3139 case 16: /* fuito */
3140 gen_vfp_uito(dp, 0);
3141 break;
3142 case 17: /* fsito */
3143 gen_vfp_sito(dp, 0);
3144 break;
3145 case 20: /* fshto */
3146 if (!arm_feature(env, ARM_FEATURE_VFP3))
3147 return 1;
3148 gen_vfp_shto(dp, 16 - rm, 0);
3149 break;
3150 case 21: /* fslto */
3151 if (!arm_feature(env, ARM_FEATURE_VFP3))
3152 return 1;
3153 gen_vfp_slto(dp, 32 - rm, 0);
3154 break;
3155 case 22: /* fuhto */
3156 if (!arm_feature(env, ARM_FEATURE_VFP3))
3157 return 1;
3158 gen_vfp_uhto(dp, 16 - rm, 0);
3159 break;
3160 case 23: /* fulto */
3161 if (!arm_feature(env, ARM_FEATURE_VFP3))
3162 return 1;
3163 gen_vfp_ulto(dp, 32 - rm, 0);
3164 break;
3165 case 24: /* ftoui */
3166 gen_vfp_toui(dp, 0);
3167 break;
3168 case 25: /* ftouiz */
3169 gen_vfp_touiz(dp, 0);
3170 break;
3171 case 26: /* ftosi */
3172 gen_vfp_tosi(dp, 0);
3173 break;
3174 case 27: /* ftosiz */
3175 gen_vfp_tosiz(dp, 0);
3176 break;
3177 case 28: /* ftosh */
3178 if (!arm_feature(env, ARM_FEATURE_VFP3))
3179 return 1;
3180 gen_vfp_tosh(dp, 16 - rm, 0);
3181 break;
3182 case 29: /* ftosl */
3183 if (!arm_feature(env, ARM_FEATURE_VFP3))
3184 return 1;
3185 gen_vfp_tosl(dp, 32 - rm, 0);
3186 break;
3187 case 30: /* ftouh */
3188 if (!arm_feature(env, ARM_FEATURE_VFP3))
3189 return 1;
3190 gen_vfp_touh(dp, 16 - rm, 0);
3191 break;
3192 case 31: /* ftoul */
3193 if (!arm_feature(env, ARM_FEATURE_VFP3))
3194 return 1;
3195 gen_vfp_toul(dp, 32 - rm, 0);
3196 break;
3197 default: /* undefined */
3198 return 1;
3199 }
3200 break;
3201 default: /* undefined */
3202 return 1;
3203 }
3204
3205 /* Write back the result. */
3206 if (op == 15 && (rn >= 8 && rn <= 11))
3207 ; /* Comparison, do nothing. */
3208 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3209 /* VCVT double to int: always integer result. */
3210 gen_mov_vreg_F0(0, rd);
3211 else if (op == 15 && rn == 15)
3212 /* conversion */
3213 gen_mov_vreg_F0(!dp, rd);
3214 else
3215 gen_mov_vreg_F0(dp, rd);
3216
3217 /* break out of the loop if we have finished */
3218 if (veclen == 0)
3219 break;
3220
3221 if (op == 15 && delta_m == 0) {
3222 /* single source one-many */
3223 while (veclen--) {
3224 rd = ((rd + delta_d) & (bank_mask - 1))
3225 | (rd & bank_mask);
3226 gen_mov_vreg_F0(dp, rd);
3227 }
3228 break;
3229 }
3230 /* Setup the next operands. */
3231 veclen--;
3232 rd = ((rd + delta_d) & (bank_mask - 1))
3233 | (rd & bank_mask);
3234
3235 if (op == 15) {
3236 /* One source operand. */
3237 rm = ((rm + delta_m) & (bank_mask - 1))
3238 | (rm & bank_mask);
3239 gen_mov_F0_vreg(dp, rm);
3240 } else {
3241 /* Two source operands. */
3242 rn = ((rn + delta_d) & (bank_mask - 1))
3243 | (rn & bank_mask);
3244 gen_mov_F0_vreg(dp, rn);
3245 if (delta_m) {
3246 rm = ((rm + delta_m) & (bank_mask - 1))
3247 | (rm & bank_mask);
3248 gen_mov_F1_vreg(dp, rm);
3249 }
3250 }
3251 }
3252 }
3253 break;
3254 case 0xc:
3255 case 0xd:
3256 if ((insn & 0x03e00000) == 0x00400000) {
3257 /* two-register transfer */
3258 rn = (insn >> 16) & 0xf;
3259 rd = (insn >> 12) & 0xf;
3260 if (dp) {
3261 VFP_DREG_M(rm, insn);
3262 } else {
3263 rm = VFP_SREG_M(insn);
3264 }
3265
3266 if (insn & ARM_CP_RW_BIT) {
3267 /* vfp->arm */
3268 if (dp) {
3269 gen_mov_F0_vreg(0, rm * 2);
3270 tmp = gen_vfp_mrs();
3271 store_reg(s, rd, tmp);
3272 gen_mov_F0_vreg(0, rm * 2 + 1);
3273 tmp = gen_vfp_mrs();
3274 store_reg(s, rn, tmp);
3275 } else {
3276 gen_mov_F0_vreg(0, rm);
3277 tmp = gen_vfp_mrs();
3278 store_reg(s, rd, tmp);
3279 gen_mov_F0_vreg(0, rm + 1);
3280 tmp = gen_vfp_mrs();
3281 store_reg(s, rn, tmp);
3282 }
3283 } else {
3284 /* arm->vfp */
3285 if (dp) {
3286 tmp = load_reg(s, rd);
3287 gen_vfp_msr(tmp);
3288 gen_mov_vreg_F0(0, rm * 2);
3289 tmp = load_reg(s, rn);
3290 gen_vfp_msr(tmp);
3291 gen_mov_vreg_F0(0, rm * 2 + 1);
3292 } else {
3293 tmp = load_reg(s, rd);
3294 gen_vfp_msr(tmp);
3295 gen_mov_vreg_F0(0, rm);
3296 tmp = load_reg(s, rn);
3297 gen_vfp_msr(tmp);
3298 gen_mov_vreg_F0(0, rm + 1);
3299 }
3300 }
3301 } else {
3302 /* Load/store */
3303 rn = (insn >> 16) & 0xf;
3304 if (dp)
3305 VFP_DREG_D(rd, insn);
3306 else
3307 rd = VFP_SREG_D(insn);
3308 if ((insn & 0x01200000) == 0x01000000) {
3309 /* Single load/store */
3310 offset = (insn & 0xff) << 2;
3311 if ((insn & (1 << 23)) == 0)
3312 offset = -offset;
3313 if (s->thumb && rn == 15) {
3314 /* This is actually UNPREDICTABLE */
3315 addr = tcg_temp_new_i32();
3316 tcg_gen_movi_i32(addr, s->pc & ~2);
3317 } else {
3318 addr = load_reg(s, rn);
3319 }
3320 tcg_gen_addi_i32(addr, addr, offset);
3321 if (insn & (1 << 20)) {
3322 gen_vfp_ld(s, dp, addr);
3323 gen_mov_vreg_F0(dp, rd);
3324 } else {
3325 gen_mov_F0_vreg(dp, rd);
3326 gen_vfp_st(s, dp, addr);
3327 }
3328 tcg_temp_free_i32(addr);
3329 } else {
3330 /* load/store multiple */
3331 int w = insn & (1 << 21);
3332 if (dp)
3333 n = (insn >> 1) & 0x7f;
3334 else
3335 n = insn & 0xff;
3336
3337 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3338 /* P == U, W == 1 => UNDEF */
3339 return 1;
3340 }
3341 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3342 /* UNPREDICTABLE cases for bad immediates: we choose to
3343 * UNDEF to avoid generating huge numbers of TCG ops
3344 */
3345 return 1;
3346 }
3347 if (rn == 15 && w) {
3348 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3349 return 1;
3350 }
3351
3352 if (s->thumb && rn == 15) {
3353 /* This is actually UNPREDICTABLE */
3354 addr = tcg_temp_new_i32();
3355 tcg_gen_movi_i32(addr, s->pc & ~2);
3356 } else {
3357 addr = load_reg(s, rn);
3358 }
3359 if (insn & (1 << 24)) /* pre-decrement */
3360 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3361
3362 if (dp)
3363 offset = 8;
3364 else
3365 offset = 4;
3366 for (i = 0; i < n; i++) {
3367 if (insn & ARM_CP_RW_BIT) {
3368 /* load */
3369 gen_vfp_ld(s, dp, addr);
3370 gen_mov_vreg_F0(dp, rd + i);
3371 } else {
3372 /* store */
3373 gen_mov_F0_vreg(dp, rd + i);
3374 gen_vfp_st(s, dp, addr);
3375 }
3376 tcg_gen_addi_i32(addr, addr, offset);
3377 }
3378 if (w) {
3379 /* writeback */
3380 if (insn & (1 << 24))
3381 offset = -offset * n;
3382 else if (dp && (insn & 1))
3383 offset = 4;
3384 else
3385 offset = 0;
3386
3387 if (offset != 0)
3388 tcg_gen_addi_i32(addr, addr, offset);
3389 store_reg(s, rn, addr);
3390 } else {
3391 tcg_temp_free_i32(addr);
3392 }
3393 }
3394 }
3395 break;
3396 default:
3397 /* Should never happen. */
3398 return 1;
3399 }
3400 return 0;
3401 }
3402
3403 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3404 {
3405 TranslationBlock *tb;
3406
3407 tb = s->tb;
3408 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3409 tcg_gen_goto_tb(n);
3410 gen_set_pc_im(dest);
3411 tcg_gen_exit_tb((tcg_target_long)tb + n);
3412 } else {
3413 gen_set_pc_im(dest);
3414 tcg_gen_exit_tb(0);
3415 }
3416 }
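/* gen_goto_tb() only chains to the next TB (goto_tb plus a non-zero exit_tb
 * value) when the destination lies in the same guest page as the current TB;
 * a cross-page target just updates the PC and exits unchained, since the
 * page mapping may have changed by the time the jump is taken.
 */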
3417
3418 static inline void gen_jmp(DisasContext *s, uint32_t dest)
3419 {
3420 if (unlikely(s->singlestep_enabled)) {
3421 /* An indirect jump so that we still trigger the debug exception. */
3422 if (s->thumb)
3423 dest |= 1;
3424 gen_bx_im(s, dest);
3425 } else {
3426 gen_goto_tb(s, 0, dest);
3427 s->is_jmp = DISAS_TB_JUMP;
3428 }
3429 }
3430
3431 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3432 {
3433 if (x)
3434 tcg_gen_sari_i32(t0, t0, 16);
3435 else
3436 gen_sxth(t0);
3437 if (y)
3438 tcg_gen_sari_i32(t1, t1, 16);
3439 else
3440 gen_sxth(t1);
3441 tcg_gen_mul_i32(t0, t0, t1);
3442 }
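/* gen_mulxy(): x and y pick the top (1) or bottom (0) signed 16-bit half of
 * each operand before the 32-bit multiply, which is the core of the
 * SMUL<x><y>/SMLA<x><y> style halfword multiplies.
 */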
3443
3444 /* Return the mask of PSR bits set by a MSR instruction. */
3445 static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
3446 uint32_t mask;
3447
3448 mask = 0;
3449 if (flags & (1 << 0))
3450 mask |= 0xff;
3451 if (flags & (1 << 1))
3452 mask |= 0xff00;
3453 if (flags & (1 << 2))
3454 mask |= 0xff0000;
3455 if (flags & (1 << 3))
3456 mask |= 0xff000000;
3457
3458 /* Mask out undefined bits. */
3459 mask &= ~CPSR_RESERVED;
3460 if (!arm_feature(env, ARM_FEATURE_V4T))
3461 mask &= ~CPSR_T;
3462 if (!arm_feature(env, ARM_FEATURE_V5))
3463 mask &= ~CPSR_Q; /* V5TE in reality */
3464 if (!arm_feature(env, ARM_FEATURE_V6))
3465 mask &= ~(CPSR_E | CPSR_GE);
3466 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3467 mask &= ~CPSR_IT;
3468 /* Mask out execution state bits. */
3469 if (!spsr)
3470 mask &= ~CPSR_EXEC;
3471 /* Mask out privileged bits. */
3472 if (IS_USER(s))
3473 mask &= CPSR_USER;
3474 return mask;
3475 }
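/* Roughly: bits 0-3 of 'flags' select the c/x/s/f byte lanes of the PSR
 * (masks 0xff, 0xff00, 0xff0000, 0xff000000); the result is then trimmed to
 * the bits this core actually implements, to non-execution-state bits for
 * CPSR writes, and to the user-writable subset when running unprivileged.
 */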
3476
3477 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3478 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3479 {
3480 TCGv tmp;
3481 if (spsr) {
3482 /* ??? This is also undefined in system mode. */
3483 if (IS_USER(s))
3484 return 1;
3485
3486 tmp = load_cpu_field(spsr);
3487 tcg_gen_andi_i32(tmp, tmp, ~mask);
3488 tcg_gen_andi_i32(t0, t0, mask);
3489 tcg_gen_or_i32(tmp, tmp, t0);
3490 store_cpu_field(tmp, spsr);
3491 } else {
3492 gen_set_cpsr(t0, mask);
3493 }
3494 tcg_temp_free_i32(t0);
3495 gen_lookup_tb(s);
3496 return 0;
3497 }
3498
3499 /* Returns nonzero if access to the PSR is not permitted. */
3500 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3501 {
3502 TCGv tmp;
3503 tmp = tcg_temp_new_i32();
3504 tcg_gen_movi_i32(tmp, val);
3505 return gen_set_psr(s, mask, spsr, tmp);
3506 }
3507
3508 /* Generate an old-style exception return. Marks pc as dead. */
3509 static void gen_exception_return(DisasContext *s, TCGv pc)
3510 {
3511 TCGv tmp;
3512 store_reg(s, 15, pc);
3513 tmp = load_cpu_field(spsr);
3514 gen_set_cpsr(tmp, 0xffffffff);
3515 tcg_temp_free_i32(tmp);
3516 s->is_jmp = DISAS_UPDATE;
3517 }
3518
3519 /* Generate a v6 exception return. Marks both values as dead. */
3520 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3521 {
3522 gen_set_cpsr(cpsr, 0xffffffff);
3523 tcg_temp_free_i32(cpsr);
3524 store_reg(s, 15, pc);
3525 s->is_jmp = DISAS_UPDATE;
3526 }
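/* Both exception-return helpers above restore the CPSR and load the new PC,
 * then flag DISAS_UPDATE so translation stops: the mode, Thumb bit and other
 * CPU state may have changed and the next TB must be looked up afresh.
 */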
3527
3528 static inline void
3529 gen_set_condexec(DisasContext *s)
3530 {
3531 if (s->condexec_mask) {
3532 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3533 TCGv tmp = tcg_temp_new_i32();
3534 tcg_gen_movi_i32(tmp, val);
3535 store_cpu_field(tmp, condexec_bits);
3536 }
3537 }
3538
3539 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3540 {
3541 gen_set_condexec(s);
3542 gen_set_pc_im(s->pc - offset);
3543 gen_exception(excp);
3544 s->is_jmp = DISAS_JUMP;
3545 }
3546
3547 static void gen_nop_hint(DisasContext *s, int val)
3548 {
3549 switch (val) {
3550 case 3: /* wfi */
3551 gen_set_pc_im(s->pc);
3552 s->is_jmp = DISAS_WFI;
3553 break;
3554 case 2: /* wfe */
3555 case 4: /* sev */
3556 /* TODO: Implement SEV and WFE. May help SMP performance. */
3557 default: /* nop */
3558 break;
3559 }
3560 }
3561
3562 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3563
3564 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3565 {
3566 switch (size) {
3567 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3568 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3569 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3570 default: abort();
3571 }
3572 }
3573
3574 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3575 {
3576 switch (size) {
3577 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3578 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3579 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3580 default: return;
3581 }
3582 }
3583
3584 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3585 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3586 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3587 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3588 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3589
3590 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3591 switch ((size << 1) | u) { \
3592 case 0: \
3593 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3594 break; \
3595 case 1: \
3596 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3597 break; \
3598 case 2: \
3599 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3600 break; \
3601 case 3: \
3602 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3603 break; \
3604 case 4: \
3605 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3606 break; \
3607 case 5: \
3608 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3609 break; \
3610 default: return 1; \
3611 }} while (0)
3612
3613 #define GEN_NEON_INTEGER_OP(name) do { \
3614 switch ((size << 1) | u) { \
3615 case 0: \
3616 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3617 break; \
3618 case 1: \
3619 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3620 break; \
3621 case 2: \
3622 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3623 break; \
3624 case 3: \
3625 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3626 break; \
3627 case 4: \
3628 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3629 break; \
3630 case 5: \
3631 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3632 break; \
3633 default: return 1; \
3634 }} while (0)
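/* Both macros above dispatch on ((size << 1) | u) to the signed or unsigned
 * 8/16/32-bit Neon helper for the named op; the default case makes the
 * unsupported size == 3 encodings return 1 (UNDEF) from the caller.
 */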
3635
3636 static TCGv neon_load_scratch(int scratch)
3637 {
3638 TCGv tmp = tcg_temp_new_i32();
3639 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3640 return tmp;
3641 }
3642
3643 static void neon_store_scratch(int scratch, TCGv var)
3644 {
3645 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3646 tcg_temp_free_i32(var);
3647 }
3648
3649 static inline TCGv neon_get_scalar(int size, int reg)
3650 {
3651 TCGv tmp;
3652 if (size == 1) {
3653 tmp = neon_load_reg(reg & 7, reg >> 4);
3654 if (reg & 8) {
3655 gen_neon_dup_high16(tmp);
3656 } else {
3657 gen_neon_dup_low16(tmp);
3658 }
3659 } else {
3660 tmp = neon_load_reg(reg & 15, reg >> 4);
3661 }
3662 return tmp;
3663 }
3664
3665 static int gen_neon_unzip(int rd, int rm, int size, int q)
3666 {
3667 TCGv tmp, tmp2;
3668 if (!q && size == 2) {
3669 return 1;
3670 }
3671 tmp = tcg_const_i32(rd);
3672 tmp2 = tcg_const_i32(rm);
3673 if (q) {
3674 switch (size) {
3675 case 0:
3676 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3677 break;
3678 case 1:
3679 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3680 break;
3681 case 2:
3682 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3683 break;
3684 default:
3685 abort();
3686 }
3687 } else {
3688 switch (size) {
3689 case 0:
3690 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3691 break;
3692 case 1:
3693 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3694 break;
3695 default:
3696 abort();
3697 }
3698 }
3699 tcg_temp_free_i32(tmp);
3700 tcg_temp_free_i32(tmp2);
3701 return 0;
3702 }
3703
3704 static int gen_neon_zip(int rd, int rm, int size, int q)
3705 {
3706 TCGv tmp, tmp2;
3707 if (!q && size == 2) {
3708 return 1;
3709 }
3710 tmp = tcg_const_i32(rd);
3711 tmp2 = tcg_const_i32(rm);
3712 if (q) {
3713 switch (size) {
3714 case 0:
3715 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3716 break;
3717 case 1:
3718 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3719 break;
3720 case 2:
3721 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3722 break;
3723 default:
3724 abort();
3725 }
3726 } else {
3727 switch (size) {
3728 case 0:
3729 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3730 break;
3731 case 1:
3732 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3733 break;
3734 default:
3735 abort();
3736 }
3737 }
3738 tcg_temp_free_i32(tmp);
3739 tcg_temp_free_i32(tmp2);
3740 return 0;
3741 }
3742
3743 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3744 {
3745 TCGv rd, tmp;
3746
3747 rd = tcg_temp_new_i32();
3748 tmp = tcg_temp_new_i32();
3749
3750 tcg_gen_shli_i32(rd, t0, 8);
3751 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3752 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3753 tcg_gen_or_i32(rd, rd, tmp);
3754
3755 tcg_gen_shri_i32(t1, t1, 8);
3756 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3757 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3758 tcg_gen_or_i32(t1, t1, tmp);
3759 tcg_gen_mov_i32(t0, rd);
3760
3761 tcg_temp_free_i32(tmp);
3762 tcg_temp_free_i32(rd);
3763 }
3764
3765 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3766 {
3767 TCGv rd, tmp;
3768
3769 rd = tcg_temp_new_i32();
3770 tmp = tcg_temp_new_i32();
3771
3772 tcg_gen_shli_i32(rd, t0, 16);
3773 tcg_gen_andi_i32(tmp, t1, 0xffff);
3774 tcg_gen_or_i32(rd, rd, tmp);
3775 tcg_gen_shri_i32(t1, t1, 16);
3776 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3777 tcg_gen_or_i32(t1, t1, tmp);
3778 tcg_gen_mov_i32(t0, rd);
3779
3780 tcg_temp_free_i32(tmp);
3781 tcg_temp_free_i32(rd);
3782 }
3783
3784
3785 static struct {
3786 int nregs;
3787 int interleave;
3788 int spacing;
3789 } neon_ls_element_type[11] = {
3790 {4, 4, 1},
3791 {4, 4, 2},
3792 {4, 1, 1},
3793 {4, 2, 1},
3794 {3, 3, 1},
3795 {3, 3, 2},
3796 {3, 1, 1},
3797 {1, 1, 1},
3798 {2, 2, 1},
3799 {2, 2, 2},
3800 {2, 1, 1}
3801 };
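/* neon_ls_element_type[] is indexed by the 'op' field of the VLDn/VSTn
 * "multiple structures" forms and gives {number of registers, interleave
 * factor, register spacing}; e.g. op == 7 is the one-register VLD1/VST1
 * ({1, 1, 1}) and op == 0 the four-register interleaved VLD4 ({4, 4, 1}).
 */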
3802
3803 /* Translate a NEON load/store element instruction. Return nonzero if the
3804 instruction is invalid. */
3805 static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
3806 {
3807 int rd, rn, rm;
3808 int op;
3809 int nregs;
3810 int interleave;
3811 int spacing;
3812 int stride;
3813 int size;
3814 int reg;
3815 int pass;
3816 int load;
3817 int shift;
3818 int n;
3819 TCGv addr;
3820 TCGv tmp;
3821 TCGv tmp2;
3822 TCGv_i64 tmp64;
3823
3824 if (!s->vfp_enabled)
3825 return 1;
3826 VFP_DREG_D(rd, insn);
3827 rn = (insn >> 16) & 0xf;
3828 rm = insn & 0xf;
3829 load = (insn & (1 << 21)) != 0;
3830 if ((insn & (1 << 23)) == 0) {
3831 /* Load store all elements. */
3832 op = (insn >> 8) & 0xf;
3833 size = (insn >> 6) & 3;
3834 if (op > 10)
3835 return 1;
3836 /* Catch UNDEF cases for bad values of align field */
3837 switch (op & 0xc) {
3838 case 4:
3839 if (((insn >> 5) & 1) == 1) {
3840 return 1;
3841 }
3842 break;
3843 case 8:
3844 if (((insn >> 4) & 3) == 3) {
3845 return 1;
3846 }
3847 break;
3848 default:
3849 break;
3850 }
3851 nregs = neon_ls_element_type[op].nregs;
3852 interleave = neon_ls_element_type[op].interleave;
3853 spacing = neon_ls_element_type[op].spacing;
3854 if (size == 3 && (interleave | spacing) != 1)
3855 return 1;
3856 addr = tcg_temp_new_i32();
3857 load_reg_var(s, addr, rn);
3858 stride = (1 << size) * interleave;
3859 for (reg = 0; reg < nregs; reg++) {
3860 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3861 load_reg_var(s, addr, rn);
3862 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3863 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3864 load_reg_var(s, addr, rn);
3865 tcg_gen_addi_i32(addr, addr, 1 << size);
3866 }
3867 if (size == 3) {
3868 if (load) {
3869 tmp64 = gen_ld64(addr, IS_USER(s));
3870 neon_store_reg64(tmp64, rd);
3871 tcg_temp_free_i64(tmp64);
3872 } else {
3873 tmp64 = tcg_temp_new_i64();
3874 neon_load_reg64(tmp64, rd);
3875 gen_st64(tmp64, addr, IS_USER(s));
3876 }
3877 tcg_gen_addi_i32(addr, addr, stride);
3878 } else {
3879 for (pass = 0; pass < 2; pass++) {
3880 if (size == 2) {
3881 if (load) {
3882 tmp = gen_ld32(addr, IS_USER(s));
3883 neon_store_reg(rd, pass, tmp);
3884 } else {
3885 tmp = neon_load_reg(rd, pass);
3886 gen_st32(tmp, addr, IS_USER(s));
3887 }
3888 tcg_gen_addi_i32(addr, addr, stride);
3889 } else if (size == 1) {
3890 if (load) {
3891 tmp = gen_ld16u(addr, IS_USER(s));
3892 tcg_gen_addi_i32(addr, addr, stride);
3893 tmp2 = gen_ld16u(addr, IS_USER(s));
3894 tcg_gen_addi_i32(addr, addr, stride);
3895 tcg_gen_shli_i32(tmp2, tmp2, 16);
3896 tcg_gen_or_i32(tmp, tmp, tmp2);
3897 tcg_temp_free_i32(tmp2);
3898 neon_store_reg(rd, pass, tmp);
3899 } else {
3900 tmp = neon_load_reg(rd, pass);
3901 tmp2 = tcg_temp_new_i32();
3902 tcg_gen_shri_i32(tmp2, tmp, 16);
3903 gen_st16(tmp, addr, IS_USER(s));
3904 tcg_gen_addi_i32(addr, addr, stride);
3905 gen_st16(tmp2, addr, IS_USER(s));
3906 tcg_gen_addi_i32(addr, addr, stride);
3907 }
3908 } else /* size == 0 */ {
3909 if (load) {
3910 TCGV_UNUSED(tmp2);
3911 for (n = 0; n < 4; n++) {
3912 tmp = gen_ld8u(addr, IS_USER(s));
3913 tcg_gen_addi_i32(addr, addr, stride);
3914 if (n == 0) {
3915 tmp2 = tmp;
3916 } else {
3917 tcg_gen_shli_i32(tmp, tmp, n * 8);
3918 tcg_gen_or_i32(tmp2, tmp2, tmp);
3919 tcg_temp_free_i32(tmp);
3920 }
3921 }
3922 neon_store_reg(rd, pass, tmp2);
3923 } else {
3924 tmp2 = neon_load_reg(rd, pass);
3925 for (n = 0; n < 4; n++) {
3926 tmp = tcg_temp_new_i32();
3927 if (n == 0) {
3928 tcg_gen_mov_i32(tmp, tmp2);
3929 } else {
3930 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3931 }
3932 gen_st8(tmp, addr, IS_USER(s));
3933 tcg_gen_addi_i32(addr, addr, stride);
3934 }
3935 tcg_temp_free_i32(tmp2);
3936 }
3937 }
3938 }
3939 }
3940 rd += spacing;
3941 }
3942 tcg_temp_free_i32(addr);
3943 stride = nregs * 8;
3944 } else {
3945 size = (insn >> 10) & 3;
3946 if (size == 3) {
3947 /* Load single element to all lanes. */
3948 int a = (insn >> 4) & 1;
3949 if (!load) {
3950 return 1;
3951 }
3952 size = (insn >> 6) & 3;
3953 nregs = ((insn >> 8) & 3) + 1;
3954
3955 if (size == 3) {
3956 if (nregs != 4 || a == 0) {
3957 return 1;
3958 }
3959 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3960 size = 2;
3961 }
3962 if (nregs == 1 && a == 1 && size == 0) {
3963 return 1;
3964 }
3965 if (nregs == 3 && a == 1) {
3966 return 1;
3967 }
3968 addr = tcg_temp_new_i32();
3969 load_reg_var(s, addr, rn);
3970 if (nregs == 1) {
3971 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3972 tmp = gen_load_and_replicate(s, addr, size);
3973 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3974 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3975 if (insn & (1 << 5)) {
3976 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3977 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3978 }
3979 tcg_temp_free_i32(tmp);
3980 } else {
3981 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3982 stride = (insn & (1 << 5)) ? 2 : 1;
3983 for (reg = 0; reg < nregs; reg++) {
3984 tmp = gen_load_and_replicate(s, addr, size);
3985 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3986 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3987 tcg_temp_free_i32(tmp);
3988 tcg_gen_addi_i32(addr, addr, 1 << size);
3989 rd += stride;
3990 }
3991 }
3992 tcg_temp_free_i32(addr);
3993 stride = (1 << size) * nregs;
3994 } else {
3995 /* Single element. */
3996 int idx = (insn >> 4) & 0xf;
3997 pass = (insn >> 7) & 1;
3998 switch (size) {
3999 case 0:
4000 shift = ((insn >> 5) & 3) * 8;
4001 stride = 1;
4002 break;
4003 case 1:
4004 shift = ((insn >> 6) & 1) * 16;
4005 stride = (insn & (1 << 5)) ? 2 : 1;
4006 break;
4007 case 2:
4008 shift = 0;
4009 stride = (insn & (1 << 6)) ? 2 : 1;
4010 break;
4011 default:
4012 abort();
4013 }
4014 nregs = ((insn >> 8) & 3) + 1;
4015 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4016 switch (nregs) {
4017 case 1:
4018 if (((idx & (1 << size)) != 0) ||
4019 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4020 return 1;
4021 }
4022 break;
4023 case 3:
4024 if ((idx & 1) != 0) {
4025 return 1;
4026 }
4027 /* fall through */
4028 case 2:
4029 if (size == 2 && (idx & 2) != 0) {
4030 return 1;
4031 }
4032 break;
4033 case 4:
4034 if ((size == 2) && ((idx & 3) == 3)) {
4035 return 1;
4036 }
4037 break;
4038 default:
4039 abort();
4040 }
4041 if ((rd + stride * (nregs - 1)) > 31) {
4042 /* Attempts to write off the end of the register file
4043 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4044 * the neon_load_reg() would write off the end of the array.
4045 */
4046 return 1;
4047 }
4048 addr = tcg_temp_new_i32();
4049 load_reg_var(s, addr, rn);
4050 for (reg = 0; reg < nregs; reg++) {
4051 if (load) {
4052 switch (size) {
4053 case 0:
4054 tmp = gen_ld8u(addr, IS_USER(s));
4055 break;
4056 case 1:
4057 tmp = gen_ld16u(addr, IS_USER(s));
4058 break;
4059 case 2:
4060 tmp = gen_ld32(addr, IS_USER(s));
4061 break;
4062 default: /* Avoid compiler warnings. */
4063 abort();
4064 }
4065 if (size != 2) {
4066 tmp2 = neon_load_reg(rd, pass);
4067 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4068 shift, size ? 16 : 8);
4069 tcg_temp_free_i32(tmp2);
4070 }
4071 neon_store_reg(rd, pass, tmp);
4072 } else { /* Store */
4073 tmp = neon_load_reg(rd, pass);
4074 if (shift)
4075 tcg_gen_shri_i32(tmp, tmp, shift);
4076 switch (size) {
4077 case 0:
4078 gen_st8(tmp, addr, IS_USER(s));
4079 break;
4080 case 1:
4081 gen_st16(tmp, addr, IS_USER(s));
4082 break;
4083 case 2:
4084 gen_st32(tmp, addr, IS_USER(s));
4085 break;
4086 }
4087 }
4088 rd += stride;
4089 tcg_gen_addi_i32(addr, addr, 1 << size);
4090 }
4091 tcg_temp_free_i32(addr);
4092 stride = nregs * (1 << size);
4093 }
4094 }
4095 if (rm != 15) {
4096 TCGv base;
4097
4098 base = load_reg(s, rn);
4099 if (rm == 13) {
4100 tcg_gen_addi_i32(base, base, stride);
4101 } else {
4102 TCGv index;
4103 index = load_reg(s, rm);
4104 tcg_gen_add_i32(base, base, index);
4105 tcg_temp_free_i32(index);
4106 }
4107 store_reg(s, rn, base);
4108 }
4109 return 0;
4110 }
4111
4112 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4113 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4114 {
4115 tcg_gen_and_i32(t, t, c);
4116 tcg_gen_andc_i32(f, f, c);
4117 tcg_gen_or_i32(dest, t, f);
4118 }
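/* i.e. dest = (t & c) | (f & ~c): the per-bit select used for VBSL (and,
 * with the operands shuffled by the caller, for VBIT/VBIF).
 */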
4119
4120 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4121 {
4122 switch (size) {
4123 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4124 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4125 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4126 default: abort();
4127 }
4128 }
4129
4130 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4131 {
4132 switch (size) {
4133 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4134 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4135 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4136 default: abort();
4137 }
4138 }
4139
4140 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4141 {
4142 switch (size) {
4143 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4144 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4145 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4146 default: abort();
4147 }
4148 }
4149
4150 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4151 {
4152 switch (size) {
4153 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4154 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4155 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4156 default: abort();
4157 }
4158 }
4159
4160 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4161 int q, int u)
4162 {
4163 if (q) {
4164 if (u) {
4165 switch (size) {
4166 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4167 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4168 default: abort();
4169 }
4170 } else {
4171 switch (size) {
4172 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4173 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4174 default: abort();
4175 }
4176 }
4177 } else {
4178 if (u) {
4179 switch (size) {
4180 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4181 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4182 default: abort();
4183 }
4184 } else {
4185 switch (size) {
4186 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4187 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4188 default: abort();
4189 }
4190 }
4191 }
4192 }
4193
4194 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4195 {
4196 if (u) {
4197 switch (size) {
4198 case 0: gen_helper_neon_widen_u8(dest, src); break;
4199 case 1: gen_helper_neon_widen_u16(dest, src); break;
4200 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4201 default: abort();
4202 }
4203 } else {
4204 switch (size) {
4205 case 0: gen_helper_neon_widen_s8(dest, src); break;
4206 case 1: gen_helper_neon_widen_s16(dest, src); break;
4207 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4208 default: abort();
4209 }
4210 }
4211 tcg_temp_free_i32(src);
4212 }
4213
4214 static inline void gen_neon_addl(int size)
4215 {
4216 switch (size) {
4217 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4218 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4219 case 2: tcg_gen_add_i64(CPU_V001); break;
4220 default: abort();
4221 }
4222 }
4223
4224 static inline void gen_neon_subl(int size)
4225 {
4226 switch (size) {
4227 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4228 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4229 case 2: tcg_gen_sub_i64(CPU_V001); break;
4230 default: abort();
4231 }
4232 }
4233
4234 static inline void gen_neon_negl(TCGv_i64 var, int size)
4235 {
4236 switch (size) {
4237 case 0: gen_helper_neon_negl_u16(var, var); break;
4238 case 1: gen_helper_neon_negl_u32(var, var); break;
4239 case 2:
4240 tcg_gen_neg_i64(var, var);
4241 break;
4242 default: abort();
4243 }
4244 }
4245
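/* Note: 'size' here is the source element size of the long operation
 * (1 = 16-bit, 2 = 32-bit); the saturating addition itself is performed on
 * the doubled-width value, hence the s32/s64 helpers.
 */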
4246 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4247 {
4248 switch (size) {
4249 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4250 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4251 default: abort();
4252 }
4253 }
4254
4255 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4256 {
4257 TCGv_i64 tmp;
4258
4259 switch ((size << 1) | u) {
4260 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4261 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4262 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4263 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4264 case 4:
4265 tmp = gen_muls_i64_i32(a, b);
4266 tcg_gen_mov_i64(dest, tmp);
4267 tcg_temp_free_i64(tmp);
4268 break;
4269 case 5:
4270 tmp = gen_mulu_i64_i32(a, b);
4271 tcg_gen_mov_i64(dest, tmp);
4272 tcg_temp_free_i64(tmp);
4273 break;
4274 default: abort();
4275 }
4276
4277 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4278 Don't forget to clean them now. */
4279 if (size < 2) {
4280 tcg_temp_free_i32(a);
4281 tcg_temp_free_i32(b);
4282 }
4283 }
4284
4285 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4286 {
4287 if (op) {
4288 if (u) {
4289 gen_neon_unarrow_sats(size, dest, src);
4290 } else {
4291 gen_neon_narrow(size, dest, src);
4292 }
4293 } else {
4294 if (u) {
4295 gen_neon_narrow_satu(size, dest, src);
4296 } else {
4297 gen_neon_narrow_sats(size, dest, src);
4298 }
4299 }
4300 }
4301
4302 /* Symbolic constants for op fields for Neon 3-register same-length.
4303 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4304 * table A7-9.
4305 */
4306 #define NEON_3R_VHADD 0
4307 #define NEON_3R_VQADD 1
4308 #define NEON_3R_VRHADD 2
4309 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4310 #define NEON_3R_VHSUB 4
4311 #define NEON_3R_VQSUB 5
4312 #define NEON_3R_VCGT 6
4313 #define NEON_3R_VCGE 7
4314 #define NEON_3R_VSHL 8
4315 #define NEON_3R_VQSHL 9
4316 #define NEON_3R_VRSHL 10
4317 #define NEON_3R_VQRSHL 11
4318 #define NEON_3R_VMAX 12
4319 #define NEON_3R_VMIN 13
4320 #define NEON_3R_VABD 14
4321 #define NEON_3R_VABA 15
4322 #define NEON_3R_VADD_VSUB 16
4323 #define NEON_3R_VTST_VCEQ 17
4324 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4325 #define NEON_3R_VMUL 19
4326 #define NEON_3R_VPMAX 20
4327 #define NEON_3R_VPMIN 21
4328 #define NEON_3R_VQDMULH_VQRDMULH 22
4329 #define NEON_3R_VPADD 23
4330 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4331 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4332 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4333 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4334 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4335 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4336 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
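/* For reference: disas_neon_data_insn() below assembles this op value as
 * ((insn >> 7) & 0x1e) | ((insn >> 4) & 1), i.e. bits [11:8] with bit [4]
 * appended as the least significant bit.
 */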
4337
4338 static const uint8_t neon_3r_sizes[] = {
4339 [NEON_3R_VHADD] = 0x7,
4340 [NEON_3R_VQADD] = 0xf,
4341 [NEON_3R_VRHADD] = 0x7,
4342 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4343 [NEON_3R_VHSUB] = 0x7,
4344 [NEON_3R_VQSUB] = 0xf,
4345 [NEON_3R_VCGT] = 0x7,
4346 [NEON_3R_VCGE] = 0x7,
4347 [NEON_3R_VSHL] = 0xf,
4348 [NEON_3R_VQSHL] = 0xf,
4349 [NEON_3R_VRSHL] = 0xf,
4350 [NEON_3R_VQRSHL] = 0xf,
4351 [NEON_3R_VMAX] = 0x7,
4352 [NEON_3R_VMIN] = 0x7,
4353 [NEON_3R_VABD] = 0x7,
4354 [NEON_3R_VABA] = 0x7,
4355 [NEON_3R_VADD_VSUB] = 0xf,
4356 [NEON_3R_VTST_VCEQ] = 0x7,
4357 [NEON_3R_VML] = 0x7,
4358 [NEON_3R_VMUL] = 0x7,
4359 [NEON_3R_VPMAX] = 0x7,
4360 [NEON_3R_VPMIN] = 0x7,
4361 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4362 [NEON_3R_VPADD] = 0x7,
4363 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4364 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4365 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4366 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4367 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4368 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4369 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4370 };
4371
4372 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4373 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4374 * table A7-13.
4375 */
4376 #define NEON_2RM_VREV64 0
4377 #define NEON_2RM_VREV32 1
4378 #define NEON_2RM_VREV16 2
4379 #define NEON_2RM_VPADDL 4
4380 #define NEON_2RM_VPADDL_U 5
4381 #define NEON_2RM_VCLS 8
4382 #define NEON_2RM_VCLZ 9
4383 #define NEON_2RM_VCNT 10
4384 #define NEON_2RM_VMVN 11
4385 #define NEON_2RM_VPADAL 12
4386 #define NEON_2RM_VPADAL_U 13
4387 #define NEON_2RM_VQABS 14
4388 #define NEON_2RM_VQNEG 15
4389 #define NEON_2RM_VCGT0 16
4390 #define NEON_2RM_VCGE0 17
4391 #define NEON_2RM_VCEQ0 18
4392 #define NEON_2RM_VCLE0 19
4393 #define NEON_2RM_VCLT0 20
4394 #define NEON_2RM_VABS 22
4395 #define NEON_2RM_VNEG 23
4396 #define NEON_2RM_VCGT0_F 24
4397 #define NEON_2RM_VCGE0_F 25
4398 #define NEON_2RM_VCEQ0_F 26
4399 #define NEON_2RM_VCLE0_F 27
4400 #define NEON_2RM_VCLT0_F 28
4401 #define NEON_2RM_VABS_F 30
4402 #define NEON_2RM_VNEG_F 31
4403 #define NEON_2RM_VSWP 32
4404 #define NEON_2RM_VTRN 33
4405 #define NEON_2RM_VUZP 34
4406 #define NEON_2RM_VZIP 35
4407 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4408 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4409 #define NEON_2RM_VSHLL 38
4410 #define NEON_2RM_VCVT_F16_F32 44
4411 #define NEON_2RM_VCVT_F32_F16 46
4412 #define NEON_2RM_VRECPE 56
4413 #define NEON_2RM_VRSQRTE 57
4414 #define NEON_2RM_VRECPE_F 58
4415 #define NEON_2RM_VRSQRTE_F 59
4416 #define NEON_2RM_VCVT_FS 60
4417 #define NEON_2RM_VCVT_FU 61
4418 #define NEON_2RM_VCVT_SF 62
4419 #define NEON_2RM_VCVT_UF 63
4420
4421 static int neon_2rm_is_float_op(int op)
4422 {
4423 /* Return true if this neon 2reg-misc op is float-to-float */
4424 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4425 op >= NEON_2RM_VRECPE_F);
4426 }
4427
4428 /* Each entry in this array has bit n set if the insn allows
4429 * size value n (otherwise it will UNDEF). Since unallocated
4430 * op values will have no bits set they always UNDEF.
4431 */
4432 static const uint8_t neon_2rm_sizes[] = {
4433 [NEON_2RM_VREV64] = 0x7,
4434 [NEON_2RM_VREV32] = 0x3,
4435 [NEON_2RM_VREV16] = 0x1,
4436 [NEON_2RM_VPADDL] = 0x7,
4437 [NEON_2RM_VPADDL_U] = 0x7,
4438 [NEON_2RM_VCLS] = 0x7,
4439 [NEON_2RM_VCLZ] = 0x7,
4440 [NEON_2RM_VCNT] = 0x1,
4441 [NEON_2RM_VMVN] = 0x1,
4442 [NEON_2RM_VPADAL] = 0x7,
4443 [NEON_2RM_VPADAL_U] = 0x7,
4444 [NEON_2RM_VQABS] = 0x7,
4445 [NEON_2RM_VQNEG] = 0x7,
4446 [NEON_2RM_VCGT0] = 0x7,
4447 [NEON_2RM_VCGE0] = 0x7,
4448 [NEON_2RM_VCEQ0] = 0x7,
4449 [NEON_2RM_VCLE0] = 0x7,
4450 [NEON_2RM_VCLT0] = 0x7,
4451 [NEON_2RM_VABS] = 0x7,
4452 [NEON_2RM_VNEG] = 0x7,
4453 [NEON_2RM_VCGT0_F] = 0x4,
4454 [NEON_2RM_VCGE0_F] = 0x4,
4455 [NEON_2RM_VCEQ0_F] = 0x4,
4456 [NEON_2RM_VCLE0_F] = 0x4,
4457 [NEON_2RM_VCLT0_F] = 0x4,
4458 [NEON_2RM_VABS_F] = 0x4,
4459 [NEON_2RM_VNEG_F] = 0x4,
4460 [NEON_2RM_VSWP] = 0x1,
4461 [NEON_2RM_VTRN] = 0x7,
4462 [NEON_2RM_VUZP] = 0x7,
4463 [NEON_2RM_VZIP] = 0x7,
4464 [NEON_2RM_VMOVN] = 0x7,
4465 [NEON_2RM_VQMOVN] = 0x7,
4466 [NEON_2RM_VSHLL] = 0x7,
4467 [NEON_2RM_VCVT_F16_F32] = 0x2,
4468 [NEON_2RM_VCVT_F32_F16] = 0x2,
4469 [NEON_2RM_VRECPE] = 0x4,
4470 [NEON_2RM_VRSQRTE] = 0x4,
4471 [NEON_2RM_VRECPE_F] = 0x4,
4472 [NEON_2RM_VRSQRTE_F] = 0x4,
4473 [NEON_2RM_VCVT_FS] = 0x4,
4474 [NEON_2RM_VCVT_FU] = 0x4,
4475 [NEON_2RM_VCVT_SF] = 0x4,
4476 [NEON_2RM_VCVT_UF] = 0x4,
4477 };
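/* Worked example: [NEON_2RM_VREV32] = 0x3 permits only size 0 or 1, so for
 * size 2 the check (neon_2rm_sizes[op] & (1 << size)) below evaluates to
 * zero and the instruction UNDEFs.
 */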
4478
4479 /* Translate a NEON data processing instruction. Return nonzero if the
4480 instruction is invalid.
4481 We process data in a mixture of 32-bit and 64-bit chunks.
4482 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4483
4484 static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
4485 {
4486 int op;
4487 int q;
4488 int rd, rn, rm;
4489 int size;
4490 int shift;
4491 int pass;
4492 int count;
4493 int pairwise;
4494 int u;
4495 uint32_t imm, mask;
4496 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4497 TCGv_i64 tmp64;
4498
4499 if (!s->vfp_enabled)
4500 return 1;
4501 q = (insn & (1 << 6)) != 0;
4502 u = (insn >> 24) & 1;
4503 VFP_DREG_D(rd, insn);
4504 VFP_DREG_N(rn, insn);
4505 VFP_DREG_M(rm, insn);
4506 size = (insn >> 20) & 3;
4507 if ((insn & (1 << 23)) == 0) {
4508 /* Three register same length. */
4509 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4510 /* Catch invalid op and bad size combinations: UNDEF */
4511 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4512 return 1;
4513 }
4514 /* All insns of this form UNDEF for either this condition or the
4515 * superset of cases "Q==1"; we catch the latter later.
4516 */
4517 if (q && ((rd | rn | rm) & 1)) {
4518 return 1;
4519 }
4520 if (size == 3 && op != NEON_3R_LOGIC) {
4521 /* 64-bit element instructions. */
4522 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4523 neon_load_reg64(cpu_V0, rn + pass);
4524 neon_load_reg64(cpu_V1, rm + pass);
4525 switch (op) {
4526 case NEON_3R_VQADD:
4527 if (u) {
4528 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4529 cpu_V0, cpu_V1);
4530 } else {
4531 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4532 cpu_V0, cpu_V1);
4533 }
4534 break;
4535 case NEON_3R_VQSUB:
4536 if (u) {
4537 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4538 cpu_V0, cpu_V1);
4539 } else {
4540 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4541 cpu_V0, cpu_V1);
4542 }
4543 break;
4544 case NEON_3R_VSHL:
4545 if (u) {
4546 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4547 } else {
4548 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4549 }
4550 break;
4551 case NEON_3R_VQSHL:
4552 if (u) {
4553 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4554 cpu_V1, cpu_V0);
4555 } else {
4556 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4557 cpu_V1, cpu_V0);
4558 }
4559 break;
4560 case NEON_3R_VRSHL:
4561 if (u) {
4562 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4563 } else {
4564 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4565 }
4566 break;
4567 case NEON_3R_VQRSHL:
4568 if (u) {
4569 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4570 cpu_V1, cpu_V0);
4571 } else {
4572 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4573 cpu_V1, cpu_V0);
4574 }
4575 break;
4576 case NEON_3R_VADD_VSUB:
4577 if (u) {
4578 tcg_gen_sub_i64(CPU_V001);
4579 } else {
4580 tcg_gen_add_i64(CPU_V001);
4581 }
4582 break;
4583 default:
4584 abort();
4585 }
4586 neon_store_reg64(cpu_V0, rd + pass);
4587 }
4588 return 0;
4589 }
4590 pairwise = 0;
4591 switch (op) {
4592 case NEON_3R_VSHL:
4593 case NEON_3R_VQSHL:
4594 case NEON_3R_VRSHL:
4595 case NEON_3R_VQRSHL:
4596 {
4597 int rtmp;
4598 /* Shift instruction operands are reversed. */
4599 rtmp = rn;
4600 rn = rm;
4601 rm = rtmp;
4602 }
4603 break;
4604 case NEON_3R_VPADD:
4605 if (u) {
4606 return 1;
4607 }
4608 /* Fall through */
4609 case NEON_3R_VPMAX:
4610 case NEON_3R_VPMIN:
4611 pairwise = 1;
4612 break;
4613 case NEON_3R_FLOAT_ARITH:
4614 pairwise = (u && size < 2); /* if VPADD (float) */
4615 break;
4616 case NEON_3R_FLOAT_MINMAX:
4617 pairwise = u; /* if VPMIN/VPMAX (float) */
4618 break;
4619 case NEON_3R_FLOAT_CMP:
4620 if (!u && size) {
4621 /* no encoding for U=0 C=1x */
4622 return 1;
4623 }
4624 break;
4625 case NEON_3R_FLOAT_ACMP:
4626 if (!u) {
4627 return 1;
4628 }
4629 break;
4630 case NEON_3R_VRECPS_VRSQRTS:
4631 if (u) {
4632 return 1;
4633 }
4634 break;
4635 case NEON_3R_VMUL:
4636 if (u && (size != 0)) {
4637 /* UNDEF on invalid size for polynomial subcase */
4638 return 1;
4639 }
4640 break;
4641 case NEON_3R_VFM:
4642 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4643 return 1;
4644 }
4645 break;
4646 default:
4647 break;
4648 }
4649
4650 if (pairwise && q) {
4651 /* All the pairwise insns UNDEF if Q is set */
4652 return 1;
4653 }
4654
4655 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4656
4657 if (pairwise) {
4658 /* Pairwise. */
4659 if (pass < 1) {
4660 tmp = neon_load_reg(rn, 0);
4661 tmp2 = neon_load_reg(rn, 1);
4662 } else {
4663 tmp = neon_load_reg(rm, 0);
4664 tmp2 = neon_load_reg(rm, 1);
4665 }
4666 } else {
4667 /* Elementwise. */
4668 tmp = neon_load_reg(rn, pass);
4669 tmp2 = neon_load_reg(rm, pass);
4670 }
4671 switch (op) {
4672 case NEON_3R_VHADD:
4673 GEN_NEON_INTEGER_OP(hadd);
4674 break;
4675 case NEON_3R_VQADD:
4676 GEN_NEON_INTEGER_OP_ENV(qadd);
4677 break;
4678 case NEON_3R_VRHADD:
4679 GEN_NEON_INTEGER_OP(rhadd);
4680 break;
4681 case NEON_3R_LOGIC: /* Logic ops. */
4682 switch ((u << 2) | size) {
4683 case 0: /* VAND */
4684 tcg_gen_and_i32(tmp, tmp, tmp2);
4685 break;
4686 case 1: /* VBIC */
4687 tcg_gen_andc_i32(tmp, tmp, tmp2);
4688 break;
4689 case 2: /* VORR */
4690 tcg_gen_or_i32(tmp, tmp, tmp2);
4691 break;
4692 case 3: /* VORN */
4693 tcg_gen_orc_i32(tmp, tmp, tmp2);
4694 break;
4695 case 4: /* VEOR */
4696 tcg_gen_xor_i32(tmp, tmp, tmp2);
4697 break;
4698 case 5: /* VBSL */
4699 tmp3 = neon_load_reg(rd, pass);
4700 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4701 tcg_temp_free_i32(tmp3);
4702 break;
4703 case 6: /* VBIT */
4704 tmp3 = neon_load_reg(rd, pass);
4705 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4706 tcg_temp_free_i32(tmp3);
4707 break;
4708 case 7: /* VBIF */
4709 tmp3 = neon_load_reg(rd, pass);
4710 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4711 tcg_temp_free_i32(tmp3);
4712 break;
4713 }
4714 break;
4715 case NEON_3R_VHSUB:
4716 GEN_NEON_INTEGER_OP(hsub);
4717 break;
4718 case NEON_3R_VQSUB:
4719 GEN_NEON_INTEGER_OP_ENV(qsub);
4720 break;
4721 case NEON_3R_VCGT:
4722 GEN_NEON_INTEGER_OP(cgt);
4723 break;
4724 case NEON_3R_VCGE:
4725 GEN_NEON_INTEGER_OP(cge);
4726 break;
4727 case NEON_3R_VSHL:
4728 GEN_NEON_INTEGER_OP(shl);
4729 break;
4730 case NEON_3R_VQSHL:
4731 GEN_NEON_INTEGER_OP_ENV(qshl);
4732 break;
4733 case NEON_3R_VRSHL:
4734 GEN_NEON_INTEGER_OP(rshl);
4735 break;
4736 case NEON_3R_VQRSHL:
4737 GEN_NEON_INTEGER_OP_ENV(qrshl);
4738 break;
4739 case NEON_3R_VMAX:
4740 GEN_NEON_INTEGER_OP(max);
4741 break;
4742 case NEON_3R_VMIN:
4743 GEN_NEON_INTEGER_OP(min);
4744 break;
4745 case NEON_3R_VABD:
4746 GEN_NEON_INTEGER_OP(abd);
4747 break;
4748 case NEON_3R_VABA:
4749 GEN_NEON_INTEGER_OP(abd);
4750 tcg_temp_free_i32(tmp2);
4751 tmp2 = neon_load_reg(rd, pass);
4752 gen_neon_add(size, tmp, tmp2);
4753 break;
4754 case NEON_3R_VADD_VSUB:
4755 if (!u) { /* VADD */
4756 gen_neon_add(size, tmp, tmp2);
4757 } else { /* VSUB */
4758 switch (size) {
4759 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4760 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4761 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4762 default: abort();
4763 }
4764 }
4765 break;
4766 case NEON_3R_VTST_VCEQ:
4767 if (!u) { /* VTST */
4768 switch (size) {
4769 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4770 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4771 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4772 default: abort();
4773 }
4774 } else { /* VCEQ */
4775 switch (size) {
4776 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4777 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4778 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4779 default: abort();
4780 }
4781 }
4782 break;
4783 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
4784 switch (size) {
4785 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4786 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4787 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4788 default: abort();
4789 }
4790 tcg_temp_free_i32(tmp2);
4791 tmp2 = neon_load_reg(rd, pass);
4792 if (u) { /* VMLS */
4793 gen_neon_rsb(size, tmp, tmp2);
4794 } else { /* VMLA */
4795 gen_neon_add(size, tmp, tmp2);
4796 }
4797 break;
4798 case NEON_3R_VMUL:
4799 if (u) { /* polynomial */
4800 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4801 } else { /* Integer */
4802 switch (size) {
4803 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4804 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4805 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4806 default: abort();
4807 }
4808 }
4809 break;
4810 case NEON_3R_VPMAX:
4811 GEN_NEON_INTEGER_OP(pmax);
4812 break;
4813 case NEON_3R_VPMIN:
4814 GEN_NEON_INTEGER_OP(pmin);
4815 break;
4816 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4817 if (!u) { /* VQDMULH */
4818 switch (size) {
4819 case 1:
4820 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4821 break;
4822 case 2:
4823 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4824 break;
4825 default: abort();
4826 }
4827 } else { /* VQRDMULH */
4828 switch (size) {
4829 case 1:
4830 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4831 break;
4832 case 2:
4833 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4834 break;
4835 default: abort();
4836 }
4837 }
4838 break;
4839 case NEON_3R_VPADD:
4840 switch (size) {
4841 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4842 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4843 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4844 default: abort();
4845 }
4846 break;
4847 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4848 {
4849 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4850 switch ((u << 2) | size) {
4851 case 0: /* VADD */
4852 case 4: /* VPADD */
4853 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4854 break;
4855 case 2: /* VSUB */
4856 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
4857 break;
4858 case 6: /* VABD */
4859 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
4860 break;
4861 default:
4862 abort();
4863 }
4864 tcg_temp_free_ptr(fpstatus);
4865 break;
4866 }
4867 case NEON_3R_FLOAT_MULTIPLY:
4868 {
4869 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4870 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
4871 if (!u) {
4872 tcg_temp_free_i32(tmp2);
4873 tmp2 = neon_load_reg(rd, pass);
4874 if (size == 0) {
4875 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4876 } else {
4877 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
4878 }
4879 }
4880 tcg_temp_free_ptr(fpstatus);
4881 break;
4882 }
4883 case NEON_3R_FLOAT_CMP:
4884 {
4885 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4886 if (!u) {
4887 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
4888 } else {
4889 if (size == 0) {
4890 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4891 } else {
4892 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4893 }
4894 }
4895 tcg_temp_free_ptr(fpstatus);
4896 break;
4897 }
4898 case NEON_3R_FLOAT_ACMP:
4899 {
4900 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4901 if (size == 0) {
4902 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4903 } else {
4904 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4905 }
4906 tcg_temp_free_ptr(fpstatus);
4907 break;
4908 }
4909 case NEON_3R_FLOAT_MINMAX:
4910 {
4911 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4912 if (size == 0) {
4913 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4914 } else {
4915 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4916 }
4917 tcg_temp_free_ptr(fpstatus);
4918 break;
4919 }
4920 case NEON_3R_VRECPS_VRSQRTS:
4921 if (size == 0)
4922 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4923 else
4924 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4925 break;
4926 case NEON_3R_VFM:
4927 {
4928 /* VFMA, VFMS: fused multiply-add */
4929 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4930 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4931 if (size) {
4932 /* VFMS */
4933 gen_helper_vfp_negs(tmp, tmp);
4934 }
4935 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4936 tcg_temp_free_i32(tmp3);
4937 tcg_temp_free_ptr(fpstatus);
4938 break;
4939 }
4940 default:
4941 abort();
4942 }
4943 tcg_temp_free_i32(tmp2);
4944
4945 /* Save the result. For elementwise operations we can put it
4946 straight into the destination register. For pairwise operations
4947 we have to be careful to avoid clobbering the source operands. */
4948 if (pairwise && rd == rm) {
4949 neon_store_scratch(pass, tmp);
4950 } else {
4951 neon_store_reg(rd, pass, tmp);
4952 }
4953
4954 } /* for pass */
4955 if (pairwise && rd == rm) {
4956 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4957 tmp = neon_load_scratch(pass);
4958 neon_store_reg(rd, pass, tmp);
4959 }
4960 }
4961 /* End of 3 register same size operations. */
4962 } else if (insn & (1 << 4)) {
4963 if ((insn & 0x00380080) != 0) {
4964 /* Two registers and shift. */
4965 op = (insn >> 8) & 0xf;
4966 if (insn & (1 << 7)) {
4967 /* 64-bit shift. */
4968 if (op > 7) {
4969 return 1;
4970 }
4971 size = 3;
4972 } else {
4973 size = 2;
4974 while ((insn & (1 << (size + 19))) == 0)
4975 size--;
4976 }
4977 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4978 /* To avoid excessive duplication of ops we implement shift
4979 by immediate using the variable shift operations. */
4980 if (op < 8) {
4981 /* Shift by immediate:
4982 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4983 if (q && ((rd | rm) & 1)) {
4984 return 1;
4985 }
4986 if (!u && (op == 4 || op == 6)) {
4987 return 1;
4988 }
4989 /* Right shifts are encoded as N - shift, where N is the
4990 element size in bits. */
4991 if (op <= 4)
4992 shift = shift - (1 << (size + 3));
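/* Worked example: for size 0 (8-bit elements) a VSHR by 1 is encoded with
 * imm6 = 15; the masked shift is 7, and 7 - 8 = -1. The variable-shift
 * helpers treat a negative count as a right shift.
 */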
4993 if (size == 3) {
4994 count = q + 1;
4995 } else {
4996 count = q ? 4: 2;
4997 }
4998 switch (size) {
4999 case 0:
5000 imm = (uint8_t) shift;
5001 imm |= imm << 8;
5002 imm |= imm << 16;
5003 break;
5004 case 1:
5005 imm = (uint16_t) shift;
5006 imm |= imm << 16;
5007 break;
5008 case 2:
5009 case 3:
5010 imm = shift;
5011 break;
5012 default:
5013 abort();
5014 }
5015
5016 for (pass = 0; pass < count; pass++) {
5017 if (size == 3) {
5018 neon_load_reg64(cpu_V0, rm + pass);
5019 tcg_gen_movi_i64(cpu_V1, imm);
5020 switch (op) {
5021 case 0: /* VSHR */
5022 case 1: /* VSRA */
5023 if (u)
5024 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5025 else
5026 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5027 break;
5028 case 2: /* VRSHR */
5029 case 3: /* VRSRA */
5030 if (u)
5031 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5032 else
5033 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5034 break;
5035 case 4: /* VSRI */
5036 case 5: /* VSHL, VSLI */
5037 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5038 break;
5039 case 6: /* VQSHLU */
5040 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5041 cpu_V0, cpu_V1);
5042 break;
5043 case 7: /* VQSHL */
5044 if (u) {
5045 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5046 cpu_V0, cpu_V1);
5047 } else {
5048 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5049 cpu_V0, cpu_V1);
5050 }
5051 break;
5052 }
5053 if (op == 1 || op == 3) {
5054 /* Accumulate. */
5055 neon_load_reg64(cpu_V1, rd + pass);
5056 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5057 } else if (op == 4 || (op == 5 && u)) {
5058 /* Insert */
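/* The mask covers the bit positions just written by the shift: for VSRI it
 * is ~0 >> n (where n = -shift is the right shift amount), for VSLI it is
 * ~0 << shift. The old destination keeps only the bits outside the mask
 * and is then ORed with the shifted value.
 */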
5059 neon_load_reg64(cpu_V1, rd + pass);
5060 uint64_t mask;
5061 if (shift < -63 || shift > 63) {
5062 mask = 0;
5063 } else {
5064 if (op == 4) {
5065 mask = 0xffffffffffffffffull >> -shift;
5066 } else {
5067 mask = 0xffffffffffffffffull << shift;
5068 }
5069 }
5070 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5071 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5072 }
5073 neon_store_reg64(cpu_V0, rd + pass);
5074 } else { /* size < 3 */
5075 /* Operands in tmp and tmp2. */
5076 tmp = neon_load_reg(rm, pass);
5077 tmp2 = tcg_temp_new_i32();
5078 tcg_gen_movi_i32(tmp2, imm);
5079 switch (op) {
5080 case 0: /* VSHR */
5081 case 1: /* VSRA */
5082 GEN_NEON_INTEGER_OP(shl);
5083 break;
5084 case 2: /* VRSHR */
5085 case 3: /* VRSRA */
5086 GEN_NEON_INTEGER_OP(rshl);
5087 break;
5088 case 4: /* VSRI */
5089 case 5: /* VSHL, VSLI */
5090 switch (size) {
5091 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5092 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5093 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5094 default: abort();
5095 }
5096 break;
5097 case 6: /* VQSHLU */
5098 switch (size) {
5099 case 0:
5100 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5101 tmp, tmp2);
5102 break;
5103 case 1:
5104 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5105 tmp, tmp2);
5106 break;
5107 case 2:
5108 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5109 tmp, tmp2);
5110 break;
5111 default:
5112 abort();
5113 }
5114 break;
5115 case 7: /* VQSHL */
5116 GEN_NEON_INTEGER_OP_ENV(qshl);
5117 break;
5118 }
5119 tcg_temp_free_i32(tmp2);
5120
5121 if (op == 1 || op == 3) {
5122 /* Accumulate. */
5123 tmp2 = neon_load_reg(rd, pass);
5124 gen_neon_add(size, tmp, tmp2);
5125 tcg_temp_free_i32(tmp2);
5126 } else if (op == 4 || (op == 5 && u)) {
5127 /* Insert */
5128 switch (size) {
5129 case 0:
5130 if (op == 4)
5131 mask = 0xff >> -shift;
5132 else
5133 mask = (uint8_t)(0xff << shift);
5134 mask |= mask << 8;
5135 mask |= mask << 16;
5136 break;
5137 case 1:
5138 if (op == 4)
5139 mask = 0xffff >> -shift;
5140 else
5141 mask = (uint16_t)(0xffff << shift);
5142 mask |= mask << 16;
5143 break;
5144 case 2:
5145 if (shift < -31 || shift > 31) {
5146 mask = 0;
5147 } else {
5148 if (op == 4)
5149 mask = 0xffffffffu >> -shift;
5150 else
5151 mask = 0xffffffffu << shift;
5152 }
5153 break;
5154 default:
5155 abort();
5156 }
5157 tmp2 = neon_load_reg(rd, pass);
5158 tcg_gen_andi_i32(tmp, tmp, mask);
5159 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5160 tcg_gen_or_i32(tmp, tmp, tmp2);
5161 tcg_temp_free_i32(tmp2);
5162 }
5163 neon_store_reg(rd, pass, tmp);
5164 }
5165 } /* for pass */
5166 } else if (op < 10) {
5167 /* Shift by immediate and narrow:
5168 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5169 int input_unsigned = (op == 8) ? !u : u;
5170 if (rm & 1) {
5171 return 1;
5172 }
5173 shift = shift - (1 << (size + 3));
5174 size++;
5175 if (size == 3) {
5176 tmp64 = tcg_const_i64(shift);
5177 neon_load_reg64(cpu_V0, rm);
5178 neon_load_reg64(cpu_V1, rm + 1);
5179 for (pass = 0; pass < 2; pass++) {
5180 TCGv_i64 in;
5181 if (pass == 0) {
5182 in = cpu_V0;
5183 } else {
5184 in = cpu_V1;
5185 }
5186 if (q) {
5187 if (input_unsigned) {
5188 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5189 } else {
5190 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5191 }
5192 } else {
5193 if (input_unsigned) {
5194 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5195 } else {
5196 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5197 }
5198 }
5199 tmp = tcg_temp_new_i32();
5200 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5201 neon_store_reg(rd, pass, tmp);
5202 } /* for pass */
5203 tcg_temp_free_i64(tmp64);
5204 } else {
5205 if (size == 1) {
5206 imm = (uint16_t)shift;
5207 imm |= imm << 16;
5208 } else {
5209 /* size == 2 */
5210 imm = (uint32_t)shift;
5211 }
5212 tmp2 = tcg_const_i32(imm);
5213 tmp4 = neon_load_reg(rm + 1, 0);
5214 tmp5 = neon_load_reg(rm + 1, 1);
5215 for (pass = 0; pass < 2; pass++) {
5216 if (pass == 0) {
5217 tmp = neon_load_reg(rm, 0);
5218 } else {
5219 tmp = tmp4;
5220 }
5221 gen_neon_shift_narrow(size, tmp, tmp2, q,
5222 input_unsigned);
5223 if (pass == 0) {
5224 tmp3 = neon_load_reg(rm, 1);
5225 } else {
5226 tmp3 = tmp5;
5227 }
5228 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5229 input_unsigned);
5230 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5231 tcg_temp_free_i32(tmp);
5232 tcg_temp_free_i32(tmp3);
5233 tmp = tcg_temp_new_i32();
5234 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5235 neon_store_reg(rd, pass, tmp);
5236 } /* for pass */
5237 tcg_temp_free_i32(tmp2);
5238 }
5239 } else if (op == 10) {
5240 /* VSHLL, VMOVL */
5241 if (q || (rd & 1)) {
5242 return 1;
5243 }
5244 tmp = neon_load_reg(rm, 0);
5245 tmp2 = neon_load_reg(rm, 1);
5246 for (pass = 0; pass < 2; pass++) {
5247 if (pass == 1)
5248 tmp = tmp2;
5249
5250 gen_neon_widen(cpu_V0, tmp, size, u);
5251
5252 if (shift != 0) {
5253 /* The shift is less than the width of the source
5254 type, so we can just shift the whole register. */
5255 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5256 /* Widen the result of shift: we need to clear
5257 * the potential overflow bits resulting from
5258 * left bits of the narrow input appearing as
5259 * right bits of the left neighbour narrow
5260 * input. */
5261 if (size < 2 || !u) {
5262 uint64_t imm64;
5263 if (size == 0) {
5264 imm = (0xffu >> (8 - shift));
5265 imm |= imm << 16;
5266 } else if (size == 1) {
5267 imm = 0xffff >> (16 - shift);
5268 } else {
5269 /* size == 2 */
5270 imm = 0xffffffff >> (32 - shift);
5271 }
5272 if (size < 2) {
5273 imm64 = imm | (((uint64_t)imm) << 32);
5274 } else {
5275 imm64 = imm;
5276 }
5277 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5278 }
5279 }
5280 neon_store_reg64(cpu_V0, rd + pass);
5281 }
5282 } else if (op >= 14) {
5283 /* VCVT fixed-point. */
5284 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5285 return 1;
5286 }
5287 /* We have already masked out the must-be-1 top bit of imm6,
5288 * hence this 32-shift where the ARM ARM has 64-imm6.
5289 */
5290 shift = 32 - shift;
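/* Worked example: an architectural imm6 of 48 encodes 64 - 48 = 16 fraction
 * bits; with the must-be-1 top bit already masked off the value arrives here
 * as 48 & 0x1f = 16, and 32 - 16 gives the same 16.
 */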
5291 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5292 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5293 if (!(op & 1)) {
5294 if (u)
5295 gen_vfp_ulto(0, shift, 1);
5296 else
5297 gen_vfp_slto(0, shift, 1);
5298 } else {
5299 if (u)
5300 gen_vfp_toul(0, shift, 1);
5301 else
5302 gen_vfp_tosl(0, shift, 1);
5303 }
5304 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5305 }
5306 } else {
5307 return 1;
5308 }
5309 } else { /* (insn & 0x00380080) == 0 */
5310 int invert;
5311 if (q && (rd & 1)) {
5312 return 1;
5313 }
5314
5315 op = (insn >> 8) & 0xf;
5316 /* One register and immediate. */
5317 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5318 invert = (insn & (1 << 5)) != 0;
5319 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5320 * We choose to not special-case this and will behave as if a
5321 * valid constant encoding of 0 had been given.
5322 */
5323 switch (op) {
5324 case 0: case 1:
5325 /* no-op */
5326 break;
5327 case 2: case 3:
5328 imm <<= 8;
5329 break;
5330 case 4: case 5:
5331 imm <<= 16;
5332 break;
5333 case 6: case 7:
5334 imm <<= 24;
5335 break;
5336 case 8: case 9:
5337 imm |= imm << 16;
5338 break;
5339 case 10: case 11:
5340 imm = (imm << 8) | (imm << 24);
5341 break;
5342 case 12:
5343 imm = (imm << 8) | 0xff;
5344 break;
5345 case 13:
5346 imm = (imm << 16) | 0xffff;
5347 break;
5348 case 14:
5349 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5350 if (invert)
5351 imm = ~imm;
5352 break;
5353 case 15:
5354 if (invert) {
5355 return 1;
5356 }
5357 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5358 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5359 break;
5360 }
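/* For example, op 12 expands an 8-bit immediate abcdefgh to
 * (abcdefgh << 8) | 0xff; op 14 with 'invert' set is the per-byte
 * replication case, which is handled specially in the loop below.
 */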
5361 if (invert)
5362 imm = ~imm;
5363
5364 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5365 if (op & 1 && op < 12) {
5366 tmp = neon_load_reg(rd, pass);
5367 if (invert) {
5368 /* The immediate value has already been inverted, so
5369 BIC becomes AND. */
5370 tcg_gen_andi_i32(tmp, tmp, imm);
5371 } else {
5372 tcg_gen_ori_i32(tmp, tmp, imm);
5373 }
5374 } else {
5375 /* VMOV, VMVN. */
5376 tmp = tcg_temp_new_i32();
5377 if (op == 14 && invert) {
5378 int n;
5379 uint32_t val;
5380 val = 0;
5381 for (n = 0; n < 4; n++) {
5382 if (imm & (1 << (n + (pass & 1) * 4)))
5383 val |= 0xff << (n * 8);
5384 }
5385 tcg_gen_movi_i32(tmp, val);
5386 } else {
5387 tcg_gen_movi_i32(tmp, imm);
5388 }
5389 }
5390 neon_store_reg(rd, pass, tmp);
5391 }
5392 }
5393 } else { /* (insn & 0x00800010 == 0x00800000) */
5394 if (size != 3) {
5395 op = (insn >> 8) & 0xf;
5396 if ((insn & (1 << 6)) == 0) {
5397 /* Three registers of different lengths. */
5398 int src1_wide;
5399 int src2_wide;
5400 int prewiden;
5401 /* undefreq: bit 0 : UNDEF if size != 0
5402 * bit 1 : UNDEF if size == 0
5403 * bit 2 : UNDEF if U == 1
5404 * Note that [1:0] set implies 'always UNDEF'
5405 */
5406 int undefreq;
5407 /* prewiden, src1_wide, src2_wide, undefreq */
5408 static const int neon_3reg_wide[16][4] = {
5409 {1, 0, 0, 0}, /* VADDL */
5410 {1, 1, 0, 0}, /* VADDW */
5411 {1, 0, 0, 0}, /* VSUBL */
5412 {1, 1, 0, 0}, /* VSUBW */
5413 {0, 1, 1, 0}, /* VADDHN */
5414 {0, 0, 0, 0}, /* VABAL */
5415 {0, 1, 1, 0}, /* VSUBHN */
5416 {0, 0, 0, 0}, /* VABDL */
5417 {0, 0, 0, 0}, /* VMLAL */
5418 {0, 0, 0, 6}, /* VQDMLAL */
5419 {0, 0, 0, 0}, /* VMLSL */
5420 {0, 0, 0, 6}, /* VQDMLSL */
5421 {0, 0, 0, 0}, /* Integer VMULL */
5422 {0, 0, 0, 2}, /* VQDMULL */
5423 {0, 0, 0, 5}, /* Polynomial VMULL */
5424 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5425 };
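/* Example of the undefreq encoding: VQDMULL has undefreq = 2 and so UNDEFs
 * when size == 0, while polynomial VMULL has 5 (bits 0 and 2) and UNDEFs
 * when size != 0 or U == 1.
 */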
5426
5427 prewiden = neon_3reg_wide[op][0];
5428 src1_wide = neon_3reg_wide[op][1];
5429 src2_wide = neon_3reg_wide[op][2];
5430 undefreq = neon_3reg_wide[op][3];
5431
5432 if (((undefreq & 1) && (size != 0)) ||
5433 ((undefreq & 2) && (size == 0)) ||
5434 ((undefreq & 4) && u)) {
5435 return 1;
5436 }
5437 if ((src1_wide && (rn & 1)) ||
5438 (src2_wide && (rm & 1)) ||
5439 (!src2_wide && (rd & 1))) {
5440 return 1;
5441 }
5442
5443 /* Avoid overlapping operands. Wide source operands are
5444 always aligned so will never overlap with wide
5445 destinations in problematic ways. */
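/* e.g. if rd == rm and rm is not wide, pass 1 would otherwise read a half
 * of rm that pass 0 has already overwritten, so that half is stashed in a
 * scratch slot first.
 */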
5446 if (rd == rm && !src2_wide) {
5447 tmp = neon_load_reg(rm, 1);
5448 neon_store_scratch(2, tmp);
5449 } else if (rd == rn && !src1_wide) {
5450 tmp = neon_load_reg(rn, 1);
5451 neon_store_scratch(2, tmp);
5452 }
5453 TCGV_UNUSED(tmp3);
5454 for (pass = 0; pass < 2; pass++) {
5455 if (src1_wide) {
5456 neon_load_reg64(cpu_V0, rn + pass);
5457 TCGV_UNUSED(tmp);
5458 } else {
5459 if (pass == 1 && rd == rn) {
5460 tmp = neon_load_scratch(2);
5461 } else {
5462 tmp = neon_load_reg(rn, pass);
5463 }
5464 if (prewiden) {
5465 gen_neon_widen(cpu_V0, tmp, size, u);
5466 }
5467 }
5468 if (src2_wide) {
5469 neon_load_reg64(cpu_V1, rm + pass);
5470 TCGV_UNUSED(tmp2);
5471 } else {
5472 if (pass == 1 && rd == rm) {
5473 tmp2 = neon_load_scratch(2);
5474 } else {
5475 tmp2 = neon_load_reg(rm, pass);
5476 }
5477 if (prewiden) {
5478 gen_neon_widen(cpu_V1, tmp2, size, u);
5479 }
5480 }
5481 switch (op) {
5482 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5483 gen_neon_addl(size);
5484 break;
5485 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5486 gen_neon_subl(size);
5487 break;
5488 case 5: case 7: /* VABAL, VABDL */
5489 switch ((size << 1) | u) {
5490 case 0:
5491 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5492 break;
5493 case 1:
5494 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5495 break;
5496 case 2:
5497 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5498 break;
5499 case 3:
5500 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5501 break;
5502 case 4:
5503 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5504 break;
5505 case 5:
5506 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5507 break;
5508 default: abort();
5509 }
5510 tcg_temp_free_i32(tmp2);
5511 tcg_temp_free_i32(tmp);
5512 break;
5513 case 8: case 9: case 10: case 11: case 12: case 13:
5514 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5515 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5516 break;
5517 case 14: /* Polynomial VMULL */
5518 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5519 tcg_temp_free_i32(tmp2);
5520 tcg_temp_free_i32(tmp);
5521 break;
5522 default: /* 15 is RESERVED: caught earlier */
5523 abort();
5524 }
5525 if (op == 13) {
5526 /* VQDMULL */
5527 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5528 neon_store_reg64(cpu_V0, rd + pass);
5529 } else if (op == 5 || (op >= 8 && op <= 11)) {
5530 /* Accumulate. */
5531 neon_load_reg64(cpu_V1, rd + pass);
5532 switch (op) {
5533 case 10: /* VMLSL */
5534 gen_neon_negl(cpu_V0, size);
5535 /* Fall through */
5536 case 5: case 8: /* VABAL, VMLAL */
5537 gen_neon_addl(size);
5538 break;
5539 case 9: case 11: /* VQDMLAL, VQDMLSL */
5540 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5541 if (op == 11) {
5542 gen_neon_negl(cpu_V0, size);
5543 }
5544 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5545 break;
5546 default:
5547 abort();
5548 }
5549 neon_store_reg64(cpu_V0, rd + pass);
5550 } else if (op == 4 || op == 6) {
5551 /* Narrowing operation. */
5552 tmp = tcg_temp_new_i32();
5553 if (!u) {
5554 switch (size) {
5555 case 0:
5556 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5557 break;
5558 case 1:
5559 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5560 break;
5561 case 2:
5562 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5563 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5564 break;
5565 default: abort();
5566 }
5567 } else {
5568 switch (size) {
5569 case 0:
5570 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5571 break;
5572 case 1:
5573 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5574 break;
5575 case 2:
5576 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5577 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5578 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5579 break;
5580 default: abort();
5581 }
5582 }
5583 if (pass == 0) {
5584 tmp3 = tmp;
5585 } else {
5586 neon_store_reg(rd, 0, tmp3);
5587 neon_store_reg(rd, 1, tmp);
5588 }
5589 } else {
5590 /* Write back the result. */
5591 neon_store_reg64(cpu_V0, rd + pass);
5592 }
5593 }
5594 } else {
5595 /* Two registers and a scalar. NB that for ops of this form
5596 * the ARM ARM labels bit 24 as Q, but it is in our variable
5597 * 'u', not 'q'.
5598 */
5599 if (size == 0) {
5600 return 1;
5601 }
5602 switch (op) {
5603 case 1: /* Float VMLA scalar */
5604 case 5: /* Float VMLS scalar */
5605 case 9: /* Float VMUL scalar */
5606 if (size == 1) {
5607 return 1;
5608 }
5609 /* fall through */
5610 case 0: /* Integer VMLA scalar */
5611 case 4: /* Integer VMLS scalar */
5612 case 8: /* Integer VMUL scalar */
5613 case 12: /* VQDMULH scalar */
5614 case 13: /* VQRDMULH scalar */
5615 if (u && ((rd | rn) & 1)) {
5616 return 1;
5617 }
5618 tmp = neon_get_scalar(size, rm);
5619 neon_store_scratch(0, tmp);
5620 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5621 tmp = neon_load_scratch(0);
5622 tmp2 = neon_load_reg(rn, pass);
5623 if (op == 12) {
5624 if (size == 1) {
5625 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5626 } else {
5627 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5628 }
5629 } else if (op == 13) {
5630 if (size == 1) {
5631 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5632 } else {
5633 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5634 }
5635 } else if (op & 1) {
5636 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5637 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5638 tcg_temp_free_ptr(fpstatus);
5639 } else {
5640 switch (size) {
5641 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5642 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5643 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5644 default: abort();
5645 }
5646 }
5647 tcg_temp_free_i32(tmp2);
5648 if (op < 8) {
5649 /* Accumulate. */
5650 tmp2 = neon_load_reg(rd, pass);
5651 switch (op) {
5652 case 0:
5653 gen_neon_add(size, tmp, tmp2);
5654 break;
5655 case 1:
5656 {
5657 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5658 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5659 tcg_temp_free_ptr(fpstatus);
5660 break;
5661 }
5662 case 4:
5663 gen_neon_rsb(size, tmp, tmp2);
5664 break;
5665 case 5:
5666 {
5667 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5668 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5669 tcg_temp_free_ptr(fpstatus);
5670 break;
5671 }
5672 default:
5673 abort();
5674 }
5675 tcg_temp_free_i32(tmp2);
5676 }
5677 neon_store_reg(rd, pass, tmp);
5678 }
5679 break;
5680 case 3: /* VQDMLAL scalar */
5681 case 7: /* VQDMLSL scalar */
5682 case 11: /* VQDMULL scalar */
5683 if (u == 1) {
5684 return 1;
5685 }
5686 /* fall through */
5687 case 2: /* VMLAL scalar */
5688 case 6: /* VMLSL scalar */
5689 case 10: /* VMULL scalar */
5690 if (rd & 1) {
5691 return 1;
5692 }
5693 tmp2 = neon_get_scalar(size, rm);
5694 /* We need a copy of tmp2 because gen_neon_mull
5695 * deletes it during pass 0. */
5696 tmp4 = tcg_temp_new_i32();
5697 tcg_gen_mov_i32(tmp4, tmp2);
5698 tmp3 = neon_load_reg(rn, 1);
5699
5700 for (pass = 0; pass < 2; pass++) {
5701 if (pass == 0) {
5702 tmp = neon_load_reg(rn, 0);
5703 } else {
5704 tmp = tmp3;
5705 tmp2 = tmp4;
5706 }
5707 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5708 if (op != 11) {
5709 neon_load_reg64(cpu_V1, rd + pass);
5710 }
5711 switch (op) {
5712 case 6:
5713 gen_neon_negl(cpu_V0, size);
5714 /* Fall through */
5715 case 2:
5716 gen_neon_addl(size);
5717 break;
5718 case 3: case 7:
5719 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5720 if (op == 7) {
5721 gen_neon_negl(cpu_V0, size);
5722 }
5723 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5724 break;
5725 case 10:
5726 /* no-op */
5727 break;
5728 case 11:
5729 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5730 break;
5731 default:
5732 abort();
5733 }
5734 neon_store_reg64(cpu_V0, rd + pass);
5735 }
5736
5737
5738 break;
5739 default: /* 14 and 15 are RESERVED */
5740 return 1;
5741 }
5742 }
5743 } else { /* size == 3 */
5744 if (!u) {
5745 /* Extract. */
5746 imm = (insn >> 8) & 0xf;
5747
5748 if (imm > 7 && !q)
5749 return 1;
5750
5751 if (q && ((rd | rn | rm) & 1)) {
5752 return 1;
5753 }
5754
5755 if (imm == 0) {
5756 neon_load_reg64(cpu_V0, rn);
5757 if (q) {
5758 neon_load_reg64(cpu_V1, rn + 1);
5759 }
5760 } else if (imm == 8) {
5761 neon_load_reg64(cpu_V0, rn + 1);
5762 if (q) {
5763 neon_load_reg64(cpu_V1, rm);
5764 }
5765 } else if (q) {
5766 tmp64 = tcg_temp_new_i64();
5767 if (imm < 8) {
5768 neon_load_reg64(cpu_V0, rn);
5769 neon_load_reg64(tmp64, rn + 1);
5770 } else {
5771 neon_load_reg64(cpu_V0, rn + 1);
5772 neon_load_reg64(tmp64, rm);
5773 }
5774 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5775 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5776 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5777 if (imm < 8) {
5778 neon_load_reg64(cpu_V1, rm);
5779 } else {
5780 neon_load_reg64(cpu_V1, rm + 1);
5781 imm -= 8;
5782 }
5783 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5784 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5785 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5786 tcg_temp_free_i64(tmp64);
5787 } else {
5788 /* BUGFIX */
5789 neon_load_reg64(cpu_V0, rn);
5790 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5791 neon_load_reg64(cpu_V1, rm);
5792 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5793 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5794 }
5795 neon_store_reg64(cpu_V0, rd);
5796 if (q) {
5797 neon_store_reg64(cpu_V1, rd + 1);
5798 }
5799 } else if ((insn & (1 << 11)) == 0) {
5800 /* Two register misc. */
5801 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5802 size = (insn >> 18) & 3;
5803 /* UNDEF for unknown op values and bad op-size combinations */
5804 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5805 return 1;
5806 }
5807 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5808 q && ((rm | rd) & 1)) {
5809 return 1;
5810 }
5811 switch (op) {
5812 case NEON_2RM_VREV64:
5813 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5814 tmp = neon_load_reg(rm, pass * 2);
5815 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5816 switch (size) {
5817 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5818 case 1: gen_swap_half(tmp); break;
5819 case 2: /* no-op */ break;
5820 default: abort();
5821 }
5822 neon_store_reg(rd, pass * 2 + 1, tmp);
5823 if (size == 2) {
5824 neon_store_reg(rd, pass * 2, tmp2);
5825 } else {
5826 switch (size) {
5827 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5828 case 1: gen_swap_half(tmp2); break;
5829 default: abort();
5830 }
5831 neon_store_reg(rd, pass * 2, tmp2);
5832 }
5833 }
5834 break;
5835 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5836 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
5837 for (pass = 0; pass < q + 1; pass++) {
5838 tmp = neon_load_reg(rm, pass * 2);
5839 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5840 tmp = neon_load_reg(rm, pass * 2 + 1);
5841 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5842 switch (size) {
5843 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5844 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5845 case 2: tcg_gen_add_i64(CPU_V001); break;
5846 default: abort();
5847 }
5848 if (op >= NEON_2RM_VPADAL) {
5849 /* Accumulate. */
5850 neon_load_reg64(cpu_V1, rd + pass);
5851 gen_neon_addl(size);
5852 }
5853 neon_store_reg64(cpu_V0, rd + pass);
5854 }
5855 break;
5856 case NEON_2RM_VTRN:
5857 if (size == 2) {
5858 int n;
5859 for (n = 0; n < (q ? 4 : 2); n += 2) {
5860 tmp = neon_load_reg(rm, n);
5861 tmp2 = neon_load_reg(rd, n + 1);
5862 neon_store_reg(rm, n, tmp2);
5863 neon_store_reg(rd, n + 1, tmp);
5864 }
5865 } else {
5866 goto elementwise;
5867 }
5868 break;
5869 case NEON_2RM_VUZP:
5870 if (gen_neon_unzip(rd, rm, size, q)) {
5871 return 1;
5872 }
5873 break;
5874 case NEON_2RM_VZIP:
5875 if (gen_neon_zip(rd, rm, size, q)) {
5876 return 1;
5877 }
5878 break;
5879 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5880 /* also VQMOVUN; op field and mnemonics don't line up */
5881 if (rm & 1) {
5882 return 1;
5883 }
5884 TCGV_UNUSED(tmp2);
5885 for (pass = 0; pass < 2; pass++) {
5886 neon_load_reg64(cpu_V0, rm + pass);
5887 tmp = tcg_temp_new_i32();
5888 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5889 tmp, cpu_V0);
5890 if (pass == 0) {
5891 tmp2 = tmp;
5892 } else {
5893 neon_store_reg(rd, 0, tmp2);
5894 neon_store_reg(rd, 1, tmp);
5895 }
5896 }
5897 break;
5898 case NEON_2RM_VSHLL:
5899 if (q || (rd & 1)) {
5900 return 1;
5901 }
5902 tmp = neon_load_reg(rm, 0);
5903 tmp2 = neon_load_reg(rm, 1);
5904 for (pass = 0; pass < 2; pass++) {
5905 if (pass == 1)
5906 tmp = tmp2;
5907 gen_neon_widen(cpu_V0, tmp, size, 1);
5908 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5909 neon_store_reg64(cpu_V0, rd + pass);
5910 }
5911 break;
5912 case NEON_2RM_VCVT_F16_F32:
5913 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5914 q || (rm & 1)) {
5915 return 1;
5916 }
5917 tmp = tcg_temp_new_i32();
5918 tmp2 = tcg_temp_new_i32();
5919 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5920 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5921 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5922 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5923 tcg_gen_shli_i32(tmp2, tmp2, 16);
5924 tcg_gen_or_i32(tmp2, tmp2, tmp);
5925 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5926 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5927 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5928 neon_store_reg(rd, 0, tmp2);
5929 tmp2 = tcg_temp_new_i32();
5930 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5931 tcg_gen_shli_i32(tmp2, tmp2, 16);
5932 tcg_gen_or_i32(tmp2, tmp2, tmp);
5933 neon_store_reg(rd, 1, tmp2);
5934 tcg_temp_free_i32(tmp);
5935 break;
5936 case NEON_2RM_VCVT_F32_F16:
5937 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5938 q || (rd & 1)) {
5939 return 1;
5940 }
5941 tmp3 = tcg_temp_new_i32();
5942 tmp = neon_load_reg(rm, 0);
5943 tmp2 = neon_load_reg(rm, 1);
5944 tcg_gen_ext16u_i32(tmp3, tmp);
5945 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5946 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5947 tcg_gen_shri_i32(tmp3, tmp, 16);
5948 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5949 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5950 tcg_temp_free_i32(tmp);
5951 tcg_gen_ext16u_i32(tmp3, tmp2);
5952 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5953 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5954 tcg_gen_shri_i32(tmp3, tmp2, 16);
5955 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5956 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5957 tcg_temp_free_i32(tmp2);
5958 tcg_temp_free_i32(tmp3);
5959 break;
5960 default:
5961 elementwise:
5962 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5963 if (neon_2rm_is_float_op(op)) {
5964 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5965 neon_reg_offset(rm, pass));
5966 TCGV_UNUSED(tmp);
5967 } else {
5968 tmp = neon_load_reg(rm, pass);
5969 }
5970 switch (op) {
5971 case NEON_2RM_VREV32:
5972 switch (size) {
5973 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5974 case 1: gen_swap_half(tmp); break;
5975 default: abort();
5976 }
5977 break;
5978 case NEON_2RM_VREV16:
5979 gen_rev16(tmp);
5980 break;
5981 case NEON_2RM_VCLS:
5982 switch (size) {
5983 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5984 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5985 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5986 default: abort();
5987 }
5988 break;
5989 case NEON_2RM_VCLZ:
5990 switch (size) {
5991 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5992 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5993 case 2: gen_helper_clz(tmp, tmp); break;
5994 default: abort();
5995 }
5996 break;
5997 case NEON_2RM_VCNT:
5998 gen_helper_neon_cnt_u8(tmp, tmp);
5999 break;
6000 case NEON_2RM_VMVN:
6001 tcg_gen_not_i32(tmp, tmp);
6002 break;
6003 case NEON_2RM_VQABS:
6004 switch (size) {
6005 case 0:
6006 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6007 break;
6008 case 1:
6009 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6010 break;
6011 case 2:
6012 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6013 break;
6014 default: abort();
6015 }
6016 break;
6017 case NEON_2RM_VQNEG:
6018 switch (size) {
6019 case 0:
6020 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6021 break;
6022 case 1:
6023 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6024 break;
6025 case 2:
6026 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6027 break;
6028 default: abort();
6029 }
6030 break;
6031 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6032 tmp2 = tcg_const_i32(0);
6033 switch (size) {
6034 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6035 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6036 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6037 default: abort();
6038 }
6039 tcg_temp_free(tmp2);
6040 if (op == NEON_2RM_VCLE0) {
6041 tcg_gen_not_i32(tmp, tmp);
6042 }
6043 break;
6044 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6045 tmp2 = tcg_const_i32(0);
6046 switch (size) {
6047 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6048 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6049 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6050 default: abort();
6051 }
6052 tcg_temp_free(tmp2);
6053 if (op == NEON_2RM_VCLT0) {
6054 tcg_gen_not_i32(tmp, tmp);
6055 }
6056 break;
6057 case NEON_2RM_VCEQ0:
6058 tmp2 = tcg_const_i32(0);
6059 switch (size) {
6060 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6061 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6062 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6063 default: abort();
6064 }
6065 tcg_temp_free(tmp2);
6066 break;
6067 case NEON_2RM_VABS:
6068 switch (size) {
6069 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6070 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6071 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6072 default: abort();
6073 }
6074 break;
6075 case NEON_2RM_VNEG:
6076 tmp2 = tcg_const_i32(0);
6077 gen_neon_rsb(size, tmp, tmp2);
6078 tcg_temp_free(tmp2);
6079 break;
6080 case NEON_2RM_VCGT0_F:
6081 {
6082 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6083 tmp2 = tcg_const_i32(0);
6084 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6085 tcg_temp_free(tmp2);
6086 tcg_temp_free_ptr(fpstatus);
6087 break;
6088 }
6089 case NEON_2RM_VCGE0_F:
6090 {
6091 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6092 tmp2 = tcg_const_i32(0);
6093 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6094 tcg_temp_free(tmp2);
6095 tcg_temp_free_ptr(fpstatus);
6096 break;
6097 }
6098 case NEON_2RM_VCEQ0_F:
6099 {
6100 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6101 tmp2 = tcg_const_i32(0);
6102 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6103 tcg_temp_free(tmp2);
6104 tcg_temp_free_ptr(fpstatus);
6105 break;
6106 }
6107 case NEON_2RM_VCLE0_F:
6108 {
6109 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6110 tmp2 = tcg_const_i32(0);
6111 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6112 tcg_temp_free(tmp2);
6113 tcg_temp_free_ptr(fpstatus);
6114 break;
6115 }
6116 case NEON_2RM_VCLT0_F:
6117 {
6118 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6119 tmp2 = tcg_const_i32(0);
6120 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6121 tcg_temp_free(tmp2);
6122 tcg_temp_free_ptr(fpstatus);
6123 break;
6124 }
6125 case NEON_2RM_VABS_F:
6126 gen_vfp_abs(0);
6127 break;
6128 case NEON_2RM_VNEG_F:
6129 gen_vfp_neg(0);
6130 break;
6131 case NEON_2RM_VSWP:
6132 tmp2 = neon_load_reg(rd, pass);
6133 neon_store_reg(rm, pass, tmp2);
6134 break;
6135 case NEON_2RM_VTRN:
6136 tmp2 = neon_load_reg(rd, pass);
6137 switch (size) {
6138 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6139 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6140 default: abort();
6141 }
6142 neon_store_reg(rm, pass, tmp2);
6143 break;
6144 case NEON_2RM_VRECPE:
6145 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6146 break;
6147 case NEON_2RM_VRSQRTE:
6148 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6149 break;
6150 case NEON_2RM_VRECPE_F:
6151 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6152 break;
6153 case NEON_2RM_VRSQRTE_F:
6154 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6155 break;
6156 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6157 gen_vfp_sito(0, 1);
6158 break;
6159 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6160 gen_vfp_uito(0, 1);
6161 break;
6162 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6163 gen_vfp_tosiz(0, 1);
6164 break;
6165 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6166 gen_vfp_touiz(0, 1);
6167 break;
6168 default:
6169 /* Reserved op values were caught by the
6170 * neon_2rm_sizes[] check earlier.
6171 */
6172 abort();
6173 }
6174 if (neon_2rm_is_float_op(op)) {
6175 tcg_gen_st_f32(cpu_F0s, cpu_env,
6176 neon_reg_offset(rd, pass));
6177 } else {
6178 neon_store_reg(rd, pass, tmp);
6179 }
6180 }
6181 break;
6182 }
6183 } else if ((insn & (1 << 10)) == 0) {
6184 /* VTBL, VTBX. */
6185 int n = ((insn >> 8) & 3) + 1;
6186 if ((rn + n) > 32) {
6187 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6188 * helper function running off the end of the register file.
6189 */
6190 return 1;
6191 }
6192 n <<= 3;
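/* n is now the list length in bytes (eight per D register), which the
 * table-lookup helper uses as the valid index bound.
 */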
6193 if (insn & (1 << 6)) {
6194 tmp = neon_load_reg(rd, 0);
6195 } else {
6196 tmp = tcg_temp_new_i32();
6197 tcg_gen_movi_i32(tmp, 0);
6198 }
6199 tmp2 = neon_load_reg(rm, 0);
6200 tmp4 = tcg_const_i32(rn);
6201 tmp5 = tcg_const_i32(n);
6202 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
6203 tcg_temp_free_i32(tmp);
6204 if (insn & (1 << 6)) {
6205 tmp = neon_load_reg(rd, 1);
6206 } else {
6207 tmp = tcg_temp_new_i32();
6208 tcg_gen_movi_i32(tmp, 0);
6209 }
6210 tmp3 = neon_load_reg(rm, 1);
6211 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
6212 tcg_temp_free_i32(tmp5);
6213 tcg_temp_free_i32(tmp4);
6214 neon_store_reg(rd, 0, tmp2);
6215 neon_store_reg(rd, 1, tmp3);
6216 tcg_temp_free_i32(tmp);
6217 } else if ((insn & 0x380) == 0) {
6218 /* VDUP */
6219 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6220 return 1;
6221 }
6222 if (insn & (1 << 19)) {
6223 tmp = neon_load_reg(rm, 1);
6224 } else {
6225 tmp = neon_load_reg(rm, 0);
6226 }
6227 if (insn & (1 << 16)) {
6228 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6229 } else if (insn & (1 << 17)) {
6230 if ((insn >> 18) & 1)
6231 gen_neon_dup_high16(tmp);
6232 else
6233 gen_neon_dup_low16(tmp);
6234 }
6235 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6236 tmp2 = tcg_temp_new_i32();
6237 tcg_gen_mov_i32(tmp2, tmp);
6238 neon_store_reg(rd, pass, tmp2);
6239 }
6240 tcg_temp_free_i32(tmp);
6241 } else {
6242 return 1;
6243 }
6244 }
6245 }
6246 return 0;
6247 }
6248
6249 static int disas_coproc_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
6250 {
6251 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6252 const ARMCPRegInfo *ri;
6253 ARMCPU *cpu = arm_env_get_cpu(env);
6254
6255 cpnum = (insn >> 8) & 0xf;
6256 if (arm_feature(env, ARM_FEATURE_XSCALE)
6257 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6258 return 1;
6259
6260 /* First check for coprocessor space used for actual instructions */
6261 switch (cpnum) {
6262 case 0:
6263 case 1:
6264 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6265 return disas_iwmmxt_insn(env, s, insn);
6266 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6267 return disas_dsp_insn(env, s, insn);
6268 }
6269 return 1;
6270 case 10:
6271 case 11:
6272         return disas_vfp_insn(env, s, insn);
6273 default:
6274 break;
6275 }
6276
6277 /* Otherwise treat as a generic register access */
6278 is64 = (insn & (1 << 25)) == 0;
6279 if (!is64 && ((insn & (1 << 4)) == 0)) {
6280 /* cdp */
6281 return 1;
6282 }
6283
6284 crm = insn & 0xf;
6285 if (is64) {
6286 crn = 0;
6287 opc1 = (insn >> 4) & 0xf;
6288 opc2 = 0;
6289 rt2 = (insn >> 16) & 0xf;
6290 } else {
6291 crn = (insn >> 16) & 0xf;
6292 opc1 = (insn >> 21) & 7;
6293 opc2 = (insn >> 5) & 7;
6294 rt2 = 0;
6295 }
6296 isread = (insn >> 20) & 1;
6297 rt = (insn >> 12) & 0xf;
6298
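    /* Look up the (cpnum, is64, crn, crm, opc1, opc2) tuple in the CPU's
     * coprocessor register hashtable; anything not registered there falls
     * through to the UNDEF return at the end of this function.
     */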
6299 ri = get_arm_cp_reginfo(cpu,
6300 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6301 if (ri) {
6302 /* Check access permissions */
6303 if (!cp_access_ok(env, ri, isread)) {
6304 return 1;
6305 }
6306
6307 /* Handle special cases first */
6308 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6309 case ARM_CP_NOP:
6310 return 0;
6311 case ARM_CP_WFI:
6312 if (isread) {
6313 return 1;
6314 }
6315 gen_set_pc_im(s->pc);
6316 s->is_jmp = DISAS_WFI;
6317 return 0;
6318 default:
6319 break;
6320 }
6321
6322 if (isread) {
6323 /* Read */
6324 if (is64) {
6325 TCGv_i64 tmp64;
6326 TCGv_i32 tmp;
6327 if (ri->type & ARM_CP_CONST) {
6328 tmp64 = tcg_const_i64(ri->resetvalue);
6329 } else if (ri->readfn) {
6330 TCGv_ptr tmpptr;
6331 gen_set_pc_im(s->pc);
6332 tmp64 = tcg_temp_new_i64();
6333 tmpptr = tcg_const_ptr(ri);
6334 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6335 tcg_temp_free_ptr(tmpptr);
6336 } else {
6337 tmp64 = tcg_temp_new_i64();
6338 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6339 }
6340 tmp = tcg_temp_new_i32();
6341 tcg_gen_trunc_i64_i32(tmp, tmp64);
6342 store_reg(s, rt, tmp);
6343 tcg_gen_shri_i64(tmp64, tmp64, 32);
6344 tmp = tcg_temp_new_i32();
6345 tcg_gen_trunc_i64_i32(tmp, tmp64);
6346 tcg_temp_free_i64(tmp64);
6347 store_reg(s, rt2, tmp);
6348 } else {
6349 TCGv tmp;
6350 if (ri->type & ARM_CP_CONST) {
6351 tmp = tcg_const_i32(ri->resetvalue);
6352 } else if (ri->readfn) {
6353 TCGv_ptr tmpptr;
6354 gen_set_pc_im(s->pc);
6355 tmp = tcg_temp_new_i32();
6356 tmpptr = tcg_const_ptr(ri);
6357 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6358 tcg_temp_free_ptr(tmpptr);
6359 } else {
6360 tmp = load_cpu_offset(ri->fieldoffset);
6361 }
6362 if (rt == 15) {
6363 /* Destination register of r15 for 32 bit loads sets
6364 * the condition codes from the high 4 bits of the value
6365 */
6366 gen_set_nzcv(tmp);
6367 tcg_temp_free_i32(tmp);
6368 } else {
6369 store_reg(s, rt, tmp);
6370 }
6371 }
6372 } else {
6373 /* Write */
6374 if (ri->type & ARM_CP_CONST) {
6375 /* If not forbidden by access permissions, treat as WI */
6376 return 0;
6377 }
6378
6379 if (is64) {
6380 TCGv tmplo, tmphi;
6381 TCGv_i64 tmp64 = tcg_temp_new_i64();
6382 tmplo = load_reg(s, rt);
6383 tmphi = load_reg(s, rt2);
6384 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6385 tcg_temp_free_i32(tmplo);
6386 tcg_temp_free_i32(tmphi);
6387 if (ri->writefn) {
6388 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6389 gen_set_pc_im(s->pc);
6390 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6391 tcg_temp_free_ptr(tmpptr);
6392 } else {
6393 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6394 }
6395 tcg_temp_free_i64(tmp64);
6396 } else {
6397 if (ri->writefn) {
6398 TCGv tmp;
6399 TCGv_ptr tmpptr;
6400 gen_set_pc_im(s->pc);
6401 tmp = load_reg(s, rt);
6402 tmpptr = tcg_const_ptr(ri);
6403 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6404 tcg_temp_free_ptr(tmpptr);
6405 tcg_temp_free_i32(tmp);
6406 } else {
6407 TCGv tmp = load_reg(s, rt);
6408 store_cpu_offset(tmp, ri->fieldoffset);
6409 }
6410 }
6411 /* We default to ending the TB on a coprocessor register write,
6412 * but allow this to be suppressed by the register definition
6413 * (usually only necessary to work around guest bugs).
6414 */
6415 if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6416 gen_lookup_tb(s);
6417 }
6418 }
6419 return 0;
6420 }
6421
6422 return 1;
6423 }
6424
6425
6426 /* Store a 64-bit value to a register pair. Clobbers val. */
6427 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6428 {
6429 TCGv tmp;
6430 tmp = tcg_temp_new_i32();
6431 tcg_gen_trunc_i64_i32(tmp, val);
6432 store_reg(s, rlow, tmp);
6433 tmp = tcg_temp_new_i32();
6434 tcg_gen_shri_i64(val, val, 32);
6435 tcg_gen_trunc_i64_i32(tmp, val);
6436 store_reg(s, rhigh, tmp);
6437 }
6438
6439 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6440 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6441 {
6442 TCGv_i64 tmp;
6443 TCGv tmp2;
6444
6445 /* Load value and extend to 64 bits. */
6446 tmp = tcg_temp_new_i64();
6447 tmp2 = load_reg(s, rlow);
6448 tcg_gen_extu_i32_i64(tmp, tmp2);
6449 tcg_temp_free_i32(tmp2);
6450 tcg_gen_add_i64(val, val, tmp);
6451 tcg_temp_free_i64(tmp);
6452 }
6453
6454 /* load and add a 64-bit value from a register pair. */
6455 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6456 {
6457 TCGv_i64 tmp;
6458 TCGv tmpl;
6459 TCGv tmph;
6460
6461 /* Load 64-bit value rd:rn. */
6462 tmpl = load_reg(s, rlow);
6463 tmph = load_reg(s, rhigh);
6464 tmp = tcg_temp_new_i64();
6465 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6466 tcg_temp_free_i32(tmpl);
6467 tcg_temp_free_i32(tmph);
6468 tcg_gen_add_i64(val, val, tmp);
6469 tcg_temp_free_i64(tmp);
6470 }
6471
6472 /* Set N and Z flags from hi|lo. */
6473 static void gen_logicq_cc(TCGv lo, TCGv hi)
6474 {
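    /* cpu_NF holds a value whose bit 31 is the N flag; cpu_ZF holds a
     * value which is zero exactly when the Z flag is set.
     */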
6475 tcg_gen_mov_i32(cpu_NF, hi);
6476 tcg_gen_or_i32(cpu_ZF, lo, hi);
6477 }
6478
6479 /* Load/Store exclusive instructions are implemented by remembering
6480 the value/address loaded, and seeing if these are the same
6481 when the store is performed. This should be sufficient to implement
6482 the architecturally mandated semantics, and avoids having to monitor
6483 regular stores.
6484
6485 In system emulation mode only one CPU will be running at once, so
6486 this sequence is effectively atomic. In user emulation mode we
6487 throw an exception and handle the atomic operation elsewhere. */
6488 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6489 TCGv addr, int size)
6490 {
6491 TCGv tmp;
6492
6493 switch (size) {
6494 case 0:
6495 tmp = gen_ld8u(addr, IS_USER(s));
6496 break;
6497 case 1:
6498 tmp = gen_ld16u(addr, IS_USER(s));
6499 break;
6500 case 2:
6501 case 3:
6502 tmp = gen_ld32(addr, IS_USER(s));
6503 break;
6504 default:
6505 abort();
6506 }
6507 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6508 store_reg(s, rt, tmp);
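    /* For a 64-bit (ldrexd) access the second word is loaded from addr + 4
     * and remembered in cpu_exclusive_high so the matching strexd can
     * check it too.
     */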
6509 if (size == 3) {
6510 TCGv tmp2 = tcg_temp_new_i32();
6511 tcg_gen_addi_i32(tmp2, addr, 4);
6512 tmp = gen_ld32(tmp2, IS_USER(s));
6513 tcg_temp_free_i32(tmp2);
6514 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6515 store_reg(s, rt2, tmp);
6516 }
6517 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6518 }
6519
6520 static void gen_clrex(DisasContext *s)
6521 {
6522 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6523 }
6524
6525 #ifdef CONFIG_USER_ONLY
6526 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6527 TCGv addr, int size)
6528 {
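    /* Record the operands and raise EXCP_STREX; the store-exclusive is
     * then completed atomically by the user-mode exception handling code.
     */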
6529 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6530 tcg_gen_movi_i32(cpu_exclusive_info,
6531 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6532 gen_exception_insn(s, 4, EXCP_STREX);
6533 }
6534 #else
6535 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6536 TCGv addr, int size)
6537 {
6538 TCGv tmp;
6539 int done_label;
6540 int fail_label;
6541
6542 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6543 [addr] = {Rt};
6544 {Rd} = 0;
6545 } else {
6546 {Rd} = 1;
6547 } */
6548 fail_label = gen_new_label();
6549 done_label = gen_new_label();
6550 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6551 switch (size) {
6552 case 0:
6553 tmp = gen_ld8u(addr, IS_USER(s));
6554 break;
6555 case 1:
6556 tmp = gen_ld16u(addr, IS_USER(s));
6557 break;
6558 case 2:
6559 case 3:
6560 tmp = gen_ld32(addr, IS_USER(s));
6561 break;
6562 default:
6563 abort();
6564 }
6565 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6566 tcg_temp_free_i32(tmp);
6567 if (size == 3) {
6568 TCGv tmp2 = tcg_temp_new_i32();
6569 tcg_gen_addi_i32(tmp2, addr, 4);
6570 tmp = gen_ld32(tmp2, IS_USER(s));
6571 tcg_temp_free_i32(tmp2);
6572 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6573 tcg_temp_free_i32(tmp);
6574 }
6575 tmp = load_reg(s, rt);
6576 switch (size) {
6577 case 0:
6578 gen_st8(tmp, addr, IS_USER(s));
6579 break;
6580 case 1:
6581 gen_st16(tmp, addr, IS_USER(s));
6582 break;
6583 case 2:
6584 case 3:
6585 gen_st32(tmp, addr, IS_USER(s));
6586 break;
6587 default:
6588 abort();
6589 }
6590 if (size == 3) {
6591 tcg_gen_addi_i32(addr, addr, 4);
6592 tmp = load_reg(s, rt2);
6593 gen_st32(tmp, addr, IS_USER(s));
6594 }
6595 tcg_gen_movi_i32(cpu_R[rd], 0);
6596 tcg_gen_br(done_label);
6597 gen_set_label(fail_label);
6598 tcg_gen_movi_i32(cpu_R[rd], 1);
6599 gen_set_label(done_label);
6600 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6601 }
6602 #endif
6603
6604 /* gen_srs:
6605  * @s: DisasContext
6607 * @mode: mode field from insn (which stack to store to)
6608 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
6609 * @writeback: true if writeback bit set
6610 *
6611 * Generate code for the SRS (Store Return State) insn.
6612 */
6613 static void gen_srs(DisasContext *s,
6614 uint32_t mode, uint32_t amode, bool writeback)
6615 {
6616 int32_t offset;
6617 TCGv_i32 addr = tcg_temp_new_i32();
6618 TCGv_i32 tmp = tcg_const_i32(mode);
6619 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6620 tcg_temp_free_i32(tmp);
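    /* Choose where the first stored word (LR) goes relative to the banked SP:
     * DA -> SP-4, IA -> SP, DB -> SP-8, IB -> SP+4; the SPSR is stored 4
     * bytes above it.
     */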
6621 switch (amode) {
6622 case 0: /* DA */
6623 offset = -4;
6624 break;
6625 case 1: /* IA */
6626 offset = 0;
6627 break;
6628 case 2: /* DB */
6629 offset = -8;
6630 break;
6631 case 3: /* IB */
6632 offset = 4;
6633 break;
6634 default:
6635 abort();
6636 }
6637 tcg_gen_addi_i32(addr, addr, offset);
6638 tmp = load_reg(s, 14);
6639 gen_st32(tmp, addr, 0);
6640 tmp = load_cpu_field(spsr);
6641 tcg_gen_addi_i32(addr, addr, 4);
6642 gen_st32(tmp, addr, 0);
6643 if (writeback) {
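        /* addr currently points at the higher of the two words just stored;
         * these offsets produce the architecturally required final base of
         * SP-8 (DA/DB) or SP+8 (IA/IB).
         */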
6644 switch (amode) {
6645 case 0:
6646 offset = -8;
6647 break;
6648 case 1:
6649 offset = 4;
6650 break;
6651 case 2:
6652 offset = -4;
6653 break;
6654 case 3:
6655 offset = 0;
6656 break;
6657 default:
6658 abort();
6659 }
6660 tcg_gen_addi_i32(addr, addr, offset);
6661 tmp = tcg_const_i32(mode);
6662 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6663 tcg_temp_free_i32(tmp);
6664 }
6665 tcg_temp_free_i32(addr);
6666 }
6667
6668 static void disas_arm_insn(CPUARMState * env, DisasContext *s)
6669 {
6670 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6671 TCGv tmp;
6672 TCGv tmp2;
6673 TCGv tmp3;
6674 TCGv addr;
6675 TCGv_i64 tmp64;
6676
6677 insn = arm_ldl_code(env, s->pc, s->bswap_code);
6678 s->pc += 4;
6679
6680 /* M variants do not implement ARM mode. */
6681 if (IS_M(env))
6682 goto illegal_op;
6683 cond = insn >> 28;
6684 if (cond == 0xf){
6685 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6686 * choose to UNDEF. In ARMv5 and above the space is used
6687 * for miscellaneous unconditional instructions.
6688 */
6689 ARCH(5);
6690
6691 /* Unconditional instructions. */
6692 if (((insn >> 25) & 7) == 1) {
6693 /* NEON Data processing. */
6694 if (!arm_feature(env, ARM_FEATURE_NEON))
6695 goto illegal_op;
6696
6697 if (disas_neon_data_insn(env, s, insn))
6698 goto illegal_op;
6699 return;
6700 }
6701 if ((insn & 0x0f100000) == 0x04000000) {
6702 /* NEON load/store. */
6703 if (!arm_feature(env, ARM_FEATURE_NEON))
6704 goto illegal_op;
6705
6706 if (disas_neon_ls_insn(env, s, insn))
6707 goto illegal_op;
6708 return;
6709 }
6710 if (((insn & 0x0f30f000) == 0x0510f000) ||
6711 ((insn & 0x0f30f010) == 0x0710f000)) {
6712 if ((insn & (1 << 22)) == 0) {
6713 /* PLDW; v7MP */
6714 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6715 goto illegal_op;
6716 }
6717 }
6718 /* Otherwise PLD; v5TE+ */
6719 ARCH(5TE);
6720 return;
6721 }
6722 if (((insn & 0x0f70f000) == 0x0450f000) ||
6723 ((insn & 0x0f70f010) == 0x0650f000)) {
6724 ARCH(7);
6725 return; /* PLI; V7 */
6726 }
6727 if (((insn & 0x0f700000) == 0x04100000) ||
6728 ((insn & 0x0f700010) == 0x06100000)) {
6729 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6730 goto illegal_op;
6731 }
6732 return; /* v7MP: Unallocated memory hint: must NOP */
6733 }
6734
6735 if ((insn & 0x0ffffdff) == 0x01010000) {
6736 ARCH(6);
6737 /* setend */
6738 if (((insn >> 9) & 1) != s->bswap_code) {
6739 /* Dynamic endianness switching not implemented. */
6740 goto illegal_op;
6741 }
6742 return;
6743 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6744 switch ((insn >> 4) & 0xf) {
6745 case 1: /* clrex */
6746 ARCH(6K);
6747 gen_clrex(s);
6748 return;
6749 case 4: /* dsb */
6750 case 5: /* dmb */
6751 case 6: /* isb */
6752 ARCH(7);
6753 /* We don't emulate caches so these are a no-op. */
6754 return;
6755 default:
6756 goto illegal_op;
6757 }
6758 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6759 /* srs */
6760 if (IS_USER(s)) {
6761 goto illegal_op;
6762 }
6763 ARCH(6);
6764 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
6765 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6766 /* rfe */
6767 int32_t offset;
6768 if (IS_USER(s))
6769 goto illegal_op;
6770 ARCH(6);
6771 rn = (insn >> 16) & 0xf;
6772 addr = load_reg(s, rn);
6773 i = (insn >> 23) & 3;
6774 switch (i) {
6775 case 0: offset = -4; break; /* DA */
6776 case 1: offset = 0; break; /* IA */
6777 case 2: offset = -8; break; /* DB */
6778 case 3: offset = 4; break; /* IB */
6779 default: abort();
6780 }
6781 if (offset)
6782 tcg_gen_addi_i32(addr, addr, offset);
6783 /* Load PC into tmp and CPSR into tmp2. */
6784 tmp = gen_ld32(addr, 0);
6785 tcg_gen_addi_i32(addr, addr, 4);
6786 tmp2 = gen_ld32(addr, 0);
6787 if (insn & (1 << 21)) {
6788 /* Base writeback. */
6789 switch (i) {
6790 case 0: offset = -8; break;
6791 case 1: offset = 4; break;
6792 case 2: offset = -4; break;
6793 case 3: offset = 0; break;
6794 default: abort();
6795 }
6796 if (offset)
6797 tcg_gen_addi_i32(addr, addr, offset);
6798 store_reg(s, rn, addr);
6799 } else {
6800 tcg_temp_free_i32(addr);
6801 }
6802 gen_rfe(s, tmp, tmp2);
6803 return;
6804 } else if ((insn & 0x0e000000) == 0x0a000000) {
6805 /* branch link and change to thumb (blx <offset>) */
6806 int32_t offset;
6807
6808 val = (uint32_t)s->pc;
6809 tmp = tcg_temp_new_i32();
6810 tcg_gen_movi_i32(tmp, val);
6811 store_reg(s, 14, tmp);
6812 /* Sign-extend the 24-bit offset */
6813 offset = (((int32_t)insn) << 8) >> 8;
6814 /* offset * 4 + bit24 * 2 + (thumb bit) */
6815 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6816 /* pipeline offset */
6817 val += 4;
6818             /* protected by ARCH(5) above, near the start of the uncond block */
6819 gen_bx_im(s, val);
6820 return;
6821 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6822 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6823 /* iWMMXt register transfer. */
6824 if (env->cp15.c15_cpar & (1 << 1))
6825 if (!disas_iwmmxt_insn(env, s, insn))
6826 return;
6827 }
6828 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6829 /* Coprocessor double register transfer. */
6830 ARCH(5TE);
6831 } else if ((insn & 0x0f000010) == 0x0e000010) {
6832 /* Additional coprocessor register transfer. */
6833 } else if ((insn & 0x0ff10020) == 0x01000000) {
6834 uint32_t mask;
6835 uint32_t val;
6836 /* cps (privileged) */
6837 if (IS_USER(s))
6838 return;
6839 mask = val = 0;
6840 if (insn & (1 << 19)) {
6841 if (insn & (1 << 8))
6842 mask |= CPSR_A;
6843 if (insn & (1 << 7))
6844 mask |= CPSR_I;
6845 if (insn & (1 << 6))
6846 mask |= CPSR_F;
6847 if (insn & (1 << 18))
6848 val |= mask;
6849 }
6850 if (insn & (1 << 17)) {
6851 mask |= CPSR_M;
6852 val |= (insn & 0x1f);
6853 }
6854 if (mask) {
6855 gen_set_psr_im(s, mask, 0, val);
6856 }
6857 return;
6858 }
6859 goto illegal_op;
6860 }
6861 if (cond != 0xe) {
6862         /* if not always execute, we generate a conditional jump to
6863            the next instruction */
6864 s->condlabel = gen_new_label();
6865 gen_test_cc(cond ^ 1, s->condlabel);
6866 s->condjmp = 1;
6867 }
6868 if ((insn & 0x0f900000) == 0x03000000) {
6869 if ((insn & (1 << 21)) == 0) {
6870 ARCH(6T2);
6871 rd = (insn >> 12) & 0xf;
6872 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6873 if ((insn & (1 << 22)) == 0) {
6874 /* MOVW */
6875 tmp = tcg_temp_new_i32();
6876 tcg_gen_movi_i32(tmp, val);
6877 } else {
6878 /* MOVT */
6879 tmp = load_reg(s, rd);
6880 tcg_gen_ext16u_i32(tmp, tmp);
6881 tcg_gen_ori_i32(tmp, tmp, val << 16);
6882 }
6883 store_reg(s, rd, tmp);
6884 } else {
6885 if (((insn >> 12) & 0xf) != 0xf)
6886 goto illegal_op;
6887 if (((insn >> 16) & 0xf) == 0) {
6888 gen_nop_hint(s, insn & 0xff);
6889 } else {
6890 /* CPSR = immediate */
6891 val = insn & 0xff;
6892 shift = ((insn >> 8) & 0xf) * 2;
6893 if (shift)
6894 val = (val >> shift) | (val << (32 - shift));
6895 i = ((insn & (1 << 22)) != 0);
6896 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6897 goto illegal_op;
6898 }
6899 }
6900 } else if ((insn & 0x0f900000) == 0x01000000
6901 && (insn & 0x00000090) != 0x00000090) {
6902 /* miscellaneous instructions */
6903 op1 = (insn >> 21) & 3;
6904 sh = (insn >> 4) & 0xf;
6905 rm = insn & 0xf;
6906 switch (sh) {
6907 case 0x0: /* move program status register */
6908 if (op1 & 1) {
6909 /* PSR = reg */
6910 tmp = load_reg(s, rm);
6911 i = ((op1 & 2) != 0);
6912 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6913 goto illegal_op;
6914 } else {
6915 /* reg = PSR */
6916 rd = (insn >> 12) & 0xf;
6917 if (op1 & 2) {
6918 if (IS_USER(s))
6919 goto illegal_op;
6920 tmp = load_cpu_field(spsr);
6921 } else {
6922 tmp = tcg_temp_new_i32();
6923 gen_helper_cpsr_read(tmp, cpu_env);
6924 }
6925 store_reg(s, rd, tmp);
6926 }
6927 break;
6928 case 0x1:
6929 if (op1 == 1) {
6930 /* branch/exchange thumb (bx). */
6931 ARCH(4T);
6932 tmp = load_reg(s, rm);
6933 gen_bx(s, tmp);
6934 } else if (op1 == 3) {
6935 /* clz */
6936 ARCH(5);
6937 rd = (insn >> 12) & 0xf;
6938 tmp = load_reg(s, rm);
6939 gen_helper_clz(tmp, tmp);
6940 store_reg(s, rd, tmp);
6941 } else {
6942 goto illegal_op;
6943 }
6944 break;
6945 case 0x2:
6946 if (op1 == 1) {
6947 ARCH(5J); /* bxj */
6948 /* Trivial implementation equivalent to bx. */
6949 tmp = load_reg(s, rm);
6950 gen_bx(s, tmp);
6951 } else {
6952 goto illegal_op;
6953 }
6954 break;
6955 case 0x3:
6956 if (op1 != 1)
6957 goto illegal_op;
6958
6959 ARCH(5);
6960 /* branch link/exchange thumb (blx) */
6961 tmp = load_reg(s, rm);
6962 tmp2 = tcg_temp_new_i32();
6963 tcg_gen_movi_i32(tmp2, s->pc);
6964 store_reg(s, 14, tmp2);
6965 gen_bx(s, tmp);
6966 break;
6967 case 0x5: /* saturating add/subtract */
6968 ARCH(5TE);
6969 rd = (insn >> 12) & 0xf;
6970 rn = (insn >> 16) & 0xf;
6971 tmp = load_reg(s, rm);
6972 tmp2 = load_reg(s, rn);
6973 if (op1 & 2)
6974 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
6975 if (op1 & 1)
6976 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
6977 else
6978 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
6979 tcg_temp_free_i32(tmp2);
6980 store_reg(s, rd, tmp);
6981 break;
6982 case 7:
6983 /* SMC instruction (op1 == 3)
6984 and undefined instructions (op1 == 0 || op1 == 2)
6985 will trap */
6986 if (op1 != 1) {
6987 goto illegal_op;
6988 }
6989 /* bkpt */
6990 ARCH(5);
6991 gen_exception_insn(s, 4, EXCP_BKPT);
6992 break;
6993 case 0x8: /* signed multiply */
6994 case 0xa:
6995 case 0xc:
6996 case 0xe:
6997 ARCH(5TE);
6998 rs = (insn >> 8) & 0xf;
6999 rn = (insn >> 12) & 0xf;
7000 rd = (insn >> 16) & 0xf;
7001 if (op1 == 1) {
7002 /* (32 * 16) >> 16 */
7003 tmp = load_reg(s, rm);
7004 tmp2 = load_reg(s, rs);
7005 if (sh & 4)
7006 tcg_gen_sari_i32(tmp2, tmp2, 16);
7007 else
7008 gen_sxth(tmp2);
7009 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7010 tcg_gen_shri_i64(tmp64, tmp64, 16);
7011 tmp = tcg_temp_new_i32();
7012 tcg_gen_trunc_i64_i32(tmp, tmp64);
7013 tcg_temp_free_i64(tmp64);
7014 if ((sh & 2) == 0) {
7015 tmp2 = load_reg(s, rn);
7016 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7017 tcg_temp_free_i32(tmp2);
7018 }
7019 store_reg(s, rd, tmp);
7020 } else {
7021 /* 16 * 16 */
7022 tmp = load_reg(s, rm);
7023 tmp2 = load_reg(s, rs);
7024 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7025 tcg_temp_free_i32(tmp2);
7026 if (op1 == 2) {
7027 tmp64 = tcg_temp_new_i64();
7028 tcg_gen_ext_i32_i64(tmp64, tmp);
7029 tcg_temp_free_i32(tmp);
7030 gen_addq(s, tmp64, rn, rd);
7031 gen_storeq_reg(s, rn, rd, tmp64);
7032 tcg_temp_free_i64(tmp64);
7033 } else {
7034 if (op1 == 0) {
7035 tmp2 = load_reg(s, rn);
7036 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7037 tcg_temp_free_i32(tmp2);
7038 }
7039 store_reg(s, rd, tmp);
7040 }
7041 }
7042 break;
7043 default:
7044 goto illegal_op;
7045 }
7046 } else if (((insn & 0x0e000000) == 0 &&
7047 (insn & 0x00000090) != 0x90) ||
7048 ((insn & 0x0e000000) == (1 << 25))) {
7049 int set_cc, logic_cc, shiftop;
7050
7051 op1 = (insn >> 21) & 0xf;
7052 set_cc = (insn >> 20) & 1;
7053 logic_cc = table_logic_cc[op1] & set_cc;
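        /* logic_cc is set only for the logical ops (AND, EOR, TST, TEQ, ORR,
         * MOV, BIC, MVN) when S is set; these take C from the shifter output
         * rather than from an adder.
         */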
7054
7055 /* data processing instruction */
7056 if (insn & (1 << 25)) {
7057 /* immediate operand */
7058 val = insn & 0xff;
7059 shift = ((insn >> 8) & 0xf) * 2;
7060 if (shift) {
7061 val = (val >> shift) | (val << (32 - shift));
7062 }
7063 tmp2 = tcg_temp_new_i32();
7064 tcg_gen_movi_i32(tmp2, val);
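            /* For the logical ops a non-zero rotation makes the shifter carry
             * out equal to bit 31 of the rotated immediate.
             */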
7065 if (logic_cc && shift) {
7066 gen_set_CF_bit31(tmp2);
7067 }
7068 } else {
7069 /* register */
7070 rm = (insn) & 0xf;
7071 tmp2 = load_reg(s, rm);
7072 shiftop = (insn >> 5) & 3;
7073 if (!(insn & (1 << 4))) {
7074 shift = (insn >> 7) & 0x1f;
7075 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7076 } else {
7077 rs = (insn >> 8) & 0xf;
7078 tmp = load_reg(s, rs);
7079 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
7080 }
7081 }
7082 if (op1 != 0x0f && op1 != 0x0d) {
7083 rn = (insn >> 16) & 0xf;
7084 tmp = load_reg(s, rn);
7085 } else {
7086 TCGV_UNUSED(tmp);
7087 }
7088 rd = (insn >> 12) & 0xf;
7089 switch(op1) {
7090 case 0x00:
7091 tcg_gen_and_i32(tmp, tmp, tmp2);
7092 if (logic_cc) {
7093 gen_logic_CC(tmp);
7094 }
7095 store_reg_bx(env, s, rd, tmp);
7096 break;
7097 case 0x01:
7098 tcg_gen_xor_i32(tmp, tmp, tmp2);
7099 if (logic_cc) {
7100 gen_logic_CC(tmp);
7101 }
7102 store_reg_bx(env, s, rd, tmp);
7103 break;
7104 case 0x02:
7105 if (set_cc && rd == 15) {
7106 /* SUBS r15, ... is used for exception return. */
7107 if (IS_USER(s)) {
7108 goto illegal_op;
7109 }
7110 gen_sub_CC(tmp, tmp, tmp2);
7111 gen_exception_return(s, tmp);
7112 } else {
7113 if (set_cc) {
7114 gen_sub_CC(tmp, tmp, tmp2);
7115 } else {
7116 tcg_gen_sub_i32(tmp, tmp, tmp2);
7117 }
7118 store_reg_bx(env, s, rd, tmp);
7119 }
7120 break;
7121 case 0x03:
7122 if (set_cc) {
7123 gen_sub_CC(tmp, tmp2, tmp);
7124 } else {
7125 tcg_gen_sub_i32(tmp, tmp2, tmp);
7126 }
7127 store_reg_bx(env, s, rd, tmp);
7128 break;
7129 case 0x04:
7130 if (set_cc) {
7131 gen_add_CC(tmp, tmp, tmp2);
7132 } else {
7133 tcg_gen_add_i32(tmp, tmp, tmp2);
7134 }
7135 store_reg_bx(env, s, rd, tmp);
7136 break;
7137 case 0x05:
7138 if (set_cc) {
7139 gen_adc_CC(tmp, tmp, tmp2);
7140 } else {
7141 gen_add_carry(tmp, tmp, tmp2);
7142 }
7143 store_reg_bx(env, s, rd, tmp);
7144 break;
7145 case 0x06:
7146 if (set_cc) {
7147 gen_sbc_CC(tmp, tmp, tmp2);
7148 } else {
7149 gen_sub_carry(tmp, tmp, tmp2);
7150 }
7151 store_reg_bx(env, s, rd, tmp);
7152 break;
7153 case 0x07:
7154 if (set_cc) {
7155 gen_sbc_CC(tmp, tmp2, tmp);
7156 } else {
7157 gen_sub_carry(tmp, tmp2, tmp);
7158 }
7159 store_reg_bx(env, s, rd, tmp);
7160 break;
7161 case 0x08:
7162 if (set_cc) {
7163 tcg_gen_and_i32(tmp, tmp, tmp2);
7164 gen_logic_CC(tmp);
7165 }
7166 tcg_temp_free_i32(tmp);
7167 break;
7168 case 0x09:
7169 if (set_cc) {
7170 tcg_gen_xor_i32(tmp, tmp, tmp2);
7171 gen_logic_CC(tmp);
7172 }
7173 tcg_temp_free_i32(tmp);
7174 break;
7175 case 0x0a:
7176 if (set_cc) {
7177 gen_sub_CC(tmp, tmp, tmp2);
7178 }
7179 tcg_temp_free_i32(tmp);
7180 break;
7181 case 0x0b:
7182 if (set_cc) {
7183 gen_add_CC(tmp, tmp, tmp2);
7184 }
7185 tcg_temp_free_i32(tmp);
7186 break;
7187 case 0x0c:
7188 tcg_gen_or_i32(tmp, tmp, tmp2);
7189 if (logic_cc) {
7190 gen_logic_CC(tmp);
7191 }
7192 store_reg_bx(env, s, rd, tmp);
7193 break;
7194 case 0x0d:
7195 if (logic_cc && rd == 15) {
7196 /* MOVS r15, ... is used for exception return. */
7197 if (IS_USER(s)) {
7198 goto illegal_op;
7199 }
7200 gen_exception_return(s, tmp2);
7201 } else {
7202 if (logic_cc) {
7203 gen_logic_CC(tmp2);
7204 }
7205 store_reg_bx(env, s, rd, tmp2);
7206 }
7207 break;
7208 case 0x0e:
7209 tcg_gen_andc_i32(tmp, tmp, tmp2);
7210 if (logic_cc) {
7211 gen_logic_CC(tmp);
7212 }
7213 store_reg_bx(env, s, rd, tmp);
7214 break;
7215 default:
7216 case 0x0f:
7217 tcg_gen_not_i32(tmp2, tmp2);
7218 if (logic_cc) {
7219 gen_logic_CC(tmp2);
7220 }
7221 store_reg_bx(env, s, rd, tmp2);
7222 break;
7223 }
7224 if (op1 != 0x0f && op1 != 0x0d) {
7225 tcg_temp_free_i32(tmp2);
7226 }
7227 } else {
7228 /* other instructions */
7229 op1 = (insn >> 24) & 0xf;
7230 switch(op1) {
7231 case 0x0:
7232 case 0x1:
7233 /* multiplies, extra load/stores */
7234 sh = (insn >> 5) & 3;
7235 if (sh == 0) {
7236 if (op1 == 0x0) {
7237 rd = (insn >> 16) & 0xf;
7238 rn = (insn >> 12) & 0xf;
7239 rs = (insn >> 8) & 0xf;
7240 rm = (insn) & 0xf;
7241 op1 = (insn >> 20) & 0xf;
7242 switch (op1) {
7243 case 0: case 1: case 2: case 3: case 6:
7244 /* 32 bit mul */
7245 tmp = load_reg(s, rs);
7246 tmp2 = load_reg(s, rm);
7247 tcg_gen_mul_i32(tmp, tmp, tmp2);
7248 tcg_temp_free_i32(tmp2);
7249 if (insn & (1 << 22)) {
7250 /* Subtract (mls) */
7251 ARCH(6T2);
7252 tmp2 = load_reg(s, rn);
7253 tcg_gen_sub_i32(tmp, tmp2, tmp);
7254 tcg_temp_free_i32(tmp2);
7255 } else if (insn & (1 << 21)) {
7256 /* Add */
7257 tmp2 = load_reg(s, rn);
7258 tcg_gen_add_i32(tmp, tmp, tmp2);
7259 tcg_temp_free_i32(tmp2);
7260 }
7261 if (insn & (1 << 20))
7262 gen_logic_CC(tmp);
7263 store_reg(s, rd, tmp);
7264 break;
7265 case 4:
7266 /* 64 bit mul double accumulate (UMAAL) */
7267 ARCH(6);
7268 tmp = load_reg(s, rs);
7269 tmp2 = load_reg(s, rm);
7270 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7271 gen_addq_lo(s, tmp64, rn);
7272 gen_addq_lo(s, tmp64, rd);
7273 gen_storeq_reg(s, rn, rd, tmp64);
7274 tcg_temp_free_i64(tmp64);
7275 break;
7276 case 8: case 9: case 10: case 11:
7277 case 12: case 13: case 14: case 15:
7278 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
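                            /* The two-output multiply leaves the low half in tmp
                             * and the high half in tmp2; here rn is RdLo and rd
                             * is RdHi.
                             */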
7279 tmp = load_reg(s, rs);
7280 tmp2 = load_reg(s, rm);
7281 if (insn & (1 << 22)) {
7282 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
7283 } else {
7284 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
7285 }
7286 if (insn & (1 << 21)) { /* mult accumulate */
7287 TCGv al = load_reg(s, rn);
7288 TCGv ah = load_reg(s, rd);
7289 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
7290 tcg_temp_free(al);
7291 tcg_temp_free(ah);
7292 }
7293 if (insn & (1 << 20)) {
7294 gen_logicq_cc(tmp, tmp2);
7295 }
7296 store_reg(s, rn, tmp);
7297 store_reg(s, rd, tmp2);
7298 break;
7299 default:
7300 goto illegal_op;
7301 }
7302 } else {
7303 rn = (insn >> 16) & 0xf;
7304 rd = (insn >> 12) & 0xf;
7305 if (insn & (1 << 23)) {
7306 /* load/store exclusive */
7307 op1 = (insn >> 21) & 0x3;
7308 if (op1)
7309 ARCH(6K);
7310 else
7311 ARCH(6);
7312 addr = tcg_temp_local_new_i32();
7313 load_reg_var(s, addr, rn);
7314 if (insn & (1 << 20)) {
7315 switch (op1) {
7316 case 0: /* ldrex */
7317 gen_load_exclusive(s, rd, 15, addr, 2);
7318 break;
7319 case 1: /* ldrexd */
7320 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7321 break;
7322 case 2: /* ldrexb */
7323 gen_load_exclusive(s, rd, 15, addr, 0);
7324 break;
7325 case 3: /* ldrexh */
7326 gen_load_exclusive(s, rd, 15, addr, 1);
7327 break;
7328 default:
7329 abort();
7330 }
7331 } else {
7332 rm = insn & 0xf;
7333 switch (op1) {
7334 case 0: /* strex */
7335 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7336 break;
7337 case 1: /* strexd */
7338 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7339 break;
7340 case 2: /* strexb */
7341 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7342 break;
7343 case 3: /* strexh */
7344 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7345 break;
7346 default:
7347 abort();
7348 }
7349 }
7350 tcg_temp_free(addr);
7351 } else {
7352 /* SWP instruction */
7353 rm = (insn) & 0xf;
7354
7355 /* ??? This is not really atomic. However we know
7356 we never have multiple CPUs running in parallel,
7357 so it is good enough. */
7358 addr = load_reg(s, rn);
7359 tmp = load_reg(s, rm);
7360 if (insn & (1 << 22)) {
7361 tmp2 = gen_ld8u(addr, IS_USER(s));
7362 gen_st8(tmp, addr, IS_USER(s));
7363 } else {
7364 tmp2 = gen_ld32(addr, IS_USER(s));
7365 gen_st32(tmp, addr, IS_USER(s));
7366 }
7367 tcg_temp_free_i32(addr);
7368 store_reg(s, rd, tmp2);
7369 }
7370 }
7371 } else {
7372 int address_offset;
7373 int load;
7374 /* Misc load/store */
7375 rn = (insn >> 16) & 0xf;
7376 rd = (insn >> 12) & 0xf;
7377 addr = load_reg(s, rn);
7378 if (insn & (1 << 24))
7379 gen_add_datah_offset(s, insn, 0, addr);
7380 address_offset = 0;
7381 if (insn & (1 << 20)) {
7382 /* load */
7383 switch(sh) {
7384 case 1:
7385 tmp = gen_ld16u(addr, IS_USER(s));
7386 break;
7387 case 2:
7388 tmp = gen_ld8s(addr, IS_USER(s));
7389 break;
7390 default:
7391 case 3:
7392 tmp = gen_ld16s(addr, IS_USER(s));
7393 break;
7394 }
7395 load = 1;
7396 } else if (sh & 2) {
7397 ARCH(5TE);
7398 /* doubleword */
7399 if (sh & 1) {
7400 /* store */
7401 tmp = load_reg(s, rd);
7402 gen_st32(tmp, addr, IS_USER(s));
7403 tcg_gen_addi_i32(addr, addr, 4);
7404 tmp = load_reg(s, rd + 1);
7405 gen_st32(tmp, addr, IS_USER(s));
7406 load = 0;
7407 } else {
7408 /* load */
7409 tmp = gen_ld32(addr, IS_USER(s));
7410 store_reg(s, rd, tmp);
7411 tcg_gen_addi_i32(addr, addr, 4);
7412 tmp = gen_ld32(addr, IS_USER(s));
7413 rd++;
7414 load = 1;
7415 }
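                    /* Both doubleword paths advanced addr by 4 to access the
                     * second word, so compensate before any base writeback.
                     */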
7416 address_offset = -4;
7417 } else {
7418 /* store */
7419 tmp = load_reg(s, rd);
7420 gen_st16(tmp, addr, IS_USER(s));
7421 load = 0;
7422 }
7423                    /* Perform base writeback before storing the loaded value to
7424                       ensure correct behavior with overlapping index registers.
7425                       ldrd with base writeback is undefined if the
7426                       destination and index registers overlap. */
7427 if (!(insn & (1 << 24))) {
7428 gen_add_datah_offset(s, insn, address_offset, addr);
7429 store_reg(s, rn, addr);
7430 } else if (insn & (1 << 21)) {
7431 if (address_offset)
7432 tcg_gen_addi_i32(addr, addr, address_offset);
7433 store_reg(s, rn, addr);
7434 } else {
7435 tcg_temp_free_i32(addr);
7436 }
7437 if (load) {
7438 /* Complete the load. */
7439 store_reg(s, rd, tmp);
7440 }
7441 }
7442 break;
7443 case 0x4:
7444 case 0x5:
7445 goto do_ldst;
7446 case 0x6:
7447 case 0x7:
7448 if (insn & (1 << 4)) {
7449 ARCH(6);
7450 /* Armv6 Media instructions. */
7451 rm = insn & 0xf;
7452 rn = (insn >> 16) & 0xf;
7453 rd = (insn >> 12) & 0xf;
7454 rs = (insn >> 8) & 0xf;
7455 switch ((insn >> 23) & 3) {
7456 case 0: /* Parallel add/subtract. */
7457 op1 = (insn >> 20) & 7;
7458 tmp = load_reg(s, rn);
7459 tmp2 = load_reg(s, rm);
7460 sh = (insn >> 5) & 7;
7461 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7462 goto illegal_op;
7463 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7464 tcg_temp_free_i32(tmp2);
7465 store_reg(s, rd, tmp);
7466 break;
7467 case 1:
7468 if ((insn & 0x00700020) == 0) {
7469 /* Halfword pack. */
7470 tmp = load_reg(s, rn);
7471 tmp2 = load_reg(s, rm);
7472 shift = (insn >> 7) & 0x1f;
7473 if (insn & (1 << 6)) {
7474 /* pkhtb */
7475 if (shift == 0)
7476 shift = 31;
7477 tcg_gen_sari_i32(tmp2, tmp2, shift);
7478 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7479 tcg_gen_ext16u_i32(tmp2, tmp2);
7480 } else {
7481 /* pkhbt */
7482 if (shift)
7483 tcg_gen_shli_i32(tmp2, tmp2, shift);
7484 tcg_gen_ext16u_i32(tmp, tmp);
7485 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7486 }
7487 tcg_gen_or_i32(tmp, tmp, tmp2);
7488 tcg_temp_free_i32(tmp2);
7489 store_reg(s, rd, tmp);
7490 } else if ((insn & 0x00200020) == 0x00200000) {
7491 /* [us]sat */
7492 tmp = load_reg(s, rm);
7493 shift = (insn >> 7) & 0x1f;
7494 if (insn & (1 << 6)) {
7495 if (shift == 0)
7496 shift = 31;
7497 tcg_gen_sari_i32(tmp, tmp, shift);
7498 } else {
7499 tcg_gen_shli_i32(tmp, tmp, shift);
7500 }
7501 sh = (insn >> 16) & 0x1f;
7502 tmp2 = tcg_const_i32(sh);
7503 if (insn & (1 << 22))
7504 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
7505 else
7506 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
7507 tcg_temp_free_i32(tmp2);
7508 store_reg(s, rd, tmp);
7509 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7510 /* [us]sat16 */
7511 tmp = load_reg(s, rm);
7512 sh = (insn >> 16) & 0x1f;
7513 tmp2 = tcg_const_i32(sh);
7514 if (insn & (1 << 22))
7515 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
7516 else
7517 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
7518 tcg_temp_free_i32(tmp2);
7519 store_reg(s, rd, tmp);
7520 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7521 /* Select bytes. */
7522 tmp = load_reg(s, rn);
7523 tmp2 = load_reg(s, rm);
7524 tmp3 = tcg_temp_new_i32();
7525 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
7526 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7527 tcg_temp_free_i32(tmp3);
7528 tcg_temp_free_i32(tmp2);
7529 store_reg(s, rd, tmp);
7530 } else if ((insn & 0x000003e0) == 0x00000060) {
7531 tmp = load_reg(s, rm);
7532 shift = (insn >> 10) & 3;
7533 /* ??? In many cases it's not necessary to do a
7534                           rotate; a shift is sufficient. */
7535 if (shift != 0)
7536 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7537 op1 = (insn >> 20) & 7;
7538 switch (op1) {
7539 case 0: gen_sxtb16(tmp); break;
7540 case 2: gen_sxtb(tmp); break;
7541 case 3: gen_sxth(tmp); break;
7542 case 4: gen_uxtb16(tmp); break;
7543 case 6: gen_uxtb(tmp); break;
7544 case 7: gen_uxth(tmp); break;
7545 default: goto illegal_op;
7546 }
7547 if (rn != 15) {
7548 tmp2 = load_reg(s, rn);
7549 if ((op1 & 3) == 0) {
7550 gen_add16(tmp, tmp2);
7551 } else {
7552 tcg_gen_add_i32(tmp, tmp, tmp2);
7553 tcg_temp_free_i32(tmp2);
7554 }
7555 }
7556 store_reg(s, rd, tmp);
7557 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7558 /* rev */
7559 tmp = load_reg(s, rm);
7560 if (insn & (1 << 22)) {
7561 if (insn & (1 << 7)) {
7562 gen_revsh(tmp);
7563 } else {
7564 ARCH(6T2);
7565 gen_helper_rbit(tmp, tmp);
7566 }
7567 } else {
7568 if (insn & (1 << 7))
7569 gen_rev16(tmp);
7570 else
7571 tcg_gen_bswap32_i32(tmp, tmp);
7572 }
7573 store_reg(s, rd, tmp);
7574 } else {
7575 goto illegal_op;
7576 }
7577 break;
7578 case 2: /* Multiplies (Type 3). */
7579 switch ((insn >> 20) & 0x7) {
7580 case 5:
7581 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7582 /* op2 not 00x or 11x : UNDEF */
7583 goto illegal_op;
7584 }
7585 /* Signed multiply most significant [accumulate].
7586 (SMMUL, SMMLA, SMMLS) */
7587 tmp = load_reg(s, rm);
7588 tmp2 = load_reg(s, rs);
7589 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7590
7591 if (rd != 15) {
7592 tmp = load_reg(s, rd);
7593 if (insn & (1 << 6)) {
7594 tmp64 = gen_subq_msw(tmp64, tmp);
7595 } else {
7596 tmp64 = gen_addq_msw(tmp64, tmp);
7597 }
7598 }
7599 if (insn & (1 << 5)) {
7600 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7601 }
7602 tcg_gen_shri_i64(tmp64, tmp64, 32);
7603 tmp = tcg_temp_new_i32();
7604 tcg_gen_trunc_i64_i32(tmp, tmp64);
7605 tcg_temp_free_i64(tmp64);
7606 store_reg(s, rn, tmp);
7607 break;
7608 case 0:
7609 case 4:
7610 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7611 if (insn & (1 << 7)) {
7612 goto illegal_op;
7613 }
7614 tmp = load_reg(s, rm);
7615 tmp2 = load_reg(s, rs);
7616 if (insn & (1 << 5))
7617 gen_swap_half(tmp2);
7618 gen_smul_dual(tmp, tmp2);
7619 if (insn & (1 << 6)) {
7620 /* This subtraction cannot overflow. */
7621 tcg_gen_sub_i32(tmp, tmp, tmp2);
7622 } else {
7623 /* This addition cannot overflow 32 bits;
7624 * however it may overflow considered as a signed
7625 * operation, in which case we must set the Q flag.
7626 */
7627 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7628 }
7629 tcg_temp_free_i32(tmp2);
7630 if (insn & (1 << 22)) {
7631 /* smlald, smlsld */
7632 tmp64 = tcg_temp_new_i64();
7633 tcg_gen_ext_i32_i64(tmp64, tmp);
7634 tcg_temp_free_i32(tmp);
7635 gen_addq(s, tmp64, rd, rn);
7636 gen_storeq_reg(s, rd, rn, tmp64);
7637 tcg_temp_free_i64(tmp64);
7638 } else {
7639 /* smuad, smusd, smlad, smlsd */
7640 if (rd != 15)
7641 {
7642 tmp2 = load_reg(s, rd);
7643 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7644 tcg_temp_free_i32(tmp2);
7645 }
7646 store_reg(s, rn, tmp);
7647 }
7648 break;
7649 case 1:
7650 case 3:
7651 /* SDIV, UDIV */
7652 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7653 goto illegal_op;
7654 }
7655 if (((insn >> 5) & 7) || (rd != 15)) {
7656 goto illegal_op;
7657 }
7658 tmp = load_reg(s, rm);
7659 tmp2 = load_reg(s, rs);
7660 if (insn & (1 << 21)) {
7661 gen_helper_udiv(tmp, tmp, tmp2);
7662 } else {
7663 gen_helper_sdiv(tmp, tmp, tmp2);
7664 }
7665 tcg_temp_free_i32(tmp2);
7666 store_reg(s, rn, tmp);
7667 break;
7668 default:
7669 goto illegal_op;
7670 }
7671 break;
7672 case 3:
7673 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7674 switch (op1) {
7675 case 0: /* Unsigned sum of absolute differences. */
7676 ARCH(6);
7677 tmp = load_reg(s, rm);
7678 tmp2 = load_reg(s, rs);
7679 gen_helper_usad8(tmp, tmp, tmp2);
7680 tcg_temp_free_i32(tmp2);
7681 if (rd != 15) {
7682 tmp2 = load_reg(s, rd);
7683 tcg_gen_add_i32(tmp, tmp, tmp2);
7684 tcg_temp_free_i32(tmp2);
7685 }
7686 store_reg(s, rn, tmp);
7687 break;
7688 case 0x20: case 0x24: case 0x28: case 0x2c:
7689 /* Bitfield insert/clear. */
7690 ARCH(6T2);
7691 shift = (insn >> 7) & 0x1f;
7692 i = (insn >> 16) & 0x1f;
7693 i = i + 1 - shift;
7694 if (rm == 15) {
7695 tmp = tcg_temp_new_i32();
7696 tcg_gen_movi_i32(tmp, 0);
7697 } else {
7698 tmp = load_reg(s, rm);
7699 }
7700 if (i != 32) {
7701 tmp2 = load_reg(s, rd);
7702 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7703 tcg_temp_free_i32(tmp2);
7704 }
7705 store_reg(s, rd, tmp);
7706 break;
7707 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7708 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7709 ARCH(6T2);
7710 tmp = load_reg(s, rm);
7711 shift = (insn >> 7) & 0x1f;
7712 i = ((insn >> 16) & 0x1f) + 1;
7713 if (shift + i > 32)
7714 goto illegal_op;
7715 if (i < 32) {
7716 if (op1 & 0x20) {
7717 gen_ubfx(tmp, shift, (1u << i) - 1);
7718 } else {
7719 gen_sbfx(tmp, shift, i);
7720 }
7721 }
7722 store_reg(s, rd, tmp);
7723 break;
7724 default:
7725 goto illegal_op;
7726 }
7727 break;
7728 }
7729 break;
7730 }
7731 do_ldst:
7732 /* Check for undefined extension instructions
7733          * per the ARM Bible, i.e.:
7734 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7735 */
7736 sh = (0xf << 20) | (0xf << 4);
7737 if (op1 == 0x7 && ((insn & sh) == sh))
7738 {
7739 goto illegal_op;
7740 }
7741 /* load/store byte/word */
7742 rn = (insn >> 16) & 0xf;
7743 rd = (insn >> 12) & 0xf;
7744 tmp2 = load_reg(s, rn);
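        /* i is the "user access" flag for the load/store: forced on in user
         * mode and for the translated (LDRT/STRT/LDRBT/STRBT) forms, which
         * are post-indexed with the W bit set.
         */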
7745 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7746 if (insn & (1 << 24))
7747 gen_add_data_offset(s, insn, tmp2);
7748 if (insn & (1 << 20)) {
7749 /* load */
7750 if (insn & (1 << 22)) {
7751 tmp = gen_ld8u(tmp2, i);
7752 } else {
7753 tmp = gen_ld32(tmp2, i);
7754 }
7755 } else {
7756 /* store */
7757 tmp = load_reg(s, rd);
7758 if (insn & (1 << 22))
7759 gen_st8(tmp, tmp2, i);
7760 else
7761 gen_st32(tmp, tmp2, i);
7762 }
7763 if (!(insn & (1 << 24))) {
7764 gen_add_data_offset(s, insn, tmp2);
7765 store_reg(s, rn, tmp2);
7766 } else if (insn & (1 << 21)) {
7767 store_reg(s, rn, tmp2);
7768 } else {
7769 tcg_temp_free_i32(tmp2);
7770 }
7771 if (insn & (1 << 20)) {
7772 /* Complete the load. */
7773 store_reg_from_load(env, s, rd, tmp);
7774 }
7775 break;
7776 case 0x08:
7777 case 0x09:
7778 {
7779 int j, n, user, loaded_base;
7780 TCGv loaded_var;
7781 /* load/store multiple words */
7782 /* XXX: store correct base if write back */
7783 user = 0;
7784 if (insn & (1 << 22)) {
7785 if (IS_USER(s))
7786 goto illegal_op; /* only usable in supervisor mode */
7787
7788 if ((insn & (1 << 15)) == 0)
7789 user = 1;
7790 }
7791 rn = (insn >> 16) & 0xf;
7792 addr = load_reg(s, rn);
7793
7794 /* compute total size */
7795 loaded_base = 0;
7796 TCGV_UNUSED(loaded_var);
7797 n = 0;
7798 for(i=0;i<16;i++) {
7799 if (insn & (1 << i))
7800 n++;
7801 }
7802 /* XXX: test invalid n == 0 case ? */
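            /* The transfer loop below always walks upwards through memory, so
             * for the decrementing forms the base is first lowered to the
             * lowest address accessed: -(n * 4) for pre-decrement and
             * -((n - 1) * 4) for post-decrement.
             */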
7803 if (insn & (1 << 23)) {
7804 if (insn & (1 << 24)) {
7805 /* pre increment */
7806 tcg_gen_addi_i32(addr, addr, 4);
7807 } else {
7808 /* post increment */
7809 }
7810 } else {
7811 if (insn & (1 << 24)) {
7812 /* pre decrement */
7813 tcg_gen_addi_i32(addr, addr, -(n * 4));
7814 } else {
7815 /* post decrement */
7816 if (n != 1)
7817 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7818 }
7819 }
7820 j = 0;
7821 for(i=0;i<16;i++) {
7822 if (insn & (1 << i)) {
7823 if (insn & (1 << 20)) {
7824 /* load */
7825 tmp = gen_ld32(addr, IS_USER(s));
7826 if (user) {
7827 tmp2 = tcg_const_i32(i);
7828 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
7829 tcg_temp_free_i32(tmp2);
7830 tcg_temp_free_i32(tmp);
7831 } else if (i == rn) {
7832 loaded_var = tmp;
7833 loaded_base = 1;
7834 } else {
7835 store_reg_from_load(env, s, i, tmp);
7836 }
7837 } else {
7838 /* store */
7839 if (i == 15) {
7840                     /* special case: r15 = PC + 8; s->pc already points 4 past this insn */
7841 val = (long)s->pc + 4;
7842 tmp = tcg_temp_new_i32();
7843 tcg_gen_movi_i32(tmp, val);
7844 } else if (user) {
7845 tmp = tcg_temp_new_i32();
7846 tmp2 = tcg_const_i32(i);
7847 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
7848 tcg_temp_free_i32(tmp2);
7849 } else {
7850 tmp = load_reg(s, i);
7851 }
7852 gen_st32(tmp, addr, IS_USER(s));
7853 }
7854 j++;
7855 /* no need to add after the last transfer */
7856 if (j != n)
7857 tcg_gen_addi_i32(addr, addr, 4);
7858 }
7859 }
7860 if (insn & (1 << 21)) {
7861 /* write back */
7862 if (insn & (1 << 23)) {
7863 if (insn & (1 << 24)) {
7864 /* pre increment */
7865 } else {
7866 /* post increment */
7867 tcg_gen_addi_i32(addr, addr, 4);
7868 }
7869 } else {
7870 if (insn & (1 << 24)) {
7871 /* pre decrement */
7872 if (n != 1)
7873 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7874 } else {
7875 /* post decrement */
7876 tcg_gen_addi_i32(addr, addr, -(n * 4));
7877 }
7878 }
7879 store_reg(s, rn, addr);
7880 } else {
7881 tcg_temp_free_i32(addr);
7882 }
7883 if (loaded_base) {
7884 store_reg(s, rn, loaded_var);
7885 }
7886 if ((insn & (1 << 22)) && !user) {
7887 /* Restore CPSR from SPSR. */
7888 tmp = load_cpu_field(spsr);
7889 gen_set_cpsr(tmp, 0xffffffff);
7890 tcg_temp_free_i32(tmp);
7891 s->is_jmp = DISAS_UPDATE;
7892 }
7893 }
7894 break;
7895 case 0xa:
7896 case 0xb:
7897 {
7898 int32_t offset;
7899
7900 /* branch (and link) */
7901 val = (int32_t)s->pc;
7902 if (insn & (1 << 24)) {
7903 tmp = tcg_temp_new_i32();
7904 tcg_gen_movi_i32(tmp, val);
7905 store_reg(s, 14, tmp);
7906 }
7907 offset = (((int32_t)insn << 8) >> 8);
7908 val += (offset << 2) + 4;
7909 gen_jmp(s, val);
7910 }
7911 break;
7912 case 0xc:
7913 case 0xd:
7914 case 0xe:
7915 /* Coprocessor. */
7916 if (disas_coproc_insn(env, s, insn))
7917 goto illegal_op;
7918 break;
7919 case 0xf:
7920 /* swi */
7921 gen_set_pc_im(s->pc);
7922 s->is_jmp = DISAS_SWI;
7923 break;
7924 default:
7925 illegal_op:
7926 gen_exception_insn(s, 4, EXCP_UDEF);
7927 break;
7928 }
7929 }
7930 }
7931
7932 /* Return true if this is a Thumb-2 logical op. */
7933 static int
7934 thumb2_logic_op(int op)
7935 {
7936 return (op < 8);
7937 }
7938
7939 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7940 then set condition code flags based on the result of the operation.
7941 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7942 to the high bit of T1.
7943 Returns zero if the opcode is valid. */
7944
7945 static int
7946 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7947 {
7948 int logic_cc;
7949
7950 logic_cc = 0;
7951 switch (op) {
7952 case 0: /* and */
7953 tcg_gen_and_i32(t0, t0, t1);
7954 logic_cc = conds;
7955 break;
7956 case 1: /* bic */
7957 tcg_gen_andc_i32(t0, t0, t1);
7958 logic_cc = conds;
7959 break;
7960 case 2: /* orr */
7961 tcg_gen_or_i32(t0, t0, t1);
7962 logic_cc = conds;
7963 break;
7964 case 3: /* orn */
7965 tcg_gen_orc_i32(t0, t0, t1);
7966 logic_cc = conds;
7967 break;
7968 case 4: /* eor */
7969 tcg_gen_xor_i32(t0, t0, t1);
7970 logic_cc = conds;
7971 break;
7972 case 8: /* add */
7973 if (conds)
7974 gen_add_CC(t0, t0, t1);
7975 else
7976 tcg_gen_add_i32(t0, t0, t1);
7977 break;
7978 case 10: /* adc */
7979 if (conds)
7980 gen_adc_CC(t0, t0, t1);
7981 else
7982 gen_adc(t0, t1);
7983 break;
7984 case 11: /* sbc */
7985 if (conds) {
7986 gen_sbc_CC(t0, t0, t1);
7987 } else {
7988 gen_sub_carry(t0, t0, t1);
7989 }
7990 break;
7991 case 13: /* sub */
7992 if (conds)
7993 gen_sub_CC(t0, t0, t1);
7994 else
7995 tcg_gen_sub_i32(t0, t0, t1);
7996 break;
7997 case 14: /* rsb */
7998 if (conds)
7999 gen_sub_CC(t0, t1, t0);
8000 else
8001 tcg_gen_sub_i32(t0, t1, t0);
8002 break;
8003 default: /* 5, 6, 7, 9, 12, 15. */
8004 return 1;
8005 }
8006 if (logic_cc) {
8007 gen_logic_CC(t0);
8008 if (shifter_out)
8009 gen_set_CF_bit31(t1);
8010 }
8011 return 0;
8012 }
8013
8014 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8015 is not legal. */
8016 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
8017 {
8018 uint32_t insn, imm, shift, offset;
8019 uint32_t rd, rn, rm, rs;
8020 TCGv tmp;
8021 TCGv tmp2;
8022 TCGv tmp3;
8023 TCGv addr;
8024 TCGv_i64 tmp64;
8025 int op;
8026 int shiftop;
8027 int conds;
8028 int logic_cc;
8029
8030 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8031 || arm_feature (env, ARM_FEATURE_M))) {
8032 /* Thumb-1 cores may need to treat bl and blx as a pair of
8033 16-bit instructions to get correct prefetch abort behavior. */
8034 insn = insn_hw1;
8035 if ((insn & (1 << 12)) == 0) {
8036 ARCH(5);
8037 /* Second half of blx. */
8038 offset = ((insn & 0x7ff) << 1);
8039 tmp = load_reg(s, 14);
8040 tcg_gen_addi_i32(tmp, tmp, offset);
8041 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
8042
8043 tmp2 = tcg_temp_new_i32();
8044 tcg_gen_movi_i32(tmp2, s->pc | 1);
8045 store_reg(s, 14, tmp2);
8046 gen_bx(s, tmp);
8047 return 0;
8048 }
8049 if (insn & (1 << 11)) {
8050 /* Second half of bl. */
8051 offset = ((insn & 0x7ff) << 1) | 1;
8052 tmp = load_reg(s, 14);
8053 tcg_gen_addi_i32(tmp, tmp, offset);
8054
8055 tmp2 = tcg_temp_new_i32();
8056 tcg_gen_movi_i32(tmp2, s->pc | 1);
8057 store_reg(s, 14, tmp2);
8058 gen_bx(s, tmp);
8059 return 0;
8060 }
8061 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8062 /* Instruction spans a page boundary. Implement it as two
8063            16-bit instructions in case the second half causes a
8064 prefetch abort. */
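            /* This is the bl/blx prefix: stash PC plus the sign-extended upper
             * offset bits in LR so that the second halfword, decoded later as a
             * separate insn, can complete the branch.
             */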
8065 offset = ((int32_t)insn << 21) >> 9;
8066 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
8067 return 0;
8068 }
8069 /* Fall through to 32-bit decode. */
8070 }
8071
8072 insn = arm_lduw_code(env, s->pc, s->bswap_code);
8073 s->pc += 2;
8074 insn |= (uint32_t)insn_hw1 << 16;
8075
8076 if ((insn & 0xf800e800) != 0xf000e800) {
8077 ARCH(6T2);
8078 }
8079
8080 rn = (insn >> 16) & 0xf;
8081 rs = (insn >> 12) & 0xf;
8082 rd = (insn >> 8) & 0xf;
8083 rm = insn & 0xf;
8084 switch ((insn >> 25) & 0xf) {
8085 case 0: case 1: case 2: case 3:
8086 /* 16-bit instructions. Should never happen. */
8087 abort();
8088 case 4:
8089 if (insn & (1 << 22)) {
8090 /* Other load/store, table branch. */
8091 if (insn & 0x01200000) {
8092 /* Load/store doubleword. */
8093 if (rn == 15) {
8094 addr = tcg_temp_new_i32();
8095 tcg_gen_movi_i32(addr, s->pc & ~3);
8096 } else {
8097 addr = load_reg(s, rn);
8098 }
8099 offset = (insn & 0xff) * 4;
8100 if ((insn & (1 << 23)) == 0)
8101 offset = -offset;
8102 if (insn & (1 << 24)) {
8103 tcg_gen_addi_i32(addr, addr, offset);
8104 offset = 0;
8105 }
8106 if (insn & (1 << 20)) {
8107 /* ldrd */
8108 tmp = gen_ld32(addr, IS_USER(s));
8109 store_reg(s, rs, tmp);
8110 tcg_gen_addi_i32(addr, addr, 4);
8111 tmp = gen_ld32(addr, IS_USER(s));
8112 store_reg(s, rd, tmp);
8113 } else {
8114 /* strd */
8115 tmp = load_reg(s, rs);
8116 gen_st32(tmp, addr, IS_USER(s));
8117 tcg_gen_addi_i32(addr, addr, 4);
8118 tmp = load_reg(s, rd);
8119 gen_st32(tmp, addr, IS_USER(s));
8120 }
8121 if (insn & (1 << 21)) {
8122 /* Base writeback. */
8123 if (rn == 15)
8124 goto illegal_op;
8125 tcg_gen_addi_i32(addr, addr, offset - 4);
8126 store_reg(s, rn, addr);
8127 } else {
8128 tcg_temp_free_i32(addr);
8129 }
8130 } else if ((insn & (1 << 23)) == 0) {
8131 /* Load/store exclusive word. */
8132 addr = tcg_temp_local_new();
8133 load_reg_var(s, addr, rn);
8134 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8135 if (insn & (1 << 20)) {
8136 gen_load_exclusive(s, rs, 15, addr, 2);
8137 } else {
8138 gen_store_exclusive(s, rd, rs, 15, addr, 2);
8139 }
8140 tcg_temp_free(addr);
8141 } else if ((insn & (1 << 6)) == 0) {
8142 /* Table Branch. */
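                    /* tbb/tbh: branch forward by twice the byte or halfword table
                     * entry at [Rn + Rm] (tbb) or [Rn + 2*Rm] (tbh).
                     */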
8143 if (rn == 15) {
8144 addr = tcg_temp_new_i32();
8145 tcg_gen_movi_i32(addr, s->pc);
8146 } else {
8147 addr = load_reg(s, rn);
8148 }
8149 tmp = load_reg(s, rm);
8150 tcg_gen_add_i32(addr, addr, tmp);
8151 if (insn & (1 << 4)) {
8152 /* tbh */
8153 tcg_gen_add_i32(addr, addr, tmp);
8154 tcg_temp_free_i32(tmp);
8155 tmp = gen_ld16u(addr, IS_USER(s));
8156 } else { /* tbb */
8157 tcg_temp_free_i32(tmp);
8158 tmp = gen_ld8u(addr, IS_USER(s));
8159 }
8160 tcg_temp_free_i32(addr);
8161 tcg_gen_shli_i32(tmp, tmp, 1);
8162 tcg_gen_addi_i32(tmp, tmp, s->pc);
8163 store_reg(s, 15, tmp);
8164 } else {
8165 /* Load/store exclusive byte/halfword/doubleword. */
8166 ARCH(7);
8167 op = (insn >> 4) & 0x3;
8168 if (op == 2) {
8169 goto illegal_op;
8170 }
8171 addr = tcg_temp_local_new();
8172 load_reg_var(s, addr, rn);
8173 if (insn & (1 << 20)) {
8174 gen_load_exclusive(s, rs, rd, addr, op);
8175 } else {
8176 gen_store_exclusive(s, rm, rs, rd, addr, op);
8177 }
8178 tcg_temp_free(addr);
8179 }
8180 } else {
8181 /* Load/store multiple, RFE, SRS. */
8182 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8183 /* RFE, SRS: not available in user mode or on M profile */
8184 if (IS_USER(s) || IS_M(env)) {
8185 goto illegal_op;
8186 }
8187 if (insn & (1 << 20)) {
8188 /* rfe */
8189 addr = load_reg(s, rn);
8190 if ((insn & (1 << 24)) == 0)
8191 tcg_gen_addi_i32(addr, addr, -8);
8192 /* Load PC into tmp and CPSR into tmp2. */
8193 tmp = gen_ld32(addr, 0);
8194 tcg_gen_addi_i32(addr, addr, 4);
8195 tmp2 = gen_ld32(addr, 0);
8196 if (insn & (1 << 21)) {
8197 /* Base writeback. */
8198 if (insn & (1 << 24)) {
8199 tcg_gen_addi_i32(addr, addr, 4);
8200 } else {
8201 tcg_gen_addi_i32(addr, addr, -4);
8202 }
8203 store_reg(s, rn, addr);
8204 } else {
8205 tcg_temp_free_i32(addr);
8206 }
8207 gen_rfe(s, tmp, tmp2);
8208 } else {
8209 /* srs */
8210 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
8211 insn & (1 << 21));
8212 }
8213 } else {
8214 int i, loaded_base = 0;
8215 TCGv loaded_var;
8216 /* Load/store multiple. */
8217 addr = load_reg(s, rn);
8218 offset = 0;
8219 for (i = 0; i < 16; i++) {
8220 if (insn & (1 << i))
8221 offset += 4;
8222 }
8223 if (insn & (1 << 24)) {
8224 tcg_gen_addi_i32(addr, addr, -offset);
8225 }
8226
8227 TCGV_UNUSED(loaded_var);
8228 for (i = 0; i < 16; i++) {
8229 if ((insn & (1 << i)) == 0)
8230 continue;
8231 if (insn & (1 << 20)) {
8232 /* Load. */
8233 tmp = gen_ld32(addr, IS_USER(s));
8234 if (i == 15) {
8235 gen_bx(s, tmp);
8236 } else if (i == rn) {
8237 loaded_var = tmp;
8238 loaded_base = 1;
8239 } else {
8240 store_reg(s, i, tmp);
8241 }
8242 } else {
8243 /* Store. */
8244 tmp = load_reg(s, i);
8245 gen_st32(tmp, addr, IS_USER(s));
8246 }
8247 tcg_gen_addi_i32(addr, addr, 4);
8248 }
8249 if (loaded_base) {
8250 store_reg(s, rn, loaded_var);
8251 }
8252 if (insn & (1 << 21)) {
8253 /* Base register writeback. */
8254 if (insn & (1 << 24)) {
8255 tcg_gen_addi_i32(addr, addr, -offset);
8256 }
8257 /* Fault if writeback register is in register list. */
8258 if (insn & (1 << rn))
8259 goto illegal_op;
8260 store_reg(s, rn, addr);
8261 } else {
8262 tcg_temp_free_i32(addr);
8263 }
8264 }
8265 }
8266 break;
8267 case 5:
8268
8269 op = (insn >> 21) & 0xf;
8270 if (op == 6) {
8271 /* Halfword pack. */
8272 tmp = load_reg(s, rn);
8273 tmp2 = load_reg(s, rm);
8274 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8275 if (insn & (1 << 5)) {
8276 /* pkhtb */
8277 if (shift == 0)
8278 shift = 31;
8279 tcg_gen_sari_i32(tmp2, tmp2, shift);
8280 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8281 tcg_gen_ext16u_i32(tmp2, tmp2);
8282 } else {
8283 /* pkhbt */
8284 if (shift)
8285 tcg_gen_shli_i32(tmp2, tmp2, shift);
8286 tcg_gen_ext16u_i32(tmp, tmp);
8287 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8288 }
8289 tcg_gen_or_i32(tmp, tmp, tmp2);
8290 tcg_temp_free_i32(tmp2);
8291 store_reg(s, rd, tmp);
8292 } else {
8293 /* Data processing register constant shift. */
8294 if (rn == 15) {
8295 tmp = tcg_temp_new_i32();
8296 tcg_gen_movi_i32(tmp, 0);
8297 } else {
8298 tmp = load_reg(s, rn);
8299 }
8300 tmp2 = load_reg(s, rm);
8301
8302 shiftop = (insn >> 4) & 3;
8303 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8304 conds = (insn & (1 << 20)) != 0;
8305 logic_cc = (conds && thumb2_logic_op(op));
8306 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8307 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8308 goto illegal_op;
8309 tcg_temp_free_i32(tmp2);
8310 if (rd != 15) {
8311 store_reg(s, rd, tmp);
8312 } else {
8313 tcg_temp_free_i32(tmp);
8314 }
8315 }
8316 break;
8317 case 13: /* Misc data processing. */
8318 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8319 if (op < 4 && (insn & 0xf000) != 0xf000)
8320 goto illegal_op;
8321 switch (op) {
8322 case 0: /* Register controlled shift. */
8323 tmp = load_reg(s, rn);
8324 tmp2 = load_reg(s, rm);
8325 if ((insn & 0x70) != 0)
8326 goto illegal_op;
8327 op = (insn >> 21) & 3;
8328 logic_cc = (insn & (1 << 20)) != 0;
8329 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8330 if (logic_cc)
8331 gen_logic_CC(tmp);
8332 store_reg_bx(env, s, rd, tmp);
8333 break;
8334 case 1: /* Sign/zero extend. */
8335 tmp = load_reg(s, rm);
8336 shift = (insn >> 4) & 3;
8337 /* ??? In many cases it's not necessary to do a
8338                rotate; a shift is sufficient. */
8339 if (shift != 0)
8340 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8341 op = (insn >> 20) & 7;
8342 switch (op) {
8343 case 0: gen_sxth(tmp); break;
8344 case 1: gen_uxth(tmp); break;
8345 case 2: gen_sxtb16(tmp); break;
8346 case 3: gen_uxtb16(tmp); break;
8347 case 4: gen_sxtb(tmp); break;
8348 case 5: gen_uxtb(tmp); break;
8349 default: goto illegal_op;
8350 }
8351 if (rn != 15) {
8352 tmp2 = load_reg(s, rn);
8353 if ((op >> 1) == 1) {
8354 gen_add16(tmp, tmp2);
8355 } else {
8356 tcg_gen_add_i32(tmp, tmp, tmp2);
8357 tcg_temp_free_i32(tmp2);
8358 }
8359 }
8360 store_reg(s, rd, tmp);
8361 break;
8362 case 2: /* SIMD add/subtract. */
8363 op = (insn >> 20) & 7;
8364 shift = (insn >> 4) & 7;
8365 if ((op & 3) == 3 || (shift & 3) == 3)
8366 goto illegal_op;
8367 tmp = load_reg(s, rn);
8368 tmp2 = load_reg(s, rm);
8369 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8370 tcg_temp_free_i32(tmp2);
8371 store_reg(s, rd, tmp);
8372 break;
8373 case 3: /* Other data processing. */
8374 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8375 if (op < 4) {
8376 /* Saturating add/subtract. */
8377 tmp = load_reg(s, rn);
8378 tmp2 = load_reg(s, rm);
8379 if (op & 1)
8380 gen_helper_double_saturate(tmp, cpu_env, tmp);
8381 if (op & 2)
8382 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
8383 else
8384 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8385 tcg_temp_free_i32(tmp2);
8386 } else {
8387 tmp = load_reg(s, rn);
8388 switch (op) {
8389 case 0x0a: /* rbit */
8390 gen_helper_rbit(tmp, tmp);
8391 break;
8392 case 0x08: /* rev */
8393 tcg_gen_bswap32_i32(tmp, tmp);
8394 break;
8395 case 0x09: /* rev16 */
8396 gen_rev16(tmp);
8397 break;
8398 case 0x0b: /* revsh */
8399 gen_revsh(tmp);
8400 break;
8401 case 0x10: /* sel */
8402 tmp2 = load_reg(s, rm);
8403 tmp3 = tcg_temp_new_i32();
8404 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8405 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8406 tcg_temp_free_i32(tmp3);
8407 tcg_temp_free_i32(tmp2);
8408 break;
8409 case 0x18: /* clz */
8410 gen_helper_clz(tmp, tmp);
8411 break;
8412 default:
8413 goto illegal_op;
8414 }
8415 }
8416 store_reg(s, rd, tmp);
8417 break;
8418 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8419 op = (insn >> 4) & 0xf;
8420 tmp = load_reg(s, rn);
8421 tmp2 = load_reg(s, rm);
8422 switch ((insn >> 20) & 7) {
8423 case 0: /* 32 x 32 -> 32 */
8424 tcg_gen_mul_i32(tmp, tmp, tmp2);
8425 tcg_temp_free_i32(tmp2);
8426 if (rs != 15) {
8427 tmp2 = load_reg(s, rs);
8428 if (op)
8429 tcg_gen_sub_i32(tmp, tmp2, tmp);
8430 else
8431 tcg_gen_add_i32(tmp, tmp, tmp2);
8432 tcg_temp_free_i32(tmp2);
8433 }
8434 break;
8435 case 1: /* 16 x 16 -> 32 */
8436 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8437 tcg_temp_free_i32(tmp2);
8438 if (rs != 15) {
8439 tmp2 = load_reg(s, rs);
8440 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8441 tcg_temp_free_i32(tmp2);
8442 }
8443 break;
8444 case 2: /* Dual multiply add. */
8445 case 4: /* Dual multiply subtract. */
8446 if (op)
8447 gen_swap_half(tmp2);
8448 gen_smul_dual(tmp, tmp2);
8449 if (insn & (1 << 22)) {
8450 /* This subtraction cannot overflow. */
8451 tcg_gen_sub_i32(tmp, tmp, tmp2);
8452 } else {
8453 /* This addition cannot overflow 32 bits;
8454 * however it may overflow considered as a signed
8455 * operation, in which case we must set the Q flag.
8456 */
8457 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8458 }
8459 tcg_temp_free_i32(tmp2);
8460 if (rs != 15)
8461 {
8462 tmp2 = load_reg(s, rs);
8463 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8464 tcg_temp_free_i32(tmp2);
8465 }
8466 break;
8467 case 3: /* 32 * 16 -> 32msb */
8468 if (op)
8469 tcg_gen_sari_i32(tmp2, tmp2, 16);
8470 else
8471 gen_sxth(tmp2);
8472 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8473 tcg_gen_shri_i64(tmp64, tmp64, 16);
8474 tmp = tcg_temp_new_i32();
8475 tcg_gen_trunc_i64_i32(tmp, tmp64);
8476 tcg_temp_free_i64(tmp64);
8477 if (rs != 15)
8478 {
8479 tmp2 = load_reg(s, rs);
8480 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8481 tcg_temp_free_i32(tmp2);
8482 }
8483 break;
8484 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8485 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8486 if (rs != 15) {
8487 tmp = load_reg(s, rs);
8488 if (insn & (1 << 20)) {
8489 tmp64 = gen_addq_msw(tmp64, tmp);
8490 } else {
8491 tmp64 = gen_subq_msw(tmp64, tmp);
8492 }
8493 }
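/* Bit 4 selects the rounding forms (SMMULR/SMMLAR/SMMLSR): add 0x80000000
so that taking the high word rounds to nearest. */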
8494 if (insn & (1 << 4)) {
8495 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8496 }
8497 tcg_gen_shri_i64(tmp64, tmp64, 32);
8498 tmp = tcg_temp_new_i32();
8499 tcg_gen_trunc_i64_i32(tmp, tmp64);
8500 tcg_temp_free_i64(tmp64);
8501 break;
8502 case 7: /* Unsigned sum of absolute differences. */
8503 gen_helper_usad8(tmp, tmp, tmp2);
8504 tcg_temp_free_i32(tmp2);
8505 if (rs != 15) {
8506 tmp2 = load_reg(s, rs);
8507 tcg_gen_add_i32(tmp, tmp, tmp2);
8508 tcg_temp_free_i32(tmp2);
8509 }
8510 break;
8511 }
8512 store_reg(s, rd, tmp);
8513 break;
8514 case 6: case 7: /* 64-bit multiply, Divide. */
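/* op[6:4] = insn[22:20], op[3:0] = insn[7:4]. */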
8515 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8516 tmp = load_reg(s, rn);
8517 tmp2 = load_reg(s, rm);
8518 if ((op & 0x50) == 0x10) {
8519 /* sdiv, udiv */
8520 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
8521 goto illegal_op;
8522 }
8523 if (op & 0x20)
8524 gen_helper_udiv(tmp, tmp, tmp2);
8525 else
8526 gen_helper_sdiv(tmp, tmp, tmp2);
8527 tcg_temp_free_i32(tmp2);
8528 store_reg(s, rd, tmp);
8529 } else if ((op & 0xe) == 0xc) {
8530 /* Dual multiply accumulate long. */
8531 if (op & 1)
8532 gen_swap_half(tmp2);
8533 gen_smul_dual(tmp, tmp2);
8534 if (op & 0x10) {
8535 tcg_gen_sub_i32(tmp, tmp, tmp2);
8536 } else {
8537 tcg_gen_add_i32(tmp, tmp, tmp2);
8538 }
8539 tcg_temp_free_i32(tmp2);
8540 /* BUGFIX: sign-extend the 32-bit dual-multiply result to 64 bits
before the long accumulate. */
8541 tmp64 = tcg_temp_new_i64();
8542 tcg_gen_ext_i32_i64(tmp64, tmp);
8543 tcg_temp_free_i32(tmp);
8544 gen_addq(s, tmp64, rs, rd);
8545 gen_storeq_reg(s, rs, rd, tmp64);
8546 tcg_temp_free_i64(tmp64);
8547 } else {
8548 if (op & 0x20) {
8549 /* Unsigned 64-bit multiply */
8550 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8551 } else {
8552 if (op & 8) {
8553 /* smlalxy */
8554 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8555 tcg_temp_free_i32(tmp2);
8556 tmp64 = tcg_temp_new_i64();
8557 tcg_gen_ext_i32_i64(tmp64, tmp);
8558 tcg_temp_free_i32(tmp);
8559 } else {
8560 /* Signed 64-bit multiply */
8561 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8562 }
8563 }
8564 if (op & 4) {
8565 /* umaal */
8566 gen_addq_lo(s, tmp64, rs);
8567 gen_addq_lo(s, tmp64, rd);
8568 } else if (op & 0x40) {
8569 /* 64-bit accumulate. */
8570 gen_addq(s, tmp64, rs, rd);
8571 }
8572 gen_storeq_reg(s, rs, rd, tmp64);
8573 tcg_temp_free_i64(tmp64);
8574 }
8575 break;
8576 }
8577 break;
8578 case 6: case 7: case 14: case 15:
8579 /* Coprocessor. */
8580 if (((insn >> 24) & 3) == 3) {
8581 /* Translate into the equivalent ARM encoding. */
8582 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8583 if (disas_neon_data_insn(env, s, insn))
8584 goto illegal_op;
8585 } else {
8586 if (insn & (1 << 28))
8587 goto illegal_op;
8588 if (disas_coproc_insn (env, s, insn))
8589 goto illegal_op;
8590 }
8591 break;
8592 case 8: case 9: case 10: case 11:
8593 if (insn & (1 << 15)) {
8594 /* Branches, misc control. */
8595 if (insn & 0x5000) {
8596 /* Unconditional branch. */
8597 /* signextend(hw1[10:0]) -> offset[:12]. */
8598 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8599 /* hw2[10:0] -> offset[11:1]. */
8600 offset |= (insn & 0x7ff) << 1;
8601 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8602 offset[24:22] already have the same value because of the
8603 sign extension above. */
8604 offset ^= ((~insn) & (1 << 13)) << 10;
8605 offset ^= ((~insn) & (1 << 11)) << 11;
8606
8607 if (insn & (1 << 14)) {
8608 /* Branch and link. */
8609 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8610 }
8611
8612 offset += s->pc;
8613 if (insn & (1 << 12)) {
8614 /* b/bl */
8615 gen_jmp(s, offset);
8616 } else {
8617 /* blx */
8618 offset &= ~(uint32_t)2;
8619 /* thumb2 bx, no need to check */
8620 gen_bx_im(s, offset);
8621 }
8622 } else if (((insn >> 23) & 7) == 7) {
8623 /* Misc control */
8624 if (insn & (1 << 13))
8625 goto illegal_op;
8626
8627 if (insn & (1 << 26)) {
8628 /* Secure monitor call (v6Z) */
8629 goto illegal_op; /* not implemented. */
8630 } else {
8631 op = (insn >> 20) & 7;
8632 switch (op) {
8633 case 0: /* msr cpsr. */
8634 if (IS_M(env)) {
8635 tmp = load_reg(s, rn);
8636 addr = tcg_const_i32(insn & 0xff);
8637 gen_helper_v7m_msr(cpu_env, addr, tmp);
8638 tcg_temp_free_i32(addr);
8639 tcg_temp_free_i32(tmp);
8640 gen_lookup_tb(s);
8641 break;
8642 }
8643 /* fall through */
8644 case 1: /* msr spsr. */
8645 if (IS_M(env))
8646 goto illegal_op;
8647 tmp = load_reg(s, rn);
8648 if (gen_set_psr(s,
8649 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8650 op == 1, tmp))
8651 goto illegal_op;
8652 break;
8653 case 2: /* cps, nop-hint. */
8654 if (((insn >> 8) & 7) == 0) {
8655 gen_nop_hint(s, insn & 0xff);
8656 }
8657 /* Implemented as NOP in user mode. */
8658 if (IS_USER(s))
8659 break;
8660 offset = 0;
8661 imm = 0;
8662 if (insn & (1 << 10)) {
8663 if (insn & (1 << 7))
8664 offset |= CPSR_A;
8665 if (insn & (1 << 6))
8666 offset |= CPSR_I;
8667 if (insn & (1 << 5))
8668 offset |= CPSR_F;
8669 if (insn & (1 << 9))
8670 imm = CPSR_A | CPSR_I | CPSR_F;
8671 }
8672 if (insn & (1 << 8)) {
8673 offset |= 0x1f;
8674 imm |= (insn & 0x1f);
8675 }
8676 if (offset) {
8677 gen_set_psr_im(s, offset, 0, imm);
8678 }
8679 break;
8680 case 3: /* Special control operations. */
8681 ARCH(7);
8682 op = (insn >> 4) & 0xf;
8683 switch (op) {
8684 case 2: /* clrex */
8685 gen_clrex(s);
8686 break;
8687 case 4: /* dsb */
8688 case 5: /* dmb */
8689 case 6: /* isb */
8690 /* These execute as NOPs. */
8691 break;
8692 default:
8693 goto illegal_op;
8694 }
8695 break;
8696 case 4: /* bxj */
8697 /* Trivial implementation equivalent to bx. */
8698 tmp = load_reg(s, rn);
8699 gen_bx(s, tmp);
8700 break;
8701 case 5: /* Exception return. */
8702 if (IS_USER(s)) {
8703 goto illegal_op;
8704 }
8705 if (rn != 14 || rd != 15) {
8706 goto illegal_op;
8707 }
8708 tmp = load_reg(s, rn);
8709 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8710 gen_exception_return(s, tmp);
8711 break;
8712 case 6: /* mrs cpsr. */
8713 tmp = tcg_temp_new_i32();
8714 if (IS_M(env)) {
8715 addr = tcg_const_i32(insn & 0xff);
8716 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8717 tcg_temp_free_i32(addr);
8718 } else {
8719 gen_helper_cpsr_read(tmp, cpu_env);
8720 }
8721 store_reg(s, rd, tmp);
8722 break;
8723 case 7: /* mrs spsr. */
8724 /* Not accessible in user mode. */
8725 if (IS_USER(s) || IS_M(env))
8726 goto illegal_op;
8727 tmp = load_cpu_field(spsr);
8728 store_reg(s, rd, tmp);
8729 break;
8730 }
8731 }
8732 } else {
8733 /* Conditional branch. */
8734 op = (insn >> 22) & 0xf;
8735 /* Generate a conditional jump to next instruction. */
8736 s->condlabel = gen_new_label();
8737 gen_test_cc(op ^ 1, s->condlabel);
8738 s->condjmp = 1;
8739
8740 /* offset[11:1] = insn[10:0] */
8741 offset = (insn & 0x7ff) << 1;
8742 /* offset[17:12] = insn[21:16]. */
8743 offset |= (insn & 0x003f0000) >> 4;
8744 /* offset[31:20] = insn[26]. */
8745 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8746 /* offset[18] = insn[13]. */
8747 offset |= (insn & (1 << 13)) << 5;
8748 /* offset[19] = insn[11]. */
8749 offset |= (insn & (1 << 11)) << 8;
8750
8751 /* jump to the offset */
8752 gen_jmp(s, s->pc + offset);
8753 }
8754 } else {
8755 /* Data processing immediate. */
8756 if (insn & (1 << 25)) {
8757 if (insn & (1 << 24)) {
8758 if (insn & (1 << 20))
8759 goto illegal_op;
8760 /* Bitfield/Saturate. */
8761 op = (insn >> 21) & 7;
8762 imm = insn & 0x1f;
8763 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8764 if (rn == 15) {
8765 tmp = tcg_temp_new_i32();
8766 tcg_gen_movi_i32(tmp, 0);
8767 } else {
8768 tmp = load_reg(s, rn);
8769 }
8770 switch (op) {
8771 case 2: /* Signed bitfield extract. */
8772 imm++;
8773 if (shift + imm > 32)
8774 goto illegal_op;
8775 if (imm < 32)
8776 gen_sbfx(tmp, shift, imm);
8777 break;
8778 case 6: /* Unsigned bitfield extract. */
8779 imm++;
8780 if (shift + imm > 32)
8781 goto illegal_op;
8782 if (imm < 32)
8783 gen_ubfx(tmp, shift, (1u << imm) - 1);
8784 break;
8785 case 3: /* Bitfield insert/clear. */
8786 if (imm < shift)
8787 goto illegal_op;
8788 imm = imm + 1 - shift;
8789 if (imm != 32) {
8790 tmp2 = load_reg(s, rd);
8791 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
8792 tcg_temp_free_i32(tmp2);
8793 }
8794 break;
8795 case 7:
8796 goto illegal_op;
8797 default: /* Saturate. */
8798 if (shift) {
8799 if (op & 1)
8800 tcg_gen_sari_i32(tmp, tmp, shift);
8801 else
8802 tcg_gen_shli_i32(tmp, tmp, shift);
8803 }
8804 tmp2 = tcg_const_i32(imm);
8805 if (op & 4) {
8806 /* Unsigned. */
8807 if ((op & 1) && shift == 0)
8808 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8809 else
8810 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8811 } else {
8812 /* Signed. */
8813 if ((op & 1) && shift == 0)
8814 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8815 else
8816 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8817 }
8818 tcg_temp_free_i32(tmp2);
8819 break;
8820 }
8821 store_reg(s, rd, tmp);
8822 } else {
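/* Assemble the split immediate: i (insn[26]) : imm3 (insn[14:12]) : imm8 (insn[7:0]). */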
8823 imm = ((insn & 0x04000000) >> 15)
8824 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8825 if (insn & (1 << 22)) {
8826 /* 16-bit immediate. */
8827 imm |= (insn >> 4) & 0xf000;
8828 if (insn & (1 << 23)) {
8829 /* movt */
8830 tmp = load_reg(s, rd);
8831 tcg_gen_ext16u_i32(tmp, tmp);
8832 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8833 } else {
8834 /* movw */
8835 tmp = tcg_temp_new_i32();
8836 tcg_gen_movi_i32(tmp, imm);
8837 }
8838 } else {
8839 /* Add/sub 12-bit immediate. */
8840 if (rn == 15) {
8841 offset = s->pc & ~(uint32_t)3;
8842 if (insn & (1 << 23))
8843 offset -= imm;
8844 else
8845 offset += imm;
8846 tmp = tcg_temp_new_i32();
8847 tcg_gen_movi_i32(tmp, offset);
8848 } else {
8849 tmp = load_reg(s, rn);
8850 if (insn & (1 << 23))
8851 tcg_gen_subi_i32(tmp, tmp, imm);
8852 else
8853 tcg_gen_addi_i32(tmp, tmp, imm);
8854 }
8855 }
8856 store_reg(s, rd, tmp);
8857 }
8858 } else {
8859 int shifter_out = 0;
8860 /* modified 12-bit immediate. */
8861 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8862 imm = (insn & 0xff);
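/* Expand the Thumb-2 modified immediate: i:imm3 values 0-3 select one of
the byte patterns below, 4-15 a rotated 8-bit constant. */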
8863 switch (shift) {
8864 case 0: /* XY */
8865 /* Nothing to do. */
8866 break;
8867 case 1: /* 00XY00XY */
8868 imm |= imm << 16;
8869 break;
8870 case 2: /* XY00XY00 */
8871 imm |= imm << 16;
8872 imm <<= 8;
8873 break;
8874 case 3: /* XYXYXYXY */
8875 imm |= imm << 16;
8876 imm |= imm << 8;
8877 break;
8878 default: /* Rotated constant. */
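/* The 5-bit rotation is i:imm3:imm8[7] (always >= 8), applied to 1bcdefgh.
Illustrative example: i:imm3 = 0b1001, imm8 = 0x35 -> ROR(0xb5, 18) = 0x002d4000. */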
8879 shift = (shift << 1) | (imm >> 7);
8880 imm |= 0x80;
8881 imm = imm << (32 - shift);
8882 shifter_out = 1;
8883 break;
8884 }
8885 tmp2 = tcg_temp_new_i32();
8886 tcg_gen_movi_i32(tmp2, imm);
8887 rn = (insn >> 16) & 0xf;
8888 if (rn == 15) {
8889 tmp = tcg_temp_new_i32();
8890 tcg_gen_movi_i32(tmp, 0);
8891 } else {
8892 tmp = load_reg(s, rn);
8893 }
8894 op = (insn >> 21) & 0xf;
8895 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8896 shifter_out, tmp, tmp2))
8897 goto illegal_op;
8898 tcg_temp_free_i32(tmp2);
8899 rd = (insn >> 8) & 0xf;
8900 if (rd != 15) {
8901 store_reg(s, rd, tmp);
8902 } else {
8903 tcg_temp_free_i32(tmp);
8904 }
8905 }
8906 }
8907 break;
8908 case 12: /* Load/store single data item. */
8909 {
8910 int postinc = 0;
8911 int writeback = 0;
8912 int user;
8913 if ((insn & 0x01100000) == 0x01000000) {
8914 if (disas_neon_ls_insn(env, s, insn))
8915 goto illegal_op;
8916 break;
8917 }
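/* op[1:0] = size (0 = byte, 1 = halfword, 2 = word); op[2] = sign-extend (loads only). */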
8918 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8919 if (rs == 15) {
8920 if (!(insn & (1 << 20))) {
8921 goto illegal_op;
8922 }
8923 if (op != 2) {
8924 /* Byte or halfword load space with dest == r15 : memory hints.
8925 * Catch them early so we don't emit pointless addressing code.
8926 * This space is a mix of:
8927 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8928 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8929 * cores)
8930 * unallocated hints, which must be treated as NOPs
8931 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8932 * which is easiest for the decoding logic
8933 * Some space which must UNDEF
8934 */
8935 int op1 = (insn >> 23) & 3;
8936 int op2 = (insn >> 6) & 0x3f;
8937 if (op & 2) {
8938 goto illegal_op;
8939 }
8940 if (rn == 15) {
8941 /* UNPREDICTABLE, unallocated hint or
8942 * PLD/PLDW/PLI (literal)
8943 */
8944 return 0;
8945 }
8946 if (op1 & 1) {
8947 return 0; /* PLD/PLDW/PLI or unallocated hint */
8948 }
8949 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8950 return 0; /* PLD/PLDW/PLI or unallocated hint */
8951 }
8952 /* UNDEF space, or an UNPREDICTABLE */
8953 return 1;
8954 }
8955 }
8956 user = IS_USER(s);
8957 if (rn == 15) {
8958 addr = tcg_temp_new_i32();
8959 /* PC relative. */
8960 /* s->pc has already been incremented by 4. */
8961 imm = s->pc & 0xfffffffc;
8962 if (insn & (1 << 23))
8963 imm += insn & 0xfff;
8964 else
8965 imm -= insn & 0xfff;
8966 tcg_gen_movi_i32(addr, imm);
8967 } else {
8968 addr = load_reg(s, rn);
8969 if (insn & (1 << 23)) {
8970 /* Positive offset. */
8971 imm = insn & 0xfff;
8972 tcg_gen_addi_i32(addr, addr, imm);
8973 } else {
8974 imm = insn & 0xff;
8975 switch ((insn >> 8) & 0xf) {
8976 case 0x0: /* Shifted Register. */
8977 shift = (insn >> 4) & 0xf;
8978 if (shift > 3) {
8979 tcg_temp_free_i32(addr);
8980 goto illegal_op;
8981 }
8982 tmp = load_reg(s, rm);
8983 if (shift)
8984 tcg_gen_shli_i32(tmp, tmp, shift);
8985 tcg_gen_add_i32(addr, addr, tmp);
8986 tcg_temp_free_i32(tmp);
8987 break;
8988 case 0xc: /* Negative offset. */
8989 tcg_gen_addi_i32(addr, addr, -imm);
8990 break;
8991 case 0xe: /* User privilege. */
8992 tcg_gen_addi_i32(addr, addr, imm);
8993 user = 1;
8994 break;
8995 case 0x9: /* Post-decrement. */
8996 imm = -imm;
8997 /* Fall through. */
8998 case 0xb: /* Post-increment. */
8999 postinc = 1;
9000 writeback = 1;
9001 break;
9002 case 0xd: /* Pre-decrement. */
9003 imm = -imm;
9004 /* Fall through. */
9005 case 0xf: /* Pre-increment. */
9006 tcg_gen_addi_i32(addr, addr, imm);
9007 writeback = 1;
9008 break;
9009 default:
9010 tcg_temp_free_i32(addr);
9011 goto illegal_op;
9012 }
9013 }
9014 }
9015 if (insn & (1 << 20)) {
9016 /* Load. */
9017 switch (op) {
9018 case 0: tmp = gen_ld8u(addr, user); break;
9019 case 4: tmp = gen_ld8s(addr, user); break;
9020 case 1: tmp = gen_ld16u(addr, user); break;
9021 case 5: tmp = gen_ld16s(addr, user); break;
9022 case 2: tmp = gen_ld32(addr, user); break;
9023 default:
9024 tcg_temp_free_i32(addr);
9025 goto illegal_op;
9026 }
9027 if (rs == 15) {
9028 gen_bx(s, tmp);
9029 } else {
9030 store_reg(s, rs, tmp);
9031 }
9032 } else {
9033 /* Store. */
9034 tmp = load_reg(s, rs);
9035 switch (op) {
9036 case 0: gen_st8(tmp, addr, user); break;
9037 case 1: gen_st16(tmp, addr, user); break;
9038 case 2: gen_st32(tmp, addr, user); break;
9039 default:
9040 tcg_temp_free_i32(addr);
9041 goto illegal_op;
9042 }
9043 }
9044 if (postinc)
9045 tcg_gen_addi_i32(addr, addr, imm);
9046 if (writeback) {
9047 store_reg(s, rn, addr);
9048 } else {
9049 tcg_temp_free_i32(addr);
9050 }
9051 }
9052 break;
9053 default:
9054 goto illegal_op;
9055 }
9056 return 0;
9057 illegal_op:
9058 return 1;
9059 }
9060
9061 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
9062 {
9063 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9064 int32_t offset;
9065 int i;
9066 TCGv tmp;
9067 TCGv tmp2;
9068 TCGv addr;
9069
9070 if (s->condexec_mask) {
9071 cond = s->condexec_cond;
9072 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9073 s->condlabel = gen_new_label();
9074 gen_test_cc(cond ^ 1, s->condlabel);
9075 s->condjmp = 1;
9076 }
9077 }
9078
9079 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9080 s->pc += 2;
9081
9082 switch (insn >> 12) {
9083 case 0: case 1:
9084
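/* Shift by immediate, or add/subtract (register or 3-bit immediate). */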
9085 rd = insn & 7;
9086 op = (insn >> 11) & 3;
9087 if (op == 3) {
9088 /* add/subtract */
9089 rn = (insn >> 3) & 7;
9090 tmp = load_reg(s, rn);
9091 if (insn & (1 << 10)) {
9092 /* immediate */
9093 tmp2 = tcg_temp_new_i32();
9094 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
9095 } else {
9096 /* reg */
9097 rm = (insn >> 6) & 7;
9098 tmp2 = load_reg(s, rm);
9099 }
9100 if (insn & (1 << 9)) {
9101 if (s->condexec_mask)
9102 tcg_gen_sub_i32(tmp, tmp, tmp2);
9103 else
9104 gen_sub_CC(tmp, tmp, tmp2);
9105 } else {
9106 if (s->condexec_mask)
9107 tcg_gen_add_i32(tmp, tmp, tmp2);
9108 else
9109 gen_add_CC(tmp, tmp, tmp2);
9110 }
9111 tcg_temp_free_i32(tmp2);
9112 store_reg(s, rd, tmp);
9113 } else {
9114 /* shift immediate */
9115 rm = (insn >> 3) & 7;
9116 shift = (insn >> 6) & 0x1f;
9117 tmp = load_reg(s, rm);
9118 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9119 if (!s->condexec_mask)
9120 gen_logic_CC(tmp);
9121 store_reg(s, rd, tmp);
9122 }
9123 break;
9124 case 2: case 3:
9125 /* arithmetic large immediate */
9126 op = (insn >> 11) & 3;
9127 rd = (insn >> 8) & 0x7;
9128 if (op == 0) { /* mov */
9129 tmp = tcg_temp_new_i32();
9130 tcg_gen_movi_i32(tmp, insn & 0xff);
9131 if (!s->condexec_mask)
9132 gen_logic_CC(tmp);
9133 store_reg(s, rd, tmp);
9134 } else {
9135 tmp = load_reg(s, rd);
9136 tmp2 = tcg_temp_new_i32();
9137 tcg_gen_movi_i32(tmp2, insn & 0xff);
9138 switch (op) {
9139 case 1: /* cmp */
9140 gen_sub_CC(tmp, tmp, tmp2);
9141 tcg_temp_free_i32(tmp);
9142 tcg_temp_free_i32(tmp2);
9143 break;
9144 case 2: /* add */
9145 if (s->condexec_mask)
9146 tcg_gen_add_i32(tmp, tmp, tmp2);
9147 else
9148 gen_add_CC(tmp, tmp, tmp2);
9149 tcg_temp_free_i32(tmp2);
9150 store_reg(s, rd, tmp);
9151 break;
9152 case 3: /* sub */
9153 if (s->condexec_mask)
9154 tcg_gen_sub_i32(tmp, tmp, tmp2);
9155 else
9156 gen_sub_CC(tmp, tmp, tmp2);
9157 tcg_temp_free_i32(tmp2);
9158 store_reg(s, rd, tmp);
9159 break;
9160 }
9161 }
9162 break;
9163 case 4:
9164 if (insn & (1 << 11)) {
9165 rd = (insn >> 8) & 7;
9166 /* load pc-relative. Bit 1 of PC is ignored. */
9167 val = s->pc + 2 + ((insn & 0xff) * 4);
9168 val &= ~(uint32_t)2;
9169 addr = tcg_temp_new_i32();
9170 tcg_gen_movi_i32(addr, val);
9171 tmp = gen_ld32(addr, IS_USER(s));
9172 tcg_temp_free_i32(addr);
9173 store_reg(s, rd, tmp);
9174 break;
9175 }
9176 if (insn & (1 << 10)) {
9177 /* data processing extended or blx */
9178 rd = (insn & 7) | ((insn >> 4) & 8);
9179 rm = (insn >> 3) & 0xf;
9180 op = (insn >> 8) & 3;
9181 switch (op) {
9182 case 0: /* add */
9183 tmp = load_reg(s, rd);
9184 tmp2 = load_reg(s, rm);
9185 tcg_gen_add_i32(tmp, tmp, tmp2);
9186 tcg_temp_free_i32(tmp2);
9187 store_reg(s, rd, tmp);
9188 break;
9189 case 1: /* cmp */
9190 tmp = load_reg(s, rd);
9191 tmp2 = load_reg(s, rm);
9192 gen_sub_CC(tmp, tmp, tmp2);
9193 tcg_temp_free_i32(tmp2);
9194 tcg_temp_free_i32(tmp);
9195 break;
9196 case 2: /* mov/cpy */
9197 tmp = load_reg(s, rm);
9198 store_reg(s, rd, tmp);
9199 break;
9200 case 3:/* branch [and link] exchange thumb register */
9201 tmp = load_reg(s, rm);
9202 if (insn & (1 << 7)) {
9203 ARCH(5);
9204 val = (uint32_t)s->pc | 1;
9205 tmp2 = tcg_temp_new_i32();
9206 tcg_gen_movi_i32(tmp2, val);
9207 store_reg(s, 14, tmp2);
9208 }
9209 /* already thumb, no need to check */
9210 gen_bx(s, tmp);
9211 break;
9212 }
9213 break;
9214 }
9215
9216 /* data processing register */
9217 rd = insn & 7;
9218 rm = (insn >> 3) & 7;
9219 op = (insn >> 6) & 0xf;
9220 if (op == 2 || op == 3 || op == 4 || op == 7) {
9221 /* the shift/rotate ops want the operands backwards */
9222 val = rm;
9223 rm = rd;
9224 rd = val;
9225 val = 1;
9226 } else {
9227 val = 0;
9228 }
9229
9230 if (op == 9) { /* neg */
9231 tmp = tcg_temp_new_i32();
9232 tcg_gen_movi_i32(tmp, 0);
9233 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9234 tmp = load_reg(s, rd);
9235 } else {
9236 TCGV_UNUSED(tmp);
9237 }
9238
9239 tmp2 = load_reg(s, rm);
9240 switch (op) {
9241 case 0x0: /* and */
9242 tcg_gen_and_i32(tmp, tmp, tmp2);
9243 if (!s->condexec_mask)
9244 gen_logic_CC(tmp);
9245 break;
9246 case 0x1: /* eor */
9247 tcg_gen_xor_i32(tmp, tmp, tmp2);
9248 if (!s->condexec_mask)
9249 gen_logic_CC(tmp);
9250 break;
9251 case 0x2: /* lsl */
9252 if (s->condexec_mask) {
9253 gen_shl(tmp2, tmp2, tmp);
9254 } else {
9255 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
9256 gen_logic_CC(tmp2);
9257 }
9258 break;
9259 case 0x3: /* lsr */
9260 if (s->condexec_mask) {
9261 gen_shr(tmp2, tmp2, tmp);
9262 } else {
9263 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
9264 gen_logic_CC(tmp2);
9265 }
9266 break;
9267 case 0x4: /* asr */
9268 if (s->condexec_mask) {
9269 gen_sar(tmp2, tmp2, tmp);
9270 } else {
9271 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
9272 gen_logic_CC(tmp2);
9273 }
9274 break;
9275 case 0x5: /* adc */
9276 if (s->condexec_mask) {
9277 gen_adc(tmp, tmp2);
9278 } else {
9279 gen_adc_CC(tmp, tmp, tmp2);
9280 }
9281 break;
9282 case 0x6: /* sbc */
9283 if (s->condexec_mask) {
9284 gen_sub_carry(tmp, tmp, tmp2);
9285 } else {
9286 gen_sbc_CC(tmp, tmp, tmp2);
9287 }
9288 break;
9289 case 0x7: /* ror */
9290 if (s->condexec_mask) {
9291 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9292 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9293 } else {
9294 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
9295 gen_logic_CC(tmp2);
9296 }
9297 break;
9298 case 0x8: /* tst */
9299 tcg_gen_and_i32(tmp, tmp, tmp2);
9300 gen_logic_CC(tmp);
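/* rd == 16 marks ops that only set flags and write no register. */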
9301 rd = 16;
9302 break;
9303 case 0x9: /* neg */
9304 if (s->condexec_mask)
9305 tcg_gen_neg_i32(tmp, tmp2);
9306 else
9307 gen_sub_CC(tmp, tmp, tmp2);
9308 break;
9309 case 0xa: /* cmp */
9310 gen_sub_CC(tmp, tmp, tmp2);
9311 rd = 16;
9312 break;
9313 case 0xb: /* cmn */
9314 gen_add_CC(tmp, tmp, tmp2);
9315 rd = 16;
9316 break;
9317 case 0xc: /* orr */
9318 tcg_gen_or_i32(tmp, tmp, tmp2);
9319 if (!s->condexec_mask)
9320 gen_logic_CC(tmp);
9321 break;
9322 case 0xd: /* mul */
9323 tcg_gen_mul_i32(tmp, tmp, tmp2);
9324 if (!s->condexec_mask)
9325 gen_logic_CC(tmp);
9326 break;
9327 case 0xe: /* bic */
9328 tcg_gen_andc_i32(tmp, tmp, tmp2);
9329 if (!s->condexec_mask)
9330 gen_logic_CC(tmp);
9331 break;
9332 case 0xf: /* mvn */
9333 tcg_gen_not_i32(tmp2, tmp2);
9334 if (!s->condexec_mask)
9335 gen_logic_CC(tmp2);
9336 val = 1;
9337 rm = rd;
9338 break;
9339 }
9340 if (rd != 16) {
9341 if (val) {
9342 store_reg(s, rm, tmp2);
9343 if (op != 0xf)
9344 tcg_temp_free_i32(tmp);
9345 } else {
9346 store_reg(s, rd, tmp);
9347 tcg_temp_free_i32(tmp2);
9348 }
9349 } else {
9350 tcg_temp_free_i32(tmp);
9351 tcg_temp_free_i32(tmp2);
9352 }
9353 break;
9354
9355 case 5:
9356 /* load/store register offset. */
9357 rd = insn & 7;
9358 rn = (insn >> 3) & 7;
9359 rm = (insn >> 6) & 7;
9360 op = (insn >> 9) & 7;
9361 addr = load_reg(s, rn);
9362 tmp = load_reg(s, rm);
9363 tcg_gen_add_i32(addr, addr, tmp);
9364 tcg_temp_free_i32(tmp);
9365
9366 if (op < 3) /* store */
9367 tmp = load_reg(s, rd);
9368
9369 switch (op) {
9370 case 0: /* str */
9371 gen_st32(tmp, addr, IS_USER(s));
9372 break;
9373 case 1: /* strh */
9374 gen_st16(tmp, addr, IS_USER(s));
9375 break;
9376 case 2: /* strb */
9377 gen_st8(tmp, addr, IS_USER(s));
9378 break;
9379 case 3: /* ldrsb */
9380 tmp = gen_ld8s(addr, IS_USER(s));
9381 break;
9382 case 4: /* ldr */
9383 tmp = gen_ld32(addr, IS_USER(s));
9384 break;
9385 case 5: /* ldrh */
9386 tmp = gen_ld16u(addr, IS_USER(s));
9387 break;
9388 case 6: /* ldrb */
9389 tmp = gen_ld8u(addr, IS_USER(s));
9390 break;
9391 case 7: /* ldrsh */
9392 tmp = gen_ld16s(addr, IS_USER(s));
9393 break;
9394 }
9395 if (op >= 3) /* load */
9396 store_reg(s, rd, tmp);
9397 tcg_temp_free_i32(addr);
9398 break;
9399
9400 case 6:
9401 /* load/store word immediate offset */
9402 rd = insn & 7;
9403 rn = (insn >> 3) & 7;
9404 addr = load_reg(s, rn);
9405 val = (insn >> 4) & 0x7c;
9406 tcg_gen_addi_i32(addr, addr, val);
9407
9408 if (insn & (1 << 11)) {
9409 /* load */
9410 tmp = gen_ld32(addr, IS_USER(s));
9411 store_reg(s, rd, tmp);
9412 } else {
9413 /* store */
9414 tmp = load_reg(s, rd);
9415 gen_st32(tmp, addr, IS_USER(s));
9416 }
9417 tcg_temp_free_i32(addr);
9418 break;
9419
9420 case 7:
9421 /* load/store byte immediate offset */
9422 rd = insn & 7;
9423 rn = (insn >> 3) & 7;
9424 addr = load_reg(s, rn);
9425 val = (insn >> 6) & 0x1f;
9426 tcg_gen_addi_i32(addr, addr, val);
9427
9428 if (insn & (1 << 11)) {
9429 /* load */
9430 tmp = gen_ld8u(addr, IS_USER(s));
9431 store_reg(s, rd, tmp);
9432 } else {
9433 /* store */
9434 tmp = load_reg(s, rd);
9435 gen_st8(tmp, addr, IS_USER(s));
9436 }
9437 tcg_temp_free_i32(addr);
9438 break;
9439
9440 case 8:
9441 /* load/store halfword immediate offset */
9442 rd = insn & 7;
9443 rn = (insn >> 3) & 7;
9444 addr = load_reg(s, rn);
9445 val = (insn >> 5) & 0x3e;
9446 tcg_gen_addi_i32(addr, addr, val);
9447
9448 if (insn & (1 << 11)) {
9449 /* load */
9450 tmp = gen_ld16u(addr, IS_USER(s));
9451 store_reg(s, rd, tmp);
9452 } else {
9453 /* store */
9454 tmp = load_reg(s, rd);
9455 gen_st16(tmp, addr, IS_USER(s));
9456 }
9457 tcg_temp_free_i32(addr);
9458 break;
9459
9460 case 9:
9461 /* load/store from stack */
9462 rd = (insn >> 8) & 7;
9463 addr = load_reg(s, 13);
9464 val = (insn & 0xff) * 4;
9465 tcg_gen_addi_i32(addr, addr, val);
9466
9467 if (insn & (1 << 11)) {
9468 /* load */
9469 tmp = gen_ld32(addr, IS_USER(s));
9470 store_reg(s, rd, tmp);
9471 } else {
9472 /* store */
9473 tmp = load_reg(s, rd);
9474 gen_st32(tmp, addr, IS_USER(s));
9475 }
9476 tcg_temp_free_i32(addr);
9477 break;
9478
9479 case 10:
9480 /* add an immediate to SP or PC, result in a low register */
9481 rd = (insn >> 8) & 7;
9482 if (insn & (1 << 11)) {
9483 /* SP */
9484 tmp = load_reg(s, 13);
9485 } else {
9486 /* PC. bit 1 is ignored. */
9487 tmp = tcg_temp_new_i32();
9488 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9489 }
9490 val = (insn & 0xff) * 4;
9491 tcg_gen_addi_i32(tmp, tmp, val);
9492 store_reg(s, rd, tmp);
9493 break;
9494
9495 case 11:
9496 /* misc */
9497 op = (insn >> 8) & 0xf;
9498 switch (op) {
9499 case 0:
9500 /* adjust stack pointer */
9501 tmp = load_reg(s, 13);
9502 val = (insn & 0x7f) * 4;
9503 if (insn & (1 << 7))
9504 val = -(int32_t)val;
9505 tcg_gen_addi_i32(tmp, tmp, val);
9506 store_reg(s, 13, tmp);
9507 break;
9508
9509 case 2: /* sign/zero extend. */
9510 ARCH(6);
9511 rd = insn & 7;
9512 rm = (insn >> 3) & 7;
9513 tmp = load_reg(s, rm);
9514 switch ((insn >> 6) & 3) {
9515 case 0: gen_sxth(tmp); break;
9516 case 1: gen_sxtb(tmp); break;
9517 case 2: gen_uxth(tmp); break;
9518 case 3: gen_uxtb(tmp); break;
9519 }
9520 store_reg(s, rd, tmp);
9521 break;
9522 case 4: case 5: case 0xc: case 0xd:
9523 /* push/pop */
9524 addr = load_reg(s, 13);
9525 if (insn & (1 << 8))
9526 offset = 4;
9527 else
9528 offset = 0;
9529 for (i = 0; i < 8; i++) {
9530 if (insn & (1 << i))
9531 offset += 4;
9532 }
9533 if ((insn & (1 << 11)) == 0) {
9534 tcg_gen_addi_i32(addr, addr, -offset);
9535 }
9536 for (i = 0; i < 8; i++) {
9537 if (insn & (1 << i)) {
9538 if (insn & (1 << 11)) {
9539 /* pop */
9540 tmp = gen_ld32(addr, IS_USER(s));
9541 store_reg(s, i, tmp);
9542 } else {
9543 /* push */
9544 tmp = load_reg(s, i);
9545 gen_st32(tmp, addr, IS_USER(s));
9546 }
9547 /* advance to the next address. */
9548 tcg_gen_addi_i32(addr, addr, 4);
9549 }
9550 }
9551 TCGV_UNUSED(tmp);
9552 if (insn & (1 << 8)) {
9553 if (insn & (1 << 11)) {
9554 /* pop pc */
9555 tmp = gen_ld32(addr, IS_USER(s));
9556 /* don't set the pc until the rest of the instruction
9557 has completed */
9558 } else {
9559 /* push lr */
9560 tmp = load_reg(s, 14);
9561 gen_st32(tmp, addr, IS_USER(s));
9562 }
9563 tcg_gen_addi_i32(addr, addr, 4);
9564 }
9565 if ((insn & (1 << 11)) == 0) {
9566 tcg_gen_addi_i32(addr, addr, -offset);
9567 }
9568 /* write back the new stack pointer */
9569 store_reg(s, 13, addr);
9570 /* set the new PC value */
9571 if ((insn & 0x0900) == 0x0900) {
9572 store_reg_from_load(env, s, 15, tmp);
9573 }
9574 break;
9575
9576 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9577 rm = insn & 7;
9578 tmp = load_reg(s, rm);
9579 s->condlabel = gen_new_label();
9580 s->condjmp = 1;
9581 if (insn & (1 << 11))
9582 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9583 else
9584 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9585 tcg_temp_free_i32(tmp);
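/* Branch offset is insn[9]:insn[7:3]:'0'. */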
9586 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9587 val = (uint32_t)s->pc + 2;
9588 val += offset;
9589 gen_jmp(s, val);
9590 break;
9591
9592 case 15: /* IT, nop-hint. */
9593 if ((insn & 0xf) == 0) {
9594 gen_nop_hint(s, (insn >> 4) & 0xf);
9595 break;
9596 }
9597 /* If Then. */
9598 s->condexec_cond = (insn >> 4) & 0xe;
9599 s->condexec_mask = insn & 0x1f;
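/* firstcond[0] is held in bit 4 of the mask and is folded back into
condexec_cond as the IT block advances (see gen_intermediate_code_internal). */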
9600 /* No actual code generated for this insn, just set up state. */
9601 break;
9602
9603 case 0xe: /* bkpt */
9604 ARCH(5);
9605 gen_exception_insn(s, 2, EXCP_BKPT);
9606 break;
9607
9608 case 0xa: /* rev */
9609 ARCH(6);
9610 rn = (insn >> 3) & 0x7;
9611 rd = insn & 0x7;
9612 tmp = load_reg(s, rn);
9613 switch ((insn >> 6) & 3) {
9614 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9615 case 1: gen_rev16(tmp); break;
9616 case 3: gen_revsh(tmp); break;
9617 default: goto illegal_op;
9618 }
9619 store_reg(s, rd, tmp);
9620 break;
9621
9622 case 6:
9623 switch ((insn >> 5) & 7) {
9624 case 2:
9625 /* setend */
9626 ARCH(6);
9627 if (((insn >> 3) & 1) != s->bswap_code) {
9628 /* Dynamic endianness switching not implemented. */
9629 goto illegal_op;
9630 }
9631 break;
9632 case 3:
9633 /* cps */
9634 ARCH(6);
9635 if (IS_USER(s)) {
9636 break;
9637 }
9638 if (IS_M(env)) {
9639 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9640 /* FAULTMASK */
9641 if (insn & 1) {
9642 addr = tcg_const_i32(19);
9643 gen_helper_v7m_msr(cpu_env, addr, tmp);
9644 tcg_temp_free_i32(addr);
9645 }
9646 /* PRIMASK */
9647 if (insn & 2) {
9648 addr = tcg_const_i32(16);
9649 gen_helper_v7m_msr(cpu_env, addr, tmp);
9650 tcg_temp_free_i32(addr);
9651 }
9652 tcg_temp_free_i32(tmp);
9653 gen_lookup_tb(s);
9654 } else {
9655 if (insn & (1 << 4)) {
9656 shift = CPSR_A | CPSR_I | CPSR_F;
9657 } else {
9658 shift = 0;
9659 }
9660 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9661 }
9662 break;
9663 default:
9664 goto undef;
9665 }
9666 break;
9667
9668 default:
9669 goto undef;
9670 }
9671 break;
9672
9673 case 12:
9674 {
9675 /* load/store multiple */
9676 TCGv loaded_var;
9677 TCGV_UNUSED(loaded_var);
9678 rn = (insn >> 8) & 0x7;
9679 addr = load_reg(s, rn);
9680 for (i = 0; i < 8; i++) {
9681 if (insn & (1 << i)) {
9682 if (insn & (1 << 11)) {
9683 /* load */
9684 tmp = gen_ld32(addr, IS_USER(s));
9685 if (i == rn) {
9686 loaded_var = tmp;
9687 } else {
9688 store_reg(s, i, tmp);
9689 }
9690 } else {
9691 /* store */
9692 tmp = load_reg(s, i);
9693 gen_st32(tmp, addr, IS_USER(s));
9694 }
9695 /* advance to the next address */
9696 tcg_gen_addi_i32(addr, addr, 4);
9697 }
9698 }
9699 if ((insn & (1 << rn)) == 0) {
9700 /* base reg not in list: base register writeback */
9701 store_reg(s, rn, addr);
9702 } else {
9703 /* base reg in list: if load, complete it now */
9704 if (insn & (1 << 11)) {
9705 store_reg(s, rn, loaded_var);
9706 }
9707 tcg_temp_free_i32(addr);
9708 }
9709 break;
9710 }
9711 case 13:
9712 /* conditional branch or swi */
9713 cond = (insn >> 8) & 0xf;
9714 if (cond == 0xe)
9715 goto undef;
9716
9717 if (cond == 0xf) {
9718 /* swi */
9719 gen_set_pc_im(s->pc);
9720 s->is_jmp = DISAS_SWI;
9721 break;
9722 }
9723 /* generate a conditional jump to next instruction */
9724 s->condlabel = gen_new_label();
9725 gen_test_cc(cond ^ 1, s->condlabel);
9726 s->condjmp = 1;
9727
9728 /* jump to the offset */
9729 val = (uint32_t)s->pc + 2;
9730 offset = ((int32_t)insn << 24) >> 24;
9731 val += offset << 1;
9732 gen_jmp(s, val);
9733 break;
9734
9735 case 14:
9736 if (insn & (1 << 11)) {
9737 if (disas_thumb2_insn(env, s, insn))
9738 goto undef32;
9739 break;
9740 }
9741 /* unconditional branch */
9742 val = (uint32_t)s->pc;
9743 offset = ((int32_t)insn << 21) >> 21;
9744 val += (offset << 1) + 2;
9745 gen_jmp(s, val);
9746 break;
9747
9748 case 15:
9749 if (disas_thumb2_insn(env, s, insn))
9750 goto undef32;
9751 break;
9752 }
9753 return;
9754 undef32:
9755 gen_exception_insn(s, 4, EXCP_UDEF);
9756 return;
9757 illegal_op:
9758 undef:
9759 gen_exception_insn(s, 2, EXCP_UDEF);
9760 }
9761
9762 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9763 basic block 'tb'. If search_pc is TRUE, also generate PC
9764 information for each intermediate instruction. */
9765 static inline void gen_intermediate_code_internal(CPUARMState *env,
9766 TranslationBlock *tb,
9767 int search_pc)
9768 {
9769 DisasContext dc1, *dc = &dc1;
9770 CPUBreakpoint *bp;
9771 uint16_t *gen_opc_end;
9772 int j, lj;
9773 target_ulong pc_start;
9774 uint32_t next_page_start;
9775 int num_insns;
9776 int max_insns;
9777
9778 /* generate intermediate code */
9779 pc_start = tb->pc;
9780
9781 dc->tb = tb;
9782
9783 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
9784
9785 dc->is_jmp = DISAS_NEXT;
9786 dc->pc = pc_start;
9787 dc->singlestep_enabled = env->singlestep_enabled;
9788 dc->condjmp = 0;
9789 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9790 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
9791 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9792 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9793 #if !defined(CONFIG_USER_ONLY)
9794 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9795 #endif
9796 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9797 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9798 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9799 cpu_F0s = tcg_temp_new_i32();
9800 cpu_F1s = tcg_temp_new_i32();
9801 cpu_F0d = tcg_temp_new_i64();
9802 cpu_F1d = tcg_temp_new_i64();
9803 cpu_V0 = cpu_F0d;
9804 cpu_V1 = cpu_F1d;
9805 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9806 cpu_M0 = tcg_temp_new_i64();
9807 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9808 lj = -1;
9809 num_insns = 0;
9810 max_insns = tb->cflags & CF_COUNT_MASK;
9811 if (max_insns == 0)
9812 max_insns = CF_COUNT_MASK;
9813
9814 gen_tb_start();
9815
9816 tcg_clear_temp_count();
9817
9818 /* A note on handling of the condexec (IT) bits:
9819 *
9820 * We want to avoid the overhead of having to write the updated condexec
9821 * bits back to the CPUARMState for every instruction in an IT block. So:
9822 * (1) if the condexec bits are not already zero then we write
9823 * zero back into the CPUARMState now. This avoids complications trying
9824 * to do it at the end of the block. (For example if we don't do this
9825 * it's hard to identify whether we can safely skip writing condexec
9826 * at the end of the TB, which we definitely want to do for the case
9827 * where a TB doesn't do anything with the IT state at all.)
9828 * (2) if we are going to leave the TB then we call gen_set_condexec()
9829 * which will write the correct value into CPUARMState if zero is wrong.
9830 * This is done both for leaving the TB at the end, and for leaving
9831 * it because of an exception we know will happen, which is done in
9832 * gen_exception_insn(). The latter is necessary because we need to
9833 * leave the TB with the PC/IT state just prior to execution of the
9834 * instruction which caused the exception.
9835 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9836 * then the CPUARMState will be wrong and we need to reset it.
9837 * This is handled in the same way as restoration of the
9838 * PC in these situations: we will be called again with search_pc=1
9839 * and generate a mapping of the condexec bits for each PC in
9840 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9841 * this to restore the condexec bits.
9842 *
9843 * Note that there are no instructions which can read the condexec
9844 * bits, and none which can write non-static values to them, so
9845 * we don't need to care about whether CPUARMState is correct in the
9846 * middle of a TB.
9847 */
9848
9849 /* Reset the conditional execution bits immediately. This avoids
9850 complications trying to do it at the end of the block. */
9851 if (dc->condexec_mask || dc->condexec_cond)
9852 {
9853 TCGv tmp = tcg_temp_new_i32();
9854 tcg_gen_movi_i32(tmp, 0);
9855 store_cpu_field(tmp, condexec_bits);
9856 }
9857 do {
9858 #ifdef CONFIG_USER_ONLY
9859 /* Intercept jump to the magic kernel page. */
9860 if (dc->pc >= 0xffff0000) {
9861 /* We always get here via a jump, so we know we are not in a
9862 conditional execution block. */
9863 gen_exception(EXCP_KERNEL_TRAP);
9864 dc->is_jmp = DISAS_UPDATE;
9865 break;
9866 }
9867 #else
9868 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9869 /* We always get here via a jump, so we know we are not in a
9870 conditional execution block. */
9871 gen_exception(EXCP_EXCEPTION_EXIT);
9872 dc->is_jmp = DISAS_UPDATE;
9873 break;
9874 }
9875 #endif
9876
9877 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9878 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9879 if (bp->pc == dc->pc) {
9880 gen_exception_insn(dc, 0, EXCP_DEBUG);
9881 /* Advance PC so that clearing the breakpoint will
9882 invalidate this TB. */
9883 dc->pc += 2;
9884 goto done_generating;
9885 break;
9886 }
9887 }
9888 }
9889 if (search_pc) {
9890 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
9891 if (lj < j) {
9892 lj++;
9893 while (lj < j)
9894 tcg_ctx.gen_opc_instr_start[lj++] = 0;
9895 }
9896 tcg_ctx.gen_opc_pc[lj] = dc->pc;
9897 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9898 tcg_ctx.gen_opc_instr_start[lj] = 1;
9899 tcg_ctx.gen_opc_icount[lj] = num_insns;
9900 }
9901
9902 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9903 gen_io_start();
9904
9905 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
9906 tcg_gen_debug_insn_start(dc->pc);
9907 }
9908
9909 if (dc->thumb) {
9910 disas_thumb_insn(env, dc);
9911 if (dc->condexec_mask) {
9912 dc->condexec_cond = (dc->condexec_cond & 0xe)
9913 | ((dc->condexec_mask >> 4) & 1);
9914 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9915 if (dc->condexec_mask == 0) {
9916 dc->condexec_cond = 0;
9917 }
9918 }
9919 } else {
9920 disas_arm_insn(env, dc);
9921 }
9922
9923 if (dc->condjmp && !dc->is_jmp) {
9924 gen_set_label(dc->condlabel);
9925 dc->condjmp = 0;
9926 }
9927
9928 if (tcg_check_temp_count()) {
9929 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9930 }
9931
9932 /* Translation stops when a conditional branch is encountered.
9933 * Otherwise the subsequent code could get translated several times.
9934 * Also stop translation when a page boundary is reached. This
9935 * ensures prefetch aborts occur at the right place. */
9936 num_insns ++;
9937 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
9938 !env->singlestep_enabled &&
9939 !singlestep &&
9940 dc->pc < next_page_start &&
9941 num_insns < max_insns);
9942
9943 if (tb->cflags & CF_LAST_IO) {
9944 if (dc->condjmp) {
9945 /* FIXME: This can theoretically happen with self-modifying
9946 code. */
9947 cpu_abort(env, "IO on conditional branch instruction");
9948 }
9949 gen_io_end();
9950 }
9951
9952 /* At this stage dc->condjmp will only be set when the skipped
9953 instruction was a conditional branch or trap, and the PC has
9954 already been written. */
9955 if (unlikely(env->singlestep_enabled)) {
9956 /* Make sure the pc is updated, and raise a debug exception. */
9957 if (dc->condjmp) {
9958 gen_set_condexec(dc);
9959 if (dc->is_jmp == DISAS_SWI) {
9960 gen_exception(EXCP_SWI);
9961 } else {
9962 gen_exception(EXCP_DEBUG);
9963 }
9964 gen_set_label(dc->condlabel);
9965 }
9966 if (dc->condjmp || !dc->is_jmp) {
9967 gen_set_pc_im(dc->pc);
9968 dc->condjmp = 0;
9969 }
9970 gen_set_condexec(dc);
9971 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9972 gen_exception(EXCP_SWI);
9973 } else {
9974 /* FIXME: Single stepping a WFI insn will not halt
9975 the CPU. */
9976 gen_exception(EXCP_DEBUG);
9977 }
9978 } else {
9979 /* While branches must always occur at the end of an IT block,
9980 there are a few other things that can cause us to terminate
9981 the TB in the middle of an IT block:
9982 - Exception generating instructions (bkpt, swi, undefined).
9983 - Page boundaries.
9984 - Hardware watchpoints.
9985 Hardware breakpoints have already been handled and skip this code.
9986 */
9987 gen_set_condexec(dc);
9988 switch(dc->is_jmp) {
9989 case DISAS_NEXT:
9990 gen_goto_tb(dc, 1, dc->pc);
9991 break;
9992 default:
9993 case DISAS_JUMP:
9994 case DISAS_UPDATE:
9995 /* indicate that the hash table must be used to find the next TB */
9996 tcg_gen_exit_tb(0);
9997 break;
9998 case DISAS_TB_JUMP:
9999 /* nothing more to generate */
10000 break;
10001 case DISAS_WFI:
10002 gen_helper_wfi(cpu_env);
10003 break;
10004 case DISAS_SWI:
10005 gen_exception(EXCP_SWI);
10006 break;
10007 }
10008 if (dc->condjmp) {
10009 gen_set_label(dc->condlabel);
10010 gen_set_condexec(dc);
10011 gen_goto_tb(dc, 1, dc->pc);
10012 dc->condjmp = 0;
10013 }
10014 }
10015
10016 done_generating:
10017 gen_tb_end(tb, num_insns);
10018 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
10019
10020 #ifdef DEBUG_DISAS
10021 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
10022 qemu_log("----------------\n");
10023 qemu_log("IN: %s\n", lookup_symbol(pc_start));
10024 log_target_disas(env, pc_start, dc->pc - pc_start,
10025 dc->thumb | (dc->bswap_code << 1));
10026 qemu_log("\n");
10027 }
10028 #endif
10029 if (search_pc) {
10030 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
10031 lj++;
10032 while (lj <= j)
10033 tcg_ctx.gen_opc_instr_start[lj++] = 0;
10034 } else {
10035 tb->size = dc->pc - pc_start;
10036 tb->icount = num_insns;
10037 }
10038 }
10039
10040 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
10041 {
10042 gen_intermediate_code_internal(env, tb, 0);
10043 }
10044
10045 void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
10046 {
10047 gen_intermediate_code_internal(env, tb, 1);
10048 }
10049
10050 static const char *cpu_mode_names[16] = {
10051 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10052 "???", "???", "???", "und", "???", "???", "???", "sys"
10053 };
10054
10055 void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
10056 int flags)
10057 {
10058 int i;
10059 uint32_t psr;
10060
10061 for(i=0;i<16;i++) {
10062 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
10063 if ((i % 4) == 3)
10064 cpu_fprintf(f, "\n");
10065 else
10066 cpu_fprintf(f, " ");
10067 }
10068 psr = cpsr_read(env);
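/* Mode bit 4 set means a 32-bit mode; clear means a legacy 26-bit mode. */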
10069 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10070 psr,
10071 psr & (1 << 31) ? 'N' : '-',
10072 psr & (1 << 30) ? 'Z' : '-',
10073 psr & (1 << 29) ? 'C' : '-',
10074 psr & (1 << 28) ? 'V' : '-',
10075 psr & CPSR_T ? 'T' : 'A',
10076 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
10077
10078 if (flags & CPU_DUMP_FPU) {
10079 int numvfpregs = 0;
10080 if (arm_feature(env, ARM_FEATURE_VFP)) {
10081 numvfpregs += 16;
10082 }
10083 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10084 numvfpregs += 16;
10085 }
10086 for (i = 0; i < numvfpregs; i++) {
10087 uint64_t v = float64_val(env->vfp.regs[i]);
10088 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10089 i * 2, (uint32_t)v,
10090 i * 2 + 1, (uint32_t)(v >> 32),
10091 i, v);
10092 }
10093 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
10094 }
10095 }
10096
10097 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
10098 {
10099 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
10100 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
10101 }