[qemu.git] target-arm/translate.c
target-arm: fix VSHLL Neon instruction.
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
30 #include "tcg-op.h"
31 #include "qemu-log.h"
32
33 #include "helpers.h"
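/* helpers.h is expanded more than once: the plain include above declares
   the helper prototypes, the GEN_HELPER == 1 pass below defines the
   gen_helper_* call generators, and a GEN_HELPER == 2 pass inside
   arm_translate_init() registers the helpers with TCG. */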
34 #define GEN_HELPER 1
35 #include "helpers.h"
36
37 #define ENABLE_ARCH_5J 0
38 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
42
43 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
44
45 /* internal defines */
46 typedef struct DisasContext {
47 target_ulong pc;
48 int is_jmp;
49 /* Nonzero if this instruction has been conditionally skipped. */
50 int condjmp;
51 /* The label that will be jumped to when the instruction is skipped. */
52 int condlabel;
53 /* Thumb-2 conditional execution bits. */
54 int condexec_mask;
55 int condexec_cond;
56 struct TranslationBlock *tb;
57 int singlestep_enabled;
58 int thumb;
59 #if !defined(CONFIG_USER_ONLY)
60 int user;
61 #endif
62 int vfp_enabled;
63 int vec_len;
64 int vec_stride;
65 } DisasContext;
66
67 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
68
69 #if defined(CONFIG_USER_ONLY)
70 #define IS_USER(s) 1
71 #else
72 #define IS_USER(s) (s->user)
73 #endif
74
75 /* These instructions trap after executing, so defer them until after the
76 conditional execution state has been updated. */
77 #define DISAS_WFI 4
78 #define DISAS_SWI 5
79
80 static TCGv_ptr cpu_env;
81 /* We reuse the same 64-bit temporaries for efficiency. */
82 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
83 static TCGv_i32 cpu_R[16];
84 static TCGv_i32 cpu_exclusive_addr;
85 static TCGv_i32 cpu_exclusive_val;
86 static TCGv_i32 cpu_exclusive_high;
87 #ifdef CONFIG_USER_ONLY
88 static TCGv_i32 cpu_exclusive_test;
89 static TCGv_i32 cpu_exclusive_info;
90 #endif
91
92 /* FIXME: These should be removed. */
93 static TCGv cpu_F0s, cpu_F1s;
94 static TCGv_i64 cpu_F0d, cpu_F1d;
95
96 #include "gen-icount.h"
97
98 static const char *regnames[] =
99 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
100 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
101
102 /* initialize TCG globals. */
103 void arm_translate_init(void)
104 {
105 int i;
106
107 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
108
109 for (i = 0; i < 16; i++) {
110 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
111 offsetof(CPUState, regs[i]),
112 regnames[i]);
113 }
114 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUState, exclusive_addr), "exclusive_addr");
116 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUState, exclusive_val), "exclusive_val");
118 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_high), "exclusive_high");
120 #ifdef CONFIG_USER_ONLY
121 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_test), "exclusive_test");
123 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
124 offsetof(CPUState, exclusive_info), "exclusive_info");
125 #endif
126
127 #define GEN_HELPER 2
128 #include "helpers.h"
129 }
130
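/* Count of live 32-bit temporaries, used to sanity-check that every
   temporary allocated while translating an insn is freed again. */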
131 static int num_temps;
132
133 /* Allocate a temporary variable. */
134 static TCGv_i32 new_tmp(void)
135 {
136 num_temps++;
137 return tcg_temp_new_i32();
138 }
139
140 /* Release a temporary variable. */
141 static void dead_tmp(TCGv tmp)
142 {
143 tcg_temp_free(tmp);
144 num_temps--;
145 }
146
147 static inline TCGv load_cpu_offset(int offset)
148 {
149 TCGv tmp = new_tmp();
150 tcg_gen_ld_i32(tmp, cpu_env, offset);
151 return tmp;
152 }
153
154 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
155
156 static inline void store_cpu_offset(TCGv var, int offset)
157 {
158 tcg_gen_st_i32(var, cpu_env, offset);
159 dead_tmp(var);
160 }
161
162 #define store_cpu_field(var, name) \
163 store_cpu_offset(var, offsetof(CPUState, name))
164
165 /* Set a variable to the value of a CPU register. */
166 static void load_reg_var(DisasContext *s, TCGv var, int reg)
167 {
168 if (reg == 15) {
169 uint32_t addr;
170 /* Normally the PC has already been updated past this insn, so the
architectural PC value is one further insn length ahead. */
171 if (s->thumb)
172 addr = (long)s->pc + 2;
173 else
174 addr = (long)s->pc + 4;
175 tcg_gen_movi_i32(var, addr);
176 } else {
177 tcg_gen_mov_i32(var, cpu_R[reg]);
178 }
179 }
180
181 /* Create a new temporary and set it to the value of a CPU register. */
182 static inline TCGv load_reg(DisasContext *s, int reg)
183 {
184 TCGv tmp = new_tmp();
185 load_reg_var(s, tmp, reg);
186 return tmp;
187 }
188
189 /* Set a CPU register. The source must be a temporary and will be
190 marked as dead. */
191 static void store_reg(DisasContext *s, int reg, TCGv var)
192 {
193 if (reg == 15) {
194 tcg_gen_andi_i32(var, var, ~1);
195 s->is_jmp = DISAS_JUMP;
196 }
197 tcg_gen_mov_i32(cpu_R[reg], var);
198 dead_tmp(var);
199 }
200
201 /* Value extensions. */
202 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
203 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
204 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
205 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
206
207 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
208 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
209
210
211 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
212 {
213 TCGv tmp_mask = tcg_const_i32(mask);
214 gen_helper_cpsr_write(var, tmp_mask);
215 tcg_temp_free_i32(tmp_mask);
216 }
217 /* Set NZCV flags from the high 4 bits of var. */
218 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
219
220 static void gen_exception(int excp)
221 {
222 TCGv tmp = new_tmp();
223 tcg_gen_movi_i32(tmp, excp);
224 gen_helper_exception(tmp);
225 dead_tmp(tmp);
226 }
227
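/* Dual signed 16x16->32 multiply: a = lo16(a) * lo16(b) and
   b = hi16(a) * hi16(b), with the low product computed into a
   temporary first so that the operands are not clobbered early. */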
228 static void gen_smul_dual(TCGv a, TCGv b)
229 {
230 TCGv tmp1 = new_tmp();
231 TCGv tmp2 = new_tmp();
232 tcg_gen_ext16s_i32(tmp1, a);
233 tcg_gen_ext16s_i32(tmp2, b);
234 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
235 dead_tmp(tmp2);
236 tcg_gen_sari_i32(a, a, 16);
237 tcg_gen_sari_i32(b, b, 16);
238 tcg_gen_mul_i32(b, b, a);
239 tcg_gen_mov_i32(a, tmp1);
240 dead_tmp(tmp1);
241 }
242
243 /* Byteswap each halfword. */
244 static void gen_rev16(TCGv var)
245 {
246 TCGv tmp = new_tmp();
247 tcg_gen_shri_i32(tmp, var, 8);
248 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
249 tcg_gen_shli_i32(var, var, 8);
250 tcg_gen_andi_i32(var, var, 0xff00ff00);
251 tcg_gen_or_i32(var, var, tmp);
252 dead_tmp(tmp);
253 }
254
255 /* Byteswap low halfword and sign extend. */
256 static void gen_revsh(TCGv var)
257 {
258 tcg_gen_ext16u_i32(var, var);
259 tcg_gen_bswap16_i32(var, var);
260 tcg_gen_ext16s_i32(var, var);
261 }
262
263 /* Unsigned bitfield extract. */
264 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
265 {
266 if (shift)
267 tcg_gen_shri_i32(var, var, shift);
268 tcg_gen_andi_i32(var, var, mask);
269 }
270
271 /* Signed bitfield extract. */
272 static void gen_sbfx(TCGv var, int shift, int width)
273 {
274 uint32_t signbit;
275
276 if (shift)
277 tcg_gen_sari_i32(var, var, shift);
278 if (shift + width < 32) {
279 signbit = 1u << (width - 1);
280 tcg_gen_andi_i32(var, var, (1u << width) - 1);
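/* Sign extend the width-bit value: with signbit = 1 << (width - 1),
(x ^ signbit) - signbit subtracts 2 * signbit when the sign bit is
set and leaves x unchanged otherwise. */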
281 tcg_gen_xori_i32(var, var, signbit);
282 tcg_gen_subi_i32(var, var, signbit);
283 }
284 }
285
286 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
287 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
288 {
289 tcg_gen_andi_i32(val, val, mask);
290 tcg_gen_shli_i32(val, val, shift);
291 tcg_gen_andi_i32(base, base, ~(mask << shift));
292 tcg_gen_or_i32(dest, base, val);
293 }
294
295 /* Return (b << 32) + a. Mark inputs as dead. */
296 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
297 {
298 TCGv_i64 tmp64 = tcg_temp_new_i64();
299
300 tcg_gen_extu_i32_i64(tmp64, b);
301 dead_tmp(b);
302 tcg_gen_shli_i64(tmp64, tmp64, 32);
303 tcg_gen_add_i64(a, tmp64, a);
304
305 tcg_temp_free_i64(tmp64);
306 return a;
307 }
308
309 /* Return (b << 32) - a. Mark inputs as dead. */
310 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
311 {
312 TCGv_i64 tmp64 = tcg_temp_new_i64();
313
314 tcg_gen_extu_i32_i64(tmp64, b);
315 dead_tmp(b);
316 tcg_gen_shli_i64(tmp64, tmp64, 32);
317 tcg_gen_sub_i64(a, tmp64, a);
318
319 tcg_temp_free_i64(tmp64);
320 return a;
321 }
322
323 /* FIXME: Most targets have native widening multiplication.
324 It would be good to use that instead of a full wide multiply. */
325 /* 32x32->64 multiply. Marks inputs as dead. */
326 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
327 {
328 TCGv_i64 tmp1 = tcg_temp_new_i64();
329 TCGv_i64 tmp2 = tcg_temp_new_i64();
330
331 tcg_gen_extu_i32_i64(tmp1, a);
332 dead_tmp(a);
333 tcg_gen_extu_i32_i64(tmp2, b);
334 dead_tmp(b);
335 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
336 tcg_temp_free_i64(tmp2);
337 return tmp1;
338 }
339
340 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
341 {
342 TCGv_i64 tmp1 = tcg_temp_new_i64();
343 TCGv_i64 tmp2 = tcg_temp_new_i64();
344
345 tcg_gen_ext_i32_i64(tmp1, a);
346 dead_tmp(a);
347 tcg_gen_ext_i32_i64(tmp2, b);
348 dead_tmp(b);
349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
350 tcg_temp_free_i64(tmp2);
351 return tmp1;
352 }
353
354 /* Swap low and high halfwords. */
355 static void gen_swap_half(TCGv var)
356 {
357 TCGv tmp = new_tmp();
358 tcg_gen_shri_i32(tmp, var, 16);
359 tcg_gen_shli_i32(var, var, 16);
360 tcg_gen_or_i32(var, var, tmp);
361 dead_tmp(tmp);
362 }
363
364 /* Dual 16-bit add. The result is placed in t0; t1 is marked as dead.
365 tmp = (t0 ^ t1) & 0x8000;
366 t0 &= ~0x8000;
367 t1 &= ~0x8000;
368 t0 = (t0 + t1) ^ tmp;
369 */
370
371 static void gen_add16(TCGv t0, TCGv t1)
372 {
373 TCGv tmp = new_tmp();
374 tcg_gen_xor_i32(tmp, t0, t1);
375 tcg_gen_andi_i32(tmp, tmp, 0x8000);
376 tcg_gen_andi_i32(t0, t0, ~0x8000);
377 tcg_gen_andi_i32(t1, t1, ~0x8000);
378 tcg_gen_add_i32(t0, t0, t1);
379 tcg_gen_xor_i32(t0, t0, tmp);
380 dead_tmp(tmp);
381 dead_tmp(t1);
382 }
383
384 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
385
386 /* Set CF to the top bit of var. */
387 static void gen_set_CF_bit31(TCGv var)
388 {
389 TCGv tmp = new_tmp();
390 tcg_gen_shri_i32(tmp, var, 31);
391 gen_set_CF(tmp);
392 dead_tmp(tmp);
393 }
394
395 /* Set N and Z flags from var. */
396 static inline void gen_logic_CC(TCGv var)
397 {
398 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
399 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
400 }
401
402 /* T0 += T1 + CF. */
403 static void gen_adc(TCGv t0, TCGv t1)
404 {
405 TCGv tmp;
406 tcg_gen_add_i32(t0, t0, t1);
407 tmp = load_cpu_field(CF);
408 tcg_gen_add_i32(t0, t0, tmp);
409 dead_tmp(tmp);
410 }
411
412 /* dest = T0 + T1 + CF. */
413 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
414 {
415 TCGv tmp;
416 tcg_gen_add_i32(dest, t0, t1);
417 tmp = load_cpu_field(CF);
418 tcg_gen_add_i32(dest, dest, tmp);
419 dead_tmp(tmp);
420 }
421
422 /* dest = T0 - T1 + CF - 1. */
423 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
424 {
425 TCGv tmp;
426 tcg_gen_sub_i32(dest, t0, t1);
427 tmp = load_cpu_field(CF);
428 tcg_gen_add_i32(dest, dest, tmp);
429 tcg_gen_subi_i32(dest, dest, 1);
430 dead_tmp(tmp);
431 }
432
433 /* FIXME: Implement this natively. */
434 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
435
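/* Copy bit `shift` of var (the shifter carry-out) into the C flag. */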
436 static void shifter_out_im(TCGv var, int shift)
437 {
438 TCGv tmp = new_tmp();
439 if (shift == 0) {
440 tcg_gen_andi_i32(tmp, var, 1);
441 } else {
442 tcg_gen_shri_i32(tmp, var, shift);
443 if (shift != 31)
444 tcg_gen_andi_i32(tmp, tmp, 1);
445 }
446 gen_set_CF(tmp);
447 dead_tmp(tmp);
448 }
449
450 /* Shift by immediate. Includes special handling for shift == 0. */
451 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
452 {
453 switch (shiftop) {
454 case 0: /* LSL */
455 if (shift != 0) {
456 if (flags)
457 shifter_out_im(var, 32 - shift);
458 tcg_gen_shli_i32(var, var, shift);
459 }
460 break;
461 case 1: /* LSR */
462 if (shift == 0) {
463 if (flags) {
464 tcg_gen_shri_i32(var, var, 31);
465 gen_set_CF(var);
466 }
467 tcg_gen_movi_i32(var, 0);
468 } else {
469 if (flags)
470 shifter_out_im(var, shift - 1);
471 tcg_gen_shri_i32(var, var, shift);
472 }
473 break;
474 case 2: /* ASR */
475 if (shift == 0)
476 shift = 32;
477 if (flags)
478 shifter_out_im(var, shift - 1);
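/* ASR #32 is encoded as shift == 0; its result is all copies of the
sign bit, which an arithmetic shift by 31 also produces. */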
479 if (shift == 32)
480 shift = 31;
481 tcg_gen_sari_i32(var, var, shift);
482 break;
483 case 3: /* ROR/RRX */
484 if (shift != 0) {
485 if (flags)
486 shifter_out_im(var, shift - 1);
487 tcg_gen_rotri_i32(var, var, shift); break;
488 } else {
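/* ROR #0 encodes RRX: shift right by one, rotating the old C flag
into bit 31. */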
489 TCGv tmp = load_cpu_field(CF);
490 if (flags)
491 shifter_out_im(var, 0);
492 tcg_gen_shri_i32(var, var, 1);
493 tcg_gen_shli_i32(tmp, tmp, 31);
494 tcg_gen_or_i32(var, var, tmp);
495 dead_tmp(tmp);
496 }
497 }
498 }
499
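/* Shift by register. The _cc helpers also compute the shifter
   carry-out; the plain ROR case masks the shift count to 0..31 itself. */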
500 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
501 TCGv shift, int flags)
502 {
503 if (flags) {
504 switch (shiftop) {
505 case 0: gen_helper_shl_cc(var, var, shift); break;
506 case 1: gen_helper_shr_cc(var, var, shift); break;
507 case 2: gen_helper_sar_cc(var, var, shift); break;
508 case 3: gen_helper_ror_cc(var, var, shift); break;
509 }
510 } else {
511 switch (shiftop) {
512 case 0: gen_helper_shl(var, var, shift); break;
513 case 1: gen_helper_shr(var, var, shift); break;
514 case 2: gen_helper_sar(var, var, shift); break;
515 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
516 tcg_gen_rotr_i32(var, var, shift); break;
517 }
518 }
519 dead_tmp(shift);
520 }
521
522 #define PAS_OP(pfx) \
523 switch (op2) { \
524 case 0: gen_pas_helper(glue(pfx,add16)); break; \
525 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
526 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
527 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
528 case 4: gen_pas_helper(glue(pfx,add8)); break; \
529 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
530 }
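/* ARMv6 parallel add/subtract. op1 selects the prefix: signed (s),
   saturating (q), signed halving (sh), unsigned (u), unsigned
   saturating (uq) or unsigned halving (uh); the s and u forms also
   produce the GE flags, so those helpers get a pointer to env->GE.
   e.g. op1 == 1, op2 == 0 expands to gen_helper_sadd16(a, a, b, tmp). */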
531 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
532 {
533 TCGv_ptr tmp;
534
535 switch (op1) {
536 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
537 case 1:
538 tmp = tcg_temp_new_ptr();
539 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
540 PAS_OP(s)
541 tcg_temp_free_ptr(tmp);
542 break;
543 case 5:
544 tmp = tcg_temp_new_ptr();
545 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
546 PAS_OP(u)
547 tcg_temp_free_ptr(tmp);
548 break;
549 #undef gen_pas_helper
550 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
551 case 2:
552 PAS_OP(q);
553 break;
554 case 3:
555 PAS_OP(sh);
556 break;
557 case 6:
558 PAS_OP(uq);
559 break;
560 case 7:
561 PAS_OP(uh);
562 break;
563 #undef gen_pas_helper
564 }
565 }
566 #undef PAS_OP
567
568 /* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings. */
569 #define PAS_OP(pfx) \
570 switch (op1) { \
571 case 0: gen_pas_helper(glue(pfx,add8)); break; \
572 case 1: gen_pas_helper(glue(pfx,add16)); break; \
573 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
574 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
575 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
576 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
577 }
578 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
579 {
580 TCGv_ptr tmp;
581
582 switch (op2) {
583 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
584 case 0:
585 tmp = tcg_temp_new_ptr();
586 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
587 PAS_OP(s)
588 tcg_temp_free_ptr(tmp);
589 break;
590 case 4:
591 tmp = tcg_temp_new_ptr();
592 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
593 PAS_OP(u)
594 tcg_temp_free_ptr(tmp);
595 break;
596 #undef gen_pas_helper
597 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
598 case 1:
599 PAS_OP(q);
600 break;
601 case 2:
602 PAS_OP(sh);
603 break;
604 case 5:
605 PAS_OP(uq);
606 break;
607 case 6:
608 PAS_OP(uh);
609 break;
610 #undef gen_pas_helper
611 }
612 }
613 #undef PAS_OP
614
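/* Emit a branch to `label` taken when ARM condition code `cc` holds,
   evaluated from the cached NF/ZF/CF/VF flag values. */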
615 static void gen_test_cc(int cc, int label)
616 {
617 TCGv tmp;
618 TCGv tmp2;
619 int inv;
620
621 switch (cc) {
622 case 0: /* eq: Z */
623 tmp = load_cpu_field(ZF);
624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
625 break;
626 case 1: /* ne: !Z */
627 tmp = load_cpu_field(ZF);
628 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
629 break;
630 case 2: /* cs: C */
631 tmp = load_cpu_field(CF);
632 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
633 break;
634 case 3: /* cc: !C */
635 tmp = load_cpu_field(CF);
636 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
637 break;
638 case 4: /* mi: N */
639 tmp = load_cpu_field(NF);
640 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
641 break;
642 case 5: /* pl: !N */
643 tmp = load_cpu_field(NF);
644 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
645 break;
646 case 6: /* vs: V */
647 tmp = load_cpu_field(VF);
648 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
649 break;
650 case 7: /* vc: !V */
651 tmp = load_cpu_field(VF);
652 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
653 break;
654 case 8: /* hi: C && !Z */
655 inv = gen_new_label();
656 tmp = load_cpu_field(CF);
657 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
658 dead_tmp(tmp);
659 tmp = load_cpu_field(ZF);
660 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
661 gen_set_label(inv);
662 break;
663 case 9: /* ls: !C || Z */
664 tmp = load_cpu_field(CF);
665 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
666 dead_tmp(tmp);
667 tmp = load_cpu_field(ZF);
668 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
669 break;
670 case 10: /* ge: N == V -> N ^ V == 0 */
671 tmp = load_cpu_field(VF);
672 tmp2 = load_cpu_field(NF);
673 tcg_gen_xor_i32(tmp, tmp, tmp2);
674 dead_tmp(tmp2);
675 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
676 break;
677 case 11: /* lt: N != V -> N ^ V != 0 */
678 tmp = load_cpu_field(VF);
679 tmp2 = load_cpu_field(NF);
680 tcg_gen_xor_i32(tmp, tmp, tmp2);
681 dead_tmp(tmp2);
682 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
683 break;
684 case 12: /* gt: !Z && N == V */
685 inv = gen_new_label();
686 tmp = load_cpu_field(ZF);
687 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
688 dead_tmp(tmp);
689 tmp = load_cpu_field(VF);
690 tmp2 = load_cpu_field(NF);
691 tcg_gen_xor_i32(tmp, tmp, tmp2);
692 dead_tmp(tmp2);
693 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
694 gen_set_label(inv);
695 break;
696 case 13: /* le: Z || N != V */
697 tmp = load_cpu_field(ZF);
698 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
699 dead_tmp(tmp);
700 tmp = load_cpu_field(VF);
701 tmp2 = load_cpu_field(NF);
702 tcg_gen_xor_i32(tmp, tmp, tmp2);
703 dead_tmp(tmp2);
704 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
705 break;
706 default:
707 fprintf(stderr, "Bad condition code 0x%x\n", cc);
708 abort();
709 }
710 dead_tmp(tmp);
711 }
712
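/* 1 for each data processing opcode that is a logical operation, i.e.
   one whose flag update is N/Z from the result (plus the shifter
   carry-out) rather than from an adder. */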
713 static const uint8_t table_logic_cc[16] = {
714 1, /* and */
715 1, /* xor */
716 0, /* sub */
717 0, /* rsb */
718 0, /* add */
719 0, /* adc */
720 0, /* sbc */
721 0, /* rsc */
722 1, /* andl */
723 1, /* xorl */
724 0, /* cmp */
725 0, /* cmn */
726 1, /* orr */
727 1, /* mov */
728 1, /* bic */
729 1, /* mvn */
730 };
731
732 /* Set PC and Thumb state from an immediate address. */
733 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
734 {
735 TCGv tmp;
736
737 s->is_jmp = DISAS_UPDATE;
738 if (s->thumb != (addr & 1)) {
739 tmp = new_tmp();
740 tcg_gen_movi_i32(tmp, addr & 1);
741 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
742 dead_tmp(tmp);
743 }
744 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
745 }
746
747 /* Set PC and Thumb state from var. var is marked as dead. */
748 static inline void gen_bx(DisasContext *s, TCGv var)
749 {
750 s->is_jmp = DISAS_UPDATE;
751 tcg_gen_andi_i32(cpu_R[15], var, ~1);
752 tcg_gen_andi_i32(var, var, 1);
753 store_cpu_field(var, thumb);
754 }
755
756 /* Variant of store_reg which uses branch&exchange logic when storing
757 to r15 in ARM architecture v7 and above. The source must be a temporary
758 and will be marked as dead. */
759 static inline void store_reg_bx(CPUState *env, DisasContext *s,
760 int reg, TCGv var)
761 {
762 if (reg == 15 && ENABLE_ARCH_7) {
763 gen_bx(s, var);
764 } else {
765 store_reg(s, reg, var);
766 }
767 }
768
769 static inline TCGv gen_ld8s(TCGv addr, int index)
770 {
771 TCGv tmp = new_tmp();
772 tcg_gen_qemu_ld8s(tmp, addr, index);
773 return tmp;
774 }
775 static inline TCGv gen_ld8u(TCGv addr, int index)
776 {
777 TCGv tmp = new_tmp();
778 tcg_gen_qemu_ld8u(tmp, addr, index);
779 return tmp;
780 }
781 static inline TCGv gen_ld16s(TCGv addr, int index)
782 {
783 TCGv tmp = new_tmp();
784 tcg_gen_qemu_ld16s(tmp, addr, index);
785 return tmp;
786 }
787 static inline TCGv gen_ld16u(TCGv addr, int index)
788 {
789 TCGv tmp = new_tmp();
790 tcg_gen_qemu_ld16u(tmp, addr, index);
791 return tmp;
792 }
793 static inline TCGv gen_ld32(TCGv addr, int index)
794 {
795 TCGv tmp = new_tmp();
796 tcg_gen_qemu_ld32u(tmp, addr, index);
797 return tmp;
798 }
799 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
800 {
801 TCGv_i64 tmp = tcg_temp_new_i64();
802 tcg_gen_qemu_ld64(tmp, addr, index);
803 return tmp;
804 }
805 static inline void gen_st8(TCGv val, TCGv addr, int index)
806 {
807 tcg_gen_qemu_st8(val, addr, index);
808 dead_tmp(val);
809 }
810 static inline void gen_st16(TCGv val, TCGv addr, int index)
811 {
812 tcg_gen_qemu_st16(val, addr, index);
813 dead_tmp(val);
814 }
815 static inline void gen_st32(TCGv val, TCGv addr, int index)
816 {
817 tcg_gen_qemu_st32(val, addr, index);
818 dead_tmp(val);
819 }
820 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
821 {
822 tcg_gen_qemu_st64(val, addr, index);
823 tcg_temp_free_i64(val);
824 }
825
826 static inline void gen_set_pc_im(uint32_t val)
827 {
828 tcg_gen_movi_i32(cpu_R[15], val);
829 }
830
831 /* Force a TB lookup after an instruction that changes the CPU state. */
832 static inline void gen_lookup_tb(DisasContext *s)
833 {
834 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
835 s->is_jmp = DISAS_UPDATE;
836 }
837
838 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
839 TCGv var)
840 {
841 int val, rm, shift, shiftop;
842 TCGv offset;
843
844 if (!(insn & (1 << 25))) {
845 /* immediate */
846 val = insn & 0xfff;
847 if (!(insn & (1 << 23)))
848 val = -val;
849 if (val != 0)
850 tcg_gen_addi_i32(var, var, val);
851 } else {
852 /* shift/register */
853 rm = (insn) & 0xf;
854 shift = (insn >> 7) & 0x1f;
855 shiftop = (insn >> 5) & 3;
856 offset = load_reg(s, rm);
857 gen_arm_shift_im(offset, shiftop, shift, 0);
858 if (!(insn & (1 << 23)))
859 tcg_gen_sub_i32(var, var, offset);
860 else
861 tcg_gen_add_i32(var, var, offset);
862 dead_tmp(offset);
863 }
864 }
865
866 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
867 int extra, TCGv var)
868 {
869 int val, rm;
870 TCGv offset;
871
872 if (insn & (1 << 22)) {
873 /* immediate */
874 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
875 if (!(insn & (1 << 23)))
876 val = -val;
877 val += extra;
878 if (val != 0)
879 tcg_gen_addi_i32(var, var, val);
880 } else {
881 /* register */
882 if (extra)
883 tcg_gen_addi_i32(var, var, extra);
884 rm = (insn) & 0xf;
885 offset = load_reg(s, rm);
886 if (!(insn & (1 << 23)))
887 tcg_gen_sub_i32(var, var, offset);
888 else
889 tcg_gen_add_i32(var, var, offset);
890 dead_tmp(offset);
891 }
892 }
893
894 #define VFP_OP2(name) \
895 static inline void gen_vfp_##name(int dp) \
896 { \
897 if (dp) \
898 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
899 else \
900 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
901 }
902
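/* e.g. VFP_OP2(add) defines gen_vfp_add(dp), which applies
   gen_helper_vfp_addd or gen_helper_vfp_adds to the F0/F1 operand
   registers according to dp. */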
903 VFP_OP2(add)
904 VFP_OP2(sub)
905 VFP_OP2(mul)
906 VFP_OP2(div)
907
908 #undef VFP_OP2
909
910 static inline void gen_vfp_abs(int dp)
911 {
912 if (dp)
913 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
914 else
915 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
916 }
917
918 static inline void gen_vfp_neg(int dp)
919 {
920 if (dp)
921 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
922 else
923 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
924 }
925
926 static inline void gen_vfp_sqrt(int dp)
927 {
928 if (dp)
929 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
930 else
931 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
932 }
933
934 static inline void gen_vfp_cmp(int dp)
935 {
936 if (dp)
937 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
938 else
939 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
940 }
941
942 static inline void gen_vfp_cmpe(int dp)
943 {
944 if (dp)
945 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
946 else
947 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
948 }
949
950 static inline void gen_vfp_F1_ld0(int dp)
951 {
952 if (dp)
953 tcg_gen_movi_i64(cpu_F1d, 0);
954 else
955 tcg_gen_movi_i32(cpu_F1s, 0);
956 }
957
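/* Integer/float conversions keep the 32-bit integer operand in
   cpu_F0s even when the float side is double precision. */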
958 static inline void gen_vfp_uito(int dp)
959 {
960 if (dp)
961 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
962 else
963 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
964 }
965
966 static inline void gen_vfp_sito(int dp)
967 {
968 if (dp)
969 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
970 else
971 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
972 }
973
974 static inline void gen_vfp_toui(int dp)
975 {
976 if (dp)
977 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
978 else
979 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
980 }
981
982 static inline void gen_vfp_touiz(int dp)
983 {
984 if (dp)
985 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
986 else
987 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
988 }
989
990 static inline void gen_vfp_tosi(int dp)
991 {
992 if (dp)
993 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
994 else
995 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
996 }
997
998 static inline void gen_vfp_tosiz(int dp)
999 {
1000 if (dp)
1001 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
1002 else
1003 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1004 }
1005
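/* Fixed-point conversions: `shift` is the number of fraction bits,
   passed down to the helper as an immediate. */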
1006 #define VFP_GEN_FIX(name) \
1007 static inline void gen_vfp_##name(int dp, int shift) \
1008 { \
1009 TCGv tmp_shift = tcg_const_i32(shift); \
1010 if (dp) \
1011 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
1012 else \
1013 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1014 tcg_temp_free_i32(tmp_shift); \
1015 }
1016 VFP_GEN_FIX(tosh)
1017 VFP_GEN_FIX(tosl)
1018 VFP_GEN_FIX(touh)
1019 VFP_GEN_FIX(toul)
1020 VFP_GEN_FIX(shto)
1021 VFP_GEN_FIX(slto)
1022 VFP_GEN_FIX(uhto)
1023 VFP_GEN_FIX(ulto)
1024 #undef VFP_GEN_FIX
1025
1026 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1027 {
1028 if (dp)
1029 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1030 else
1031 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1032 }
1033
1034 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1035 {
1036 if (dp)
1037 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1038 else
1039 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1040 }
1041
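/* Byte offset of a VFP register within CPUARMState: double-precision
   registers index vfp.regs directly, while a single-precision register
   maps onto the lower (even reg) or upper (odd reg) half of the
   double that contains it. */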
1042 static inline long
1043 vfp_reg_offset (int dp, int reg)
1044 {
1045 if (dp)
1046 return offsetof(CPUARMState, vfp.regs[reg]);
1047 else if (reg & 1) {
1048 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1049 + offsetof(CPU_DoubleU, l.upper);
1050 } else {
1051 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1052 + offsetof(CPU_DoubleU, l.lower);
1053 }
1054 }
1055
1056 /* Return the offset of a 32-bit piece of a NEON register.
1057 Zero is the least significant end of the register. */
1058 static inline long
1059 neon_reg_offset (int reg, int n)
1060 {
1061 int sreg;
1062 sreg = reg * 2 + n;
1063 return vfp_reg_offset(0, sreg);
1064 }
1065
1066 static TCGv neon_load_reg(int reg, int pass)
1067 {
1068 TCGv tmp = new_tmp();
1069 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1070 return tmp;
1071 }
1072
1073 static void neon_store_reg(int reg, int pass, TCGv var)
1074 {
1075 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1076 dead_tmp(var);
1077 }
1078
1079 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1080 {
1081 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1082 }
1083
1084 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1085 {
1086 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1087 }
1088
1089 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1090 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1091 #define tcg_gen_st_f32 tcg_gen_st_i32
1092 #define tcg_gen_st_f64 tcg_gen_st_i64
1093
1094 static inline void gen_mov_F0_vreg(int dp, int reg)
1095 {
1096 if (dp)
1097 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1098 else
1099 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1100 }
1101
1102 static inline void gen_mov_F1_vreg(int dp, int reg)
1103 {
1104 if (dp)
1105 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1106 else
1107 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1108 }
1109
1110 static inline void gen_mov_vreg_F0(int dp, int reg)
1111 {
1112 if (dp)
1113 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1114 else
1115 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1116 }
1117
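/* Bit 20 of a coprocessor instruction: set for reads (MRC/loads),
   clear for writes. */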
1118 #define ARM_CP_RW_BIT (1 << 20)
1119
1120 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1121 {
1122 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1123 }
1124
1125 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1126 {
1127 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1128 }
1129
1130 static inline TCGv iwmmxt_load_creg(int reg)
1131 {
1132 TCGv var = new_tmp();
1133 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1134 return var;
1135 }
1136
1137 static inline void iwmmxt_store_creg(int reg, TCGv var)
1138 {
1139 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1140 dead_tmp(var);
1141 }
1142
1143 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1144 {
1145 iwmmxt_store_reg(cpu_M0, rn);
1146 }
1147
1148 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1149 {
1150 iwmmxt_load_reg(cpu_M0, rn);
1151 }
1152
1153 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1154 {
1155 iwmmxt_load_reg(cpu_V1, rn);
1156 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1157 }
1158
1159 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1160 {
1161 iwmmxt_load_reg(cpu_V1, rn);
1162 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1163 }
1164
1165 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1166 {
1167 iwmmxt_load_reg(cpu_V1, rn);
1168 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1169 }
1170
1171 #define IWMMXT_OP(name) \
1172 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1173 { \
1174 iwmmxt_load_reg(cpu_V1, rn); \
1175 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1176 }
1177
1178 #define IWMMXT_OP_ENV(name) \
1179 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1180 { \
1181 iwmmxt_load_reg(cpu_V1, rn); \
1182 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1183 }
1184
1185 #define IWMMXT_OP_ENV_SIZE(name) \
1186 IWMMXT_OP_ENV(name##b) \
1187 IWMMXT_OP_ENV(name##w) \
1188 IWMMXT_OP_ENV(name##l)
1189
1190 #define IWMMXT_OP_ENV1(name) \
1191 static inline void gen_op_iwmmxt_##name##_M0(void) \
1192 { \
1193 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1194 }
1195
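/* e.g. IWMMXT_OP(maddsq) defines gen_op_iwmmxt_maddsq_M0_wRn(), which
   loads wRn into cpu_V1 and calls gen_helper_iwmmxt_maddsq(M0, M0, V1).
   The _ENV variants also pass cpu_env, for helpers that need access to
   CPU state such as flags or saturation status. */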
1196 IWMMXT_OP(maddsq)
1197 IWMMXT_OP(madduq)
1198 IWMMXT_OP(sadb)
1199 IWMMXT_OP(sadw)
1200 IWMMXT_OP(mulslw)
1201 IWMMXT_OP(mulshw)
1202 IWMMXT_OP(mululw)
1203 IWMMXT_OP(muluhw)
1204 IWMMXT_OP(macsw)
1205 IWMMXT_OP(macuw)
1206
1207 IWMMXT_OP_ENV_SIZE(unpackl)
1208 IWMMXT_OP_ENV_SIZE(unpackh)
1209
1210 IWMMXT_OP_ENV1(unpacklub)
1211 IWMMXT_OP_ENV1(unpackluw)
1212 IWMMXT_OP_ENV1(unpacklul)
1213 IWMMXT_OP_ENV1(unpackhub)
1214 IWMMXT_OP_ENV1(unpackhuw)
1215 IWMMXT_OP_ENV1(unpackhul)
1216 IWMMXT_OP_ENV1(unpacklsb)
1217 IWMMXT_OP_ENV1(unpacklsw)
1218 IWMMXT_OP_ENV1(unpacklsl)
1219 IWMMXT_OP_ENV1(unpackhsb)
1220 IWMMXT_OP_ENV1(unpackhsw)
1221 IWMMXT_OP_ENV1(unpackhsl)
1222
1223 IWMMXT_OP_ENV_SIZE(cmpeq)
1224 IWMMXT_OP_ENV_SIZE(cmpgtu)
1225 IWMMXT_OP_ENV_SIZE(cmpgts)
1226
1227 IWMMXT_OP_ENV_SIZE(mins)
1228 IWMMXT_OP_ENV_SIZE(minu)
1229 IWMMXT_OP_ENV_SIZE(maxs)
1230 IWMMXT_OP_ENV_SIZE(maxu)
1231
1232 IWMMXT_OP_ENV_SIZE(subn)
1233 IWMMXT_OP_ENV_SIZE(addn)
1234 IWMMXT_OP_ENV_SIZE(subu)
1235 IWMMXT_OP_ENV_SIZE(addu)
1236 IWMMXT_OP_ENV_SIZE(subs)
1237 IWMMXT_OP_ENV_SIZE(adds)
1238
1239 IWMMXT_OP_ENV(avgb0)
1240 IWMMXT_OP_ENV(avgb1)
1241 IWMMXT_OP_ENV(avgw0)
1242 IWMMXT_OP_ENV(avgw1)
1243
1244 IWMMXT_OP(msadb)
1245
1246 IWMMXT_OP_ENV(packuw)
1247 IWMMXT_OP_ENV(packul)
1248 IWMMXT_OP_ENV(packuq)
1249 IWMMXT_OP_ENV(packsw)
1250 IWMMXT_OP_ENV(packsl)
1251 IWMMXT_OP_ENV(packsq)
1252
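/* wCon sticky status bits: MUP (bit 1) records an update of a main
   wRn register, CUP (bit 0) an update of a control register. */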
1253 static void gen_op_iwmmxt_set_mup(void)
1254 {
1255 TCGv tmp;
1256 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1257 tcg_gen_ori_i32(tmp, tmp, 2);
1258 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1259 }
1260
1261 static void gen_op_iwmmxt_set_cup(void)
1262 {
1263 TCGv tmp;
1264 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1265 tcg_gen_ori_i32(tmp, tmp, 1);
1266 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1267 }
1268
1269 static void gen_op_iwmmxt_setpsr_nz(void)
1270 {
1271 TCGv tmp = new_tmp();
1272 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1273 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1274 }
1275
1276 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1277 {
1278 iwmmxt_load_reg(cpu_V1, rn);
1279 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1280 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1281 }
1282
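/* Compute the effective address of an iwMMXt load/store into dest:
   an 8-bit immediate offset, scaled by 4 when bit 8 is set, with
   optional pre- or post-indexed writeback. Returns nonzero for an
   invalid addressing mode. */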
1283 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1284 {
1285 int rd;
1286 uint32_t offset;
1287 TCGv tmp;
1288
1289 rd = (insn >> 16) & 0xf;
1290 tmp = load_reg(s, rd);
1291
1292 offset = (insn & 0xff) << ((insn >> 7) & 2);
1293 if (insn & (1 << 24)) {
1294 /* Pre indexed */
1295 if (insn & (1 << 23))
1296 tcg_gen_addi_i32(tmp, tmp, offset);
1297 else
1298 tcg_gen_addi_i32(tmp, tmp, -offset);
1299 tcg_gen_mov_i32(dest, tmp);
1300 if (insn & (1 << 21))
1301 store_reg(s, rd, tmp);
1302 else
1303 dead_tmp(tmp);
1304 } else if (insn & (1 << 21)) {
1305 /* Post indexed */
1306 tcg_gen_mov_i32(dest, tmp);
1307 if (insn & (1 << 23))
1308 tcg_gen_addi_i32(tmp, tmp, offset);
1309 else
1310 tcg_gen_addi_i32(tmp, tmp, -offset);
1311 store_reg(s, rd, tmp);
1312 } else if (!(insn & (1 << 23)))
1313 return 1;
1314 return 0;
1315 }
1316
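/* Fetch the shift amount for an iwMMXt shift instruction into dest:
   from one of wCGR0..wCGR3 when bit 8 is set, otherwise from the low
   32 bits of the named wR register, masked to `mask` in either case.
   Returns nonzero for an invalid encoding. */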
1317 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1318 {
1319 int rd = (insn >> 0) & 0xf;
1320 TCGv tmp;
1321
1322 if (insn & (1 << 8)) {
1323 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1324 return 1;
1325 } else {
1326 tmp = iwmmxt_load_creg(rd);
1327 }
1328 } else {
1329 tmp = new_tmp();
1330 iwmmxt_load_reg(cpu_V0, rd);
1331 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1332 }
1333 tcg_gen_andi_i32(tmp, tmp, mask);
1334 tcg_gen_mov_i32(dest, tmp);
1335 dead_tmp(tmp);
1336 return 0;
1337 }
1338
1339 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1340 (i.e. an undefined instruction). */
1341 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1342 {
1343 int rd, wrd;
1344 int rdhi, rdlo, rd0, rd1, i;
1345 TCGv addr;
1346 TCGv tmp, tmp2, tmp3;
1347
1348 if ((insn & 0x0e000e00) == 0x0c000000) {
1349 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1350 wrd = insn & 0xf;
1351 rdlo = (insn >> 12) & 0xf;
1352 rdhi = (insn >> 16) & 0xf;
1353 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1354 iwmmxt_load_reg(cpu_V0, wrd);
1355 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1356 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1357 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1358 } else { /* TMCRR */
1359 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1360 iwmmxt_store_reg(cpu_V0, wrd);
1361 gen_op_iwmmxt_set_mup();
1362 }
1363 return 0;
1364 }
1365
1366 wrd = (insn >> 12) & 0xf;
1367 addr = new_tmp();
1368 if (gen_iwmmxt_address(s, insn, addr)) {
1369 dead_tmp(addr);
1370 return 1;
1371 }
1372 if (insn & ARM_CP_RW_BIT) {
1373 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1374 tmp = new_tmp();
1375 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1376 iwmmxt_store_creg(wrd, tmp);
1377 } else {
1378 i = 1;
1379 if (insn & (1 << 8)) {
1380 if (insn & (1 << 22)) { /* WLDRD */
1381 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1382 i = 0;
1383 } else { /* WLDRW wRd */
1384 tmp = gen_ld32(addr, IS_USER(s));
1385 }
1386 } else {
1387 if (insn & (1 << 22)) { /* WLDRH */
1388 tmp = gen_ld16u(addr, IS_USER(s));
1389 } else { /* WLDRB */
1390 tmp = gen_ld8u(addr, IS_USER(s));
1391 }
1392 }
1393 if (i) {
1394 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1395 dead_tmp(tmp);
1396 }
1397 gen_op_iwmmxt_movq_wRn_M0(wrd);
1398 }
1399 } else {
1400 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1401 tmp = iwmmxt_load_creg(wrd);
1402 gen_st32(tmp, addr, IS_USER(s));
1403 } else {
1404 gen_op_iwmmxt_movq_M0_wRn(wrd);
1405 tmp = new_tmp();
1406 if (insn & (1 << 8)) {
1407 if (insn & (1 << 22)) { /* WSTRD */
1408 dead_tmp(tmp);
1409 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1410 } else { /* WSTRW wRd */
1411 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1412 gen_st32(tmp, addr, IS_USER(s));
1413 }
1414 } else {
1415 if (insn & (1 << 22)) { /* WSTRH */
1416 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1417 gen_st16(tmp, addr, IS_USER(s));
1418 } else { /* WSTRB */
1419 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1420 gen_st8(tmp, addr, IS_USER(s));
1421 }
1422 }
1423 }
1424 }
1425 dead_tmp(addr);
1426 return 0;
1427 }
1428
1429 if ((insn & 0x0f000000) != 0x0e000000)
1430 return 1;
1431
1432 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1433 case 0x000: /* WOR */
1434 wrd = (insn >> 12) & 0xf;
1435 rd0 = (insn >> 0) & 0xf;
1436 rd1 = (insn >> 16) & 0xf;
1437 gen_op_iwmmxt_movq_M0_wRn(rd0);
1438 gen_op_iwmmxt_orq_M0_wRn(rd1);
1439 gen_op_iwmmxt_setpsr_nz();
1440 gen_op_iwmmxt_movq_wRn_M0(wrd);
1441 gen_op_iwmmxt_set_mup();
1442 gen_op_iwmmxt_set_cup();
1443 break;
1444 case 0x011: /* TMCR */
1445 if (insn & 0xf)
1446 return 1;
1447 rd = (insn >> 12) & 0xf;
1448 wrd = (insn >> 16) & 0xf;
1449 switch (wrd) {
1450 case ARM_IWMMXT_wCID:
1451 case ARM_IWMMXT_wCASF:
1452 break;
1453 case ARM_IWMMXT_wCon:
1454 gen_op_iwmmxt_set_cup();
1455 /* Fall through. */
1456 case ARM_IWMMXT_wCSSF:
1457 tmp = iwmmxt_load_creg(wrd);
1458 tmp2 = load_reg(s, rd);
1459 tcg_gen_andc_i32(tmp, tmp, tmp2);
1460 dead_tmp(tmp2);
1461 iwmmxt_store_creg(wrd, tmp);
1462 break;
1463 case ARM_IWMMXT_wCGR0:
1464 case ARM_IWMMXT_wCGR1:
1465 case ARM_IWMMXT_wCGR2:
1466 case ARM_IWMMXT_wCGR3:
1467 gen_op_iwmmxt_set_cup();
1468 tmp = load_reg(s, rd);
1469 iwmmxt_store_creg(wrd, tmp);
1470 break;
1471 default:
1472 return 1;
1473 }
1474 break;
1475 case 0x100: /* WXOR */
1476 wrd = (insn >> 12) & 0xf;
1477 rd0 = (insn >> 0) & 0xf;
1478 rd1 = (insn >> 16) & 0xf;
1479 gen_op_iwmmxt_movq_M0_wRn(rd0);
1480 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1481 gen_op_iwmmxt_setpsr_nz();
1482 gen_op_iwmmxt_movq_wRn_M0(wrd);
1483 gen_op_iwmmxt_set_mup();
1484 gen_op_iwmmxt_set_cup();
1485 break;
1486 case 0x111: /* TMRC */
1487 if (insn & 0xf)
1488 return 1;
1489 rd = (insn >> 12) & 0xf;
1490 wrd = (insn >> 16) & 0xf;
1491 tmp = iwmmxt_load_creg(wrd);
1492 store_reg(s, rd, tmp);
1493 break;
1494 case 0x300: /* WANDN */
1495 wrd = (insn >> 12) & 0xf;
1496 rd0 = (insn >> 0) & 0xf;
1497 rd1 = (insn >> 16) & 0xf;
1498 gen_op_iwmmxt_movq_M0_wRn(rd0);
1499 tcg_gen_not_i64(cpu_M0, cpu_M0); /* WANDN is wRn AND NOT wRm: bitwise NOT, not arithmetic negation */
1500 gen_op_iwmmxt_andq_M0_wRn(rd1);
1501 gen_op_iwmmxt_setpsr_nz();
1502 gen_op_iwmmxt_movq_wRn_M0(wrd);
1503 gen_op_iwmmxt_set_mup();
1504 gen_op_iwmmxt_set_cup();
1505 break;
1506 case 0x200: /* WAND */
1507 wrd = (insn >> 12) & 0xf;
1508 rd0 = (insn >> 0) & 0xf;
1509 rd1 = (insn >> 16) & 0xf;
1510 gen_op_iwmmxt_movq_M0_wRn(rd0);
1511 gen_op_iwmmxt_andq_M0_wRn(rd1);
1512 gen_op_iwmmxt_setpsr_nz();
1513 gen_op_iwmmxt_movq_wRn_M0(wrd);
1514 gen_op_iwmmxt_set_mup();
1515 gen_op_iwmmxt_set_cup();
1516 break;
1517 case 0x810: case 0xa10: /* WMADD */
1518 wrd = (insn >> 12) & 0xf;
1519 rd0 = (insn >> 0) & 0xf;
1520 rd1 = (insn >> 16) & 0xf;
1521 gen_op_iwmmxt_movq_M0_wRn(rd0);
1522 if (insn & (1 << 21))
1523 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1524 else
1525 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1526 gen_op_iwmmxt_movq_wRn_M0(wrd);
1527 gen_op_iwmmxt_set_mup();
1528 break;
1529 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1530 wrd = (insn >> 12) & 0xf;
1531 rd0 = (insn >> 16) & 0xf;
1532 rd1 = (insn >> 0) & 0xf;
1533 gen_op_iwmmxt_movq_M0_wRn(rd0);
1534 switch ((insn >> 22) & 3) {
1535 case 0:
1536 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1537 break;
1538 case 1:
1539 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1540 break;
1541 case 2:
1542 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1543 break;
1544 case 3:
1545 return 1;
1546 }
1547 gen_op_iwmmxt_movq_wRn_M0(wrd);
1548 gen_op_iwmmxt_set_mup();
1549 gen_op_iwmmxt_set_cup();
1550 break;
1551 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1552 wrd = (insn >> 12) & 0xf;
1553 rd0 = (insn >> 16) & 0xf;
1554 rd1 = (insn >> 0) & 0xf;
1555 gen_op_iwmmxt_movq_M0_wRn(rd0);
1556 switch ((insn >> 22) & 3) {
1557 case 0:
1558 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1559 break;
1560 case 1:
1561 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1562 break;
1563 case 2:
1564 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1565 break;
1566 case 3:
1567 return 1;
1568 }
1569 gen_op_iwmmxt_movq_wRn_M0(wrd);
1570 gen_op_iwmmxt_set_mup();
1571 gen_op_iwmmxt_set_cup();
1572 break;
1573 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1574 wrd = (insn >> 12) & 0xf;
1575 rd0 = (insn >> 16) & 0xf;
1576 rd1 = (insn >> 0) & 0xf;
1577 gen_op_iwmmxt_movq_M0_wRn(rd0);
1578 if (insn & (1 << 22))
1579 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1580 else
1581 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1582 if (!(insn & (1 << 20)))
1583 gen_op_iwmmxt_addl_M0_wRn(wrd);
1584 gen_op_iwmmxt_movq_wRn_M0(wrd);
1585 gen_op_iwmmxt_set_mup();
1586 break;
1587 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1588 wrd = (insn >> 12) & 0xf;
1589 rd0 = (insn >> 16) & 0xf;
1590 rd1 = (insn >> 0) & 0xf;
1591 gen_op_iwmmxt_movq_M0_wRn(rd0);
1592 if (insn & (1 << 21)) {
1593 if (insn & (1 << 20))
1594 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1595 else
1596 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1597 } else {
1598 if (insn & (1 << 20))
1599 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1600 else
1601 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1602 }
1603 gen_op_iwmmxt_movq_wRn_M0(wrd);
1604 gen_op_iwmmxt_set_mup();
1605 break;
1606 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1607 wrd = (insn >> 12) & 0xf;
1608 rd0 = (insn >> 16) & 0xf;
1609 rd1 = (insn >> 0) & 0xf;
1610 gen_op_iwmmxt_movq_M0_wRn(rd0);
1611 if (insn & (1 << 21))
1612 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1613 else
1614 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1615 if (!(insn & (1 << 20))) {
1616 iwmmxt_load_reg(cpu_V1, wrd);
1617 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1618 }
1619 gen_op_iwmmxt_movq_wRn_M0(wrd);
1620 gen_op_iwmmxt_set_mup();
1621 break;
1622 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1623 wrd = (insn >> 12) & 0xf;
1624 rd0 = (insn >> 16) & 0xf;
1625 rd1 = (insn >> 0) & 0xf;
1626 gen_op_iwmmxt_movq_M0_wRn(rd0);
1627 switch ((insn >> 22) & 3) {
1628 case 0:
1629 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1630 break;
1631 case 1:
1632 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1633 break;
1634 case 2:
1635 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1636 break;
1637 case 3:
1638 return 1;
1639 }
1640 gen_op_iwmmxt_movq_wRn_M0(wrd);
1641 gen_op_iwmmxt_set_mup();
1642 gen_op_iwmmxt_set_cup();
1643 break;
1644 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1645 wrd = (insn >> 12) & 0xf;
1646 rd0 = (insn >> 16) & 0xf;
1647 rd1 = (insn >> 0) & 0xf;
1648 gen_op_iwmmxt_movq_M0_wRn(rd0);
1649 if (insn & (1 << 22)) {
1650 if (insn & (1 << 20))
1651 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1652 else
1653 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1654 } else {
1655 if (insn & (1 << 20))
1656 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1657 else
1658 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1659 }
1660 gen_op_iwmmxt_movq_wRn_M0(wrd);
1661 gen_op_iwmmxt_set_mup();
1662 gen_op_iwmmxt_set_cup();
1663 break;
1664 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1665 wrd = (insn >> 12) & 0xf;
1666 rd0 = (insn >> 16) & 0xf;
1667 rd1 = (insn >> 0) & 0xf;
1668 gen_op_iwmmxt_movq_M0_wRn(rd0);
1669 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1670 tcg_gen_andi_i32(tmp, tmp, 7);
1671 iwmmxt_load_reg(cpu_V1, rd1);
1672 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1673 dead_tmp(tmp);
1674 gen_op_iwmmxt_movq_wRn_M0(wrd);
1675 gen_op_iwmmxt_set_mup();
1676 break;
1677 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1678 if (((insn >> 6) & 3) == 3)
1679 return 1;
1680 rd = (insn >> 12) & 0xf;
1681 wrd = (insn >> 16) & 0xf;
1682 tmp = load_reg(s, rd);
1683 gen_op_iwmmxt_movq_M0_wRn(wrd);
1684 switch ((insn >> 6) & 3) {
1685 case 0:
1686 tmp2 = tcg_const_i32(0xff);
1687 tmp3 = tcg_const_i32((insn & 7) << 3);
1688 break;
1689 case 1:
1690 tmp2 = tcg_const_i32(0xffff);
1691 tmp3 = tcg_const_i32((insn & 3) << 4);
1692 break;
1693 case 2:
1694 tmp2 = tcg_const_i32(0xffffffff);
1695 tmp3 = tcg_const_i32((insn & 1) << 5);
1696 break;
1697 default:
1698 TCGV_UNUSED(tmp2);
1699 TCGV_UNUSED(tmp3);
1700 }
1701 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1702 tcg_temp_free(tmp3);
1703 tcg_temp_free(tmp2);
1704 dead_tmp(tmp);
1705 gen_op_iwmmxt_movq_wRn_M0(wrd);
1706 gen_op_iwmmxt_set_mup();
1707 break;
1708 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1709 rd = (insn >> 12) & 0xf;
1710 wrd = (insn >> 16) & 0xf;
1711 if (rd == 15 || ((insn >> 22) & 3) == 3)
1712 return 1;
1713 gen_op_iwmmxt_movq_M0_wRn(wrd);
1714 tmp = new_tmp();
1715 switch ((insn >> 22) & 3) {
1716 case 0:
1717 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1718 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1719 if (insn & 8) {
1720 tcg_gen_ext8s_i32(tmp, tmp);
1721 } else {
1722 tcg_gen_andi_i32(tmp, tmp, 0xff);
1723 }
1724 break;
1725 case 1:
1726 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1727 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1728 if (insn & 8) {
1729 tcg_gen_ext16s_i32(tmp, tmp);
1730 } else {
1731 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1732 }
1733 break;
1734 case 2:
1735 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1736 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1737 break;
1738 }
1739 store_reg(s, rd, tmp);
1740 break;
1741 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1742 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1743 return 1;
1744 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1745 switch ((insn >> 22) & 3) {
1746 case 0:
1747 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1748 break;
1749 case 1:
1750 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1751 break;
1752 case 2:
1753 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1754 break;
1755 }
1756 tcg_gen_shli_i32(tmp, tmp, 28);
1757 gen_set_nzcv(tmp);
1758 dead_tmp(tmp);
1759 break;
1760 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1761 if (((insn >> 6) & 3) == 3)
1762 return 1;
1763 rd = (insn >> 12) & 0xf;
1764 wrd = (insn >> 16) & 0xf;
1765 tmp = load_reg(s, rd);
1766 switch ((insn >> 6) & 3) {
1767 case 0:
1768 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1769 break;
1770 case 1:
1771 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1772 break;
1773 case 2:
1774 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1775 break;
1776 }
1777 dead_tmp(tmp);
1778 gen_op_iwmmxt_movq_wRn_M0(wrd);
1779 gen_op_iwmmxt_set_mup();
1780 break;
1781 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1782 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1783 return 1;
1784 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1785 tmp2 = new_tmp();
1786 tcg_gen_mov_i32(tmp2, tmp);
1787 switch ((insn >> 22) & 3) {
1788 case 0:
1789 for (i = 0; i < 7; i ++) {
1790 tcg_gen_shli_i32(tmp2, tmp2, 4);
1791 tcg_gen_and_i32(tmp, tmp, tmp2);
1792 }
1793 break;
1794 case 1:
1795 for (i = 0; i < 3; i ++) {
1796 tcg_gen_shli_i32(tmp2, tmp2, 8);
1797 tcg_gen_and_i32(tmp, tmp, tmp2);
1798 }
1799 break;
1800 case 2:
1801 tcg_gen_shli_i32(tmp2, tmp2, 16);
1802 tcg_gen_and_i32(tmp, tmp, tmp2);
1803 break;
1804 }
1805 gen_set_nzcv(tmp);
1806 dead_tmp(tmp2);
1807 dead_tmp(tmp);
1808 break;
1809 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1810 wrd = (insn >> 12) & 0xf;
1811 rd0 = (insn >> 16) & 0xf;
1812 gen_op_iwmmxt_movq_M0_wRn(rd0);
1813 switch ((insn >> 22) & 3) {
1814 case 0:
1815 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1816 break;
1817 case 1:
1818 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1819 break;
1820 case 2:
1821 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1822 break;
1823 case 3:
1824 return 1;
1825 }
1826 gen_op_iwmmxt_movq_wRn_M0(wrd);
1827 gen_op_iwmmxt_set_mup();
1828 break;
1829 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1830 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1831 return 1;
1832 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1833 tmp2 = new_tmp();
1834 tcg_gen_mov_i32(tmp2, tmp);
1835 switch ((insn >> 22) & 3) {
1836 case 0:
1837 for (i = 0; i < 7; i ++) {
1838 tcg_gen_shli_i32(tmp2, tmp2, 4);
1839 tcg_gen_or_i32(tmp, tmp, tmp2);
1840 }
1841 break;
1842 case 1:
1843 for (i = 0; i < 3; i ++) {
1844 tcg_gen_shli_i32(tmp2, tmp2, 8);
1845 tcg_gen_or_i32(tmp, tmp, tmp2);
1846 }
1847 break;
1848 case 2:
1849 tcg_gen_shli_i32(tmp2, tmp2, 16);
1850 tcg_gen_or_i32(tmp, tmp, tmp2);
1851 break;
1852 }
1853 gen_set_nzcv(tmp);
1854 dead_tmp(tmp2);
1855 dead_tmp(tmp);
1856 break;
1857 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1858 rd = (insn >> 12) & 0xf;
1859 rd0 = (insn >> 16) & 0xf;
1860 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1861 return 1;
1862 gen_op_iwmmxt_movq_M0_wRn(rd0);
1863 tmp = new_tmp();
1864 switch ((insn >> 22) & 3) {
1865 case 0:
1866 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1867 break;
1868 case 1:
1869 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1870 break;
1871 case 2:
1872 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1873 break;
1874 }
1875 store_reg(s, rd, tmp);
1876 break;
1877 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1878 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1879 wrd = (insn >> 12) & 0xf;
1880 rd0 = (insn >> 16) & 0xf;
1881 rd1 = (insn >> 0) & 0xf;
1882 gen_op_iwmmxt_movq_M0_wRn(rd0);
1883 switch ((insn >> 22) & 3) {
1884 case 0:
1885 if (insn & (1 << 21))
1886 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1887 else
1888 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1889 break;
1890 case 1:
1891 if (insn & (1 << 21))
1892 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1893 else
1894 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1895 break;
1896 case 2:
1897 if (insn & (1 << 21))
1898 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1899 else
1900 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1901 break;
1902 case 3:
1903 return 1;
1904 }
1905 gen_op_iwmmxt_movq_wRn_M0(wrd);
1906 gen_op_iwmmxt_set_mup();
1907 gen_op_iwmmxt_set_cup();
1908 break;
1909 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1910 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1911 wrd = (insn >> 12) & 0xf;
1912 rd0 = (insn >> 16) & 0xf;
1913 gen_op_iwmmxt_movq_M0_wRn(rd0);
1914 switch ((insn >> 22) & 3) {
1915 case 0:
1916 if (insn & (1 << 21))
1917 gen_op_iwmmxt_unpacklsb_M0();
1918 else
1919 gen_op_iwmmxt_unpacklub_M0();
1920 break;
1921 case 1:
1922 if (insn & (1 << 21))
1923 gen_op_iwmmxt_unpacklsw_M0();
1924 else
1925 gen_op_iwmmxt_unpackluw_M0();
1926 break;
1927 case 2:
1928 if (insn & (1 << 21))
1929 gen_op_iwmmxt_unpacklsl_M0();
1930 else
1931 gen_op_iwmmxt_unpacklul_M0();
1932 break;
1933 case 3:
1934 return 1;
1935 }
1936 gen_op_iwmmxt_movq_wRn_M0(wrd);
1937 gen_op_iwmmxt_set_mup();
1938 gen_op_iwmmxt_set_cup();
1939 break;
1940 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1941 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1942 wrd = (insn >> 12) & 0xf;
1943 rd0 = (insn >> 16) & 0xf;
1944 gen_op_iwmmxt_movq_M0_wRn(rd0);
1945 switch ((insn >> 22) & 3) {
1946 case 0:
1947 if (insn & (1 << 21))
1948 gen_op_iwmmxt_unpackhsb_M0();
1949 else
1950 gen_op_iwmmxt_unpackhub_M0();
1951 break;
1952 case 1:
1953 if (insn & (1 << 21))
1954 gen_op_iwmmxt_unpackhsw_M0();
1955 else
1956 gen_op_iwmmxt_unpackhuw_M0();
1957 break;
1958 case 2:
1959 if (insn & (1 << 21))
1960 gen_op_iwmmxt_unpackhsl_M0();
1961 else
1962 gen_op_iwmmxt_unpackhul_M0();
1963 break;
1964 case 3:
1965 return 1;
1966 }
1967 gen_op_iwmmxt_movq_wRn_M0(wrd);
1968 gen_op_iwmmxt_set_mup();
1969 gen_op_iwmmxt_set_cup();
1970 break;
1971 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1972 case 0x214: case 0x614: case 0xa14: case 0xe14:
1973 if (((insn >> 22) & 3) == 0)
1974 return 1;
1975 wrd = (insn >> 12) & 0xf;
1976 rd0 = (insn >> 16) & 0xf;
1977 gen_op_iwmmxt_movq_M0_wRn(rd0);
1978 tmp = new_tmp();
1979 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1980 dead_tmp(tmp);
1981 return 1;
1982 }
1983 switch ((insn >> 22) & 3) {
1984 case 1:
1985 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
1986 break;
1987 case 2:
1988 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
1989 break;
1990 case 3:
1991 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
1992 break;
1993 }
1994 dead_tmp(tmp);
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2000 case 0x014: case 0x414: case 0x814: case 0xc14:
2001 if (((insn >> 22) & 3) == 0)
2002 return 1;
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
2006 tmp = new_tmp();
2007 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2008 dead_tmp(tmp);
2009 return 1;
2010 }
2011 switch ((insn >> 22) & 3) {
2012 case 1:
2013 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2014 break;
2015 case 2:
2016 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2017 break;
2018 case 3:
2019 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2020 break;
2021 }
2022 dead_tmp(tmp);
2023 gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2026 break;
2027 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2028 case 0x114: case 0x514: case 0x914: case 0xd14:
2029 if (((insn >> 22) & 3) == 0)
2030 return 1;
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 16) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0);
2034 tmp = new_tmp();
2035 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2036 dead_tmp(tmp);
2037 return 1;
2038 }
2039 switch ((insn >> 22) & 3) {
2040 case 1:
2041 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2042 break;
2043 case 2:
2044 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2045 break;
2046 case 3:
2047 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2048 break;
2049 }
2050 dead_tmp(tmp);
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2054 break;
2055 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2056 case 0x314: case 0x714: case 0xb14: case 0xf14:
2057 if (((insn >> 22) & 3) == 0)
2058 return 1;
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
2062 tmp = new_tmp();
2063 switch ((insn >> 22) & 3) {
2064 case 1:
2065 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2066 dead_tmp(tmp);
2067 return 1;
2068 }
2069 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2070 break;
2071 case 2:
2072 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2073 dead_tmp(tmp);
2074 return 1;
2075 }
2076 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2077 break;
2078 case 3:
2079 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2080 dead_tmp(tmp);
2081 return 1;
2082 }
2083 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2084 break;
2085 }
2086 dead_tmp(tmp);
2087 gen_op_iwmmxt_movq_wRn_M0(wrd);
2088 gen_op_iwmmxt_set_mup();
2089 gen_op_iwmmxt_set_cup();
2090 break;
2091 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2092 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2093 wrd = (insn >> 12) & 0xf;
2094 rd0 = (insn >> 16) & 0xf;
2095 rd1 = (insn >> 0) & 0xf;
2096 gen_op_iwmmxt_movq_M0_wRn(rd0);
2097 switch ((insn >> 22) & 3) {
2098 case 0:
2099 if (insn & (1 << 21))
2100 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2101 else
2102 gen_op_iwmmxt_minub_M0_wRn(rd1);
2103 break;
2104 case 1:
2105 if (insn & (1 << 21))
2106 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2107 else
2108 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2109 break;
2110 case 2:
2111 if (insn & (1 << 21))
2112 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2113 else
2114 gen_op_iwmmxt_minul_M0_wRn(rd1);
2115 break;
2116 case 3:
2117 return 1;
2118 }
2119 gen_op_iwmmxt_movq_wRn_M0(wrd);
2120 gen_op_iwmmxt_set_mup();
2121 break;
2122 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2123 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2124 wrd = (insn >> 12) & 0xf;
2125 rd0 = (insn >> 16) & 0xf;
2126 rd1 = (insn >> 0) & 0xf;
2127 gen_op_iwmmxt_movq_M0_wRn(rd0);
2128 switch ((insn >> 22) & 3) {
2129 case 0:
2130 if (insn & (1 << 21))
2131 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2132 else
2133 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2134 break;
2135 case 1:
2136 if (insn & (1 << 21))
2137 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2138 else
2139 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2140 break;
2141 case 2:
2142 if (insn & (1 << 21))
2143 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2144 else
2145 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2146 break;
2147 case 3:
2148 return 1;
2149 }
2150 gen_op_iwmmxt_movq_wRn_M0(wrd);
2151 gen_op_iwmmxt_set_mup();
2152 break;
2153 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2154 case 0x402: case 0x502: case 0x602: case 0x702:
2155 wrd = (insn >> 12) & 0xf;
2156 rd0 = (insn >> 16) & 0xf;
2157 rd1 = (insn >> 0) & 0xf;
2158 gen_op_iwmmxt_movq_M0_wRn(rd0);
2159 tmp = tcg_const_i32((insn >> 20) & 3);
2160 iwmmxt_load_reg(cpu_V1, rd1);
2161 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2162 tcg_temp_free(tmp);
2163 gen_op_iwmmxt_movq_wRn_M0(wrd);
2164 gen_op_iwmmxt_set_mup();
2165 break;
2166 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2167 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2168 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2169 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2170 wrd = (insn >> 12) & 0xf;
2171 rd0 = (insn >> 16) & 0xf;
2172 rd1 = (insn >> 0) & 0xf;
2173 gen_op_iwmmxt_movq_M0_wRn(rd0);
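/* The case values below imply a field layout of: bits [23:22] select the
   element size (00=byte, 01=halfword, 10=word) and bits [21:20] the
   saturation mode (00=none, 01=unsigned, 11=signed); other combinations
   are undefined.  */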
2174 switch ((insn >> 20) & 0xf) {
2175 case 0x0:
2176 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2177 break;
2178 case 0x1:
2179 gen_op_iwmmxt_subub_M0_wRn(rd1);
2180 break;
2181 case 0x3:
2182 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2183 break;
2184 case 0x4:
2185 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2186 break;
2187 case 0x5:
2188 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2189 break;
2190 case 0x7:
2191 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2192 break;
2193 case 0x8:
2194 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2195 break;
2196 case 0x9:
2197 gen_op_iwmmxt_subul_M0_wRn(rd1);
2198 break;
2199 case 0xb:
2200 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2201 break;
2202 default:
2203 return 1;
2204 }
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 gen_op_iwmmxt_set_cup();
2208 break;
2209 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2210 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2211 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2212 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2213 wrd = (insn >> 12) & 0xf;
2214 rd0 = (insn >> 16) & 0xf;
2215 gen_op_iwmmxt_movq_M0_wRn(rd0);
2216 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2217 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2218 tcg_temp_free(tmp);
2219 gen_op_iwmmxt_movq_wRn_M0(wrd);
2220 gen_op_iwmmxt_set_mup();
2221 gen_op_iwmmxt_set_cup();
2222 break;
2223 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2224 case 0x418: case 0x518: case 0x618: case 0x718:
2225 case 0x818: case 0x918: case 0xa18: case 0xb18:
2226 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2227 wrd = (insn >> 12) & 0xf;
2228 rd0 = (insn >> 16) & 0xf;
2229 rd1 = (insn >> 0) & 0xf;
2230 gen_op_iwmmxt_movq_M0_wRn(rd0);
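/* Same element-size/saturation field layout as WSUB above.  */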
2231 switch ((insn >> 20) & 0xf) {
2232 case 0x0:
2233 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2234 break;
2235 case 0x1:
2236 gen_op_iwmmxt_addub_M0_wRn(rd1);
2237 break;
2238 case 0x3:
2239 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2240 break;
2241 case 0x4:
2242 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2243 break;
2244 case 0x5:
2245 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2246 break;
2247 case 0x7:
2248 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2249 break;
2250 case 0x8:
2251 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2252 break;
2253 case 0x9:
2254 gen_op_iwmmxt_addul_M0_wRn(rd1);
2255 break;
2256 case 0xb:
2257 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2258 break;
2259 default:
2260 return 1;
2261 }
2262 gen_op_iwmmxt_movq_wRn_M0(wrd);
2263 gen_op_iwmmxt_set_mup();
2264 gen_op_iwmmxt_set_cup();
2265 break;
2266 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2267 case 0x408: case 0x508: case 0x608: case 0x708:
2268 case 0x808: case 0x908: case 0xa08: case 0xb08:
2269 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2270 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2271 return 1;
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
2276 switch ((insn >> 22) & 3) {
2277 case 1:
2278 if (insn & (1 << 21))
2279 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2280 else
2281 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2282 break;
2283 case 2:
2284 if (insn & (1 << 21))
2285 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2286 else
2287 gen_op_iwmmxt_packul_M0_wRn(rd1);
2288 break;
2289 case 3:
2290 if (insn & (1 << 21))
2291 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2292 else
2293 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2294 break;
2295 }
2296 gen_op_iwmmxt_movq_wRn_M0(wrd);
2297 gen_op_iwmmxt_set_mup();
2298 gen_op_iwmmxt_set_cup();
2299 break;
2300 case 0x201: case 0x203: case 0x205: case 0x207:
2301 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2302 case 0x211: case 0x213: case 0x215: case 0x217:
2303 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2304 wrd = (insn >> 5) & 0xf;
2305 rd0 = (insn >> 12) & 0xf;
2306 rd1 = (insn >> 0) & 0xf;
2307 if (rd0 == 0xf || rd1 == 0xf)
2308 return 1;
2309 gen_op_iwmmxt_movq_M0_wRn(wrd);
2310 tmp = load_reg(s, rd0);
2311 tmp2 = load_reg(s, rd1);
2312 switch ((insn >> 16) & 0xf) {
2313 case 0x0: /* TMIA */
2314 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2315 break;
2316 case 0x8: /* TMIAPH */
2317 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2318 break;
2319 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2320 if (insn & (1 << 16))
2321 tcg_gen_shri_i32(tmp, tmp, 16);
2322 if (insn & (1 << 17))
2323 tcg_gen_shri_i32(tmp2, tmp2, 16);
2324 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2325 break;
2326 default:
2327 dead_tmp(tmp2);
2328 dead_tmp(tmp);
2329 return 1;
2330 }
2331 dead_tmp(tmp2);
2332 dead_tmp(tmp);
2333 gen_op_iwmmxt_movq_wRn_M0(wrd);
2334 gen_op_iwmmxt_set_mup();
2335 break;
2336 default:
2337 return 1;
2338 }
2339
2340 return 0;
2341 }
2342
2343 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2344 (i.e. an undefined instruction). */
2345 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2346 {
2347 int acc, rd0, rd1, rdhi, rdlo;
2348 TCGv tmp, tmp2;
2349
2350 if ((insn & 0x0ff00f10) == 0x0e200010) {
2351 /* Multiply with Internal Accumulate Format */
2352 rd0 = (insn >> 12) & 0xf;
2353 rd1 = insn & 0xf;
2354 acc = (insn >> 5) & 7;
2355
2356 if (acc != 0)
2357 return 1;
2358
2359 tmp = load_reg(s, rd0);
2360 tmp2 = load_reg(s, rd1);
2361 switch ((insn >> 16) & 0xf) {
2362 case 0x0: /* MIA */
2363 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2364 break;
2365 case 0x8: /* MIAPH */
2366 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2367 break;
2368 case 0xc: /* MIABB */
2369 case 0xd: /* MIABT */
2370 case 0xe: /* MIATB */
2371 case 0xf: /* MIATT */
2372 if (insn & (1 << 16))
2373 tcg_gen_shri_i32(tmp, tmp, 16);
2374 if (insn & (1 << 17))
2375 tcg_gen_shri_i32(tmp2, tmp2, 16);
2376 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2377 break;
2378 default:
2379 return 1;
2380 }
2381 dead_tmp(tmp2);
2382 dead_tmp(tmp);
2383
2384 gen_op_iwmmxt_movq_wRn_M0(acc);
2385 return 0;
2386 }
2387
2388 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2389 /* Internal Accumulator Access Format */
2390 rdhi = (insn >> 16) & 0xf;
2391 rdlo = (insn >> 12) & 0xf;
2392 acc = insn & 7;
2393
2394 if (acc != 0)
2395 return 1;
2396
2397 if (insn & ARM_CP_RW_BIT) { /* MRA */
2398 iwmmxt_load_reg(cpu_V0, acc);
2399 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2400 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2401 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
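/* The XScale DSP accumulator is 40 bits wide; only bits [39:32] are
   significant in the high half.  */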
2402 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2403 } else { /* MAR */
2404 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2405 iwmmxt_store_reg(cpu_V0, acc);
2406 }
2407 return 0;
2408 }
2409
2410 return 1;
2411 }
2412
2413 /* Disassemble a system coprocessor instruction. Return nonzero if the
2414 instruction is not defined. */
2415 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2416 {
2417 TCGv tmp, tmp2;
2418 uint32_t rd = (insn >> 12) & 0xf;
2419 uint32_t cp = (insn >> 8) & 0xf;
2420 if (IS_USER(s)) {
2421 return 1;
2422 }
2423
2424 if (insn & ARM_CP_RW_BIT) {
2425 if (!env->cp[cp].cp_read)
2426 return 1;
2427 gen_set_pc_im(s->pc);
2428 tmp = new_tmp();
2429 tmp2 = tcg_const_i32(insn);
2430 gen_helper_get_cp(tmp, cpu_env, tmp2);
2431 tcg_temp_free(tmp2);
2432 store_reg(s, rd, tmp);
2433 } else {
2434 if (!env->cp[cp].cp_write)
2435 return 1;
2436 gen_set_pc_im(s->pc);
2437 tmp = load_reg(s, rd);
2438 tmp2 = tcg_const_i32(insn);
2439 gen_helper_set_cp(cpu_env, tmp2, tmp);
2440 tcg_temp_free(tmp2);
2441 dead_tmp(tmp);
2442 }
2443 return 0;
2444 }
2445
2446 static int cp15_user_ok(uint32_t insn)
2447 {
2448 int cpn = (insn >> 16) & 0xf;
2449 int cpm = insn & 0xf;
2450 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2451
2452 if (cpn == 13 && cpm == 0) {
2453 /* TLS register. */
2454 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2455 return 1;
2456 }
2457 if (cpn == 7) {
2458 /* ISB, DSB, DMB. */
2459 if ((cpm == 5 && op == 4)
2460 || (cpm == 10 && (op == 4 || op == 5)))
2461 return 1;
2462 }
2463 return 0;
2464 }
2465
2466 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2467 {
2468 TCGv tmp;
2469 int cpn = (insn >> 16) & 0xf;
2470 int cpm = insn & 0xf;
2471 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2472
2473 if (!arm_feature(env, ARM_FEATURE_V6K))
2474 return 0;
2475
2476 if (!(cpn == 13 && cpm == 0))
2477 return 0;
2478
2479 if (insn & ARM_CP_RW_BIT) {
2480 switch (op) {
2481 case 2:
2482 tmp = load_cpu_field(cp15.c13_tls1);
2483 break;
2484 case 3:
2485 tmp = load_cpu_field(cp15.c13_tls2);
2486 break;
2487 case 4:
2488 tmp = load_cpu_field(cp15.c13_tls3);
2489 break;
2490 default:
2491 return 0;
2492 }
2493 store_reg(s, rd, tmp);
2494
2495 } else {
2496 tmp = load_reg(s, rd);
2497 switch (op) {
2498 case 2:
2499 store_cpu_field(tmp, cp15.c13_tls1);
2500 break;
2501 case 3:
2502 store_cpu_field(tmp, cp15.c13_tls2);
2503 break;
2504 case 4:
2505 store_cpu_field(tmp, cp15.c13_tls3);
2506 break;
2507 default:
2508 dead_tmp(tmp);
2509 return 0;
2510 }
2511 }
2512 return 1;
2513 }
2514
2515 /* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2516 instruction is not defined. */
2517 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2518 {
2519 uint32_t rd;
2520 TCGv tmp, tmp2;
2521
2522 /* M profile cores use memory mapped registers instead of cp15. */
2523 if (arm_feature(env, ARM_FEATURE_M))
2524 return 1;
2525
2526 if ((insn & (1 << 25)) == 0) {
2527 if (insn & (1 << 20)) {
2528 /* mrrc */
2529 return 1;
2530 }
2531 /* mcrr. Used for block cache operations, so implement as no-op. */
2532 return 0;
2533 }
2534 if ((insn & (1 << 4)) == 0) {
2535 /* cdp */
2536 return 1;
2537 }
2538 if (IS_USER(s) && !cp15_user_ok(insn)) {
2539 return 1;
2540 }
2541 if ((insn & 0x0fff0fff) == 0x0e070f90
2542 || (insn & 0x0fff0fff) == 0x0e070f58) {
2543 /* Wait for interrupt. */
2544 gen_set_pc_im(s->pc);
2545 s->is_jmp = DISAS_WFI;
2546 return 0;
2547 }
2548 rd = (insn >> 12) & 0xf;
2549
2550 if (cp15_tls_load_store(env, s, insn, rd))
2551 return 0;
2552
2553 tmp2 = tcg_const_i32(insn);
2554 if (insn & ARM_CP_RW_BIT) {
2555 tmp = new_tmp();
2556 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2557 /* A destination of r15 would set the condition codes; that case is unimplemented, so the result is discarded. */
2558 if (rd != 15)
2559 store_reg(s, rd, tmp);
2560 else
2561 dead_tmp(tmp);
2562 } else {
2563 tmp = load_reg(s, rd);
2564 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2565 dead_tmp(tmp);
2566 /* Normally we would always end the TB here, but Linux
2567 * arch/arm/mach-pxa/sleep.S expects two instructions following
2568 * an MMU enable to execute from cache. Imitate this behaviour. */
2569 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2570 (insn & 0x0fff0fff) != 0x0e010f10)
2571 gen_lookup_tb(s);
2572 }
2573 tcg_temp_free_i32(tmp2);
2574 return 0;
2575 }
2576
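/* Helpers for extracting VFP register numbers from an instruction: a
   single-precision register is a 4-bit field plus one extra low bit,
   while VFP3 double-precision registers gain a fifth (high) bit from
   the same instruction bit; pre-VFP3 cores treat that bit as UNDEF.  */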
2577 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2578 #define VFP_SREG(insn, bigbit, smallbit) \
2579 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2580 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2581 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2582 reg = (((insn) >> (bigbit)) & 0x0f) \
2583 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2584 } else { \
2585 if (insn & (1 << (smallbit))) \
2586 return 1; \
2587 reg = ((insn) >> (bigbit)) & 0x0f; \
2588 }} while (0)
2589
2590 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2591 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2592 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2593 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2594 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2595 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2596
2597 /* Move between integer and VFP cores. */
2598 static TCGv gen_vfp_mrs(void)
2599 {
2600 TCGv tmp = new_tmp();
2601 tcg_gen_mov_i32(tmp, cpu_F0s);
2602 return tmp;
2603 }
2604
2605 static void gen_vfp_msr(TCGv tmp)
2606 {
2607 tcg_gen_mov_i32(cpu_F0s, tmp);
2608 dead_tmp(tmp);
2609 }
2610
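/* The following helpers replicate one 8- or 16-bit lane across all
   lanes of a 32-bit value.  */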
2611 static void gen_neon_dup_u8(TCGv var, int shift)
2612 {
2613 TCGv tmp = new_tmp();
2614 if (shift)
2615 tcg_gen_shri_i32(var, var, shift);
2616 tcg_gen_ext8u_i32(var, var);
2617 tcg_gen_shli_i32(tmp, var, 8);
2618 tcg_gen_or_i32(var, var, tmp);
2619 tcg_gen_shli_i32(tmp, var, 16);
2620 tcg_gen_or_i32(var, var, tmp);
2621 dead_tmp(tmp);
2622 }
2623
2624 static void gen_neon_dup_low16(TCGv var)
2625 {
2626 TCGv tmp = new_tmp();
2627 tcg_gen_ext16u_i32(var, var);
2628 tcg_gen_shli_i32(tmp, var, 16);
2629 tcg_gen_or_i32(var, var, tmp);
2630 dead_tmp(tmp);
2631 }
2632
2633 static void gen_neon_dup_high16(TCGv var)
2634 {
2635 TCGv tmp = new_tmp();
2636 tcg_gen_andi_i32(var, var, 0xffff0000);
2637 tcg_gen_shri_i32(tmp, var, 16);
2638 tcg_gen_or_i32(var, var, tmp);
2639 dead_tmp(tmp);
2640 }
2641
2642 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2643 (i.e. an undefined instruction). */
2644 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2645 {
2646 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2647 int dp, veclen;
2648 TCGv addr;
2649 TCGv tmp;
2650 TCGv tmp2;
2651
2652 if (!arm_feature(env, ARM_FEATURE_VFP))
2653 return 1;
2654
2655 if (!s->vfp_enabled) {
2656 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2657 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2658 return 1;
2659 rn = (insn >> 16) & 0xf;
2660 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2661 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2662 return 1;
2663 }
2664 dp = ((insn & 0xf00) == 0xb00);
2665 switch ((insn >> 24) & 0xf) {
2666 case 0xe:
2667 if (insn & (1 << 4)) {
2668 /* single register transfer */
2669 rd = (insn >> 12) & 0xf;
2670 if (dp) {
2671 int size;
2672 int pass;
2673
2674 VFP_DREG_N(rn, insn);
2675 if (insn & 0xf)
2676 return 1;
2677 if (insn & 0x00c00060
2678 && !arm_feature(env, ARM_FEATURE_NEON))
2679 return 1;
2680
2681 pass = (insn >> 21) & 1;
2682 if (insn & (1 << 22)) {
2683 size = 0;
2684 offset = ((insn >> 5) & 3) * 8;
2685 } else if (insn & (1 << 5)) {
2686 size = 1;
2687 offset = (insn & (1 << 6)) ? 16 : 0;
2688 } else {
2689 size = 2;
2690 offset = 0;
2691 }
2692 if (insn & ARM_CP_RW_BIT) {
2693 /* vfp->arm */
2694 tmp = neon_load_reg(rn, pass);
2695 switch (size) {
2696 case 0:
2697 if (offset)
2698 tcg_gen_shri_i32(tmp, tmp, offset);
2699 if (insn & (1 << 23))
2700 gen_uxtb(tmp);
2701 else
2702 gen_sxtb(tmp);
2703 break;
2704 case 1:
2705 if (insn & (1 << 23)) {
2706 if (offset) {
2707 tcg_gen_shri_i32(tmp, tmp, 16);
2708 } else {
2709 gen_uxth(tmp);
2710 }
2711 } else {
2712 if (offset) {
2713 tcg_gen_sari_i32(tmp, tmp, 16);
2714 } else {
2715 gen_sxth(tmp);
2716 }
2717 }
2718 break;
2719 case 2:
2720 break;
2721 }
2722 store_reg(s, rd, tmp);
2723 } else {
2724 /* arm->vfp */
2725 tmp = load_reg(s, rd);
2726 if (insn & (1 << 23)) {
2727 /* VDUP */
2728 if (size == 0) {
2729 gen_neon_dup_u8(tmp, 0);
2730 } else if (size == 1) {
2731 gen_neon_dup_low16(tmp);
2732 }
2733 for (n = 0; n <= pass * 2; n++) {
2734 tmp2 = new_tmp();
2735 tcg_gen_mov_i32(tmp2, tmp);
2736 neon_store_reg(rn, n, tmp2);
2737 }
2738 neon_store_reg(rn, n, tmp);
2739 } else {
2740 /* VMOV */
2741 switch (size) {
2742 case 0:
2743 tmp2 = neon_load_reg(rn, pass);
2744 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2745 dead_tmp(tmp2);
2746 break;
2747 case 1:
2748 tmp2 = neon_load_reg(rn, pass);
2749 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2750 dead_tmp(tmp2);
2751 break;
2752 case 2:
2753 break;
2754 }
2755 neon_store_reg(rn, pass, tmp);
2756 }
2757 }
2758 } else { /* !dp */
2759 if ((insn & 0x6f) != 0x00)
2760 return 1;
2761 rn = VFP_SREG_N(insn);
2762 if (insn & ARM_CP_RW_BIT) {
2763 /* vfp->arm */
2764 if (insn & (1 << 21)) {
2765 /* system register */
2766 rn >>= 1;
2767
2768 switch (rn) {
2769 case ARM_VFP_FPSID:
2770 /* VFP2 allows access to FPSID from userspace.
2771 VFP3 restricts all ID registers to privileged
2772 accesses. */
2773 if (IS_USER(s)
2774 && arm_feature(env, ARM_FEATURE_VFP3))
2775 return 1;
2776 tmp = load_cpu_field(vfp.xregs[rn]);
2777 break;
2778 case ARM_VFP_FPEXC:
2779 if (IS_USER(s))
2780 return 1;
2781 tmp = load_cpu_field(vfp.xregs[rn]);
2782 break;
2783 case ARM_VFP_FPINST:
2784 case ARM_VFP_FPINST2:
2785 /* Not present in VFP3. */
2786 if (IS_USER(s)
2787 || arm_feature(env, ARM_FEATURE_VFP3))
2788 return 1;
2789 tmp = load_cpu_field(vfp.xregs[rn]);
2790 break;
2791 case ARM_VFP_FPSCR:
2792 if (rd == 15) {
2793 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2794 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2795 } else {
2796 tmp = new_tmp();
2797 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2798 }
2799 break;
2800 case ARM_VFP_MVFR0:
2801 case ARM_VFP_MVFR1:
2802 if (IS_USER(s)
2803 || !arm_feature(env, ARM_FEATURE_VFP3))
2804 return 1;
2805 tmp = load_cpu_field(vfp.xregs[rn]);
2806 break;
2807 default:
2808 return 1;
2809 }
2810 } else {
2811 gen_mov_F0_vreg(0, rn);
2812 tmp = gen_vfp_mrs();
2813 }
2814 if (rd == 15) {
2815 /* Set the 4 flag bits in the CPSR. */
2816 gen_set_nzcv(tmp);
2817 dead_tmp(tmp);
2818 } else {
2819 store_reg(s, rd, tmp);
2820 }
2821 } else {
2822 /* arm->vfp */
2823 tmp = load_reg(s, rd);
2824 if (insn & (1 << 21)) {
2825 rn >>= 1;
2826 /* system register */
2827 switch (rn) {
2828 case ARM_VFP_FPSID:
2829 case ARM_VFP_MVFR0:
2830 case ARM_VFP_MVFR1:
2831 /* Writes are ignored. */
2832 break;
2833 case ARM_VFP_FPSCR:
2834 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2835 dead_tmp(tmp);
2836 gen_lookup_tb(s);
2837 break;
2838 case ARM_VFP_FPEXC:
2839 if (IS_USER(s))
2840 return 1;
2841 /* TODO: VFP subarchitecture support.
2842 * For now, keep the EN bit only */
2843 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2844 store_cpu_field(tmp, vfp.xregs[rn]);
2845 gen_lookup_tb(s);
2846 break;
2847 case ARM_VFP_FPINST:
2848 case ARM_VFP_FPINST2:
2849 store_cpu_field(tmp, vfp.xregs[rn]);
2850 break;
2851 default:
2852 return 1;
2853 }
2854 } else {
2855 gen_vfp_msr(tmp);
2856 gen_mov_vreg_F0(0, rn);
2857 }
2858 }
2859 }
2860 } else {
2861 /* data processing */
2862 /* The opcode is in bits 23, 21, 20 and 6. */
2863 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2864 if (dp) {
2865 if (op == 15) {
2866 /* rn is opcode */
2867 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2868 } else {
2869 /* rn is register number */
2870 VFP_DREG_N(rn, insn);
2871 }
2872
2873 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2874 /* Integer or single precision destination. */
2875 rd = VFP_SREG_D(insn);
2876 } else {
2877 VFP_DREG_D(rd, insn);
2878 }
2879 if (op == 15 &&
2880 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2881 /* VCVT from int is always from S reg regardless of dp bit.
2882 * VCVT with immediate frac_bits has same format as SREG_M
2883 */
2884 rm = VFP_SREG_M(insn);
2885 } else {
2886 VFP_DREG_M(rm, insn);
2887 }
2888 } else {
2889 rn = VFP_SREG_N(insn);
2890 if (op == 15 && rn == 15) {
2891 /* Double precision destination. */
2892 VFP_DREG_D(rd, insn);
2893 } else {
2894 rd = VFP_SREG_D(insn);
2895 }
2896 /* NB that we implicitly rely on the encoding for the frac_bits
2897 * in VCVT of fixed to float being the same as that of an SREG_M
2898 */
2899 rm = VFP_SREG_M(insn);
2900 }
2901
2902 veclen = s->vec_len;
2903 if (op == 15 && rn > 3)
2904 veclen = 0;
2905
2906 /* Shut up compiler warnings. */
2907 delta_m = 0;
2908 delta_d = 0;
2909 bank_mask = 0;
2910
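/* VFP short-vector mode: the register file divides into banks of four
   double or eight single registers.  A destination in bank 0 makes the
   operation scalar; an rm operand in bank 0 makes it mixed
   scalar/vector; anything else is a full vector operation.  */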
2911 if (veclen > 0) {
2912 if (dp)
2913 bank_mask = 0xc;
2914 else
2915 bank_mask = 0x18;
2916
2917 /* Figure out what type of vector operation this is. */
2918 if ((rd & bank_mask) == 0) {
2919 /* scalar */
2920 veclen = 0;
2921 } else {
2922 if (dp)
2923 delta_d = (s->vec_stride >> 1) + 1;
2924 else
2925 delta_d = s->vec_stride + 1;
2926
2927 if ((rm & bank_mask) == 0) {
2928 /* mixed scalar/vector */
2929 delta_m = 0;
2930 } else {
2931 /* vector */
2932 delta_m = delta_d;
2933 }
2934 }
2935 }
2936
2937 /* Load the initial operands. */
2938 if (op == 15) {
2939 switch (rn) {
2940 case 16:
2941 case 17:
2942 /* Integer source */
2943 gen_mov_F0_vreg(0, rm);
2944 break;
2945 case 8:
2946 case 9:
2947 /* Compare */
2948 gen_mov_F0_vreg(dp, rd);
2949 gen_mov_F1_vreg(dp, rm);
2950 break;
2951 case 10:
2952 case 11:
2953 /* Compare with zero */
2954 gen_mov_F0_vreg(dp, rd);
2955 gen_vfp_F1_ld0(dp);
2956 break;
2957 case 20:
2958 case 21:
2959 case 22:
2960 case 23:
2961 case 28:
2962 case 29:
2963 case 30:
2964 case 31:
2965 /* Source and destination the same. */
2966 gen_mov_F0_vreg(dp, rd);
2967 break;
2968 default:
2969 /* One source operand. */
2970 gen_mov_F0_vreg(dp, rm);
2971 break;
2972 }
2973 } else {
2974 /* Two source operands. */
2975 gen_mov_F0_vreg(dp, rn);
2976 gen_mov_F1_vreg(dp, rm);
2977 }
2978
2979 for (;;) {
2980 /* Perform the calculation. */
2981 switch (op) {
2982 case 0: /* mac: fd + (fn * fm) */
2983 gen_vfp_mul(dp);
2984 gen_mov_F1_vreg(dp, rd);
2985 gen_vfp_add(dp);
2986 break;
2987 case 1: /* nmac: fd - (fn * fm) */
2988 gen_vfp_mul(dp);
2989 gen_vfp_neg(dp);
2990 gen_mov_F1_vreg(dp, rd);
2991 gen_vfp_add(dp);
2992 break;
2993 case 2: /* msc: -fd + (fn * fm) */
2994 gen_vfp_mul(dp);
2995 gen_mov_F1_vreg(dp, rd);
2996 gen_vfp_sub(dp);
2997 break;
2998 case 3: /* nmsc: -fd - (fn * fm) */
2999 gen_vfp_mul(dp);
3000 gen_vfp_neg(dp);
3001 gen_mov_F1_vreg(dp, rd);
3002 gen_vfp_sub(dp);
3003 break;
3004 case 4: /* mul: fn * fm */
3005 gen_vfp_mul(dp);
3006 break;
3007 case 5: /* nmul: -(fn * fm) */
3008 gen_vfp_mul(dp);
3009 gen_vfp_neg(dp);
3010 break;
3011 case 6: /* add: fn + fm */
3012 gen_vfp_add(dp);
3013 break;
3014 case 7: /* sub: fn - fm */
3015 gen_vfp_sub(dp);
3016 break;
3017 case 8: /* div: fn / fm */
3018 gen_vfp_div(dp);
3019 break;
3020 case 14: /* fconst */
3021 if (!arm_feature(env, ARM_FEATURE_VFP3))
3022 return 1;
3023
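/* VFP3 VMOV immediate: the 8-bit value in bits [19:16] and [3:0]
   encodes a sign bit, a small biased exponent and a 4-bit fraction,
   expanded here along the lines of the ARM ARM VFPExpandImm()
   pseudocode.  */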
3024 n = (insn << 12) & 0x80000000;
3025 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3026 if (dp) {
3027 if (i & 0x40)
3028 i |= 0x3f80;
3029 else
3030 i |= 0x4000;
3031 n |= i << 16;
3032 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3033 } else {
3034 if (i & 0x40)
3035 i |= 0x780;
3036 else
3037 i |= 0x800;
3038 n |= i << 19;
3039 tcg_gen_movi_i32(cpu_F0s, n);
3040 }
3041 break;
3042 case 15: /* extension space */
3043 switch (rn) {
3044 case 0: /* cpy */
3045 /* no-op */
3046 break;
3047 case 1: /* abs */
3048 gen_vfp_abs(dp);
3049 break;
3050 case 2: /* neg */
3051 gen_vfp_neg(dp);
3052 break;
3053 case 3: /* sqrt */
3054 gen_vfp_sqrt(dp);
3055 break;
3056 case 4: /* vcvtb.f32.f16 */
3057 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3058 return 1;
3059 tmp = gen_vfp_mrs();
3060 tcg_gen_ext16u_i32(tmp, tmp);
3061 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3062 dead_tmp(tmp);
3063 break;
3064 case 5: /* vcvtt.f32.f16 */
3065 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3066 return 1;
3067 tmp = gen_vfp_mrs();
3068 tcg_gen_shri_i32(tmp, tmp, 16);
3069 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3070 dead_tmp(tmp);
3071 break;
3072 case 6: /* vcvtb.f16.f32 */
3073 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3074 return 1;
3075 tmp = new_tmp();
3076 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3077 gen_mov_F0_vreg(0, rd);
3078 tmp2 = gen_vfp_mrs();
3079 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3080 tcg_gen_or_i32(tmp, tmp, tmp2);
3081 dead_tmp(tmp2);
3082 gen_vfp_msr(tmp);
3083 break;
3084 case 7: /* vcvtt.f16.f32 */
3085 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3086 return 1;
3087 tmp = new_tmp();
3088 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3089 tcg_gen_shli_i32(tmp, tmp, 16);
3090 gen_mov_F0_vreg(0, rd);
3091 tmp2 = gen_vfp_mrs();
3092 tcg_gen_ext16u_i32(tmp2, tmp2);
3093 tcg_gen_or_i32(tmp, tmp, tmp2);
3094 dead_tmp(tmp2);
3095 gen_vfp_msr(tmp);
3096 break;
3097 case 8: /* cmp */
3098 gen_vfp_cmp(dp);
3099 break;
3100 case 9: /* cmpe */
3101 gen_vfp_cmpe(dp);
3102 break;
3103 case 10: /* cmpz */
3104 gen_vfp_cmp(dp);
3105 break;
3106 case 11: /* cmpez */
3107 gen_vfp_F1_ld0(dp);
3108 gen_vfp_cmpe(dp);
3109 break;
3110 case 15: /* single<->double conversion */
3111 if (dp)
3112 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3113 else
3114 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3115 break;
3116 case 16: /* fuito */
3117 gen_vfp_uito(dp);
3118 break;
3119 case 17: /* fsito */
3120 gen_vfp_sito(dp);
3121 break;
3122 case 20: /* fshto */
3123 if (!arm_feature(env, ARM_FEATURE_VFP3))
3124 return 1;
3125 gen_vfp_shto(dp, 16 - rm);
3126 break;
3127 case 21: /* fslto */
3128 if (!arm_feature(env, ARM_FEATURE_VFP3))
3129 return 1;
3130 gen_vfp_slto(dp, 32 - rm);
3131 break;
3132 case 22: /* fuhto */
3133 if (!arm_feature(env, ARM_FEATURE_VFP3))
3134 return 1;
3135 gen_vfp_uhto(dp, 16 - rm);
3136 break;
3137 case 23: /* fulto */
3138 if (!arm_feature(env, ARM_FEATURE_VFP3))
3139 return 1;
3140 gen_vfp_ulto(dp, 32 - rm);
3141 break;
3142 case 24: /* ftoui */
3143 gen_vfp_toui(dp);
3144 break;
3145 case 25: /* ftouiz */
3146 gen_vfp_touiz(dp);
3147 break;
3148 case 26: /* ftosi */
3149 gen_vfp_tosi(dp);
3150 break;
3151 case 27: /* ftosiz */
3152 gen_vfp_tosiz(dp);
3153 break;
3154 case 28: /* ftosh */
3155 if (!arm_feature(env, ARM_FEATURE_VFP3))
3156 return 1;
3157 gen_vfp_tosh(dp, 16 - rm);
3158 break;
3159 case 29: /* ftosl */
3160 if (!arm_feature(env, ARM_FEATURE_VFP3))
3161 return 1;
3162 gen_vfp_tosl(dp, 32 - rm);
3163 break;
3164 case 30: /* ftouh */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
3167 gen_vfp_touh(dp, 16 - rm);
3168 break;
3169 case 31: /* ftoul */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
3172 gen_vfp_toul(dp, 32 - rm);
3173 break;
3174 default: /* undefined */
3175 printf ("rn:%d\n", rn);
3176 return 1;
3177 }
3178 break;
3179 default: /* undefined */
3180 printf ("op:%d\n", op);
3181 return 1;
3182 }
3183
3184 /* Write back the result. */
3185 if (op == 15 && (rn >= 8 && rn <= 11))
3186 ; /* Comparison, do nothing. */
3187 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3188 /* VCVT double to int: always integer result. */
3189 gen_mov_vreg_F0(0, rd);
3190 else if (op == 15 && rn == 15)
3191 /* conversion */
3192 gen_mov_vreg_F0(!dp, rd);
3193 else
3194 gen_mov_vreg_F0(dp, rd);
3195
3196 /* break out of the loop if we have finished */
3197 if (veclen == 0)
3198 break;
3199
3200 if (op == 15 && delta_m == 0) {
3201 /* single source one-many */
3202 while (veclen--) {
3203 rd = ((rd + delta_d) & (bank_mask - 1))
3204 | (rd & bank_mask);
3205 gen_mov_vreg_F0(dp, rd);
3206 }
3207 break;
3208 }
3209 /* Set up the next operands. */
3210 veclen--;
3211 rd = ((rd + delta_d) & (bank_mask - 1))
3212 | (rd & bank_mask);
3213
3214 if (op == 15) {
3215 /* One source operand. */
3216 rm = ((rm + delta_m) & (bank_mask - 1))
3217 | (rm & bank_mask);
3218 gen_mov_F0_vreg(dp, rm);
3219 } else {
3220 /* Two source operands. */
3221 rn = ((rn + delta_d) & (bank_mask - 1))
3222 | (rn & bank_mask);
3223 gen_mov_F0_vreg(dp, rn);
3224 if (delta_m) {
3225 rm = ((rm + delta_m) & (bank_mask - 1))
3226 | (rm & bank_mask);
3227 gen_mov_F1_vreg(dp, rm);
3228 }
3229 }
3230 }
3231 }
3232 break;
3233 case 0xc:
3234 case 0xd:
3235 if ((insn & 0x03e00000) == 0x00400000) {
3236 /* two-register transfer */
3237 rn = (insn >> 16) & 0xf;
3238 rd = (insn >> 12) & 0xf;
3239 if (dp) {
3240 VFP_DREG_M(rm, insn);
3241 } else {
3242 rm = VFP_SREG_M(insn);
3243 }
3244
3245 if (insn & ARM_CP_RW_BIT) {
3246 /* vfp->arm */
3247 if (dp) {
3248 gen_mov_F0_vreg(0, rm * 2);
3249 tmp = gen_vfp_mrs();
3250 store_reg(s, rd, tmp);
3251 gen_mov_F0_vreg(0, rm * 2 + 1);
3252 tmp = gen_vfp_mrs();
3253 store_reg(s, rn, tmp);
3254 } else {
3255 gen_mov_F0_vreg(0, rm);
3256 tmp = gen_vfp_mrs();
3257 store_reg(s, rn, tmp);
3258 gen_mov_F0_vreg(0, rm + 1);
3259 tmp = gen_vfp_mrs();
3260 store_reg(s, rd, tmp);
3261 }
3262 } else {
3263 /* arm->vfp */
3264 if (dp) {
3265 tmp = load_reg(s, rd);
3266 gen_vfp_msr(tmp);
3267 gen_mov_vreg_F0(0, rm * 2);
3268 tmp = load_reg(s, rn);
3269 gen_vfp_msr(tmp);
3270 gen_mov_vreg_F0(0, rm * 2 + 1);
3271 } else {
3272 tmp = load_reg(s, rn);
3273 gen_vfp_msr(tmp);
3274 gen_mov_vreg_F0(0, rm);
3275 tmp = load_reg(s, rd);
3276 gen_vfp_msr(tmp);
3277 gen_mov_vreg_F0(0, rm + 1);
3278 }
3279 }
3280 } else {
3281 /* Load/store */
3282 rn = (insn >> 16) & 0xf;
3283 if (dp)
3284 VFP_DREG_D(rd, insn);
3285 else
3286 rd = VFP_SREG_D(insn);
3287 if (s->thumb && rn == 15) {
3288 addr = new_tmp();
3289 tcg_gen_movi_i32(addr, s->pc & ~2);
3290 } else {
3291 addr = load_reg(s, rn);
3292 }
3293 if ((insn & 0x01200000) == 0x01000000) {
3294 /* Single load/store */
3295 offset = (insn & 0xff) << 2;
3296 if ((insn & (1 << 23)) == 0)
3297 offset = -offset;
3298 tcg_gen_addi_i32(addr, addr, offset);
3299 if (insn & (1 << 20)) {
3300 gen_vfp_ld(s, dp, addr);
3301 gen_mov_vreg_F0(dp, rd);
3302 } else {
3303 gen_mov_F0_vreg(dp, rd);
3304 gen_vfp_st(s, dp, addr);
3305 }
3306 dead_tmp(addr);
3307 } else {
3308 /* load/store multiple */
3309 if (dp)
3310 n = (insn >> 1) & 0x7f;
3311 else
3312 n = insn & 0xff;
3313
3314 if (insn & (1 << 24)) /* pre-decrement */
3315 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3316
3317 if (dp)
3318 offset = 8;
3319 else
3320 offset = 4;
3321 for (i = 0; i < n; i++) {
3322 if (insn & ARM_CP_RW_BIT) {
3323 /* load */
3324 gen_vfp_ld(s, dp, addr);
3325 gen_mov_vreg_F0(dp, rd + i);
3326 } else {
3327 /* store */
3328 gen_mov_F0_vreg(dp, rd + i);
3329 gen_vfp_st(s, dp, addr);
3330 }
3331 tcg_gen_addi_i32(addr, addr, offset);
3332 }
3333 if (insn & (1 << 21)) {
3334 /* writeback */
3335 if (insn & (1 << 24))
3336 offset = -offset * n;
3337 else if (dp && (insn & 1))
3338 offset = 4;
3339 else
3340 offset = 0;
3341
3342 if (offset != 0)
3343 tcg_gen_addi_i32(addr, addr, offset);
3344 store_reg(s, rn, addr);
3345 } else {
3346 dead_tmp(addr);
3347 }
3348 }
3349 }
3350 break;
3351 default:
3352 /* Should never happen. */
3353 return 1;
3354 }
3355 return 0;
3356 }
3357
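/* Emit a jump to dest, chaining this TB to the next one when the
   destination lies in the same guest page; cross-page jumps must exit
   to the main loop so the mapping can be revalidated.  */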
3358 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3359 {
3360 TranslationBlock *tb;
3361
3362 tb = s->tb;
3363 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3364 tcg_gen_goto_tb(n);
3365 gen_set_pc_im(dest);
3366 tcg_gen_exit_tb((long)tb + n);
3367 } else {
3368 gen_set_pc_im(dest);
3369 tcg_gen_exit_tb(0);
3370 }
3371 }
3372
3373 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3374 {
3375 if (unlikely(s->singlestep_enabled)) {
3376 /* An indirect jump so that we still trigger the debug exception. */
3377 if (s->thumb)
3378 dest |= 1;
3379 gen_bx_im(s, dest);
3380 } else {
3381 gen_goto_tb(s, 0, dest);
3382 s->is_jmp = DISAS_TB_JUMP;
3383 }
3384 }
3385
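/* 16 x 16 -> 32 signed multiply used by the SMULxy/SMLAxy family:
   x and y select the top (1) or bottom (0) halfword of each operand.  */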
3386 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3387 {
3388 if (x)
3389 tcg_gen_sari_i32(t0, t0, 16);
3390 else
3391 gen_sxth(t0);
3392 if (y)
3393 tcg_gen_sari_i32(t1, t1, 16);
3394 else
3395 gen_sxth(t1);
3396 tcg_gen_mul_i32(t0, t0, t1);
3397 }
3398
3399 /* Return the mask of PSR bits set by a MSR instruction. */
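/* flags holds the MSR field mask: bit 0 = control (PSR[7:0]),
   bit 1 = extension (PSR[15:8]), bit 2 = status (PSR[23:16]),
   bit 3 = flags (PSR[31:24]).  */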
3400 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3401 uint32_t mask;
3402
3403 mask = 0;
3404 if (flags & (1 << 0))
3405 mask |= 0xff;
3406 if (flags & (1 << 1))
3407 mask |= 0xff00;
3408 if (flags & (1 << 2))
3409 mask |= 0xff0000;
3410 if (flags & (1 << 3))
3411 mask |= 0xff000000;
3412
3413 /* Mask out undefined bits. */
3414 mask &= ~CPSR_RESERVED;
3415 if (!arm_feature(env, ARM_FEATURE_V6))
3416 mask &= ~(CPSR_E | CPSR_GE);
3417 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3418 mask &= ~CPSR_IT;
3419 /* Mask out execution state bits. */
3420 if (!spsr)
3421 mask &= ~CPSR_EXEC;
3422 /* Mask out privileged bits. */
3423 if (IS_USER(s))
3424 mask &= CPSR_USER;
3425 return mask;
3426 }
3427
3428 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3429 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3430 {
3431 TCGv tmp;
3432 if (spsr) {
3433 /* ??? This is also undefined in system mode. */
3434 if (IS_USER(s))
3435 return 1;
3436
3437 tmp = load_cpu_field(spsr);
3438 tcg_gen_andi_i32(tmp, tmp, ~mask);
3439 tcg_gen_andi_i32(t0, t0, mask);
3440 tcg_gen_or_i32(tmp, tmp, t0);
3441 store_cpu_field(tmp, spsr);
3442 } else {
3443 gen_set_cpsr(t0, mask);
3444 }
3445 dead_tmp(t0);
3446 gen_lookup_tb(s);
3447 return 0;
3448 }
3449
3450 /* Returns nonzero if access to the PSR is not permitted. */
3451 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3452 {
3453 TCGv tmp;
3454 tmp = new_tmp();
3455 tcg_gen_movi_i32(tmp, val);
3456 return gen_set_psr(s, mask, spsr, tmp);
3457 }
3458
3459 /* Generate an old-style exception return. Marks pc as dead. */
3460 static void gen_exception_return(DisasContext *s, TCGv pc)
3461 {
3462 TCGv tmp;
3463 store_reg(s, 15, pc);
3464 tmp = load_cpu_field(spsr);
3465 gen_set_cpsr(tmp, 0xffffffff);
3466 dead_tmp(tmp);
3467 s->is_jmp = DISAS_UPDATE;
3468 }
3469
3470 /* Generate a v6 exception return. Marks both values as dead. */
3471 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3472 {
3473 gen_set_cpsr(cpsr, 0xffffffff);
3474 dead_tmp(cpsr);
3475 store_reg(s, 15, pc);
3476 s->is_jmp = DISAS_UPDATE;
3477 }
3478
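/* Flush the current Thumb-2 IT-block state back to the CPU state so
   that an exception raised here resumes with the right condexec bits.  */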
3479 static inline void
3480 gen_set_condexec (DisasContext *s)
3481 {
3482 if (s->condexec_mask) {
3483 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3484 TCGv tmp = new_tmp();
3485 tcg_gen_movi_i32(tmp, val);
3486 store_cpu_field(tmp, condexec_bits);
3487 }
3488 }
3489
3490 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3491 {
3492 gen_set_condexec(s);
3493 gen_set_pc_im(s->pc - offset);
3494 gen_exception(excp);
3495 s->is_jmp = DISAS_JUMP;
3496 }
3497
3498 static void gen_nop_hint(DisasContext *s, int val)
3499 {
3500 switch (val) {
3501 case 3: /* wfi */
3502 gen_set_pc_im(s->pc);
3503 s->is_jmp = DISAS_WFI;
3504 break;
3505 case 2: /* wfe */
3506 case 4: /* sev */
3507 /* TODO: Implement SEV and WFE. May help SMP performance. */
3508 default: /* nop */
3509 break;
3510 }
3511 }
3512
3513 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3514
3515 static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
3516 {
3517 switch (size) {
3518 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3519 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3520 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3521 default: return 1;
3522 }
3523 return 0;
3524 }
3525
3526 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3527 {
3528 switch (size) {
3529 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3530 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3531 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3532 default: return;
3533 }
3534 }
3535
3536 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3537 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3538 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3539 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3540 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3541
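/* Dispatch an integer Neon helper on (size << 1) | u: size gives the
   element width (0=8, 1=16, 2=32 bits) and u selects the unsigned
   variant.  */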
3542 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3543 switch ((size << 1) | u) { \
3544 case 0: \
3545 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3546 break; \
3547 case 1: \
3548 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3549 break; \
3550 case 2: \
3551 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3552 break; \
3553 case 3: \
3554 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3555 break; \
3556 case 4: \
3557 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3558 break; \
3559 case 5: \
3560 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3561 break; \
3562 default: return 1; \
3563 }} while (0)
3564
3565 #define GEN_NEON_INTEGER_OP(name) do { \
3566 switch ((size << 1) | u) { \
3567 case 0: \
3568 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3569 break; \
3570 case 1: \
3571 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3572 break; \
3573 case 2: \
3574 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3575 break; \
3576 case 3: \
3577 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3578 break; \
3579 case 4: \
3580 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3581 break; \
3582 case 5: \
3583 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3584 break; \
3585 default: return 1; \
3586 }} while (0)
3587
3588 static TCGv neon_load_scratch(int scratch)
3589 {
3590 TCGv tmp = new_tmp();
3591 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3592 return tmp;
3593 }
3594
3595 static void neon_store_scratch(int scratch, TCGv var)
3596 {
3597 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3598 dead_tmp(var);
3599 }
3600
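/* Load a Neon scalar operand, duplicated across a 32-bit temp.  For
   16-bit scalars, reg apparently packs the D register (bits [2:0]),
   the halfword index (bit 3) and the 32-bit half (bit 4) together.  */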
3601 static inline TCGv neon_get_scalar(int size, int reg)
3602 {
3603 TCGv tmp;
3604 if (size == 1) {
3605 tmp = neon_load_reg(reg & 7, reg >> 4);
3606 if (reg & 8) {
3607 gen_neon_dup_high16(tmp);
3608 } else {
3609 gen_neon_dup_low16(tmp);
3610 }
3611 } else {
3612 tmp = neon_load_reg(reg & 15, reg >> 4);
3613 }
3614 return tmp;
3615 }
3616
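/* Byte-wise unzip (deinterleave) of a 64-bit value held as two 32-bit
   halves: t0 collects the even-numbered bytes, t1 the odd-numbered.  */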
3617 static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3618 {
3619 TCGv rd, rm, tmp;
3620
3621 rd = new_tmp();
3622 rm = new_tmp();
3623 tmp = new_tmp();
3624
3625 tcg_gen_andi_i32(rd, t0, 0xff);
3626 tcg_gen_shri_i32(tmp, t0, 8);
3627 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3628 tcg_gen_or_i32(rd, rd, tmp);
3629 tcg_gen_shli_i32(tmp, t1, 16);
3630 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3631 tcg_gen_or_i32(rd, rd, tmp);
3632 tcg_gen_shli_i32(tmp, t1, 8);
3633 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3634 tcg_gen_or_i32(rd, rd, tmp);
3635
3636 tcg_gen_shri_i32(rm, t0, 8);
3637 tcg_gen_andi_i32(rm, rm, 0xff);
3638 tcg_gen_shri_i32(tmp, t0, 16);
3639 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3640 tcg_gen_or_i32(rm, rm, tmp);
3641 tcg_gen_shli_i32(tmp, t1, 8);
3642 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3643 tcg_gen_or_i32(rm, rm, tmp);
3644 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3645 tcg_gen_or_i32(t1, rm, tmp);
3646 tcg_gen_mov_i32(t0, rd);
3647
3648 dead_tmp(tmp);
3649 dead_tmp(rm);
3650 dead_tmp(rd);
3651 }
3652
3653 static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3654 {
3655 TCGv rd, rm, tmp;
3656
3657 rd = new_tmp();
3658 rm = new_tmp();
3659 tmp = new_tmp();
3660
3661 tcg_gen_andi_i32(rd, t0, 0xff);
3662 tcg_gen_shli_i32(tmp, t1, 8);
3663 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3664 tcg_gen_or_i32(rd, rd, tmp);
3665 tcg_gen_shli_i32(tmp, t0, 16);
3666 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3667 tcg_gen_or_i32(rd, rd, tmp);
3668 tcg_gen_shli_i32(tmp, t1, 24);
3669 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3670 tcg_gen_or_i32(rd, rd, tmp);
3671
3672 tcg_gen_andi_i32(rm, t1, 0xff000000);
3673 tcg_gen_shri_i32(tmp, t0, 8);
3674 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3675 tcg_gen_or_i32(rm, rm, tmp);
3676 tcg_gen_shri_i32(tmp, t1, 8);
3677 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3678 tcg_gen_or_i32(rm, rm, tmp);
3679 tcg_gen_shri_i32(tmp, t0, 16);
3680 tcg_gen_andi_i32(tmp, tmp, 0xff);
3681 tcg_gen_or_i32(t1, rm, tmp);
3682 tcg_gen_mov_i32(t0, rd);
3683
3684 dead_tmp(tmp);
3685 dead_tmp(rm);
3686 dead_tmp(rd);
3687 }
3688
3689 static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3690 {
3691 TCGv tmp, tmp2;
3692
3693 tmp = new_tmp();
3694 tmp2 = new_tmp();
3695
3696 tcg_gen_andi_i32(tmp, t0, 0xffff);
3697 tcg_gen_shli_i32(tmp2, t1, 16);
3698 tcg_gen_or_i32(tmp, tmp, tmp2);
3699 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3700 tcg_gen_shri_i32(tmp2, t0, 16);
3701 tcg_gen_or_i32(t1, t1, tmp2);
3702 tcg_gen_mov_i32(t0, tmp);
3703
3704 dead_tmp(tmp2);
3705 dead_tmp(tmp);
3706 }
3707
3708 static void gen_neon_unzip(int reg, int q, int tmp, int size)
3709 {
3710 int n;
3711 TCGv t0, t1;
3712
3713 for (n = 0; n < q + 1; n += 2) {
3714 t0 = neon_load_reg(reg, n);
3715 t1 = neon_load_reg(reg, n + 1);
3716 switch (size) {
3717 case 0: gen_neon_unzip_u8(t0, t1); break;
3718 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
3719 case 2: /* no-op */; break;
3720 default: abort();
3721 }
3722 neon_store_scratch(tmp + n, t0);
3723 neon_store_scratch(tmp + n + 1, t1);
3724 }
3725 }
3726
3727 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3728 {
3729 TCGv rd, tmp;
3730
3731 rd = new_tmp();
3732 tmp = new_tmp();
3733
3734 tcg_gen_shli_i32(rd, t0, 8);
3735 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3736 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3737 tcg_gen_or_i32(rd, rd, tmp);
3738
3739 tcg_gen_shri_i32(t1, t1, 8);
3740 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3741 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3742 tcg_gen_or_i32(t1, t1, tmp);
3743 tcg_gen_mov_i32(t0, rd);
3744
3745 dead_tmp(tmp);
3746 dead_tmp(rd);
3747 }
3748
3749 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3750 {
3751 TCGv rd, tmp;
3752
3753 rd = new_tmp();
3754 tmp = new_tmp();
3755
3756 tcg_gen_shli_i32(rd, t0, 16);
3757 tcg_gen_andi_i32(tmp, t1, 0xffff);
3758 tcg_gen_or_i32(rd, rd, tmp);
3759 tcg_gen_shri_i32(t1, t1, 16);
3760 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3761 tcg_gen_or_i32(t1, t1, tmp);
3762 tcg_gen_mov_i32(t0, rd);
3763
3764 dead_tmp(tmp);
3765 dead_tmp(rd);
3766 }
3767
3768
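/* Register count, interleave factor and register spacing for each of
   the VLDn/VSTn "all elements" forms, indexed by instruction
   bits [11:8].  */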
3769 static struct {
3770 int nregs;
3771 int interleave;
3772 int spacing;
3773 } neon_ls_element_type[11] = {
3774 {4, 4, 1},
3775 {4, 4, 2},
3776 {4, 1, 1},
3777 {4, 2, 1},
3778 {3, 3, 1},
3779 {3, 3, 2},
3780 {3, 1, 1},
3781 {1, 1, 1},
3782 {2, 2, 1},
3783 {2, 2, 2},
3784 {2, 1, 1}
3785 };
3786
3787 /* Translate a NEON load/store element instruction. Return nonzero if the
3788 instruction is invalid. */
3789 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3790 {
3791 int rd, rn, rm;
3792 int op;
3793 int nregs;
3794 int interleave;
3795 int spacing;
3796 int stride;
3797 int size;
3798 int reg;
3799 int pass;
3800 int load;
3801 int shift;
3802 int n;
3803 TCGv addr;
3804 TCGv tmp;
3805 TCGv tmp2;
3806 TCGv_i64 tmp64;
3807
3808 if (!s->vfp_enabled)
3809 return 1;
3810 VFP_DREG_D(rd, insn);
3811 rn = (insn >> 16) & 0xf;
3812 rm = insn & 0xf;
3813 load = (insn & (1 << 21)) != 0;
3814 addr = new_tmp();
3815 if ((insn & (1 << 23)) == 0) {
3816 /* Load store all elements. */
3817 op = (insn >> 8) & 0xf;
3818 size = (insn >> 6) & 3;
3819 if (op > 10)
3820 return 1;
3821 nregs = neon_ls_element_type[op].nregs;
3822 interleave = neon_ls_element_type[op].interleave;
3823 spacing = neon_ls_element_type[op].spacing;
3824 if (size == 3 && (interleave | spacing) != 1)
3825 return 1;
3826 load_reg_var(s, addr, rn);
3827 stride = (1 << size) * interleave;
3828 for (reg = 0; reg < nregs; reg++) {
3829 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3830 load_reg_var(s, addr, rn);
3831 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3832 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3833 load_reg_var(s, addr, rn);
3834 tcg_gen_addi_i32(addr, addr, 1 << size);
3835 }
3836 if (size == 3) {
3837 if (load) {
3838 tmp64 = gen_ld64(addr, IS_USER(s));
3839 neon_store_reg64(tmp64, rd);
3840 tcg_temp_free_i64(tmp64);
3841 } else {
3842 tmp64 = tcg_temp_new_i64();
3843 neon_load_reg64(tmp64, rd);
3844 gen_st64(tmp64, addr, IS_USER(s));
3845 }
3846 tcg_gen_addi_i32(addr, addr, stride);
3847 } else {
3848 for (pass = 0; pass < 2; pass++) {
3849 if (size == 2) {
3850 if (load) {
3851 tmp = gen_ld32(addr, IS_USER(s));
3852 neon_store_reg(rd, pass, tmp);
3853 } else {
3854 tmp = neon_load_reg(rd, pass);
3855 gen_st32(tmp, addr, IS_USER(s));
3856 }
3857 tcg_gen_addi_i32(addr, addr, stride);
3858 } else if (size == 1) {
3859 if (load) {
3860 tmp = gen_ld16u(addr, IS_USER(s));
3861 tcg_gen_addi_i32(addr, addr, stride);
3862 tmp2 = gen_ld16u(addr, IS_USER(s));
3863 tcg_gen_addi_i32(addr, addr, stride);
3864 tcg_gen_shli_i32(tmp2, tmp2, 16);
3865 tcg_gen_or_i32(tmp, tmp, tmp2);
3866 dead_tmp(tmp2);
3867 neon_store_reg(rd, pass, tmp);
3868 } else {
3869 tmp = neon_load_reg(rd, pass);
3870 tmp2 = new_tmp();
3871 tcg_gen_shri_i32(tmp2, tmp, 16);
3872 gen_st16(tmp, addr, IS_USER(s));
3873 tcg_gen_addi_i32(addr, addr, stride);
3874 gen_st16(tmp2, addr, IS_USER(s));
3875 tcg_gen_addi_i32(addr, addr, stride);
3876 }
3877 } else /* size == 0 */ {
3878 if (load) {
3879 TCGV_UNUSED(tmp2);
3880 for (n = 0; n < 4; n++) {
3881 tmp = gen_ld8u(addr, IS_USER(s));
3882 tcg_gen_addi_i32(addr, addr, stride);
3883 if (n == 0) {
3884 tmp2 = tmp;
3885 } else {
3886 tcg_gen_shli_i32(tmp, tmp, n * 8);
3887 tcg_gen_or_i32(tmp2, tmp2, tmp);
3888 dead_tmp(tmp);
3889 }
3890 }
3891 neon_store_reg(rd, pass, tmp2);
3892 } else {
3893 tmp2 = neon_load_reg(rd, pass);
3894 for (n = 0; n < 4; n++) {
3895 tmp = new_tmp();
3896 if (n == 0) {
3897 tcg_gen_mov_i32(tmp, tmp2);
3898 } else {
3899 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3900 }
3901 gen_st8(tmp, addr, IS_USER(s));
3902 tcg_gen_addi_i32(addr, addr, stride);
3903 }
3904 dead_tmp(tmp2);
3905 }
3906 }
3907 }
3908 }
3909 rd += spacing;
3910 }
3911 stride = nregs * 8;
3912 } else {
3913 size = (insn >> 10) & 3;
3914 if (size == 3) {
3915 /* Load single element to all lanes. */
3916 if (!load)
3917 return 1;
3918 size = (insn >> 6) & 3;
3919 nregs = ((insn >> 8) & 3) + 1;
3920 stride = (insn & (1 << 5)) ? 2 : 1;
3921 load_reg_var(s, addr, rn);
3922 for (reg = 0; reg < nregs; reg++) {
3923 switch (size) {
3924 case 0:
3925 tmp = gen_ld8u(addr, IS_USER(s));
3926 gen_neon_dup_u8(tmp, 0);
3927 break;
3928 case 1:
3929 tmp = gen_ld16u(addr, IS_USER(s));
3930 gen_neon_dup_low16(tmp);
3931 break;
3932 case 2:
3933 tmp = gen_ld32(addr, IS_USER(s));
3934 break;
3935 case 3:
3936 return 1;
3937 default: /* Avoid compiler warnings. */
3938 abort();
3939 }
3940 tcg_gen_addi_i32(addr, addr, 1 << size);
3941 tmp2 = new_tmp();
3942 tcg_gen_mov_i32(tmp2, tmp);
3943 neon_store_reg(rd, 0, tmp2);
3944 neon_store_reg(rd, 1, tmp);
3945 rd += stride;
3946 }
3947 stride = (1 << size) * nregs;
3948 } else {
3949 /* Single element. */
3950 pass = (insn >> 7) & 1;
3951 switch (size) {
3952 case 0:
3953 shift = ((insn >> 5) & 3) * 8;
3954 stride = 1;
3955 break;
3956 case 1:
3957 shift = ((insn >> 6) & 1) * 16;
3958 stride = (insn & (1 << 5)) ? 2 : 1;
3959 break;
3960 case 2:
3961 shift = 0;
3962 stride = (insn & (1 << 6)) ? 2 : 1;
3963 break;
3964 default:
3965 abort();
3966 }
3967 nregs = ((insn >> 8) & 3) + 1;
3968 load_reg_var(s, addr, rn);
3969 for (reg = 0; reg < nregs; reg++) {
3970 if (load) {
3971 switch (size) {
3972 case 0:
3973 tmp = gen_ld8u(addr, IS_USER(s));
3974 break;
3975 case 1:
3976 tmp = gen_ld16u(addr, IS_USER(s));
3977 break;
3978 case 2:
3979 tmp = gen_ld32(addr, IS_USER(s));
3980 break;
3981 default: /* Avoid compiler warnings. */
3982 abort();
3983 }
3984 if (size != 2) {
3985 tmp2 = neon_load_reg(rd, pass);
3986 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3987 dead_tmp(tmp2);
3988 }
3989 neon_store_reg(rd, pass, tmp);
3990 } else { /* Store */
3991 tmp = neon_load_reg(rd, pass);
3992 if (shift)
3993 tcg_gen_shri_i32(tmp, tmp, shift);
3994 switch (size) {
3995 case 0:
3996 gen_st8(tmp, addr, IS_USER(s));
3997 break;
3998 case 1:
3999 gen_st16(tmp, addr, IS_USER(s));
4000 break;
4001 case 2:
4002 gen_st32(tmp, addr, IS_USER(s));
4003 break;
4004 }
4005 }
4006 rd += stride;
4007 tcg_gen_addi_i32(addr, addr, 1 << size);
4008 }
4009 stride = nregs * (1 << size);
4010 }
4011 }
4012 dead_tmp(addr);
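/* Post-index writeback: rm == 15 means no writeback, rm == 13 bumps
   the base by the transfer size, and any other rm is added as a
   register offset.  */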
4013 if (rm != 15) {
4014 TCGv base;
4015
4016 base = load_reg(s, rn);
4017 if (rm == 13) {
4018 tcg_gen_addi_i32(base, base, stride);
4019 } else {
4020 TCGv index;
4021 index = load_reg(s, rm);
4022 tcg_gen_add_i32(base, base, index);
4023 dead_tmp(index);
4024 }
4025 store_reg(s, rn, base);
4026 }
4027 return 0;
4028 }
4029
4030 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4031 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4032 {
4033 tcg_gen_and_i32(t, t, c);
4034 tcg_gen_andc_i32(f, f, c);
4035 tcg_gen_or_i32(dest, t, f);
4036 }
4037
4038 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4039 {
4040 switch (size) {
4041 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4042 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4043 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4044 default: abort();
4045 }
4046 }
4047
4048 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4049 {
4050 switch (size) {
4051 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4052 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4053 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4054 default: abort();
4055 }
4056 }
4057
4058 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4059 {
4060 switch (size) {
4061 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4062 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4063 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4064 default: abort();
4065 }
4066 }
4067
4068 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4069 {
4070 switch (size) {
4071 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4072 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4073 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4074 default: abort();
4075 }
4076 }
4077
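/* Variable shift for the narrowing-shift paths: q selects the rounding
   forms of the helpers.  Only 16- and 32-bit element sizes should
   reach here; anything else aborts.  */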
4078 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4079 int q, int u)
4080 {
4081 if (q) {
4082 if (u) {
4083 switch (size) {
4084 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4085 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4086 default: abort();
4087 }
4088 } else {
4089 switch (size) {
4090 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4091 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4092 default: abort();
4093 }
4094 }
4095 } else {
4096 if (u) {
4097 switch (size) {
4098 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4099 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4100 default: abort();
4101 }
4102 } else {
4103 switch (size) {
4104 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4105 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4106 default: abort();
4107 }
4108 }
4109 }
4110 }
4111
4112 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4113 {
4114 if (u) {
4115 switch (size) {
4116 case 0: gen_helper_neon_widen_u8(dest, src); break;
4117 case 1: gen_helper_neon_widen_u16(dest, src); break;
4118 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4119 default: abort();
4120 }
4121 } else {
4122 switch (size) {
4123 case 0: gen_helper_neon_widen_s8(dest, src); break;
4124 case 1: gen_helper_neon_widen_s16(dest, src); break;
4125 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4126 default: abort();
4127 }
4128 }
4129 dead_tmp(src);
4130 }
4131
4132 static inline void gen_neon_addl(int size)
4133 {
4134 switch (size) {
4135 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4136 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4137 case 2: tcg_gen_add_i64(CPU_V001); break;
4138 default: abort();
4139 }
4140 }
4141
4142 static inline void gen_neon_subl(int size)
4143 {
4144 switch (size) {
4145 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4146 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4147 case 2: tcg_gen_sub_i64(CPU_V001); break;
4148 default: abort();
4149 }
4150 }
4151
4152 static inline void gen_neon_negl(TCGv_i64 var, int size)
4153 {
4154 switch (size) {
4155 case 0: gen_helper_neon_negl_u16(var, var); break;
4156 case 1: gen_helper_neon_negl_u32(var, var); break;
4157 case 2: gen_helper_neon_negl_u64(var, var); break;
4158 default: abort();
4159 }
4160 }
4161
4162 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4163 {
4164 switch (size) {
4165 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4166 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4167 default: abort();
4168 }
4169 }
4170
4171 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4172 {
4173 TCGv_i64 tmp;
4174
4175 switch ((size << 1) | u) {
4176 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4177 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4178 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4179 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4180 case 4:
4181 tmp = gen_muls_i64_i32(a, b);
4182 tcg_gen_mov_i64(dest, tmp);
4183 break;
4184 case 5:
4185 tmp = gen_mulu_i64_i32(a, b);
4186 tcg_gen_mov_i64(dest, tmp);
4187 break;
4188 default: abort();
4189 }
4190
4191 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4192 Don't forget to clean them now. */
4193 if (size < 2) {
4194 dead_tmp(a);
4195 dead_tmp(b);
4196 }
4197 }
4198
4199 /* Translate a NEON data processing instruction. Return nonzero if the
4200 instruction is invalid.
4201 We process data in a mixture of 32-bit and 64-bit chunks.
4202 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4203
4204 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4205 {
4206 int op;
4207 int q;
4208 int rd, rn, rm;
4209 int size;
4210 int shift;
4211 int pass;
4212 int count;
4213 int pairwise;
4214 int u;
4215 int n;
4216 uint32_t imm, mask;
4217 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4218 TCGv_i64 tmp64;
4219
4220 if (!s->vfp_enabled)
4221 return 1;
4222 q = (insn & (1 << 6)) != 0;
4223 u = (insn >> 24) & 1;
4224 VFP_DREG_D(rd, insn);
4225 VFP_DREG_N(rn, insn);
4226 VFP_DREG_M(rm, insn);
4227 size = (insn >> 20) & 3;
4228 if ((insn & (1 << 23)) == 0) {
4229 /* Three register same length. */
4230 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4231 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4232 || op == 10 || op == 11 || op == 16)) {
4233 /* 64-bit element instructions. */
4234 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4235 neon_load_reg64(cpu_V0, rn + pass);
4236 neon_load_reg64(cpu_V1, rm + pass);
4237 switch (op) {
4238 case 1: /* VQADD */
4239 if (u) {
4240 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4241 cpu_V0, cpu_V1);
4242 } else {
4243 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4244 cpu_V0, cpu_V1);
4245 }
4246 break;
4247 case 5: /* VQSUB */
4248 if (u) {
4249 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4250 cpu_V0, cpu_V1);
4251 } else {
4252 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4253 cpu_V0, cpu_V1);
4254 }
4255 break;
4256 case 8: /* VSHL */
4257 if (u) {
4258 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4259 } else {
4260 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4261 }
4262 break;
4263 case 9: /* VQSHL */
4264 if (u) {
4265 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4266 cpu_V1, cpu_V0);
4267 } else {
4268 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4269 cpu_V1, cpu_V0);
4270 }
4271 break;
4272 case 10: /* VRSHL */
4273 if (u) {
4274 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4275 } else {
4276 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4277 }
4278 break;
4279 case 11: /* VQRSHL */
4280 if (u) {
4281 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4282 cpu_V1, cpu_V0);
4283 } else {
4284 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4285 cpu_V1, cpu_V0);
4286 }
4287 break;
4288 case 16:
4289 if (u) {
4290 tcg_gen_sub_i64(CPU_V001);
4291 } else {
4292 tcg_gen_add_i64(CPU_V001);
4293 }
4294 break;
4295 default:
4296 abort();
4297 }
4298 neon_store_reg64(cpu_V0, rd + pass);
4299 }
4300 return 0;
4301 }
4302 switch (op) {
4303 case 8: /* VSHL */
4304 case 9: /* VQSHL */
4305 case 10: /* VRSHL */
4306 case 11: /* VQRSHL */
4307 {
4308 int rtmp;
4309 /* Shift instruction operands are reversed. */
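    /* VSHL Dd, Dm, Dn shifts the elements of Dm by the per-element
       counts in Dn, i.e. the data register is in the Vm field rather
       than the usual Vn, so swap rn/rm here and let the generic code
       below compute op(rn, rm) as for every other instruction. */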
4310 rtmp = rn;
4311 rn = rm;
4312 rm = rtmp;
4313 pairwise = 0;
4314 }
4315 break;
4316 case 20: /* VPMAX */
4317 case 21: /* VPMIN */
4318 case 23: /* VPADD */
4319 pairwise = 1;
4320 break;
4321 case 26: /* VPADD (float) */
4322 pairwise = (u && size < 2);
4323 break;
4324 case 30: /* VPMIN/VPMAX (float) */
4325 pairwise = u;
4326 break;
4327 default:
4328 pairwise = 0;
4329 break;
4330 }
4331
4332 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4333
4334 if (pairwise) {
4335 /* Pairwise. */
4336 if (q)
4337 n = (pass & 1) * 2;
4338 else
4339 n = 0;
4340 if (pass < q + 1) {
4341 tmp = neon_load_reg(rn, n);
4342 tmp2 = neon_load_reg(rn, n + 1);
4343 } else {
4344 tmp = neon_load_reg(rm, n);
4345 tmp2 = neon_load_reg(rm, n + 1);
4346 }
4347 } else {
4348 /* Elementwise. */
4349 tmp = neon_load_reg(rn, pass);
4350 tmp2 = neon_load_reg(rm, pass);
4351 }
4352 switch (op) {
4353 case 0: /* VHADD */
4354 GEN_NEON_INTEGER_OP(hadd);
4355 break;
4356 case 1: /* VQADD */
4357 GEN_NEON_INTEGER_OP_ENV(qadd);
4358 break;
4359 case 2: /* VRHADD */
4360 GEN_NEON_INTEGER_OP(rhadd);
4361 break;
4362 case 3: /* Logic ops. */
4363 switch ((u << 2) | size) {
4364 case 0: /* VAND */
4365 tcg_gen_and_i32(tmp, tmp, tmp2);
4366 break;
4367 case 1: /* BIC */
4368 tcg_gen_andc_i32(tmp, tmp, tmp2);
4369 break;
4370 case 2: /* VORR */
4371 tcg_gen_or_i32(tmp, tmp, tmp2);
4372 break;
4373 case 3: /* VORN */
4374 tcg_gen_orc_i32(tmp, tmp, tmp2);
4375 break;
4376 case 4: /* VEOR */
4377 tcg_gen_xor_i32(tmp, tmp, tmp2);
4378 break;
4379 case 5: /* VBSL */
4380 tmp3 = neon_load_reg(rd, pass);
4381 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4382 dead_tmp(tmp3);
4383 break;
4384 case 6: /* VBIT */
4385 tmp3 = neon_load_reg(rd, pass);
4386 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4387 dead_tmp(tmp3);
4388 break;
4389 case 7: /* VBIF */
4390 tmp3 = neon_load_reg(rd, pass);
4391 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4392 dead_tmp(tmp3);
4393 break;
4394 }
4395 break;
4396 case 4: /* VHSUB */
4397 GEN_NEON_INTEGER_OP(hsub);
4398 break;
4399 case 5: /* VQSUB */
4400 GEN_NEON_INTEGER_OP_ENV(qsub);
4401 break;
4402 case 6: /* VCGT */
4403 GEN_NEON_INTEGER_OP(cgt);
4404 break;
4405 case 7: /* VCGE */
4406 GEN_NEON_INTEGER_OP(cge);
4407 break;
4408 case 8: /* VSHL */
4409 GEN_NEON_INTEGER_OP(shl);
4410 break;
4411 case 9: /* VQSHL */
4412 GEN_NEON_INTEGER_OP_ENV(qshl);
4413 break;
4414 case 10: /* VRSHL */
4415 GEN_NEON_INTEGER_OP(rshl);
4416 break;
4417 case 11: /* VQRSHL */
4418 GEN_NEON_INTEGER_OP_ENV(qrshl);
4419 break;
4420 case 12: /* VMAX */
4421 GEN_NEON_INTEGER_OP(max);
4422 break;
4423 case 13: /* VMIN */
4424 GEN_NEON_INTEGER_OP(min);
4425 break;
4426 case 14: /* VABD */
4427 GEN_NEON_INTEGER_OP(abd);
4428 break;
4429 case 15: /* VABA */
4430 GEN_NEON_INTEGER_OP(abd);
4431 dead_tmp(tmp2);
4432 tmp2 = neon_load_reg(rd, pass);
4433 gen_neon_add(size, tmp, tmp2);
4434 break;
4435 case 16:
4436 if (!u) { /* VADD */
4437 if (gen_neon_add(size, tmp, tmp2))
4438 return 1;
4439 } else { /* VSUB */
4440 switch (size) {
4441 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4442 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4443 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4444 default: return 1;
4445 }
4446 }
4447 break;
4448 case 17:
4449 if (!u) { /* VTST */
4450 switch (size) {
4451 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4452 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4453 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4454 default: return 1;
4455 }
4456 } else { /* VCEQ */
4457 switch (size) {
4458 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4459 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4460 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4461 default: return 1;
4462 }
4463 }
4464 break;
4465 case 18: /* Multiply. */
4466 switch (size) {
4467 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4468 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4469 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4470 default: return 1;
4471 }
4472 dead_tmp(tmp2);
4473 tmp2 = neon_load_reg(rd, pass);
4474 if (u) { /* VMLS */
4475 gen_neon_rsb(size, tmp, tmp2);
4476 } else { /* VMLA */
4477 gen_neon_add(size, tmp, tmp2);
4478 }
4479 break;
4480 case 19: /* VMUL */
4481 if (u) { /* polynomial */
4482 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4483 } else { /* Integer */
4484 switch (size) {
4485 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4486 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4487 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4488 default: return 1;
4489 }
4490 }
4491 break;
4492 case 20: /* VPMAX */
4493 GEN_NEON_INTEGER_OP(pmax);
4494 break;
4495 case 21: /* VPMIN */
4496 GEN_NEON_INTEGER_OP(pmin);
4497 break;
4498 case 22: /* Multiply high. */
4499 if (!u) { /* VQDMULH */
4500 switch (size) {
4501 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4502 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4503 default: return 1;
4504 }
4505 } else { /* VQRDMULH */
4506 switch (size) {
4507 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4508 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4509 default: return 1;
4510 }
4511 }
4512 break;
4513 case 23: /* VPADD */
4514 if (u)
4515 return 1;
4516 switch (size) {
4517 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4518 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4519 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4520 default: return 1;
4521 }
4522 break;
4523 case 26: /* Floating point arithmetic. */
4524 switch ((u << 2) | size) {
4525 case 0: /* VADD */
4526 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4527 break;
4528 case 2: /* VSUB */
4529 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4530 break;
4531 case 4: /* VPADD */
4532 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4533 break;
4534 case 6: /* VABD */
4535 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4536 break;
4537 default:
4538 return 1;
4539 }
4540 break;
4541 case 27: /* Float multiply. */
4542 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4543 if (!u) {
4544 dead_tmp(tmp2);
4545 tmp2 = neon_load_reg(rd, pass);
4546 if (size == 0) {
4547 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4548 } else {
4549 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4550 }
4551 }
4552 break;
4553 case 28: /* Float compare. */
4554 if (!u) {
4555 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4556 } else {
4557 if (size == 0)
4558 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4559 else
4560 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4561 }
4562 break;
4563 case 29: /* Float compare absolute. */
4564 if (!u)
4565 return 1;
4566 if (size == 0)
4567 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4568 else
4569 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4570 break;
4571 case 30: /* Float min/max. */
4572 if (size == 0)
4573 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4574 else
4575 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4576 break;
4577 case 31:
4578 if (size == 0)
4579 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4580 else
4581 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4582 break;
4583 default:
4584 abort();
4585 }
4586 dead_tmp(tmp2);
4587
4588 /* Save the result. For elementwise operations we can put it
4589 straight into the destination register. For pairwise operations
4590 we have to be careful to avoid clobbering the source operands. */
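/* e.g. VPADD d0, d1, d0 still needs to read d0 on its final pass,
   so when rd overlaps the rm source the per-pass results are staged
   in scratch storage and copied into rd after the loop below. */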
4591 if (pairwise && rd == rm) {
4592 neon_store_scratch(pass, tmp);
4593 } else {
4594 neon_store_reg(rd, pass, tmp);
4595 }
4596
4597 } /* for pass */
4598 if (pairwise && rd == rm) {
4599 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4600 tmp = neon_load_scratch(pass);
4601 neon_store_reg(rd, pass, tmp);
4602 }
4603 }
4604 /* End of 3 register same size operations. */
4605 } else if (insn & (1 << 4)) {
4606 if ((insn & 0x00380080) != 0) {
4607 /* Two registers and shift. */
4608 op = (insn >> 8) & 0xf;
4609 if (insn & (1 << 7)) {
4610 /* 64-bit shift. */
4611 size = 3;
4612 } else {
4613 size = 2;
4614 while ((insn & (1 << (size + 19))) == 0)
4615 size--;
4616 }
4617 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4618 /* To avoid excessive duplication of ops we implement shift
4619 by immediate using the variable shift operations. */
4620 if (op < 8) {
4621 /* Shift by immediate:
4622 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4623 /* Right shifts are encoded as N - shift, where N is the
4624 element size in bits. */
4625 if (op <= 4)
4626 shift = shift - (1 << (size + 3));
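    /* e.g. for 16-bit elements (size == 1) an encoded value of 12
       denotes a right shift by 16 - 12 = 4; the subtraction yields
       -4, which the variable-shift helpers treat as a right shift. */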
4627 if (size == 3) {
4628 count = q + 1;
4629 } else {
4630 count = q ? 4 : 2;
4631 }
4632 switch (size) {
4633 case 0:
4634 imm = (uint8_t) shift;
4635 imm |= imm << 8;
4636 imm |= imm << 16;
4637 break;
4638 case 1:
4639 imm = (uint16_t) shift;
4640 imm |= imm << 16;
4641 break;
4642 case 2:
4643 case 3:
4644 imm = shift;
4645 break;
4646 default:
4647 abort();
4648 }
4649
4650 for (pass = 0; pass < count; pass++) {
4651 if (size == 3) {
4652 neon_load_reg64(cpu_V0, rm + pass);
4653 tcg_gen_movi_i64(cpu_V1, imm);
4654 switch (op) {
4655 case 0: /* VSHR */
4656 case 1: /* VSRA */
4657 if (u)
4658 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4659 else
4660 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4661 break;
4662 case 2: /* VRSHR */
4663 case 3: /* VRSRA */
4664 if (u)
4665 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4666 else
4667 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4668 break;
4669 case 4: /* VSRI */
4670 if (!u)
4671 return 1;
4672 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4673 break;
4674 case 5: /* VSHL, VSLI */
4675 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4676 break;
4677 case 6: /* VQSHLU */
4678 if (u) {
4679 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4680 cpu_V0, cpu_V1);
4681 } else {
4682 return 1;
4683 }
4684 break;
4685 case 7: /* VQSHL */
4686 if (u) {
4687 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4688 cpu_V0, cpu_V1);
4689 } else {
4690 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4691 cpu_V0, cpu_V1);
4692 }
4693 break;
4694 }
4695 if (op == 1 || op == 3) {
4696 /* Accumulate. */
4697 neon_load_reg64(cpu_V1, rd + pass);
4698 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4699 } else if (op == 4 || (op == 5 && u)) {
4700 /* Insert */
4701 cpu_abort(env, "VS[LR]I.64 not implemented");
4702 }
4703 neon_store_reg64(cpu_V0, rd + pass);
4704 } else { /* size < 3 */
4705 /* Operands in tmp and tmp2. */
4706 tmp = neon_load_reg(rm, pass);
4707 tmp2 = new_tmp();
4708 tcg_gen_movi_i32(tmp2, imm);
4709 switch (op) {
4710 case 0: /* VSHR */
4711 case 1: /* VSRA */
4712 GEN_NEON_INTEGER_OP(shl);
4713 break;
4714 case 2: /* VRSHR */
4715 case 3: /* VRSRA */
4716 GEN_NEON_INTEGER_OP(rshl);
4717 break;
4718 case 4: /* VSRI */
4719 if (!u)
4720 return 1;
4721 GEN_NEON_INTEGER_OP(shl);
4722 break;
4723 case 5: /* VSHL, VSLI */
4724 switch (size) {
4725 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4726 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4727 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4728 default: return 1;
4729 }
4730 break;
4731 case 6: /* VQSHLU */
4732 if (!u) {
4733 return 1;
4734 }
4735 switch (size) {
4736 case 0:
4737 gen_helper_neon_qshlu_s8(tmp, cpu_env,
4738 tmp, tmp2);
4739 break;
4740 case 1:
4741 gen_helper_neon_qshlu_s16(tmp, cpu_env,
4742 tmp, tmp2);
4743 break;
4744 case 2:
4745 gen_helper_neon_qshlu_s32(tmp, cpu_env,
4746 tmp, tmp2);
4747 break;
4748 default:
4749 return 1;
4750 }
4751 break;
4752 case 7: /* VQSHL */
4753 GEN_NEON_INTEGER_OP_ENV(qshl);
4754 break;
4755 }
4756 dead_tmp(tmp2);
4757
4758 if (op == 1 || op == 3) {
4759 /* Accumulate. */
4760 tmp2 = neon_load_reg(rd, pass);
4761 gen_neon_add(size, tmp, tmp2);
4762 dead_tmp(tmp2);
4763 } else if (op == 4 || (op == 5 && u)) {
4764 /* Insert */
4765 switch (size) {
4766 case 0:
4767 if (op == 4)
4768 mask = 0xff >> -shift;
4769 else
4770 mask = (uint8_t)(0xff << shift);
4771 mask |= mask << 8;
4772 mask |= mask << 16;
4773 break;
4774 case 1:
4775 if (op == 4)
4776 mask = 0xffff >> -shift;
4777 else
4778 mask = (uint16_t)(0xffff << shift);
4779 mask |= mask << 16;
4780 break;
4781 case 2:
4782 if (shift < -31 || shift > 31) {
4783 mask = 0;
4784 } else {
4785 if (op == 4)
4786 mask = 0xffffffffu >> -shift;
4787 else
4788 mask = 0xffffffffu << shift;
4789 }
4790 break;
4791 default:
4792 abort();
4793 }
4794 tmp2 = neon_load_reg(rd, pass);
4795 tcg_gen_andi_i32(tmp, tmp, mask);
4796 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
4797 tcg_gen_or_i32(tmp, tmp, tmp2);
4798 dead_tmp(tmp2);
4799 }
4800 neon_store_reg(rd, pass, tmp);
4801 }
4802 } /* for pass */
4803 } else if (op < 10) {
4804 /* Shift by immediate and narrow:
4805 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4806 shift = shift - (1 << (size + 3));
4807 size++;
4808 switch (size) {
4809 case 1:
4810 imm = (uint16_t)shift;
4811 imm |= imm << 16;
4812 tmp2 = tcg_const_i32(imm);
4813 TCGV_UNUSED_I64(tmp64);
4814 break;
4815 case 2:
4816 imm = (uint32_t)shift;
4817 tmp2 = tcg_const_i32(imm);
4818 TCGV_UNUSED_I64(tmp64);
4819 break;
4820 case 3:
4821 tmp64 = tcg_const_i64(shift);
4822 TCGV_UNUSED(tmp2);
4823 break;
4824 default:
4825 abort();
4826 }
4827
4828 for (pass = 0; pass < 2; pass++) {
4829 if (size == 3) {
4830 neon_load_reg64(cpu_V0, rm + pass);
4831 if (q) {
4832 if (u)
4833 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
4834 else
4835 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
4836 } else {
4837 if (u)
4838 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
4839 else
4840 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
4841 }
4842 } else {
4843 tmp = neon_load_reg(rm + pass, 0);
4844 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4845 tmp3 = neon_load_reg(rm + pass, 1);
4846 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4847 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4848 dead_tmp(tmp);
4849 dead_tmp(tmp3);
4850 }
4851 tmp = new_tmp();
4852 if (op == 8 && !u) {
4853 gen_neon_narrow(size - 1, tmp, cpu_V0);
4854 } else {
4855 if (op == 8)
4856 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4857 else
4858 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4859 }
4860 neon_store_reg(rd, pass, tmp);
4861 } /* for pass */
4862 if (size == 3) {
4863 tcg_temp_free_i64(tmp64);
4864 } else {
4865 tcg_temp_free_i32(tmp2);
4866 }
4867 } else if (op == 10) {
4868 /* VSHLL */
4869 if (q || size == 3)
4870 return 1;
4871 tmp = neon_load_reg(rm, 0);
4872 tmp2 = neon_load_reg(rm, 1);
4873 for (pass = 0; pass < 2; pass++) {
4874 if (pass == 1)
4875 tmp = tmp2;
4876
4877 gen_neon_widen(cpu_V0, tmp, size, u);
4878
4879 if (shift != 0) {
4880 /* The shift is less than the width of the source
4881 type, so we can just shift the whole register. */
4882 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4883 /* Widen the result of shift: we need to clear
4884 * the potential overflow bits resulting from
4885 * left bits of the narrow input appearing as
4886 * right bits of the left neighbour narrow
4887 * input. */
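    /* e.g. size == 0 with shift == 4 builds imm64 =
       0x000f000f000f000f below, so the AND clears the low four bits
       of every 16-bit lane, i.e. exactly the bits shifted in from
       the neighbouring lane. */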
4888 if (size < 2 || !u) {
4889 uint64_t imm64;
4890 if (size == 0) {
4891 imm = (0xffu >> (8 - shift));
4892 imm |= imm << 16;
4893 } else if (size == 1) {
4894 imm = 0xffff >> (16 - shift);
4895 } else {
4896 /* size == 2 */
4897 imm = 0xffffffff >> (32 - shift);
4898 }
4899 if (size < 2) {
4900 imm64 = imm | (((uint64_t)imm) << 32);
4901 } else {
4902 imm64 = imm;
4903 }
4904 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4905 }
4906 }
4907 neon_store_reg64(cpu_V0, rd + pass);
4908 }
4909 } else if (op >= 14) {
4910 /* VCVT fixed-point. */
4911 /* We have already masked out the must-be-1 top bit of imm6,
4912 * hence this 32-shift where the ARM ARM has 64-imm6.
4913 */
4914 shift = 32 - shift;
4915 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4916 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4917 if (!(op & 1)) {
4918 if (u)
4919 gen_vfp_ulto(0, shift);
4920 else
4921 gen_vfp_slto(0, shift);
4922 } else {
4923 if (u)
4924 gen_vfp_toul(0, shift);
4925 else
4926 gen_vfp_tosl(0, shift);
4927 }
4928 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4929 }
4930 } else {
4931 return 1;
4932 }
4933 } else { /* (insn & 0x00380080) == 0 */
4934 int invert;
4935
4936 op = (insn >> 8) & 0xf;
4937 /* One register and immediate. */
4938 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
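    /* Reassemble the abcdefgh immediate from its scattered fields:
       a = insn[24] (u), bcd = insn[18:16], efgh = insn[3:0]. */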
4939 invert = (insn & (1 << 5)) != 0;
4940 switch (op) {
4941 case 0: case 1:
4942 /* no-op */
4943 break;
4944 case 2: case 3:
4945 imm <<= 8;
4946 break;
4947 case 4: case 5:
4948 imm <<= 16;
4949 break;
4950 case 6: case 7:
4951 imm <<= 24;
4952 break;
4953 case 8: case 9:
4954 imm |= imm << 16;
4955 break;
4956 case 10: case 11:
4957 imm = (imm << 8) | (imm << 24);
4958 break;
4959 case 12:
4960 imm = (imm << 8) | 0xff;
4961 break;
4962 case 13:
4963 imm = (imm << 16) | 0xffff;
4964 break;
4965 case 14:
4966 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4967 if (invert)
4968 imm = ~imm;
4969 break;
4970 case 15:
4971 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4972 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4973 break;
4974 }
4975 if (invert)
4976 imm = ~imm;
4977
4978 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4979 if (op & 1 && op < 12) {
4980 tmp = neon_load_reg(rd, pass);
4981 if (invert) {
4982 /* The immediate value has already been inverted, so
4983 BIC becomes AND. */
4984 tcg_gen_andi_i32(tmp, tmp, imm);
4985 } else {
4986 tcg_gen_ori_i32(tmp, tmp, imm);
4987 }
4988 } else {
4989 /* VMOV, VMVN. */
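    /* For op 14 with invert set, each bit n of the immediate
       expands to a whole 0x00 or 0xff byte of the vector; the
       loop below builds the 32-bit half selected by pass. */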
4990 tmp = new_tmp();
4991 if (op == 14 && invert) {
4992 uint32_t val;
4993 val = 0;
4994 for (n = 0; n < 4; n++) {
4995 if (imm & (1 << (n + (pass & 1) * 4)))
4996 val |= 0xff << (n * 8);
4997 }
4998 tcg_gen_movi_i32(tmp, val);
4999 } else {
5000 tcg_gen_movi_i32(tmp, imm);
5001 }
5002 }
5003 neon_store_reg(rd, pass, tmp);
5004 }
5005 }
5006 } else { /* (insn & 0x00800010 == 0x00800000) */
5007 if (size != 3) {
5008 op = (insn >> 8) & 0xf;
5009 if ((insn & (1 << 6)) == 0) {
5010 /* Three registers of different lengths. */
5011 int src1_wide;
5012 int src2_wide;
5013 int prewiden;
5014 /* prewiden, src1_wide, src2_wide */
5015 static const int neon_3reg_wide[16][3] = {
5016 {1, 0, 0}, /* VADDL */
5017 {1, 1, 0}, /* VADDW */
5018 {1, 0, 0}, /* VSUBL */
5019 {1, 1, 0}, /* VSUBW */
5020 {0, 1, 1}, /* VADDHN */
5021 {0, 0, 0}, /* VABAL */
5022 {0, 1, 1}, /* VSUBHN */
5023 {0, 0, 0}, /* VABDL */
5024 {0, 0, 0}, /* VMLAL */
5025 {0, 0, 0}, /* VQDMLAL */
5026 {0, 0, 0}, /* VMLSL */
5027 {0, 0, 0}, /* VQDMLSL */
5028 {0, 0, 0}, /* Integer VMULL */
5029 {0, 0, 0}, /* VQDMULL */
5030 {0, 0, 0} /* Polynomial VMULL */
5031 };
5032
5033 prewiden = neon_3reg_wide[op][0];
5034 src1_wide = neon_3reg_wide[op][1];
5035 src2_wide = neon_3reg_wide[op][2];
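    /* prewiden: narrow source elements are widened with
       gen_neon_widen() before the operation; srcN_wide: that
       operand is already a 64-bit wide vector half and is loaded
       whole with neon_load_reg64() below. */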
5036
5037 if (size == 0 && (op == 9 || op == 11 || op == 13))
5038 return 1;
5039
5040 /* Avoid overlapping operands. Wide source operands are
5041 always aligned so will never overlap with wide
5042 destinations in problematic ways. */
5043 if (rd == rm && !src2_wide) {
5044 tmp = neon_load_reg(rm, 1);
5045 neon_store_scratch(2, tmp);
5046 } else if (rd == rn && !src1_wide) {
5047 tmp = neon_load_reg(rn, 1);
5048 neon_store_scratch(2, tmp);
5049 }
5050 TCGV_UNUSED(tmp3);
5051 for (pass = 0; pass < 2; pass++) {
5052 if (src1_wide) {
5053 neon_load_reg64(cpu_V0, rn + pass);
5054 TCGV_UNUSED(tmp);
5055 } else {
5056 if (pass == 1 && rd == rn) {
5057 tmp = neon_load_scratch(2);
5058 } else {
5059 tmp = neon_load_reg(rn, pass);
5060 }
5061 if (prewiden) {
5062 gen_neon_widen(cpu_V0, tmp, size, u);
5063 }
5064 }
5065 if (src2_wide) {
5066 neon_load_reg64(cpu_V1, rm + pass);
5067 TCGV_UNUSED(tmp2);
5068 } else {
5069 if (pass == 1 && rd == rm) {
5070 tmp2 = neon_load_scratch(2);
5071 } else {
5072 tmp2 = neon_load_reg(rm, pass);
5073 }
5074 if (prewiden) {
5075 gen_neon_widen(cpu_V1, tmp2, size, u);
5076 }
5077 }
5078 switch (op) {
5079 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5080 gen_neon_addl(size);
5081 break;
5082 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5083 gen_neon_subl(size);
5084 break;
5085 case 5: case 7: /* VABAL, VABDL */
5086 switch ((size << 1) | u) {
5087 case 0:
5088 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5089 break;
5090 case 1:
5091 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5092 break;
5093 case 2:
5094 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5095 break;
5096 case 3:
5097 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5098 break;
5099 case 4:
5100 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5101 break;
5102 case 5:
5103 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5104 break;
5105 default: abort();
5106 }
5107 dead_tmp(tmp2);
5108 dead_tmp(tmp);
5109 break;
5110 case 8: case 9: case 10: case 11: case 12: case 13:
5111 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5112 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5113 break;
5114 case 14: /* Polynomial VMULL */
5115 cpu_abort(env, "Polynomial VMULL not implemented");
5116
5117 default: /* 15 is RESERVED. */
5118 return 1;
5119 }
5120 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5121 /* Accumulate. */
5122 if (op == 10 || op == 11) {
5123 gen_neon_negl(cpu_V0, size);
5124 }
5125
5126 if (op != 13) {
5127 neon_load_reg64(cpu_V1, rd + pass);
5128 }
5129
5130 switch (op) {
5131 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
5132 gen_neon_addl(size);
5133 break;
5134 case 9: case 11: /* VQDMLAL, VQDMLSL */
5135 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5136 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5137 break;
5139 case 13: /* VQDMULL */
5140 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5141 break;
5142 default:
5143 abort();
5144 }
5145 neon_store_reg64(cpu_V0, rd + pass);
5146 } else if (op == 4 || op == 6) {
5147 /* Narrowing operation. */
5148 tmp = new_tmp();
5149 if (!u) {
5150 switch (size) {
5151 case 0:
5152 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5153 break;
5154 case 1:
5155 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5156 break;
5157 case 2:
5158 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5159 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5160 break;
5161 default: abort();
5162 }
5163 } else {
5164 switch (size) {
5165 case 0:
5166 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5167 break;
5168 case 1:
5169 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5170 break;
5171 case 2:
5172 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5173 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5174 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5175 break;
5176 default: abort();
5177 }
5178 }
5179 if (pass == 0) {
5180 tmp3 = tmp;
5181 } else {
5182 neon_store_reg(rd, 0, tmp3);
5183 neon_store_reg(rd, 1, tmp);
5184 }
5185 } else {
5186 /* Write back the result. */
5187 neon_store_reg64(cpu_V0, rd + pass);
5188 }
5189 }
5190 } else {
5191 /* Two registers and a scalar. */
5192 switch (op) {
5193 case 0: /* Integer VMLA scalar */
5194 case 1: /* Float VMLA scalar */
5195 case 4: /* Integer VMLS scalar */
5196 case 5: /* Floating point VMLS scalar */
5197 case 8: /* Integer VMUL scalar */
5198 case 9: /* Floating point VMUL scalar */
5199 case 12: /* VQDMULH scalar */
5200 case 13: /* VQRDMULH scalar */
5201 tmp = neon_get_scalar(size, rm);
5202 neon_store_scratch(0, tmp);
5203 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5204 tmp = neon_load_scratch(0);
5205 tmp2 = neon_load_reg(rn, pass);
5206 if (op == 12) {
5207 if (size == 1) {
5208 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5209 } else {
5210 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5211 }
5212 } else if (op == 13) {
5213 if (size == 1) {
5214 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5215 } else {
5216 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5217 }
5218 } else if (op & 1) {
5219 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5220 } else {
5221 switch (size) {
5222 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5223 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5224 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5225 default: return 1;
5226 }
5227 }
5228 dead_tmp(tmp2);
5229 if (op < 8) {
5230 /* Accumulate. */
5231 tmp2 = neon_load_reg(rd, pass);
5232 switch (op) {
5233 case 0:
5234 gen_neon_add(size, tmp, tmp2);
5235 break;
5236 case 1:
5237 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5238 break;
5239 case 4:
5240 gen_neon_rsb(size, tmp, tmp2);
5241 break;
5242 case 5:
5243 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5244 break;
5245 default:
5246 abort();
5247 }
5248 dead_tmp(tmp2);
5249 }
5250 neon_store_reg(rd, pass, tmp);
5251 }
5252 break;
5253 case 2: /* VMLAL scalar */
5254 case 3: /* VQDMLAL scalar */
5255 case 6: /* VMLSL scalar */
5256 case 7: /* VQDMLSL scalar */
5257 case 10: /* VMULL scalar */
5258 case 11: /* VQDMULL scalar */
5259 if (size == 0 && (op == 3 || op == 7 || op == 11))
5260 return 1;
5261
5262 tmp2 = neon_get_scalar(size, rm);
5263 /* We need a copy of tmp2 because gen_neon_mull
5264 * deletes it during pass 0. */
5265 tmp4 = new_tmp();
5266 tcg_gen_mov_i32(tmp4, tmp2);
5267 tmp3 = neon_load_reg(rn, 1);
5268
5269 for (pass = 0; pass < 2; pass++) {
5270 if (pass == 0) {
5271 tmp = neon_load_reg(rn, 0);
5272 } else {
5273 tmp = tmp3;
5274 tmp2 = tmp4;
5275 }
5276 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5277 if (op == 6 || op == 7) {
5278 gen_neon_negl(cpu_V0, size);
5279 }
5280 if (op != 11) {
5281 neon_load_reg64(cpu_V1, rd + pass);
5282 }
5283 switch (op) {
5284 case 2: case 6:
5285 gen_neon_addl(size);
5286 break;
5287 case 3: case 7:
5288 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5289 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5290 break;
5291 case 10:
5292 /* no-op */
5293 break;
5294 case 11:
5295 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5296 break;
5297 default:
5298 abort();
5299 }
5300 neon_store_reg64(cpu_V0, rd + pass);
5301 }
5302
5304 break;
5305 default: /* 14 and 15 are RESERVED */
5306 return 1;
5307 }
5308 }
5309 } else { /* size == 3 */
5310 if (!u) {
5311 /* Extract. */
5312 imm = (insn >> 8) & 0xf;
5313
5314 if (imm > 7 && !q)
5315 return 1;
5316
5317 if (imm == 0) {
5318 neon_load_reg64(cpu_V0, rn);
5319 if (q) {
5320 neon_load_reg64(cpu_V1, rn + 1);
5321 }
5322 } else if (imm == 8) {
5323 neon_load_reg64(cpu_V0, rn + 1);
5324 if (q) {
5325 neon_load_reg64(cpu_V1, rm);
5326 }
5327 } else if (q) {
5328 tmp64 = tcg_temp_new_i64();
5329 if (imm < 8) {
5330 neon_load_reg64(cpu_V0, rn);
5331 neon_load_reg64(tmp64, rn + 1);
5332 } else {
5333 neon_load_reg64(cpu_V0, rn + 1);
5334 neon_load_reg64(tmp64, rm);
5335 }
5336 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5337 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5338 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5339 if (imm < 8) {
5340 neon_load_reg64(cpu_V1, rm);
5341 } else {
5342 neon_load_reg64(cpu_V1, rm + 1);
5343 imm -= 8;
5344 }
5345 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5346 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5347 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5348 tcg_temp_free_i64(tmp64);
5349 } else {
5350 /* BUGFIX */
5351 neon_load_reg64(cpu_V0, rn);
5352 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5353 neon_load_reg64(cpu_V1, rm);
5354 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5355 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5356 }
5357 neon_store_reg64(cpu_V0, rd);
5358 if (q) {
5359 neon_store_reg64(cpu_V1, rd + 1);
5360 }
5361 } else if ((insn & (1 << 11)) == 0) {
5362 /* Two register misc. */
5363 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5364 size = (insn >> 18) & 3;
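    /* op is the 6-bit two-register-misc opcode
       insn[17:16]:insn[10:7], giving the case values 0-63 below. */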
5365 switch (op) {
5366 case 0: /* VREV64 */
5367 if (size == 3)
5368 return 1;
5369 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5370 tmp = neon_load_reg(rm, pass * 2);
5371 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5372 switch (size) {
5373 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5374 case 1: gen_swap_half(tmp); break;
5375 case 2: /* no-op */ break;
5376 default: abort();
5377 }
5378 neon_store_reg(rd, pass * 2 + 1, tmp);
5379 if (size == 2) {
5380 neon_store_reg(rd, pass * 2, tmp2);
5381 } else {
5382 switch (size) {
5383 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5384 case 1: gen_swap_half(tmp2); break;
5385 default: abort();
5386 }
5387 neon_store_reg(rd, pass * 2, tmp2);
5388 }
5389 }
5390 break;
5391 case 4: case 5: /* VPADDL */
5392 case 12: case 13: /* VPADAL */
5393 if (size == 3)
5394 return 1;
5395 for (pass = 0; pass < q + 1; pass++) {
5396 tmp = neon_load_reg(rm, pass * 2);
5397 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5398 tmp = neon_load_reg(rm, pass * 2 + 1);
5399 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5400 switch (size) {
5401 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5402 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5403 case 2: tcg_gen_add_i64(CPU_V001); break;
5404 default: abort();
5405 }
5406 if (op >= 12) {
5407 /* Accumulate. */
5408 neon_load_reg64(cpu_V1, rd + pass);
5409 gen_neon_addl(size);
5410 }
5411 neon_store_reg64(cpu_V0, rd + pass);
5412 }
5413 break;
5414 case 33: /* VTRN */
5415 if (size == 2) {
5416 for (n = 0; n < (q ? 4 : 2); n += 2) {
5417 tmp = neon_load_reg(rm, n);
5418 tmp2 = neon_load_reg(rd, n + 1);
5419 neon_store_reg(rm, n, tmp2);
5420 neon_store_reg(rd, n + 1, tmp);
5421 }
5422 } else {
5423 goto elementwise;
5424 }
5425 break;
5426 case 34: /* VUZP */
5427 /* Reg Before After
5428 Rd A3 A2 A1 A0 B2 B0 A2 A0
5429 Rm B3 B2 B1 B0 B3 B1 A3 A1
5430 */
5431 if (size == 3)
5432 return 1;
5433 gen_neon_unzip(rd, q, 0, size);
5434 gen_neon_unzip(rm, q, 4, size);
5435 if (q) {
5436 static int unzip_order_q[8] =
5437 {0, 2, 4, 6, 1, 3, 5, 7};
5438 for (n = 0; n < 8; n++) {
5439 int reg = (n < 4) ? rd : rm;
5440 tmp = neon_load_scratch(unzip_order_q[n]);
5441 neon_store_reg(reg, n % 4, tmp);
5442 }
5443 } else {
5444 static int unzip_order[4] =
5445 {0, 4, 1, 5};
5446 for (n = 0; n < 4; n++) {
5447 int reg = (n < 2) ? rd : rm;
5448 tmp = neon_load_scratch(unzip_order[n]);
5449 neon_store_reg(reg, n % 2, tmp);
5450 }
5451 }
5452 break;
5453 case 35: /* VZIP */
5454 /* Reg Before After
5455 Rd A3 A2 A1 A0 B1 A1 B0 A0
5456 Rm B3 B2 B1 B0 B3 A3 B2 A2
5457 */
5458 if (size == 3)
5459 return 1;
5460 count = (q ? 4 : 2);
5461 for (n = 0; n < count; n++) {
5462 tmp = neon_load_reg(rd, n);
5463 tmp2 = neon_load_reg(rm, n);
5464 switch (size) {
5465 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5466 case 1: gen_neon_zip_u16(tmp, tmp2); break;
5467 case 2: /* no-op */; break;
5468 default: abort();
5469 }
5470 neon_store_scratch(n * 2, tmp);
5471 neon_store_scratch(n * 2 + 1, tmp2);
5472 }
5473 for (n = 0; n < count * 2; n++) {
5474 int reg = (n < count) ? rd : rm;
5475 tmp = neon_load_scratch(n);
5476 neon_store_reg(reg, n % count, tmp);
5477 }
5478 break;
5479 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5480 if (size == 3)
5481 return 1;
5482 TCGV_UNUSED(tmp2);
5483 for (pass = 0; pass < 2; pass++) {
5484 neon_load_reg64(cpu_V0, rm + pass);
5485 tmp = new_tmp();
5486 if (op == 36) {
5487 if (q) { /* VQMOVUN */
5488 gen_neon_unarrow_sats(size, tmp, cpu_V0);
5489 } else { /* VMOVN */
5490 gen_neon_narrow(size, tmp, cpu_V0);
5491 }
5492 } else { /* VQMOVN */
5493 if (q) {
5494 gen_neon_narrow_satu(size, tmp, cpu_V0);
5495 } else {
5496 gen_neon_narrow_sats(size, tmp, cpu_V0);
5497 }
5498 }
5499 if (pass == 0) {
5500 tmp2 = tmp;
5501 } else {
5502 neon_store_reg(rd, 0, tmp2);
5503 neon_store_reg(rd, 1, tmp);
5504 }
5505 }
5506 break;
5507 case 38: /* VSHLL */
5508 if (q || size == 3)
5509 return 1;
5510 tmp = neon_load_reg(rm, 0);
5511 tmp2 = neon_load_reg(rm, 1);
5512 for (pass = 0; pass < 2; pass++) {
5513 if (pass == 1)
5514 tmp = tmp2;
5515 gen_neon_widen(cpu_V0, tmp, size, 1);
5516 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5517 neon_store_reg64(cpu_V0, rd + pass);
5518 }
5519 break;
5520 case 44: /* VCVT.F16.F32 */
5521 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5522 return 1;
5523 tmp = new_tmp();
5524 tmp2 = new_tmp();
5525 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5526 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5527 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5528 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5529 tcg_gen_shli_i32(tmp2, tmp2, 16);
5530 tcg_gen_or_i32(tmp2, tmp2, tmp);
5531 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5532 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5533 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5534 neon_store_reg(rd, 0, tmp2);
5535 tmp2 = new_tmp();
5536 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5537 tcg_gen_shli_i32(tmp2, tmp2, 16);
5538 tcg_gen_or_i32(tmp2, tmp2, tmp);
5539 neon_store_reg(rd, 1, tmp2);
5540 dead_tmp(tmp);
5541 break;
5542 case 46: /* VCVT.F32.F16 */
5543 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5544 return 1;
5545 tmp3 = new_tmp();
5546 tmp = neon_load_reg(rm, 0);
5547 tmp2 = neon_load_reg(rm, 1);
5548 tcg_gen_ext16u_i32(tmp3, tmp);
5549 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5550 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5551 tcg_gen_shri_i32(tmp3, tmp, 16);
5552 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5553 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5554 dead_tmp(tmp);
5555 tcg_gen_ext16u_i32(tmp3, tmp2);
5556 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5557 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5558 tcg_gen_shri_i32(tmp3, tmp2, 16);
5559 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5560 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5561 dead_tmp(tmp2);
5562 dead_tmp(tmp3);
5563 break;
5564 default:
5565 elementwise:
5566 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5567 if (op == 30 || op == 31 || op >= 58) {
5568 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5569 neon_reg_offset(rm, pass));
5570 TCGV_UNUSED(tmp);
5571 } else {
5572 tmp = neon_load_reg(rm, pass);
5573 }
5574 switch (op) {
5575 case 1: /* VREV32 */
5576 switch (size) {
5577 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5578 case 1: gen_swap_half(tmp); break;
5579 default: return 1;
5580 }
5581 break;
5582 case 2: /* VREV16 */
5583 if (size != 0)
5584 return 1;
5585 gen_rev16(tmp);
5586 break;
5587 case 8: /* CLS */
5588 switch (size) {
5589 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5590 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5591 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5592 default: return 1;
5593 }
5594 break;
5595 case 9: /* CLZ */
5596 switch (size) {
5597 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5598 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5599 case 2: gen_helper_clz(tmp, tmp); break;
5600 default: return 1;
5601 }
5602 break;
5603 case 10: /* CNT */
5604 if (size != 0)
5605 return 1;
5606 gen_helper_neon_cnt_u8(tmp, tmp);
5607 break;
5608 case 11: /* VNOT */
5609 if (size != 0)
5610 return 1;
5611 tcg_gen_not_i32(tmp, tmp);
5612 break;
5613 case 14: /* VQABS */
5614 switch (size) {
5615 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5616 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5617 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
5618 default: return 1;
5619 }
5620 break;
5621 case 15: /* VQNEG */
5622 switch (size) {
5623 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5624 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5625 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
5626 default: return 1;
5627 }
5628 break;
5629 case 16: case 19: /* VCGT #0, VCLE #0 */
5630 tmp2 = tcg_const_i32(0);
5631 switch(size) {
5632 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5633 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5634 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5635 default: return 1;
5636 }
5637 tcg_temp_free(tmp2);
5638 if (op == 19)
5639 tcg_gen_not_i32(tmp, tmp);
5640 break;
5641 case 17: case 20: /* VCGE #0, VCLT #0 */
5642 tmp2 = tcg_const_i32(0);
5643 switch(size) {
5644 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5645 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5646 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5647 default: return 1;
5648 }
5649 tcg_temp_free(tmp2);
5650 if (op == 20)
5651 tcg_gen_not_i32(tmp, tmp);
5652 break;
5653 case 18: /* VCEQ #0 */
5654 tmp2 = tcg_const_i32(0);
5655 switch(size) {
5656 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5657 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5658 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5659 default: return 1;
5660 }
5661 tcg_temp_free(tmp2);
5662 break;
5663 case 22: /* VABS */
5664 switch(size) {
5665 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5666 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5667 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5668 default: return 1;
5669 }
5670 break;
5671 case 23: /* VNEG */
5672 if (size == 3)
5673 return 1;
5674 tmp2 = tcg_const_i32(0);
5675 gen_neon_rsb(size, tmp, tmp2);
5676 tcg_temp_free(tmp2);
5677 break;
5678 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5679 tmp2 = tcg_const_i32(0);
5680 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5681 tcg_temp_free(tmp2);
5682 if (op == 27)
5683 tcg_gen_not_i32(tmp, tmp);
5684 break;
5685 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5686 tmp2 = tcg_const_i32(0);
5687 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5688 tcg_temp_free(tmp2);
5689 if (op == 28)
5690 tcg_gen_not_i32(tmp, tmp);
5691 break;
5692 case 26: /* Float VCEQ #0 */
5693 tmp2 = tcg_const_i32(0);
5694 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5695 tcg_temp_free(tmp2);
5696 break;
5697 case 30: /* Float VABS */
5698 gen_vfp_abs(0);
5699 break;
5700 case 31: /* Float VNEG */
5701 gen_vfp_neg(0);
5702 break;
5703 case 32: /* VSWP */
5704 tmp2 = neon_load_reg(rd, pass);
5705 neon_store_reg(rm, pass, tmp2);
5706 break;
5707 case 33: /* VTRN */
5708 tmp2 = neon_load_reg(rd, pass);
5709 switch (size) {
5710 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5711 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5712 case 2: abort();
5713 default: return 1;
5714 }
5715 neon_store_reg(rm, pass, tmp2);
5716 break;
5717 case 56: /* Integer VRECPE */
5718 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5719 break;
5720 case 57: /* Integer VRSQRTE */
5721 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5722 break;
5723 case 58: /* Float VRECPE */
5724 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5725 break;
5726 case 59: /* Float VRSQRTE */
5727 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5728 break;
5729 case 60: /* VCVT.F32.S32 */
5730 gen_vfp_sito(0);
5731 break;
5732 case 61: /* VCVT.F32.U32 */
5733 gen_vfp_uito(0);
5734 break;
5735 case 62: /* VCVT.S32.F32 */
5736 gen_vfp_tosiz(0);
5737 break;
5738 case 63: /* VCVT.U32.F32 */
5739 gen_vfp_touiz(0);
5740 break;
5741 default:
5742 /* Reserved: 21, 29, 39-55 */
5743 return 1;
5744 }
5745 if (op == 30 || op == 31 || op >= 58) {
5746 tcg_gen_st_f32(cpu_F0s, cpu_env,
5747 neon_reg_offset(rd, pass));
5748 } else {
5749 neon_store_reg(rd, pass, tmp);
5750 }
5751 }
5752 break;
5753 }
5754 } else if ((insn & (1 << 10)) == 0) {
5755 /* VTBL, VTBX. */
5756 n = ((insn >> 5) & 0x18) + 8;
5757 if (insn & (1 << 6)) {
5758 tmp = neon_load_reg(rd, 0);
5759 } else {
5760 tmp = new_tmp();
5761 tcg_gen_movi_i32(tmp, 0);
5762 }
5763 tmp2 = neon_load_reg(rm, 0);
5764 tmp4 = tcg_const_i32(rn);
5765 tmp5 = tcg_const_i32(n);
5766 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5767 dead_tmp(tmp);
5768 if (insn & (1 << 6)) {
5769 tmp = neon_load_reg(rd, 1);
5770 } else {
5771 tmp = new_tmp();
5772 tcg_gen_movi_i32(tmp, 0);
5773 }
5774 tmp3 = neon_load_reg(rm, 1);
5775 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5776 tcg_temp_free_i32(tmp5);
5777 tcg_temp_free_i32(tmp4);
5778 neon_store_reg(rd, 0, tmp2);
5779 neon_store_reg(rd, 1, tmp3);
5780 dead_tmp(tmp);
5781 } else if ((insn & 0x380) == 0) {
5782 /* VDUP */
5783 if (insn & (1 << 19)) {
5784 tmp = neon_load_reg(rm, 1);
5785 } else {
5786 tmp = neon_load_reg(rm, 0);
5787 }
5788 if (insn & (1 << 16)) {
5789 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5790 } else if (insn & (1 << 17)) {
5791 if ((insn >> 18) & 1)
5792 gen_neon_dup_high16(tmp);
5793 else
5794 gen_neon_dup_low16(tmp);
5795 }
5796 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5797 tmp2 = new_tmp();
5798 tcg_gen_mov_i32(tmp2, tmp);
5799 neon_store_reg(rd, pass, tmp2);
5800 }
5801 dead_tmp(tmp);
5802 } else {
5803 return 1;
5804 }
5805 }
5806 }
5807 return 0;
5808 }
5809
5810 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5811 {
5812 int crn = (insn >> 16) & 0xf;
5813 int crm = insn & 0xf;
5814 int op1 = (insn >> 21) & 7;
5815 int op2 = (insn >> 5) & 7;
5816 int rt = (insn >> 12) & 0xf;
5817 TCGv tmp;
5818
5819 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5820 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5821 /* TEECR */
5822 if (IS_USER(s))
5823 return 1;
5824 tmp = load_cpu_field(teecr);
5825 store_reg(s, rt, tmp);
5826 return 0;
5827 }
5828 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5829 /* TEEHBR */
5830 if (IS_USER(s) && (env->teecr & 1))
5831 return 1;
5832 tmp = load_cpu_field(teehbr);
5833 store_reg(s, rt, tmp);
5834 return 0;
5835 }
5836 }
5837 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5838 op1, crn, crm, op2);
5839 return 1;
5840 }
5841
5842 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5843 {
5844 int crn = (insn >> 16) & 0xf;
5845 int crm = insn & 0xf;
5846 int op1 = (insn >> 21) & 7;
5847 int op2 = (insn >> 5) & 7;
5848 int rt = (insn >> 12) & 0xf;
5849 TCGv tmp;
5850
5851 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5852 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5853 /* TEECR */
5854 if (IS_USER(s))
5855 return 1;
5856 tmp = load_reg(s, rt);
5857 gen_helper_set_teecr(cpu_env, tmp);
5858 dead_tmp(tmp);
5859 return 0;
5860 }
5861 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5862 /* TEEHBR */
5863 if (IS_USER(s) && (env->teecr & 1))
5864 return 1;
5865 tmp = load_reg(s, rt);
5866 store_cpu_field(tmp, teehbr);
5867 return 0;
5868 }
5869 }
5870 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5871 op1, crn, crm, op2);
5872 return 1;
5873 }
5874
5875 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5876 {
5877 int cpnum;
5878
5879 cpnum = (insn >> 8) & 0xf;
5880 if (arm_feature(env, ARM_FEATURE_XSCALE)
5881 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5882 return 1;
5883
5884 switch (cpnum) {
5885 case 0:
5886 case 1:
5887 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5888 return disas_iwmmxt_insn(env, s, insn);
5889 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5890 return disas_dsp_insn(env, s, insn);
5891 }
5892 return 1;
5893 case 10:
5894 case 11:
5895 return disas_vfp_insn (env, s, insn);
5896 case 14:
5897 /* Coprocessors 7-15 are architecturally reserved by ARM.
5898 Unfortunately Intel decided to ignore this. */
5899 if (arm_feature(env, ARM_FEATURE_XSCALE))
5900 goto board;
5901 if (insn & (1 << 20))
5902 return disas_cp14_read(env, s, insn);
5903 else
5904 return disas_cp14_write(env, s, insn);
5905 case 15:
5906 return disas_cp15_insn (env, s, insn);
5907 default:
5908 board:
5909 /* Unknown coprocessor. See if the board has hooked it. */
5910 return disas_cp_insn (env, s, insn);
5911 }
5912 }
5913
5914
5915 /* Store a 64-bit value to a register pair. Clobbers val. */
5916 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5917 {
5918 TCGv tmp;
5919 tmp = new_tmp();
5920 tcg_gen_trunc_i64_i32(tmp, val);
5921 store_reg(s, rlow, tmp);
5922 tmp = new_tmp();
5923 tcg_gen_shri_i64(val, val, 32);
5924 tcg_gen_trunc_i64_i32(tmp, val);
5925 store_reg(s, rhigh, tmp);
5926 }
5927
5928 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5929 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5930 {
5931 TCGv_i64 tmp;
5932 TCGv tmp2;
5933
5934 /* Load value and extend to 64 bits. */
5935 tmp = tcg_temp_new_i64();
5936 tmp2 = load_reg(s, rlow);
5937 tcg_gen_extu_i32_i64(tmp, tmp2);
5938 dead_tmp(tmp2);
5939 tcg_gen_add_i64(val, val, tmp);
5940 tcg_temp_free_i64(tmp);
5941 }
5942
5943 /* load and add a 64-bit value from a register pair. */
5944 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5945 {
5946 TCGv_i64 tmp;
5947 TCGv tmpl;
5948 TCGv tmph;
5949
5950 /* Load 64-bit value rd:rn. */
5951 tmpl = load_reg(s, rlow);
5952 tmph = load_reg(s, rhigh);
5953 tmp = tcg_temp_new_i64();
5954 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5955 dead_tmp(tmpl);
5956 dead_tmp(tmph);
5957 tcg_gen_add_i64(val, val, tmp);
5958 tcg_temp_free_i64(tmp);
5959 }
5960
5961 /* Set N and Z flags from a 64-bit value. */
5962 static void gen_logicq_cc(TCGv_i64 val)
5963 {
5964 TCGv tmp = new_tmp();
5965 gen_helper_logicq_cc(tmp, val);
5966 gen_logic_CC(tmp);
5967 dead_tmp(tmp);
5968 }
5969
5970 /* Load/Store exclusive instructions are implemented by remembering
5971 the value/address loaded, and seeing if these are the same
5972 when the store is performed. This should be sufficient to implement
5973 the architecturally mandated semantics, and avoids having to monitor
5974 regular stores.
5975
5976 In system emulation mode only one CPU will be running at once, so
5977 this sequence is effectively atomic. In user emulation mode we
5978 throw an exception and handle the atomic operation elsewhere. */
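/* A sketch of the guest-visible behaviour this implements:

       ldrex r1, [r0]        @ records r0 in cpu_exclusive_addr and
                             @ the loaded data in cpu_exclusive_val
       ...
       strex r2, r3, [r0]    @ stores r3 and sets r2 = 0 only if both
                             @ still match, otherwise sets r2 = 1
 */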
5979 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5980 TCGv addr, int size)
5981 {
5982 TCGv tmp;
5983
5984 switch (size) {
5985 case 0:
5986 tmp = gen_ld8u(addr, IS_USER(s));
5987 break;
5988 case 1:
5989 tmp = gen_ld16u(addr, IS_USER(s));
5990 break;
5991 case 2:
5992 case 3:
5993 tmp = gen_ld32(addr, IS_USER(s));
5994 break;
5995 default:
5996 abort();
5997 }
5998 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5999 store_reg(s, rt, tmp);
6000 if (size == 3) {
6001 TCGv tmp2 = new_tmp();
6002 tcg_gen_addi_i32(tmp2, addr, 4);
6003 tmp = gen_ld32(tmp2, IS_USER(s));
6004 dead_tmp(tmp2);
6005 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6006 store_reg(s, rt2, tmp);
6007 }
6008 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6009 }
6010
6011 static void gen_clrex(DisasContext *s)
6012 {
6013 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6014 }
6015
6016 #ifdef CONFIG_USER_ONLY
6017 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6018 TCGv addr, int size)
6019 {
6020 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6021 tcg_gen_movi_i32(cpu_exclusive_info,
6022 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6023 gen_exception_insn(s, 4, EXCP_STREX);
6024 }
6025 #else
6026 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6027 TCGv addr, int size)
6028 {
6029 TCGv tmp;
6030 int done_label;
6031 int fail_label;
6032
6033 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6034 [addr] = {Rt};
6035 {Rd} = 0;
6036 } else {
6037 {Rd} = 1;
6038 } */
6039 fail_label = gen_new_label();
6040 done_label = gen_new_label();
6041 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6042 switch (size) {
6043 case 0:
6044 tmp = gen_ld8u(addr, IS_USER(s));
6045 break;
6046 case 1:
6047 tmp = gen_ld16u(addr, IS_USER(s));
6048 break;
6049 case 2:
6050 case 3:
6051 tmp = gen_ld32(addr, IS_USER(s));
6052 break;
6053 default:
6054 abort();
6055 }
6056 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6057 dead_tmp(tmp);
6058 if (size == 3) {
6059 TCGv tmp2 = new_tmp();
6060 tcg_gen_addi_i32(tmp2, addr, 4);
6061 tmp = gen_ld32(tmp2, IS_USER(s));
6062 dead_tmp(tmp2);
6063 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6064 dead_tmp(tmp);
6065 }
6066 tmp = load_reg(s, rt);
6067 switch (size) {
6068 case 0:
6069 gen_st8(tmp, addr, IS_USER(s));
6070 break;
6071 case 1:
6072 gen_st16(tmp, addr, IS_USER(s));
6073 break;
6074 case 2:
6075 case 3:
6076 gen_st32(tmp, addr, IS_USER(s));
6077 break;
6078 default:
6079 abort();
6080 }
6081 if (size == 3) {
6082 tcg_gen_addi_i32(addr, addr, 4);
6083 tmp = load_reg(s, rt2);
6084 gen_st32(tmp, addr, IS_USER(s));
6085 }
6086 tcg_gen_movi_i32(cpu_R[rd], 0);
6087 tcg_gen_br(done_label);
6088 gen_set_label(fail_label);
6089 tcg_gen_movi_i32(cpu_R[rd], 1);
6090 gen_set_label(done_label);
6091 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6092 }
6093 #endif
6094
6095 static void disas_arm_insn(CPUState * env, DisasContext *s)
6096 {
6097 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6098 TCGv tmp;
6099 TCGv tmp2;
6100 TCGv tmp3;
6101 TCGv addr;
6102 TCGv_i64 tmp64;
6103
6104 insn = ldl_code(s->pc);
6105 s->pc += 4;
6106
6107 /* M variants do not implement ARM mode. */
6108 if (IS_M(env))
6109 goto illegal_op;
6110 cond = insn >> 28;
6111 if (cond == 0xf) {
6112 /* Unconditional instructions. */
6113 if (((insn >> 25) & 7) == 1) {
6114 /* NEON Data processing. */
6115 if (!arm_feature(env, ARM_FEATURE_NEON))
6116 goto illegal_op;
6117
6118 if (disas_neon_data_insn(env, s, insn))
6119 goto illegal_op;
6120 return;
6121 }
6122 if ((insn & 0x0f100000) == 0x04000000) {
6123 /* NEON load/store. */
6124 if (!arm_feature(env, ARM_FEATURE_NEON))
6125 goto illegal_op;
6126
6127 if (disas_neon_ls_insn(env, s, insn))
6128 goto illegal_op;
6129 return;
6130 }
6131 if (((insn & 0x0f30f000) == 0x0510f000) ||
6132 ((insn & 0x0f30f010) == 0x0710f000)) {
6133 if ((insn & (1 << 22)) == 0) {
6134 /* PLDW; v7MP */
6135 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6136 goto illegal_op;
6137 }
6138 }
6139 /* Otherwise PLD; v5TE+ */
6140 return;
6141 }
6142 if (((insn & 0x0f70f000) == 0x0450f000) ||
6143 ((insn & 0x0f70f010) == 0x0650f000)) {
6144 ARCH(7);
6145 return; /* PLI; V7 */
6146 }
6147 if (((insn & 0x0f700000) == 0x04100000) ||
6148 ((insn & 0x0f700010) == 0x06100000)) {
6149 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6150 goto illegal_op;
6151 }
6152 return; /* v7MP: Unallocated memory hint: must NOP */
6153 }
6154
6155 if ((insn & 0x0ffffdff) == 0x01010000) {
6156 ARCH(6);
6157 /* setend */
6158 if (insn & (1 << 9)) {
6159 /* BE8 mode not implemented. */
6160 goto illegal_op;
6161 }
6162 return;
6163 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6164 switch ((insn >> 4) & 0xf) {
6165 case 1: /* clrex */
6166 ARCH(6K);
6167 gen_clrex(s);
6168 return;
6169 case 4: /* dsb */
6170 case 5: /* dmb */
6171 case 6: /* isb */
6172 ARCH(7);
6173 /* We don't emulate caches so these are a no-op. */
6174 return;
6175 default:
6176 goto illegal_op;
6177 }
6178 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6179 /* srs */
6180 int32_t offset;
6181 if (IS_USER(s))
6182 goto illegal_op;
6183 ARCH(6);
6184 op1 = (insn & 0x1f);
6185 addr = new_tmp();
6186 tmp = tcg_const_i32(op1);
6187 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6188 tcg_temp_free_i32(tmp);
6189 i = (insn >> 23) & 3;
6190 switch (i) {
6191 case 0: offset = -4; break; /* DA */
6192 case 1: offset = 0; break; /* IA */
6193 case 2: offset = -8; break; /* DB */
6194 case 3: offset = 4; break; /* IB */
6195 default: abort();
6196 }
6197 if (offset)
6198 tcg_gen_addi_i32(addr, addr, offset);
6199 tmp = load_reg(s, 14);
6200 gen_st32(tmp, addr, 0);
6201 tmp = load_cpu_field(spsr);
6202 tcg_gen_addi_i32(addr, addr, 4);
6203 gen_st32(tmp, addr, 0);
6204 if (insn & (1 << 21)) {
6205 /* Base writeback. */
6206 switch (i) {
6207 case 0: offset = -8; break;
6208 case 1: offset = 4; break;
6209 case 2: offset = -4; break;
6210 case 3: offset = 0; break;
6211 default: abort();
6212 }
6213 if (offset)
6214 tcg_gen_addi_i32(addr, addr, offset);
6215 tmp = tcg_const_i32(op1);
6216 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6217 tcg_temp_free_i32(tmp);
6218 dead_tmp(addr);
6219 } else {
6220 dead_tmp(addr);
6221 }
6222 return;
6223 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6224 /* rfe */
6225 int32_t offset;
6226 if (IS_USER(s))
6227 goto illegal_op;
6228 ARCH(6);
6229 rn = (insn >> 16) & 0xf;
6230 addr = load_reg(s, rn);
6231 i = (insn >> 23) & 3;
6232 switch (i) {
6233 case 0: offset = -4; break; /* DA */
6234 case 1: offset = 0; break; /* IA */
6235 case 2: offset = -8; break; /* DB */
6236 case 3: offset = 4; break; /* IB */
6237 default: abort();
6238 }
6239 if (offset)
6240 tcg_gen_addi_i32(addr, addr, offset);
6241 /* Load PC into tmp and CPSR into tmp2. */
6242 tmp = gen_ld32(addr, 0);
6243 tcg_gen_addi_i32(addr, addr, 4);
6244 tmp2 = gen_ld32(addr, 0);
6245 if (insn & (1 << 21)) {
6246 /* Base writeback. */
6247 switch (i) {
6248 case 0: offset = -8; break;
6249 case 1: offset = 4; break;
6250 case 2: offset = -4; break;
6251 case 3: offset = 0; break;
6252 default: abort();
6253 }
6254 if (offset)
6255 tcg_gen_addi_i32(addr, addr, offset);
6256 store_reg(s, rn, addr);
6257 } else {
6258 dead_tmp(addr);
6259 }
6260 gen_rfe(s, tmp, tmp2);
6261 return;
6262 } else if ((insn & 0x0e000000) == 0x0a000000) {
6263 /* branch link and change to thumb (blx <offset>) */
6264 int32_t offset;
6265
6266 val = (uint32_t)s->pc;
6267 tmp = new_tmp();
6268 tcg_gen_movi_i32(tmp, val);
6269 store_reg(s, 14, tmp);
6270 /* Sign-extend the 24-bit offset */
6271 offset = (((int32_t)insn) << 8) >> 8;
6272 /* offset * 4 + bit24 * 2 + (thumb bit) */
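    /* Bit 24 (H) contributes the extra halfword of offset, and the
       trailing 1 tells gen_bx_im() that the destination is Thumb
       state. */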
6273 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6274 /* pipeline offset */
6275 val += 4;
6276 gen_bx_im(s, val);
6277 return;
6278 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6279 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6280 /* iWMMXt register transfer. */
6281 if (env->cp15.c15_cpar & (1 << 1))
6282 if (!disas_iwmmxt_insn(env, s, insn))
6283 return;
6284 }
6285 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6286 /* Coprocessor double register transfer. */
6287 } else if ((insn & 0x0f000010) == 0x0e000010) {
6288 /* Additional coprocessor register transfer. */
6289 } else if ((insn & 0x0ff10020) == 0x01000000) {
6290 uint32_t mask;
6291 uint32_t val;
6292 /* cps (privileged) */
6293 if (IS_USER(s))
6294 return;
6295 mask = val = 0;
6296 if (insn & (1 << 19)) {
6297 if (insn & (1 << 8))
6298 mask |= CPSR_A;
6299 if (insn & (1 << 7))
6300 mask |= CPSR_I;
6301 if (insn & (1 << 6))
6302 mask |= CPSR_F;
6303 if (insn & (1 << 18))
6304 val |= mask;
6305 }
6306 if (insn & (1 << 17)) {
6307 mask |= CPSR_M;
6308 val |= (insn & 0x1f);
6309 }
6310 if (mask) {
6311 gen_set_psr_im(s, mask, 0, val);
6312 }
6313 return;
6314 }
6315 goto illegal_op;
6316 }
6317 if (cond != 0xe) {
6318 /* if not always execute, we generate a conditional jump to
6319 next instruction */
6320 s->condlabel = gen_new_label();
6321 gen_test_cc(cond ^ 1, s->condlabel);
6322 s->condjmp = 1;
6323 }
6324 if ((insn & 0x0f900000) == 0x03000000) {
6325 if ((insn & (1 << 21)) == 0) {
6326 ARCH(6T2);
6327 rd = (insn >> 12) & 0xf;
6328 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6329 if ((insn & (1 << 22)) == 0) {
6330 /* MOVW */
6331 tmp = new_tmp();
6332 tcg_gen_movi_i32(tmp, val);
6333 } else {
6334 /* MOVT */
6335 tmp = load_reg(s, rd);
6336 tcg_gen_ext16u_i32(tmp, tmp);
6337 tcg_gen_ori_i32(tmp, tmp, val << 16);
6338 }
6339 store_reg(s, rd, tmp);
6340 } else {
6341 if (((insn >> 12) & 0xf) != 0xf)
6342 goto illegal_op;
6343 if (((insn >> 16) & 0xf) == 0) {
6344 gen_nop_hint(s, insn & 0xff);
6345 } else {
6346 /* CPSR = immediate */
6347 val = insn & 0xff;
6348 shift = ((insn >> 8) & 0xf) * 2;
6349 if (shift)
6350 val = (val >> shift) | (val << (32 - shift));
6351 i = ((insn & (1 << 22)) != 0);
6352 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6353 goto illegal_op;
6354 }
6355 }
6356 } else if ((insn & 0x0f900000) == 0x01000000
6357 && (insn & 0x00000090) != 0x00000090) {
6358 /* miscellaneous instructions */
6359 op1 = (insn >> 21) & 3;
6360 sh = (insn >> 4) & 0xf;
6361 rm = insn & 0xf;
6362 switch (sh) {
6363 case 0x0: /* move program status register */
6364 if (op1 & 1) {
6365 /* PSR = reg */
6366 tmp = load_reg(s, rm);
6367 i = ((op1 & 2) != 0);
6368 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6369 goto illegal_op;
6370 } else {
6371 /* reg = PSR */
6372 rd = (insn >> 12) & 0xf;
6373 if (op1 & 2) {
6374 if (IS_USER(s))
6375 goto illegal_op;
6376 tmp = load_cpu_field(spsr);
6377 } else {
6378 tmp = new_tmp();
6379 gen_helper_cpsr_read(tmp);
6380 }
6381 store_reg(s, rd, tmp);
6382 }
6383 break;
6384 case 0x1:
6385 if (op1 == 1) {
6386 /* branch/exchange thumb (bx). */
6387 tmp = load_reg(s, rm);
6388 gen_bx(s, tmp);
6389 } else if (op1 == 3) {
6390 /* clz */
6391 rd = (insn >> 12) & 0xf;
6392 tmp = load_reg(s, rm);
6393 gen_helper_clz(tmp, tmp);
6394 store_reg(s, rd, tmp);
6395 } else {
6396 goto illegal_op;
6397 }
6398 break;
6399 case 0x2:
6400 if (op1 == 1) {
6401 ARCH(5J); /* bxj */
6402 /* Trivial implementation equivalent to bx. */
6403 tmp = load_reg(s, rm);
6404 gen_bx(s, tmp);
6405 } else {
6406 goto illegal_op;
6407 }
6408 break;
6409 case 0x3:
6410 if (op1 != 1)
6411 goto illegal_op;
6412
6413 /* branch link/exchange thumb (blx) */
6414 tmp = load_reg(s, rm);
6415 tmp2 = new_tmp();
6416 tcg_gen_movi_i32(tmp2, s->pc);
6417 store_reg(s, 14, tmp2);
6418 gen_bx(s, tmp);
6419 break;
6420 case 0x5: /* saturating add/subtract */
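            /* QADD/QSUB/QDADD/QDSUB: op1 bit 1 selects the doubling forms,
               bit 0 selects subtract; e.g. QDADD rd, rm, rn computes
               sat(rm + sat(2 * rn)), with the helpers setting the Q flag
               on saturation. */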
6421 rd = (insn >> 12) & 0xf;
6422 rn = (insn >> 16) & 0xf;
6423 tmp = load_reg(s, rm);
6424 tmp2 = load_reg(s, rn);
6425 if (op1 & 2)
6426 gen_helper_double_saturate(tmp2, tmp2);
6427 if (op1 & 1)
6428 gen_helper_sub_saturate(tmp, tmp, tmp2);
6429 else
6430 gen_helper_add_saturate(tmp, tmp, tmp2);
6431 dead_tmp(tmp2);
6432 store_reg(s, rd, tmp);
6433 break;
6434 case 7:
6435 /* SMC instruction (op1 == 3)
6436 and undefined instructions (op1 == 0 || op1 == 2)
6437 will trap */
6438 if (op1 != 1) {
6439 goto illegal_op;
6440 }
6441 /* bkpt */
6442 gen_exception_insn(s, 4, EXCP_BKPT);
6443 break;
6444 case 0x8: /* signed multiply */
6445 case 0xa:
6446 case 0xc:
6447 case 0xe:
6448 rs = (insn >> 8) & 0xf;
6449 rn = (insn >> 12) & 0xf;
6450 rd = (insn >> 16) & 0xf;
6451 if (op1 == 1) {
6452 /* (32 * 16) >> 16 */
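                /* SMULW<y>/SMLAW<y>: the product is formed in 64 bits and
                   the low 16 bits discarded, e.g. (illustrative)
                   0x10000 * 0x4000 >> 16 = 0x4000. */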
6453 tmp = load_reg(s, rm);
6454 tmp2 = load_reg(s, rs);
6455 if (sh & 4)
6456 tcg_gen_sari_i32(tmp2, tmp2, 16);
6457 else
6458 gen_sxth(tmp2);
6459 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6460 tcg_gen_shri_i64(tmp64, tmp64, 16);
6461 tmp = new_tmp();
6462 tcg_gen_trunc_i64_i32(tmp, tmp64);
6463 tcg_temp_free_i64(tmp64);
6464 if ((sh & 2) == 0) {
6465 tmp2 = load_reg(s, rn);
6466 gen_helper_add_setq(tmp, tmp, tmp2);
6467 dead_tmp(tmp2);
6468 }
6469 store_reg(s, rd, tmp);
6470 } else {
6471 /* 16 * 16 */
6472 tmp = load_reg(s, rm);
6473 tmp2 = load_reg(s, rs);
6474 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6475 dead_tmp(tmp2);
6476 if (op1 == 2) {
6477 tmp64 = tcg_temp_new_i64();
6478 tcg_gen_ext_i32_i64(tmp64, tmp);
6479 dead_tmp(tmp);
6480 gen_addq(s, tmp64, rn, rd);
6481 gen_storeq_reg(s, rn, rd, tmp64);
6482 tcg_temp_free_i64(tmp64);
6483 } else {
6484 if (op1 == 0) {
6485 tmp2 = load_reg(s, rn);
6486 gen_helper_add_setq(tmp, tmp, tmp2);
6487 dead_tmp(tmp2);
6488 }
6489 store_reg(s, rd, tmp);
6490 }
6491 }
6492 break;
6493 default:
6494 goto illegal_op;
6495 }
6496 } else if (((insn & 0x0e000000) == 0 &&
6497 (insn & 0x00000090) != 0x90) ||
6498 ((insn & 0x0e000000) == (1 << 25))) {
6499 int set_cc, logic_cc, shiftop;
6500
6501 op1 = (insn >> 21) & 0xf;
6502 set_cc = (insn >> 20) & 1;
6503 logic_cc = table_logic_cc[op1] & set_cc;
6504
6505 /* data processing instruction */
6506 if (insn & (1 << 25)) {
6507 /* immediate operand */
6508 val = insn & 0xff;
6509 shift = ((insn >> 8) & 0xf) * 2;
6510 if (shift) {
6511 val = (val >> shift) | (val << (32 - shift));
6512 }
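            /* Illustrative: imm12 = 0x4ff encodes 0xff rotated right by
               2 * 4 = 8 bits, i.e. 0xff000000; for a flag-setting logical
               op the rotated-in bit 31 becomes the carry (below). */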
6513 tmp2 = new_tmp();
6514 tcg_gen_movi_i32(tmp2, val);
6515 if (logic_cc && shift) {
6516 gen_set_CF_bit31(tmp2);
6517 }
6518 } else {
6519 /* register */
6520 rm = (insn) & 0xf;
6521 tmp2 = load_reg(s, rm);
6522 shiftop = (insn >> 5) & 3;
6523 if (!(insn & (1 << 4))) {
6524 shift = (insn >> 7) & 0x1f;
6525 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6526 } else {
6527 rs = (insn >> 8) & 0xf;
6528 tmp = load_reg(s, rs);
6529 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6530 }
6531 }
6532 if (op1 != 0x0f && op1 != 0x0d) {
6533 rn = (insn >> 16) & 0xf;
6534 tmp = load_reg(s, rn);
6535 } else {
6536 TCGV_UNUSED(tmp);
6537 }
6538 rd = (insn >> 12) & 0xf;
6539 switch (op1) {
6540 case 0x00:
6541 tcg_gen_and_i32(tmp, tmp, tmp2);
6542 if (logic_cc) {
6543 gen_logic_CC(tmp);
6544 }
6545 store_reg_bx(env, s, rd, tmp);
6546 break;
6547 case 0x01:
6548 tcg_gen_xor_i32(tmp, tmp, tmp2);
6549 if (logic_cc) {
6550 gen_logic_CC(tmp);
6551 }
6552 store_reg_bx(env, s, rd, tmp);
6553 break;
6554 case 0x02:
6555 if (set_cc && rd == 15) {
6556 /* SUBS r15, ... is used for exception return. */
6557 if (IS_USER(s)) {
6558 goto illegal_op;
6559 }
6560 gen_helper_sub_cc(tmp, tmp, tmp2);
6561 gen_exception_return(s, tmp);
6562 } else {
6563 if (set_cc) {
6564 gen_helper_sub_cc(tmp, tmp, tmp2);
6565 } else {
6566 tcg_gen_sub_i32(tmp, tmp, tmp2);
6567 }
6568 store_reg_bx(env, s, rd, tmp);
6569 }
6570 break;
6571 case 0x03:
6572 if (set_cc) {
6573 gen_helper_sub_cc(tmp, tmp2, tmp);
6574 } else {
6575 tcg_gen_sub_i32(tmp, tmp2, tmp);
6576 }
6577 store_reg_bx(env, s, rd, tmp);
6578 break;
6579 case 0x04:
6580 if (set_cc) {
6581 gen_helper_add_cc(tmp, tmp, tmp2);
6582 } else {
6583 tcg_gen_add_i32(tmp, tmp, tmp2);
6584 }
6585 store_reg_bx(env, s, rd, tmp);
6586 break;
6587 case 0x05:
6588 if (set_cc) {
6589 gen_helper_adc_cc(tmp, tmp, tmp2);
6590 } else {
6591 gen_add_carry(tmp, tmp, tmp2);
6592 }
6593 store_reg_bx(env, s, rd, tmp);
6594 break;
6595 case 0x06:
6596 if (set_cc) {
6597 gen_helper_sbc_cc(tmp, tmp, tmp2);
6598 } else {
6599 gen_sub_carry(tmp, tmp, tmp2);
6600 }
6601 store_reg_bx(env, s, rd, tmp);
6602 break;
6603 case 0x07:
6604 if (set_cc) {
6605 gen_helper_sbc_cc(tmp, tmp2, tmp);
6606 } else {
6607 gen_sub_carry(tmp, tmp2, tmp);
6608 }
6609 store_reg_bx(env, s, rd, tmp);
6610 break;
6611 case 0x08:
6612 if (set_cc) {
6613 tcg_gen_and_i32(tmp, tmp, tmp2);
6614 gen_logic_CC(tmp);
6615 }
6616 dead_tmp(tmp);
6617 break;
6618 case 0x09:
6619 if (set_cc) {
6620 tcg_gen_xor_i32(tmp, tmp, tmp2);
6621 gen_logic_CC(tmp);
6622 }
6623 dead_tmp(tmp);
6624 break;
6625 case 0x0a:
6626 if (set_cc) {
6627 gen_helper_sub_cc(tmp, tmp, tmp2);
6628 }
6629 dead_tmp(tmp);
6630 break;
6631 case 0x0b:
6632 if (set_cc) {
6633 gen_helper_add_cc(tmp, tmp, tmp2);
6634 }
6635 dead_tmp(tmp);
6636 break;
6637 case 0x0c:
6638 tcg_gen_or_i32(tmp, tmp, tmp2);
6639 if (logic_cc) {
6640 gen_logic_CC(tmp);
6641 }
6642 store_reg_bx(env, s, rd, tmp);
6643 break;
6644 case 0x0d:
6645 if (logic_cc && rd == 15) {
6646 /* MOVS r15, ... is used for exception return. */
6647 if (IS_USER(s)) {
6648 goto illegal_op;
6649 }
6650 gen_exception_return(s, tmp2);
6651 } else {
6652 if (logic_cc) {
6653 gen_logic_CC(tmp2);
6654 }
6655 store_reg_bx(env, s, rd, tmp2);
6656 }
6657 break;
6658 case 0x0e:
6659 tcg_gen_andc_i32(tmp, tmp, tmp2);
6660 if (logic_cc) {
6661 gen_logic_CC(tmp);
6662 }
6663 store_reg_bx(env, s, rd, tmp);
6664 break;
6665 default:
6666 case 0x0f:
6667 tcg_gen_not_i32(tmp2, tmp2);
6668 if (logic_cc) {
6669 gen_logic_CC(tmp2);
6670 }
6671 store_reg_bx(env, s, rd, tmp2);
6672 break;
6673 }
6674 if (op1 != 0x0f && op1 != 0x0d) {
6675 dead_tmp(tmp2);
6676 }
6677 } else {
6678 /* other instructions */
6679 op1 = (insn >> 24) & 0xf;
6680 switch (op1) {
6681 case 0x0:
6682 case 0x1:
6683 /* multiplies, extra load/stores */
6684 sh = (insn >> 5) & 3;
6685 if (sh == 0) {
6686 if (op1 == 0x0) {
6687 rd = (insn >> 16) & 0xf;
6688 rn = (insn >> 12) & 0xf;
6689 rs = (insn >> 8) & 0xf;
6690 rm = (insn) & 0xf;
6691 op1 = (insn >> 20) & 0xf;
6692 switch (op1) {
6693 case 0: case 1: case 2: case 3: case 6:
6694 /* 32 bit mul */
6695 tmp = load_reg(s, rs);
6696 tmp2 = load_reg(s, rm);
6697 tcg_gen_mul_i32(tmp, tmp, tmp2);
6698 dead_tmp(tmp2);
6699 if (insn & (1 << 22)) {
6700 /* Subtract (mls) */
6701 ARCH(6T2);
6702 tmp2 = load_reg(s, rn);
6703 tcg_gen_sub_i32(tmp, tmp2, tmp);
6704 dead_tmp(tmp2);
6705 } else if (insn & (1 << 21)) {
6706 /* Add */
6707 tmp2 = load_reg(s, rn);
6708 tcg_gen_add_i32(tmp, tmp, tmp2);
6709 dead_tmp(tmp2);
6710 }
6711 if (insn & (1 << 20))
6712 gen_logic_CC(tmp);
6713 store_reg(s, rd, tmp);
6714 break;
6715 case 4:
6716 /* 64 bit mul double accumulate (UMAAL) */
6717 ARCH(6);
6718 tmp = load_reg(s, rs);
6719 tmp2 = load_reg(s, rm);
6720 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6721 gen_addq_lo(s, tmp64, rn);
6722 gen_addq_lo(s, tmp64, rd);
6723 gen_storeq_reg(s, rn, rd, tmp64);
6724 tcg_temp_free_i64(tmp64);
6725 break;
6726 case 8: case 9: case 10: case 11:
6727 case 12: case 13: case 14: case 15:
6728 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6729 tmp = load_reg(s, rs);
6730 tmp2 = load_reg(s, rm);
6731 if (insn & (1 << 22)) {
6732 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6733 } else {
6734 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6735 }
6736 if (insn & (1 << 21)) { /* mult accumulate */
6737 gen_addq(s, tmp64, rn, rd);
6738 }
6739 if (insn & (1 << 20)) {
6740 gen_logicq_cc(tmp64);
6741 }
6742 gen_storeq_reg(s, rn, rd, tmp64);
6743 tcg_temp_free_i64(tmp64);
6744 break;
6745 default:
6746 goto illegal_op;
6747 }
6748 } else {
6749 rn = (insn >> 16) & 0xf;
6750 rd = (insn >> 12) & 0xf;
6751 if (insn & (1 << 23)) {
6752 /* load/store exclusive */
6753 op1 = (insn >> 21) & 0x3;
6754 if (op1)
6755 ARCH(6K);
6756 else
6757 ARCH(6);
6758 addr = tcg_temp_local_new_i32();
6759 load_reg_var(s, addr, rn);
6760 if (insn & (1 << 20)) {
6761 switch (op1) {
6762 case 0: /* ldrex */
6763 gen_load_exclusive(s, rd, 15, addr, 2);
6764 break;
6765 case 1: /* ldrexd */
6766 gen_load_exclusive(s, rd, rd + 1, addr, 3);
6767 break;
6768 case 2: /* ldrexb */
6769 gen_load_exclusive(s, rd, 15, addr, 0);
6770 break;
6771 case 3: /* ldrexh */
6772 gen_load_exclusive(s, rd, 15, addr, 1);
6773 break;
6774 default:
6775 abort();
6776 }
6777 } else {
6778 rm = insn & 0xf;
6779 switch (op1) {
6780 case 0: /* strex */
6781 gen_store_exclusive(s, rd, rm, 15, addr, 2);
6782 break;
6783 case 1: /* strexd */
6784 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
6785 break;
6786 case 2: /* strexb */
6787 gen_store_exclusive(s, rd, rm, 15, addr, 0);
6788 break;
6789 case 3: /* strexh */
6790 gen_store_exclusive(s, rd, rm, 15, addr, 1);
6791 break;
6792 default:
6793 abort();
6794 }
6795 }
6796 tcg_temp_free(addr);
6797 } else {
6798 /* SWP instruction */
6799 rm = (insn) & 0xf;
6800
6801 /* ??? This is not really atomic. However we know
6802 we never have multiple CPUs running in parallel,
6803 so it is good enough. */
6804 addr = load_reg(s, rn);
6805 tmp = load_reg(s, rm);
6806 if (insn & (1 << 22)) {
6807 tmp2 = gen_ld8u(addr, IS_USER(s));
6808 gen_st8(tmp, addr, IS_USER(s));
6809 } else {
6810 tmp2 = gen_ld32(addr, IS_USER(s));
6811 gen_st32(tmp, addr, IS_USER(s));
6812 }
6813 dead_tmp(addr);
6814 store_reg(s, rd, tmp2);
6815 }
6816 }
6817 } else {
6818 int address_offset;
6819 int load;
6820 /* Misc load/store */
6821 rn = (insn >> 16) & 0xf;
6822 rd = (insn >> 12) & 0xf;
6823 addr = load_reg(s, rn);
6824 if (insn & (1 << 24))
6825 gen_add_datah_offset(s, insn, 0, addr);
6826 address_offset = 0;
6827 if (insn & (1 << 20)) {
6828 /* load */
6829 switch (sh) {
6830 case 1:
6831 tmp = gen_ld16u(addr, IS_USER(s));
6832 break;
6833 case 2:
6834 tmp = gen_ld8s(addr, IS_USER(s));
6835 break;
6836 default:
6837 case 3:
6838 tmp = gen_ld16s(addr, IS_USER(s));
6839 break;
6840 }
6841 load = 1;
6842 } else if (sh & 2) {
6843 /* doubleword */
6844 if (sh & 1) {
6845 /* store */
6846 tmp = load_reg(s, rd);
6847 gen_st32(tmp, addr, IS_USER(s));
6848 tcg_gen_addi_i32(addr, addr, 4);
6849 tmp = load_reg(s, rd + 1);
6850 gen_st32(tmp, addr, IS_USER(s));
6851 load = 0;
6852 } else {
6853 /* load */
6854 tmp = gen_ld32(addr, IS_USER(s));
6855 store_reg(s, rd, tmp);
6856 tcg_gen_addi_i32(addr, addr, 4);
6857 tmp = gen_ld32(addr, IS_USER(s));
6858 rd++;
6859 load = 1;
6860 }
6861 address_offset = -4;
6862 } else {
6863 /* store */
6864 tmp = load_reg(s, rd);
6865 gen_st16(tmp, addr, IS_USER(s));
6866 load = 0;
6867 }
6868 /* Perform base writeback before the loaded value to
6869 ensure correct behavior with overlapping index registers.
6870 ldrd with base writeback is undefined if the
6871 destination and index registers overlap. */
6872 if (!(insn & (1 << 24))) {
6873 gen_add_datah_offset(s, insn, address_offset, addr);
6874 store_reg(s, rn, addr);
6875 } else if (insn & (1 << 21)) {
6876 if (address_offset)
6877 tcg_gen_addi_i32(addr, addr, address_offset);
6878 store_reg(s, rn, addr);
6879 } else {
6880 dead_tmp(addr);
6881 }
6882 if (load) {
6883 /* Complete the load. */
6884 store_reg(s, rd, tmp);
6885 }
6886 }
6887 break;
6888 case 0x4:
6889 case 0x5:
6890 goto do_ldst;
6891 case 0x6:
6892 case 0x7:
6893 if (insn & (1 << 4)) {
6894 ARCH(6);
6895 /* ARMv6 media instructions. */
6896 rm = insn & 0xf;
6897 rn = (insn >> 16) & 0xf;
6898 rd = (insn >> 12) & 0xf;
6899 rs = (insn >> 8) & 0xf;
6900 switch ((insn >> 23) & 3) {
6901 case 0: /* Parallel add/subtract. */
6902 op1 = (insn >> 20) & 7;
6903 tmp = load_reg(s, rn);
6904 tmp2 = load_reg(s, rm);
6905 sh = (insn >> 5) & 7;
6906 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6907 goto illegal_op;
6908 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6909 dead_tmp(tmp2);
6910 store_reg(s, rd, tmp);
6911 break;
6912 case 1:
6913 if ((insn & 0x00700020) == 0) {
6914 /* Halfword pack. */
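                    /* Illustrative: pkhbt rd, rn, rm, lsl #4 yields
                       (rn & 0xffff) | ((rm << 4) & 0xffff0000), while
                       pkhtb keeps the top half of rn and takes an
                       arithmetically shifted low half from rm. */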
6915 tmp = load_reg(s, rn);
6916 tmp2 = load_reg(s, rm);
6917 shift = (insn >> 7) & 0x1f;
6918 if (insn & (1 << 6)) {
6919 /* pkhtb */
6920 if (shift == 0)
6921 shift = 31;
6922 tcg_gen_sari_i32(tmp2, tmp2, shift);
6923 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6924 tcg_gen_ext16u_i32(tmp2, tmp2);
6925 } else {
6926 /* pkhbt */
6927 if (shift)
6928 tcg_gen_shli_i32(tmp2, tmp2, shift);
6929 tcg_gen_ext16u_i32(tmp, tmp);
6930 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6931 }
6932 tcg_gen_or_i32(tmp, tmp, tmp2);
6933 dead_tmp(tmp2);
6934 store_reg(s, rd, tmp);
6935 } else if ((insn & 0x00200020) == 0x00200000) {
6936 /* [us]sat */
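                    /* sh is the saturation bit position, e.g. usat
                       r0, #8, r1 clamps r1 to [0, 255]; the usat/ssat
                       helpers also set the Q flag when saturation occurs. */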
6937 tmp = load_reg(s, rm);
6938 shift = (insn >> 7) & 0x1f;
6939 if (insn & (1 << 6)) {
6940 if (shift == 0)
6941 shift = 31;
6942 tcg_gen_sari_i32(tmp, tmp, shift);
6943 } else {
6944 tcg_gen_shli_i32(tmp, tmp, shift);
6945 }
6946 sh = (insn >> 16) & 0x1f;
6947 tmp2 = tcg_const_i32(sh);
6948 if (insn & (1 << 22))
6949 gen_helper_usat(tmp, tmp, tmp2);
6950 else
6951 gen_helper_ssat(tmp, tmp, tmp2);
6952 tcg_temp_free_i32(tmp2);
6953 store_reg(s, rd, tmp);
6954 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6955 /* [us]sat16 */
6956 tmp = load_reg(s, rm);
6957 sh = (insn >> 16) & 0x1f;
6958 tmp2 = tcg_const_i32(sh);
6959 if (insn & (1 << 22))
6960 gen_helper_usat16(tmp, tmp, tmp2);
6961 else
6962 gen_helper_ssat16(tmp, tmp, tmp2);
6963 tcg_temp_free_i32(tmp2);
6964 store_reg(s, rd, tmp);
6965 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6966 /* Select bytes. */
6967 tmp = load_reg(s, rn);
6968 tmp2 = load_reg(s, rm);
6969 tmp3 = new_tmp();
6970 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6971 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6972 dead_tmp(tmp3);
6973 dead_tmp(tmp2);
6974 store_reg(s, rd, tmp);
6975 } else if ((insn & 0x000003e0) == 0x00000060) {
6976 tmp = load_reg(s, rm);
6977 shift = (insn >> 10) & 3;
6978 /* ??? In many cases it's not necessary to do a
6979 rotate, a shift is sufficient. */
6980 if (shift != 0)
6981 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
6982 op1 = (insn >> 20) & 7;
6983 switch (op1) {
6984 case 0: gen_sxtb16(tmp); break;
6985 case 2: gen_sxtb(tmp); break;
6986 case 3: gen_sxth(tmp); break;
6987 case 4: gen_uxtb16(tmp); break;
6988 case 6: gen_uxtb(tmp); break;
6989 case 7: gen_uxth(tmp); break;
6990 default: goto illegal_op;
6991 }
6992 if (rn != 15) {
6993 tmp2 = load_reg(s, rn);
6994 if ((op1 & 3) == 0) {
6995 gen_add16(tmp, tmp2);
6996 } else {
6997 tcg_gen_add_i32(tmp, tmp, tmp2);
6998 dead_tmp(tmp2);
6999 }
7000 }
7001 store_reg(s, rd, tmp);
7002 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7003 /* rev */
7004 tmp = load_reg(s, rm);
7005 if (insn & (1 << 22)) {
7006 if (insn & (1 << 7)) {
7007 gen_revsh(tmp);
7008 } else {
7009 ARCH(6T2);
7010 gen_helper_rbit(tmp, tmp);
7011 }
7012 } else {
7013 if (insn & (1 << 7))
7014 gen_rev16(tmp);
7015 else
7016 tcg_gen_bswap32_i32(tmp, tmp);
7017 }
7018 store_reg(s, rd, tmp);
7019 } else {
7020 goto illegal_op;
7021 }
7022 break;
7023 case 2: /* Multiplies (Type 3). */
7024 tmp = load_reg(s, rm);
7025 tmp2 = load_reg(s, rs);
7026 if (insn & (1 << 20)) {
7027 /* Signed multiply most significant [accumulate].
7028 (SMMUL, SMMLA, SMMLS) */
7029 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7030
7031 if (rd != 15) {
7032 tmp = load_reg(s, rd);
7033 if (insn & (1 << 6)) {
7034 tmp64 = gen_subq_msw(tmp64, tmp);
7035 } else {
7036 tmp64 = gen_addq_msw(tmp64, tmp);
7037 }
7038 }
7039 if (insn & (1 << 5)) {
7040 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7041 }
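                /* The R variants (SMMULR etc.) add 0x80000000, i.e. half
                   of the discarded low word, before the high 32 bits are
                   extracted: round-to-nearest rather than truncation. */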
7042 tcg_gen_shri_i64(tmp64, tmp64, 32);
7043 tmp = new_tmp();
7044 tcg_gen_trunc_i64_i32(tmp, tmp64);
7045 tcg_temp_free_i64(tmp64);
7046 store_reg(s, rn, tmp);
7047 } else {
7048 if (insn & (1 << 5))
7049 gen_swap_half(tmp2);
7050 gen_smul_dual(tmp, tmp2);
7051 /* This addition cannot overflow. */
7052 if (insn & (1 << 6)) {
7053 tcg_gen_sub_i32(tmp, tmp, tmp2);
7054 } else {
7055 tcg_gen_add_i32(tmp, tmp, tmp2);
7056 }
7057 dead_tmp(tmp2);
7058 if (insn & (1 << 22)) {
7059 /* smlald, smlsld */
7060 tmp64 = tcg_temp_new_i64();
7061 tcg_gen_ext_i32_i64(tmp64, tmp);
7062 dead_tmp(tmp);
7063 gen_addq(s, tmp64, rd, rn);
7064 gen_storeq_reg(s, rd, rn, tmp64);
7065 tcg_temp_free_i64(tmp64);
7066 } else {
7067 /* smuad, smusd, smlad, smlsd */
7068 if (rd != 15)
7069 {
7070 tmp2 = load_reg(s, rd);
7071 gen_helper_add_setq(tmp, tmp, tmp2);
7072 dead_tmp(tmp2);
7073 }
7074 store_reg(s, rn, tmp);
7075 }
7076 }
7077 break;
7078 case 3:
7079 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7080 switch (op1) {
7081 case 0: /* Unsigned sum of absolute differences. */
7082 ARCH(6);
7083 tmp = load_reg(s, rm);
7084 tmp2 = load_reg(s, rs);
7085 gen_helper_usad8(tmp, tmp, tmp2);
7086 dead_tmp(tmp2);
7087 if (rd != 15) {
7088 tmp2 = load_reg(s, rd);
7089 tcg_gen_add_i32(tmp, tmp, tmp2);
7090 dead_tmp(tmp2);
7091 }
7092 store_reg(s, rn, tmp);
7093 break;
7094 case 0x20: case 0x24: case 0x28: case 0x2c:
7095 /* Bitfield insert/clear. */
7096 ARCH(6T2);
7097 shift = (insn >> 7) & 0x1f;
7098 i = (insn >> 16) & 0x1f;
7099 i = i + 1 - shift;
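                /* shift is the lsb and (insn >> 16) & 0x1f the msb, so i
                   is the field width: e.g. bfi r0, r1, #8, #4 gives i = 4
                   and inserts r1[3:0] into r0[11:8]; rm == 15 encodes bfc,
                   inserting zeros. */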
7100 if (rm == 15) {
7101 tmp = new_tmp();
7102 tcg_gen_movi_i32(tmp, 0);
7103 } else {
7104 tmp = load_reg(s, rm);
7105 }
7106 if (i != 32) {
7107 tmp2 = load_reg(s, rd);
7108 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7109 dead_tmp(tmp2);
7110 }
7111 store_reg(s, rd, tmp);
7112 break;
7113 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7114 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7115 ARCH(6T2);
7116 tmp = load_reg(s, rm);
7117 shift = (insn >> 7) & 0x1f;
7118 i = ((insn >> 16) & 0x1f) + 1;
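                /* shift = lsb and i = width (the encoded field is
                   width - 1, hence the + 1): e.g. ubfx r0, r1, #4, #8
                   extracts r1[11:4] zero-extended into r0. */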
7119 if (shift + i > 32)
7120 goto illegal_op;
7121 if (i < 32) {
7122 if (op1 & 0x20) {
7123 gen_ubfx(tmp, shift, (1u << i) - 1);
7124 } else {
7125 gen_sbfx(tmp, shift, i);
7126 }
7127 }
7128 store_reg(s, rd, tmp);
7129 break;
7130 default:
7131 goto illegal_op;
7132 }
7133 break;
7134 }
7135 break;
7136 }
7137 do_ldst:
7138 /* Check for undefined extension instructions
7139 * per the ARM Bible, i.e.:
7140 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7141 */
7142 sh = (0xf << 20) | (0xf << 4);
7143 if (op1 == 0x7 && ((insn & sh) == sh))
7144 {
7145 goto illegal_op;
7146 }
7147 /* load/store byte/word */
7148 rn = (insn >> 16) & 0xf;
7149 rd = (insn >> 12) & 0xf;
7150 tmp2 = load_reg(s, rn);
7151 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7152 if (insn & (1 << 24))
7153 gen_add_data_offset(s, insn, tmp2);
7154 if (insn & (1 << 20)) {
7155 /* load */
7156 if (insn & (1 << 22)) {
7157 tmp = gen_ld8u(tmp2, i);
7158 } else {
7159 tmp = gen_ld32(tmp2, i);
7160 }
7161 } else {
7162 /* store */
7163 tmp = load_reg(s, rd);
7164 if (insn & (1 << 22))
7165 gen_st8(tmp, tmp2, i);
7166 else
7167 gen_st32(tmp, tmp2, i);
7168 }
7169 if (!(insn & (1 << 24))) {
7170 gen_add_data_offset(s, insn, tmp2);
7171 store_reg(s, rn, tmp2);
7172 } else if (insn & (1 << 21)) {
7173 store_reg(s, rn, tmp2);
7174 } else {
7175 dead_tmp(tmp2);
7176 }
7177 if (insn & (1 << 20)) {
7178 /* Complete the load. */
7179 if (rd == 15)
7180 gen_bx(s, tmp);
7181 else
7182 store_reg(s, rd, tmp);
7183 }
7184 break;
7185 case 0x08:
7186 case 0x09:
7187 {
7188 int j, n, user, loaded_base;
7189 TCGv loaded_var;
7190 /* load/store multiple words */
7191 /* XXX: store correct base if write back */
7192 user = 0;
7193 if (insn & (1 << 22)) {
7194 if (IS_USER(s))
7195 goto illegal_op; /* only usable in supervisor mode */
7196
7197 if ((insn & (1 << 15)) == 0)
7198 user = 1;
7199 }
7200 rn = (insn >> 16) & 0xf;
7201 addr = load_reg(s, rn);
7202
7203 /* compute total size */
7204 loaded_base = 0;
7205 TCGV_UNUSED(loaded_var);
7206 n = 0;
7207 for (i = 0; i < 16; i++) {
7208 if (insn & (1 << i))
7209 n++;
7210 }
7211 /* XXX: test invalid n == 0 case ? */
7212 if (insn & (1 << 23)) {
7213 if (insn & (1 << 24)) {
7214 /* pre increment */
7215 tcg_gen_addi_i32(addr, addr, 4);
7216 } else {
7217 /* post increment */
7218 }
7219 } else {
7220 if (insn & (1 << 24)) {
7221 /* pre decrement */
7222 tcg_gen_addi_i32(addr, addr, -(n * 4));
7223 } else {
7224 /* post decrement */
7225 if (n != 1)
7226 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7227 }
7228 }
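                /* Illustrative address math: for LDMDB/STMDB with n
                   registers the lowest address is rn - n * 4; LDMDA/STMDA
                   starts at rn - (n - 1) * 4; the loop below then walks
                   upwards in steps of 4. */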
7229 j = 0;
7230 for (i = 0; i < 16; i++) {
7231 if (insn & (1 << i)) {
7232 if (insn & (1 << 20)) {
7233 /* load */
7234 tmp = gen_ld32(addr, IS_USER(s));
7235 if (i == 15) {
7236 gen_bx(s, tmp);
7237 } else if (user) {
7238 tmp2 = tcg_const_i32(i);
7239 gen_helper_set_user_reg(tmp2, tmp);
7240 tcg_temp_free_i32(tmp2);
7241 dead_tmp(tmp);
7242 } else if (i == rn) {
7243 loaded_var = tmp;
7244 loaded_base = 1;
7245 } else {
7246 store_reg(s, i, tmp);
7247 }
7248 } else {
7249 /* store */
7250 if (i == 15) {
7251 /* special case: r15 = PC + 8 */
7252 val = (long)s->pc + 4;
7253 tmp = new_tmp();
7254 tcg_gen_movi_i32(tmp, val);
7255 } else if (user) {
7256 tmp = new_tmp();
7257 tmp2 = tcg_const_i32(i);
7258 gen_helper_get_user_reg(tmp, tmp2);
7259 tcg_temp_free_i32(tmp2);
7260 } else {
7261 tmp = load_reg(s, i);
7262 }
7263 gen_st32(tmp, addr, IS_USER(s));
7264 }
7265 j++;
7266 /* no need to add after the last transfer */
7267 if (j != n)
7268 tcg_gen_addi_i32(addr, addr, 4);
7269 }
7270 }
7271 if (insn & (1 << 21)) {
7272 /* write back */
7273 if (insn & (1 << 23)) {
7274 if (insn & (1 << 24)) {
7275 /* pre increment */
7276 } else {
7277 /* post increment */
7278 tcg_gen_addi_i32(addr, addr, 4);
7279 }
7280 } else {
7281 if (insn & (1 << 24)) {
7282 /* pre decrement */
7283 if (n != 1)
7284 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7285 } else {
7286 /* post decrement */
7287 tcg_gen_addi_i32(addr, addr, -(n * 4));
7288 }
7289 }
7290 store_reg(s, rn, addr);
7291 } else {
7292 dead_tmp(addr);
7293 }
7294 if (loaded_base) {
7295 store_reg(s, rn, loaded_var);
7296 }
7297 if ((insn & (1 << 22)) && !user) {
7298 /* Restore CPSR from SPSR. */
7299 tmp = load_cpu_field(spsr);
7300 gen_set_cpsr(tmp, 0xffffffff);
7301 dead_tmp(tmp);
7302 s->is_jmp = DISAS_UPDATE;
7303 }
7304 }
7305 break;
7306 case 0xa:
7307 case 0xb:
7308 {
7309 int32_t offset;
7310
7311 /* branch (and link) */
7312 val = (int32_t)s->pc;
7313 if (insn & (1 << 24)) {
7314 tmp = new_tmp();
7315 tcg_gen_movi_i32(tmp, val);
7316 store_reg(s, 14, tmp);
7317 }
7318 offset = (((int32_t)insn << 8) >> 8);
7319 val += (offset << 2) + 4;
7320 gen_jmp(s, val);
7321 }
7322 break;
7323 case 0xc:
7324 case 0xd:
7325 case 0xe:
7326 /* Coprocessor. */
7327 if (disas_coproc_insn(env, s, insn))
7328 goto illegal_op;
7329 break;
7330 case 0xf:
7331 /* swi */
7332 gen_set_pc_im(s->pc);
7333 s->is_jmp = DISAS_SWI;
7334 break;
7335 default:
7336 illegal_op:
7337 gen_exception_insn(s, 4, EXCP_UDEF);
7338 break;
7339 }
7340 }
7341 }
7342
7343 /* Return true if this is a Thumb-2 logical op. */
7344 static int
7345 thumb2_logic_op(int op)
7346 {
7347 return (op < 8);
7348 }
7349
7350 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7351 then set condition code flags based on the result of the operation.
7352 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7353 to the high bit of T1.
7354 Returns zero if the opcode is valid. */
7355
7356 static int
7357 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7358 {
7359 int logic_cc;
7360
7361 logic_cc = 0;
7362 switch (op) {
7363 case 0: /* and */
7364 tcg_gen_and_i32(t0, t0, t1);
7365 logic_cc = conds;
7366 break;
7367 case 1: /* bic */
7368 tcg_gen_andc_i32(t0, t0, t1);
7369 logic_cc = conds;
7370 break;
7371 case 2: /* orr */
7372 tcg_gen_or_i32(t0, t0, t1);
7373 logic_cc = conds;
7374 break;
7375 case 3: /* orn */
7376 tcg_gen_not_i32(t1, t1);
7377 tcg_gen_or_i32(t0, t0, t1);
7378 logic_cc = conds;
7379 break;
7380 case 4: /* eor */
7381 tcg_gen_xor_i32(t0, t0, t1);
7382 logic_cc = conds;
7383 break;
7384 case 8: /* add */
7385 if (conds)
7386 gen_helper_add_cc(t0, t0, t1);
7387 else
7388 tcg_gen_add_i32(t0, t0, t1);
7389 break;
7390 case 10: /* adc */
7391 if (conds)
7392 gen_helper_adc_cc(t0, t0, t1);
7393 else
7394 gen_adc(t0, t1);
7395 break;
7396 case 11: /* sbc */
7397 if (conds)
7398 gen_helper_sbc_cc(t0, t0, t1);
7399 else
7400 gen_sub_carry(t0, t0, t1);
7401 break;
7402 case 13: /* sub */
7403 if (conds)
7404 gen_helper_sub_cc(t0, t0, t1);
7405 else
7406 tcg_gen_sub_i32(t0, t0, t1);
7407 break;
7408 case 14: /* rsb */
7409 if (conds)
7410 gen_helper_sub_cc(t0, t1, t0);
7411 else
7412 tcg_gen_sub_i32(t0, t1, t0);
7413 break;
7414 default: /* 5, 6, 7, 9, 12, 15. */
7415 return 1;
7416 }
7417 if (logic_cc) {
7418 gen_logic_CC(t0);
7419 if (shifter_out)
7420 gen_set_CF_bit31(t1);
7421 }
7422 return 0;
7423 }
7424
7425 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7426 is not legal. */
7427 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7428 {
7429 uint32_t insn, imm, shift, offset;
7430 uint32_t rd, rn, rm, rs;
7431 TCGv tmp;
7432 TCGv tmp2;
7433 TCGv tmp3;
7434 TCGv addr;
7435 TCGv_i64 tmp64;
7436 int op;
7437 int shiftop;
7438 int conds;
7439 int logic_cc;
7440
7441 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7442 || arm_feature (env, ARM_FEATURE_M))) {
7443 /* Thumb-1 cores may need to treat bl and blx as a pair of
7444 16-bit instructions to get correct prefetch abort behavior. */
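        /* Illustrative split: hw1 parks pc + 4 + signext(imm11 << 12) in
           lr (see the page-boundary path below); hw2 then adds its
           imm11 * 2, saves the real return address and branches, so an
           abort between the two halves can restart cleanly. */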
7445 insn = insn_hw1;
7446 if ((insn & (1 << 12)) == 0) {
7447 /* Second half of blx. */
7448 offset = ((insn & 0x7ff) << 1);
7449 tmp = load_reg(s, 14);
7450 tcg_gen_addi_i32(tmp, tmp, offset);
7451 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7452
7453 tmp2 = new_tmp();
7454 tcg_gen_movi_i32(tmp2, s->pc | 1);
7455 store_reg(s, 14, tmp2);
7456 gen_bx(s, tmp);
7457 return 0;
7458 }
7459 if (insn & (1 << 11)) {
7460 /* Second half of bl. */
7461 offset = ((insn & 0x7ff) << 1) | 1;
7462 tmp = load_reg(s, 14);
7463 tcg_gen_addi_i32(tmp, tmp, offset);
7464
7465 tmp2 = new_tmp();
7466 tcg_gen_movi_i32(tmp2, s->pc | 1);
7467 store_reg(s, 14, tmp2);
7468 gen_bx(s, tmp);
7469 return 0;
7470 }
7471 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7472 /* Instruction spans a page boundary. Implement it as two
7473 16-bit instructions in case the second half causes a
7474 prefetch abort. */
7475 offset = ((int32_t)insn << 21) >> 9;
7476 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7477 return 0;
7478 }
7479 /* Fall through to 32-bit decode. */
7480 }
7481
7482 insn = lduw_code(s->pc);
7483 s->pc += 2;
7484 insn |= (uint32_t)insn_hw1 << 16;
7485
7486 if ((insn & 0xf800e800) != 0xf000e800) {
7487 ARCH(6T2);
7488 }
7489
7490 rn = (insn >> 16) & 0xf;
7491 rs = (insn >> 12) & 0xf;
7492 rd = (insn >> 8) & 0xf;
7493 rm = insn & 0xf;
7494 switch ((insn >> 25) & 0xf) {
7495 case 0: case 1: case 2: case 3:
7496 /* 16-bit instructions. Should never happen. */
7497 abort();
7498 case 4:
7499 if (insn & (1 << 22)) {
7500 /* Other load/store, table branch. */
7501 if (insn & 0x01200000) {
7502 /* Load/store doubleword. */
7503 if (rn == 15) {
7504 addr = new_tmp();
7505 tcg_gen_movi_i32(addr, s->pc & ~3);
7506 } else {
7507 addr = load_reg(s, rn);
7508 }
7509 offset = (insn & 0xff) * 4;
7510 if ((insn & (1 << 23)) == 0)
7511 offset = -offset;
7512 if (insn & (1 << 24)) {
7513 tcg_gen_addi_i32(addr, addr, offset);
7514 offset = 0;
7515 }
7516 if (insn & (1 << 20)) {
7517 /* ldrd */
7518 tmp = gen_ld32(addr, IS_USER(s));
7519 store_reg(s, rs, tmp);
7520 tcg_gen_addi_i32(addr, addr, 4);
7521 tmp = gen_ld32(addr, IS_USER(s));
7522 store_reg(s, rd, tmp);
7523 } else {
7524 /* strd */
7525 tmp = load_reg(s, rs);
7526 gen_st32(tmp, addr, IS_USER(s));
7527 tcg_gen_addi_i32(addr, addr, 4);
7528 tmp = load_reg(s, rd);
7529 gen_st32(tmp, addr, IS_USER(s));
7530 }
7531 if (insn & (1 << 21)) {
7532 /* Base writeback. */
7533 if (rn == 15)
7534 goto illegal_op;
7535 tcg_gen_addi_i32(addr, addr, offset - 4);
7536 store_reg(s, rn, addr);
7537 } else {
7538 dead_tmp(addr);
7539 }
7540 } else if ((insn & (1 << 23)) == 0) {
7541 /* Load/store exclusive word. */
7542 addr = tcg_temp_local_new();
7543 load_reg_var(s, addr, rn);
7544 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7545 if (insn & (1 << 20)) {
7546 gen_load_exclusive(s, rs, 15, addr, 2);
7547 } else {
7548 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7549 }
7550 tcg_temp_free(addr);
7551 } else if ((insn & (1 << 6)) == 0) {
7552 /* Table Branch. */
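                    /* e.g. tbb [rn, rm] loads the byte at rn + rm and
                       branches to s->pc + 2 * byte (Thumb instructions are
                       halfword aligned); tbh adds rm twice to index a
                       table of halfwords instead. */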
7553 if (rn == 15) {
7554 addr = new_tmp();
7555 tcg_gen_movi_i32(addr, s->pc);
7556 } else {
7557 addr = load_reg(s, rn);
7558 }
7559 tmp = load_reg(s, rm);
7560 tcg_gen_add_i32(addr, addr, tmp);
7561 if (insn & (1 << 4)) {
7562 /* tbh */
7563 tcg_gen_add_i32(addr, addr, tmp);
7564 dead_tmp(tmp);
7565 tmp = gen_ld16u(addr, IS_USER(s));
7566 } else { /* tbb */
7567 dead_tmp(tmp);
7568 tmp = gen_ld8u(addr, IS_USER(s));
7569 }
7570 dead_tmp(addr);
7571 tcg_gen_shli_i32(tmp, tmp, 1);
7572 tcg_gen_addi_i32(tmp, tmp, s->pc);
7573 store_reg(s, 15, tmp);
7574 } else {
7575 /* Load/store exclusive byte/halfword/doubleword. */
7576 ARCH(7);
7577 op = (insn >> 4) & 0x3;
7578 if (op == 2) {
7579 goto illegal_op;
7580 }
7581 addr = tcg_temp_local_new();
7582 load_reg_var(s, addr, rn);
7583 if (insn & (1 << 20)) {
7584 gen_load_exclusive(s, rs, rd, addr, op);
7585 } else {
7586 gen_store_exclusive(s, rm, rs, rd, addr, op);
7587 }
7588 tcg_temp_free(addr);
7589 }
7590 } else {
7591 /* Load/store multiple, RFE, SRS. */
7592 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7593 /* Not available in user mode. */
7594 if (IS_USER(s))
7595 goto illegal_op;
7596 if (insn & (1 << 20)) {
7597 /* rfe */
7598 addr = load_reg(s, rn);
7599 if ((insn & (1 << 24)) == 0)
7600 tcg_gen_addi_i32(addr, addr, -8);
7601 /* Load PC into tmp and CPSR into tmp2. */
7602 tmp = gen_ld32(addr, 0);
7603 tcg_gen_addi_i32(addr, addr, 4);
7604 tmp2 = gen_ld32(addr, 0);
7605 if (insn & (1 << 21)) {
7606 /* Base writeback. */
7607 if (insn & (1 << 24)) {
7608 tcg_gen_addi_i32(addr, addr, 4);
7609 } else {
7610 tcg_gen_addi_i32(addr, addr, -4);
7611 }
7612 store_reg(s, rn, addr);
7613 } else {
7614 dead_tmp(addr);
7615 }
7616 gen_rfe(s, tmp, tmp2);
7617 } else {
7618 /* srs */
7619 op = (insn & 0x1f);
7620 addr = new_tmp();
7621 tmp = tcg_const_i32(op);
7622 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7623 tcg_temp_free_i32(tmp);
7624 if ((insn & (1 << 24)) == 0) {
7625 tcg_gen_addi_i32(addr, addr, -8);
7626 }
7627 tmp = load_reg(s, 14);
7628 gen_st32(tmp, addr, 0);
7629 tcg_gen_addi_i32(addr, addr, 4);
7630 tmp = new_tmp();
7631 gen_helper_cpsr_read(tmp);
7632 gen_st32(tmp, addr, 0);
7633 if (insn & (1 << 21)) {
7634 if ((insn & (1 << 24)) == 0) {
7635 tcg_gen_addi_i32(addr, addr, -4);
7636 } else {
7637 tcg_gen_addi_i32(addr, addr, 4);
7638 }
7639 tmp = tcg_const_i32(op);
7640 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7641 tcg_temp_free_i32(tmp);
7642 } else {
7643 dead_tmp(addr);
7644 }
7645 }
7646 } else {
7647 int i;
7648 /* Load/store multiple. */
7649 addr = load_reg(s, rn);
7650 offset = 0;
7651 for (i = 0; i < 16; i++) {
7652 if (insn & (1 << i))
7653 offset += 4;
7654 }
7655 if (insn & (1 << 24)) {
7656 tcg_gen_addi_i32(addr, addr, -offset);
7657 }
7658
7659 for (i = 0; i < 16; i++) {
7660 if ((insn & (1 << i)) == 0)
7661 continue;
7662 if (insn & (1 << 20)) {
7663 /* Load. */
7664 tmp = gen_ld32(addr, IS_USER(s));
7665 if (i == 15) {
7666 gen_bx(s, tmp);
7667 } else {
7668 store_reg(s, i, tmp);
7669 }
7670 } else {
7671 /* Store. */
7672 tmp = load_reg(s, i);
7673 gen_st32(tmp, addr, IS_USER(s));
7674 }
7675 tcg_gen_addi_i32(addr, addr, 4);
7676 }
7677 if (insn & (1 << 21)) {
7678 /* Base register writeback. */
7679 if (insn & (1 << 24)) {
7680 tcg_gen_addi_i32(addr, addr, -offset);
7681 }
7682 /* Fault if writeback register is in register list. */
7683 if (insn & (1 << rn))
7684 goto illegal_op;
7685 store_reg(s, rn, addr);
7686 } else {
7687 dead_tmp(addr);
7688 }
7689 }
7690 }
7691 break;
7692 case 5:
7693
7694 op = (insn >> 21) & 0xf;
7695 if (op == 6) {
7696 /* Halfword pack. */
7697 tmp = load_reg(s, rn);
7698 tmp2 = load_reg(s, rm);
7699 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7700 if (insn & (1 << 5)) {
7701 /* pkhtb */
7702 if (shift == 0)
7703 shift = 31;
7704 tcg_gen_sari_i32(tmp2, tmp2, shift);
7705 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7706 tcg_gen_ext16u_i32(tmp2, tmp2);
7707 } else {
7708 /* pkhbt */
7709 if (shift)
7710 tcg_gen_shli_i32(tmp2, tmp2, shift);
7711 tcg_gen_ext16u_i32(tmp, tmp);
7712 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7713 }
7714 tcg_gen_or_i32(tmp, tmp, tmp2);
7715 dead_tmp(tmp2);
7716 store_reg(s, rd, tmp);
7717 } else {
7718 /* Data processing register constant shift. */
7719 if (rn == 15) {
7720 tmp = new_tmp();
7721 tcg_gen_movi_i32(tmp, 0);
7722 } else {
7723 tmp = load_reg(s, rn);
7724 }
7725 tmp2 = load_reg(s, rm);
7726
7727 shiftop = (insn >> 4) & 3;
7728 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7729 conds = (insn & (1 << 20)) != 0;
7730 logic_cc = (conds && thumb2_logic_op(op));
7731 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7732 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7733 goto illegal_op;
7734 dead_tmp(tmp2);
7735 if (rd != 15) {
7736 store_reg(s, rd, tmp);
7737 } else {
7738 dead_tmp(tmp);
7739 }
7740 }
7741 break;
7742 case 13: /* Misc data processing. */
7743 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7744 if (op < 4 && (insn & 0xf000) != 0xf000)
7745 goto illegal_op;
7746 switch (op) {
7747 case 0: /* Register controlled shift. */
7748 tmp = load_reg(s, rn);
7749 tmp2 = load_reg(s, rm);
7750 if ((insn & 0x70) != 0)
7751 goto illegal_op;
7752 op = (insn >> 21) & 3;
7753 logic_cc = (insn & (1 << 20)) != 0;
7754 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7755 if (logic_cc)
7756 gen_logic_CC(tmp);
7757 store_reg_bx(env, s, rd, tmp);
7758 break;
7759 case 1: /* Sign/zero extend. */
7760 tmp = load_reg(s, rm);
7761 shift = (insn >> 4) & 3;
7762 /* ??? In many cases it's not necessary to do a
7763 rotate, a shift is sufficient. */
7764 if (shift != 0)
7765 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7766 op = (insn >> 20) & 7;
7767 switch (op) {
7768 case 0: gen_sxth(tmp); break;
7769 case 1: gen_uxth(tmp); break;
7770 case 2: gen_sxtb16(tmp); break;
7771 case 3: gen_uxtb16(tmp); break;
7772 case 4: gen_sxtb(tmp); break;
7773 case 5: gen_uxtb(tmp); break;
7774 default: goto illegal_op;
7775 }
7776 if (rn != 15) {
7777 tmp2 = load_reg(s, rn);
7778 if ((op >> 1) == 1) {
7779 gen_add16(tmp, tmp2);
7780 } else {
7781 tcg_gen_add_i32(tmp, tmp, tmp2);
7782 dead_tmp(tmp2);
7783 }
7784 }
7785 store_reg(s, rd, tmp);
7786 break;
7787 case 2: /* SIMD add/subtract. */
7788 op = (insn >> 20) & 7;
7789 shift = (insn >> 4) & 7;
7790 if ((op & 3) == 3 || (shift & 3) == 3)
7791 goto illegal_op;
7792 tmp = load_reg(s, rn);
7793 tmp2 = load_reg(s, rm);
7794 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7795 dead_tmp(tmp2);
7796 store_reg(s, rd, tmp);
7797 break;
7798 case 3: /* Other data processing. */
7799 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7800 if (op < 4) {
7801 /* Saturating add/subtract. */
7802 tmp = load_reg(s, rn);
7803 tmp2 = load_reg(s, rm);
7804 if (op & 1)
7805 gen_helper_double_saturate(tmp, tmp);
7806 if (op & 2)
7807 gen_helper_sub_saturate(tmp, tmp2, tmp);
7808 else
7809 gen_helper_add_saturate(tmp, tmp, tmp2);
7810 dead_tmp(tmp2);
7811 } else {
7812 tmp = load_reg(s, rn);
7813 switch (op) {
7814 case 0x0a: /* rbit */
7815 gen_helper_rbit(tmp, tmp);
7816 break;
7817 case 0x08: /* rev */
7818 tcg_gen_bswap32_i32(tmp, tmp);
7819 break;
7820 case 0x09: /* rev16 */
7821 gen_rev16(tmp);
7822 break;
7823 case 0x0b: /* revsh */
7824 gen_revsh(tmp);
7825 break;
7826 case 0x10: /* sel */
7827 tmp2 = load_reg(s, rm);
7828 tmp3 = new_tmp();
7829 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7830 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7831 dead_tmp(tmp3);
7832 dead_tmp(tmp2);
7833 break;
7834 case 0x18: /* clz */
7835 gen_helper_clz(tmp, tmp);
7836 break;
7837 default:
7838 goto illegal_op;
7839 }
7840 }
7841 store_reg(s, rd, tmp);
7842 break;
7843 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7844 op = (insn >> 4) & 0xf;
7845 tmp = load_reg(s, rn);
7846 tmp2 = load_reg(s, rm);
7847 switch ((insn >> 20) & 7) {
7848 case 0: /* 32 x 32 -> 32 */
7849 tcg_gen_mul_i32(tmp, tmp, tmp2);
7850 dead_tmp(tmp2);
7851 if (rs != 15) {
7852 tmp2 = load_reg(s, rs);
7853 if (op)
7854 tcg_gen_sub_i32(tmp, tmp2, tmp);
7855 else
7856 tcg_gen_add_i32(tmp, tmp, tmp2);
7857 dead_tmp(tmp2);
7858 }
7859 break;
7860 case 1: /* 16 x 16 -> 32 */
7861 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7862 dead_tmp(tmp2);
7863 if (rs != 15) {
7864 tmp2 = load_reg(s, rs);
7865 gen_helper_add_setq(tmp, tmp, tmp2);
7866 dead_tmp(tmp2);
7867 }
7868 break;
7869 case 2: /* Dual multiply add. */
7870 case 4: /* Dual multiply subtract. */
7871 if (op)
7872 gen_swap_half(tmp2);
7873 gen_smul_dual(tmp, tmp2);
7874 /* This addition cannot overflow. */
7875 if (insn & (1 << 22)) {
7876 tcg_gen_sub_i32(tmp, tmp, tmp2);
7877 } else {
7878 tcg_gen_add_i32(tmp, tmp, tmp2);
7879 }
7880 dead_tmp(tmp2);
7881 if (rs != 15)
7882 {
7883 tmp2 = load_reg(s, rs);
7884 gen_helper_add_setq(tmp, tmp, tmp2);
7885 dead_tmp(tmp2);
7886 }
7887 break;
7888 case 3: /* 32 * 16 -> 32msb */
7889 if (op)
7890 tcg_gen_sari_i32(tmp2, tmp2, 16);
7891 else
7892 gen_sxth(tmp2);
7893 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7894 tcg_gen_shri_i64(tmp64, tmp64, 16);
7895 tmp = new_tmp();
7896 tcg_gen_trunc_i64_i32(tmp, tmp64);
7897 tcg_temp_free_i64(tmp64);
7898 if (rs != 15)
7899 {
7900 tmp2 = load_reg(s, rs);
7901 gen_helper_add_setq(tmp, tmp, tmp2);
7902 dead_tmp(tmp2);
7903 }
7904 break;
7905 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7906 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7907 if (rs != 15) {
7908 tmp = load_reg(s, rs);
7909 if (insn & (1 << 20)) {
7910 tmp64 = gen_addq_msw(tmp64, tmp);
7911 } else {
7912 tmp64 = gen_subq_msw(tmp64, tmp);
7913 }
7914 }
7915 if (insn & (1 << 4)) {
7916 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7917 }
7918 tcg_gen_shri_i64(tmp64, tmp64, 32);
7919 tmp = new_tmp();
7920 tcg_gen_trunc_i64_i32(tmp, tmp64);
7921 tcg_temp_free_i64(tmp64);
7922 break;
7923 case 7: /* Unsigned sum of absolute differences. */
7924 gen_helper_usad8(tmp, tmp, tmp2);
7925 dead_tmp(tmp2);
7926 if (rs != 15) {
7927 tmp2 = load_reg(s, rs);
7928 tcg_gen_add_i32(tmp, tmp, tmp2);
7929 dead_tmp(tmp2);
7930 }
7931 break;
7932 }
7933 store_reg(s, rd, tmp);
7934 break;
7935 case 6: case 7: /* 64-bit multiply, Divide. */
7936 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7937 tmp = load_reg(s, rn);
7938 tmp2 = load_reg(s, rm);
7939 if ((op & 0x50) == 0x10) {
7940 /* sdiv, udiv */
7941 if (!arm_feature(env, ARM_FEATURE_DIV))
7942 goto illegal_op;
7943 if (op & 0x20)
7944 gen_helper_udiv(tmp, tmp, tmp2);
7945 else
7946 gen_helper_sdiv(tmp, tmp, tmp2);
7947 dead_tmp(tmp2);
7948 store_reg(s, rd, tmp);
7949 } else if ((op & 0xe) == 0xc) {
7950 /* Dual multiply accumulate long. */
7951 if (op & 1)
7952 gen_swap_half(tmp2);
7953 gen_smul_dual(tmp, tmp2);
7954 if (op & 0x10) {
7955 tcg_gen_sub_i32(tmp, tmp, tmp2);
7956 } else {
7957 tcg_gen_add_i32(tmp, tmp, tmp2);
7958 }
7959 dead_tmp(tmp2);
7960 /* BUGFIX */
7961 tmp64 = tcg_temp_new_i64();
7962 tcg_gen_ext_i32_i64(tmp64, tmp);
7963 dead_tmp(tmp);
7964 gen_addq(s, tmp64, rs, rd);
7965 gen_storeq_reg(s, rs, rd, tmp64);
7966 tcg_temp_free_i64(tmp64);
7967 } else {
7968 if (op & 0x20) {
7969 /* Unsigned 64-bit multiply */
7970 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7971 } else {
7972 if (op & 8) {
7973 /* smlalxy */
7974 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7975 dead_tmp(tmp2);
7976 tmp64 = tcg_temp_new_i64();
7977 tcg_gen_ext_i32_i64(tmp64, tmp);
7978 dead_tmp(tmp);
7979 } else {
7980 /* Signed 64-bit multiply */
7981 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7982 }
7983 }
7984 if (op & 4) {
7985 /* umaal */
7986 gen_addq_lo(s, tmp64, rs);
7987 gen_addq_lo(s, tmp64, rd);
7988 } else if (op & 0x40) {
7989 /* 64-bit accumulate. */
7990 gen_addq(s, tmp64, rs, rd);
7991 }
7992 gen_storeq_reg(s, rs, rd, tmp64);
7993 tcg_temp_free_i64(tmp64);
7994 }
7995 break;
7996 }
7997 break;
7998 case 6: case 7: case 14: case 15:
7999 /* Coprocessor. */
8000 if (((insn >> 24) & 3) == 3) {
8001 /* Translate into the equivalent ARM encoding. */
8002 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
8003 if (disas_neon_data_insn(env, s, insn))
8004 goto illegal_op;
8005 } else {
8006 if (insn & (1 << 28))
8007 goto illegal_op;
8008 if (disas_coproc_insn (env, s, insn))
8009 goto illegal_op;
8010 }
8011 break;
8012 case 8: case 9: case 10: case 11:
8013 if (insn & (1 << 15)) {
8014 /* Branches, misc control. */
8015 if (insn & 0x5000) {
8016 /* Unconditional branch. */
8017 /* signextend(hw1[10:0]) -> offset[:12]. */
8018 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8019 /* hw2[10:0] -> offset[11:1]. */
8020 offset |= (insn & 0x7ff) << 1;
8021 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8022 offset[24:22] already have the same value because of the
8023 sign extension above. */
8024 offset ^= ((~insn) & (1 << 13)) << 10;
8025 offset ^= ((~insn) & (1 << 11)) << 11;
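            /* J1/J2 (hw2 bits 13 and 11) are stored inverted relative to
               the sign bit: I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S). The
               sign extension above already put S into offset[23:22], so
               XORing with the complemented bits yields I1/I2 in place. */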
8026
8027 if (insn & (1 << 14)) {
8028 /* Branch and link. */
8029 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8030 }
8031
8032 offset += s->pc;
8033 if (insn & (1 << 12)) {
8034 /* b/bl */
8035 gen_jmp(s, offset);
8036 } else {
8037 /* blx */
8038 offset &= ~(uint32_t)2;
8039 gen_bx_im(s, offset);
8040 }
8041 } else if (((insn >> 23) & 7) == 7) {
8042 /* Misc control */
8043 if (insn & (1 << 13))
8044 goto illegal_op;
8045
8046 if (insn & (1 << 26)) {
8047 /* Secure monitor call (v6Z) */
8048 goto illegal_op; /* not implemented. */
8049 } else {
8050 op = (insn >> 20) & 7;
8051 switch (op) {
8052 case 0: /* msr cpsr. */
8053 if (IS_M(env)) {
8054 tmp = load_reg(s, rn);
8055 addr = tcg_const_i32(insn & 0xff);
8056 gen_helper_v7m_msr(cpu_env, addr, tmp);
8057 tcg_temp_free_i32(addr);
8058 dead_tmp(tmp);
8059 gen_lookup_tb(s);
8060 break;
8061 }
8062 /* fall through */
8063 case 1: /* msr spsr. */
8064 if (IS_M(env))
8065 goto illegal_op;
8066 tmp = load_reg(s, rn);
8067 if (gen_set_psr(s,
8068 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8069 op == 1, tmp))
8070 goto illegal_op;
8071 break;
8072 case 2: /* cps, nop-hint. */
8073 if (((insn >> 8) & 7) == 0) {
8074 gen_nop_hint(s, insn & 0xff);
8075 }
8076 /* Implemented as NOP in user mode. */
8077 if (IS_USER(s))
8078 break;
8079 offset = 0;
8080 imm = 0;
8081 if (insn & (1 << 10)) {
8082 if (insn & (1 << 7))
8083 offset |= CPSR_A;
8084 if (insn & (1 << 6))
8085 offset |= CPSR_I;
8086 if (insn & (1 << 5))
8087 offset |= CPSR_F;
8088 if (insn & (1 << 9))
8089 imm = CPSR_A | CPSR_I | CPSR_F;
8090 }
8091 if (insn & (1 << 8)) {
8092 offset |= 0x1f;
8093 imm |= (insn & 0x1f);
8094 }
8095 if (offset) {
8096 gen_set_psr_im(s, offset, 0, imm);
8097 }
8098 break;
8099 case 3: /* Special control operations. */
8100 ARCH(7);
8101 op = (insn >> 4) & 0xf;
8102 switch (op) {
8103 case 2: /* clrex */
8104 gen_clrex(s);
8105 break;
8106 case 4: /* dsb */
8107 case 5: /* dmb */
8108 case 6: /* isb */
8109 /* These execute as NOPs. */
8110 break;
8111 default:
8112 goto illegal_op;
8113 }
8114 break;
8115 case 4: /* bxj */
8116 /* Trivial implementation equivalent to bx. */
8117 tmp = load_reg(s, rn);
8118 gen_bx(s, tmp);
8119 break;
8120 case 5: /* Exception return. */
8121 if (IS_USER(s)) {
8122 goto illegal_op;
8123 }
8124 if (rn != 14 || rd != 15) {
8125 goto illegal_op;
8126 }
8127 tmp = load_reg(s, rn);
8128 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8129 gen_exception_return(s, tmp);
8130 break;
8131 case 6: /* mrs cpsr. */
8132 tmp = new_tmp();
8133 if (IS_M(env)) {
8134 addr = tcg_const_i32(insn & 0xff);
8135 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8136 tcg_temp_free_i32(addr);
8137 } else {
8138 gen_helper_cpsr_read(tmp);
8139 }
8140 store_reg(s, rd, tmp);
8141 break;
8142 case 7: /* mrs spsr. */
8143 /* Not accessible in user mode. */
8144 if (IS_USER(s) || IS_M(env))
8145 goto illegal_op;
8146 tmp = load_cpu_field(spsr);
8147 store_reg(s, rd, tmp);
8148 break;
8149 }
8150 }
8151 } else {
8152 /* Conditional branch. */
8153 op = (insn >> 22) & 0xf;
8154 /* Generate a conditional jump to next instruction. */
8155 s->condlabel = gen_new_label();
8156 gen_test_cc(op ^ 1, s->condlabel);
8157 s->condjmp = 1;
8158
8159 /* offset[11:1] = insn[10:0] */
8160 offset = (insn & 0x7ff) << 1;
8161 /* offset[17:12] = insn[21:16]. */
8162 offset |= (insn & 0x003f0000) >> 4;
8163 /* offset[31:20] = insn[26]. */
8164 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8165 /* offset[18] = insn[13]. */
8166 offset |= (insn & (1 << 13)) << 5;
8167 /* offset[19] = insn[11]. */
8168 offset |= (insn & (1 << 11)) << 8;
8169
8170 /* jump to the offset */
8171 gen_jmp(s, s->pc + offset);
8172 }
8173 } else {
8174 /* Data processing immediate. */
8175 if (insn & (1 << 25)) {
8176 if (insn & (1 << 24)) {
8177 if (insn & (1 << 20))
8178 goto illegal_op;
8179 /* Bitfield/Saturate. */
8180 op = (insn >> 21) & 7;
8181 imm = insn & 0x1f;
8182 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8183 if (rn == 15) {
8184 tmp = new_tmp();
8185 tcg_gen_movi_i32(tmp, 0);
8186 } else {
8187 tmp = load_reg(s, rn);
8188 }
8189 switch (op) {
8190 case 2: /* Signed bitfield extract. */
8191 imm++;
8192 if (shift + imm > 32)
8193 goto illegal_op;
8194 if (imm < 32)
8195 gen_sbfx(tmp, shift, imm);
8196 break;
8197 case 6: /* Unsigned bitfield extract. */
8198 imm++;
8199 if (shift + imm > 32)
8200 goto illegal_op;
8201 if (imm < 32)
8202 gen_ubfx(tmp, shift, (1u << imm) - 1);
8203 break;
8204 case 3: /* Bitfield insert/clear. */
8205 if (imm < shift)
8206 goto illegal_op;
8207 imm = imm + 1 - shift;
8208 if (imm != 32) {
8209 tmp2 = load_reg(s, rd);
8210 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8211 dead_tmp(tmp2);
8212 }
8213 break;
8214 case 7:
8215 goto illegal_op;
8216 default: /* Saturate. */
8217 if (shift) {
8218 if (op & 1)
8219 tcg_gen_sari_i32(tmp, tmp, shift);
8220 else
8221 tcg_gen_shli_i32(tmp, tmp, shift);
8222 }
8223 tmp2 = tcg_const_i32(imm);
8224 if (op & 4) {
8225 /* Unsigned. */
8226 if ((op & 1) && shift == 0)
8227 gen_helper_usat16(tmp, tmp, tmp2);
8228 else
8229 gen_helper_usat(tmp, tmp, tmp2);
8230 } else {
8231 /* Signed. */
8232 if ((op & 1) && shift == 0)
8233 gen_helper_ssat16(tmp, tmp, tmp2);
8234 else
8235 gen_helper_ssat(tmp, tmp, tmp2);
8236 }
8237 tcg_temp_free_i32(tmp2);
8238 break;
8239 }
8240 store_reg(s, rd, tmp);
8241 } else {
8242 imm = ((insn & 0x04000000) >> 15)
8243 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8244 if (insn & (1 << 22)) {
8245 /* 16-bit immediate. */
8246 imm |= (insn >> 4) & 0xf000;
8247 if (insn & (1 << 23)) {
8248 /* movt */
8249 tmp = load_reg(s, rd);
8250 tcg_gen_ext16u_i32(tmp, tmp);
8251 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8252 } else {
8253 /* movw */
8254 tmp = new_tmp();
8255 tcg_gen_movi_i32(tmp, imm);
8256 }
8257 } else {
8258 /* Add/sub 12-bit immediate. */
8259 if (rn == 15) {
8260 offset = s->pc & ~(uint32_t)3;
8261 if (insn & (1 << 23))
8262 offset -= imm;
8263 else
8264 offset += imm;
8265 tmp = new_tmp();
8266 tcg_gen_movi_i32(tmp, offset);
8267 } else {
8268 tmp = load_reg(s, rn);
8269 if (insn & (1 << 23))
8270 tcg_gen_subi_i32(tmp, tmp, imm);
8271 else
8272 tcg_gen_addi_i32(tmp, tmp, imm);
8273 }
8274 }
8275 store_reg(s, rd, tmp);
8276 }
8277 } else {
8278 int shifter_out = 0;
8279 /* modified 12-bit immediate. */
8280 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8281 imm = (insn & 0xff);
8282 switch (shift) {
8283 case 0: /* XY */
8284 /* Nothing to do. */
8285 break;
8286 case 1: /* 00XY00XY */
8287 imm |= imm << 16;
8288 break;
8289 case 2: /* XY00XY00 */
8290 imm |= imm << 16;
8291 imm <<= 8;
8292 break;
8293 case 3: /* XYXYXYXY */
8294 imm |= imm << 16;
8295 imm |= imm << 8;
8296 break;
8297 default: /* Rotated constant. */
8298 shift = (shift << 1) | (imm >> 7);
8299 imm |= 0x80;
8300 imm = imm << (32 - shift);
8301 shifter_out = 1;
8302 break;
8303 }
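            /* Illustrative expansions for imm8 = 0xab: 0x000000ab (case 0),
               0x00ab00ab (1), 0xab00ab00 (2), 0xabababab (3); a rotated
               constant forces bit 7 set and rotates right by the 5-bit
               count, e.g. count 8 with imm8 = 0x2b gives 0xab000000. */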
8304 tmp2 = new_tmp();
8305 tcg_gen_movi_i32(tmp2, imm);
8306 rn = (insn >> 16) & 0xf;
8307 if (rn == 15) {
8308 tmp = new_tmp();
8309 tcg_gen_movi_i32(tmp, 0);
8310 } else {
8311 tmp = load_reg(s, rn);
8312 }
8313 op = (insn >> 21) & 0xf;
8314 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8315 shifter_out, tmp, tmp2))
8316 goto illegal_op;
8317 dead_tmp(tmp2);
8318 rd = (insn >> 8) & 0xf;
8319 if (rd != 15) {
8320 store_reg(s, rd, tmp);
8321 } else {
8322 dead_tmp(tmp);
8323 }
8324 }
8325 }
8326 break;
8327 case 12: /* Load/store single data item. */
8328 {
8329 int postinc = 0;
8330 int writeback = 0;
8331 int user;
8332 if ((insn & 0x01100000) == 0x01000000) {
8333 if (disas_neon_ls_insn(env, s, insn))
8334 goto illegal_op;
8335 break;
8336 }
8337 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8338 if (rs == 15) {
8339 if (!(insn & (1 << 20))) {
8340 goto illegal_op;
8341 }
8342 if (op != 2) {
8343 /* Byte or halfword load space with dest == r15: memory hints.
8344 * Catch them early so we don't emit pointless addressing code.
8345 * This space is a mix of:
8346 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8347 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8348 * cores)
8349 * unallocated hints, which must be treated as NOPs
8350 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8351 * which is easiest for the decoding logic
8352 * Some space which must UNDEF
8353 */
8354 int op1 = (insn >> 23) & 3;
8355 int op2 = (insn >> 6) & 0x3f;
8356 if (op & 2) {
8357 goto illegal_op;
8358 }
8359 if (rn == 15) {
8360 /* UNPREDICTABLE or unallocated hint */
8361 return 0;
8362 }
8363 if (op1 & 1) {
8364 return 0; /* PLD* or unallocated hint */
8365 }
8366 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8367 return 0; /* PLD* or unallocated hint */
8368 }
8369 /* UNDEF space, or an UNPREDICTABLE */
8370 return 1;
8371 }
8372 }
8373 user = IS_USER(s);
8374 if (rn == 15) {
8375 addr = new_tmp();
8376 /* PC relative. */
8377 /* s->pc has already been incremented by 4. */
8378 imm = s->pc & 0xfffffffc;
8379 if (insn & (1 << 23))
8380 imm += insn & 0xfff;
8381 else
8382 imm -= insn & 0xfff;
8383 tcg_gen_movi_i32(addr, imm);
8384 } else {
8385 addr = load_reg(s, rn);
8386 if (insn & (1 << 23)) {
8387 /* Positive offset. */
8388 imm = insn & 0xfff;
8389 tcg_gen_addi_i32(addr, addr, imm);
8390 } else {
8391 imm = insn & 0xff;
8392 switch ((insn >> 8) & 7) {
8393 case 0: case 8: /* Shifted Register. */
8394 shift = (insn >> 4) & 0xf;
8395 if (shift > 3)
8396 goto illegal_op;
8397 tmp = load_reg(s, rm);
8398 if (shift)
8399 tcg_gen_shli_i32(tmp, tmp, shift);
8400 tcg_gen_add_i32(addr, addr, tmp);
8401 dead_tmp(tmp);
8402 break;
8403 case 4: /* Negative offset. */
8404 tcg_gen_addi_i32(addr, addr, -imm);
8405 break;
8406 case 6: /* User privilege. */
8407 tcg_gen_addi_i32(addr, addr, imm);
8408 user = 1;
8409 break;
8410 case 1: /* Post-decrement. */
8411 imm = -imm;
8412 /* Fall through. */
8413 case 3: /* Post-increment. */
8414 postinc = 1;
8415 writeback = 1;
8416 break;
8417 case 5: /* Pre-decrement. */
8418 imm = -imm;
8419 /* Fall through. */
8420 case 7: /* Pre-increment. */
8421 tcg_gen_addi_i32(addr, addr, imm);
8422 writeback = 1;
8423 break;
8424 default:
8425 goto illegal_op;
8426 }
8427 }
8428 }
8429 if (insn & (1 << 20)) {
8430 /* Load. */
8431 switch (op) {
8432 case 0: tmp = gen_ld8u(addr, user); break;
8433 case 4: tmp = gen_ld8s(addr, user); break;
8434 case 1: tmp = gen_ld16u(addr, user); break;
8435 case 5: tmp = gen_ld16s(addr, user); break;
8436 case 2: tmp = gen_ld32(addr, user); break;
8437 default: goto illegal_op;
8438 }
8439 if (rs == 15) {
8440 gen_bx(s, tmp);
8441 } else {
8442 store_reg(s, rs, tmp);
8443 }
8444 } else {
8445 /* Store. */
8446 tmp = load_reg(s, rs);
8447 switch (op) {
8448 case 0: gen_st8(tmp, addr, user); break;
8449 case 1: gen_st16(tmp, addr, user); break;
8450 case 2: gen_st32(tmp, addr, user); break;
8451 default: goto illegal_op;
8452 }
8453 }
8454 if (postinc)
8455 tcg_gen_addi_i32(addr, addr, imm);
8456 if (writeback) {
8457 store_reg(s, rn, addr);
8458 } else {
8459 dead_tmp(addr);
8460 }
8461 }
8462 break;
8463 default:
8464 goto illegal_op;
8465 }
8466 return 0;
8467 illegal_op:
8468 return 1;
8469 }
8470
8471 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8472 {
8473 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8474 int32_t offset;
8475 int i;
8476 TCGv tmp;
8477 TCGv tmp2;
8478 TCGv addr;
8479
8480 if (s->condexec_mask) {
8481 cond = s->condexec_cond;
8482 if (cond != 0x0e) { /* No conditional jump needed when the condition is AL. */
8483 s->condlabel = gen_new_label();
8484 gen_test_cc(cond ^ 1, s->condlabel);
8485 s->condjmp = 1;
8486 }
8487 }
8488
8489 insn = lduw_code(s->pc);
8490 s->pc += 2;
8491
8492 switch (insn >> 12) {
8493 case 0: case 1:
8494
8495 rd = insn & 7;
8496 op = (insn >> 11) & 3;
8497 if (op == 3) {
8498 /* add/subtract */
8499 rn = (insn >> 3) & 7;
8500 tmp = load_reg(s, rn);
8501 if (insn & (1 << 10)) {
8502 /* immediate */
8503 tmp2 = new_tmp();
8504 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8505 } else {
8506 /* reg */
8507 rm = (insn >> 6) & 7;
8508 tmp2 = load_reg(s, rm);
8509 }
8510 if (insn & (1 << 9)) {
8511 if (s->condexec_mask)
8512 tcg_gen_sub_i32(tmp, tmp, tmp2);
8513 else
8514 gen_helper_sub_cc(tmp, tmp, tmp2);
8515 } else {
8516 if (s->condexec_mask)
8517 tcg_gen_add_i32(tmp, tmp, tmp2);
8518 else
8519 gen_helper_add_cc(tmp, tmp, tmp2);
8520 }
8521 dead_tmp(tmp2);
8522 store_reg(s, rd, tmp);
8523 } else {
8524 /* shift immediate */
8525 rm = (insn >> 3) & 7;
8526 shift = (insn >> 6) & 0x1f;
8527 tmp = load_reg(s, rm);
8528 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8529 if (!s->condexec_mask)
8530 gen_logic_CC(tmp);
8531 store_reg(s, rd, tmp);
8532 }
8533 break;
8534 case 2: case 3:
8535 /* arithmetic large immediate */
8536 op = (insn >> 11) & 3;
8537 rd = (insn >> 8) & 0x7;
8538 if (op == 0) { /* mov */
8539 tmp = new_tmp();
8540 tcg_gen_movi_i32(tmp, insn & 0xff);
8541 if (!s->condexec_mask)
8542 gen_logic_CC(tmp);
8543 store_reg(s, rd, tmp);
8544 } else {
8545 tmp = load_reg(s, rd);
8546 tmp2 = new_tmp();
8547 tcg_gen_movi_i32(tmp2, insn & 0xff);
8548 switch (op) {
8549 case 1: /* cmp */
8550 gen_helper_sub_cc(tmp, tmp, tmp2);
8551 dead_tmp(tmp);
8552 dead_tmp(tmp2);
8553 break;
8554 case 2: /* add */
8555 if (s->condexec_mask)
8556 tcg_gen_add_i32(tmp, tmp, tmp2);
8557 else
8558 gen_helper_add_cc(tmp, tmp, tmp2);
8559 dead_tmp(tmp2);
8560 store_reg(s, rd, tmp);
8561 break;
8562 case 3: /* sub */
8563 if (s->condexec_mask)
8564 tcg_gen_sub_i32(tmp, tmp, tmp2);
8565 else
8566 gen_helper_sub_cc(tmp, tmp, tmp2);
8567 dead_tmp(tmp2);
8568 store_reg(s, rd, tmp);
8569 break;
8570 }
8571 }
8572 break;
8573 case 4:
8574 if (insn & (1 << 11)) {
8575 rd = (insn >> 8) & 7;
8576 /* load pc-relative. Bit 1 of PC is ignored. */
8577 val = s->pc + 2 + ((insn & 0xff) * 4);
8578 val &= ~(uint32_t)2;
8579 addr = new_tmp();
8580 tcg_gen_movi_i32(addr, val);
8581 tmp = gen_ld32(addr, IS_USER(s));
8582 dead_tmp(addr);
8583 store_reg(s, rd, tmp);
8584 break;
8585 }
8586 if (insn & (1 << 10)) {
8587 /* data processing extended or blx */
8588 rd = (insn & 7) | ((insn >> 4) & 8);
8589 rm = (insn >> 3) & 0xf;
8590 op = (insn >> 8) & 3;
8591 switch (op) {
8592 case 0: /* add */
8593 tmp = load_reg(s, rd);
8594 tmp2 = load_reg(s, rm);
8595 tcg_gen_add_i32(tmp, tmp, tmp2);
8596 dead_tmp(tmp2);
8597 store_reg(s, rd, tmp);
8598 break;
8599 case 1: /* cmp */
8600 tmp = load_reg(s, rd);
8601 tmp2 = load_reg(s, rm);
8602 gen_helper_sub_cc(tmp, tmp, tmp2);
8603 dead_tmp(tmp2);
8604 dead_tmp(tmp);
8605 break;
8606 case 2: /* mov/cpy */
8607 tmp = load_reg(s, rm);
8608 store_reg(s, rd, tmp);
8609 break;
8610 case 3:/* branch [and link] exchange thumb register */
8611 tmp = load_reg(s, rm);
8612 if (insn & (1 << 7)) {
8613 val = (uint32_t)s->pc | 1;
8614 tmp2 = new_tmp();
8615 tcg_gen_movi_i32(tmp2, val);
8616 store_reg(s, 14, tmp2);
8617 }
8618 gen_bx(s, tmp);
8619 break;
8620 }
8621 break;
8622 }
8623
8624 /* data processing register */
8625 rd = insn & 7;
8626 rm = (insn >> 3) & 7;
8627 op = (insn >> 6) & 0xf;
8628 if (op == 2 || op == 3 || op == 4 || op == 7) {
8629 /* the shift/rotate ops want the operands backwards */
8630 val = rm;
8631 rm = rd;
8632 rd = val;
8633 val = 1;
8634 } else {
8635 val = 0;
8636 }
8637
8638 if (op == 9) { /* neg */
8639 tmp = new_tmp();
8640 tcg_gen_movi_i32(tmp, 0);
8641 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8642 tmp = load_reg(s, rd);
8643 } else {
8644 TCGV_UNUSED(tmp);
8645 }
8646
8647 tmp2 = load_reg(s, rm);
8648 switch (op) {
8649 case 0x0: /* and */
8650 tcg_gen_and_i32(tmp, tmp, tmp2);
8651 if (!s->condexec_mask)
8652 gen_logic_CC(tmp);
8653 break;
8654 case 0x1: /* eor */
8655 tcg_gen_xor_i32(tmp, tmp, tmp2);
8656 if (!s->condexec_mask)
8657 gen_logic_CC(tmp);
8658 break;
8659 case 0x2: /* lsl */
8660 if (s->condexec_mask) {
8661 gen_helper_shl(tmp2, tmp2, tmp);
8662 } else {
8663 gen_helper_shl_cc(tmp2, tmp2, tmp);
8664 gen_logic_CC(tmp2);
8665 }
8666 break;
8667 case 0x3: /* lsr */
8668 if (s->condexec_mask) {
8669 gen_helper_shr(tmp2, tmp2, tmp);
8670 } else {
8671 gen_helper_shr_cc(tmp2, tmp2, tmp);
8672 gen_logic_CC(tmp2);
8673 }
8674 break;
8675 case 0x4: /* asr */
8676 if (s->condexec_mask) {
8677 gen_helper_sar(tmp2, tmp2, tmp);
8678 } else {
8679 gen_helper_sar_cc(tmp2, tmp2, tmp);
8680 gen_logic_CC(tmp2);
8681 }
8682 break;
8683 case 0x5: /* adc */
8684 if (s->condexec_mask)
8685 gen_adc(tmp, tmp2);
8686 else
8687 gen_helper_adc_cc(tmp, tmp, tmp2);
8688 break;
8689 case 0x6: /* sbc */
8690 if (s->condexec_mask)
8691 gen_sub_carry(tmp, tmp, tmp2);
8692 else
8693 gen_helper_sbc_cc(tmp, tmp, tmp2);
8694 break;
8695 case 0x7: /* ror */
8696 if (s->condexec_mask) {
8697 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8698 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
8699 } else {
8700 gen_helper_ror_cc(tmp2, tmp2, tmp);
8701 gen_logic_CC(tmp2);
8702 }
8703 break;
8704 case 0x8: /* tst */
8705 tcg_gen_and_i32(tmp, tmp, tmp2);
8706 gen_logic_CC(tmp);
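            /* rd = 16 is not a real register: it marks flag-setting ops
               (tst/cmp/cmn) whose result is discarded; see the register
               writeback below. */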
8707 rd = 16;
8708 break;
8709 case 0x9: /* neg */
8710 if (s->condexec_mask)
8711 tcg_gen_neg_i32(tmp, tmp2);
8712 else
8713 gen_helper_sub_cc(tmp, tmp, tmp2);
8714 break;
8715 case 0xa: /* cmp */
8716 gen_helper_sub_cc(tmp, tmp, tmp2);
8717 rd = 16;
8718 break;
8719 case 0xb: /* cmn */
8720 gen_helper_add_cc(tmp, tmp, tmp2);
8721 rd = 16;
8722 break;
8723 case 0xc: /* orr */
8724 tcg_gen_or_i32(tmp, tmp, tmp2);
8725 if (!s->condexec_mask)
8726 gen_logic_CC(tmp);
8727 break;
8728 case 0xd: /* mul */
8729 tcg_gen_mul_i32(tmp, tmp, tmp2);
8730 if (!s->condexec_mask)
8731 gen_logic_CC(tmp);
8732 break;
8733 case 0xe: /* bic */
8734 tcg_gen_andc_i32(tmp, tmp, tmp2);
8735 if (!s->condexec_mask)
8736 gen_logic_CC(tmp);
8737 break;
8738 case 0xf: /* mvn */
8739 tcg_gen_not_i32(tmp2, tmp2);
8740 if (!s->condexec_mask)
8741 gen_logic_CC(tmp2);
8742 val = 1;
8743 rm = rd;
8744 break;
8745 }
8746 if (rd != 16) {
8747 if (val) {
8748 store_reg(s, rm, tmp2);
8749 if (op != 0xf)
8750 dead_tmp(tmp);
8751 } else {
8752 store_reg(s, rd, tmp);
8753 dead_tmp(tmp2);
8754 }
8755 } else {
8756 dead_tmp(tmp);
8757 dead_tmp(tmp2);
8758 }
8759 break;
8760
8761 case 5:
8762 /* load/store register offset. */
8763 rd = insn & 7;
8764 rn = (insn >> 3) & 7;
8765 rm = (insn >> 6) & 7;
8766 op = (insn >> 9) & 7;
8767 addr = load_reg(s, rn);
8768 tmp = load_reg(s, rm);
8769 tcg_gen_add_i32(addr, addr, tmp);
8770 dead_tmp(tmp);
8771
8772 if (op < 3) /* store */
8773 tmp = load_reg(s, rd);
8774
8775 switch (op) {
8776 case 0: /* str */
8777 gen_st32(tmp, addr, IS_USER(s));
8778 break;
8779 case 1: /* strh */
8780 gen_st16(tmp, addr, IS_USER(s));
8781 break;
8782 case 2: /* strb */
8783 gen_st8(tmp, addr, IS_USER(s));
8784 break;
8785 case 3: /* ldrsb */
8786 tmp = gen_ld8s(addr, IS_USER(s));
8787 break;
8788 case 4: /* ldr */
8789 tmp = gen_ld32(addr, IS_USER(s));
8790 break;
8791 case 5: /* ldrh */
8792 tmp = gen_ld16u(addr, IS_USER(s));
8793 break;
8794 case 6: /* ldrb */
8795 tmp = gen_ld8u(addr, IS_USER(s));
8796 break;
8797 case 7: /* ldrsh */
8798 tmp = gen_ld16s(addr, IS_USER(s));
8799 break;
8800 }
8801 if (op >= 3) /* load */
8802 store_reg(s, rd, tmp);
8803 dead_tmp(addr);
8804 break;
8805
8806 case 6:
8807 /* load/store word immediate offset */
8808 rd = insn & 7;
8809 rn = (insn >> 3) & 7;
8810 addr = load_reg(s, rn);
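        /* the 5-bit immediate (insn bits [10:6]) is scaled by 4 for words:
           (insn >> 4) & 0x7c == ((insn >> 6) & 0x1f) << 2 */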
8811 val = (insn >> 4) & 0x7c;
8812 tcg_gen_addi_i32(addr, addr, val);
8813
8814 if (insn & (1 << 11)) {
8815 /* load */
8816 tmp = gen_ld32(addr, IS_USER(s));
8817 store_reg(s, rd, tmp);
8818 } else {
8819 /* store */
8820 tmp = load_reg(s, rd);
8821 gen_st32(tmp, addr, IS_USER(s));
8822 }
8823 dead_tmp(addr);
8824 break;
8825
8826 case 7:
8827 /* load/store byte immediate offset */
8828 rd = insn & 7;
8829 rn = (insn >> 3) & 7;
8830 addr = load_reg(s, rn);
8831 val = (insn >> 6) & 0x1f;
8832 tcg_gen_addi_i32(addr, addr, val);
8833
8834 if (insn & (1 << 11)) {
8835 /* load */
8836 tmp = gen_ld8u(addr, IS_USER(s));
8837 store_reg(s, rd, tmp);
8838 } else {
8839 /* store */
8840 tmp = load_reg(s, rd);
8841 gen_st8(tmp, addr, IS_USER(s));
8842 }
8843 dead_tmp(addr);
8844 break;
8845
8846 case 8:
8847 /* load/store halfword immediate offset */
8848 rd = insn & 7;
8849 rn = (insn >> 3) & 7;
8850 addr = load_reg(s, rn);
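        /* the 5-bit immediate is scaled by 2 for halfwords:
           (insn >> 5) & 0x3e == ((insn >> 6) & 0x1f) << 1 */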
8851 val = (insn >> 5) & 0x3e;
8852 tcg_gen_addi_i32(addr, addr, val);
8853
8854 if (insn & (1 << 11)) {
8855 /* load */
8856 tmp = gen_ld16u(addr, IS_USER(s));
8857 store_reg(s, rd, tmp);
8858 } else {
8859 /* store */
8860 tmp = load_reg(s, rd);
8861 gen_st16(tmp, addr, IS_USER(s));
8862 }
8863 dead_tmp(addr);
8864 break;
8865
8866 case 9:
8867 /* load/store from stack */
8868 rd = (insn >> 8) & 7;
8869 addr = load_reg(s, 13);
8870 val = (insn & 0xff) * 4;
8871 tcg_gen_addi_i32(addr, addr, val);
8872
8873 if (insn & (1 << 11)) {
8874 /* load */
8875 tmp = gen_ld32(addr, IS_USER(s));
8876 store_reg(s, rd, tmp);
8877 } else {
8878 /* store */
8879 tmp = load_reg(s, rd);
8880 gen_st32(tmp, addr, IS_USER(s));
8881 }
8882 dead_tmp(addr);
8883 break;
8884
8885 case 10:
8886 /* add to high reg */
8887 rd = (insn >> 8) & 7;
8888 if (insn & (1 << 11)) {
8889 /* SP */
8890 tmp = load_reg(s, 13);
8891 } else {
8892 /* PC. Bit 1 is ignored. */
8893 tmp = new_tmp();
8894 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8895 }
8896 val = (insn & 0xff) * 4;
8897 tcg_gen_addi_i32(tmp, tmp, val);
8898 store_reg(s, rd, tmp);
8899 break;
8900
8901 case 11:
8902 /* misc */
8903 op = (insn >> 8) & 0xf;
8904 switch (op) {
8905 case 0:
8906 /* adjust stack pointer */
8907 tmp = load_reg(s, 13);
8908 val = (insn & 0x7f) * 4;
8909 if (insn & (1 << 7))
8910 val = -(int32_t)val;
8911 tcg_gen_addi_i32(tmp, tmp, val);
8912 store_reg(s, 13, tmp);
8913 break;
8914
8915 case 2: /* sign/zero extend. */
8916 ARCH(6);
8917 rd = insn & 7;
8918 rm = (insn >> 3) & 7;
8919 tmp = load_reg(s, rm);
8920 switch ((insn >> 6) & 3) {
8921 case 0: gen_sxth(tmp); break;
8922 case 1: gen_sxtb(tmp); break;
8923 case 2: gen_uxth(tmp); break;
8924 case 3: gen_uxtb(tmp); break;
8925 }
8926 store_reg(s, rd, tmp);
8927 break;
8928 case 4: case 5: case 0xc: case 0xd:
8929 /* push/pop */
8930 addr = load_reg(s, 13);
8931 if (insn & (1 << 8))
8932 offset = 4;
8933 else
8934 offset = 0;
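            /* total transfer size: 4 bytes per register in the list, plus
               4 more when bit 8 adds LR (push) or PC (pop) */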
8935 for (i = 0; i < 8; i++) {
8936 if (insn & (1 << i))
8937 offset += 4;
8938 }
8939 if ((insn & (1 << 11)) == 0) {
8940 tcg_gen_addi_i32(addr, addr, -offset);
8941 }
8942 for (i = 0; i < 8; i++) {
8943 if (insn & (1 << i)) {
8944 if (insn & (1 << 11)) {
8945 /* pop */
8946 tmp = gen_ld32(addr, IS_USER(s));
8947 store_reg(s, i, tmp);
8948 } else {
8949 /* push */
8950 tmp = load_reg(s, i);
8951 gen_st32(tmp, addr, IS_USER(s));
8952 }
8953 /* advance to the next address. */
8954 tcg_gen_addi_i32(addr, addr, 4);
8955 }
8956 }
8957 TCGV_UNUSED(tmp);
8958 if (insn & (1 << 8)) {
8959 if (insn & (1 << 11)) {
8960 /* pop pc */
8961 tmp = gen_ld32(addr, IS_USER(s));
8962 /* don't set the pc until the rest of the instruction
8963 has completed */
8964 } else {
8965 /* push lr */
8966 tmp = load_reg(s, 14);
8967 gen_st32(tmp, addr, IS_USER(s));
8968 }
8969 tcg_gen_addi_i32(addr, addr, 4);
8970 }
8971 if ((insn & (1 << 11)) == 0) {
8972 tcg_gen_addi_i32(addr, addr, -offset);
8973 }
8974 /* write back the new stack pointer */
8975 store_reg(s, 13, addr);
8976 /* set the new PC value */
8977 if ((insn & 0x0900) == 0x0900)
8978 gen_bx(s, tmp);
8979 break;
8980
8981 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8982 rm = insn & 7;
8983 tmp = load_reg(s, rm);
8984 s->condlabel = gen_new_label();
8985 s->condjmp = 1;
8986 if (insn & (1 << 11))
8987 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8988 else
8989 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8990 dead_tmp(tmp);
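            /* the branch offset is (i:imm5) << 1, assembled from insn
               bits [9] and [7:3] */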
8991 offset = ((insn & 0xf8) >> 2) | ((insn & 0x200) >> 3);
8992 val = (uint32_t)s->pc + 2;
8993 val += offset;
8994 gen_jmp(s, val);
8995 break;
8996
8997 case 15: /* IT, nop-hint. */
8998 if ((insn & 0xf) == 0) {
8999 gen_nop_hint(s, (insn >> 4) & 0xf);
9000 break;
9001 }
9002 /* If Then. */
9003 s->condexec_cond = (insn >> 4) & 0xe;
9004 s->condexec_mask = insn & 0x1f;
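            /* Worked example, for illustration only: ITE EQ encodes as
               0xbf0c, giving condexec_cond = 0 (EQ) and condexec_mask =
               0x0c; the per-insn advance in gen_intermediate_code_internal()
               then yields EQ for the first insn of the block and NE for
               the second. */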
9005 /* No actual code generated for this insn, just setup state. */
9006 break;
9007
9008 case 0xe: /* bkpt */
9009 gen_exception_insn(s, 2, EXCP_BKPT);
9010 break;
9011
9012 case 0xa: /* rev */
9013 ARCH(6);
9014 rn = (insn >> 3) & 0x7;
9015 rd = insn & 0x7;
9016 tmp = load_reg(s, rn);
9017 switch ((insn >> 6) & 3) {
9018 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9019 case 1: gen_rev16(tmp); break;
9020 case 3: gen_revsh(tmp); break;
9021 default: goto illegal_op;
9022 }
9023 store_reg(s, rd, tmp);
9024 break;
9025
9026 case 6: /* cps */
9027 ARCH(6);
9028 if (IS_USER(s))
9029 break;
9030 if (IS_M(env)) {
9031 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9032 /* PRIMASK */
9033 if (insn & 1) {
9034 addr = tcg_const_i32(16);
9035 gen_helper_v7m_msr(cpu_env, addr, tmp);
9036 tcg_temp_free_i32(addr);
9037 }
9038 /* FAULTMASK */
9039 if (insn & 2) {
9040 addr = tcg_const_i32(17);
9041 gen_helper_v7m_msr(cpu_env, addr, tmp);
9042 tcg_temp_free_i32(addr);
9043 }
9044 tcg_temp_free_i32(tmp);
9045 gen_lookup_tb(s);
9046 } else {
9047 if (insn & (1 << 4))
9048 shift = CPSR_A | CPSR_I | CPSR_F;
9049 else
9050 shift = 0;
9051 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9052 }
9053 break;
9054
9055 default:
9056 goto undef;
9057 }
9058 break;
9059
9060 case 12:
9061 /* load/store multiple */
9062 rn = (insn >> 8) & 0x7;
9063 addr = load_reg(s, rn);
9064 for (i = 0; i < 8; i++) {
9065 if (insn & (1 << i)) {
9066 if (insn & (1 << 11)) {
9067 /* load */
9068 tmp = gen_ld32(addr, IS_USER(s));
9069 store_reg(s, i, tmp);
9070 } else {
9071 /* store */
9072 tmp = load_reg(s, i);
9073 gen_st32(tmp, addr, IS_USER(s));
9074 }
9075 /* advance to the next address */
9076 tcg_gen_addi_i32(addr, addr, 4);
9077 }
9078 }
9079 /* Base register writeback. */
9080 if ((insn & (1 << rn)) == 0) {
9081 store_reg(s, rn, addr);
9082 } else {
9083 dead_tmp(addr);
9084 }
9085 break;
9086
9087 case 13:
9088 /* conditional branch or swi */
9089 cond = (insn >> 8) & 0xf;
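        /* cond 0xe in a conditional branch is the architecturally
           UNDEFINED encoding */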
9090 if (cond == 0xe)
9091 goto undef;
9092
9093 if (cond == 0xf) {
9094 /* swi */
9095 gen_set_pc_im(s->pc);
9096 s->is_jmp = DISAS_SWI;
9097 break;
9098 }
9099 /* generate a conditional jump to next instruction */
9100 s->condlabel = gen_new_label();
9101 gen_test_cc(cond ^ 1, s->condlabel);
9102 s->condjmp = 1;
9103
9104 /* jump to the offset */
9105 val = (uint32_t)s->pc + 2;
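        /* the base is the branch insn's address + 4 (the Thumb PC reads
           two insns ahead); the shifts below sign-extend the 8-bit
           immediate */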
9106 offset = ((int32_t)insn << 24) >> 24;
9107 val += offset << 1;
9108 gen_jmp(s, val);
9109 break;
9110
9111 case 14:
9112 if (insn & (1 << 11)) {
9113 if (disas_thumb2_insn(env, s, insn))
9114 goto undef32;
9115 break;
9116 }
9117 /* unconditional branch */
9118 val = (uint32_t)s->pc;
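        /* the shifts sign-extend the 11-bit immediate; the +2 below again
           accounts for the Thumb PC read-ahead */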
9119 offset = ((int32_t)insn << 21) >> 21;
9120 val += (offset << 1) + 2;
9121 gen_jmp(s, val);
9122 break;
9123
9124 case 15:
9125 if (disas_thumb2_insn(env, s, insn))
9126 goto undef32;
9127 break;
9128 }
9129 return;
9130 undef32:
9131 gen_exception_insn(s, 4, EXCP_UDEF);
9132 return;
9133 illegal_op:
9134 undef:
9135 gen_exception_insn(s, 2, EXCP_UDEF);
9136 }
9137
9138 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9139 basic block 'tb'. If search_pc is TRUE, also generate PC
9140 information for each intermediate instruction. */
9141 static inline void gen_intermediate_code_internal(CPUState *env,
9142 TranslationBlock *tb,
9143 int search_pc)
9144 {
9145 DisasContext dc1, *dc = &dc1;
9146 CPUBreakpoint *bp;
9147 uint16_t *gen_opc_end;
9148 int j, lj;
9149 target_ulong pc_start;
9150 uint32_t next_page_start;
9151 int num_insns;
9152 int max_insns;
9153
9154 /* generate intermediate code */
9155 num_temps = 0;
9156
9157 pc_start = tb->pc;
9158
9159 dc->tb = tb;
9160
9161 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9162
9163 dc->is_jmp = DISAS_NEXT;
9164 dc->pc = pc_start;
9165 dc->singlestep_enabled = env->singlestep_enabled;
9166 dc->condjmp = 0;
9167 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9168 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9169 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9170 #if !defined(CONFIG_USER_ONLY)
9171 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9172 #endif
9173 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9174 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9175 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9176 cpu_F0s = tcg_temp_new_i32();
9177 cpu_F1s = tcg_temp_new_i32();
9178 cpu_F0d = tcg_temp_new_i64();
9179 cpu_F1d = tcg_temp_new_i64();
9180 cpu_V0 = cpu_F0d;
9181 cpu_V1 = cpu_F1d;
9182 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9183 cpu_M0 = tcg_temp_new_i64();
9184 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9185 lj = -1;
9186 num_insns = 0;
9187 max_insns = tb->cflags & CF_COUNT_MASK;
9188 if (max_insns == 0)
9189 max_insns = CF_COUNT_MASK;
9190
9191 gen_icount_start();
9192
9193 /* A note on handling of the condexec (IT) bits:
9194 *
9195 * We want to avoid the overhead of having to write the updated condexec
9196 * bits back to the CPUState for every instruction in an IT block. So:
9197 * (1) if the condexec bits are not already zero then we write
9198 * zero back into the CPUState now. This avoids complications trying
9199 * to do it at the end of the block. (For example if we don't do this
9200 * it's hard to identify whether we can safely skip writing condexec
9201 * at the end of the TB, which we definitely want to do for the case
9202 * where a TB doesn't do anything with the IT state at all.)
9203 * (2) if we are going to leave the TB then we call gen_set_condexec()
9204 * which will write the correct value into CPUState if zero is wrong.
9205 * This is done both for leaving the TB at the end, and for leaving
9206 * it because of an exception we know will happen, which is done in
9207 * gen_exception_insn(). The latter is necessary because we need to
9208 * leave the TB with the PC/IT state just prior to execution of the
9209 * instruction which caused the exception.
9210 * (3) if we leave the TB unexpectedly (e.g. a data abort on a load)
9211 * then the CPUState will be wrong and we need to reset it.
9212 * This is handled in the same way as restoration of the
9213 * PC in these situations: we will be called again with search_pc=1
9214 * and generate a mapping of the condexec bits for each PC in
9215 * gen_opc_condexec_bits[]. gen_pc_load() then uses this to restore
9216 * the condexec bits.
9217 *
9218 * Note that there are no instructions which can read the condexec
9219 * bits, and none which can write non-static values to them, so
9220 * we don't need to care about whether CPUState is correct in the
9221 * middle of a TB.
9222 */
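    /* Note: the (condexec_cond << 4) | (condexec_mask >> 1) packing used
       for gen_opc_condexec_bits[] below mirrors the ARM_TBFLAG_CONDEXEC
       layout decoded at the top of this function. */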
9223
9224 /* Reset the conditional execution bits immediately, as described in
9225 note (1) of the comment block above. */
9226 if (dc->condexec_mask || dc->condexec_cond)
9227 {
9228 TCGv tmp = new_tmp();
9229 tcg_gen_movi_i32(tmp, 0);
9230 store_cpu_field(tmp, condexec_bits);
9231 }
9232 do {
9233 #ifdef CONFIG_USER_ONLY
9234 /* Intercept jump to the magic kernel page. */
9235 if (dc->pc >= 0xffff0000) {
9236 /* We always get here via a jump, so we know we are not in a
9237 conditional execution block. */
9238 gen_exception(EXCP_KERNEL_TRAP);
9239 dc->is_jmp = DISAS_UPDATE;
9240 break;
9241 }
9242 #else
9243 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9244 /* We always get here via a jump, so we know we are not in a
9245 conditional execution block. */
9246 gen_exception(EXCP_EXCEPTION_EXIT);
9247 dc->is_jmp = DISAS_UPDATE;
9248 break;
9249 }
9250 #endif
9251
9252 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9253 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9254 if (bp->pc == dc->pc) {
9255 gen_exception_insn(dc, 0, EXCP_DEBUG);
9256 /* Advance PC so that clearing the breakpoint will
9257 invalidate this TB. */
9258 dc->pc += 2;
9259 goto done_generating;
9260 break;
9261 }
9262 }
9263 }
9264 if (search_pc) {
9265 j = gen_opc_ptr - gen_opc_buf;
9266 if (lj < j) {
9267 lj++;
9268 while (lj < j)
9269 gen_opc_instr_start[lj++] = 0;
9270 }
9271 gen_opc_pc[lj] = dc->pc;
9272 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9273 gen_opc_instr_start[lj] = 1;
9274 gen_opc_icount[lj] = num_insns;
9275 }
9276
9277 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9278 gen_io_start();
9279
9280 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9281 tcg_gen_debug_insn_start(dc->pc);
9282 }
9283
9284 if (dc->thumb) {
9285 disas_thumb_insn(env, dc);
9286 if (dc->condexec_mask) {
9287 dc->condexec_cond = (dc->condexec_cond & 0xe)
9288 | ((dc->condexec_mask >> 4) & 1);
9289 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9290 if (dc->condexec_mask == 0) {
9291 dc->condexec_cond = 0;
9292 }
9293 }
9294 } else {
9295 disas_arm_insn(env, dc);
9296 }
9297 if (num_temps) {
9298 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
9299 num_temps = 0;
9300 }
9301
9302 if (dc->condjmp && !dc->is_jmp) {
9303 gen_set_label(dc->condlabel);
9304 dc->condjmp = 0;
9305 }
9306 /* Translation stops when a conditional branch is encountered.
9307 * Otherwise the subsequent code could get translated several times.
9308 * Also stop translation when a page boundary is reached. This
9309 * ensures prefetch aborts occur at the right place. */
9310 num_insns++;
9311 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9312 !env->singlestep_enabled &&
9313 !singlestep &&
9314 dc->pc < next_page_start &&
9315 num_insns < max_insns);
9316
9317 if (tb->cflags & CF_LAST_IO) {
9318 if (dc->condjmp) {
9319 /* FIXME: This can theoretically happen with self-modifying
9320 code. */
9321 cpu_abort(env, "IO on conditional branch instruction");
9322 }
9323 gen_io_end();
9324 }
9325
9326 /* At this stage dc->condjmp will only be set when the skipped
9327 instruction was a conditional branch or trap, and the PC has
9328 already been written. */
9329 if (unlikely(env->singlestep_enabled)) {
9330 /* Make sure the pc is updated, and raise a debug exception. */
9331 if (dc->condjmp) {
9332 gen_set_condexec(dc);
9333 if (dc->is_jmp == DISAS_SWI) {
9334 gen_exception(EXCP_SWI);
9335 } else {
9336 gen_exception(EXCP_DEBUG);
9337 }
9338 gen_set_label(dc->condlabel);
9339 }
9340 if (dc->condjmp || !dc->is_jmp) {
9341 gen_set_pc_im(dc->pc);
9342 dc->condjmp = 0;
9343 }
9344 gen_set_condexec(dc);
9345 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9346 gen_exception(EXCP_SWI);
9347 } else {
9348 /* FIXME: Single stepping a WFI insn will not halt
9349 the CPU. */
9350 gen_exception(EXCP_DEBUG);
9351 }
9352 } else {
9353 /* While branches must always occur at the end of an IT block,
9354 there are a few other things that can cause us to terminate
9355 the TB in the middle of an IT block:
9356 - Exception generating instructions (bkpt, swi, undefined).
9357 - Page boundaries.
9358 - Hardware watchpoints.
9359 Hardware breakpoints have already been handled and skip this code.
9360 */
9361 gen_set_condexec(dc);
9362 switch (dc->is_jmp) {
9363 case DISAS_NEXT:
9364 gen_goto_tb(dc, 1, dc->pc);
9365 break;
9366 default:
9367 case DISAS_JUMP:
9368 case DISAS_UPDATE:
9369 /* indicate that the hash table must be used to find the next TB */
9370 tcg_gen_exit_tb(0);
9371 break;
9372 case DISAS_TB_JUMP:
9373 /* nothing more to generate */
9374 break;
9375 case DISAS_WFI:
9376 gen_helper_wfi();
9377 break;
9378 case DISAS_SWI:
9379 gen_exception(EXCP_SWI);
9380 break;
9381 }
9382 if (dc->condjmp) {
9383 gen_set_label(dc->condlabel);
9384 gen_set_condexec(dc);
9385 gen_goto_tb(dc, 1, dc->pc);
9386 dc->condjmp = 0;
9387 }
9388 }
9389
9390 done_generating:
9391 gen_icount_end(tb, num_insns);
9392 *gen_opc_ptr = INDEX_op_end;
9393
9394 #ifdef DEBUG_DISAS
9395 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9396 qemu_log("----------------\n");
9397 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9398 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
9399 qemu_log("\n");
9400 }
9401 #endif
9402 if (search_pc) {
9403 j = gen_opc_ptr - gen_opc_buf;
9404 lj++;
9405 while (lj <= j)
9406 gen_opc_instr_start[lj++] = 0;
9407 } else {
9408 tb->size = dc->pc - pc_start;
9409 tb->icount = num_insns;
9410 }
9411 }
9412
9413 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
9414 {
9415 gen_intermediate_code_internal(env, tb, 0);
9416 }
9417
9418 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
9419 {
9420 gen_intermediate_code_internal(env, tb, 1);
9421 }
9422
9423 static const char *cpu_mode_names[16] = {
9424 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9425 "???", "???", "???", "und", "???", "???", "???", "sys"
9426 };
9427
9428 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
9429 int flags)
9430 {
9431 int i;
9432 #if 0
9433 union {
9434 uint32_t i;
9435 float s;
9436 } s0, s1;
9437 CPU_DoubleU d;
9438 /* ??? This assumes float64 and double have the same layout.
9439 Oh well, it's only debug dumps. */
9440 union {
9441 float64 f64;
9442 double d;
9443 } d0;
9444 #endif
9445 uint32_t psr;
9446
9447 for (i = 0; i < 16; i++) {
9448 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9449 if ((i % 4) == 3)
9450 cpu_fprintf(f, "\n");
9451 else
9452 cpu_fprintf(f, " ");
9453 }
9454 psr = cpsr_read(env);
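    /* bit 4 of the mode field distinguishes the 32-bit modes from the
       legacy 26-bit modes, hence the trailing 32/26 in the dump below */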
9455 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9456 psr,
9457 psr & (1 << 31) ? 'N' : '-',
9458 psr & (1 << 30) ? 'Z' : '-',
9459 psr & (1 << 29) ? 'C' : '-',
9460 psr & (1 << 28) ? 'V' : '-',
9461 psr & CPSR_T ? 'T' : 'A',
9462 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
9463
9464 #if 0
9465 for (i = 0; i < 16; i++) {
9466 d.d = env->vfp.regs[i];
9467 s0.i = d.l.lower;
9468 s1.i = d.l.upper;
9469 d0.f64 = d.d;
9470 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9471 i * 2, (int)s0.i, s0.s,
9472 i * 2 + 1, (int)s1.i, s1.s,
9473 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
9474 d0.d);
9475 }
9476 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
9477 #endif
9478 }
9479
9480 void gen_pc_load(CPUState *env, TranslationBlock *tb,
9481 unsigned long searched_pc, int pc_pos, void *puc)
9482 {
9483 env->regs[15] = gen_opc_pc[pc_pos];
9484 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
9485 }