1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
30 #include "tcg-op.h"
31 #include "qemu-log.h"
32
33 #include "helpers.h"
34 #define GEN_HELPER 1
35 #include "helpers.h"
36
37 #define ENABLE_ARCH_5J 0
38 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
42
43 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
44
45 /* internal defines */
46 typedef struct DisasContext {
47 target_ulong pc;
48 int is_jmp;
49 /* Nonzero if this instruction has been conditionally skipped. */
50 int condjmp;
51 /* The label that will be jumped to when the instruction is skipped. */
52 int condlabel;
53 /* Thumb-2 conditional execution bits. */
54 int condexec_mask;
55 int condexec_cond;
56 struct TranslationBlock *tb;
57 int singlestep_enabled;
58 int thumb;
59 #if !defined(CONFIG_USER_ONLY)
60 int user;
61 #endif
62 int vfp_enabled;
63 int vec_len;
64 int vec_stride;
65 } DisasContext;
66
67 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
68
69 #if defined(CONFIG_USER_ONLY)
70 #define IS_USER(s) 1
71 #else
72 #define IS_USER(s) (s->user)
73 #endif
74
75 /* These instructions trap after executing, so defer them until after the
76 conditional execution state has been updated. */
77 #define DISAS_WFI 4
78 #define DISAS_SWI 5
79
80 static TCGv_ptr cpu_env;
81 /* We reuse the same 64-bit temporaries for efficiency. */
82 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
83 static TCGv_i32 cpu_R[16];
84 static TCGv_i32 cpu_exclusive_addr;
85 static TCGv_i32 cpu_exclusive_val;
86 static TCGv_i32 cpu_exclusive_high;
87 #ifdef CONFIG_USER_ONLY
88 static TCGv_i32 cpu_exclusive_test;
89 static TCGv_i32 cpu_exclusive_info;
90 #endif
91
92 /* FIXME: These should be removed. */
93 static TCGv cpu_F0s, cpu_F1s;
94 static TCGv_i64 cpu_F0d, cpu_F1d;
95
96 #include "gen-icount.h"
97
98 static const char *regnames[] =
99 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
100 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
101
102 /* initialize TCG globals. */
103 void arm_translate_init(void)
104 {
105 int i;
106
107 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
108
109 for (i = 0; i < 16; i++) {
110 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
111 offsetof(CPUState, regs[i]),
112 regnames[i]);
113 }
114 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUState, exclusive_addr), "exclusive_addr");
116 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUState, exclusive_val), "exclusive_val");
118 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_high), "exclusive_high");
120 #ifdef CONFIG_USER_ONLY
121 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_test), "exclusive_test");
123 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
124 offsetof(CPUState, exclusive_info), "exclusive_info");
125 #endif
126
127 #define GEN_HELPER 2
128 #include "helpers.h"
129 }
130
131 static int num_temps;
132
133 /* Allocate a temporary variable. */
134 static TCGv_i32 new_tmp(void)
135 {
136 num_temps++;
137 return tcg_temp_new_i32();
138 }
139
140 /* Release a temporary variable. */
141 static void dead_tmp(TCGv tmp)
142 {
143 tcg_temp_free(tmp);
144 num_temps--;
145 }
146
147 static inline TCGv load_cpu_offset(int offset)
148 {
149 TCGv tmp = new_tmp();
150 tcg_gen_ld_i32(tmp, cpu_env, offset);
151 return tmp;
152 }
153
154 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
155
156 static inline void store_cpu_offset(TCGv var, int offset)
157 {
158 tcg_gen_st_i32(var, cpu_env, offset);
159 dead_tmp(var);
160 }
161
162 #define store_cpu_field(var, name) \
163 store_cpu_offset(var, offsetof(CPUState, name))
164
165 /* Set a variable to the value of a CPU register. */
166 static void load_reg_var(DisasContext *s, TCGv var, int reg)
167 {
168 if (reg == 15) {
169 uint32_t addr;
170 /* normally s->pc is already past this insn, so add just one more insn: PC reads as insn address + 8 (ARM) or + 4 (Thumb) */
171 if (s->thumb)
172 addr = (long)s->pc + 2;
173 else
174 addr = (long)s->pc + 4;
175 tcg_gen_movi_i32(var, addr);
176 } else {
177 tcg_gen_mov_i32(var, cpu_R[reg]);
178 }
179 }
180
181 /* Create a new temporary and set it to the value of a CPU register. */
182 static inline TCGv load_reg(DisasContext *s, int reg)
183 {
184 TCGv tmp = new_tmp();
185 load_reg_var(s, tmp, reg);
186 return tmp;
187 }
188
189 /* Set a CPU register. The source must be a temporary and will be
190 marked as dead. */
191 static void store_reg(DisasContext *s, int reg, TCGv var)
192 {
193 if (reg == 15) {
194 tcg_gen_andi_i32(var, var, ~1);
195 s->is_jmp = DISAS_JUMP;
196 }
197 tcg_gen_mov_i32(cpu_R[reg], var);
198 dead_tmp(var);
199 }
200
201 /* Value extensions. */
202 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
203 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
204 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
205 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
206
207 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
208 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
209
210
211 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
212 {
213 TCGv tmp_mask = tcg_const_i32(mask);
214 gen_helper_cpsr_write(var, tmp_mask);
215 tcg_temp_free_i32(tmp_mask);
216 }
217 /* Set NZCV flags from the high 4 bits of var. */
218 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
219
220 static void gen_exception(int excp)
221 {
222 TCGv tmp = new_tmp();
223 tcg_gen_movi_i32(tmp, excp);
224 gen_helper_exception(tmp);
225 dead_tmp(tmp);
226 }
227
228 static void gen_smul_dual(TCGv a, TCGv b)
229 {
230 TCGv tmp1 = new_tmp();
231 TCGv tmp2 = new_tmp();
232 tcg_gen_ext16s_i32(tmp1, a);
233 tcg_gen_ext16s_i32(tmp2, b);
234 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
235 dead_tmp(tmp2);
236 tcg_gen_sari_i32(a, a, 16);
237 tcg_gen_sari_i32(b, b, 16);
238 tcg_gen_mul_i32(b, b, a);
239 tcg_gen_mov_i32(a, tmp1);
240 dead_tmp(tmp1);
241 }
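/* Illustration (derived from the code above): gen_smul_dual leaves the
   product of the low halfwords in a and the product of the high halfwords
   in b.  E.g. with a = 0x0003fffe (hi 3, lo -2) and b = 0x00020005
   (hi 2, lo 5), it returns a = -10 and b = 6. */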
242
243 /* Byteswap each halfword. */
244 static void gen_rev16(TCGv var)
245 {
246 TCGv tmp = new_tmp();
247 tcg_gen_shri_i32(tmp, var, 8);
248 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
249 tcg_gen_shli_i32(var, var, 8);
250 tcg_gen_andi_i32(var, var, 0xff00ff00);
251 tcg_gen_or_i32(var, var, tmp);
252 dead_tmp(tmp);
253 }
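/* Example: var = 0xaabbccdd -> 0xbbaaddcc (each 16-bit half byteswapped). */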
254
255 /* Byteswap low halfword and sign extend. */
256 static void gen_revsh(TCGv var)
257 {
258 tcg_gen_ext16u_i32(var, var);
259 tcg_gen_bswap16_i32(var, var);
260 tcg_gen_ext16s_i32(var, var);
261 }
262
263 /* Unsigned bitfield extract. */
264 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
265 {
266 if (shift)
267 tcg_gen_shri_i32(var, var, shift);
268 tcg_gen_andi_i32(var, var, mask);
269 }
270
271 /* Signed bitfield extract. */
272 static void gen_sbfx(TCGv var, int shift, int width)
273 {
274 uint32_t signbit;
275
276 if (shift)
277 tcg_gen_sari_i32(var, var, shift);
278 if (shift + width < 32) {
279 signbit = 1u << (width - 1);
280 tcg_gen_andi_i32(var, var, (1u << width) - 1);
281 tcg_gen_xori_i32(var, var, signbit);
282 tcg_gen_subi_i32(var, var, signbit);
283 }
284 }
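/* The xor/sub pair sign extends from bit (width - 1).  Worked example:
   shift = 4, width = 8, var = 0xf50: after shift+mask var = 0xf5,
   then 0xf5 ^ 0x80 = 0x75 and 0x75 - 0x80 = 0xfffffff5 (-11), which is
   0xf5 interpreted as a signed 8-bit value. */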
285
286 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
287 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
288 {
289 tcg_gen_andi_i32(val, val, mask);
290 tcg_gen_shli_i32(val, val, shift);
291 tcg_gen_andi_i32(base, base, ~(mask << shift));
292 tcg_gen_or_i32(dest, base, val);
293 }
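/* Example: shift = 8, mask = 0xff, base = 0xaabbccdd, val = 0x12 gives
   dest = 0xaabb12dd (an 8-bit field inserted at bit 8). */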
294
295 /* Return (b << 32) + a. Mark inputs as dead. */
296 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
297 {
298 TCGv_i64 tmp64 = tcg_temp_new_i64();
299
300 tcg_gen_extu_i32_i64(tmp64, b);
301 dead_tmp(b);
302 tcg_gen_shli_i64(tmp64, tmp64, 32);
303 tcg_gen_add_i64(a, tmp64, a);
304
305 tcg_temp_free_i64(tmp64);
306 return a;
307 }
308
309 /* Return (b << 32) - a. Mark inputs as dead. */
310 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
311 {
312 TCGv_i64 tmp64 = tcg_temp_new_i64();
313
314 tcg_gen_extu_i32_i64(tmp64, b);
315 dead_tmp(b);
316 tcg_gen_shli_i64(tmp64, tmp64, 32);
317 tcg_gen_sub_i64(a, tmp64, a);
318
319 tcg_temp_free_i64(tmp64);
320 return a;
321 }
322
323 /* FIXME: Most targets have native widening multiplication.
324 It would be good to use that instead of a full wide multiply. */
325 /* 32x32->64 multiply. Marks inputs as dead. */
326 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
327 {
328 TCGv_i64 tmp1 = tcg_temp_new_i64();
329 TCGv_i64 tmp2 = tcg_temp_new_i64();
330
331 tcg_gen_extu_i32_i64(tmp1, a);
332 dead_tmp(a);
333 tcg_gen_extu_i32_i64(tmp2, b);
334 dead_tmp(b);
335 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
336 tcg_temp_free_i64(tmp2);
337 return tmp1;
338 }
339
340 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
341 {
342 TCGv_i64 tmp1 = tcg_temp_new_i64();
343 TCGv_i64 tmp2 = tcg_temp_new_i64();
344
345 tcg_gen_ext_i32_i64(tmp1, a);
346 dead_tmp(a);
347 tcg_gen_ext_i32_i64(tmp2, b);
348 dead_tmp(b);
349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
350 tcg_temp_free_i64(tmp2);
351 return tmp1;
352 }
353
354 /* Swap low and high halfwords. */
355 static void gen_swap_half(TCGv var)
356 {
357 TCGv tmp = new_tmp();
358 tcg_gen_shri_i32(tmp, var, 16);
359 tcg_gen_shli_i32(var, var, 16);
360 tcg_gen_or_i32(var, var, tmp);
361 dead_tmp(tmp);
362 }
363
364 /* Dual 16-bit add. Result is placed in t0; t1 is marked as dead.
365 tmp = (t0 ^ t1) & 0x8000;
366 t0 &= ~0x8000;
367 t1 &= ~0x8000;
368 t0 = (t0 + t1) ^ tmp;
369 */
370
371 static void gen_add16(TCGv t0, TCGv t1)
372 {
373 TCGv tmp = new_tmp();
374 tcg_gen_xor_i32(tmp, t0, t1);
375 tcg_gen_andi_i32(tmp, tmp, 0x8000);
376 tcg_gen_andi_i32(t0, t0, ~0x8000);
377 tcg_gen_andi_i32(t1, t1, ~0x8000);
378 tcg_gen_add_i32(t0, t0, t1);
379 tcg_gen_xor_i32(t0, t0, tmp);
380 dead_tmp(tmp);
381 dead_tmp(t1);
382 }
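/* The masking above stops the carry out of the low halfword from
   propagating into the high one.  Example: t0 = t1 = 0x00018000 yields
   0x00020000 (lanes 0x0002 and 0x0000), not the 0x00030000 a plain
   32-bit add would give. */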
383
384 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
385
386 /* Set CF to the top bit of var. */
387 static void gen_set_CF_bit31(TCGv var)
388 {
389 TCGv tmp = new_tmp();
390 tcg_gen_shri_i32(tmp, var, 31);
391 gen_set_CF(tmp);
392 dead_tmp(tmp);
393 }
394
395 /* Set N and Z flags from var. */
396 static inline void gen_logic_CC(TCGv var)
397 {
398 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
399 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
400 }
401
402 /* T0 += T1 + CF. */
403 static void gen_adc(TCGv t0, TCGv t1)
404 {
405 TCGv tmp;
406 tcg_gen_add_i32(t0, t0, t1);
407 tmp = load_cpu_field(CF);
408 tcg_gen_add_i32(t0, t0, tmp);
409 dead_tmp(tmp);
410 }
411
412 /* dest = T0 + T1 + CF. */
413 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
414 {
415 TCGv tmp;
416 tcg_gen_add_i32(dest, t0, t1);
417 tmp = load_cpu_field(CF);
418 tcg_gen_add_i32(dest, dest, tmp);
419 dead_tmp(tmp);
420 }
421
422 /* dest = T0 - T1 + CF - 1. */
423 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
424 {
425 TCGv tmp;
426 tcg_gen_sub_i32(dest, t0, t1);
427 tmp = load_cpu_field(CF);
428 tcg_gen_add_i32(dest, dest, tmp);
429 tcg_gen_subi_i32(dest, dest, 1);
430 dead_tmp(tmp);
431 }
432
433 /* FIXME: Implement this natively. */
434 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
435
436 static void shifter_out_im(TCGv var, int shift)
437 {
438 TCGv tmp = new_tmp();
439 if (shift == 0) {
440 tcg_gen_andi_i32(tmp, var, 1);
441 } else {
442 tcg_gen_shri_i32(tmp, var, shift);
443 if (shift != 31)
444 tcg_gen_andi_i32(tmp, tmp, 1);
445 }
446 gen_set_CF(tmp);
447 dead_tmp(tmp);
448 }
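/* Sets CF to bit 'shift' of var, i.e. the last bit shifted out:
   callers pass 32 - shift for LSL and shift - 1 for LSR/ASR/ROR. */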
449
450 /* Shift by immediate. Includes special handling for shift == 0. */
451 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
452 {
453 switch (shiftop) {
454 case 0: /* LSL */
455 if (shift != 0) {
456 if (flags)
457 shifter_out_im(var, 32 - shift);
458 tcg_gen_shli_i32(var, var, shift);
459 }
460 break;
461 case 1: /* LSR */
462 if (shift == 0) {
463 if (flags) {
464 tcg_gen_shri_i32(var, var, 31);
465 gen_set_CF(var);
466 }
467 tcg_gen_movi_i32(var, 0);
468 } else {
469 if (flags)
470 shifter_out_im(var, shift - 1);
471 tcg_gen_shri_i32(var, var, shift);
472 }
473 break;
474 case 2: /* ASR */
475 if (shift == 0)
476 shift = 32;
477 if (flags)
478 shifter_out_im(var, shift - 1);
479 if (shift == 32)
480 shift = 31;
481 tcg_gen_sari_i32(var, var, shift);
482 break;
483 case 3: /* ROR/RRX */
484 if (shift != 0) {
485 if (flags)
486 shifter_out_im(var, shift - 1);
487 tcg_gen_rotri_i32(var, var, shift);
    break;
488 } else {
489 TCGv tmp = load_cpu_field(CF);
490 if (flags)
491 shifter_out_im(var, 0);
492 tcg_gen_shri_i32(var, var, 1);
493 tcg_gen_shli_i32(tmp, tmp, 31);
494 tcg_gen_or_i32(var, var, tmp);
495 dead_tmp(tmp);
496 }
497 }
498 }
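/* Note the ASR case above: an encoded shift of 0 means ASR #32, and since
   an arithmetic shift by 31 already replicates the sign bit across the
   whole word (and TCG shift counts must be < 32), the shift is clamped to
   31 after the carry has been taken from bit 31.  A ROR encoding with
   shift == 0 is RRX: rotate right by one through the carry flag. */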
499
500 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
501 TCGv shift, int flags)
502 {
503 if (flags) {
504 switch (shiftop) {
505 case 0: gen_helper_shl_cc(var, var, shift); break;
506 case 1: gen_helper_shr_cc(var, var, shift); break;
507 case 2: gen_helper_sar_cc(var, var, shift); break;
508 case 3: gen_helper_ror_cc(var, var, shift); break;
509 }
510 } else {
511 switch (shiftop) {
512 case 0: gen_helper_shl(var, var, shift); break;
513 case 1: gen_helper_shr(var, var, shift); break;
514 case 2: gen_helper_sar(var, var, shift); break;
515 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
516 tcg_gen_rotr_i32(var, var, shift); break;
517 }
518 }
519 dead_tmp(shift);
520 }
521
522 #define PAS_OP(pfx) \
523 switch (op2) { \
524 case 0: gen_pas_helper(glue(pfx,add16)); break; \
525 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
526 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
527 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
528 case 4: gen_pas_helper(glue(pfx,add8)); break; \
529 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
530 }
531 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
532 {
533 TCGv_ptr tmp;
534
535 switch (op1) {
536 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
537 case 1:
538 tmp = tcg_temp_new_ptr();
539 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
540 PAS_OP(s)
541 tcg_temp_free_ptr(tmp);
542 break;
543 case 5:
544 tmp = tcg_temp_new_ptr();
545 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
546 PAS_OP(u)
547 tcg_temp_free_ptr(tmp);
548 break;
549 #undef gen_pas_helper
550 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
551 case 2:
552 PAS_OP(q);
553 break;
554 case 3:
555 PAS_OP(sh);
556 break;
557 case 6:
558 PAS_OP(uq);
559 break;
560 case 7:
561 PAS_OP(uh);
562 break;
563 #undef gen_pas_helper
564 }
565 }
566 #undef PAS_OP
567
568 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
569 #define PAS_OP(pfx) \
570 switch (op1) { \
571 case 0: gen_pas_helper(glue(pfx,add8)); break; \
572 case 1: gen_pas_helper(glue(pfx,add16)); break; \
573 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
574 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
575 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
576 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
577 }
578 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
579 {
580 TCGv_ptr tmp;
581
582 switch (op2) {
583 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
584 case 0:
585 tmp = tcg_temp_new_ptr();
586 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
587 PAS_OP(s)
588 tcg_temp_free_ptr(tmp);
589 break;
590 case 4:
591 tmp = tcg_temp_new_ptr();
592 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
593 PAS_OP(u)
594 tcg_temp_free_ptr(tmp);
595 break;
596 #undef gen_pas_helper
597 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
598 case 1:
599 PAS_OP(q);
600 break;
601 case 2:
602 PAS_OP(sh);
603 break;
604 case 5:
605 PAS_OP(uq);
606 break;
607 case 6:
608 PAS_OP(uh);
609 break;
610 #undef gen_pas_helper
611 }
612 }
613 #undef PAS_OP
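/* In both variants above the outer switch picks the flavour (s, q, sh,
   u, uq, uh: plain, saturating or halving, signed or unsigned) and
   PAS_OP picks the operation (add16, sub16, add8, sub8, or the crossed
   addsubx/subaddx forms); only the plain signed/unsigned flavours take
   the extra pointer to the GE flags. */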
614
615 static void gen_test_cc(int cc, int label)
616 {
617 TCGv tmp;
618 TCGv tmp2;
619 int inv;
620
621 switch (cc) {
622 case 0: /* eq: Z */
623 tmp = load_cpu_field(ZF);
624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
625 break;
626 case 1: /* ne: !Z */
627 tmp = load_cpu_field(ZF);
628 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
629 break;
630 case 2: /* cs: C */
631 tmp = load_cpu_field(CF);
632 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
633 break;
634 case 3: /* cc: !C */
635 tmp = load_cpu_field(CF);
636 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
637 break;
638 case 4: /* mi: N */
639 tmp = load_cpu_field(NF);
640 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
641 break;
642 case 5: /* pl: !N */
643 tmp = load_cpu_field(NF);
644 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
645 break;
646 case 6: /* vs: V */
647 tmp = load_cpu_field(VF);
648 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
649 break;
650 case 7: /* vc: !V */
651 tmp = load_cpu_field(VF);
652 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
653 break;
654 case 8: /* hi: C && !Z */
655 inv = gen_new_label();
656 tmp = load_cpu_field(CF);
657 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
658 dead_tmp(tmp);
659 tmp = load_cpu_field(ZF);
660 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
661 gen_set_label(inv);
662 break;
663 case 9: /* ls: !C || Z */
664 tmp = load_cpu_field(CF);
665 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
666 dead_tmp(tmp);
667 tmp = load_cpu_field(ZF);
668 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
669 break;
670 case 10: /* ge: N == V -> N ^ V == 0 */
671 tmp = load_cpu_field(VF);
672 tmp2 = load_cpu_field(NF);
673 tcg_gen_xor_i32(tmp, tmp, tmp2);
674 dead_tmp(tmp2);
675 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
676 break;
677 case 11: /* lt: N != V -> N ^ V != 0 */
678 tmp = load_cpu_field(VF);
679 tmp2 = load_cpu_field(NF);
680 tcg_gen_xor_i32(tmp, tmp, tmp2);
681 dead_tmp(tmp2);
682 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
683 break;
684 case 12: /* gt: !Z && N == V */
685 inv = gen_new_label();
686 tmp = load_cpu_field(ZF);
687 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
688 dead_tmp(tmp);
689 tmp = load_cpu_field(VF);
690 tmp2 = load_cpu_field(NF);
691 tcg_gen_xor_i32(tmp, tmp, tmp2);
692 dead_tmp(tmp2);
693 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
694 gen_set_label(inv);
695 break;
696 case 13: /* le: Z || N != V */
697 tmp = load_cpu_field(ZF);
698 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
699 dead_tmp(tmp);
700 tmp = load_cpu_field(VF);
701 tmp2 = load_cpu_field(NF);
702 tcg_gen_xor_i32(tmp, tmp, tmp2);
703 dead_tmp(tmp2);
704 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
705 break;
706 default:
707 fprintf(stderr, "Bad condition code 0x%x\n", cc);
708 abort();
709 }
710 dead_tmp(tmp);
711 }
712
713 static const uint8_t table_logic_cc[16] = {
714 1, /* and */
715 1, /* xor */
716 0, /* sub */
717 0, /* rsb */
718 0, /* add */
719 0, /* adc */
720 0, /* sbc */
721 0, /* rsc */
722 1, /* andl */
723 1, /* xorl */
724 0, /* cmp */
725 0, /* cmn */
726 1, /* orr */
727 1, /* mov */
728 1, /* bic */
729 1, /* mvn */
730 };
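/* table_logic_cc is indexed by the 4-bit data-processing opcode; a 1
   means the operation sets N and Z from the logical result ("andl" and
   "xorl" above are TST and TEQ), a 0 means the flags come from the
   arithmetic carry/overflow computation instead. */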
731
732 /* Set PC and Thumb state from an immediate address. */
733 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
734 {
735 TCGv tmp;
736
737 s->is_jmp = DISAS_UPDATE;
738 if (s->thumb != (addr & 1)) {
739 tmp = new_tmp();
740 tcg_gen_movi_i32(tmp, addr & 1);
741 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
742 dead_tmp(tmp);
743 }
744 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
745 }
746
747 /* Set PC and Thumb state from var. var is marked as dead. */
748 static inline void gen_bx(DisasContext *s, TCGv var)
749 {
750 s->is_jmp = DISAS_UPDATE;
751 tcg_gen_andi_i32(cpu_R[15], var, ~1);
752 tcg_gen_andi_i32(var, var, 1);
753 store_cpu_field(var, thumb);
754 }
755
756 /* Variant of store_reg which uses branch&exchange logic when storing
757 to r15 in ARM architecture v7 and above. The source must be a temporary
758 and will be marked as dead. */
759 static inline void store_reg_bx(CPUState *env, DisasContext *s,
760 int reg, TCGv var)
761 {
762 if (reg == 15 && ENABLE_ARCH_7) {
763 gen_bx(s, var);
764 } else {
765 store_reg(s, reg, var);
766 }
767 }
768
769 static inline TCGv gen_ld8s(TCGv addr, int index)
770 {
771 TCGv tmp = new_tmp();
772 tcg_gen_qemu_ld8s(tmp, addr, index);
773 return tmp;
774 }
775 static inline TCGv gen_ld8u(TCGv addr, int index)
776 {
777 TCGv tmp = new_tmp();
778 tcg_gen_qemu_ld8u(tmp, addr, index);
779 return tmp;
780 }
781 static inline TCGv gen_ld16s(TCGv addr, int index)
782 {
783 TCGv tmp = new_tmp();
784 tcg_gen_qemu_ld16s(tmp, addr, index);
785 return tmp;
786 }
787 static inline TCGv gen_ld16u(TCGv addr, int index)
788 {
789 TCGv tmp = new_tmp();
790 tcg_gen_qemu_ld16u(tmp, addr, index);
791 return tmp;
792 }
793 static inline TCGv gen_ld32(TCGv addr, int index)
794 {
795 TCGv tmp = new_tmp();
796 tcg_gen_qemu_ld32u(tmp, addr, index);
797 return tmp;
798 }
799 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
800 {
801 TCGv_i64 tmp = tcg_temp_new_i64();
802 tcg_gen_qemu_ld64(tmp, addr, index);
803 return tmp;
804 }
805 static inline void gen_st8(TCGv val, TCGv addr, int index)
806 {
807 tcg_gen_qemu_st8(val, addr, index);
808 dead_tmp(val);
809 }
810 static inline void gen_st16(TCGv val, TCGv addr, int index)
811 {
812 tcg_gen_qemu_st16(val, addr, index);
813 dead_tmp(val);
814 }
815 static inline void gen_st32(TCGv val, TCGv addr, int index)
816 {
817 tcg_gen_qemu_st32(val, addr, index);
818 dead_tmp(val);
819 }
820 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
821 {
822 tcg_gen_qemu_st64(val, addr, index);
823 tcg_temp_free_i64(val);
824 }
825
826 static inline void gen_set_pc_im(uint32_t val)
827 {
828 tcg_gen_movi_i32(cpu_R[15], val);
829 }
830
831 /* Force a TB lookup after an instruction that changes the CPU state. */
832 static inline void gen_lookup_tb(DisasContext *s)
833 {
834 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
835 s->is_jmp = DISAS_UPDATE;
836 }
837
838 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
839 TCGv var)
840 {
841 int val, rm, shift, shiftop;
842 TCGv offset;
843
844 if (!(insn & (1 << 25))) {
845 /* immediate */
846 val = insn & 0xfff;
847 if (!(insn & (1 << 23)))
848 val = -val;
849 if (val != 0)
850 tcg_gen_addi_i32(var, var, val);
851 } else {
852 /* shift/register */
853 rm = (insn) & 0xf;
854 shift = (insn >> 7) & 0x1f;
855 shiftop = (insn >> 5) & 3;
856 offset = load_reg(s, rm);
857 gen_arm_shift_im(offset, shiftop, shift, 0);
858 if (!(insn & (1 << 23)))
859 tcg_gen_sub_i32(var, var, offset);
860 else
861 tcg_gen_add_i32(var, var, offset);
862 dead_tmp(offset);
863 }
864 }
865
866 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
867 int extra, TCGv var)
868 {
869 int val, rm;
870 TCGv offset;
871
872 if (insn & (1 << 22)) {
873 /* immediate */
874 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
875 if (!(insn & (1 << 23)))
876 val = -val;
877 val += extra;
878 if (val != 0)
879 tcg_gen_addi_i32(var, var, val);
880 } else {
881 /* register */
882 if (extra)
883 tcg_gen_addi_i32(var, var, extra);
884 rm = (insn) & 0xf;
885 offset = load_reg(s, rm);
886 if (!(insn & (1 << 23)))
887 tcg_gen_sub_i32(var, var, offset);
888 else
889 tcg_gen_add_i32(var, var, offset);
890 dead_tmp(offset);
891 }
892 }
893
894 #define VFP_OP2(name) \
895 static inline void gen_vfp_##name(int dp) \
896 { \
897 if (dp) \
898 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
899 else \
900 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
901 }
902
903 VFP_OP2(add)
904 VFP_OP2(sub)
905 VFP_OP2(mul)
906 VFP_OP2(div)
907
908 #undef VFP_OP2
909
910 static inline void gen_vfp_abs(int dp)
911 {
912 if (dp)
913 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
914 else
915 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
916 }
917
918 static inline void gen_vfp_neg(int dp)
919 {
920 if (dp)
921 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
922 else
923 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
924 }
925
926 static inline void gen_vfp_sqrt(int dp)
927 {
928 if (dp)
929 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
930 else
931 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
932 }
933
934 static inline void gen_vfp_cmp(int dp)
935 {
936 if (dp)
937 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
938 else
939 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
940 }
941
942 static inline void gen_vfp_cmpe(int dp)
943 {
944 if (dp)
945 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
946 else
947 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
948 }
949
950 static inline void gen_vfp_F1_ld0(int dp)
951 {
952 if (dp)
953 tcg_gen_movi_i64(cpu_F1d, 0);
954 else
955 tcg_gen_movi_i32(cpu_F1s, 0);
956 }
957
958 static inline void gen_vfp_uito(int dp)
959 {
960 if (dp)
961 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
962 else
963 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
964 }
965
966 static inline void gen_vfp_sito(int dp)
967 {
968 if (dp)
969 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
970 else
971 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
972 }
973
974 static inline void gen_vfp_toui(int dp)
975 {
976 if (dp)
977 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
978 else
979 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
980 }
981
982 static inline void gen_vfp_touiz(int dp)
983 {
984 if (dp)
985 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
986 else
987 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
988 }
989
990 static inline void gen_vfp_tosi(int dp)
991 {
992 if (dp)
993 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
994 else
995 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
996 }
997
998 static inline void gen_vfp_tosiz(int dp)
999 {
1000 if (dp)
1001 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
1002 else
1003 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1004 }
1005
1006 #define VFP_GEN_FIX(name) \
1007 static inline void gen_vfp_##name(int dp, int shift) \
1008 { \
1009 TCGv tmp_shift = tcg_const_i32(shift); \
1010 if (dp) \
1011 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
1012 else \
1013 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1014 tcg_temp_free_i32(tmp_shift); \
1015 }
1016 VFP_GEN_FIX(tosh)
1017 VFP_GEN_FIX(tosl)
1018 VFP_GEN_FIX(touh)
1019 VFP_GEN_FIX(toul)
1020 VFP_GEN_FIX(shto)
1021 VFP_GEN_FIX(slto)
1022 VFP_GEN_FIX(uhto)
1023 VFP_GEN_FIX(ulto)
1024 #undef VFP_GEN_FIX
1025
1026 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1027 {
1028 if (dp)
1029 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1030 else
1031 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1032 }
1033
1034 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1035 {
1036 if (dp)
1037 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1038 else
1039 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1040 }
1041
1042 static inline long
1043 vfp_reg_offset (int dp, int reg)
1044 {
1045 if (dp)
1046 return offsetof(CPUARMState, vfp.regs[reg]);
1047 else if (reg & 1) {
1048 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1049 + offsetof(CPU_DoubleU, l.upper);
1050 } else {
1051 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1052 + offsetof(CPU_DoubleU, l.lower);
1053 }
1054 }
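/* Single-precision registers live in the two halves of the matching
   double slot: e.g. s2 is the low word of d1's storage and s3 the
   high word. */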
1055
1056 /* Return the offset of a 32-bit piece of a NEON register.
1057 zero is the least significant end of the register. */
1058 static inline long
1059 neon_reg_offset (int reg, int n)
1060 {
1061 int sreg;
1062 sreg = reg * 2 + n;
1063 return vfp_reg_offset(0, sreg);
1064 }
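/* E.g. neon_reg_offset(5, 1) is the offset of the most significant
   32 bits of d5. */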
1065
1066 static TCGv neon_load_reg(int reg, int pass)
1067 {
1068 TCGv tmp = new_tmp();
1069 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1070 return tmp;
1071 }
1072
1073 static void neon_store_reg(int reg, int pass, TCGv var)
1074 {
1075 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1076 dead_tmp(var);
1077 }
1078
1079 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1080 {
1081 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1082 }
1083
1084 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1085 {
1086 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1087 }
1088
1089 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1090 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1091 #define tcg_gen_st_f32 tcg_gen_st_i32
1092 #define tcg_gen_st_f64 tcg_gen_st_i64
1093
1094 static inline void gen_mov_F0_vreg(int dp, int reg)
1095 {
1096 if (dp)
1097 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1098 else
1099 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1100 }
1101
1102 static inline void gen_mov_F1_vreg(int dp, int reg)
1103 {
1104 if (dp)
1105 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1106 else
1107 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1108 }
1109
1110 static inline void gen_mov_vreg_F0(int dp, int reg)
1111 {
1112 if (dp)
1113 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1114 else
1115 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1116 }
1117
1118 #define ARM_CP_RW_BIT (1 << 20)
1119
1120 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1121 {
1122 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1123 }
1124
1125 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1126 {
1127 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1128 }
1129
1130 static inline TCGv iwmmxt_load_creg(int reg)
1131 {
1132 TCGv var = new_tmp();
1133 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1134 return var;
1135 }
1136
1137 static inline void iwmmxt_store_creg(int reg, TCGv var)
1138 {
1139 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1140 dead_tmp(var);
1141 }
1142
1143 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1144 {
1145 iwmmxt_store_reg(cpu_M0, rn);
1146 }
1147
1148 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1149 {
1150 iwmmxt_load_reg(cpu_M0, rn);
1151 }
1152
1153 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1154 {
1155 iwmmxt_load_reg(cpu_V1, rn);
1156 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1157 }
1158
1159 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1160 {
1161 iwmmxt_load_reg(cpu_V1, rn);
1162 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1163 }
1164
1165 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1166 {
1167 iwmmxt_load_reg(cpu_V1, rn);
1168 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1169 }
1170
1171 #define IWMMXT_OP(name) \
1172 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1173 { \
1174 iwmmxt_load_reg(cpu_V1, rn); \
1175 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1176 }
1177
1178 #define IWMMXT_OP_ENV(name) \
1179 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1180 { \
1181 iwmmxt_load_reg(cpu_V1, rn); \
1182 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1183 }
1184
1185 #define IWMMXT_OP_ENV_SIZE(name) \
1186 IWMMXT_OP_ENV(name##b) \
1187 IWMMXT_OP_ENV(name##w) \
1188 IWMMXT_OP_ENV(name##l)
1189
1190 #define IWMMXT_OP_ENV1(name) \
1191 static inline void gen_op_iwmmxt_##name##_M0(void) \
1192 { \
1193 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1194 }
1195
1196 IWMMXT_OP(maddsq)
1197 IWMMXT_OP(madduq)
1198 IWMMXT_OP(sadb)
1199 IWMMXT_OP(sadw)
1200 IWMMXT_OP(mulslw)
1201 IWMMXT_OP(mulshw)
1202 IWMMXT_OP(mululw)
1203 IWMMXT_OP(muluhw)
1204 IWMMXT_OP(macsw)
1205 IWMMXT_OP(macuw)
1206
1207 IWMMXT_OP_ENV_SIZE(unpackl)
1208 IWMMXT_OP_ENV_SIZE(unpackh)
1209
1210 IWMMXT_OP_ENV1(unpacklub)
1211 IWMMXT_OP_ENV1(unpackluw)
1212 IWMMXT_OP_ENV1(unpacklul)
1213 IWMMXT_OP_ENV1(unpackhub)
1214 IWMMXT_OP_ENV1(unpackhuw)
1215 IWMMXT_OP_ENV1(unpackhul)
1216 IWMMXT_OP_ENV1(unpacklsb)
1217 IWMMXT_OP_ENV1(unpacklsw)
1218 IWMMXT_OP_ENV1(unpacklsl)
1219 IWMMXT_OP_ENV1(unpackhsb)
1220 IWMMXT_OP_ENV1(unpackhsw)
1221 IWMMXT_OP_ENV1(unpackhsl)
1222
1223 IWMMXT_OP_ENV_SIZE(cmpeq)
1224 IWMMXT_OP_ENV_SIZE(cmpgtu)
1225 IWMMXT_OP_ENV_SIZE(cmpgts)
1226
1227 IWMMXT_OP_ENV_SIZE(mins)
1228 IWMMXT_OP_ENV_SIZE(minu)
1229 IWMMXT_OP_ENV_SIZE(maxs)
1230 IWMMXT_OP_ENV_SIZE(maxu)
1231
1232 IWMMXT_OP_ENV_SIZE(subn)
1233 IWMMXT_OP_ENV_SIZE(addn)
1234 IWMMXT_OP_ENV_SIZE(subu)
1235 IWMMXT_OP_ENV_SIZE(addu)
1236 IWMMXT_OP_ENV_SIZE(subs)
1237 IWMMXT_OP_ENV_SIZE(adds)
1238
1239 IWMMXT_OP_ENV(avgb0)
1240 IWMMXT_OP_ENV(avgb1)
1241 IWMMXT_OP_ENV(avgw0)
1242 IWMMXT_OP_ENV(avgw1)
1243
1244 IWMMXT_OP(msadb)
1245
1246 IWMMXT_OP_ENV(packuw)
1247 IWMMXT_OP_ENV(packul)
1248 IWMMXT_OP_ENV(packuq)
1249 IWMMXT_OP_ENV(packsw)
1250 IWMMXT_OP_ENV(packsl)
1251 IWMMXT_OP_ENV(packsq)
1252
1253 static void gen_op_iwmmxt_set_mup(void)
1254 {
1255 TCGv tmp;
1256 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1257 tcg_gen_ori_i32(tmp, tmp, 2);
1258 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1259 }
1260
1261 static void gen_op_iwmmxt_set_cup(void)
1262 {
1263 TCGv tmp;
1264 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1265 tcg_gen_ori_i32(tmp, tmp, 1);
1266 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1267 }
1268
1269 static void gen_op_iwmmxt_setpsr_nz(void)
1270 {
1271 TCGv tmp = new_tmp();
1272 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1273 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1274 }
1275
1276 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1277 {
1278 iwmmxt_load_reg(cpu_V1, rn);
1279 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1280 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1281 }
1282
1283 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1284 {
1285 int rd;
1286 uint32_t offset;
1287 TCGv tmp;
1288
1289 rd = (insn >> 16) & 0xf;
1290 tmp = load_reg(s, rd);
1291
1292 offset = (insn & 0xff) << ((insn >> 7) & 2);
1293 if (insn & (1 << 24)) {
1294 /* Pre indexed */
1295 if (insn & (1 << 23))
1296 tcg_gen_addi_i32(tmp, tmp, offset);
1297 else
1298 tcg_gen_addi_i32(tmp, tmp, -offset);
1299 tcg_gen_mov_i32(dest, tmp);
1300 if (insn & (1 << 21))
1301 store_reg(s, rd, tmp);
1302 else
1303 dead_tmp(tmp);
1304 } else if (insn & (1 << 21)) {
1305 /* Post indexed */
1306 tcg_gen_mov_i32(dest, tmp);
1307 if (insn & (1 << 23))
1308 tcg_gen_addi_i32(tmp, tmp, offset);
1309 else
1310 tcg_gen_addi_i32(tmp, tmp, -offset);
1311 store_reg(s, rd, tmp);
1312 } else if (!(insn & (1 << 23)))
1313 return 1;
1314 return 0;
1315 }
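/* The function above decodes the iwMMXt load/store addressing mode: the
   8-bit immediate offset is scaled by 4 when bit 8 is set, bit 24 selects
   pre- vs. post-indexing and bit 21 requests base writeback; forms with
   neither bit 24 nor bit 21 set must have the U bit (23) set or they are
   rejected as undefined. */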
1316
1317 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1318 {
1319 int rd = (insn >> 0) & 0xf;
1320 TCGv tmp;
1321
1322 if (insn & (1 << 8)) {
1323 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1324 return 1;
1325 } else {
1326 tmp = iwmmxt_load_creg(rd);
1327 }
1328 } else {
1329 tmp = new_tmp();
1330 iwmmxt_load_reg(cpu_V0, rd);
1331 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1332 }
1333 tcg_gen_andi_i32(tmp, tmp, mask);
1334 tcg_gen_mov_i32(dest, tmp);
1335 dead_tmp(tmp);
1336 return 0;
1337 }
1338
1339 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1340 (i.e. an undefined instruction). */
1341 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1342 {
1343 int rd, wrd;
1344 int rdhi, rdlo, rd0, rd1, i;
1345 TCGv addr;
1346 TCGv tmp, tmp2, tmp3;
1347
1348 if ((insn & 0x0e000e00) == 0x0c000000) {
1349 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1350 wrd = insn & 0xf;
1351 rdlo = (insn >> 12) & 0xf;
1352 rdhi = (insn >> 16) & 0xf;
1353 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1354 iwmmxt_load_reg(cpu_V0, wrd);
1355 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1356 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1357 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1358 } else { /* TMCRR */
1359 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1360 iwmmxt_store_reg(cpu_V0, wrd);
1361 gen_op_iwmmxt_set_mup();
1362 }
1363 return 0;
1364 }
1365
1366 wrd = (insn >> 12) & 0xf;
1367 addr = new_tmp();
1368 if (gen_iwmmxt_address(s, insn, addr)) {
1369 dead_tmp(addr);
1370 return 1;
1371 }
1372 if (insn & ARM_CP_RW_BIT) {
1373 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1374 tmp = new_tmp();
1375 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1376 iwmmxt_store_creg(wrd, tmp);
1377 } else {
1378 i = 1;
1379 if (insn & (1 << 8)) {
1380 if (insn & (1 << 22)) { /* WLDRD */
1381 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1382 i = 0;
1383 } else { /* WLDRW wRd */
1384 tmp = gen_ld32(addr, IS_USER(s));
1385 }
1386 } else {
1387 if (insn & (1 << 22)) { /* WLDRH */
1388 tmp = gen_ld16u(addr, IS_USER(s));
1389 } else { /* WLDRB */
1390 tmp = gen_ld8u(addr, IS_USER(s));
1391 }
1392 }
1393 if (i) {
1394 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1395 dead_tmp(tmp);
1396 }
1397 gen_op_iwmmxt_movq_wRn_M0(wrd);
1398 }
1399 } else {
1400 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1401 tmp = iwmmxt_load_creg(wrd);
1402 gen_st32(tmp, addr, IS_USER(s));
1403 } else {
1404 gen_op_iwmmxt_movq_M0_wRn(wrd);
1405 tmp = new_tmp();
1406 if (insn & (1 << 8)) {
1407 if (insn & (1 << 22)) { /* WSTRD */
1408 dead_tmp(tmp);
1409 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1410 } else { /* WSTRW wRd */
1411 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1412 gen_st32(tmp, addr, IS_USER(s));
1413 }
1414 } else {
1415 if (insn & (1 << 22)) { /* WSTRH */
1416 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1417 gen_st16(tmp, addr, IS_USER(s));
1418 } else { /* WSTRB */
1419 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1420 gen_st8(tmp, addr, IS_USER(s));
1421 }
1422 }
1423 }
1424 }
1425 dead_tmp(addr);
1426 return 0;
1427 }
1428
1429 if ((insn & 0x0f000000) != 0x0e000000)
1430 return 1;
1431
1432 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1433 case 0x000: /* WOR */
1434 wrd = (insn >> 12) & 0xf;
1435 rd0 = (insn >> 0) & 0xf;
1436 rd1 = (insn >> 16) & 0xf;
1437 gen_op_iwmmxt_movq_M0_wRn(rd0);
1438 gen_op_iwmmxt_orq_M0_wRn(rd1);
1439 gen_op_iwmmxt_setpsr_nz();
1440 gen_op_iwmmxt_movq_wRn_M0(wrd);
1441 gen_op_iwmmxt_set_mup();
1442 gen_op_iwmmxt_set_cup();
1443 break;
1444 case 0x011: /* TMCR */
1445 if (insn & 0xf)
1446 return 1;
1447 rd = (insn >> 12) & 0xf;
1448 wrd = (insn >> 16) & 0xf;
1449 switch (wrd) {
1450 case ARM_IWMMXT_wCID:
1451 case ARM_IWMMXT_wCASF:
1452 break;
1453 case ARM_IWMMXT_wCon:
1454 gen_op_iwmmxt_set_cup();
1455 /* Fall through. */
1456 case ARM_IWMMXT_wCSSF:
1457 tmp = iwmmxt_load_creg(wrd);
1458 tmp2 = load_reg(s, rd);
1459 tcg_gen_andc_i32(tmp, tmp, tmp2);
1460 dead_tmp(tmp2);
1461 iwmmxt_store_creg(wrd, tmp);
1462 break;
1463 case ARM_IWMMXT_wCGR0:
1464 case ARM_IWMMXT_wCGR1:
1465 case ARM_IWMMXT_wCGR2:
1466 case ARM_IWMMXT_wCGR3:
1467 gen_op_iwmmxt_set_cup();
1468 tmp = load_reg(s, rd);
1469 iwmmxt_store_creg(wrd, tmp);
1470 break;
1471 default:
1472 return 1;
1473 }
1474 break;
1475 case 0x100: /* WXOR */
1476 wrd = (insn >> 12) & 0xf;
1477 rd0 = (insn >> 0) & 0xf;
1478 rd1 = (insn >> 16) & 0xf;
1479 gen_op_iwmmxt_movq_M0_wRn(rd0);
1480 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1481 gen_op_iwmmxt_setpsr_nz();
1482 gen_op_iwmmxt_movq_wRn_M0(wrd);
1483 gen_op_iwmmxt_set_mup();
1484 gen_op_iwmmxt_set_cup();
1485 break;
1486 case 0x111: /* TMRC */
1487 if (insn & 0xf)
1488 return 1;
1489 rd = (insn >> 12) & 0xf;
1490 wrd = (insn >> 16) & 0xf;
1491 tmp = iwmmxt_load_creg(wrd);
1492 store_reg(s, rd, tmp);
1493 break;
1494 case 0x300: /* WANDN */
1495 wrd = (insn >> 12) & 0xf;
1496 rd0 = (insn >> 0) & 0xf;
1497 rd1 = (insn >> 16) & 0xf;
1498 gen_op_iwmmxt_movq_M0_wRn(rd0);
1499 tcg_gen_not_i64(cpu_M0, cpu_M0); /* WANDN is wRn & ~wRm: bitwise NOT, not negation */
1500 gen_op_iwmmxt_andq_M0_wRn(rd1);
1501 gen_op_iwmmxt_setpsr_nz();
1502 gen_op_iwmmxt_movq_wRn_M0(wrd);
1503 gen_op_iwmmxt_set_mup();
1504 gen_op_iwmmxt_set_cup();
1505 break;
1506 case 0x200: /* WAND */
1507 wrd = (insn >> 12) & 0xf;
1508 rd0 = (insn >> 0) & 0xf;
1509 rd1 = (insn >> 16) & 0xf;
1510 gen_op_iwmmxt_movq_M0_wRn(rd0);
1511 gen_op_iwmmxt_andq_M0_wRn(rd1);
1512 gen_op_iwmmxt_setpsr_nz();
1513 gen_op_iwmmxt_movq_wRn_M0(wrd);
1514 gen_op_iwmmxt_set_mup();
1515 gen_op_iwmmxt_set_cup();
1516 break;
1517 case 0x810: case 0xa10: /* WMADD */
1518 wrd = (insn >> 12) & 0xf;
1519 rd0 = (insn >> 0) & 0xf;
1520 rd1 = (insn >> 16) & 0xf;
1521 gen_op_iwmmxt_movq_M0_wRn(rd0);
1522 if (insn & (1 << 21))
1523 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1524 else
1525 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1526 gen_op_iwmmxt_movq_wRn_M0(wrd);
1527 gen_op_iwmmxt_set_mup();
1528 break;
1529 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1530 wrd = (insn >> 12) & 0xf;
1531 rd0 = (insn >> 16) & 0xf;
1532 rd1 = (insn >> 0) & 0xf;
1533 gen_op_iwmmxt_movq_M0_wRn(rd0);
1534 switch ((insn >> 22) & 3) {
1535 case 0:
1536 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1537 break;
1538 case 1:
1539 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1540 break;
1541 case 2:
1542 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1543 break;
1544 case 3:
1545 return 1;
1546 }
1547 gen_op_iwmmxt_movq_wRn_M0(wrd);
1548 gen_op_iwmmxt_set_mup();
1549 gen_op_iwmmxt_set_cup();
1550 break;
1551 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1552 wrd = (insn >> 12) & 0xf;
1553 rd0 = (insn >> 16) & 0xf;
1554 rd1 = (insn >> 0) & 0xf;
1555 gen_op_iwmmxt_movq_M0_wRn(rd0);
1556 switch ((insn >> 22) & 3) {
1557 case 0:
1558 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1559 break;
1560 case 1:
1561 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1562 break;
1563 case 2:
1564 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1565 break;
1566 case 3:
1567 return 1;
1568 }
1569 gen_op_iwmmxt_movq_wRn_M0(wrd);
1570 gen_op_iwmmxt_set_mup();
1571 gen_op_iwmmxt_set_cup();
1572 break;
1573 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1574 wrd = (insn >> 12) & 0xf;
1575 rd0 = (insn >> 16) & 0xf;
1576 rd1 = (insn >> 0) & 0xf;
1577 gen_op_iwmmxt_movq_M0_wRn(rd0);
1578 if (insn & (1 << 22))
1579 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1580 else
1581 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1582 if (!(insn & (1 << 20)))
1583 gen_op_iwmmxt_addl_M0_wRn(wrd);
1584 gen_op_iwmmxt_movq_wRn_M0(wrd);
1585 gen_op_iwmmxt_set_mup();
1586 break;
1587 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1588 wrd = (insn >> 12) & 0xf;
1589 rd0 = (insn >> 16) & 0xf;
1590 rd1 = (insn >> 0) & 0xf;
1591 gen_op_iwmmxt_movq_M0_wRn(rd0);
1592 if (insn & (1 << 21)) {
1593 if (insn & (1 << 20))
1594 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1595 else
1596 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1597 } else {
1598 if (insn & (1 << 20))
1599 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1600 else
1601 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1602 }
1603 gen_op_iwmmxt_movq_wRn_M0(wrd);
1604 gen_op_iwmmxt_set_mup();
1605 break;
1606 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1607 wrd = (insn >> 12) & 0xf;
1608 rd0 = (insn >> 16) & 0xf;
1609 rd1 = (insn >> 0) & 0xf;
1610 gen_op_iwmmxt_movq_M0_wRn(rd0);
1611 if (insn & (1 << 21))
1612 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1613 else
1614 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1615 if (!(insn & (1 << 20))) {
1616 iwmmxt_load_reg(cpu_V1, wrd);
1617 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1618 }
1619 gen_op_iwmmxt_movq_wRn_M0(wrd);
1620 gen_op_iwmmxt_set_mup();
1621 break;
1622 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1623 wrd = (insn >> 12) & 0xf;
1624 rd0 = (insn >> 16) & 0xf;
1625 rd1 = (insn >> 0) & 0xf;
1626 gen_op_iwmmxt_movq_M0_wRn(rd0);
1627 switch ((insn >> 22) & 3) {
1628 case 0:
1629 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1630 break;
1631 case 1:
1632 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1633 break;
1634 case 2:
1635 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1636 break;
1637 case 3:
1638 return 1;
1639 }
1640 gen_op_iwmmxt_movq_wRn_M0(wrd);
1641 gen_op_iwmmxt_set_mup();
1642 gen_op_iwmmxt_set_cup();
1643 break;
1644 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1645 wrd = (insn >> 12) & 0xf;
1646 rd0 = (insn >> 16) & 0xf;
1647 rd1 = (insn >> 0) & 0xf;
1648 gen_op_iwmmxt_movq_M0_wRn(rd0);
1649 if (insn & (1 << 22)) {
1650 if (insn & (1 << 20))
1651 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1652 else
1653 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1654 } else {
1655 if (insn & (1 << 20))
1656 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1657 else
1658 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1659 }
1660 gen_op_iwmmxt_movq_wRn_M0(wrd);
1661 gen_op_iwmmxt_set_mup();
1662 gen_op_iwmmxt_set_cup();
1663 break;
1664 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1665 wrd = (insn >> 12) & 0xf;
1666 rd0 = (insn >> 16) & 0xf;
1667 rd1 = (insn >> 0) & 0xf;
1668 gen_op_iwmmxt_movq_M0_wRn(rd0);
1669 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1670 tcg_gen_andi_i32(tmp, tmp, 7);
1671 iwmmxt_load_reg(cpu_V1, rd1);
1672 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1673 dead_tmp(tmp);
1674 gen_op_iwmmxt_movq_wRn_M0(wrd);
1675 gen_op_iwmmxt_set_mup();
1676 break;
1677 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1678 if (((insn >> 6) & 3) == 3)
1679 return 1;
1680 rd = (insn >> 12) & 0xf;
1681 wrd = (insn >> 16) & 0xf;
1682 tmp = load_reg(s, rd);
1683 gen_op_iwmmxt_movq_M0_wRn(wrd);
1684 switch ((insn >> 6) & 3) {
1685 case 0:
1686 tmp2 = tcg_const_i32(0xff);
1687 tmp3 = tcg_const_i32((insn & 7) << 3);
1688 break;
1689 case 1:
1690 tmp2 = tcg_const_i32(0xffff);
1691 tmp3 = tcg_const_i32((insn & 3) << 4);
1692 break;
1693 case 2:
1694 tmp2 = tcg_const_i32(0xffffffff);
1695 tmp3 = tcg_const_i32((insn & 1) << 5);
1696 break;
1697 default:
1698 TCGV_UNUSED(tmp2);
1699 TCGV_UNUSED(tmp3);
1700 }
1701 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1702 tcg_temp_free(tmp3);
1703 tcg_temp_free(tmp2);
1704 dead_tmp(tmp);
1705 gen_op_iwmmxt_movq_wRn_M0(wrd);
1706 gen_op_iwmmxt_set_mup();
1707 break;
1708 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1709 rd = (insn >> 12) & 0xf;
1710 wrd = (insn >> 16) & 0xf;
1711 if (rd == 15 || ((insn >> 22) & 3) == 3)
1712 return 1;
1713 gen_op_iwmmxt_movq_M0_wRn(wrd);
1714 tmp = new_tmp();
1715 switch ((insn >> 22) & 3) {
1716 case 0:
1717 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1718 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1719 if (insn & 8) {
1720 tcg_gen_ext8s_i32(tmp, tmp);
1721 } else {
1722 tcg_gen_andi_i32(tmp, tmp, 0xff);
1723 }
1724 break;
1725 case 1:
1726 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1727 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1728 if (insn & 8) {
1729 tcg_gen_ext16s_i32(tmp, tmp);
1730 } else {
1731 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1732 }
1733 break;
1734 case 2:
1735 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1736 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1737 break;
1738 }
1739 store_reg(s, rd, tmp);
1740 break;
1741 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1742 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1743 return 1;
1744 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1745 switch ((insn >> 22) & 3) {
1746 case 0:
1747 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1748 break;
1749 case 1:
1750 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1751 break;
1752 case 2:
1753 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1754 break;
1755 }
1756 tcg_gen_shli_i32(tmp, tmp, 28);
1757 gen_set_nzcv(tmp);
1758 dead_tmp(tmp);
1759 break;
1760 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1761 if (((insn >> 6) & 3) == 3)
1762 return 1;
1763 rd = (insn >> 12) & 0xf;
1764 wrd = (insn >> 16) & 0xf;
1765 tmp = load_reg(s, rd);
1766 switch ((insn >> 6) & 3) {
1767 case 0:
1768 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1769 break;
1770 case 1:
1771 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1772 break;
1773 case 2:
1774 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1775 break;
1776 }
1777 dead_tmp(tmp);
1778 gen_op_iwmmxt_movq_wRn_M0(wrd);
1779 gen_op_iwmmxt_set_mup();
1780 break;
1781 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1782 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1783 return 1;
1784 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1785 tmp2 = new_tmp();
1786 tcg_gen_mov_i32(tmp2, tmp);
1787 switch ((insn >> 22) & 3) {
1788 case 0:
1789 for (i = 0; i < 7; i ++) {
1790 tcg_gen_shli_i32(tmp2, tmp2, 4);
1791 tcg_gen_and_i32(tmp, tmp, tmp2);
1792 }
1793 break;
1794 case 1:
1795 for (i = 0; i < 3; i ++) {
1796 tcg_gen_shli_i32(tmp2, tmp2, 8);
1797 tcg_gen_and_i32(tmp, tmp, tmp2);
1798 }
1799 break;
1800 case 2:
1801 tcg_gen_shli_i32(tmp2, tmp2, 16);
1802 tcg_gen_and_i32(tmp, tmp, tmp2);
1803 break;
1804 }
1805 gen_set_nzcv(tmp);
1806 dead_tmp(tmp2);
1807 dead_tmp(tmp);
1808 break;
1809 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1810 wrd = (insn >> 12) & 0xf;
1811 rd0 = (insn >> 16) & 0xf;
1812 gen_op_iwmmxt_movq_M0_wRn(rd0);
1813 switch ((insn >> 22) & 3) {
1814 case 0:
1815 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1816 break;
1817 case 1:
1818 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1819 break;
1820 case 2:
1821 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1822 break;
1823 case 3:
1824 return 1;
1825 }
1826 gen_op_iwmmxt_movq_wRn_M0(wrd);
1827 gen_op_iwmmxt_set_mup();
1828 break;
1829 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1830 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1831 return 1;
1832 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1833 tmp2 = new_tmp();
1834 tcg_gen_mov_i32(tmp2, tmp);
1835 switch ((insn >> 22) & 3) {
1836 case 0:
1837 for (i = 0; i < 7; i ++) {
1838 tcg_gen_shli_i32(tmp2, tmp2, 4);
1839 tcg_gen_or_i32(tmp, tmp, tmp2);
1840 }
1841 break;
1842 case 1:
1843 for (i = 0; i < 3; i ++) {
1844 tcg_gen_shli_i32(tmp2, tmp2, 8);
1845 tcg_gen_or_i32(tmp, tmp, tmp2);
1846 }
1847 break;
1848 case 2:
1849 tcg_gen_shli_i32(tmp2, tmp2, 16);
1850 tcg_gen_or_i32(tmp, tmp, tmp2);
1851 break;
1852 }
1853 gen_set_nzcv(tmp);
1854 dead_tmp(tmp2);
1855 dead_tmp(tmp);
1856 break;
1857 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1858 rd = (insn >> 12) & 0xf;
1859 rd0 = (insn >> 16) & 0xf;
1860 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1861 return 1;
1862 gen_op_iwmmxt_movq_M0_wRn(rd0);
1863 tmp = new_tmp();
1864 switch ((insn >> 22) & 3) {
1865 case 0:
1866 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1867 break;
1868 case 1:
1869 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1870 break;
1871 case 2:
1872 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1873 break;
1874 }
1875 store_reg(s, rd, tmp);
1876 break;
1877 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1878 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1879 wrd = (insn >> 12) & 0xf;
1880 rd0 = (insn >> 16) & 0xf;
1881 rd1 = (insn >> 0) & 0xf;
1882 gen_op_iwmmxt_movq_M0_wRn(rd0);
1883 switch ((insn >> 22) & 3) {
1884 case 0:
1885 if (insn & (1 << 21))
1886 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1887 else
1888 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1889 break;
1890 case 1:
1891 if (insn & (1 << 21))
1892 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1893 else
1894 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1895 break;
1896 case 2:
1897 if (insn & (1 << 21))
1898 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1899 else
1900 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1901 break;
1902 case 3:
1903 return 1;
1904 }
1905 gen_op_iwmmxt_movq_wRn_M0(wrd);
1906 gen_op_iwmmxt_set_mup();
1907 gen_op_iwmmxt_set_cup();
1908 break;
1909 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1910 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1911 wrd = (insn >> 12) & 0xf;
1912 rd0 = (insn >> 16) & 0xf;
1913 gen_op_iwmmxt_movq_M0_wRn(rd0);
1914 switch ((insn >> 22) & 3) {
1915 case 0:
1916 if (insn & (1 << 21))
1917 gen_op_iwmmxt_unpacklsb_M0();
1918 else
1919 gen_op_iwmmxt_unpacklub_M0();
1920 break;
1921 case 1:
1922 if (insn & (1 << 21))
1923 gen_op_iwmmxt_unpacklsw_M0();
1924 else
1925 gen_op_iwmmxt_unpackluw_M0();
1926 break;
1927 case 2:
1928 if (insn & (1 << 21))
1929 gen_op_iwmmxt_unpacklsl_M0();
1930 else
1931 gen_op_iwmmxt_unpacklul_M0();
1932 break;
1933 case 3:
1934 return 1;
1935 }
1936 gen_op_iwmmxt_movq_wRn_M0(wrd);
1937 gen_op_iwmmxt_set_mup();
1938 gen_op_iwmmxt_set_cup();
1939 break;
1940 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1941 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1942 wrd = (insn >> 12) & 0xf;
1943 rd0 = (insn >> 16) & 0xf;
1944 gen_op_iwmmxt_movq_M0_wRn(rd0);
1945 switch ((insn >> 22) & 3) {
1946 case 0:
1947 if (insn & (1 << 21))
1948 gen_op_iwmmxt_unpackhsb_M0();
1949 else
1950 gen_op_iwmmxt_unpackhub_M0();
1951 break;
1952 case 1:
1953 if (insn & (1 << 21))
1954 gen_op_iwmmxt_unpackhsw_M0();
1955 else
1956 gen_op_iwmmxt_unpackhuw_M0();
1957 break;
1958 case 2:
1959 if (insn & (1 << 21))
1960 gen_op_iwmmxt_unpackhsl_M0();
1961 else
1962 gen_op_iwmmxt_unpackhul_M0();
1963 break;
1964 case 3:
1965 return 1;
1966 }
1967 gen_op_iwmmxt_movq_wRn_M0(wrd);
1968 gen_op_iwmmxt_set_mup();
1969 gen_op_iwmmxt_set_cup();
1970 break;
1971 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1972 case 0x214: case 0x614: case 0xa14: case 0xe14:
1973 if (((insn >> 22) & 3) == 0)
1974 return 1;
1975 wrd = (insn >> 12) & 0xf;
1976 rd0 = (insn >> 16) & 0xf;
1977 gen_op_iwmmxt_movq_M0_wRn(rd0);
1978 tmp = new_tmp();
1979 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1980 dead_tmp(tmp);
1981 return 1;
1982 }
1983 switch ((insn >> 22) & 3) {
1984 case 1:
1985 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
1986 break;
1987 case 2:
1988 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
1989 break;
1990 case 3:
1991 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
1992 break;
1993 }
1994 dead_tmp(tmp);
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2000 case 0x014: case 0x414: case 0x814: case 0xc14:
2001 if (((insn >> 22) & 3) == 0)
2002 return 1;
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
2006 tmp = new_tmp();
2007 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2008 dead_tmp(tmp);
2009 return 1;
2010 }
2011 switch ((insn >> 22) & 3) {
2012 case 1:
2013 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2014 break;
2015 case 2:
2016 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2017 break;
2018 case 3:
2019 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2020 break;
2021 }
2022 dead_tmp(tmp);
2023 gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2026 break;
2027 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2028 case 0x114: case 0x514: case 0x914: case 0xd14:
2029 if (((insn >> 22) & 3) == 0)
2030 return 1;
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 16) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0);
2034 tmp = new_tmp();
2035 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2036 dead_tmp(tmp);
2037 return 1;
2038 }
2039 switch ((insn >> 22) & 3) {
2040 case 1:
2041 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2042 break;
2043 case 2:
2044 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2045 break;
2046 case 3:
2047 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2048 break;
2049 }
2050 dead_tmp(tmp);
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2054 break;
2055 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2056 case 0x314: case 0x714: case 0xb14: case 0xf14:
2057 if (((insn >> 22) & 3) == 0)
2058 return 1;
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
2062 tmp = new_tmp();
2063 switch ((insn >> 22) & 3) {
2064 case 1:
2065 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2066 dead_tmp(tmp);
2067 return 1;
2068 }
2069 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2070 break;
2071 case 2:
2072 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2073 dead_tmp(tmp);
2074 return 1;
2075 }
2076 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2077 break;
2078 case 3:
2079 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2080 dead_tmp(tmp);
2081 return 1;
2082 }
2083 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2084 break;
2085 }
2086 dead_tmp(tmp);
2087 gen_op_iwmmxt_movq_wRn_M0(wrd);
2088 gen_op_iwmmxt_set_mup();
2089 gen_op_iwmmxt_set_cup();
2090 break;
2091 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2092 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2093 wrd = (insn >> 12) & 0xf;
2094 rd0 = (insn >> 16) & 0xf;
2095 rd1 = (insn >> 0) & 0xf;
2096 gen_op_iwmmxt_movq_M0_wRn(rd0);
2097 switch ((insn >> 22) & 3) {
2098 case 0:
2099 if (insn & (1 << 21))
2100 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2101 else
2102 gen_op_iwmmxt_minub_M0_wRn(rd1);
2103 break;
2104 case 1:
2105 if (insn & (1 << 21))
2106 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2107 else
2108 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2109 break;
2110 case 2:
2111 if (insn & (1 << 21))
2112 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2113 else
2114 gen_op_iwmmxt_minul_M0_wRn(rd1);
2115 break;
2116 case 3:
2117 return 1;
2118 }
2119 gen_op_iwmmxt_movq_wRn_M0(wrd);
2120 gen_op_iwmmxt_set_mup();
2121 break;
2122 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2123 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2124 wrd = (insn >> 12) & 0xf;
2125 rd0 = (insn >> 16) & 0xf;
2126 rd1 = (insn >> 0) & 0xf;
2127 gen_op_iwmmxt_movq_M0_wRn(rd0);
2128 switch ((insn >> 22) & 3) {
2129 case 0:
2130 if (insn & (1 << 21))
2131 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2132 else
2133 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2134 break;
2135 case 1:
2136 if (insn & (1 << 21))
2137 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2138 else
2139 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2140 break;
2141 case 2:
2142 if (insn & (1 << 21))
2143 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2144 else
2145 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2146 break;
2147 case 3:
2148 return 1;
2149 }
2150 gen_op_iwmmxt_movq_wRn_M0(wrd);
2151 gen_op_iwmmxt_set_mup();
2152 break;
2153 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2154 case 0x402: case 0x502: case 0x602: case 0x702:
2155 wrd = (insn >> 12) & 0xf;
2156 rd0 = (insn >> 16) & 0xf;
2157 rd1 = (insn >> 0) & 0xf;
2158 gen_op_iwmmxt_movq_M0_wRn(rd0);
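/* WALIGNI extracts 64 bits from the concatenation of rd0:rd1,
 * starting at the byte offset encoded in insn[21:20]. */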
2159 tmp = tcg_const_i32((insn >> 20) & 3);
2160 iwmmxt_load_reg(cpu_V1, rd1);
2161 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2162 tcg_temp_free(tmp);
2163 gen_op_iwmmxt_movq_wRn_M0(wrd);
2164 gen_op_iwmmxt_set_mup();
2165 break;
2166 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2167 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2168 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2169 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2170 wrd = (insn >> 12) & 0xf;
2171 rd0 = (insn >> 16) & 0xf;
2172 rd1 = (insn >> 0) & 0xf;
2173 gen_op_iwmmxt_movq_M0_wRn(rd0);
2174 switch ((insn >> 20) & 0xf) {
2175 case 0x0:
2176 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2177 break;
2178 case 0x1:
2179 gen_op_iwmmxt_subub_M0_wRn(rd1);
2180 break;
2181 case 0x3:
2182 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2183 break;
2184 case 0x4:
2185 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2186 break;
2187 case 0x5:
2188 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2189 break;
2190 case 0x7:
2191 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2192 break;
2193 case 0x8:
2194 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2195 break;
2196 case 0x9:
2197 gen_op_iwmmxt_subul_M0_wRn(rd1);
2198 break;
2199 case 0xb:
2200 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2201 break;
2202 default:
2203 return 1;
2204 }
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 gen_op_iwmmxt_set_cup();
2208 break;
2209 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2210 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2211 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2212 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2213 wrd = (insn >> 12) & 0xf;
2214 rd0 = (insn >> 16) & 0xf;
2215 gen_op_iwmmxt_movq_M0_wRn(rd0);
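/* The 8-bit WSHUFH lane-select immediate is split across the insn:
 * high nibble in insn[23:20], low nibble in insn[3:0]. */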
2216 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2217 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2218 tcg_temp_free(tmp);
2219 gen_op_iwmmxt_movq_wRn_M0(wrd);
2220 gen_op_iwmmxt_set_mup();
2221 gen_op_iwmmxt_set_cup();
2222 break;
2223 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2224 case 0x418: case 0x518: case 0x618: case 0x718:
2225 case 0x818: case 0x918: case 0xa18: case 0xb18:
2226 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2227 wrd = (insn >> 12) & 0xf;
2228 rd0 = (insn >> 16) & 0xf;
2229 rd1 = (insn >> 0) & 0xf;
2230 gen_op_iwmmxt_movq_M0_wRn(rd0);
2231 switch ((insn >> 20) & 0xf) {
2232 case 0x0:
2233 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2234 break;
2235 case 0x1:
2236 gen_op_iwmmxt_addub_M0_wRn(rd1);
2237 break;
2238 case 0x3:
2239 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2240 break;
2241 case 0x4:
2242 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2243 break;
2244 case 0x5:
2245 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2246 break;
2247 case 0x7:
2248 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2249 break;
2250 case 0x8:
2251 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2252 break;
2253 case 0x9:
2254 gen_op_iwmmxt_addul_M0_wRn(rd1);
2255 break;
2256 case 0xb:
2257 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2258 break;
2259 default:
2260 return 1;
2261 }
2262 gen_op_iwmmxt_movq_wRn_M0(wrd);
2263 gen_op_iwmmxt_set_mup();
2264 gen_op_iwmmxt_set_cup();
2265 break;
2266 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2267 case 0x408: case 0x508: case 0x608: case 0x708:
2268 case 0x808: case 0x908: case 0xa08: case 0xb08:
2269 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2270 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2271 return 1;
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
2276 switch ((insn >> 22) & 3) {
2277 case 1:
2278 if (insn & (1 << 21))
2279 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2280 else
2281 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2282 break;
2283 case 2:
2284 if (insn & (1 << 21))
2285 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2286 else
2287 gen_op_iwmmxt_packul_M0_wRn(rd1);
2288 break;
2289 case 3:
2290 if (insn & (1 << 21))
2291 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2292 else
2293 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2294 break;
2295 }
2296 gen_op_iwmmxt_movq_wRn_M0(wrd);
2297 gen_op_iwmmxt_set_mup();
2298 gen_op_iwmmxt_set_cup();
2299 break;
2300 case 0x201: case 0x203: case 0x205: case 0x207:
2301 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2302 case 0x211: case 0x213: case 0x215: case 0x217:
2303 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2304 wrd = (insn >> 5) & 0xf;
2305 rd0 = (insn >> 12) & 0xf;
2306 rd1 = (insn >> 0) & 0xf;
2307 if (rd0 == 0xf || rd1 == 0xf)
2308 return 1;
2309 gen_op_iwmmxt_movq_M0_wRn(wrd);
2310 tmp = load_reg(s, rd0);
2311 tmp2 = load_reg(s, rd1);
2312 switch ((insn >> 16) & 0xf) {
2313 case 0x0: /* TMIA */
2314 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2315 break;
2316 case 0x8: /* TMIAPH */
2317 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2318 break;
2319 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2320 if (insn & (1 << 16))
2321 tcg_gen_shri_i32(tmp, tmp, 16);
2322 if (insn & (1 << 17))
2323 tcg_gen_shri_i32(tmp2, tmp2, 16);
2324 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2325 break;
2326 default:
2327 dead_tmp(tmp2);
2328 dead_tmp(tmp);
2329 return 1;
2330 }
2331 dead_tmp(tmp2);
2332 dead_tmp(tmp);
2333 gen_op_iwmmxt_movq_wRn_M0(wrd);
2334 gen_op_iwmmxt_set_mup();
2335 break;
2336 default:
2337 return 1;
2338 }
2339
2340 return 0;
2341 }
2342
2343 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2344 (i.e. an undefined instruction). */
2345 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2346 {
2347 int acc, rd0, rd1, rdhi, rdlo;
2348 TCGv tmp, tmp2;
2349
2350 if ((insn & 0x0ff00f10) == 0x0e200010) {
2351 /* Multiply with Internal Accumulate Format */
2352 rd0 = (insn >> 12) & 0xf;
2353 rd1 = insn & 0xf;
2354 acc = (insn >> 5) & 7;
2355
2356 if (acc != 0)
2357 return 1;
2358
2359 tmp = load_reg(s, rd0);
2360 tmp2 = load_reg(s, rd1);
2361 switch ((insn >> 16) & 0xf) {
2362 case 0x0: /* MIA */
2363 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2364 break;
2365 case 0x8: /* MIAPH */
2366 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2367 break;
2368 case 0xc: /* MIABB */
2369 case 0xd: /* MIABT */
2370 case 0xe: /* MIATB */
2371 case 0xf: /* MIATT */
2372 if (insn & (1 << 16))
2373 tcg_gen_shri_i32(tmp, tmp, 16);
2374 if (insn & (1 << 17))
2375 tcg_gen_shri_i32(tmp2, tmp2, 16);
2376 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2377 break;
2378 default:
/* Free the loaded operands before rejecting the insn. */
dead_tmp(tmp2);
dead_tmp(tmp);
2379 return 1;
2380 }
2381 dead_tmp(tmp2);
2382 dead_tmp(tmp);
2383
2384 gen_op_iwmmxt_movq_wRn_M0(acc);
2385 return 0;
2386 }
2387
2388 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2389 /* Internal Accumulator Access Format */
2390 rdhi = (insn >> 16) & 0xf;
2391 rdlo = (insn >> 12) & 0xf;
2392 acc = insn & 7;
2393
2394 if (acc != 0)
2395 return 1;
2396
2397 if (insn & ARM_CP_RW_BIT) { /* MRA */
2398 iwmmxt_load_reg(cpu_V0, acc);
2399 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2400 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2401 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
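/* acc0 is only 40 bits wide, so keep just bits [39:32] in rdhi. */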
2402 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2403 } else { /* MAR */
2404 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2405 iwmmxt_store_reg(cpu_V0, acc);
2406 }
2407 return 0;
2408 }
2409
2410 return 1;
2411 }
2412
2413 /* Disassemble system coprocessor instruction. Return nonzero if the
2414 instruction is not defined. */
2415 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2416 {
2417 TCGv tmp, tmp2;
2418 uint32_t rd = (insn >> 12) & 0xf;
2419 uint32_t cp = (insn >> 8) & 0xf;
2420 if (IS_USER(s)) {
2421 return 1;
2422 }
2423
2424 if (insn & ARM_CP_RW_BIT) {
2425 if (!env->cp[cp].cp_read)
2426 return 1;
2427 gen_set_pc_im(s->pc);
2428 tmp = new_tmp();
2429 tmp2 = tcg_const_i32(insn);
2430 gen_helper_get_cp(tmp, cpu_env, tmp2);
2431 tcg_temp_free(tmp2);
2432 store_reg(s, rd, tmp);
2433 } else {
2434 if (!env->cp[cp].cp_write)
2435 return 1;
2436 gen_set_pc_im(s->pc);
2437 tmp = load_reg(s, rd);
2438 tmp2 = tcg_const_i32(insn);
2439 gen_helper_set_cp(cpu_env, tmp2, tmp);
2440 tcg_temp_free(tmp2);
2441 dead_tmp(tmp);
2442 }
2443 return 0;
2444 }
2445
2446 static int cp15_user_ok(uint32_t insn)
2447 {
2448 int cpn = (insn >> 16) & 0xf;
2449 int cpm = insn & 0xf;
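/* Pack opc2 (insn[7:5]) and opc1 (insn[23:21]) into a single value:
 * op = opc2 | (opc1 << 3). */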
2450 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2451
2452 if (cpn == 13 && cpm == 0) {
2453 /* TLS register. */
2454 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2455 return 1;
2456 }
2457 if (cpn == 7) {
2458 /* ISB, DSB, DMB. */
2459 if ((cpm == 5 && op == 4)
2460 || (cpm == 10 && (op == 4 || op == 5)))
2461 return 1;
2462 }
2463 return 0;
2464 }
2465
2466 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2467 {
2468 TCGv tmp;
2469 int cpn = (insn >> 16) & 0xf;
2470 int cpm = insn & 0xf;
2471 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2472
2473 if (!arm_feature(env, ARM_FEATURE_V6K))
2474 return 0;
2475
2476 if (!(cpn == 13 && cpm == 0))
2477 return 0;
2478
2479 if (insn & ARM_CP_RW_BIT) {
2480 switch (op) {
2481 case 2:
2482 tmp = load_cpu_field(cp15.c13_tls1);
2483 break;
2484 case 3:
2485 tmp = load_cpu_field(cp15.c13_tls2);
2486 break;
2487 case 4:
2488 tmp = load_cpu_field(cp15.c13_tls3);
2489 break;
2490 default:
2491 return 0;
2492 }
2493 store_reg(s, rd, tmp);
2494
2495 } else {
2496 tmp = load_reg(s, rd);
2497 switch (op) {
2498 case 2:
2499 store_cpu_field(tmp, cp15.c13_tls1);
2500 break;
2501 case 3:
2502 store_cpu_field(tmp, cp15.c13_tls2);
2503 break;
2504 case 4:
2505 store_cpu_field(tmp, cp15.c13_tls3);
2506 break;
2507 default:
2508 dead_tmp(tmp);
2509 return 0;
2510 }
2511 }
2512 return 1;
2513 }
2514
2515 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if the
2516 instruction is not defined. */
2517 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2518 {
2519 uint32_t rd;
2520 TCGv tmp, tmp2;
2521
2522 /* M profile cores use memory mapped registers instead of cp15. */
2523 if (arm_feature(env, ARM_FEATURE_M))
2524 return 1;
2525
2526 if ((insn & (1 << 25)) == 0) {
2527 if (insn & (1 << 20)) {
2528 /* mrrc */
2529 return 1;
2530 }
2531 /* mcrr. Used for block cache operations, so implement as no-op. */
2532 return 0;
2533 }
2534 if ((insn & (1 << 4)) == 0) {
2535 /* cdp */
2536 return 1;
2537 }
2538 if (IS_USER(s) && !cp15_user_ok(insn)) {
2539 return 1;
2540 }
2541 if ((insn & 0x0fff0fff) == 0x0e070f90
2542 || (insn & 0x0fff0fff) == 0x0e070f58) {
2543 /* Wait for interrupt. */
2544 gen_set_pc_im(s->pc);
2545 s->is_jmp = DISAS_WFI;
2546 return 0;
2547 }
2548 rd = (insn >> 12) & 0xf;
2549
2550 if (cp15_tls_load_store(env, s, insn, rd))
2551 return 0;
2552
2553 tmp2 = tcg_const_i32(insn);
2554 if (insn & ARM_CP_RW_BIT) {
2555 tmp = new_tmp();
2556 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2557 /* If the destination register is r15 then the condition codes are set. */
2558 if (rd != 15)
2559 store_reg(s, rd, tmp);
2560 else
2561 dead_tmp(tmp);
2562 } else {
2563 tmp = load_reg(s, rd);
2564 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2565 dead_tmp(tmp);
2566 /* Normally we would always end the TB here, but Linux
2567 * arch/arm/mach-pxa/sleep.S expects two instructions following
2568 * an MMU enable to execute from cache. Imitate this behaviour. */
2569 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2570 (insn & 0x0fff0fff) != 0x0e010f10)
2571 gen_lookup_tb(s);
2572 }
2573 tcg_temp_free_i32(tmp2);
2574 return 0;
2575 }
2576
2577 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2578 #define VFP_SREG(insn, bigbit, smallbit) \
2579 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2580 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2581 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2582 reg = (((insn) >> (bigbit)) & 0x0f) \
2583 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2584 } else { \
2585 if (insn & (1 << (smallbit))) \
2586 return 1; \
2587 reg = ((insn) >> (bigbit)) & 0x0f; \
2588 }} while (0)
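/* A VFP register number is split across the insn: four bits at
 * "bigbit" plus one extra bit at "smallbit". VFP3 has 32 double
 * registers, so the extra bit becomes the top bit of a D register
 * number; pre-VFP3 cores only have 16 and that bit must be zero.
 * VFP_REG_SHR above allows a negative shift count (a left shift) so
 * that VFP_SREG also works when bigbit is 0. */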
2589
2590 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2591 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2592 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2593 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2594 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2595 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2596
2597 /* Move between integer and VFP cores. */
2598 static TCGv gen_vfp_mrs(void)
2599 {
2600 TCGv tmp = new_tmp();
2601 tcg_gen_mov_i32(tmp, cpu_F0s);
2602 return tmp;
2603 }
2604
2605 static void gen_vfp_msr(TCGv tmp)
2606 {
2607 tcg_gen_mov_i32(cpu_F0s, tmp);
2608 dead_tmp(tmp);
2609 }
2610
2611 static void gen_neon_dup_u8(TCGv var, int shift)
2612 {
2613 TCGv tmp = new_tmp();
2614 if (shift)
2615 tcg_gen_shri_i32(var, var, shift);
2616 tcg_gen_ext8u_i32(var, var);
2617 tcg_gen_shli_i32(tmp, var, 8);
2618 tcg_gen_or_i32(var, var, tmp);
2619 tcg_gen_shli_i32(tmp, var, 16);
2620 tcg_gen_or_i32(var, var, tmp);
2621 dead_tmp(tmp);
2622 }
2623
2624 static void gen_neon_dup_low16(TCGv var)
2625 {
2626 TCGv tmp = new_tmp();
2627 tcg_gen_ext16u_i32(var, var);
2628 tcg_gen_shli_i32(tmp, var, 16);
2629 tcg_gen_or_i32(var, var, tmp);
2630 dead_tmp(tmp);
2631 }
2632
2633 static void gen_neon_dup_high16(TCGv var)
2634 {
2635 TCGv tmp = new_tmp();
2636 tcg_gen_andi_i32(var, var, 0xffff0000);
2637 tcg_gen_shri_i32(tmp, var, 16);
2638 tcg_gen_or_i32(var, var, tmp);
2639 dead_tmp(tmp);
2640 }
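/* The gen_neon_dup_* helpers above replicate a single 8- or 16-bit
 * lane across all lanes of a 32-bit value, as needed for VDUP. */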
2641
2642 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2643 (i.e. an undefined instruction). */
2644 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2645 {
2646 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2647 int dp, veclen;
2648 TCGv addr;
2649 TCGv tmp;
2650 TCGv tmp2;
2651
2652 if (!arm_feature(env, ARM_FEATURE_VFP))
2653 return 1;
2654
2655 if (!s->vfp_enabled) {
2656 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2657 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2658 return 1;
2659 rn = (insn >> 16) & 0xf;
2660 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2661 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2662 return 1;
2663 }
2664 dp = ((insn & 0xf00) == 0xb00);
2665 switch ((insn >> 24) & 0xf) {
2666 case 0xe:
2667 if (insn & (1 << 4)) {
2668 /* single register transfer */
2669 rd = (insn >> 12) & 0xf;
2670 if (dp) {
2671 int size;
2672 int pass;
2673
2674 VFP_DREG_N(rn, insn);
2675 if (insn & 0xf)
2676 return 1;
2677 if (insn & 0x00c00060
2678 && !arm_feature(env, ARM_FEATURE_NEON))
2679 return 1;
2680
2681 pass = (insn >> 21) & 1;
2682 if (insn & (1 << 22)) {
2683 size = 0;
2684 offset = ((insn >> 5) & 3) * 8;
2685 } else if (insn & (1 << 5)) {
2686 size = 1;
2687 offset = (insn & (1 << 6)) ? 16 : 0;
2688 } else {
2689 size = 2;
2690 offset = 0;
2691 }
2692 if (insn & ARM_CP_RW_BIT) {
2693 /* vfp->arm */
2694 tmp = neon_load_reg(rn, pass);
2695 switch (size) {
2696 case 0:
2697 if (offset)
2698 tcg_gen_shri_i32(tmp, tmp, offset);
2699 if (insn & (1 << 23))
2700 gen_uxtb(tmp);
2701 else
2702 gen_sxtb(tmp);
2703 break;
2704 case 1:
2705 if (insn & (1 << 23)) {
2706 if (offset) {
2707 tcg_gen_shri_i32(tmp, tmp, 16);
2708 } else {
2709 gen_uxth(tmp);
2710 }
2711 } else {
2712 if (offset) {
2713 tcg_gen_sari_i32(tmp, tmp, 16);
2714 } else {
2715 gen_sxth(tmp);
2716 }
2717 }
2718 break;
2719 case 2:
2720 break;
2721 }
2722 store_reg(s, rd, tmp);
2723 } else {
2724 /* arm->vfp */
2725 tmp = load_reg(s, rd);
2726 if (insn & (1 << 23)) {
2727 /* VDUP */
2728 if (size == 0) {
2729 gen_neon_dup_u8(tmp, 0);
2730 } else if (size == 1) {
2731 gen_neon_dup_low16(tmp);
2732 }
2733 for (n = 0; n <= pass * 2; n++) {
2734 tmp2 = new_tmp();
2735 tcg_gen_mov_i32(tmp2, tmp);
2736 neon_store_reg(rn, n, tmp2);
2737 }
2738 neon_store_reg(rn, n, tmp);
2739 } else {
2740 /* VMOV */
2741 switch (size) {
2742 case 0:
2743 tmp2 = neon_load_reg(rn, pass);
2744 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2745 dead_tmp(tmp2);
2746 break;
2747 case 1:
2748 tmp2 = neon_load_reg(rn, pass);
2749 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2750 dead_tmp(tmp2);
2751 break;
2752 case 2:
2753 break;
2754 }
2755 neon_store_reg(rn, pass, tmp);
2756 }
2757 }
2758 } else { /* !dp */
2759 if ((insn & 0x6f) != 0x00)
2760 return 1;
2761 rn = VFP_SREG_N(insn);
2762 if (insn & ARM_CP_RW_BIT) {
2763 /* vfp->arm */
2764 if (insn & (1 << 21)) {
2765 /* system register */
2766 rn >>= 1;
2767
2768 switch (rn) {
2769 case ARM_VFP_FPSID:
2770 /* VFP2 allows access to FPSID from userspace.
2771 VFP3 restricts all id registers to privileged
2772 accesses. */
2773 if (IS_USER(s)
2774 && arm_feature(env, ARM_FEATURE_VFP3))
2775 return 1;
2776 tmp = load_cpu_field(vfp.xregs[rn]);
2777 break;
2778 case ARM_VFP_FPEXC:
2779 if (IS_USER(s))
2780 return 1;
2781 tmp = load_cpu_field(vfp.xregs[rn]);
2782 break;
2783 case ARM_VFP_FPINST:
2784 case ARM_VFP_FPINST2:
2785 /* Not present in VFP3. */
2786 if (IS_USER(s)
2787 || arm_feature(env, ARM_FEATURE_VFP3))
2788 return 1;
2789 tmp = load_cpu_field(vfp.xregs[rn]);
2790 break;
2791 case ARM_VFP_FPSCR:
2792 if (rd == 15) {
2793 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2794 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2795 } else {
2796 tmp = new_tmp();
2797 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2798 }
2799 break;
2800 case ARM_VFP_MVFR0:
2801 case ARM_VFP_MVFR1:
2802 if (IS_USER(s)
2803 || !arm_feature(env, ARM_FEATURE_VFP3))
2804 return 1;
2805 tmp = load_cpu_field(vfp.xregs[rn]);
2806 break;
2807 default:
2808 return 1;
2809 }
2810 } else {
2811 gen_mov_F0_vreg(0, rn);
2812 tmp = gen_vfp_mrs();
2813 }
2814 if (rd == 15) {
2815 /* Set the 4 flag bits in the CPSR. */
2816 gen_set_nzcv(tmp);
2817 dead_tmp(tmp);
2818 } else {
2819 store_reg(s, rd, tmp);
2820 }
2821 } else {
2822 /* arm->vfp */
2823 tmp = load_reg(s, rd);
2824 if (insn & (1 << 21)) {
2825 rn >>= 1;
2826 /* system register */
2827 switch (rn) {
2828 case ARM_VFP_FPSID:
2829 case ARM_VFP_MVFR0:
2830 case ARM_VFP_MVFR1:
2831 /* Writes are ignored. */
2832 break;
2833 case ARM_VFP_FPSCR:
2834 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2835 dead_tmp(tmp);
2836 gen_lookup_tb(s);
2837 break;
2838 case ARM_VFP_FPEXC:
2839 if (IS_USER(s))
2840 return 1;
2841 /* TODO: VFP subarchitecture support.
2842 * For now, keep the EN bit only */
2843 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2844 store_cpu_field(tmp, vfp.xregs[rn]);
2845 gen_lookup_tb(s);
2846 break;
2847 case ARM_VFP_FPINST:
2848 case ARM_VFP_FPINST2:
2849 store_cpu_field(tmp, vfp.xregs[rn]);
2850 break;
2851 default:
2852 return 1;
2853 }
2854 } else {
2855 gen_vfp_msr(tmp);
2856 gen_mov_vreg_F0(0, rn);
2857 }
2858 }
2859 }
2860 } else {
2861 /* data processing */
2862 /* The opcode is in bits 23, 21, 20 and 6. */
2863 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2864 if (dp) {
2865 if (op == 15) {
2866 /* rn is opcode */
2867 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2868 } else {
2869 /* rn is register number */
2870 VFP_DREG_N(rn, insn);
2871 }
2872
2873 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2874 /* Integer or single precision destination. */
2875 rd = VFP_SREG_D(insn);
2876 } else {
2877 VFP_DREG_D(rd, insn);
2878 }
2879 if (op == 15 &&
2880 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2881 /* VCVT from int is always from S reg regardless of dp bit.
2882 * VCVT with immediate frac_bits has the same format as SREG_M.
2883 */
2884 rm = VFP_SREG_M(insn);
2885 } else {
2886 VFP_DREG_M(rm, insn);
2887 }
2888 } else {
2889 rn = VFP_SREG_N(insn);
2890 if (op == 15 && rn == 15) {
2891 /* Double precision destination. */
2892 VFP_DREG_D(rd, insn);
2893 } else {
2894 rd = VFP_SREG_D(insn);
2895 }
2896 /* NB that we implicitly rely on the encoding for the frac_bits
2897 * in VCVT of fixed to float being the same as that of an SREG_M
2898 */
2899 rm = VFP_SREG_M(insn);
2900 }
2901
2902 veclen = s->vec_len;
2903 if (op == 15 && rn > 3)
2904 veclen = 0;
2905
2906 /* Shut up compiler warnings. */
2907 delta_m = 0;
2908 delta_d = 0;
2909 bank_mask = 0;
2910
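/* VFP short vectors: if FPSCR.LEN is nonzero an arithmetic op
 * iterates over several registers. Registers are grouped into banks
 * (8 singles or 4 doubles); a destination in bank 0 makes the op a
 * plain scalar one, and an rm in bank 0 gives a mixed scalar/vector
 * op. delta_d/delta_m are the per-iteration register strides. */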
2911 if (veclen > 0) {
2912 if (dp)
2913 bank_mask = 0xc;
2914 else
2915 bank_mask = 0x18;
2916
2917 /* Figure out what type of vector operation this is. */
2918 if ((rd & bank_mask) == 0) {
2919 /* scalar */
2920 veclen = 0;
2921 } else {
2922 if (dp)
2923 delta_d = (s->vec_stride >> 1) + 1;
2924 else
2925 delta_d = s->vec_stride + 1;
2926
2927 if ((rm & bank_mask) == 0) {
2928 /* mixed scalar/vector */
2929 delta_m = 0;
2930 } else {
2931 /* vector */
2932 delta_m = delta_d;
2933 }
2934 }
2935 }
2936
2937 /* Load the initial operands. */
2938 if (op == 15) {
2939 switch (rn) {
2940 case 16:
2941 case 17:
2942 /* Integer source */
2943 gen_mov_F0_vreg(0, rm);
2944 break;
2945 case 8:
2946 case 9:
2947 /* Compare */
2948 gen_mov_F0_vreg(dp, rd);
2949 gen_mov_F1_vreg(dp, rm);
2950 break;
2951 case 10:
2952 case 11:
2953 /* Compare with zero */
2954 gen_mov_F0_vreg(dp, rd);
2955 gen_vfp_F1_ld0(dp);
2956 break;
2957 case 20:
2958 case 21:
2959 case 22:
2960 case 23:
2961 case 28:
2962 case 29:
2963 case 30:
2964 case 31:
2965 /* Source and destination the same. */
2966 gen_mov_F0_vreg(dp, rd);
2967 break;
2968 default:
2969 /* One source operand. */
2970 gen_mov_F0_vreg(dp, rm);
2971 break;
2972 }
2973 } else {
2974 /* Two source operands. */
2975 gen_mov_F0_vreg(dp, rn);
2976 gen_mov_F1_vreg(dp, rm);
2977 }
2978
2979 for (;;) {
2980 /* Perform the calculation. */
2981 switch (op) {
2982 case 0: /* mac: fd + (fn * fm) */
2983 gen_vfp_mul(dp);
2984 gen_mov_F1_vreg(dp, rd);
2985 gen_vfp_add(dp);
2986 break;
2987 case 1: /* nmac: fd - (fn * fm) */
2988 gen_vfp_mul(dp);
2989 gen_vfp_neg(dp);
2990 gen_mov_F1_vreg(dp, rd);
2991 gen_vfp_add(dp);
2992 break;
2993 case 2: /* msc: -fd + (fn * fm) */
2994 gen_vfp_mul(dp);
2995 gen_mov_F1_vreg(dp, rd);
2996 gen_vfp_sub(dp);
2997 break;
2998 case 3: /* nmsc: -fd - (fn * fm) */
2999 gen_vfp_mul(dp);
3000 gen_vfp_neg(dp);
3001 gen_mov_F1_vreg(dp, rd);
3002 gen_vfp_sub(dp);
3003 break;
3004 case 4: /* mul: fn * fm */
3005 gen_vfp_mul(dp);
3006 break;
3007 case 5: /* nmul: -(fn * fm) */
3008 gen_vfp_mul(dp);
3009 gen_vfp_neg(dp);
3010 break;
3011 case 6: /* add: fn + fm */
3012 gen_vfp_add(dp);
3013 break;
3014 case 7: /* sub: fn - fm */
3015 gen_vfp_sub(dp);
3016 break;
3017 case 8: /* div: fn / fm */
3018 gen_vfp_div(dp);
3019 break;
3020 case 14: /* fconst */
3021 if (!arm_feature(env, ARM_FEATURE_VFP3))
3022 return 1;
3023
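/* Expand the 8-bit VFP3 VMOV immediate "abcdefgh": a is the sign,
 * the exponent is NOT(b) followed by copies of b and then cd, and
 * efgh become the top fraction bits (the rest are zero). */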
3024 n = (insn << 12) & 0x80000000;
3025 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3026 if (dp) {
3027 if (i & 0x40)
3028 i |= 0x3f80;
3029 else
3030 i |= 0x4000;
3031 n |= i << 16;
3032 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3033 } else {
3034 if (i & 0x40)
3035 i |= 0x780;
3036 else
3037 i |= 0x800;
3038 n |= i << 19;
3039 tcg_gen_movi_i32(cpu_F0s, n);
3040 }
3041 break;
3042 case 15: /* extension space */
3043 switch (rn) {
3044 case 0: /* cpy */
3045 /* no-op */
3046 break;
3047 case 1: /* abs */
3048 gen_vfp_abs(dp);
3049 break;
3050 case 2: /* neg */
3051 gen_vfp_neg(dp);
3052 break;
3053 case 3: /* sqrt */
3054 gen_vfp_sqrt(dp);
3055 break;
3056 case 4: /* vcvtb.f32.f16 */
3057 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3058 return 1;
3059 tmp = gen_vfp_mrs();
3060 tcg_gen_ext16u_i32(tmp, tmp);
3061 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3062 dead_tmp(tmp);
3063 break;
3064 case 5: /* vcvtt.f32.f16 */
3065 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3066 return 1;
3067 tmp = gen_vfp_mrs();
3068 tcg_gen_shri_i32(tmp, tmp, 16);
3069 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3070 dead_tmp(tmp);
3071 break;
3072 case 6: /* vcvtb.f16.f32 */
3073 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3074 return 1;
3075 tmp = new_tmp();
3076 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3077 gen_mov_F0_vreg(0, rd);
3078 tmp2 = gen_vfp_mrs();
3079 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3080 tcg_gen_or_i32(tmp, tmp, tmp2);
3081 dead_tmp(tmp2);
3082 gen_vfp_msr(tmp);
3083 break;
3084 case 7: /* vcvtt.f16.f32 */
3085 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3086 return 1;
3087 tmp = new_tmp();
3088 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3089 tcg_gen_shli_i32(tmp, tmp, 16);
3090 gen_mov_F0_vreg(0, rd);
3091 tmp2 = gen_vfp_mrs();
3092 tcg_gen_ext16u_i32(tmp2, tmp2);
3093 tcg_gen_or_i32(tmp, tmp, tmp2);
3094 dead_tmp(tmp2);
3095 gen_vfp_msr(tmp);
3096 break;
3097 case 8: /* cmp */
3098 gen_vfp_cmp(dp);
3099 break;
3100 case 9: /* cmpe */
3101 gen_vfp_cmpe(dp);
3102 break;
3103 case 10: /* cmpz */
3104 gen_vfp_cmp(dp);
3105 break;
3106 case 11: /* cmpez */
3107 gen_vfp_F1_ld0(dp);
3108 gen_vfp_cmpe(dp);
3109 break;
3110 case 15: /* single<->double conversion */
3111 if (dp)
3112 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3113 else
3114 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3115 break;
3116 case 16: /* fuito */
3117 gen_vfp_uito(dp);
3118 break;
3119 case 17: /* fsito */
3120 gen_vfp_sito(dp);
3121 break;
3122 case 20: /* fshto */
3123 if (!arm_feature(env, ARM_FEATURE_VFP3))
3124 return 1;
3125 gen_vfp_shto(dp, 16 - rm);
3126 break;
3127 case 21: /* fslto */
3128 if (!arm_feature(env, ARM_FEATURE_VFP3))
3129 return 1;
3130 gen_vfp_slto(dp, 32 - rm);
3131 break;
3132 case 22: /* fuhto */
3133 if (!arm_feature(env, ARM_FEATURE_VFP3))
3134 return 1;
3135 gen_vfp_uhto(dp, 16 - rm);
3136 break;
3137 case 23: /* fulto */
3138 if (!arm_feature(env, ARM_FEATURE_VFP3))
3139 return 1;
3140 gen_vfp_ulto(dp, 32 - rm);
3141 break;
3142 case 24: /* ftoui */
3143 gen_vfp_toui(dp);
3144 break;
3145 case 25: /* ftouiz */
3146 gen_vfp_touiz(dp);
3147 break;
3148 case 26: /* ftosi */
3149 gen_vfp_tosi(dp);
3150 break;
3151 case 27: /* ftosiz */
3152 gen_vfp_tosiz(dp);
3153 break;
3154 case 28: /* ftosh */
3155 if (!arm_feature(env, ARM_FEATURE_VFP3))
3156 return 1;
3157 gen_vfp_tosh(dp, 16 - rm);
3158 break;
3159 case 29: /* ftosl */
3160 if (!arm_feature(env, ARM_FEATURE_VFP3))
3161 return 1;
3162 gen_vfp_tosl(dp, 32 - rm);
3163 break;
3164 case 30: /* ftouh */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
3167 gen_vfp_touh(dp, 16 - rm);
3168 break;
3169 case 31: /* ftoul */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
3172 gen_vfp_toul(dp, 32 - rm);
3173 break;
3174 default: /* undefined */
3175 printf ("rn:%d\n", rn);
3176 return 1;
3177 }
3178 break;
3179 default: /* undefined */
3180 printf ("op:%d\n", op);
3181 return 1;
3182 }
3183
3184 /* Write back the result. */
3185 if (op == 15 && (rn >= 8 && rn <= 11))
3186 ; /* Comparison, do nothing. */
3187 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3188 /* VCVT double to int: always integer result. */
3189 gen_mov_vreg_F0(0, rd);
3190 else if (op == 15 && rn == 15)
3191 /* conversion */
3192 gen_mov_vreg_F0(!dp, rd);
3193 else
3194 gen_mov_vreg_F0(dp, rd);
3195
3196 /* break out of the loop if we have finished */
3197 if (veclen == 0)
3198 break;
3199
3200 if (op == 15 && delta_m == 0) {
3201 /* single source, one-to-many */
3202 while (veclen--) {
3203 rd = ((rd + delta_d) & (bank_mask - 1))
3204 | (rd & bank_mask);
3205 gen_mov_vreg_F0(dp, rd);
3206 }
3207 break;
3208 }
3209 /* Set up the next operands. */
3210 veclen--;
3211 rd = ((rd + delta_d) & (bank_mask - 1))
3212 | (rd & bank_mask);
3213
3214 if (op == 15) {
3215 /* One source operand. */
3216 rm = ((rm + delta_m) & (bank_mask - 1))
3217 | (rm & bank_mask);
3218 gen_mov_F0_vreg(dp, rm);
3219 } else {
3220 /* Two source operands. */
3221 rn = ((rn + delta_d) & (bank_mask - 1))
3222 | (rn & bank_mask);
3223 gen_mov_F0_vreg(dp, rn);
3224 if (delta_m) {
3225 rm = ((rm + delta_m) & (bank_mask - 1))
3226 | (rm & bank_mask);
3227 gen_mov_F1_vreg(dp, rm);
3228 }
3229 }
3230 }
3231 }
3232 break;
3233 case 0xc:
3234 case 0xd:
3235 if ((insn & 0x03e00000) == 0x00400000) {
3236 /* two-register transfer */
3237 rn = (insn >> 16) & 0xf;
3238 rd = (insn >> 12) & 0xf;
3239 if (dp) {
3240 VFP_DREG_M(rm, insn);
3241 } else {
3242 rm = VFP_SREG_M(insn);
3243 }
3244
3245 if (insn & ARM_CP_RW_BIT) {
3246 /* vfp->arm */
3247 if (dp) {
3248 gen_mov_F0_vreg(0, rm * 2);
3249 tmp = gen_vfp_mrs();
3250 store_reg(s, rd, tmp);
3251 gen_mov_F0_vreg(0, rm * 2 + 1);
3252 tmp = gen_vfp_mrs();
3253 store_reg(s, rn, tmp);
3254 } else {
3255 gen_mov_F0_vreg(0, rm);
3256 tmp = gen_vfp_mrs();
3257 store_reg(s, rn, tmp);
3258 gen_mov_F0_vreg(0, rm + 1);
3259 tmp = gen_vfp_mrs();
3260 store_reg(s, rd, tmp);
3261 }
3262 } else {
3263 /* arm->vfp */
3264 if (dp) {
3265 tmp = load_reg(s, rd);
3266 gen_vfp_msr(tmp);
3267 gen_mov_vreg_F0(0, rm * 2);
3268 tmp = load_reg(s, rn);
3269 gen_vfp_msr(tmp);
3270 gen_mov_vreg_F0(0, rm * 2 + 1);
3271 } else {
3272 tmp = load_reg(s, rn);
3273 gen_vfp_msr(tmp);
3274 gen_mov_vreg_F0(0, rm);
3275 tmp = load_reg(s, rd);
3276 gen_vfp_msr(tmp);
3277 gen_mov_vreg_F0(0, rm + 1);
3278 }
3279 }
3280 } else {
3281 /* Load/store */
3282 rn = (insn >> 16) & 0xf;
3283 if (dp)
3284 VFP_DREG_D(rd, insn);
3285 else
3286 rd = VFP_SREG_D(insn);
3287 if (s->thumb && rn == 15) {
3288 addr = new_tmp();
3289 tcg_gen_movi_i32(addr, s->pc & ~2);
3290 } else {
3291 addr = load_reg(s, rn);
3292 }
3293 if ((insn & 0x01200000) == 0x01000000) {
3294 /* Single load/store */
3295 offset = (insn & 0xff) << 2;
3296 if ((insn & (1 << 23)) == 0)
3297 offset = -offset;
3298 tcg_gen_addi_i32(addr, addr, offset);
3299 if (insn & (1 << 20)) {
3300 gen_vfp_ld(s, dp, addr);
3301 gen_mov_vreg_F0(dp, rd);
3302 } else {
3303 gen_mov_F0_vreg(dp, rd);
3304 gen_vfp_st(s, dp, addr);
3305 }
3306 dead_tmp(addr);
3307 } else {
3308 /* load/store multiple */
3309 if (dp)
3310 n = (insn >> 1) & 0x7f;
3311 else
3312 n = insn & 0xff;
3313
3314 if (insn & (1 << 24)) /* pre-decrement */
3315 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3316
3317 if (dp)
3318 offset = 8;
3319 else
3320 offset = 4;
3321 for (i = 0; i < n; i++) {
3322 if (insn & ARM_CP_RW_BIT) {
3323 /* load */
3324 gen_vfp_ld(s, dp, addr);
3325 gen_mov_vreg_F0(dp, rd + i);
3326 } else {
3327 /* store */
3328 gen_mov_F0_vreg(dp, rd + i);
3329 gen_vfp_st(s, dp, addr);
3330 }
3331 tcg_gen_addi_i32(addr, addr, offset);
3332 }
3333 if (insn & (1 << 21)) {
3334 /* writeback */
3335 if (insn & (1 << 24))
3336 offset = -offset * n;
3337 else if (dp && (insn & 1))
3338 offset = 4;
3339 else
3340 offset = 0;
3341
3342 if (offset != 0)
3343 tcg_gen_addi_i32(addr, addr, offset);
3344 store_reg(s, rn, addr);
3345 } else {
3346 dead_tmp(addr);
3347 }
3348 }
3349 }
3350 break;
3351 default:
3352 /* Should never happen. */
3353 return 1;
3354 }
3355 return 0;
3356 }
3357
3358 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3359 {
3360 TranslationBlock *tb;
3361
3362 tb = s->tb;
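/* Chain directly to the next TB only if the destination is on the
 * same guest page; cross-page jumps must go back through the main
 * loop so the target mapping can be revalidated. */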
3363 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3364 tcg_gen_goto_tb(n);
3365 gen_set_pc_im(dest);
3366 tcg_gen_exit_tb((long)tb + n);
3367 } else {
3368 gen_set_pc_im(dest);
3369 tcg_gen_exit_tb(0);
3370 }
3371 }
3372
3373 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3374 {
3375 if (unlikely(s->singlestep_enabled)) {
3376 /* An indirect jump so that we still trigger the debug exception. */
3377 if (s->thumb)
3378 dest |= 1;
3379 gen_bx_im(s, dest);
3380 } else {
3381 gen_goto_tb(s, 0, dest);
3382 s->is_jmp = DISAS_TB_JUMP;
3383 }
3384 }
3385
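/* 16x16->32 signed multiply with halfword operand selection, as used
 * by the SMULxy/SMLAxy family: x and y pick the top or bottom half
 * of t0 and t1 respectively. */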
3386 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3387 {
3388 if (x)
3389 tcg_gen_sari_i32(t0, t0, 16);
3390 else
3391 gen_sxth(t0);
3392 if (y)
3393 tcg_gen_sari_i32(t1, t1, 16);
3394 else
3395 gen_sxth(t1);
3396 tcg_gen_mul_i32(t0, t0, t1);
3397 }
3398
3399 /* Return the mask of PSR bits set by a MSR instruction. */
3400 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3401 uint32_t mask;
3402
3403 mask = 0;
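/* The four flag bits select the c, x, s and f fields of the PSR,
 * one byte each, lowest byte first. */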
3404 if (flags & (1 << 0))
3405 mask |= 0xff;
3406 if (flags & (1 << 1))
3407 mask |= 0xff00;
3408 if (flags & (1 << 2))
3409 mask |= 0xff0000;
3410 if (flags & (1 << 3))
3411 mask |= 0xff000000;
3412
3413 /* Mask out undefined bits. */
3414 mask &= ~CPSR_RESERVED;
3415 if (!arm_feature(env, ARM_FEATURE_V6))
3416 mask &= ~(CPSR_E | CPSR_GE);
3417 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3418 mask &= ~CPSR_IT;
3419 /* Mask out execution state bits. */
3420 if (!spsr)
3421 mask &= ~CPSR_EXEC;
3422 /* Mask out privileged bits. */
3423 if (IS_USER(s))
3424 mask &= CPSR_USER;
3425 return mask;
3426 }
3427
3428 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3429 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3430 {
3431 TCGv tmp;
3432 if (spsr) {
3433 /* ??? This is also undefined in system mode. */
3434 if (IS_USER(s))
3435 return 1;
3436
3437 tmp = load_cpu_field(spsr);
3438 tcg_gen_andi_i32(tmp, tmp, ~mask);
3439 tcg_gen_andi_i32(t0, t0, mask);
3440 tcg_gen_or_i32(tmp, tmp, t0);
3441 store_cpu_field(tmp, spsr);
3442 } else {
3443 gen_set_cpsr(t0, mask);
3444 }
3445 dead_tmp(t0);
3446 gen_lookup_tb(s);
3447 return 0;
3448 }
3449
3450 /* Returns nonzero if access to the PSR is not permitted. */
3451 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3452 {
3453 TCGv tmp;
3454 tmp = new_tmp();
3455 tcg_gen_movi_i32(tmp, val);
3456 return gen_set_psr(s, mask, spsr, tmp);
3457 }
3458
3459 /* Generate an old-style exception return. Marks pc as dead. */
3460 static void gen_exception_return(DisasContext *s, TCGv pc)
3461 {
3462 TCGv tmp;
3463 store_reg(s, 15, pc);
3464 tmp = load_cpu_field(spsr);
3465 gen_set_cpsr(tmp, 0xffffffff);
3466 dead_tmp(tmp);
3467 s->is_jmp = DISAS_UPDATE;
3468 }
3469
3470 /* Generate a v6 exception return. Marks both values as dead. */
3471 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3472 {
3473 gen_set_cpsr(cpsr, 0xffffffff);
3474 dead_tmp(cpsr);
3475 store_reg(s, 15, pc);
3476 s->is_jmp = DISAS_UPDATE;
3477 }
3478
3479 static inline void
3480 gen_set_condexec (DisasContext *s)
3481 {
3482 if (s->condexec_mask) {
3483 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3484 TCGv tmp = new_tmp();
3485 tcg_gen_movi_i32(tmp, val);
3486 store_cpu_field(tmp, condexec_bits);
3487 }
3488 }
3489
3490 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3491 {
3492 gen_set_condexec(s);
3493 gen_set_pc_im(s->pc - offset);
3494 gen_exception(excp);
3495 s->is_jmp = DISAS_JUMP;
3496 }
3497
3498 static void gen_nop_hint(DisasContext *s, int val)
3499 {
3500 switch (val) {
3501 case 3: /* wfi */
3502 gen_set_pc_im(s->pc);
3503 s->is_jmp = DISAS_WFI;
3504 break;
3505 case 2: /* wfe */
3506 case 4: /* sev */
3507 /* TODO: Implement SEV and WFE. May help SMP performance. */
3508 default: /* nop */
3509 break;
3510 }
3511 }
3512
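/* Shorthand argument list for "op0 = op0 OP op1" 64-bit helpers:
 * destination and first source are cpu_V0, second source is cpu_V1. */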
3513 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3514
3515 static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
3516 {
3517 switch (size) {
3518 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3519 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3520 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3521 default: return 1;
3522 }
3523 return 0;
3524 }
3525
3526 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3527 {
3528 switch (size) {
3529 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3530 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3531 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3532 default: return;
3533 }
3534 }
3535
3536 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3537 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3538 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3539 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3540 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3541
3542 /* FIXME: This is wrong. They set the wrong overflow bit. */
3543 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3544 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3545 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3546 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3547
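/* Dispatch an integer op to the helper matching the element size and
 * signedness. These expect "size", "u", "tmp" and "tmp2" to be in
 * scope at the expansion site; the _ENV variant is for helpers that
 * also take cpu_env (e.g. the saturating ops). */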
3548 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3549 switch ((size << 1) | u) { \
3550 case 0: \
3551 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3552 break; \
3553 case 1: \
3554 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3555 break; \
3556 case 2: \
3557 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3558 break; \
3559 case 3: \
3560 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3561 break; \
3562 case 4: \
3563 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3564 break; \
3565 case 5: \
3566 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3567 break; \
3568 default: return 1; \
3569 }} while (0)
3570
3571 #define GEN_NEON_INTEGER_OP(name) do { \
3572 switch ((size << 1) | u) { \
3573 case 0: \
3574 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3575 break; \
3576 case 1: \
3577 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3578 break; \
3579 case 2: \
3580 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3581 break; \
3582 case 3: \
3583 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3584 break; \
3585 case 4: \
3586 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3587 break; \
3588 case 5: \
3589 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3590 break; \
3591 default: return 1; \
3592 }} while (0)
3593
3594 static TCGv neon_load_scratch(int scratch)
3595 {
3596 TCGv tmp = new_tmp();
3597 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3598 return tmp;
3599 }
3600
3601 static void neon_store_scratch(int scratch, TCGv var)
3602 {
3603 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3604 dead_tmp(var);
3605 }
3606
3607 static inline TCGv neon_get_scalar(int size, int reg)
3608 {
3609 TCGv tmp;
3610 if (size == 1) {
3611 tmp = neon_load_reg(reg >> 1, reg & 1);
3612 } else {
3613 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3614 if (reg & 1) {
3615 gen_neon_dup_low16(tmp);
3616 } else {
3617 gen_neon_dup_high16(tmp);
3618 }
3619 }
3620 return tmp;
3621 }
3622
3623 static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3624 {
3625 TCGv rd, rm, tmp;
3626
3627 rd = new_tmp();
3628 rm = new_tmp();
3629 tmp = new_tmp();
3630
3631 tcg_gen_andi_i32(rd, t0, 0xff);
3632 tcg_gen_shri_i32(tmp, t0, 8);
3633 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3634 tcg_gen_or_i32(rd, rd, tmp);
3635 tcg_gen_shli_i32(tmp, t1, 16);
3636 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3637 tcg_gen_or_i32(rd, rd, tmp);
3638 tcg_gen_shli_i32(tmp, t1, 8);
3639 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3640 tcg_gen_or_i32(rd, rd, tmp);
3641
3642 tcg_gen_shri_i32(rm, t0, 8);
3643 tcg_gen_andi_i32(rm, rm, 0xff);
3644 tcg_gen_shri_i32(tmp, t0, 16);
3645 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3646 tcg_gen_or_i32(rm, rm, tmp);
3647 tcg_gen_shli_i32(tmp, t1, 8);
3648 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3649 tcg_gen_or_i32(rm, rm, tmp);
3650 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3651 tcg_gen_or_i32(t1, rm, tmp);
3652 tcg_gen_mov_i32(t0, rd);
3653
3654 dead_tmp(tmp);
3655 dead_tmp(rm);
3656 dead_tmp(rd);
3657 }
3658
3659 static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3660 {
3661 TCGv rd, rm, tmp;
3662
3663 rd = new_tmp();
3664 rm = new_tmp();
3665 tmp = new_tmp();
3666
3667 tcg_gen_andi_i32(rd, t0, 0xff);
3668 tcg_gen_shli_i32(tmp, t1, 8);
3669 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3670 tcg_gen_or_i32(rd, rd, tmp);
3671 tcg_gen_shli_i32(tmp, t0, 16);
3672 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3673 tcg_gen_or_i32(rd, rd, tmp);
3674 tcg_gen_shli_i32(tmp, t1, 24);
3675 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3676 tcg_gen_or_i32(rd, rd, tmp);
3677
3678 tcg_gen_andi_i32(rm, t1, 0xff000000);
3679 tcg_gen_shri_i32(tmp, t0, 8);
3680 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3681 tcg_gen_or_i32(rm, rm, tmp);
3682 tcg_gen_shri_i32(tmp, t1, 8);
3683 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3684 tcg_gen_or_i32(rm, rm, tmp);
3685 tcg_gen_shri_i32(tmp, t0, 16);
3686 tcg_gen_andi_i32(tmp, tmp, 0xff);
3687 tcg_gen_or_i32(t1, rm, tmp);
3688 tcg_gen_mov_i32(t0, rd);
3689
3690 dead_tmp(tmp);
3691 dead_tmp(rm);
3692 dead_tmp(rd);
3693 }
3694
3695 static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3696 {
3697 TCGv tmp, tmp2;
3698
3699 tmp = new_tmp();
3700 tmp2 = new_tmp();
3701
3702 tcg_gen_andi_i32(tmp, t0, 0xffff);
3703 tcg_gen_shli_i32(tmp2, t1, 16);
3704 tcg_gen_or_i32(tmp, tmp, tmp2);
3705 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3706 tcg_gen_shri_i32(tmp2, t0, 16);
3707 tcg_gen_or_i32(t1, t1, tmp2);
3708 tcg_gen_mov_i32(t0, tmp);
3709
3710 dead_tmp(tmp2);
3711 dead_tmp(tmp);
3712 }
3713
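/* De-interleave the elements of a register pair into the VFP scratch
 * area; the caller copies the results back. For 16-bit elements zip
 * and unzip are the same permutation. */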
3714 static void gen_neon_unzip(int reg, int q, int tmp, int size)
3715 {
3716 int n;
3717 TCGv t0, t1;
3718
3719 for (n = 0; n < q + 1; n += 2) {
3720 t0 = neon_load_reg(reg, n);
3721 t1 = neon_load_reg(reg, n + 1);
3722 switch (size) {
3723 case 0: gen_neon_unzip_u8(t0, t1); break;
3724 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
3725 case 2: /* no-op */; break;
3726 default: abort();
3727 }
3728 neon_store_scratch(tmp + n, t0);
3729 neon_store_scratch(tmp + n + 1, t1);
3730 }
3731 }
3732
3733 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3734 {
3735 TCGv rd, tmp;
3736
3737 rd = new_tmp();
3738 tmp = new_tmp();
3739
3740 tcg_gen_shli_i32(rd, t0, 8);
3741 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3742 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3743 tcg_gen_or_i32(rd, rd, tmp);
3744
3745 tcg_gen_shri_i32(t1, t1, 8);
3746 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3747 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3748 tcg_gen_or_i32(t1, t1, tmp);
3749 tcg_gen_mov_i32(t0, rd);
3750
3751 dead_tmp(tmp);
3752 dead_tmp(rd);
3753 }
3754
3755 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3756 {
3757 TCGv rd, tmp;
3758
3759 rd = new_tmp();
3760 tmp = new_tmp();
3761
3762 tcg_gen_shli_i32(rd, t0, 16);
3763 tcg_gen_andi_i32(tmp, t1, 0xffff);
3764 tcg_gen_or_i32(rd, rd, tmp);
3765 tcg_gen_shri_i32(t1, t1, 16);
3766 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3767 tcg_gen_or_i32(t1, t1, tmp);
3768 tcg_gen_mov_i32(t0, rd);
3769
3770 dead_tmp(tmp);
3771 dead_tmp(rd);
3772 }
3773
3774
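/* Layout of a VLD/VST "multiple structures" access, indexed by the
 * op field of the insn: number of registers transferred, element
 * interleave factor, and register spacing. */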
3775 static struct {
3776 int nregs;
3777 int interleave;
3778 int spacing;
3779 } neon_ls_element_type[11] = {
3780 {4, 4, 1},
3781 {4, 4, 2},
3782 {4, 1, 1},
3783 {4, 2, 1},
3784 {3, 3, 1},
3785 {3, 3, 2},
3786 {3, 1, 1},
3787 {1, 1, 1},
3788 {2, 2, 1},
3789 {2, 2, 2},
3790 {2, 1, 1}
3791 };
3792
3793 /* Translate a NEON load/store element instruction. Return nonzero if the
3794 instruction is invalid. */
3795 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3796 {
3797 int rd, rn, rm;
3798 int op;
3799 int nregs;
3800 int interleave;
3801 int spacing;
3802 int stride;
3803 int size;
3804 int reg;
3805 int pass;
3806 int load;
3807 int shift;
3808 int n;
3809 TCGv addr;
3810 TCGv tmp;
3811 TCGv tmp2;
3812 TCGv_i64 tmp64;
3813
3814 if (!s->vfp_enabled)
3815 return 1;
3816 VFP_DREG_D(rd, insn);
3817 rn = (insn >> 16) & 0xf;
3818 rm = insn & 0xf;
3819 load = (insn & (1 << 21)) != 0;
3820 addr = new_tmp();
3821 if ((insn & (1 << 23)) == 0) {
3822 /* Load store all elements. */
3823 op = (insn >> 8) & 0xf;
3824 size = (insn >> 6) & 3;
3825 if (op > 10)
3826 return 1;
3827 nregs = neon_ls_element_type[op].nregs;
3828 interleave = neon_ls_element_type[op].interleave;
3829 spacing = neon_ls_element_type[op].spacing;
3830 if (size == 3 && (interleave | spacing) != 1)
3831 return 1;
3832 load_reg_var(s, addr, rn);
3833 stride = (1 << size) * interleave;
3834 for (reg = 0; reg < nregs; reg++) {
3835 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3836 load_reg_var(s, addr, rn);
3837 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3838 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3839 load_reg_var(s, addr, rn);
3840 tcg_gen_addi_i32(addr, addr, 1 << size);
3841 }
3842 if (size == 3) {
3843 if (load) {
3844 tmp64 = gen_ld64(addr, IS_USER(s));
3845 neon_store_reg64(tmp64, rd);
3846 tcg_temp_free_i64(tmp64);
3847 } else {
3848 tmp64 = tcg_temp_new_i64();
3849 neon_load_reg64(tmp64, rd);
3850 gen_st64(tmp64, addr, IS_USER(s));
3851 }
3852 tcg_gen_addi_i32(addr, addr, stride);
3853 } else {
3854 for (pass = 0; pass < 2; pass++) {
3855 if (size == 2) {
3856 if (load) {
3857 tmp = gen_ld32(addr, IS_USER(s));
3858 neon_store_reg(rd, pass, tmp);
3859 } else {
3860 tmp = neon_load_reg(rd, pass);
3861 gen_st32(tmp, addr, IS_USER(s));
3862 }
3863 tcg_gen_addi_i32(addr, addr, stride);
3864 } else if (size == 1) {
3865 if (load) {
3866 tmp = gen_ld16u(addr, IS_USER(s));
3867 tcg_gen_addi_i32(addr, addr, stride);
3868 tmp2 = gen_ld16u(addr, IS_USER(s));
3869 tcg_gen_addi_i32(addr, addr, stride);
3870 tcg_gen_shli_i32(tmp2, tmp2, 16);
3871 tcg_gen_or_i32(tmp, tmp, tmp2);
3872 dead_tmp(tmp2);
3873 neon_store_reg(rd, pass, tmp);
3874 } else {
3875 tmp = neon_load_reg(rd, pass);
3876 tmp2 = new_tmp();
3877 tcg_gen_shri_i32(tmp2, tmp, 16);
3878 gen_st16(tmp, addr, IS_USER(s));
3879 tcg_gen_addi_i32(addr, addr, stride);
3880 gen_st16(tmp2, addr, IS_USER(s));
3881 tcg_gen_addi_i32(addr, addr, stride);
3882 }
3883 } else /* size == 0 */ {
3884 if (load) {
3885 TCGV_UNUSED(tmp2);
3886 for (n = 0; n < 4; n++) {
3887 tmp = gen_ld8u(addr, IS_USER(s));
3888 tcg_gen_addi_i32(addr, addr, stride);
3889 if (n == 0) {
3890 tmp2 = tmp;
3891 } else {
3892 tcg_gen_shli_i32(tmp, tmp, n * 8);
3893 tcg_gen_or_i32(tmp2, tmp2, tmp);
3894 dead_tmp(tmp);
3895 }
3896 }
3897 neon_store_reg(rd, pass, tmp2);
3898 } else {
3899 tmp2 = neon_load_reg(rd, pass);
3900 for (n = 0; n < 4; n++) {
3901 tmp = new_tmp();
3902 if (n == 0) {
3903 tcg_gen_mov_i32(tmp, tmp2);
3904 } else {
3905 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3906 }
3907 gen_st8(tmp, addr, IS_USER(s));
3908 tcg_gen_addi_i32(addr, addr, stride);
3909 }
3910 dead_tmp(tmp2);
3911 }
3912 }
3913 }
3914 }
3915 rd += spacing;
3916 }
3917 stride = nregs * 8;
3918 } else {
3919 size = (insn >> 10) & 3;
3920 if (size == 3) {
3921 /* Load single element to all lanes. */
3922 if (!load)
3923 return 1;
3924 size = (insn >> 6) & 3;
3925 nregs = ((insn >> 8) & 3) + 1;
3926 stride = (insn & (1 << 5)) ? 2 : 1;
3927 load_reg_var(s, addr, rn);
3928 for (reg = 0; reg < nregs; reg++) {
3929 switch (size) {
3930 case 0:
3931 tmp = gen_ld8u(addr, IS_USER(s));
3932 gen_neon_dup_u8(tmp, 0);
3933 break;
3934 case 1:
3935 tmp = gen_ld16u(addr, IS_USER(s));
3936 gen_neon_dup_low16(tmp);
3937 break;
3938 case 2:
3939 tmp = gen_ld32(addr, IS_USER(s));
3940 break;
3941 case 3:
3942 return 1;
3943 default: /* Avoid compiler warnings. */
3944 abort();
3945 }
3946 tcg_gen_addi_i32(addr, addr, 1 << size);
3947 tmp2 = new_tmp();
3948 tcg_gen_mov_i32(tmp2, tmp);
3949 neon_store_reg(rd, 0, tmp2);
3950 neon_store_reg(rd, 1, tmp);
3951 rd += stride;
3952 }
3953 stride = (1 << size) * nregs;
3954 } else {
3955 /* Single element. */
3956 pass = (insn >> 7) & 1;
3957 switch (size) {
3958 case 0:
3959 shift = ((insn >> 5) & 3) * 8;
3960 stride = 1;
3961 break;
3962 case 1:
3963 shift = ((insn >> 6) & 1) * 16;
3964 stride = (insn & (1 << 5)) ? 2 : 1;
3965 break;
3966 case 2:
3967 shift = 0;
3968 stride = (insn & (1 << 6)) ? 2 : 1;
3969 break;
3970 default:
3971 abort();
3972 }
3973 nregs = ((insn >> 8) & 3) + 1;
3974 load_reg_var(s, addr, rn);
3975 for (reg = 0; reg < nregs; reg++) {
3976 if (load) {
3977 switch (size) {
3978 case 0:
3979 tmp = gen_ld8u(addr, IS_USER(s));
3980 break;
3981 case 1:
3982 tmp = gen_ld16u(addr, IS_USER(s));
3983 break;
3984 case 2:
3985 tmp = gen_ld32(addr, IS_USER(s));
3986 break;
3987 default: /* Avoid compiler warnings. */
3988 abort();
3989 }
3990 if (size != 2) {
3991 tmp2 = neon_load_reg(rd, pass);
3992 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3993 dead_tmp(tmp2);
3994 }
3995 neon_store_reg(rd, pass, tmp);
3996 } else { /* Store */
3997 tmp = neon_load_reg(rd, pass);
3998 if (shift)
3999 tcg_gen_shri_i32(tmp, tmp, shift);
4000 switch (size) {
4001 case 0:
4002 gen_st8(tmp, addr, IS_USER(s));
4003 break;
4004 case 1:
4005 gen_st16(tmp, addr, IS_USER(s));
4006 break;
4007 case 2:
4008 gen_st32(tmp, addr, IS_USER(s));
4009 break;
4010 }
4011 }
4012 rd += stride;
4013 tcg_gen_addi_i32(addr, addr, 1 << size);
4014 }
4015 stride = nregs * (1 << size);
4016 }
4017 }
4018 dead_tmp(addr);
4019 if (rm != 15) {
4020 TCGv base;
4021
4022 base = load_reg(s, rn);
4023 if (rm == 13) {
4024 tcg_gen_addi_i32(base, base, stride);
4025 } else {
4026 TCGv index;
4027 index = load_reg(s, rm);
4028 tcg_gen_add_i32(base, base, index);
4029 dead_tmp(index);
4030 }
4031 store_reg(s, rn, base);
4032 }
4033 return 0;
4034 }
4035
4036 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4037 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4038 {
4039 tcg_gen_and_i32(t, t, c);
4040 tcg_gen_andc_i32(f, f, c);
4041 tcg_gen_or_i32(dest, t, f);
4042 }
4043
4044 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4045 {
4046 switch (size) {
4047 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4048 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4049 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4050 default: abort();
4051 }
4052 }
4053
4054 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4055 {
4056 switch (size) {
4057 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4058 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4059 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4060 default: abort();
4061 }
4062 }
4063
4064 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4065 {
4066 switch (size) {
4067 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4068 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4069 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4070 default: abort();
4071 }
4072 }
4073
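/* Variable shifts for the narrowing shift ops: q selects the
 * rounding variants and u the unsigned ones. Only 16- and 32-bit
 * element sizes can occur here. */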
4074 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4075 int q, int u)
4076 {
4077 if (q) {
4078 if (u) {
4079 switch (size) {
4080 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4081 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4082 default: abort();
4083 }
4084 } else {
4085 switch (size) {
4086 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4087 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4088 default: abort();
4089 }
4090 }
4091 } else {
4092 if (u) {
4093 switch (size) {
4094 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4095 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4096 default: abort();
4097 }
4098 } else {
4099 switch (size) {
4100 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4101 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4102 default: abort();
4103 }
4104 }
4105 }
4106 }
4107
4108 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4109 {
4110 if (u) {
4111 switch (size) {
4112 case 0: gen_helper_neon_widen_u8(dest, src); break;
4113 case 1: gen_helper_neon_widen_u16(dest, src); break;
4114 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4115 default: abort();
4116 }
4117 } else {
4118 switch (size) {
4119 case 0: gen_helper_neon_widen_s8(dest, src); break;
4120 case 1: gen_helper_neon_widen_s16(dest, src); break;
4121 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4122 default: abort();
4123 }
4124 }
4125 dead_tmp(src);
4126 }
4127
4128 static inline void gen_neon_addl(int size)
4129 {
4130 switch (size) {
4131 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4132 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4133 case 2: tcg_gen_add_i64(CPU_V001); break;
4134 default: abort();
4135 }
4136 }
4137
4138 static inline void gen_neon_subl(int size)
4139 {
4140 switch (size) {
4141 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4142 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4143 case 2: tcg_gen_sub_i64(CPU_V001); break;
4144 default: abort();
4145 }
4146 }
4147
4148 static inline void gen_neon_negl(TCGv_i64 var, int size)
4149 {
4150 switch (size) {
4151 case 0: gen_helper_neon_negl_u16(var, var); break;
4152 case 1: gen_helper_neon_negl_u32(var, var); break;
4153 case 2: gen_helper_neon_negl_u64(var, var); break;
4154 default: abort();
4155 }
4156 }
4157
4158 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4159 {
4160 switch (size) {
4161 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4162 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4163 default: abort();
4164 }
4165 }
4166
4167 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4168 {
4169 TCGv_i64 tmp;
4170
4171 switch ((size << 1) | u) {
4172 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4173 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4174 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4175 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4176 case 4:
4177 tmp = gen_muls_i64_i32(a, b);
4178 tcg_gen_mov_i64(dest, tmp);
4179 break;
4180 case 5:
4181 tmp = gen_mulu_i64_i32(a, b);
4182 tcg_gen_mov_i64(dest, tmp);
4183 break;
4184 default: abort();
4185 }
4186 }
4187
4188 /* Translate a NEON data processing instruction. Return nonzero if the
4189 instruction is invalid.
4190 We process data in a mixture of 32-bit and 64-bit chunks.
4191 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4192
4193 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4194 {
4195 int op;
4196 int q;
4197 int rd, rn, rm;
4198 int size;
4199 int shift;
4200 int pass;
4201 int count;
4202 int pairwise;
4203 int u;
4204 int n;
4205 uint32_t imm, mask;
4206 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4207 TCGv_i64 tmp64;
4208
4209 if (!s->vfp_enabled)
4210 return 1;
4211 q = (insn & (1 << 6)) != 0;
4212 u = (insn >> 24) & 1;
4213 VFP_DREG_D(rd, insn);
4214 VFP_DREG_N(rn, insn);
4215 VFP_DREG_M(rm, insn);
4216 size = (insn >> 20) & 3;
4217 if ((insn & (1 << 23)) == 0) {
4218 /* Three register same length. */
4219 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4220 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4221 || op == 10 || op == 11 || op == 16)) {
4222 /* 64-bit element instructions. */
4223 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4224 neon_load_reg64(cpu_V0, rn + pass);
4225 neon_load_reg64(cpu_V1, rm + pass);
4226 switch (op) {
4227 case 1: /* VQADD */
4228 if (u) {
4229 gen_helper_neon_add_saturate_u64(CPU_V001);
4230 } else {
4231 gen_helper_neon_add_saturate_s64(CPU_V001);
4232 }
4233 break;
4234 case 5: /* VQSUB */
4235 if (u) {
4236 gen_helper_neon_sub_saturate_u64(CPU_V001);
4237 } else {
4238 gen_helper_neon_sub_saturate_s64(CPU_V001);
4239 }
4240 break;
4241 case 8: /* VSHL */
4242 if (u) {
4243 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4244 } else {
4245 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4246 }
4247 break;
4248 case 9: /* VQSHL */
4249 if (u) {
4250 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4251 cpu_V1, cpu_V0);
4252 } else {
4253 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4254 cpu_V1, cpu_V0);
4255 }
4256 break;
4257 case 10: /* VRSHL */
4258 if (u) {
4259 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4260 } else {
4261 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4262 }
4263 break;
4264 case 11: /* VQRSHL */
4265 if (u) {
4266 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4267 cpu_V1, cpu_V0);
4268 } else {
4269 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4270 cpu_V1, cpu_V0);
4271 }
4272 break;
4273 case 16:
4274 if (u) {
4275 tcg_gen_sub_i64(CPU_V001);
4276 } else {
4277 tcg_gen_add_i64(CPU_V001);
4278 }
4279 break;
4280 default:
4281 abort();
4282 }
4283 neon_store_reg64(cpu_V0, rd + pass);
4284 }
4285 return 0;
4286 }
4287 switch (op) {
4288 case 8: /* VSHL */
4289 case 9: /* VQSHL */
4290 case 10: /* VRSHL */
4291 case 11: /* VQRSHL */
4292 {
4293 int rtmp;
4294 /* Shift instruction operands are reversed. */
4295 rtmp = rn;
4296 rn = rm;
4297 rm = rtmp;
4298 pairwise = 0;
4299 }
4300 break;
4301 case 20: /* VPMAX */
4302 case 21: /* VPMIN */
4303 case 23: /* VPADD */
4304 pairwise = 1;
4305 break;
4306 case 26: /* VPADD (float) */
4307 pairwise = (u && size < 2);
4308 break;
4309 case 30: /* VPMIN/VPMAX (float) */
4310 pairwise = u;
4311 break;
4312 default:
4313 pairwise = 0;
4314 break;
4315 }
4316
4317 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4318
4319 if (pairwise) {
4320 /* Pairwise. */
4321 if (q)
4322 n = (pass & 1) * 2;
4323 else
4324 n = 0;
4325 if (pass < q + 1) {
4326 tmp = neon_load_reg(rn, n);
4327 tmp2 = neon_load_reg(rn, n + 1);
4328 } else {
4329 tmp = neon_load_reg(rm, n);
4330 tmp2 = neon_load_reg(rm, n + 1);
4331 }
4332 } else {
4333 /* Elementwise. */
4334 tmp = neon_load_reg(rn, pass);
4335 tmp2 = neon_load_reg(rm, pass);
4336 }
4337 switch (op) {
4338 case 0: /* VHADD */
4339 GEN_NEON_INTEGER_OP(hadd);
4340 break;
4341 case 1: /* VQADD */
4342 GEN_NEON_INTEGER_OP_ENV(qadd);
4343 break;
4344 case 2: /* VRHADD */
4345 GEN_NEON_INTEGER_OP(rhadd);
4346 break;
4347 case 3: /* Logic ops. */
4348 switch ((u << 2) | size) {
4349 case 0: /* VAND */
4350 tcg_gen_and_i32(tmp, tmp, tmp2);
4351 break;
4352 case 1: /* BIC */
4353 tcg_gen_andc_i32(tmp, tmp, tmp2);
4354 break;
4355 case 2: /* VORR */
4356 tcg_gen_or_i32(tmp, tmp, tmp2);
4357 break;
4358 case 3: /* VORN */
4359 tcg_gen_orc_i32(tmp, tmp, tmp2);
4360 break;
4361 case 4: /* VEOR */
4362 tcg_gen_xor_i32(tmp, tmp, tmp2);
4363 break;
4364 case 5: /* VBSL */
4365 tmp3 = neon_load_reg(rd, pass);
4366 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4367 dead_tmp(tmp3);
4368 break;
4369 case 6: /* VBIT */
4370 tmp3 = neon_load_reg(rd, pass);
4371 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4372 dead_tmp(tmp3);
4373 break;
4374 case 7: /* VBIF */
4375 tmp3 = neon_load_reg(rd, pass);
4376 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4377 dead_tmp(tmp3);
4378 break;
4379 }
4380 break;
4381 case 4: /* VHSUB */
4382 GEN_NEON_INTEGER_OP(hsub);
4383 break;
4384 case 5: /* VQSUB */
4385 GEN_NEON_INTEGER_OP_ENV(qsub);
4386 break;
4387 case 6: /* VCGT */
4388 GEN_NEON_INTEGER_OP(cgt);
4389 break;
4390 case 7: /* VCGE */
4391 GEN_NEON_INTEGER_OP(cge);
4392 break;
4393 case 8: /* VSHL */
4394 GEN_NEON_INTEGER_OP(shl);
4395 break;
4396 case 9: /* VQSHL */
4397 GEN_NEON_INTEGER_OP_ENV(qshl);
4398 break;
4399 case 10: /* VRSHL */
4400 GEN_NEON_INTEGER_OP(rshl);
4401 break;
4402 case 11: /* VQRSHL */
4403 GEN_NEON_INTEGER_OP_ENV(qrshl);
4404 break;
4405 case 12: /* VMAX */
4406 GEN_NEON_INTEGER_OP(max);
4407 break;
4408 case 13: /* VMIN */
4409 GEN_NEON_INTEGER_OP(min);
4410 break;
4411 case 14: /* VABD */
4412 GEN_NEON_INTEGER_OP(abd);
4413 break;
4414 case 15: /* VABA */
4415 GEN_NEON_INTEGER_OP(abd);
4416 dead_tmp(tmp2);
4417 tmp2 = neon_load_reg(rd, pass);
4418 gen_neon_add(size, tmp, tmp2);
4419 break;
4420 case 16:
4421 if (!u) { /* VADD */
4422 if (gen_neon_add(size, tmp, tmp2))
4423 return 1;
4424 } else { /* VSUB */
4425 switch (size) {
4426 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4427 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4428 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4429 default: return 1;
4430 }
4431 }
4432 break;
4433 case 17:
4434 if (!u) { /* VTST */
4435 switch (size) {
4436 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4437 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4438 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4439 default: return 1;
4440 }
4441 } else { /* VCEQ */
4442 switch (size) {
4443 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4444 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4445 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4446 default: return 1;
4447 }
4448 }
4449 break;
4450 case 18: /* Multiply. */
4451 switch (size) {
4452 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4453 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4454 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4455 default: return 1;
4456 }
4457 dead_tmp(tmp2);
4458 tmp2 = neon_load_reg(rd, pass);
4459 if (u) { /* VMLS */
4460 gen_neon_rsb(size, tmp, tmp2);
4461 } else { /* VMLA */
4462 gen_neon_add(size, tmp, tmp2);
4463 }
4464 break;
4465 case 19: /* VMUL */
4466 if (u) { /* polynomial */
4467 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4468 } else { /* Integer */
4469 switch (size) {
4470 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4471 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4472 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4473 default: return 1;
4474 }
4475 }
4476 break;
4477 case 20: /* VPMAX */
4478 GEN_NEON_INTEGER_OP(pmax);
4479 break;
4480 case 21: /* VPMIN */
4481 GEN_NEON_INTEGER_OP(pmin);
4482 break;
4483 case 22: /* Multiply high. */
4484 if (!u) { /* VQDMULH */
4485 switch (size) {
4486 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4487 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4488 default: return 1;
4489 }
4490 } else { /* VQRDMULH */
4491 switch (size) {
4492 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4493 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4494 default: return 1;
4495 }
4496 }
4497 break;
4498 case 23: /* VPADD */
4499 if (u)
4500 return 1;
4501 switch (size) {
4502 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4503 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4504 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4505 default: return 1;
4506 }
4507 break;
4508 case 26: /* Floating point arithmetic. */
4509 switch ((u << 2) | size) {
4510 case 0: /* VADD */
4511 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4512 break;
4513 case 2: /* VSUB */
4514 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4515 break;
4516 case 4: /* VPADD */
4517 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4518 break;
4519 case 6: /* VABD */
4520 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4521 break;
4522 default:
4523 return 1;
4524 }
4525 break;
4526 case 27: /* Float multiply. */
4527 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4528 if (!u) {
4529 dead_tmp(tmp2);
4530 tmp2 = neon_load_reg(rd, pass);
4531 if (size == 0) {
4532 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4533 } else {
4534 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4535 }
4536 }
4537 break;
4538 case 28: /* Float compare. */
4539 if (!u) {
4540 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4541 } else {
4542 if (size == 0)
4543 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4544 else
4545 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4546 }
4547 break;
4548 case 29: /* Float compare absolute. */
4549 if (!u)
4550 return 1;
4551 if (size == 0)
4552 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4553 else
4554 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4555 break;
4556 case 30: /* Float min/max. */
4557 if (size == 0)
4558 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4559 else
4560 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4561 break;
4562 case 31:
4563 if (size == 0)
4564 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4565 else
4566 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4567 break;
4568 default:
4569 abort();
4570 }
4571 dead_tmp(tmp2);
4572
4573 /* Save the result. For elementwise operations we can put it
4574 straight into the destination register. For pairwise operations
4575 we have to be careful to avoid clobbering the source operands. */
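        /* Illustrative example (not in the original source):
           VPADD d0, d1, d0 has rd == rm, so writing the pass-0
           result straight into d0 would clobber the second source
           pair before pass 1 reads it; hence the scratch staging. */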
4576 if (pairwise && rd == rm) {
4577 neon_store_scratch(pass, tmp);
4578 } else {
4579 neon_store_reg(rd, pass, tmp);
4580 }
4581
4582 } /* for pass */
4583 if (pairwise && rd == rm) {
4584 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4585 tmp = neon_load_scratch(pass);
4586 neon_store_reg(rd, pass, tmp);
4587 }
4588 }
4589 /* End of 3 register same size operations. */
4590 } else if (insn & (1 << 4)) {
4591 if ((insn & 0x00380080) != 0) {
4592 /* Two registers and shift. */
4593 op = (insn >> 8) & 0xf;
4594 if (insn & (1 << 7)) {
4595 /* 64-bit shift. */
4596 size = 3;
4597 } else {
4598 size = 2;
4599 while ((insn & (1 << (size + 19))) == 0)
4600 size--;
4601 }
4602 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4603 /* To avoid excessive duplication of ops we implement shift
4604 by immediate using the variable shift operations. */
4605 if (op < 8) {
4606 /* Shift by immediate:
4607 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4608 /* Right shifts are encoded as N - shift, where N is the
4609 element size in bits. */
4610 if (op <= 4)
4611 shift = shift - (1 << (size + 3));
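                /* Illustrative (note added, not in the original):
                   for an 8-bit VSHR #3 the immediate field holds
                   8 - 3 = 5, so shift becomes 5 - 8 = -3, and the
                   variable shift helpers treat a negative count as
                   a right shift by 3. */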
4612 if (size == 3) {
4613 count = q + 1;
4614 } else {
4615 count = q ? 4 : 2;
4616 }
4617 switch (size) {
4618 case 0:
4619 imm = (uint8_t) shift;
4620 imm |= imm << 8;
4621 imm |= imm << 16;
4622 break;
4623 case 1:
4624 imm = (uint16_t) shift;
4625 imm |= imm << 16;
4626 break;
4627 case 2:
4628 case 3:
4629 imm = shift;
4630 break;
4631 default:
4632 abort();
4633 }
4634
4635 for (pass = 0; pass < count; pass++) {
4636 if (size == 3) {
4637 neon_load_reg64(cpu_V0, rm + pass);
4638 tcg_gen_movi_i64(cpu_V1, imm);
4639 switch (op) {
4640 case 0: /* VSHR */
4641 case 1: /* VSRA */
4642 if (u)
4643 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4644 else
4645 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4646 break;
4647 case 2: /* VRSHR */
4648 case 3: /* VRSRA */
4649 if (u)
4650 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4651 else
4652 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4653 break;
4654 case 4: /* VSRI */
4655 if (!u)
4656 return 1;
4657 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4658 break;
4659 case 5: /* VSHL, VSLI */
4660 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4661 break;
4662 case 6: /* VQSHLU */
4663 if (u) {
4664 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4665 cpu_V0, cpu_V1);
4666 } else {
4667 return 1;
4668 }
4669 break;
4670 case 7: /* VQSHL */
4671 if (u) {
4672 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4673 cpu_V0, cpu_V1);
4674 } else {
4675 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4676 cpu_V0, cpu_V1);
4677 }
4678 break;
4679 }
4680 if (op == 1 || op == 3) {
4681 /* Accumulate. */
4682 neon_load_reg64(cpu_V0, rd + pass);
4683 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4684 } else if (op == 4 || (op == 5 && u)) {
4685 /* Insert */
4686 cpu_abort(env, "VS[LR]I.64 not implemented");
4687 }
4688 neon_store_reg64(cpu_V0, rd + pass);
4689 } else { /* size < 3 */
4690 /* Operands in T0 and T1. */
4691 tmp = neon_load_reg(rm, pass);
4692 tmp2 = new_tmp();
4693 tcg_gen_movi_i32(tmp2, imm);
4694 switch (op) {
4695 case 0: /* VSHR */
4696 case 1: /* VSRA */
4697 GEN_NEON_INTEGER_OP(shl);
4698 break;
4699 case 2: /* VRSHR */
4700 case 3: /* VRSRA */
4701 GEN_NEON_INTEGER_OP(rshl);
4702 break;
4703 case 4: /* VSRI */
4704 if (!u)
4705 return 1;
4706 GEN_NEON_INTEGER_OP(shl);
4707 break;
4708 case 5: /* VSHL, VSLI */
4709 switch (size) {
4710 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4711 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4712 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4713 default: return 1;
4714 }
4715 break;
4716 case 6: /* VQSHLU */
4717 if (!u) {
4718 return 1;
4719 }
4720 switch (size) {
4721 case 0:
4722 gen_helper_neon_qshlu_s8(tmp, cpu_env,
4723 tmp, tmp2);
4724 break;
4725 case 1:
4726 gen_helper_neon_qshlu_s16(tmp, cpu_env,
4727 tmp, tmp2);
4728 break;
4729 case 2:
4730 gen_helper_neon_qshlu_s32(tmp, cpu_env,
4731 tmp, tmp2);
4732 break;
4733 default:
4734 return 1;
4735 }
4736 break;
4737 case 7: /* VQSHL */
4738 GEN_NEON_INTEGER_OP_ENV(qshl);
4739 break;
4740 }
4741 dead_tmp(tmp2);
4742
4743 if (op == 1 || op == 3) {
4744 /* Accumulate. */
4745 tmp2 = neon_load_reg(rd, pass);
4746 gen_neon_add(size, tmp2, tmp);
4747 dead_tmp(tmp2);
4748 } else if (op == 4 || (op == 5 && u)) {
4749 /* Insert */
4750 switch (size) {
4751 case 0:
4752 if (op == 4)
4753 mask = 0xff >> -shift;
4754 else
4755 mask = (uint8_t)(0xff << shift);
4756 mask |= mask << 8;
4757 mask |= mask << 16;
4758 break;
4759 case 1:
4760 if (op == 4)
4761 mask = 0xffff >> -shift;
4762 else
4763 mask = (uint16_t)(0xffff << shift);
4764 mask |= mask << 16;
4765 break;
4766 case 2:
4767 if (shift < -31 || shift > 31) {
4768 mask = 0;
4769 } else {
4770 if (op == 4)
4771 mask = 0xffffffffu >> -shift;
4772 else
4773 mask = 0xffffffffu << shift;
4774 }
4775 break;
4776 default:
4777 abort();
4778 }
4779 tmp2 = neon_load_reg(rd, pass);
4780 tcg_gen_andi_i32(tmp, tmp, mask);
4781 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
4782 tcg_gen_or_i32(tmp, tmp, tmp2);
4783 dead_tmp(tmp2);
4784 }
4785 neon_store_reg(rd, pass, tmp);
4786 }
4787 } /* for pass */
4788 } else if (op < 10) {
4789 /* Shift by immediate and narrow:
4790 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4791 shift = shift - (1 << (size + 3));
4792 size++;
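            /* Illustrative (explanatory note): VSHRN.I16 #3 arrives
               with size == 0 (8-bit results); after the adjustment
               shift == -3 and size == 1 describes the 16-bit source
               elements. */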
4793 switch (size) {
4794 case 1:
4795 imm = (uint16_t)shift;
4796 imm |= imm << 16;
4797 tmp2 = tcg_const_i32(imm);
4798 TCGV_UNUSED_I64(tmp64);
4799 break;
4800 case 2:
4801 imm = (uint32_t)shift;
4802 tmp2 = tcg_const_i32(imm);
4803 TCGV_UNUSED_I64(tmp64);
4804 break;
4805 case 3:
4806 tmp64 = tcg_const_i64(shift);
4807 TCGV_UNUSED(tmp2);
4808 break;
4809 default:
4810 abort();
4811 }
4812
4813 for (pass = 0; pass < 2; pass++) {
4814 if (size == 3) {
4815 neon_load_reg64(cpu_V0, rm + pass);
4816 if (q) {
4817 if (u)
4818 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
4819 else
4820 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
4821 } else {
4822 if (u)
4823 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
4824 else
4825 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
4826 }
4827 } else {
4828 tmp = neon_load_reg(rm + pass, 0);
4829 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4830 tmp3 = neon_load_reg(rm + pass, 1);
4831 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4832 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4833 dead_tmp(tmp);
4834 dead_tmp(tmp3);
4835 }
4836 tmp = new_tmp();
4837 if (op == 8 && !u) {
4838 gen_neon_narrow(size - 1, tmp, cpu_V0);
4839 } else {
4840 if (op == 8)
4841 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4842 else
4843 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4844 }
4845 neon_store_reg(rd, pass, tmp);
4846 } /* for pass */
4847 if (size == 3) {
4848 tcg_temp_free_i64(tmp64);
4849 } else {
4850 dead_tmp(tmp2);
4851 }
4852 } else if (op == 10) {
4853 /* VSHLL */
4854 if (q || size == 3)
4855 return 1;
4856 tmp = neon_load_reg(rm, 0);
4857 tmp2 = neon_load_reg(rm, 1);
4858 for (pass = 0; pass < 2; pass++) {
4859 if (pass == 1)
4860 tmp = tmp2;
4861
4862 gen_neon_widen(cpu_V0, tmp, size, u);
4863
4864 if (shift != 0) {
4865 /* The shift is less than the width of the source
4866 type, so we can just shift the whole register. */
4867 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4868 if (size < 2 || !u) {
4869 uint64_t imm64;
4870 if (size == 0) {
4871 imm = (0xffu >> (8 - shift));
4872 imm |= imm << 16;
4873 } else {
4874 imm = 0xffff >> (16 - shift);
4875 }
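                    /* Explanatory note (not in the original): imm64,
                       built below, marks the low 'shift' bits of each
                       widened element. A signed value shifted left can
                       spill its sign bits into those positions of the
                       next element, so they must be cleared. */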
4876 imm64 = imm | (((uint64_t)imm) << 32);
4877 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4878 }
4879 }
4880 neon_store_reg64(cpu_V0, rd + pass);
4881 }
4882 } else if (op >= 14) {
4883 /* VCVT fixed-point. */
4884 /* We have already masked out the must-be-1 top bit of imm6,
4885 * hence this 32-shift where the ARM ARM has 64-imm6.
4886 */
4887 shift = 32 - shift;
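            /* Illustrative (note added): for 12 fraction bits the
               ARM ARM encodes imm6 = 64 - 12 = 52 (0b110100); with
               the must-be-1 top bit masked off the field reads 20,
               and 32 - 20 recovers the 12. */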
4888 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4889 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4890 if (!(op & 1)) {
4891 if (u)
4892 gen_vfp_ulto(0, shift);
4893 else
4894 gen_vfp_slto(0, shift);
4895 } else {
4896 if (u)
4897 gen_vfp_toul(0, shift);
4898 else
4899 gen_vfp_tosl(0, shift);
4900 }
4901 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4902 }
4903 } else {
4904 return 1;
4905 }
4906 } else { /* (insn & 0x00380080) == 0 */
4907 int invert;
4908
4909 op = (insn >> 8) & 0xf;
4910 /* One register and immediate. */
4911 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4912 invert = (insn & (1 << 5)) != 0;
4913 switch (op) {
4914 case 0: case 1:
4915 /* no-op */
4916 break;
4917 case 2: case 3:
4918 imm <<= 8;
4919 break;
4920 case 4: case 5:
4921 imm <<= 16;
4922 break;
4923 case 6: case 7:
4924 imm <<= 24;
4925 break;
4926 case 8: case 9:
4927 imm |= imm << 16;
4928 break;
4929 case 10: case 11:
4930 imm = (imm << 8) | (imm << 24);
4931 break;
4932 case 12:
4933 imm = (imm << 8) | 0xff;
4934 break;
4935 case 13:
4936 imm = (imm << 16) | 0xffff;
4937 break;
4938 case 14:
4939 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4940 if (invert)
4941 imm = ~imm;
4942 break;
4943 case 15:
4944 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4945 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
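            /* This expands the 8-bit value to a single-precision
               pattern; illustratively, imm8 = 0x70 yields 0x3f800000,
               i.e. 1.0f. (Explanatory note, not in the original.) */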
4946 break;
4947 }
4948 if (invert)
4949 imm = ~imm;
4950
4951 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4952 if (op & 1 && op < 12) {
4953 tmp = neon_load_reg(rd, pass);
4954 if (invert) {
4955 /* The immediate value has already been inverted, so
4956 BIC becomes AND. */
4957 tcg_gen_andi_i32(tmp, tmp, imm);
4958 } else {
4959 tcg_gen_ori_i32(tmp, tmp, imm);
4960 }
4961 } else {
4962 /* VMOV, VMVN. */
4963 tmp = new_tmp();
4964 if (op == 14 && invert) {
4965 uint32_t val;
4966 val = 0;
4967 for (n = 0; n < 4; n++) {
4968 if (imm & (1 << (n + (pass & 1) * 4)))
4969 val |= 0xff << (n * 8);
4970 }
4971 tcg_gen_movi_i32(tmp, val);
4972 } else {
4973 tcg_gen_movi_i32(tmp, imm);
4974 }
4975 }
4976 neon_store_reg(rd, pass, tmp);
4977 }
4978 }
4979 } else { /* (insn & 0x00800010 == 0x00800000) */
4980 if (size != 3) {
4981 op = (insn >> 8) & 0xf;
4982 if ((insn & (1 << 6)) == 0) {
4983 /* Three registers of different lengths. */
4984 int src1_wide;
4985 int src2_wide;
4986 int prewiden;
4987 /* prewiden, src1_wide, src2_wide */
4988 static const int neon_3reg_wide[16][3] = {
4989 {1, 0, 0}, /* VADDL */
4990 {1, 1, 0}, /* VADDW */
4991 {1, 0, 0}, /* VSUBL */
4992 {1, 1, 0}, /* VSUBW */
4993 {0, 1, 1}, /* VADDHN */
4994 {0, 0, 0}, /* VABAL */
4995 {0, 1, 1}, /* VSUBHN */
4996 {0, 0, 0}, /* VABDL */
4997 {0, 0, 0}, /* VMLAL */
4998 {0, 0, 0}, /* VQDMLAL */
4999 {0, 0, 0}, /* VMLSL */
5000 {0, 0, 0}, /* VQDMLSL */
5001 {0, 0, 0}, /* Integer VMULL */
5002 {0, 0, 0}, /* VQDMULL */
5003 {0, 0, 0} /* Polynomial VMULL */
5004 };
5005
5006 prewiden = neon_3reg_wide[op][0];
5007 src1_wide = neon_3reg_wide[op][1];
5008 src2_wide = neon_3reg_wide[op][2];
5009
5010 if (size == 0 && (op == 9 || op == 11 || op == 13))
5011 return 1;
5012
5013 /* Avoid overlapping operands. Wide source operands are
5014 always aligned so will never overlap with wide
5015 destinations in problematic ways. */
5016 if (rd == rm && !src2_wide) {
5017 tmp = neon_load_reg(rm, 1);
5018 neon_store_scratch(2, tmp);
5019 } else if (rd == rn && !src1_wide) {
5020 tmp = neon_load_reg(rn, 1);
5021 neon_store_scratch(2, tmp);
5022 }
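            /* Illustrative (note added): VADDL q0, d0, d1 has
               rd == rn, so pass 0 writes d0 and would clobber the
               second half of source d0 before pass 1 reads it;
               that half is parked in the scratch slot above. */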
5023 TCGV_UNUSED(tmp3);
5024 for (pass = 0; pass < 2; pass++) {
5025 if (src1_wide) {
5026 neon_load_reg64(cpu_V0, rn + pass);
5027 TCGV_UNUSED(tmp);
5028 } else {
5029 if (pass == 1 && rd == rn) {
5030 tmp = neon_load_scratch(2);
5031 } else {
5032 tmp = neon_load_reg(rn, pass);
5033 }
5034 if (prewiden) {
5035 gen_neon_widen(cpu_V0, tmp, size, u);
5036 }
5037 }
5038 if (src2_wide) {
5039 neon_load_reg64(cpu_V1, rm + pass);
5040 TCGV_UNUSED(tmp2);
5041 } else {
5042 if (pass == 1 && rd == rm) {
5043 tmp2 = neon_load_scratch(2);
5044 } else {
5045 tmp2 = neon_load_reg(rm, pass);
5046 }
5047 if (prewiden) {
5048 gen_neon_widen(cpu_V1, tmp2, size, u);
5049 }
5050 }
5051 switch (op) {
5052 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5053 gen_neon_addl(size);
5054 break;
5055 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5056 gen_neon_subl(size);
5057 break;
5058 case 5: case 7: /* VABAL, VABDL */
5059 switch ((size << 1) | u) {
5060 case 0:
5061 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5062 break;
5063 case 1:
5064 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5065 break;
5066 case 2:
5067 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5068 break;
5069 case 3:
5070 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5071 break;
5072 case 4:
5073 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5074 break;
5075 case 5:
5076 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5077 break;
5078 default: abort();
5079 }
5080 dead_tmp(tmp2);
5081 dead_tmp(tmp);
5082 break;
5083 case 8: case 9: case 10: case 11: case 12: case 13:
5084 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5085 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5086 dead_tmp(tmp2);
5087 dead_tmp(tmp);
5088 break;
5089 case 14: /* Polynomial VMULL */
5090 cpu_abort(env, "Polynomial VMULL not implemented");
5091
5092 default: /* 15 is RESERVED. */
5093 return 1;
5094 }
5095 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5096 /* Accumulate. */
5097 if (op == 10 || op == 11) {
5098 gen_neon_negl(cpu_V0, size);
5099 }
5100
5101 if (op != 13) {
5102 neon_load_reg64(cpu_V1, rd + pass);
5103 }
5104
5105 switch (op) {
5106 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
5107 gen_neon_addl(size);
5108 break;
5109 case 9: case 11: /* VQDMLAL, VQDMLSL */
5110 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5111 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5112 break;
5114 case 13: /* VQDMULL */
5115 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5116 break;
5117 default:
5118 abort();
5119 }
5120 neon_store_reg64(cpu_V0, rd + pass);
5121 } else if (op == 4 || op == 6) {
5122 /* Narrowing operation. */
5123 tmp = new_tmp();
5124 if (!u) {
5125 switch (size) {
5126 case 0:
5127 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5128 break;
5129 case 1:
5130 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5131 break;
5132 case 2:
5133 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5134 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5135 break;
5136 default: abort();
5137 }
5138 } else {
5139 switch (size) {
5140 case 0:
5141 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5142 break;
5143 case 1:
5144 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5145 break;
5146 case 2:
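                            /* Round to nearest: add half of the
                               discarded low 32 bits (1 << 31) before
                               taking the high half. (Explanatory
                               note, not in the original.) */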
5147 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5148 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5149 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5150 break;
5151 default: abort();
5152 }
5153 }
5154 if (pass == 0) {
5155 tmp3 = tmp;
5156 } else {
5157 neon_store_reg(rd, 0, tmp3);
5158 neon_store_reg(rd, 1, tmp);
5159 }
5160 } else {
5161 /* Write back the result. */
5162 neon_store_reg64(cpu_V0, rd + pass);
5163 }
5164 }
5165 } else {
5166 /* Two registers and a scalar. */
5167 switch (op) {
5168 case 0: /* Integer VMLA scalar */
5169 case 1: /* Float VMLA scalar */
5170 case 4: /* Integer VMLS scalar */
5171 case 5: /* Floating point VMLS scalar */
5172 case 8: /* Integer VMUL scalar */
5173 case 9: /* Floating point VMUL scalar */
5174 case 12: /* VQDMULH scalar */
5175 case 13: /* VQRDMULH scalar */
5176 tmp = neon_get_scalar(size, rm);
5177 neon_store_scratch(0, tmp);
5178 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5179 tmp = neon_load_scratch(0);
5180 tmp2 = neon_load_reg(rn, pass);
5181 if (op == 12) {
5182 if (size == 1) {
5183 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5184 } else {
5185 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5186 }
5187 } else if (op == 13) {
5188 if (size == 1) {
5189 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5190 } else {
5191 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5192 }
5193 } else if (op & 1) {
5194 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5195 } else {
5196 switch (size) {
5197 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5198 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5199 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5200 default: return 1;
5201 }
5202 }
5203 dead_tmp(tmp2);
5204 if (op < 8) {
5205 /* Accumulate. */
5206 tmp2 = neon_load_reg(rd, pass);
5207 switch (op) {
5208 case 0:
5209 gen_neon_add(size, tmp, tmp2);
5210 break;
5211 case 1:
5212 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5213 break;
5214 case 4:
5215 gen_neon_rsb(size, tmp, tmp2);
5216 break;
5217 case 5:
5218 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5219 break;
5220 default:
5221 abort();
5222 }
5223 dead_tmp(tmp2);
5224 }
5225 neon_store_reg(rd, pass, tmp);
5226 }
5227 break;
5228 case 2: /* VMLAL scalar */
5229 case 3: /* VQDMLAL scalar */
5230 case 6: /* VMLSL scalar */
5231 case 7: /* VQDMLSL scalar */
5232 case 10: /* VMULL scalar */
5233 case 11: /* VQDMULL scalar */
5234 if (size == 0 && (op == 3 || op == 7 || op == 11))
5235 return 1;
5236
5237 tmp2 = neon_get_scalar(size, rm);
5238 tmp3 = neon_load_reg(rn, 1);
5239
5240 for (pass = 0; pass < 2; pass++) {
5241 if (pass == 0) {
5242 tmp = neon_load_reg(rn, 0);
5243 } else {
5244 tmp = tmp3;
5245 }
5246 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5247 dead_tmp(tmp);
5248 if (op == 6 || op == 7) {
5249 gen_neon_negl(cpu_V0, size);
5250 }
5251 if (op != 11) {
5252 neon_load_reg64(cpu_V1, rd + pass);
5253 }
5254 switch (op) {
5255 case 2: case 6:
5256 gen_neon_addl(size);
5257 break;
5258 case 3: case 7:
5259 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5260 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5261 break;
5262 case 10:
5263 /* no-op */
5264 break;
5265 case 11:
5266 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5267 break;
5268 default:
5269 abort();
5270 }
5271 neon_store_reg64(cpu_V0, rd + pass);
5272 }
5273
5274 dead_tmp(tmp2);
5275
5276 break;
5277 default: /* 14 and 15 are RESERVED */
5278 return 1;
5279 }
5280 }
5281 } else { /* size == 3 */
5282 if (!u) {
5283 /* Extract. */
5284 imm = (insn >> 8) & 0xf;
5285
5286 if (imm > 7 && !q)
5287 return 1;
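        /* VEXT extracts bytes starting at index imm from the
           concatenation Vm:Vn. Illustrative (note added):
           VEXT d0, d1, d2, #3 yields the top five bytes of d1
           followed by the low three bytes of d2. */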
5288
5289 if (imm == 0) {
5290 neon_load_reg64(cpu_V0, rn);
5291 if (q) {
5292 neon_load_reg64(cpu_V1, rn + 1);
5293 }
5294 } else if (imm == 8) {
5295 neon_load_reg64(cpu_V0, rn + 1);
5296 if (q) {
5297 neon_load_reg64(cpu_V1, rm);
5298 }
5299 } else if (q) {
5300 tmp64 = tcg_temp_new_i64();
5301 if (imm < 8) {
5302 neon_load_reg64(cpu_V0, rn);
5303 neon_load_reg64(tmp64, rn + 1);
5304 } else {
5305 neon_load_reg64(cpu_V0, rn + 1);
5306 neon_load_reg64(tmp64, rm);
5307 }
5308 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5309 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5310 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5311 if (imm < 8) {
5312 neon_load_reg64(cpu_V1, rm);
5313 } else {
5314 neon_load_reg64(cpu_V1, rm + 1);
5315 imm -= 8;
5316 }
5317 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5318 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5319 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5320 tcg_temp_free_i64(tmp64);
5321 } else {
5322 /* BUGFIX */
5323 neon_load_reg64(cpu_V0, rn);
5324 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5325 neon_load_reg64(cpu_V1, rm);
5326 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5327 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5328 }
5329 neon_store_reg64(cpu_V0, rd);
5330 if (q) {
5331 neon_store_reg64(cpu_V1, rd + 1);
5332 }
5333 } else if ((insn & (1 << 11)) == 0) {
5334 /* Two register misc. */
5335 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5336 size = (insn >> 18) & 3;
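            /* Explanatory note (not in the original): op packs insn
               bits 17:16 into bits 5:4 and bits 10:7 into bits 3:0;
               e.g. VTRN has bits 17:16 = 10 and bits 10:7 = 0001,
               giving op == 0b100001 == 33 below. */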
5337 switch (op) {
5338 case 0: /* VREV64 */
5339 if (size == 3)
5340 return 1;
5341 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5342 tmp = neon_load_reg(rm, pass * 2);
5343 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5344 switch (size) {
5345 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5346 case 1: gen_swap_half(tmp); break;
5347 case 2: /* no-op */ break;
5348 default: abort();
5349 }
5350 neon_store_reg(rd, pass * 2 + 1, tmp);
5351 if (size == 2) {
5352 neon_store_reg(rd, pass * 2, tmp2);
5353 } else {
5354 switch (size) {
5355 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5356 case 1: gen_swap_half(tmp2); break;
5357 default: abort();
5358 }
5359 neon_store_reg(rd, pass * 2, tmp2);
5360 }
5361 }
5362 break;
5363 case 4: case 5: /* VPADDL */
5364 case 12: case 13: /* VPADAL */
5365 if (size == 3)
5366 return 1;
5367 for (pass = 0; pass < q + 1; pass++) {
5368 tmp = neon_load_reg(rm, pass * 2);
5369 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5370 tmp = neon_load_reg(rm, pass * 2 + 1);
5371 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5372 switch (size) {
5373 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5374 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5375 case 2: tcg_gen_add_i64(CPU_V001); break;
5376 default: abort();
5377 }
5378 if (op >= 12) {
5379 /* Accumulate. */
5380 neon_load_reg64(cpu_V1, rd + pass);
5381 gen_neon_addl(size);
5382 }
5383 neon_store_reg64(cpu_V0, rd + pass);
5384 }
5385 break;
5386 case 33: /* VTRN */
5387 if (size == 2) {
5388 for (n = 0; n < (q ? 4 : 2); n += 2) {
5389 tmp = neon_load_reg(rm, n);
5390 tmp2 = neon_load_reg(rd, n + 1);
5391 neon_store_reg(rm, n, tmp2);
5392 neon_store_reg(rd, n + 1, tmp);
5393 }
5394 } else {
5395 goto elementwise;
5396 }
5397 break;
5398 case 34: /* VUZP */
5399 /* Reg Before After
5400 Rd A3 A2 A1 A0 B2 B0 A2 A0
5401 Rm B3 B2 B1 B0 B3 B1 A3 A1
5402 */
5403 if (size == 3)
5404 return 1;
5405 gen_neon_unzip(rd, q, 0, size);
5406 gen_neon_unzip(rm, q, 4, size);
5407 if (q) {
5408 static int unzip_order_q[8] =
5409 {0, 2, 4, 6, 1, 3, 5, 7};
5410 for (n = 0; n < 8; n++) {
5411 int reg = (n < 4) ? rd : rm;
5412 tmp = neon_load_scratch(unzip_order_q[n]);
5413 neon_store_reg(reg, n % 4, tmp);
5414 }
5415 } else {
5416 static int unzip_order[4] =
5417 {0, 4, 1, 5};
5418 for (n = 0; n < 4; n++) {
5419 int reg = (n < 2) ? rd : rm;
5420 tmp = neon_load_scratch(unzip_order[n]);
5421 neon_store_reg(reg, n % 2, tmp);
5422 }
5423 }
5424 break;
5425 case 35: /* VZIP */
5426 /* Reg Before After
5427 Rd A3 A2 A1 A0 B1 A1 B0 A0
5428 Rm B3 B2 B1 B0 B3 A3 B2 A2
5429 */
5430 if (size == 3)
5431 return 1;
5432 count = (q ? 4 : 2);
5433 for (n = 0; n < count; n++) {
5434 tmp = neon_load_reg(rd, n);
5435 tmp2 = neon_load_reg(rm, n);
5436 switch (size) {
5437 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5438 case 1: gen_neon_zip_u16(tmp, tmp2); break;
5439 case 2: /* no-op */; break;
5440 default: abort();
5441 }
5442 neon_store_scratch(n * 2, tmp);
5443 neon_store_scratch(n * 2 + 1, tmp2);
5444 }
5445 for (n = 0; n < count * 2; n++) {
5446 int reg = (n < count) ? rd : rm;
5447 tmp = neon_load_scratch(n);
5448 neon_store_reg(reg, n % count, tmp);
5449 }
5450 break;
5451 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5452 if (size == 3)
5453 return 1;
5454 TCGV_UNUSED(tmp2);
5455 for (pass = 0; pass < 2; pass++) {
5456 neon_load_reg64(cpu_V0, rm + pass);
5457 tmp = new_tmp();
5458 if (op == 36 && q == 0) {
5459 gen_neon_narrow(size, tmp, cpu_V0);
5460 } else if (q) {
5461 gen_neon_narrow_satu(size, tmp, cpu_V0);
5462 } else {
5463 gen_neon_narrow_sats(size, tmp, cpu_V0);
5464 }
5465 if (pass == 0) {
5466 tmp2 = tmp;
5467 } else {
5468 neon_store_reg(rd, 0, tmp2);
5469 neon_store_reg(rd, 1, tmp);
5470 }
5471 }
5472 break;
5473 case 38: /* VSHLL */
5474 if (q || size == 3)
5475 return 1;
5476 tmp = neon_load_reg(rm, 0);
5477 tmp2 = neon_load_reg(rm, 1);
5478 for (pass = 0; pass < 2; pass++) {
5479 if (pass == 1)
5480 tmp = tmp2;
5481 gen_neon_widen(cpu_V0, tmp, size, 1);
5482 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5483 neon_store_reg64(cpu_V0, rd + pass);
5484 }
5485 break;
5486 case 44: /* VCVT.F16.F32 */
5487 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5488 return 1;
5489 tmp = new_tmp();
5490 tmp2 = new_tmp();
5491 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5492 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5493 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5494 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5495 tcg_gen_shli_i32(tmp2, tmp2, 16);
5496 tcg_gen_or_i32(tmp2, tmp2, tmp);
5497 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5498 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5499 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5500 neon_store_reg(rd, 0, tmp2);
5501 tmp2 = new_tmp();
5502 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5503 tcg_gen_shli_i32(tmp2, tmp2, 16);
5504 tcg_gen_or_i32(tmp2, tmp2, tmp);
5505 neon_store_reg(rd, 1, tmp2);
5506 dead_tmp(tmp);
5507 break;
5508 case 46: /* VCVT.F32.F16 */
5509 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5510 return 1;
5511 tmp3 = new_tmp();
5512 tmp = neon_load_reg(rm, 0);
5513 tmp2 = neon_load_reg(rm, 1);
5514 tcg_gen_ext16u_i32(tmp3, tmp);
5515 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5516 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5517 tcg_gen_shri_i32(tmp3, tmp, 16);
5518 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5519 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5520 dead_tmp(tmp);
5521 tcg_gen_ext16u_i32(tmp3, tmp2);
5522 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5523 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5524 tcg_gen_shri_i32(tmp3, tmp2, 16);
5525 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5526 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5527 dead_tmp(tmp2);
5528 dead_tmp(tmp3);
5529 break;
5530 default:
5531 elementwise:
5532 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5533 if (op == 30 || op == 31 || op >= 58) {
5534 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5535 neon_reg_offset(rm, pass));
5536 TCGV_UNUSED(tmp);
5537 } else {
5538 tmp = neon_load_reg(rm, pass);
5539 }
5540 switch (op) {
5541 case 1: /* VREV32 */
5542 switch (size) {
5543 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5544 case 1: gen_swap_half(tmp); break;
5545 default: return 1;
5546 }
5547 break;
5548 case 2: /* VREV16 */
5549 if (size != 0)
5550 return 1;
5551 gen_rev16(tmp);
5552 break;
5553 case 8: /* CLS */
5554 switch (size) {
5555 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5556 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5557 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5558 default: return 1;
5559 }
5560 break;
5561 case 9: /* CLZ */
5562 switch (size) {
5563 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5564 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5565 case 2: gen_helper_clz(tmp, tmp); break;
5566 default: return 1;
5567 }
5568 break;
5569 case 10: /* CNT */
5570 if (size != 0)
5571 return 1;
5572 gen_helper_neon_cnt_u8(tmp, tmp);
5573 break;
5574 case 11: /* VNOT */
5575 if (size != 0)
5576 return 1;
5577 tcg_gen_not_i32(tmp, tmp);
5578 break;
5579 case 14: /* VQABS */
5580 switch (size) {
5581 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5582 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5583 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
5584 default: return 1;
5585 }
5586 break;
5587 case 15: /* VQNEG */
5588 switch (size) {
5589 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5590 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5591 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
5592 default: return 1;
5593 }
5594 break;
5595 case 16: case 19: /* VCGT #0, VCLE #0 */
5596 tmp2 = tcg_const_i32(0);
5597 switch(size) {
5598 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5599 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5600 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5601 default: return 1;
5602 }
5603 tcg_temp_free(tmp2);
5604 if (op == 19)
5605 tcg_gen_not_i32(tmp, tmp);
5606 break;
5607 case 17: case 20: /* VCGE #0, VCLT #0 */
5608 tmp2 = tcg_const_i32(0);
5609 switch(size) {
5610 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5611 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5612 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5613 default: return 1;
5614 }
5615 tcg_temp_free(tmp2);
5616 if (op == 20)
5617 tcg_gen_not_i32(tmp, tmp);
5618 break;
5619 case 18: /* VCEQ #0 */
5620 tmp2 = tcg_const_i32(0);
5621 switch(size) {
5622 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5623 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5624 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5625 default: return 1;
5626 }
5627 tcg_temp_free(tmp2);
5628 break;
5629 case 22: /* VABS */
5630 switch(size) {
5631 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5632 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5633 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5634 default: return 1;
5635 }
5636 break;
5637 case 23: /* VNEG */
5638 if (size == 3)
5639 return 1;
5640 tmp2 = tcg_const_i32(0);
5641 gen_neon_rsb(size, tmp, tmp2);
5642 tcg_temp_free(tmp2);
5643 break;
5644 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5645 tmp2 = tcg_const_i32(0);
5646 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5647 tcg_temp_free(tmp2);
5648 if (op == 27)
5649 tcg_gen_not_i32(tmp, tmp);
5650 break;
5651 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5652 tmp2 = tcg_const_i32(0);
5653 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5654 tcg_temp_free(tmp2);
5655 if (op == 28)
5656 tcg_gen_not_i32(tmp, tmp);
5657 break;
5658 case 26: /* Float VCEQ #0 */
5659 tmp2 = tcg_const_i32(0);
5660 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5661 tcg_temp_free(tmp2);
5662 break;
5663 case 30: /* Float VABS */
5664 gen_vfp_abs(0);
5665 break;
5666 case 31: /* Float VNEG */
5667 gen_vfp_neg(0);
5668 break;
5669 case 32: /* VSWP */
5670 tmp2 = neon_load_reg(rd, pass);
5671 neon_store_reg(rm, pass, tmp2);
5672 break;
5673 case 33: /* VTRN */
5674 tmp2 = neon_load_reg(rd, pass);
5675 switch (size) {
5676 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5677 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5678 case 2: abort();
5679 default: return 1;
5680 }
5681 neon_store_reg(rm, pass, tmp2);
5682 break;
5683 case 56: /* Integer VRECPE */
5684 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5685 break;
5686 case 57: /* Integer VRSQRTE */
5687 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5688 break;
5689 case 58: /* Float VRECPE */
5690 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5691 break;
5692 case 59: /* Float VRSQRTE */
5693 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5694 break;
5695 case 60: /* VCVT.F32.S32 */
5696 gen_vfp_sito(0);
5697 break;
5698 case 61: /* VCVT.F32.U32 */
5699 gen_vfp_uito(0);
5700 break;
5701 case 62: /* VCVT.S32.F32 */
5702 gen_vfp_tosiz(0);
5703 break;
5704 case 63: /* VCVT.U32.F32 */
5705 gen_vfp_touiz(0);
5706 break;
5707 default:
5708 /* Reserved: 21, 29, 39-56 */
5709 return 1;
5710 }
5711 if (op == 30 || op == 31 || op >= 58) {
5712 tcg_gen_st_f32(cpu_F0s, cpu_env,
5713 neon_reg_offset(rd, pass));
5714 } else {
5715 neon_store_reg(rd, pass, tmp);
5716 }
5717 }
5718 break;
5719 }
5720 } else if ((insn & (1 << 10)) == 0) {
5721 /* VTBL, VTBX. */
5722 n = ((insn >> 5) & 0x18) + 8;
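        /* n is the table length in bytes: the two-bit len field in
           insn bits 9:8 selects 1-4 D registers, i.e. 8, 16, 24 or
           32 bytes. (Explanatory note, not in the original.) */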
5723 if (insn & (1 << 6)) {
5724 tmp = neon_load_reg(rd, 0);
5725 } else {
5726 tmp = new_tmp();
5727 tcg_gen_movi_i32(tmp, 0);
5728 }
5729 tmp2 = neon_load_reg(rm, 0);
5730 tmp4 = tcg_const_i32(rn);
5731 tmp5 = tcg_const_i32(n);
5732 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5733 dead_tmp(tmp);
5734 if (insn & (1 << 6)) {
5735 tmp = neon_load_reg(rd, 1);
5736 } else {
5737 tmp = new_tmp();
5738 tcg_gen_movi_i32(tmp, 0);
5739 }
5740 tmp3 = neon_load_reg(rm, 1);
5741 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5742 tcg_temp_free_i32(tmp5);
5743 tcg_temp_free_i32(tmp4);
5744 neon_store_reg(rd, 0, tmp2);
5745 neon_store_reg(rd, 1, tmp3);
5746 dead_tmp(tmp);
5747 } else if ((insn & 0x380) == 0) {
5748 /* VDUP */
5749 if (insn & (1 << 19)) {
5750 tmp = neon_load_reg(rm, 1);
5751 } else {
5752 tmp = neon_load_reg(rm, 0);
5753 }
5754 if (insn & (1 << 16)) {
5755 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5756 } else if (insn & (1 << 17)) {
5757 if ((insn >> 18) & 1)
5758 gen_neon_dup_high16(tmp);
5759 else
5760 gen_neon_dup_low16(tmp);
5761 }
5762 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5763 tmp2 = new_tmp();
5764 tcg_gen_mov_i32(tmp2, tmp);
5765 neon_store_reg(rd, pass, tmp2);
5766 }
5767 dead_tmp(tmp);
5768 } else {
5769 return 1;
5770 }
5771 }
5772 }
5773 return 0;
5774 }
5775
5776 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5777 {
5778 int crn = (insn >> 16) & 0xf;
5779 int crm = insn & 0xf;
5780 int op1 = (insn >> 21) & 7;
5781 int op2 = (insn >> 5) & 7;
5782 int rt = (insn >> 12) & 0xf;
5783 TCGv tmp;
5784
5785 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5786 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5787 /* TEECR */
5788 if (IS_USER(s))
5789 return 1;
5790 tmp = load_cpu_field(teecr);
5791 store_reg(s, rt, tmp);
5792 return 0;
5793 }
5794 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5795 /* TEEHBR */
5796 if (IS_USER(s) && (env->teecr & 1))
5797 return 1;
5798 tmp = load_cpu_field(teehbr);
5799 store_reg(s, rt, tmp);
5800 return 0;
5801 }
5802 }
5803 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5804 op1, crn, crm, op2);
5805 return 1;
5806 }
5807
5808 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5809 {
5810 int crn = (insn >> 16) & 0xf;
5811 int crm = insn & 0xf;
5812 int op1 = (insn >> 21) & 7;
5813 int op2 = (insn >> 5) & 7;
5814 int rt = (insn >> 12) & 0xf;
5815 TCGv tmp;
5816
5817 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5818 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5819 /* TEECR */
5820 if (IS_USER(s))
5821 return 1;
5822 tmp = load_reg(s, rt);
5823 gen_helper_set_teecr(cpu_env, tmp);
5824 dead_tmp(tmp);
5825 return 0;
5826 }
5827 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5828 /* TEEHBR */
5829 if (IS_USER(s) && (env->teecr & 1))
5830 return 1;
5831 tmp = load_reg(s, rt);
5832 store_cpu_field(tmp, teehbr);
5833 return 0;
5834 }
5835 }
5836 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5837 op1, crn, crm, op2);
5838 return 1;
5839 }
5840
5841 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5842 {
5843 int cpnum;
5844
5845 cpnum = (insn >> 8) & 0xf;
5846 if (arm_feature(env, ARM_FEATURE_XSCALE)
5847 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5848 return 1;
5849
5850 switch (cpnum) {
5851 case 0:
5852 case 1:
5853 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5854 return disas_iwmmxt_insn(env, s, insn);
5855 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5856 return disas_dsp_insn(env, s, insn);
5857 }
5858 return 1;
5859 case 10:
5860 case 11:
5861 return disas_vfp_insn (env, s, insn);
5862 case 14:
5863 /* Coprocessors 7-15 are architecturally reserved by ARM.
5864 Unfortunately Intel decided to ignore this. */
5865 if (arm_feature(env, ARM_FEATURE_XSCALE))
5866 goto board;
5867 if (insn & (1 << 20))
5868 return disas_cp14_read(env, s, insn);
5869 else
5870 return disas_cp14_write(env, s, insn);
5871 case 15:
5872 return disas_cp15_insn (env, s, insn);
5873 default:
5874 board:
5875 /* Unknown coprocessor. See if the board has hooked it. */
5876 return disas_cp_insn (env, s, insn);
5877 }
5878 }
5879
5880
5881 /* Store a 64-bit value to a register pair. Clobbers val. */
5882 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5883 {
5884 TCGv tmp;
5885 tmp = new_tmp();
5886 tcg_gen_trunc_i64_i32(tmp, val);
5887 store_reg(s, rlow, tmp);
5888 tmp = new_tmp();
5889 tcg_gen_shri_i64(val, val, 32);
5890 tcg_gen_trunc_i64_i32(tmp, val);
5891 store_reg(s, rhigh, tmp);
5892 }
5893
5894 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5895 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5896 {
5897 TCGv_i64 tmp;
5898 TCGv tmp2;
5899
5900 /* Load value and extend to 64 bits. */
5901 tmp = tcg_temp_new_i64();
5902 tmp2 = load_reg(s, rlow);
5903 tcg_gen_extu_i32_i64(tmp, tmp2);
5904 dead_tmp(tmp2);
5905 tcg_gen_add_i64(val, val, tmp);
5906 tcg_temp_free_i64(tmp);
5907 }
5908
5909 /* load and add a 64-bit value from a register pair. */
5910 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5911 {
5912 TCGv_i64 tmp;
5913 TCGv tmpl;
5914 TCGv tmph;
5915
5916 /* Load 64-bit value rd:rn. */
5917 tmpl = load_reg(s, rlow);
5918 tmph = load_reg(s, rhigh);
5919 tmp = tcg_temp_new_i64();
5920 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5921 dead_tmp(tmpl);
5922 dead_tmp(tmph);
5923 tcg_gen_add_i64(val, val, tmp);
5924 tcg_temp_free_i64(tmp);
5925 }
5926
5927 /* Set N and Z flags from a 64-bit value. */
5928 static void gen_logicq_cc(TCGv_i64 val)
5929 {
5930 TCGv tmp = new_tmp();
5931 gen_helper_logicq_cc(tmp, val);
5932 gen_logic_CC(tmp);
5933 dead_tmp(tmp);
5934 }
5935
5936 /* Load/Store exclusive instructions are implemented by remembering
5937 the value/address loaded, and seeing if these are the same
5938 when the store is performed. This should be sufficient to implement
5939 the architecturally mandated semantics, and it avoids having to monitor
5940 regular stores.
5941
5942 In system emulation mode only one CPU will be running at once, so
5943 this sequence is effectively atomic. In user emulation mode we
5944 throw an exception and handle the atomic operation elsewhere. */
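/* Illustrative guest sequence (not from the original source):
       retry: ldrex r1, [r0]
              add   r1, r1, #1
              strex r2, r1, [r0]
              cmp   r2, #0
              bne   retry
   gen_load_exclusive() records the address and the loaded value;
   gen_store_exclusive() re-checks both before committing the store. */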
5945 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5946 TCGv addr, int size)
5947 {
5948 TCGv tmp;
5949
5950 switch (size) {
5951 case 0:
5952 tmp = gen_ld8u(addr, IS_USER(s));
5953 break;
5954 case 1:
5955 tmp = gen_ld16u(addr, IS_USER(s));
5956 break;
5957 case 2:
5958 case 3:
5959 tmp = gen_ld32(addr, IS_USER(s));
5960 break;
5961 default:
5962 abort();
5963 }
5964 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5965 store_reg(s, rt, tmp);
5966 if (size == 3) {
5967 TCGv tmp2 = new_tmp();
5968 tcg_gen_addi_i32(tmp2, addr, 4);
5969 tmp = gen_ld32(tmp2, IS_USER(s));
5970 dead_tmp(tmp2);
5971 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5972 store_reg(s, rt2, tmp);
5973 }
5974 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5975 }
5976
5977 static void gen_clrex(DisasContext *s)
5978 {
5979 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5980 }
5981
5982 #ifdef CONFIG_USER_ONLY
5983 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5984 TCGv addr, int size)
5985 {
5986 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5987 tcg_gen_movi_i32(cpu_exclusive_info,
5988 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5989 gen_exception_insn(s, 4, EXCP_STREX);
5990 }
5991 #else
5992 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5993 TCGv addr, int size)
5994 {
5995 TCGv tmp;
5996 int done_label;
5997 int fail_label;
5998
5999 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6000 [addr] = {Rt};
6001 {Rd} = 0;
6002 } else {
6003 {Rd} = 1;
6004 } */
6005 fail_label = gen_new_label();
6006 done_label = gen_new_label();
6007 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6008 switch (size) {
6009 case 0:
6010 tmp = gen_ld8u(addr, IS_USER(s));
6011 break;
6012 case 1:
6013 tmp = gen_ld16u(addr, IS_USER(s));
6014 break;
6015 case 2:
6016 case 3:
6017 tmp = gen_ld32(addr, IS_USER(s));
6018 break;
6019 default:
6020 abort();
6021 }
6022 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6023 dead_tmp(tmp);
6024 if (size == 3) {
6025 TCGv tmp2 = new_tmp();
6026 tcg_gen_addi_i32(tmp2, addr, 4);
6027 tmp = gen_ld32(tmp2, IS_USER(s));
6028 dead_tmp(tmp2);
6029 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6030 dead_tmp(tmp);
6031 }
6032 tmp = load_reg(s, rt);
6033 switch (size) {
6034 case 0:
6035 gen_st8(tmp, addr, IS_USER(s));
6036 break;
6037 case 1:
6038 gen_st16(tmp, addr, IS_USER(s));
6039 break;
6040 case 2:
6041 case 3:
6042 gen_st32(tmp, addr, IS_USER(s));
6043 break;
6044 default:
6045 abort();
6046 }
6047 if (size == 3) {
6048 tcg_gen_addi_i32(addr, addr, 4);
6049 tmp = load_reg(s, rt2);
6050 gen_st32(tmp, addr, IS_USER(s));
6051 }
6052 tcg_gen_movi_i32(cpu_R[rd], 0);
6053 tcg_gen_br(done_label);
6054 gen_set_label(fail_label);
6055 tcg_gen_movi_i32(cpu_R[rd], 1);
6056 gen_set_label(done_label);
6057 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6058 }
6059 #endif
6060
6061 static void disas_arm_insn(CPUState * env, DisasContext *s)
6062 {
6063 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6064 TCGv tmp;
6065 TCGv tmp2;
6066 TCGv tmp3;
6067 TCGv addr;
6068 TCGv_i64 tmp64;
6069
6070 insn = ldl_code(s->pc);
6071 s->pc += 4;
6072
6073 /* M variants do not implement ARM mode. */
6074 if (IS_M(env))
6075 goto illegal_op;
6076 cond = insn >> 28;
6077 if (cond == 0xf) {
6078 /* Unconditional instructions. */
6079 if (((insn >> 25) & 7) == 1) {
6080 /* NEON Data processing. */
6081 if (!arm_feature(env, ARM_FEATURE_NEON))
6082 goto illegal_op;
6083
6084 if (disas_neon_data_insn(env, s, insn))
6085 goto illegal_op;
6086 return;
6087 }
6088 if ((insn & 0x0f100000) == 0x04000000) {
6089 /* NEON load/store. */
6090 if (!arm_feature(env, ARM_FEATURE_NEON))
6091 goto illegal_op;
6092
6093 if (disas_neon_ls_insn(env, s, insn))
6094 goto illegal_op;
6095 return;
6096 }
6097 if ((insn & 0x0d70f000) == 0x0550f000)
6098 return; /* PLD */
6099 else if ((insn & 0x0ffffdff) == 0x01010000) {
6100 ARCH(6);
6101 /* setend */
6102 if (insn & (1 << 9)) {
6103 /* BE8 mode not implemented. */
6104 goto illegal_op;
6105 }
6106 return;
6107 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6108 switch ((insn >> 4) & 0xf) {
6109 case 1: /* clrex */
6110 ARCH(6K);
6111 gen_clrex(s);
6112 return;
6113 case 4: /* dsb */
6114 case 5: /* dmb */
6115 case 6: /* isb */
6116 ARCH(7);
6117 /* We don't emulate caches so these are a no-op. */
6118 return;
6119 default:
6120 goto illegal_op;
6121 }
6122 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6123 /* srs */
6124 int32_t offset;
6125 if (IS_USER(s))
6126 goto illegal_op;
6127 ARCH(6);
6128 op1 = (insn & 0x1f);
6129 addr = new_tmp();
6130 tmp = tcg_const_i32(op1);
6131 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6132 tcg_temp_free_i32(tmp);
6133 i = (insn >> 23) & 3;
6134 switch (i) {
6135 case 0: offset = -4; break; /* DA */
6136 case 1: offset = 0; break; /* IA */
6137 case 2: offset = -8; break; /* DB */
6138 case 3: offset = 4; break; /* IB */
6139 default: abort();
6140 }
6141 if (offset)
6142 tcg_gen_addi_i32(addr, addr, offset);
6143 tmp = load_reg(s, 14);
6144 gen_st32(tmp, addr, 0);
6145 tmp = load_cpu_field(spsr);
6146 tcg_gen_addi_i32(addr, addr, 4);
6147 gen_st32(tmp, addr, 0);
6148 if (insn & (1 << 21)) {
6149 /* Base writeback. */
6150 switch (i) {
6151 case 0: offset = -8; break;
6152 case 1: offset = 4; break;
6153 case 2: offset = -4; break;
6154 case 3: offset = 0; break;
6155 default: abort();
6156 }
6157 if (offset)
6158 tcg_gen_addi_i32(addr, addr, offset);
6159 tmp = tcg_const_i32(op1);
6160 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6161 tcg_temp_free_i32(tmp);
6162 dead_tmp(addr);
6163 } else {
6164 dead_tmp(addr);
6165 }
6166 return;
6167 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6168 /* rfe */
6169 int32_t offset;
6170 if (IS_USER(s))
6171 goto illegal_op;
6172 ARCH(6);
6173 rn = (insn >> 16) & 0xf;
6174 addr = load_reg(s, rn);
6175 i = (insn >> 23) & 3;
6176 switch (i) {
6177 case 0: offset = -4; break; /* DA */
6178 case 1: offset = 0; break; /* IA */
6179 case 2: offset = -8; break; /* DB */
6180 case 3: offset = 4; break; /* IB */
6181 default: abort();
6182 }
6183 if (offset)
6184 tcg_gen_addi_i32(addr, addr, offset);
6185 /* Load PC into tmp and CPSR into tmp2. */
6186 tmp = gen_ld32(addr, 0);
6187 tcg_gen_addi_i32(addr, addr, 4);
6188 tmp2 = gen_ld32(addr, 0);
6189 if (insn & (1 << 21)) {
6190 /* Base writeback. */
6191 switch (i) {
6192 case 0: offset = -8; break;
6193 case 1: offset = 4; break;
6194 case 2: offset = -4; break;
6195 case 3: offset = 0; break;
6196 default: abort();
6197 }
6198 if (offset)
6199 tcg_gen_addi_i32(addr, addr, offset);
6200 store_reg(s, rn, addr);
6201 } else {
6202 dead_tmp(addr);
6203 }
6204 gen_rfe(s, tmp, tmp2);
6205 return;
6206 } else if ((insn & 0x0e000000) == 0x0a000000) {
6207 /* branch link and change to thumb (blx <offset>) */
6208 int32_t offset;
6209
6210 val = (uint32_t)s->pc;
6211 tmp = new_tmp();
6212 tcg_gen_movi_i32(tmp, val);
6213 store_reg(s, 14, tmp);
6214 /* Sign-extend the 24-bit offset */
6215 offset = (((int32_t)insn) << 8) >> 8;
6216 /* offset * 4 + bit24 * 2 + (thumb bit) */
6217 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6218 /* pipeline offset */
6219 val += 4;
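        /* Illustrative (note added): with a zero 24-bit offset and
           bit 24 (the halfword adjustment) clear, val is this
           instruction's address + 8 with bit 0 set, i.e. the
           ARM-mode PC plus the Thumb bit. */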
6220 gen_bx_im(s, val);
6221 return;
6222 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6223 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6224 /* iWMMXt register transfer. */
6225 if (env->cp15.c15_cpar & (1 << 1))
6226 if (!disas_iwmmxt_insn(env, s, insn))
6227 return;
6228 }
6229 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6230 /* Coprocessor double register transfer. */
6231 } else if ((insn & 0x0f000010) == 0x0e000010) {
6232 /* Additional coprocessor register transfer. */
6233 } else if ((insn & 0x0ff10020) == 0x01000000) {
6234 uint32_t mask;
6235 uint32_t val;
6236 /* cps (privileged) */
6237 if (IS_USER(s))
6238 return;
6239 mask = val = 0;
6240 if (insn & (1 << 19)) {
6241 if (insn & (1 << 8))
6242 mask |= CPSR_A;
6243 if (insn & (1 << 7))
6244 mask |= CPSR_I;
6245 if (insn & (1 << 6))
6246 mask |= CPSR_F;
6247 if (insn & (1 << 18))
6248 val |= mask;
6249 }
6250 if (insn & (1 << 17)) {
6251 mask |= CPSR_M;
6252 val |= (insn & 0x1f);
6253 }
6254 if (mask) {
6255 gen_set_psr_im(s, mask, 0, val);
6256 }
6257 return;
6258 }
6259 goto illegal_op;
6260 }
6261 if (cond != 0xe) {
6262 /* if not always execute, we generate a conditional jump to
6263 next instruction */
6264 s->condlabel = gen_new_label();
6265 gen_test_cc(cond ^ 1, s->condlabel);
6266 s->condjmp = 1;
6267 }
6268 if ((insn & 0x0f900000) == 0x03000000) {
6269 if ((insn & (1 << 21)) == 0) {
6270 ARCH(6T2);
6271 rd = (insn >> 12) & 0xf;
6272 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
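            /* imm4:imm12 form a plain 16-bit immediate. Illustrative
               (note added): MOVW r0, #0x1234 then MOVT r0, #0x5678
               leaves r0 = 0x56781234. */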
6273 if ((insn & (1 << 22)) == 0) {
6274 /* MOVW */
6275 tmp = new_tmp();
6276 tcg_gen_movi_i32(tmp, val);
6277 } else {
6278 /* MOVT */
6279 tmp = load_reg(s, rd);
6280 tcg_gen_ext16u_i32(tmp, tmp);
6281 tcg_gen_ori_i32(tmp, tmp, val << 16);
6282 }
6283 store_reg(s, rd, tmp);
6284 } else {
6285 if (((insn >> 12) & 0xf) != 0xf)
6286 goto illegal_op;
6287 if (((insn >> 16) & 0xf) == 0) {
6288 gen_nop_hint(s, insn & 0xff);
6289 } else {
6290 /* CPSR = immediate */
6291 val = insn & 0xff;
6292 shift = ((insn >> 8) & 0xf) * 2;
6293 if (shift)
6294 val = (val >> shift) | (val << (32 - shift));
6295 i = ((insn & (1 << 22)) != 0);
6296 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6297 goto illegal_op;
6298 }
6299 }
6300 } else if ((insn & 0x0f900000) == 0x01000000
6301 && (insn & 0x00000090) != 0x00000090) {
6302 /* miscellaneous instructions */
6303 op1 = (insn >> 21) & 3;
6304 sh = (insn >> 4) & 0xf;
6305 rm = insn & 0xf;
6306 switch (sh) {
6307 case 0x0: /* move program status register */
6308 if (op1 & 1) {
6309 /* PSR = reg */
6310 tmp = load_reg(s, rm);
6311 i = ((op1 & 2) != 0);
6312 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6313 goto illegal_op;
6314 } else {
6315 /* reg = PSR */
6316 rd = (insn >> 12) & 0xf;
6317 if (op1 & 2) {
6318 if (IS_USER(s))
6319 goto illegal_op;
6320 tmp = load_cpu_field(spsr);
6321 } else {
6322 tmp = new_tmp();
6323 gen_helper_cpsr_read(tmp);
6324 }
6325 store_reg(s, rd, tmp);
6326 }
6327 break;
6328 case 0x1:
6329 if (op1 == 1) {
6330 /* branch/exchange thumb (bx). */
6331 tmp = load_reg(s, rm);
6332 gen_bx(s, tmp);
6333 } else if (op1 == 3) {
6334 /* clz */
6335 rd = (insn >> 12) & 0xf;
6336 tmp = load_reg(s, rm);
6337 gen_helper_clz(tmp, tmp);
6338 store_reg(s, rd, tmp);
6339 } else {
6340 goto illegal_op;
6341 }
6342 break;
6343 case 0x2:
6344 if (op1 == 1) {
6345 ARCH(5J); /* bxj */
6346 /* Trivial implementation equivalent to bx. */
6347 tmp = load_reg(s, rm);
6348 gen_bx(s, tmp);
6349 } else {
6350 goto illegal_op;
6351 }
6352 break;
6353 case 0x3:
6354 if (op1 != 1)
6355 goto illegal_op;
6356
6357 /* branch link/exchange thumb (blx) */
6358 tmp = load_reg(s, rm);
6359 tmp2 = new_tmp();
6360 tcg_gen_movi_i32(tmp2, s->pc);
6361 store_reg(s, 14, tmp2);
6362 gen_bx(s, tmp);
6363 break;
6364 case 0x5: /* saturating add/subtract */
6365 rd = (insn >> 12) & 0xf;
6366 rn = (insn >> 16) & 0xf;
6367 tmp = load_reg(s, rm);
6368 tmp2 = load_reg(s, rn);
6369 if (op1 & 2)
6370 gen_helper_double_saturate(tmp2, tmp2);
6371 if (op1 & 1)
6372 gen_helper_sub_saturate(tmp, tmp, tmp2);
6373 else
6374 gen_helper_add_saturate(tmp, tmp, tmp2);
6375 dead_tmp(tmp2);
6376 store_reg(s, rd, tmp);
6377 break;
6378 case 7:
6379 /* SMC instruction (op1 == 3)
6380 and undefined instructions (op1 == 0 || op1 == 2)
6381 will trap */
6382 if (op1 != 1) {
6383 goto illegal_op;
6384 }
6385 /* bkpt */
6386 gen_exception_insn(s, 4, EXCP_BKPT);
6387 break;
6388 case 0x8: /* signed multiply */
6389 case 0xa:
6390 case 0xc:
6391 case 0xe:
6392 rs = (insn >> 8) & 0xf;
6393 rn = (insn >> 12) & 0xf;
6394 rd = (insn >> 16) & 0xf;
6395 if (op1 == 1) {
6396 /* (32 * 16) >> 16 */
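/* SMULW<y>/SMLAW<y>: rm times the chosen halfword of rs, keeping
   bits [47:16] of the 48-bit product; the SMLAW form below also
   accumulates rn with Q-flag saturation.  */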
6397 tmp = load_reg(s, rm);
6398 tmp2 = load_reg(s, rs);
6399 if (sh & 4)
6400 tcg_gen_sari_i32(tmp2, tmp2, 16);
6401 else
6402 gen_sxth(tmp2);
6403 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6404 tcg_gen_shri_i64(tmp64, tmp64, 16);
6405 tmp = new_tmp();
6406 tcg_gen_trunc_i64_i32(tmp, tmp64);
6407 tcg_temp_free_i64(tmp64);
6408 if ((sh & 2) == 0) {
6409 tmp2 = load_reg(s, rn);
6410 gen_helper_add_setq(tmp, tmp, tmp2);
6411 dead_tmp(tmp2);
6412 }
6413 store_reg(s, rd, tmp);
6414 } else {
6415 /* 16 * 16 */
6416 tmp = load_reg(s, rm);
6417 tmp2 = load_reg(s, rs);
6418 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6419 dead_tmp(tmp2);
6420 if (op1 == 2) {
6421 tmp64 = tcg_temp_new_i64();
6422 tcg_gen_ext_i32_i64(tmp64, tmp);
6423 dead_tmp(tmp);
6424 gen_addq(s, tmp64, rn, rd);
6425 gen_storeq_reg(s, rn, rd, tmp64);
6426 tcg_temp_free_i64(tmp64);
6427 } else {
6428 if (op1 == 0) {
6429 tmp2 = load_reg(s, rn);
6430 gen_helper_add_setq(tmp, tmp, tmp2);
6431 dead_tmp(tmp2);
6432 }
6433 store_reg(s, rd, tmp);
6434 }
6435 }
6436 break;
6437 default:
6438 goto illegal_op;
6439 }
6440 } else if (((insn & 0x0e000000) == 0 &&
6441 (insn & 0x00000090) != 0x90) ||
6442 ((insn & 0x0e000000) == (1 << 25))) {
6443 int set_cc, logic_cc, shiftop;
6444
6445 op1 = (insn >> 21) & 0xf;
6446 set_cc = (insn >> 20) & 1;
6447 logic_cc = table_logic_cc[op1] & set_cc;
6448
6449 /* data processing instruction */
6450 if (insn & (1 << 25)) {
6451 /* immediate operand */
6452 val = insn & 0xff;
6453 shift = ((insn >> 8) & 0xf) * 2;
6454 if (shift) {
6455 val = (val >> shift) | (val << (32 - shift));
6456 }
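/* The operand is imm8 rotated right by 2 * rot, e.g. imm8 = 0xff
   with rot = 4 gives val = 0xff ROR 8 = 0xff000000.  For logical
   flag-setting ops a nonzero rotate also supplies the shifter
   carry-out, taken from bit 31 of the result below.  */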
6457 tmp2 = new_tmp();
6458 tcg_gen_movi_i32(tmp2, val);
6459 if (logic_cc && shift) {
6460 gen_set_CF_bit31(tmp2);
6461 }
6462 } else {
6463 /* register */
6464 rm = (insn) & 0xf;
6465 tmp2 = load_reg(s, rm);
6466 shiftop = (insn >> 5) & 3;
6467 if (!(insn & (1 << 4))) {
6468 shift = (insn >> 7) & 0x1f;
6469 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6470 } else {
6471 rs = (insn >> 8) & 0xf;
6472 tmp = load_reg(s, rs);
6473 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6474 }
6475 }
6476 if (op1 != 0x0f && op1 != 0x0d) {
6477 rn = (insn >> 16) & 0xf;
6478 tmp = load_reg(s, rn);
6479 } else {
6480 TCGV_UNUSED(tmp);
6481 }
6482 rd = (insn >> 12) & 0xf;
6483 switch(op1) {
6484 case 0x00:
6485 tcg_gen_and_i32(tmp, tmp, tmp2);
6486 if (logic_cc) {
6487 gen_logic_CC(tmp);
6488 }
6489 store_reg_bx(env, s, rd, tmp);
6490 break;
6491 case 0x01:
6492 tcg_gen_xor_i32(tmp, tmp, tmp2);
6493 if (logic_cc) {
6494 gen_logic_CC(tmp);
6495 }
6496 store_reg_bx(env, s, rd, tmp);
6497 break;
6498 case 0x02:
6499 if (set_cc && rd == 15) {
6500 /* SUBS r15, ... is used for exception return. */
6501 if (IS_USER(s)) {
6502 goto illegal_op;
6503 }
6504 gen_helper_sub_cc(tmp, tmp, tmp2);
6505 gen_exception_return(s, tmp);
6506 } else {
6507 if (set_cc) {
6508 gen_helper_sub_cc(tmp, tmp, tmp2);
6509 } else {
6510 tcg_gen_sub_i32(tmp, tmp, tmp2);
6511 }
6512 store_reg_bx(env, s, rd, tmp);
6513 }
6514 break;
6515 case 0x03:
6516 if (set_cc) {
6517 gen_helper_sub_cc(tmp, tmp2, tmp);
6518 } else {
6519 tcg_gen_sub_i32(tmp, tmp2, tmp);
6520 }
6521 store_reg_bx(env, s, rd, tmp);
6522 break;
6523 case 0x04:
6524 if (set_cc) {
6525 gen_helper_add_cc(tmp, tmp, tmp2);
6526 } else {
6527 tcg_gen_add_i32(tmp, tmp, tmp2);
6528 }
6529 store_reg_bx(env, s, rd, tmp);
6530 break;
6531 case 0x05:
6532 if (set_cc) {
6533 gen_helper_adc_cc(tmp, tmp, tmp2);
6534 } else {
6535 gen_add_carry(tmp, tmp, tmp2);
6536 }
6537 store_reg_bx(env, s, rd, tmp);
6538 break;
6539 case 0x06:
6540 if (set_cc) {
6541 gen_helper_sbc_cc(tmp, tmp, tmp2);
6542 } else {
6543 gen_sub_carry(tmp, tmp, tmp2);
6544 }
6545 store_reg_bx(env, s, rd, tmp);
6546 break;
6547 case 0x07:
6548 if (set_cc) {
6549 gen_helper_sbc_cc(tmp, tmp2, tmp);
6550 } else {
6551 gen_sub_carry(tmp, tmp2, tmp);
6552 }
6553 store_reg_bx(env, s, rd, tmp);
6554 break;
6555 case 0x08:
6556 if (set_cc) {
6557 tcg_gen_and_i32(tmp, tmp, tmp2);
6558 gen_logic_CC(tmp);
6559 }
6560 dead_tmp(tmp);
6561 break;
6562 case 0x09:
6563 if (set_cc) {
6564 tcg_gen_xor_i32(tmp, tmp, tmp2);
6565 gen_logic_CC(tmp);
6566 }
6567 dead_tmp(tmp);
6568 break;
6569 case 0x0a:
6570 if (set_cc) {
6571 gen_helper_sub_cc(tmp, tmp, tmp2);
6572 }
6573 dead_tmp(tmp);
6574 break;
6575 case 0x0b:
6576 if (set_cc) {
6577 gen_helper_add_cc(tmp, tmp, tmp2);
6578 }
6579 dead_tmp(tmp);
6580 break;
6581 case 0x0c:
6582 tcg_gen_or_i32(tmp, tmp, tmp2);
6583 if (logic_cc) {
6584 gen_logic_CC(tmp);
6585 }
6586 store_reg_bx(env, s, rd, tmp);
6587 break;
6588 case 0x0d:
6589 if (logic_cc && rd == 15) {
6590 /* MOVS r15, ... is used for exception return. */
6591 if (IS_USER(s)) {
6592 goto illegal_op;
6593 }
6594 gen_exception_return(s, tmp2);
6595 } else {
6596 if (logic_cc) {
6597 gen_logic_CC(tmp2);
6598 }
6599 store_reg_bx(env, s, rd, tmp2);
6600 }
6601 break;
6602 case 0x0e:
6603 tcg_gen_andc_i32(tmp, tmp, tmp2);
6604 if (logic_cc) {
6605 gen_logic_CC(tmp);
6606 }
6607 store_reg_bx(env, s, rd, tmp);
6608 break;
6609 default:
6610 case 0x0f:
6611 tcg_gen_not_i32(tmp2, tmp2);
6612 if (logic_cc) {
6613 gen_logic_CC(tmp2);
6614 }
6615 store_reg_bx(env, s, rd, tmp2);
6616 break;
6617 }
6618 if (op1 != 0x0f && op1 != 0x0d) {
6619 dead_tmp(tmp2);
6620 }
6621 } else {
6622 /* other instructions */
6623 op1 = (insn >> 24) & 0xf;
6624 switch(op1) {
6625 case 0x0:
6626 case 0x1:
6627 /* multiplies, extra load/stores */
6628 sh = (insn >> 5) & 3;
6629 if (sh == 0) {
6630 if (op1 == 0x0) {
6631 rd = (insn >> 16) & 0xf;
6632 rn = (insn >> 12) & 0xf;
6633 rs = (insn >> 8) & 0xf;
6634 rm = (insn) & 0xf;
6635 op1 = (insn >> 20) & 0xf;
6636 switch (op1) {
6637 case 0: case 1: case 2: case 3: case 6:
6638 /* 32 bit mul */
6639 tmp = load_reg(s, rs);
6640 tmp2 = load_reg(s, rm);
6641 tcg_gen_mul_i32(tmp, tmp, tmp2);
6642 dead_tmp(tmp2);
6643 if (insn & (1 << 22)) {
6644 /* Subtract (mls) */
6645 ARCH(6T2);
6646 tmp2 = load_reg(s, rn);
6647 tcg_gen_sub_i32(tmp, tmp2, tmp);
6648 dead_tmp(tmp2);
6649 } else if (insn & (1 << 21)) {
6650 /* Add */
6651 tmp2 = load_reg(s, rn);
6652 tcg_gen_add_i32(tmp, tmp, tmp2);
6653 dead_tmp(tmp2);
6654 }
6655 if (insn & (1 << 20))
6656 gen_logic_CC(tmp);
6657 store_reg(s, rd, tmp);
6658 break;
6659 case 4:
6660 /* 64 bit mul double accumulate (UMAAL) */
6661 ARCH(6);
6662 tmp = load_reg(s, rs);
6663 tmp2 = load_reg(s, rm);
6664 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6665 gen_addq_lo(s, tmp64, rn);
6666 gen_addq_lo(s, tmp64, rd);
6667 gen_storeq_reg(s, rn, rd, tmp64);
6668 tcg_temp_free_i64(tmp64);
6669 break;
6670 case 8: case 9: case 10: case 11:
6671 case 12: case 13: case 14: case 15:
6672 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6673 tmp = load_reg(s, rs);
6674 tmp2 = load_reg(s, rm);
6675 if (insn & (1 << 22)) {
6676 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6677 } else {
6678 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6679 }
6680 if (insn & (1 << 21)) { /* mult accumulate */
6681 gen_addq(s, tmp64, rn, rd);
6682 }
6683 if (insn & (1 << 20)) {
6684 gen_logicq_cc(tmp64);
6685 }
6686 gen_storeq_reg(s, rn, rd, tmp64);
6687 tcg_temp_free_i64(tmp64);
6688 break;
6689 default:
6690 goto illegal_op;
6691 }
6692 } else {
6693 rn = (insn >> 16) & 0xf;
6694 rd = (insn >> 12) & 0xf;
6695 if (insn & (1 << 23)) {
6696 /* load/store exclusive */
6697 op1 = (insn >> 21) & 0x3;
6698 if (op1)
6699 ARCH(6K);
6700 else
6701 ARCH(6);
6702 addr = tcg_temp_local_new_i32();
6703 load_reg_var(s, addr, rn);
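/* The final argument is the log2 access size: 0 = byte, 1 = halfword,
   2 = word, 3 = doubleword (which transfers rd and rd + 1).  */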
6704 if (insn & (1 << 20)) {
6705 switch (op1) {
6706 case 0: /* ldrex */
6707 gen_load_exclusive(s, rd, 15, addr, 2);
6708 break;
6709 case 1: /* ldrexd */
6710 gen_load_exclusive(s, rd, rd + 1, addr, 3);
6711 break;
6712 case 2: /* ldrexb */
6713 gen_load_exclusive(s, rd, 15, addr, 0);
6714 break;
6715 case 3: /* ldrexh */
6716 gen_load_exclusive(s, rd, 15, addr, 1);
6717 break;
6718 default:
6719 abort();
6720 }
6721 } else {
6722 rm = insn & 0xf;
6723 switch (op1) {
6724 case 0: /* strex */
6725 gen_store_exclusive(s, rd, rm, 15, addr, 2);
6726 break;
6727 case 1: /* strexd */
6728 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
6729 break;
6730 case 2: /* strexb */
6731 gen_store_exclusive(s, rd, rm, 15, addr, 0);
6732 break;
6733 case 3: /* strexh */
6734 gen_store_exclusive(s, rd, rm, 15, addr, 1);
6735 break;
6736 default:
6737 abort();
6738 }
6739 }
6740 tcg_temp_free(addr);
6741 } else {
6742 /* SWP instruction */
6743 rm = (insn) & 0xf;
6744
6745 /* ??? This is not really atomic. However, we know
6746 we never have multiple CPUs running in parallel,
6747 so it is good enough. */
6748 addr = load_reg(s, rn);
6749 tmp = load_reg(s, rm);
6750 if (insn & (1 << 22)) {
6751 tmp2 = gen_ld8u(addr, IS_USER(s));
6752 gen_st8(tmp, addr, IS_USER(s));
6753 } else {
6754 tmp2 = gen_ld32(addr, IS_USER(s));
6755 gen_st32(tmp, addr, IS_USER(s));
6756 }
6757 dead_tmp(addr);
6758 store_reg(s, rd, tmp2);
6759 }
6760 }
6761 } else {
6762 int address_offset;
6763 int load;
6764 /* Misc load/store */
6765 rn = (insn >> 16) & 0xf;
6766 rd = (insn >> 12) & 0xf;
6767 addr = load_reg(s, rn);
6768 if (insn & (1 << 24))
6769 gen_add_datah_offset(s, insn, 0, addr);
6770 address_offset = 0;
6771 if (insn & (1 << 20)) {
6772 /* load */
6773 switch(sh) {
6774 case 1:
6775 tmp = gen_ld16u(addr, IS_USER(s));
6776 break;
6777 case 2:
6778 tmp = gen_ld8s(addr, IS_USER(s));
6779 break;
6780 default:
6781 case 3:
6782 tmp = gen_ld16s(addr, IS_USER(s));
6783 break;
6784 }
6785 load = 1;
6786 } else if (sh & 2) {
6787 /* doubleword */
6788 if (sh & 1) {
6789 /* store */
6790 tmp = load_reg(s, rd);
6791 gen_st32(tmp, addr, IS_USER(s));
6792 tcg_gen_addi_i32(addr, addr, 4);
6793 tmp = load_reg(s, rd + 1);
6794 gen_st32(tmp, addr, IS_USER(s));
6795 load = 0;
6796 } else {
6797 /* load */
6798 tmp = gen_ld32(addr, IS_USER(s));
6799 store_reg(s, rd, tmp);
6800 tcg_gen_addi_i32(addr, addr, 4);
6801 tmp = gen_ld32(addr, IS_USER(s));
6802 rd++;
6803 load = 1;
6804 }
6805 address_offset = -4;
6806 } else {
6807 /* store */
6808 tmp = load_reg(s, rd);
6809 gen_st16(tmp, addr, IS_USER(s));
6810 load = 0;
6811 }
6812 /* Perform base writeback before the loaded value to
6813 ensure correct behavior with overlapping index registers.
6814 ldrd with base writeback is undefined if the
6815 destination and index registers overlap. */
6816 if (!(insn & (1 << 24))) {
6817 gen_add_datah_offset(s, insn, address_offset, addr);
6818 store_reg(s, rn, addr);
6819 } else if (insn & (1 << 21)) {
6820 if (address_offset)
6821 tcg_gen_addi_i32(addr, addr, address_offset);
6822 store_reg(s, rn, addr);
6823 } else {
6824 dead_tmp(addr);
6825 }
6826 if (load) {
6827 /* Complete the load. */
6828 store_reg(s, rd, tmp);
6829 }
6830 }
6831 break;
6832 case 0x4:
6833 case 0x5:
6834 goto do_ldst;
6835 case 0x6:
6836 case 0x7:
6837 if (insn & (1 << 4)) {
6838 ARCH(6);
6839 /* ARMv6 media instructions. */
6840 rm = insn & 0xf;
6841 rn = (insn >> 16) & 0xf;
6842 rd = (insn >> 12) & 0xf;
6843 rs = (insn >> 8) & 0xf;
6844 switch ((insn >> 23) & 3) {
6845 case 0: /* Parallel add/subtract. */
6846 op1 = (insn >> 20) & 7;
6847 tmp = load_reg(s, rn);
6848 tmp2 = load_reg(s, rm);
6849 sh = (insn >> 5) & 7;
6850 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6851 goto illegal_op;
6852 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6853 dead_tmp(tmp2);
6854 store_reg(s, rd, tmp);
6855 break;
6856 case 1:
6857 if ((insn & 0x00700020) == 0) {
6858 /* Halfword pack. */
6859 tmp = load_reg(s, rn);
6860 tmp2 = load_reg(s, rm);
6861 shift = (insn >> 7) & 0x1f;
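/* PKHBT: rd = (rm LSL sh)[31:16] : rn[15:0];
   PKHTB: rd = rn[31:16] : (rm ASR sh)[15:0].  */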
6862 if (insn & (1 << 6)) {
6863 /* pkhtb */
6864 if (shift == 0)
6865 shift = 31;
6866 tcg_gen_sari_i32(tmp2, tmp2, shift);
6867 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6868 tcg_gen_ext16u_i32(tmp2, tmp2);
6869 } else {
6870 /* pkhbt */
6871 if (shift)
6872 tcg_gen_shli_i32(tmp2, tmp2, shift);
6873 tcg_gen_ext16u_i32(tmp, tmp);
6874 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6875 }
6876 tcg_gen_or_i32(tmp, tmp, tmp2);
6877 dead_tmp(tmp2);
6878 store_reg(s, rd, tmp);
6879 } else if ((insn & 0x00200020) == 0x00200000) {
6880 /* [us]sat */
6881 tmp = load_reg(s, rm);
6882 shift = (insn >> 7) & 0x1f;
6883 if (insn & (1 << 6)) {
6884 if (shift == 0)
6885 shift = 31;
6886 tcg_gen_sari_i32(tmp, tmp, shift);
6887 } else {
6888 tcg_gen_shli_i32(tmp, tmp, shift);
6889 }
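/* In the ASR form shift == 0 encodes ASR #32; ASR #31 is used
   instead, which is equivalent since both collapse the operand
   to 0 or -1 before saturation.  */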
6890 sh = (insn >> 16) & 0x1f;
6891 tmp2 = tcg_const_i32(sh);
6892 if (insn & (1 << 22))
6893 gen_helper_usat(tmp, tmp, tmp2);
6894 else
6895 gen_helper_ssat(tmp, tmp, tmp2);
6896 tcg_temp_free_i32(tmp2);
6897 store_reg(s, rd, tmp);
6898 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6899 /* [us]sat16 */
6900 tmp = load_reg(s, rm);
6901 sh = (insn >> 16) & 0x1f;
6902 tmp2 = tcg_const_i32(sh);
6903 if (insn & (1 << 22))
6904 gen_helper_usat16(tmp, tmp, tmp2);
6905 else
6906 gen_helper_ssat16(tmp, tmp, tmp2);
6907 tcg_temp_free_i32(tmp2);
6908 store_reg(s, rd, tmp);
6909 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6910 /* Select bytes. */
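/* SEL: each result byte comes from rn or rm according to the
   corresponding CPSR.GE flag: rd[i] = GE[i] ? rn[i] : rm[i].  */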
6911 tmp = load_reg(s, rn);
6912 tmp2 = load_reg(s, rm);
6913 tmp3 = new_tmp();
6914 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6915 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6916 dead_tmp(tmp3);
6917 dead_tmp(tmp2);
6918 store_reg(s, rd, tmp);
6919 } else if ((insn & 0x000003e0) == 0x00000060) {
6920 tmp = load_reg(s, rm);
6921 shift = (insn >> 10) & 3;
6922 /* ??? In many cases it's not necessary to do a
6923 rotate; a shift is sufficient. */
6924 if (shift != 0)
6925 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
6926 op1 = (insn >> 20) & 7;
6927 switch (op1) {
6928 case 0: gen_sxtb16(tmp); break;
6929 case 2: gen_sxtb(tmp); break;
6930 case 3: gen_sxth(tmp); break;
6931 case 4: gen_uxtb16(tmp); break;
6932 case 6: gen_uxtb(tmp); break;
6933 case 7: gen_uxth(tmp); break;
6934 default: goto illegal_op;
6935 }
6936 if (rn != 15) {
6937 tmp2 = load_reg(s, rn);
6938 if ((op1 & 3) == 0) {
6939 gen_add16(tmp, tmp2);
6940 } else {
6941 tcg_gen_add_i32(tmp, tmp, tmp2);
6942 dead_tmp(tmp2);
6943 }
6944 }
6945 store_reg(s, rd, tmp);
6946 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6947 /* rev */
6948 tmp = load_reg(s, rm);
6949 if (insn & (1 << 22)) {
6950 if (insn & (1 << 7)) {
6951 gen_revsh(tmp);
6952 } else {
6953 ARCH(6T2);
6954 gen_helper_rbit(tmp, tmp);
6955 }
6956 } else {
6957 if (insn & (1 << 7))
6958 gen_rev16(tmp);
6959 else
6960 tcg_gen_bswap32_i32(tmp, tmp);
6961 }
6962 store_reg(s, rd, tmp);
6963 } else {
6964 goto illegal_op;
6965 }
6966 break;
6967 case 2: /* Multiplies (Type 3). */
6968 tmp = load_reg(s, rm);
6969 tmp2 = load_reg(s, rs);
6970 if (insn & (1 << 20)) {
6971 /* Signed multiply most significant [accumulate].
6972 (SMMUL, SMMLA, SMMLS) */
6973 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6974
6975 if (rd != 15) {
6976 tmp = load_reg(s, rd);
6977 if (insn & (1 << 6)) {
6978 tmp64 = gen_subq_msw(tmp64, tmp);
6979 } else {
6980 tmp64 = gen_addq_msw(tmp64, tmp);
6981 }
6982 }
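/* Bit 5 is the R (round) flag: adding 0x80000000 before taking
   the high word rounds the result (SMMULR/SMMLAR/SMMLSR).  */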
6983 if (insn & (1 << 5)) {
6984 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6985 }
6986 tcg_gen_shri_i64(tmp64, tmp64, 32);
6987 tmp = new_tmp();
6988 tcg_gen_trunc_i64_i32(tmp, tmp64);
6989 tcg_temp_free_i64(tmp64);
6990 store_reg(s, rn, tmp);
6991 } else {
6992 if (insn & (1 << 5))
6993 gen_swap_half(tmp2);
6994 gen_smul_dual(tmp, tmp2);
6995 /* This addition cannot overflow. */
6996 if (insn & (1 << 6)) {
6997 tcg_gen_sub_i32(tmp, tmp, tmp2);
6998 } else {
6999 tcg_gen_add_i32(tmp, tmp, tmp2);
7000 }
7001 dead_tmp(tmp2);
7002 if (insn & (1 << 22)) {
7003 /* smlald, smlsld */
7004 tmp64 = tcg_temp_new_i64();
7005 tcg_gen_ext_i32_i64(tmp64, tmp);
7006 dead_tmp(tmp);
7007 gen_addq(s, tmp64, rd, rn);
7008 gen_storeq_reg(s, rd, rn, tmp64);
7009 tcg_temp_free_i64(tmp64);
7010 } else {
7011 /* smuad, smusd, smlad, smlsd */
7012 if (rd != 15)
7013 {
7014 tmp2 = load_reg(s, rd);
7015 gen_helper_add_setq(tmp, tmp, tmp2);
7016 dead_tmp(tmp2);
7017 }
7018 store_reg(s, rn, tmp);
7019 }
7020 }
7021 break;
7022 case 3:
7023 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7024 switch (op1) {
7025 case 0: /* Unsigned sum of absolute differences. */
7026 ARCH(6);
7027 tmp = load_reg(s, rm);
7028 tmp2 = load_reg(s, rs);
7029 gen_helper_usad8(tmp, tmp, tmp2);
7030 dead_tmp(tmp2);
7031 if (rd != 15) {
7032 tmp2 = load_reg(s, rd);
7033 tcg_gen_add_i32(tmp, tmp, tmp2);
7034 dead_tmp(tmp2);
7035 }
7036 store_reg(s, rn, tmp);
7037 break;
7038 case 0x20: case 0x24: case 0x28: case 0x2c:
7039 /* Bitfield insert/clear. */
7040 ARCH(6T2);
7041 shift = (insn >> 7) & 0x1f;
7042 i = (insn >> 16) & 0x1f;
7043 i = i + 1 - shift;
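/* i is now the field width (msb + 1 - lsb); rm == 15 encodes
   BFC, i.e. the inserted field is all zeros.  */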
7044 if (rm == 15) {
7045 tmp = new_tmp();
7046 tcg_gen_movi_i32(tmp, 0);
7047 } else {
7048 tmp = load_reg(s, rm);
7049 }
7050 if (i != 32) {
7051 tmp2 = load_reg(s, rd);
7052 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7053 dead_tmp(tmp2);
7054 }
7055 store_reg(s, rd, tmp);
7056 break;
7057 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7058 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7059 ARCH(6T2);
7060 tmp = load_reg(s, rm);
7061 shift = (insn >> 7) & 0x1f;
7062 i = ((insn >> 16) & 0x1f) + 1;
7063 if (shift + i > 32)
7064 goto illegal_op;
7065 if (i < 32) {
7066 if (op1 & 0x20) {
7067 gen_ubfx(tmp, shift, (1u << i) - 1);
7068 } else {
7069 gen_sbfx(tmp, shift, i);
7070 }
7071 }
7072 store_reg(s, rd, tmp);
7073 break;
7074 default:
7075 goto illegal_op;
7076 }
7077 break;
7078 }
7079 break;
7080 }
7081 do_ldst:
7082 /* Check for undefined extension instructions
7083 * per the ARM ARM (Architecture Reference Manual), i.e.:
7084 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7085 */
7086 sh = (0xf << 20) | (0xf << 4);
7087 if (op1 == 0x7 && ((insn & sh) == sh))
7088 {
7089 goto illegal_op;
7090 }
7091 /* load/store byte/word */
7092 rn = (insn >> 16) & 0xf;
7093 rd = (insn >> 12) & 0xf;
7094 tmp2 = load_reg(s, rn);
7095 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7096 if (insn & (1 << 24))
7097 gen_add_data_offset(s, insn, tmp2);
7098 if (insn & (1 << 20)) {
7099 /* load */
7100 if (insn & (1 << 22)) {
7101 tmp = gen_ld8u(tmp2, i);
7102 } else {
7103 tmp = gen_ld32(tmp2, i);
7104 }
7105 } else {
7106 /* store */
7107 tmp = load_reg(s, rd);
7108 if (insn & (1 << 22))
7109 gen_st8(tmp, tmp2, i);
7110 else
7111 gen_st32(tmp, tmp2, i);
7112 }
7113 if (!(insn & (1 << 24))) {
7114 gen_add_data_offset(s, insn, tmp2);
7115 store_reg(s, rn, tmp2);
7116 } else if (insn & (1 << 21)) {
7117 store_reg(s, rn, tmp2);
7118 } else {
7119 dead_tmp(tmp2);
7120 }
7121 if (insn & (1 << 20)) {
7122 /* Complete the load. */
7123 if (rd == 15)
7124 gen_bx(s, tmp);
7125 else
7126 store_reg(s, rd, tmp);
7127 }
7128 break;
7129 case 0x08:
7130 case 0x09:
7131 {
7132 int j, n, user, loaded_base;
7133 TCGv loaded_var;
7134 /* load/store multiple words */
7135 /* XXX: store correct base if write back */
7136 user = 0;
7137 if (insn & (1 << 22)) {
7138 if (IS_USER(s))
7139 goto illegal_op; /* only usable in supervisor mode */
7140
7141 if ((insn & (1 << 15)) == 0)
7142 user = 1;
7143 }
7144 rn = (insn >> 16) & 0xf;
7145 addr = load_reg(s, rn);
7146
7147 /* compute total size */
7148 loaded_base = 0;
7149 TCGV_UNUSED(loaded_var);
7150 n = 0;
7151 for (i = 0; i < 16; i++) {
7152 if (insn & (1 << i))
7153 n++;
7154 }
7155 /* XXX: test the invalid n == 0 case? */
7156 if (insn & (1 << 23)) {
7157 if (insn & (1 << 24)) {
7158 /* pre increment */
7159 tcg_gen_addi_i32(addr, addr, 4);
7160 } else {
7161 /* post increment */
7162 }
7163 } else {
7164 if (insn & (1 << 24)) {
7165 /* pre decrement */
7166 tcg_gen_addi_i32(addr, addr, -(n * 4));
7167 } else {
7168 /* post decrement */
7169 if (n != 1)
7170 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7171 }
7172 }
7173 j = 0;
7174 for (i = 0; i < 16; i++) {
7175 if (insn & (1 << i)) {
7176 if (insn & (1 << 20)) {
7177 /* load */
7178 tmp = gen_ld32(addr, IS_USER(s));
7179 if (i == 15) {
7180 gen_bx(s, tmp);
7181 } else if (user) {
7182 tmp2 = tcg_const_i32(i);
7183 gen_helper_set_user_reg(tmp2, tmp);
7184 tcg_temp_free_i32(tmp2);
7185 dead_tmp(tmp);
7186 } else if (i == rn) {
7187 loaded_var = tmp;
7188 loaded_base = 1;
7189 } else {
7190 store_reg(s, i, tmp);
7191 }
7192 } else {
7193 /* store */
7194 if (i == 15) {
7195 /* special case: r15 = PC + 8 */
7196 val = (long)s->pc + 4;
7197 tmp = new_tmp();
7198 tcg_gen_movi_i32(tmp, val);
7199 } else if (user) {
7200 tmp = new_tmp();
7201 tmp2 = tcg_const_i32(i);
7202 gen_helper_get_user_reg(tmp, tmp2);
7203 tcg_temp_free_i32(tmp2);
7204 } else {
7205 tmp = load_reg(s, i);
7206 }
7207 gen_st32(tmp, addr, IS_USER(s));
7208 }
7209 j++;
7210 /* no need to add after the last transfer */
7211 if (j != n)
7212 tcg_gen_addi_i32(addr, addr, 4);
7213 }
7214 }
7215 if (insn & (1 << 21)) {
7216 /* write back */
7217 if (insn & (1 << 23)) {
7218 if (insn & (1 << 24)) {
7219 /* pre increment */
7220 } else {
7221 /* post increment */
7222 tcg_gen_addi_i32(addr, addr, 4);
7223 }
7224 } else {
7225 if (insn & (1 << 24)) {
7226 /* pre decrement */
7227 if (n != 1)
7228 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7229 } else {
7230 /* post decrement */
7231 tcg_gen_addi_i32(addr, addr, -(n * 4));
7232 }
7233 }
7234 store_reg(s, rn, addr);
7235 } else {
7236 dead_tmp(addr);
7237 }
7238 if (loaded_base) {
7239 store_reg(s, rn, loaded_var);
7240 }
7241 if ((insn & (1 << 22)) && !user) {
7242 /* Restore CPSR from SPSR. */
7243 tmp = load_cpu_field(spsr);
7244 gen_set_cpsr(tmp, 0xffffffff);
7245 dead_tmp(tmp);
7246 s->is_jmp = DISAS_UPDATE;
7247 }
7248 }
7249 break;
7250 case 0xa:
7251 case 0xb:
7252 {
7253 int32_t offset;
7254
7255 /* branch (and link) */
7256 val = (int32_t)s->pc;
7257 if (insn & (1 << 24)) {
7258 tmp = new_tmp();
7259 tcg_gen_movi_i32(tmp, val);
7260 store_reg(s, 14, tmp);
7261 }
7262 offset = (((int32_t)insn << 8) >> 8);
7263 val += (offset << 2) + 4;
7264 gen_jmp(s, val);
7265 }
7266 break;
7267 case 0xc:
7268 case 0xd:
7269 case 0xe:
7270 /* Coprocessor. */
7271 if (disas_coproc_insn(env, s, insn))
7272 goto illegal_op;
7273 break;
7274 case 0xf:
7275 /* swi */
7276 gen_set_pc_im(s->pc);
7277 s->is_jmp = DISAS_SWI;
7278 break;
7279 default:
7280 illegal_op:
7281 gen_exception_insn(s, 4, EXCP_UDEF);
7282 break;
7283 }
7284 }
7285 }
7286
7287 /* Return true if this is a Thumb-2 logical op. */
7288 static int
7289 thumb2_logic_op(int op)
7290 {
7291 return (op < 8);
7292 }
7293
7294 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7295 then set condition code flags based on the result of the operation.
7296 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7297 to the high bit of T1.
7298 Returns zero if the opcode is valid. */
7299
7300 static int
7301 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7302 {
7303 int logic_cc;
7304
7305 logic_cc = 0;
7306 switch (op) {
7307 case 0: /* and */
7308 tcg_gen_and_i32(t0, t0, t1);
7309 logic_cc = conds;
7310 break;
7311 case 1: /* bic */
7312 tcg_gen_andc_i32(t0, t0, t1);
7313 logic_cc = conds;
7314 break;
7315 case 2: /* orr */
7316 tcg_gen_or_i32(t0, t0, t1);
7317 logic_cc = conds;
7318 break;
7319 case 3: /* orn */
7320 tcg_gen_not_i32(t1, t1);
7321 tcg_gen_or_i32(t0, t0, t1);
7322 logic_cc = conds;
7323 break;
7324 case 4: /* eor */
7325 tcg_gen_xor_i32(t0, t0, t1);
7326 logic_cc = conds;
7327 break;
7328 case 8: /* add */
7329 if (conds)
7330 gen_helper_add_cc(t0, t0, t1);
7331 else
7332 tcg_gen_add_i32(t0, t0, t1);
7333 break;
7334 case 10: /* adc */
7335 if (conds)
7336 gen_helper_adc_cc(t0, t0, t1);
7337 else
7338 gen_adc(t0, t1);
7339 break;
7340 case 11: /* sbc */
7341 if (conds)
7342 gen_helper_sbc_cc(t0, t0, t1);
7343 else
7344 gen_sub_carry(t0, t0, t1);
7345 break;
7346 case 13: /* sub */
7347 if (conds)
7348 gen_helper_sub_cc(t0, t0, t1);
7349 else
7350 tcg_gen_sub_i32(t0, t0, t1);
7351 break;
7352 case 14: /* rsb */
7353 if (conds)
7354 gen_helper_sub_cc(t0, t1, t0);
7355 else
7356 tcg_gen_sub_i32(t0, t1, t0);
7357 break;
7358 default: /* 5, 6, 7, 9, 12, 15. */
7359 return 1;
7360 }
7361 if (logic_cc) {
7362 gen_logic_CC(t0);
7363 if (shifter_out)
7364 gen_set_CF_bit31(t1);
7365 }
7366 return 0;
7367 }
7368
7369 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7370 is not legal. */
7371 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7372 {
7373 uint32_t insn, imm, shift, offset;
7374 uint32_t rd, rn, rm, rs;
7375 TCGv tmp;
7376 TCGv tmp2;
7377 TCGv tmp3;
7378 TCGv addr;
7379 TCGv_i64 tmp64;
7380 int op;
7381 int shiftop;
7382 int conds;
7383 int logic_cc;
7384
7385 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7386 || arm_feature (env, ARM_FEATURE_M))) {
7387 /* Thumb-1 cores may need to treat bl and blx as a pair of
7388 16-bit instructions to get correct prefetch abort behavior. */
7389 insn = insn_hw1;
7390 if ((insn & (1 << 12)) == 0) {
7391 /* Second half of blx. */
7392 offset = ((insn & 0x7ff) << 1);
7393 tmp = load_reg(s, 14);
7394 tcg_gen_addi_i32(tmp, tmp, offset);
7395 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7396
7397 tmp2 = new_tmp();
7398 tcg_gen_movi_i32(tmp2, s->pc | 1);
7399 store_reg(s, 14, tmp2);
7400 gen_bx(s, tmp);
7401 return 0;
7402 }
7403 if (insn & (1 << 11)) {
7404 /* Second half of bl. */
7405 offset = ((insn & 0x7ff) << 1) | 1;
7406 tmp = load_reg(s, 14);
7407 tcg_gen_addi_i32(tmp, tmp, offset);
7408
7409 tmp2 = new_tmp();
7410 tcg_gen_movi_i32(tmp2, s->pc | 1);
7411 store_reg(s, 14, tmp2);
7412 gen_bx(s, tmp);
7413 return 0;
7414 }
7415 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7416 /* Instruction spans a page boundary. Implement it as two
7417 16-bit instructions in case the second half causes a
7418 prefetch abort. */
7419 offset = ((int32_t)insn << 21) >> 9;
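/* First half of bl/blx: lr = pc + (signextend(hw1[10:0]) << 12),
   where pc reads as this halfword's address + 4; the second half
   executed next adds its own 11-bit halfword offset.  */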
7420 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7421 return 0;
7422 }
7423 /* Fall through to 32-bit decode. */
7424 }
7425
7426 insn = lduw_code(s->pc);
7427 s->pc += 2;
7428 insn |= (uint32_t)insn_hw1 << 16;
7429
7430 if ((insn & 0xf800e800) != 0xf000e800) {
7431 ARCH(6T2);
7432 }
7433
7434 rn = (insn >> 16) & 0xf;
7435 rs = (insn >> 12) & 0xf;
7436 rd = (insn >> 8) & 0xf;
7437 rm = insn & 0xf;
7438 switch ((insn >> 25) & 0xf) {
7439 case 0: case 1: case 2: case 3:
7440 /* 16-bit instructions. Should never happen. */
7441 abort();
7442 case 4:
7443 if (insn & (1 << 22)) {
7444 /* Other load/store, table branch. */
7445 if (insn & 0x01200000) {
7446 /* Load/store doubleword. */
7447 if (rn == 15) {
7448 addr = new_tmp();
7449 tcg_gen_movi_i32(addr, s->pc & ~3);
7450 } else {
7451 addr = load_reg(s, rn);
7452 }
7453 offset = (insn & 0xff) * 4;
7454 if ((insn & (1 << 23)) == 0)
7455 offset = -offset;
7456 if (insn & (1 << 24)) {
7457 tcg_gen_addi_i32(addr, addr, offset);
7458 offset = 0;
7459 }
7460 if (insn & (1 << 20)) {
7461 /* ldrd */
7462 tmp = gen_ld32(addr, IS_USER(s));
7463 store_reg(s, rs, tmp);
7464 tcg_gen_addi_i32(addr, addr, 4);
7465 tmp = gen_ld32(addr, IS_USER(s));
7466 store_reg(s, rd, tmp);
7467 } else {
7468 /* strd */
7469 tmp = load_reg(s, rs);
7470 gen_st32(tmp, addr, IS_USER(s));
7471 tcg_gen_addi_i32(addr, addr, 4);
7472 tmp = load_reg(s, rd);
7473 gen_st32(tmp, addr, IS_USER(s));
7474 }
7475 if (insn & (1 << 21)) {
7476 /* Base writeback. */
7477 if (rn == 15)
7478 goto illegal_op;
7479 tcg_gen_addi_i32(addr, addr, offset - 4);
7480 store_reg(s, rn, addr);
7481 } else {
7482 dead_tmp(addr);
7483 }
7484 } else if ((insn & (1 << 23)) == 0) {
7485 /* Load/store exclusive word. */
7486 addr = tcg_temp_local_new();
7487 load_reg_var(s, addr, rn);
7488 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7489 if (insn & (1 << 20)) {
7490 gen_load_exclusive(s, rs, 15, addr, 2);
7491 } else {
7492 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7493 }
7494 tcg_temp_free(addr);
7495 } else if ((insn & (1 << 6)) == 0) {
7496 /* Table Branch. */
7497 if (rn == 15) {
7498 addr = new_tmp();
7499 tcg_gen_movi_i32(addr, s->pc);
7500 } else {
7501 addr = load_reg(s, rn);
7502 }
7503 tmp = load_reg(s, rm);
7504 tcg_gen_add_i32(addr, addr, tmp);
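/* tbh entries are halfwords, so the index register is added a
   second time below (addr = rn + 2 * rm); the loaded entry is a
   forward branch offset in halfwords, hence the final shift.  */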
7505 if (insn & (1 << 4)) {
7506 /* tbh */
7507 tcg_gen_add_i32(addr, addr, tmp);
7508 dead_tmp(tmp);
7509 tmp = gen_ld16u(addr, IS_USER(s));
7510 } else { /* tbb */
7511 dead_tmp(tmp);
7512 tmp = gen_ld8u(addr, IS_USER(s));
7513 }
7514 dead_tmp(addr);
7515 tcg_gen_shli_i32(tmp, tmp, 1);
7516 tcg_gen_addi_i32(tmp, tmp, s->pc);
7517 store_reg(s, 15, tmp);
7518 } else {
7519 /* Load/store exclusive byte/halfword/doubleword. */
7520 ARCH(7);
7521 op = (insn >> 4) & 0x3;
7522 if (op == 2) {
7523 goto illegal_op;
7524 }
7525 addr = tcg_temp_local_new();
7526 load_reg_var(s, addr, rn);
7527 if (insn & (1 << 20)) {
7528 gen_load_exclusive(s, rs, rd, addr, op);
7529 } else {
7530 gen_store_exclusive(s, rm, rs, rd, addr, op);
7531 }
7532 tcg_temp_free(addr);
7533 }
7534 } else {
7535 /* Load/store multiple, RFE, SRS. */
7536 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7537 /* Not available in user mode. */
7538 if (IS_USER(s))
7539 goto illegal_op;
7540 if (insn & (1 << 20)) {
7541 /* rfe */
7542 addr = load_reg(s, rn);
7543 if ((insn & (1 << 24)) == 0)
7544 tcg_gen_addi_i32(addr, addr, -8);
7545 /* Load PC into tmp and CPSR into tmp2. */
7546 tmp = gen_ld32(addr, 0);
7547 tcg_gen_addi_i32(addr, addr, 4);
7548 tmp2 = gen_ld32(addr, 0);
7549 if (insn & (1 << 21)) {
7550 /* Base writeback. */
7551 if (insn & (1 << 24)) {
7552 tcg_gen_addi_i32(addr, addr, 4);
7553 } else {
7554 tcg_gen_addi_i32(addr, addr, -4);
7555 }
7556 store_reg(s, rn, addr);
7557 } else {
7558 dead_tmp(addr);
7559 }
7560 gen_rfe(s, tmp, tmp2);
7561 } else {
7562 /* srs */
7563 op = (insn & 0x1f);
7564 addr = new_tmp();
7565 tmp = tcg_const_i32(op);
7566 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7567 tcg_temp_free_i32(tmp);
7568 if ((insn & (1 << 24)) == 0) {
7569 tcg_gen_addi_i32(addr, addr, -8);
7570 }
7571 tmp = load_reg(s, 14);
7572 gen_st32(tmp, addr, 0);
7573 tcg_gen_addi_i32(addr, addr, 4);
7574 tmp = new_tmp();
7575 gen_helper_cpsr_read(tmp);
7576 gen_st32(tmp, addr, 0);
7577 if (insn & (1 << 21)) {
7578 if ((insn & (1 << 24)) == 0) {
7579 tcg_gen_addi_i32(addr, addr, -4);
7580 } else {
7581 tcg_gen_addi_i32(addr, addr, 4);
7582 }
7583 tmp = tcg_const_i32(op);
7584 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7585 tcg_temp_free_i32(tmp);
7586 } else {
7587 dead_tmp(addr);
7588 }
7589 }
7590 } else {
7591 int i;
7592 /* Load/store multiple. */
7593 addr = load_reg(s, rn);
7594 offset = 0;
7595 for (i = 0; i < 16; i++) {
7596 if (insn & (1 << i))
7597 offset += 4;
7598 }
7599 if (insn & (1 << 24)) {
7600 tcg_gen_addi_i32(addr, addr, -offset);
7601 }
7602
7603 for (i = 0; i < 16; i++) {
7604 if ((insn & (1 << i)) == 0)
7605 continue;
7606 if (insn & (1 << 20)) {
7607 /* Load. */
7608 tmp = gen_ld32(addr, IS_USER(s));
7609 if (i == 15) {
7610 gen_bx(s, tmp);
7611 } else {
7612 store_reg(s, i, tmp);
7613 }
7614 } else {
7615 /* Store. */
7616 tmp = load_reg(s, i);
7617 gen_st32(tmp, addr, IS_USER(s));
7618 }
7619 tcg_gen_addi_i32(addr, addr, 4);
7620 }
7621 if (insn & (1 << 21)) {
7622 /* Base register writeback. */
7623 if (insn & (1 << 24)) {
7624 tcg_gen_addi_i32(addr, addr, -offset);
7625 }
7626 /* Fault if writeback register is in register list. */
7627 if (insn & (1 << rn))
7628 goto illegal_op;
7629 store_reg(s, rn, addr);
7630 } else {
7631 dead_tmp(addr);
7632 }
7633 }
7634 }
7635 break;
7636 case 5:
7637
7638 op = (insn >> 21) & 0xf;
7639 if (op == 6) {
7640 /* Halfword pack. */
7641 tmp = load_reg(s, rn);
7642 tmp2 = load_reg(s, rm);
7643 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7644 if (insn & (1 << 5)) {
7645 /* pkhtb */
7646 if (shift == 0)
7647 shift = 31;
7648 tcg_gen_sari_i32(tmp2, tmp2, shift);
7649 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7650 tcg_gen_ext16u_i32(tmp2, tmp2);
7651 } else {
7652 /* pkhbt */
7653 if (shift)
7654 tcg_gen_shli_i32(tmp2, tmp2, shift);
7655 tcg_gen_ext16u_i32(tmp, tmp);
7656 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7657 }
7658 tcg_gen_or_i32(tmp, tmp, tmp2);
7659 dead_tmp(tmp2);
7660 store_reg(s, rd, tmp);
7661 } else {
7662 /* Data processing register constant shift. */
7663 if (rn == 15) {
7664 tmp = new_tmp();
7665 tcg_gen_movi_i32(tmp, 0);
7666 } else {
7667 tmp = load_reg(s, rn);
7668 }
7669 tmp2 = load_reg(s, rm);
7670
7671 shiftop = (insn >> 4) & 3;
7672 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7673 conds = (insn & (1 << 20)) != 0;
7674 logic_cc = (conds && thumb2_logic_op(op));
7675 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7676 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7677 goto illegal_op;
7678 dead_tmp(tmp2);
7679 if (rd != 15) {
7680 store_reg(s, rd, tmp);
7681 } else {
7682 dead_tmp(tmp);
7683 }
7684 }
7685 break;
7686 case 13: /* Misc data processing. */
7687 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7688 if (op < 4 && (insn & 0xf000) != 0xf000)
7689 goto illegal_op;
7690 switch (op) {
7691 case 0: /* Register controlled shift. */
7692 tmp = load_reg(s, rn);
7693 tmp2 = load_reg(s, rm);
7694 if ((insn & 0x70) != 0)
7695 goto illegal_op;
7696 op = (insn >> 21) & 3;
7697 logic_cc = (insn & (1 << 20)) != 0;
7698 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7699 if (logic_cc)
7700 gen_logic_CC(tmp);
7701 store_reg_bx(env, s, rd, tmp);
7702 break;
7703 case 1: /* Sign/zero extend. */
7704 tmp = load_reg(s, rm);
7705 shift = (insn >> 4) & 3;
7706 /* ??? In many cases it's not necessary to do a
7707 rotate; a shift is sufficient. */
7708 if (shift != 0)
7709 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7710 op = (insn >> 20) & 7;
7711 switch (op) {
7712 case 0: gen_sxth(tmp); break;
7713 case 1: gen_uxth(tmp); break;
7714 case 2: gen_sxtb16(tmp); break;
7715 case 3: gen_uxtb16(tmp); break;
7716 case 4: gen_sxtb(tmp); break;
7717 case 5: gen_uxtb(tmp); break;
7718 default: goto illegal_op;
7719 }
7720 if (rn != 15) {
7721 tmp2 = load_reg(s, rn);
7722 if ((op >> 1) == 1) {
7723 gen_add16(tmp, tmp2);
7724 } else {
7725 tcg_gen_add_i32(tmp, tmp, tmp2);
7726 dead_tmp(tmp2);
7727 }
7728 }
7729 store_reg(s, rd, tmp);
7730 break;
7731 case 2: /* SIMD add/subtract. */
7732 op = (insn >> 20) & 7;
7733 shift = (insn >> 4) & 7;
7734 if ((op & 3) == 3 || (shift & 3) == 3)
7735 goto illegal_op;
7736 tmp = load_reg(s, rn);
7737 tmp2 = load_reg(s, rm);
7738 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7739 dead_tmp(tmp2);
7740 store_reg(s, rd, tmp);
7741 break;
7742 case 3: /* Other data processing. */
7743 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7744 if (op < 4) {
7745 /* Saturating add/subtract. */
7746 tmp = load_reg(s, rn);
7747 tmp2 = load_reg(s, rm);
7748 if (op & 1)
7749 gen_helper_double_saturate(tmp, tmp);
7750 if (op & 2)
7751 gen_helper_sub_saturate(tmp, tmp2, tmp);
7752 else
7753 gen_helper_add_saturate(tmp, tmp, tmp2);
7754 dead_tmp(tmp2);
7755 } else {
7756 tmp = load_reg(s, rn);
7757 switch (op) {
7758 case 0x0a: /* rbit */
7759 gen_helper_rbit(tmp, tmp);
7760 break;
7761 case 0x08: /* rev */
7762 tcg_gen_bswap32_i32(tmp, tmp);
7763 break;
7764 case 0x09: /* rev16 */
7765 gen_rev16(tmp);
7766 break;
7767 case 0x0b: /* revsh */
7768 gen_revsh(tmp);
7769 break;
7770 case 0x10: /* sel */
7771 tmp2 = load_reg(s, rm);
7772 tmp3 = new_tmp();
7773 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7774 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7775 dead_tmp(tmp3);
7776 dead_tmp(tmp2);
7777 break;
7778 case 0x18: /* clz */
7779 gen_helper_clz(tmp, tmp);
7780 break;
7781 default:
7782 goto illegal_op;
7783 }
7784 }
7785 store_reg(s, rd, tmp);
7786 break;
7787 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7788 op = (insn >> 4) & 0xf;
7789 tmp = load_reg(s, rn);
7790 tmp2 = load_reg(s, rm);
7791 switch ((insn >> 20) & 7) {
7792 case 0: /* 32 x 32 -> 32 */
7793 tcg_gen_mul_i32(tmp, tmp, tmp2);
7794 dead_tmp(tmp2);
7795 if (rs != 15) {
7796 tmp2 = load_reg(s, rs);
7797 if (op)
7798 tcg_gen_sub_i32(tmp, tmp2, tmp);
7799 else
7800 tcg_gen_add_i32(tmp, tmp, tmp2);
7801 dead_tmp(tmp2);
7802 }
7803 break;
7804 case 1: /* 16 x 16 -> 32 */
7805 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7806 dead_tmp(tmp2);
7807 if (rs != 15) {
7808 tmp2 = load_reg(s, rs);
7809 gen_helper_add_setq(tmp, tmp, tmp2);
7810 dead_tmp(tmp2);
7811 }
7812 break;
7813 case 2: /* Dual multiply add. */
7814 case 4: /* Dual multiply subtract. */
7815 if (op)
7816 gen_swap_half(tmp2);
7817 gen_smul_dual(tmp, tmp2);
7818 /* This addition cannot overflow. */
7819 if (insn & (1 << 22)) {
7820 tcg_gen_sub_i32(tmp, tmp, tmp2);
7821 } else {
7822 tcg_gen_add_i32(tmp, tmp, tmp2);
7823 }
7824 dead_tmp(tmp2);
7825 if (rs != 15)
7826 {
7827 tmp2 = load_reg(s, rs);
7828 gen_helper_add_setq(tmp, tmp, tmp2);
7829 dead_tmp(tmp2);
7830 }
7831 break;
7832 case 3: /* 32 * 16 -> 32msb */
7833 if (op)
7834 tcg_gen_sari_i32(tmp2, tmp2, 16);
7835 else
7836 gen_sxth(tmp2);
7837 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7838 tcg_gen_shri_i64(tmp64, tmp64, 16);
7839 tmp = new_tmp();
7840 tcg_gen_trunc_i64_i32(tmp, tmp64);
7841 tcg_temp_free_i64(tmp64);
7842 if (rs != 15)
7843 {
7844 tmp2 = load_reg(s, rs);
7845 gen_helper_add_setq(tmp, tmp, tmp2);
7846 dead_tmp(tmp2);
7847 }
7848 break;
7849 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7850 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7851 if (rs != 15) {
7852 tmp = load_reg(s, rs);
7853 if (insn & (1 << 20)) {
7854 tmp64 = gen_addq_msw(tmp64, tmp);
7855 } else {
7856 tmp64 = gen_subq_msw(tmp64, tmp);
7857 }
7858 }
7859 if (insn & (1 << 4)) {
7860 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7861 }
7862 tcg_gen_shri_i64(tmp64, tmp64, 32);
7863 tmp = new_tmp();
7864 tcg_gen_trunc_i64_i32(tmp, tmp64);
7865 tcg_temp_free_i64(tmp64);
7866 break;
7867 case 7: /* Unsigned sum of absolute differences. */
7868 gen_helper_usad8(tmp, tmp, tmp2);
7869 dead_tmp(tmp2);
7870 if (rs != 15) {
7871 tmp2 = load_reg(s, rs);
7872 tcg_gen_add_i32(tmp, tmp, tmp2);
7873 dead_tmp(tmp2);
7874 }
7875 break;
7876 }
7877 store_reg(s, rd, tmp);
7878 break;
7879 case 6: case 7: /* 64-bit multiply, Divide. */
7880 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7881 tmp = load_reg(s, rn);
7882 tmp2 = load_reg(s, rm);
7883 if ((op & 0x50) == 0x10) {
7884 /* sdiv, udiv */
7885 if (!arm_feature(env, ARM_FEATURE_DIV))
7886 goto illegal_op;
7887 if (op & 0x20)
7888 gen_helper_udiv(tmp, tmp, tmp2);
7889 else
7890 gen_helper_sdiv(tmp, tmp, tmp2);
7891 dead_tmp(tmp2);
7892 store_reg(s, rd, tmp);
7893 } else if ((op & 0xe) == 0xc) {
7894 /* Dual multiply accumulate long. */
7895 if (op & 1)
7896 gen_swap_half(tmp2);
7897 gen_smul_dual(tmp, tmp2);
7898 if (op & 0x10) {
7899 tcg_gen_sub_i32(tmp, tmp, tmp2);
7900 } else {
7901 tcg_gen_add_i32(tmp, tmp, tmp2);
7902 }
7903 dead_tmp(tmp2);
7904 /* BUGFIX */
7905 tmp64 = tcg_temp_new_i64();
7906 tcg_gen_ext_i32_i64(tmp64, tmp);
7907 dead_tmp(tmp);
7908 gen_addq(s, tmp64, rs, rd);
7909 gen_storeq_reg(s, rs, rd, tmp64);
7910 tcg_temp_free_i64(tmp64);
7911 } else {
7912 if (op & 0x20) {
7913 /* Unsigned 64-bit multiply */
7914 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7915 } else {
7916 if (op & 8) {
7917 /* smlalxy */
7918 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7919 dead_tmp(tmp2);
7920 tmp64 = tcg_temp_new_i64();
7921 tcg_gen_ext_i32_i64(tmp64, tmp);
7922 dead_tmp(tmp);
7923 } else {
7924 /* Signed 64-bit multiply */
7925 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7926 }
7927 }
7928 if (op & 4) {
7929 /* umaal */
7930 gen_addq_lo(s, tmp64, rs);
7931 gen_addq_lo(s, tmp64, rd);
7932 } else if (op & 0x40) {
7933 /* 64-bit accumulate. */
7934 gen_addq(s, tmp64, rs, rd);
7935 }
7936 gen_storeq_reg(s, rs, rd, tmp64);
7937 tcg_temp_free_i64(tmp64);
7938 }
7939 break;
7940 }
7941 break;
7942 case 6: case 7: case 14: case 15:
7943 /* Coprocessor. */
7944 if (((insn >> 24) & 3) == 3) {
7945 /* Translate into the equivalent ARM encoding. */
7946 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7947 if (disas_neon_data_insn(env, s, insn))
7948 goto illegal_op;
7949 } else {
7950 if (insn & (1 << 28))
7951 goto illegal_op;
7952 if (disas_coproc_insn (env, s, insn))
7953 goto illegal_op;
7954 }
7955 break;
7956 case 8: case 9: case 10: case 11:
7957 if (insn & (1 << 15)) {
7958 /* Branches, misc control. */
7959 if (insn & 0x5000) {
7960 /* Unconditional branch. */
7961 /* signextend(hw1[10:0]) -> offset[31:12]. */
7962 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7963 /* hw1[10:0] -> offset[11:1]. */
7964 offset |= (insn & 0x7ff) << 1;
7965 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7966 offset[24:22] already have the same value because of the
7967 sign extension above. */
7968 offset ^= ((~insn) & (1 << 13)) << 10;
7969 offset ^= ((~insn) & (1 << 11)) << 11;
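/* I.e. offset[23] = ~(S ^ J1) and offset[22] = ~(S ^ J2), with
   J1 = hw2[13], J2 = hw2[11] and S already placed by the sign
   extension above.  */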
7970
7971 if (insn & (1 << 14)) {
7972 /* Branch and link. */
7973 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
7974 }
7975
7976 offset += s->pc;
7977 if (insn & (1 << 12)) {
7978 /* b/bl */
7979 gen_jmp(s, offset);
7980 } else {
7981 /* blx */
7982 offset &= ~(uint32_t)2;
7983 gen_bx_im(s, offset);
7984 }
7985 } else if (((insn >> 23) & 7) == 7) {
7986 /* Misc control */
7987 if (insn & (1 << 13))
7988 goto illegal_op;
7989
7990 if (insn & (1 << 26)) {
7991 /* Secure monitor call (v6Z) */
7992 goto illegal_op; /* not implemented. */
7993 } else {
7994 op = (insn >> 20) & 7;
7995 switch (op) {
7996 case 0: /* msr cpsr. */
7997 if (IS_M(env)) {
7998 tmp = load_reg(s, rn);
7999 addr = tcg_const_i32(insn & 0xff);
8000 gen_helper_v7m_msr(cpu_env, addr, tmp);
8001 tcg_temp_free_i32(addr);
8002 dead_tmp(tmp);
8003 gen_lookup_tb(s);
8004 break;
8005 }
8006 /* fall through */
8007 case 1: /* msr spsr. */
8008 if (IS_M(env))
8009 goto illegal_op;
8010 tmp = load_reg(s, rn);
8011 if (gen_set_psr(s,
8012 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8013 op == 1, tmp))
8014 goto illegal_op;
8015 break;
8016 case 2: /* cps, nop-hint. */
8017 if (((insn >> 8) & 7) == 0) {
8018 gen_nop_hint(s, insn & 0xff);
8019 }
8020 /* Implemented as NOP in user mode. */
8021 if (IS_USER(s))
8022 break;
8023 offset = 0;
8024 imm = 0;
8025 if (insn & (1 << 10)) {
8026 if (insn & (1 << 7))
8027 offset |= CPSR_A;
8028 if (insn & (1 << 6))
8029 offset |= CPSR_I;
8030 if (insn & (1 << 5))
8031 offset |= CPSR_F;
8032 if (insn & (1 << 9))
8033 imm = CPSR_A | CPSR_I | CPSR_F;
8034 }
8035 if (insn & (1 << 8)) {
8036 offset |= 0x1f;
8037 imm |= (insn & 0x1f);
8038 }
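/* offset is the mask of CPSR bits to change and imm their new
   values: cpsid (bit 9) sets A/I/F, cpsie clears them, and bit 8
   additionally selects a mode change.  */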
8039 if (offset) {
8040 gen_set_psr_im(s, offset, 0, imm);
8041 }
8042 break;
8043 case 3: /* Special control operations. */
8044 ARCH(7);
8045 op = (insn >> 4) & 0xf;
8046 switch (op) {
8047 case 2: /* clrex */
8048 gen_clrex(s);
8049 break;
8050 case 4: /* dsb */
8051 case 5: /* dmb */
8052 case 6: /* isb */
8053 /* These execute as NOPs. */
8054 break;
8055 default:
8056 goto illegal_op;
8057 }
8058 break;
8059 case 4: /* bxj */
8060 /* Trivial implementation equivalent to bx. */
8061 tmp = load_reg(s, rn);
8062 gen_bx(s, tmp);
8063 break;
8064 case 5: /* Exception return. */
8065 if (IS_USER(s)) {
8066 goto illegal_op;
8067 }
8068 if (rn != 14 || rd != 15) {
8069 goto illegal_op;
8070 }
8071 tmp = load_reg(s, rn);
8072 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8073 gen_exception_return(s, tmp);
8074 break;
8075 case 6: /* mrs cpsr. */
8076 tmp = new_tmp();
8077 if (IS_M(env)) {
8078 addr = tcg_const_i32(insn & 0xff);
8079 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8080 tcg_temp_free_i32(addr);
8081 } else {
8082 gen_helper_cpsr_read(tmp);
8083 }
8084 store_reg(s, rd, tmp);
8085 break;
8086 case 7: /* mrs spsr. */
8087 /* Not accessible in user mode. */
8088 if (IS_USER(s) || IS_M(env))
8089 goto illegal_op;
8090 tmp = load_cpu_field(spsr);
8091 store_reg(s, rd, tmp);
8092 break;
8093 }
8094 }
8095 } else {
8096 /* Conditional branch. */
8097 op = (insn >> 22) & 0xf;
8098 /* Generate a conditional jump to next instruction. */
8099 s->condlabel = gen_new_label();
8100 gen_test_cc(op ^ 1, s->condlabel);
8101 s->condjmp = 1;
8102
8103 /* offset[11:1] = insn[10:0] */
8104 offset = (insn & 0x7ff) << 1;
8105 /* offset[17:12] = insn[21:16]. */
8106 offset |= (insn & 0x003f0000) >> 4;
8107 /* offset[31:20] = insn[26]. */
8108 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8109 /* offset[18] = insn[13]. */
8110 offset |= (insn & (1 << 13)) << 5;
8111 /* offset[19] = insn[11]. */
8112 offset |= (insn & (1 << 11)) << 8;
8113
8114 /* jump to the offset */
8115 gen_jmp(s, s->pc + offset);
8116 }
8117 } else {
8118 /* Data processing immediate. */
8119 if (insn & (1 << 25)) {
8120 if (insn & (1 << 24)) {
8121 if (insn & (1 << 20))
8122 goto illegal_op;
8123 /* Bitfield/Saturate. */
8124 op = (insn >> 21) & 7;
8125 imm = insn & 0x1f;
8126 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8127 if (rn == 15) {
8128 tmp = new_tmp();
8129 tcg_gen_movi_i32(tmp, 0);
8130 } else {
8131 tmp = load_reg(s, rn);
8132 }
8133 switch (op) {
8134 case 2: /* Signed bitfield extract. */
8135 imm++;
8136 if (shift + imm > 32)
8137 goto illegal_op;
8138 if (imm < 32)
8139 gen_sbfx(tmp, shift, imm);
8140 break;
8141 case 6: /* Unsigned bitfield extract. */
8142 imm++;
8143 if (shift + imm > 32)
8144 goto illegal_op;
8145 if (imm < 32)
8146 gen_ubfx(tmp, shift, (1u << imm) - 1);
8147 break;
8148 case 3: /* Bitfield insert/clear. */
8149 if (imm < shift)
8150 goto illegal_op;
8151 imm = imm + 1 - shift;
8152 if (imm != 32) {
8153 tmp2 = load_reg(s, rd);
8154 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8155 dead_tmp(tmp2);
8156 }
8157 break;
8158 case 7:
8159 goto illegal_op;
8160 default: /* Saturate. */
8161 if (shift) {
8162 if (op & 1)
8163 tcg_gen_sari_i32(tmp, tmp, shift);
8164 else
8165 tcg_gen_shli_i32(tmp, tmp, shift);
8166 }
8167 tmp2 = tcg_const_i32(imm);
8168 if (op & 4) {
8169 /* Unsigned. */
8170 if ((op & 1) && shift == 0)
8171 gen_helper_usat16(tmp, tmp, tmp2);
8172 else
8173 gen_helper_usat(tmp, tmp, tmp2);
8174 } else {
8175 /* Signed. */
8176 if ((op & 1) && shift == 0)
8177 gen_helper_ssat16(tmp, tmp, tmp2);
8178 else
8179 gen_helper_ssat(tmp, tmp, tmp2);
8180 }
8181 tcg_temp_free_i32(tmp2);
8182 break;
8183 }
8184 store_reg(s, rd, tmp);
8185 } else {
8186 imm = ((insn & 0x04000000) >> 15)
8187 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8188 if (insn & (1 << 22)) {
8189 /* 16-bit immediate. */
8190 imm |= (insn >> 4) & 0xf000;
8191 if (insn & (1 << 23)) {
8192 /* movt */
8193 tmp = load_reg(s, rd);
8194 tcg_gen_ext16u_i32(tmp, tmp);
8195 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8196 } else {
8197 /* movw */
8198 tmp = new_tmp();
8199 tcg_gen_movi_i32(tmp, imm);
8200 }
8201 } else {
8202 /* Add/sub 12-bit immediate. */
8203 if (rn == 15) {
8204 offset = s->pc & ~(uint32_t)3;
8205 if (insn & (1 << 23))
8206 offset -= imm;
8207 else
8208 offset += imm;
8209 tmp = new_tmp();
8210 tcg_gen_movi_i32(tmp, offset);
8211 } else {
8212 tmp = load_reg(s, rn);
8213 if (insn & (1 << 23))
8214 tcg_gen_subi_i32(tmp, tmp, imm);
8215 else
8216 tcg_gen_addi_i32(tmp, tmp, imm);
8217 }
8218 }
8219 store_reg(s, rd, tmp);
8220 }
8221 } else {
8222 int shifter_out = 0;
8223 /* modified 12-bit immediate. */
8224 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8225 imm = (insn & 0xff);
8226 switch (shift) {
8227 case 0: /* XY */
8228 /* Nothing to do. */
8229 break;
8230 case 1: /* 00XY00XY */
8231 imm |= imm << 16;
8232 break;
8233 case 2: /* XY00XY00 */
8234 imm |= imm << 16;
8235 imm <<= 8;
8236 break;
8237 case 3: /* XYXYXYXY */
8238 imm |= imm << 16;
8239 imm |= imm << 8;
8240 break;
8241 default: /* Rotated constant. */
8242 shift = (shift << 1) | (imm >> 7);
8243 imm |= 0x80;
8244 imm = imm << (32 - shift);
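/* Equivalent to ROR(0x80 | imm8[6:0], shift) with shift in
   [8, 31], e.g. shift = 8 places the forced-to-1 top bit and
   imm8[6:0] in the high byte.  */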
8245 shifter_out = 1;
8246 break;
8247 }
8248 tmp2 = new_tmp();
8249 tcg_gen_movi_i32(tmp2, imm);
8250 rn = (insn >> 16) & 0xf;
8251 if (rn == 15) {
8252 tmp = new_tmp();
8253 tcg_gen_movi_i32(tmp, 0);
8254 } else {
8255 tmp = load_reg(s, rn);
8256 }
8257 op = (insn >> 21) & 0xf;
8258 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8259 shifter_out, tmp, tmp2))
8260 goto illegal_op;
8261 dead_tmp(tmp2);
8262 rd = (insn >> 8) & 0xf;
8263 if (rd != 15) {
8264 store_reg(s, rd, tmp);
8265 } else {
8266 dead_tmp(tmp);
8267 }
8268 }
8269 }
8270 break;
8271 case 12: /* Load/store single data item. */
8272 {
8273 int postinc = 0;
8274 int writeback = 0;
8275 int user;
8276 if ((insn & 0x01100000) == 0x01000000) {
8277 if (disas_neon_ls_insn(env, s, insn))
8278 goto illegal_op;
8279 break;
8280 }
8281 user = IS_USER(s);
8282 if (rn == 15) {
8283 addr = new_tmp();
8284 /* PC relative. */
8285 /* s->pc has already been incremented by 4. */
8286 imm = s->pc & 0xfffffffc;
8287 if (insn & (1 << 23))
8288 imm += insn & 0xfff;
8289 else
8290 imm -= insn & 0xfff;
8291 tcg_gen_movi_i32(addr, imm);
8292 } else {
8293 addr = load_reg(s, rn);
8294 if (insn & (1 << 23)) {
8295 /* Positive offset. */
8296 imm = insn & 0xfff;
8297 tcg_gen_addi_i32(addr, addr, imm);
8298 } else {
8299 op = (insn >> 8) & 7;
8300 imm = insn & 0xff;
8301 switch (op) {
8302 case 0: case 8: /* Shifted Register. */
8303 shift = (insn >> 4) & 0xf;
8304 if (shift > 3)
8305 goto illegal_op;
8306 tmp = load_reg(s, rm);
8307 if (shift)
8308 tcg_gen_shli_i32(tmp, tmp, shift);
8309 tcg_gen_add_i32(addr, addr, tmp);
8310 dead_tmp(tmp);
8311 break;
8312 case 4: /* Negative offset. */
8313 tcg_gen_addi_i32(addr, addr, -imm);
8314 break;
8315 case 6: /* User privilege. */
8316 tcg_gen_addi_i32(addr, addr, imm);
8317 user = 1;
8318 break;
8319 case 1: /* Post-decrement. */
8320 imm = -imm;
8321 /* Fall through. */
8322 case 3: /* Post-increment. */
8323 postinc = 1;
8324 writeback = 1;
8325 break;
8326 case 5: /* Pre-decrement. */
8327 imm = -imm;
8328 /* Fall through. */
8329 case 7: /* Pre-increment. */
8330 tcg_gen_addi_i32(addr, addr, imm);
8331 writeback = 1;
8332 break;
8333 default:
8334 goto illegal_op;
8335 }
8336 }
8337 }
8338 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
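/* op = S:size with size 0/1/2 = byte/halfword/word and S
   (insn bit 24) marking the sign-extending loads, giving the
   case values 0, 1, 2, 4 and 5 used below.  */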
8339 if (insn & (1 << 20)) {
8340 /* Load. */
8341 if (rs == 15 && op != 2) {
8342 if (op & 2)
8343 goto illegal_op;
8344 /* Memory hint. Implemented as NOP. */
8345 } else {
8346 switch (op) {
8347 case 0: tmp = gen_ld8u(addr, user); break;
8348 case 4: tmp = gen_ld8s(addr, user); break;
8349 case 1: tmp = gen_ld16u(addr, user); break;
8350 case 5: tmp = gen_ld16s(addr, user); break;
8351 case 2: tmp = gen_ld32(addr, user); break;
8352 default: goto illegal_op;
8353 }
8354 if (rs == 15) {
8355 gen_bx(s, tmp);
8356 } else {
8357 store_reg(s, rs, tmp);
8358 }
8359 }
8360 } else {
8361 /* Store. */
8362 if (rs == 15)
8363 goto illegal_op;
8364 tmp = load_reg(s, rs);
8365 switch (op) {
8366 case 0: gen_st8(tmp, addr, user); break;
8367 case 1: gen_st16(tmp, addr, user); break;
8368 case 2: gen_st32(tmp, addr, user); break;
8369 default: goto illegal_op;
8370 }
8371 }
8372 if (postinc)
8373 tcg_gen_addi_i32(addr, addr, imm);
8374 if (writeback) {
8375 store_reg(s, rn, addr);
8376 } else {
8377 dead_tmp(addr);
8378 }
8379 }
8380 break;
8381 default:
8382 goto illegal_op;
8383 }
8384 return 0;
8385 illegal_op:
8386 return 1;
8387 }
8388
8389 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8390 {
8391 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8392 int32_t offset;
8393 int i;
8394 TCGv tmp;
8395 TCGv tmp2;
8396 TCGv addr;
8397
8398 if (s->condexec_mask) {
8399 cond = s->condexec_cond;
8400 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8401 s->condlabel = gen_new_label();
8402 gen_test_cc(cond ^ 1, s->condlabel);
8403 s->condjmp = 1;
8404 }
8405 }
8406
8407 insn = lduw_code(s->pc);
8408 s->pc += 2;
8409
8410 switch (insn >> 12) {
8411 case 0: case 1:
8412
8413 rd = insn & 7;
8414 op = (insn >> 11) & 3;
8415 if (op == 3) {
8416 /* add/subtract */
8417 rn = (insn >> 3) & 7;
8418 tmp = load_reg(s, rn);
8419 if (insn & (1 << 10)) {
8420 /* immediate */
8421 tmp2 = new_tmp();
8422 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8423 } else {
8424 /* reg */
8425 rm = (insn >> 6) & 7;
8426 tmp2 = load_reg(s, rm);
8427 }
8428 if (insn & (1 << 9)) {
8429 if (s->condexec_mask)
8430 tcg_gen_sub_i32(tmp, tmp, tmp2);
8431 else
8432 gen_helper_sub_cc(tmp, tmp, tmp2);
8433 } else {
8434 if (s->condexec_mask)
8435 tcg_gen_add_i32(tmp, tmp, tmp2);
8436 else
8437 gen_helper_add_cc(tmp, tmp, tmp2);
8438 }
8439 dead_tmp(tmp2);
8440 store_reg(s, rd, tmp);
8441 } else {
8442 /* shift immediate */
8443 rm = (insn >> 3) & 7;
8444 shift = (insn >> 6) & 0x1f;
8445 tmp = load_reg(s, rm);
8446 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8447 if (!s->condexec_mask)
8448 gen_logic_CC(tmp);
8449 store_reg(s, rd, tmp);
8450 }
8451 break;
8452 case 2: case 3:
8453 /* arithmetic large immediate */
8454 op = (insn >> 11) & 3;
8455 rd = (insn >> 8) & 0x7;
8456 if (op == 0) { /* mov */
8457 tmp = new_tmp();
8458 tcg_gen_movi_i32(tmp, insn & 0xff);
8459 if (!s->condexec_mask)
8460 gen_logic_CC(tmp);
8461 store_reg(s, rd, tmp);
8462 } else {
8463 tmp = load_reg(s, rd);
8464 tmp2 = new_tmp();
8465 tcg_gen_movi_i32(tmp2, insn & 0xff);
8466 switch (op) {
8467 case 1: /* cmp */
8468 gen_helper_sub_cc(tmp, tmp, tmp2);
8469 dead_tmp(tmp);
8470 dead_tmp(tmp2);
8471 break;
8472 case 2: /* add */
8473 if (s->condexec_mask)
8474 tcg_gen_add_i32(tmp, tmp, tmp2);
8475 else
8476 gen_helper_add_cc(tmp, tmp, tmp2);
8477 dead_tmp(tmp2);
8478 store_reg(s, rd, tmp);
8479 break;
8480 case 3: /* sub */
8481 if (s->condexec_mask)
8482 tcg_gen_sub_i32(tmp, tmp, tmp2);
8483 else
8484 gen_helper_sub_cc(tmp, tmp, tmp2);
8485 dead_tmp(tmp2);
8486 store_reg(s, rd, tmp);
8487 break;
8488 }
8489 }
8490 break;
8491 case 4:
8492 if (insn & (1 << 11)) {
8493 rd = (insn >> 8) & 7;
8494 /* load pc-relative. Bit 1 of PC is ignored. */
8495 val = s->pc + 2 + ((insn & 0xff) * 4);
8496 val &= ~(uint32_t)2;
8497 addr = new_tmp();
8498 tcg_gen_movi_i32(addr, val);
8499 tmp = gen_ld32(addr, IS_USER(s));
8500 dead_tmp(addr);
8501 store_reg(s, rd, tmp);
8502 break;
8503 }
8504 if (insn & (1 << 10)) {
8505 /* data processing extended or blx */
8506 rd = (insn & 7) | ((insn >> 4) & 8);
8507 rm = (insn >> 3) & 0xf;
8508 op = (insn >> 8) & 3;
8509 switch (op) {
8510 case 0: /* add */
8511 tmp = load_reg(s, rd);
8512 tmp2 = load_reg(s, rm);
8513 tcg_gen_add_i32(tmp, tmp, tmp2);
8514 dead_tmp(tmp2);
8515 store_reg(s, rd, tmp);
8516 break;
8517 case 1: /* cmp */
8518 tmp = load_reg(s, rd);
8519 tmp2 = load_reg(s, rm);
8520 gen_helper_sub_cc(tmp, tmp, tmp2);
8521 dead_tmp(tmp2);
8522 dead_tmp(tmp);
8523 break;
8524 case 2: /* mov/cpy */
8525 tmp = load_reg(s, rm);
8526 store_reg(s, rd, tmp);
8527 break;
8528 case 3:/* branch [and link] exchange thumb register */
8529 tmp = load_reg(s, rm);
8530 if (insn & (1 << 7)) {
8531 val = (uint32_t)s->pc | 1;
8532 tmp2 = new_tmp();
8533 tcg_gen_movi_i32(tmp2, val);
8534 store_reg(s, 14, tmp2);
8535 }
8536 gen_bx(s, tmp);
8537 break;
8538 }
8539 break;
8540 }
8541
8542 /* data processing register */
8543 rd = insn & 7;
8544 rm = (insn >> 3) & 7;
8545 op = (insn >> 6) & 0xf;
8546 if (op == 2 || op == 3 || op == 4 || op == 7) {
8547 /* the shift/rotate ops want the operands backwards */
8548 val = rm;
8549 rm = rd;
8550 rd = val;
8551 val = 1;
8552 } else {
8553 val = 0;
8554 }
8555
8556 if (op == 9) { /* neg */
8557 tmp = new_tmp();
8558 tcg_gen_movi_i32(tmp, 0);
8559 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8560 tmp = load_reg(s, rd);
8561 } else {
8562 TCGV_UNUSED(tmp);
8563 }
8564
8565 tmp2 = load_reg(s, rm);
8566 switch (op) {
8567 case 0x0: /* and */
8568 tcg_gen_and_i32(tmp, tmp, tmp2);
8569 if (!s->condexec_mask)
8570 gen_logic_CC(tmp);
8571 break;
8572 case 0x1: /* eor */
8573 tcg_gen_xor_i32(tmp, tmp, tmp2);
8574 if (!s->condexec_mask)
8575 gen_logic_CC(tmp);
8576 break;
8577 case 0x2: /* lsl */
8578 if (s->condexec_mask) {
8579 gen_helper_shl(tmp2, tmp2, tmp);
8580 } else {
8581 gen_helper_shl_cc(tmp2, tmp2, tmp);
8582 gen_logic_CC(tmp2);
8583 }
8584 break;
8585 case 0x3: /* lsr */
8586 if (s->condexec_mask) {
8587 gen_helper_shr(tmp2, tmp2, tmp);
8588 } else {
8589 gen_helper_shr_cc(tmp2, tmp2, tmp);
8590 gen_logic_CC(tmp2);
8591 }
8592 break;
8593 case 0x4: /* asr */
8594 if (s->condexec_mask) {
8595 gen_helper_sar(tmp2, tmp2, tmp);
8596 } else {
8597 gen_helper_sar_cc(tmp2, tmp2, tmp);
8598 gen_logic_CC(tmp2);
8599 }
8600 break;
8601 case 0x5: /* adc */
8602 if (s->condexec_mask)
8603 gen_adc(tmp, tmp2);
8604 else
8605 gen_helper_adc_cc(tmp, tmp, tmp2);
8606 break;
8607 case 0x6: /* sbc */
8608 if (s->condexec_mask)
8609 gen_sub_carry(tmp, tmp, tmp2);
8610 else
8611 gen_helper_sbc_cc(tmp, tmp, tmp2);
8612 break;
8613 case 0x7: /* ror */
8614 if (s->condexec_mask) {
8615 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8616 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
8617 } else {
8618 gen_helper_ror_cc(tmp2, tmp2, tmp);
8619 gen_logic_CC(tmp2);
8620 }
8621 break;
8622 case 0x8: /* tst */
8623 tcg_gen_and_i32(tmp, tmp, tmp2);
8624 gen_logic_CC(tmp);
8625 rd = 16;
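/* rd = 16 is a sentinel meaning "no destination": the result is
   only used for the flags and both temps are freed below.  */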
8626 break;
8627 case 0x9: /* neg */
8628 if (s->condexec_mask)
8629 tcg_gen_neg_i32(tmp, tmp2);
8630 else
8631 gen_helper_sub_cc(tmp, tmp, tmp2);
8632 break;
8633 case 0xa: /* cmp */
8634 gen_helper_sub_cc(tmp, tmp, tmp2);
8635 rd = 16;
8636 break;
8637 case 0xb: /* cmn */
8638 gen_helper_add_cc(tmp, tmp, tmp2);
8639 rd = 16;
8640 break;
8641 case 0xc: /* orr */
8642 tcg_gen_or_i32(tmp, tmp, tmp2);
8643 if (!s->condexec_mask)
8644 gen_logic_CC(tmp);
8645 break;
8646 case 0xd: /* mul */
8647 tcg_gen_mul_i32(tmp, tmp, tmp2);
8648 if (!s->condexec_mask)
8649 gen_logic_CC(tmp);
8650 break;
8651 case 0xe: /* bic */
8652 tcg_gen_andc_i32(tmp, tmp, tmp2);
8653 if (!s->condexec_mask)
8654 gen_logic_CC(tmp);
8655 break;
8656 case 0xf: /* mvn */
8657 tcg_gen_not_i32(tmp2, tmp2);
8658 if (!s->condexec_mask)
8659 gen_logic_CC(tmp2);
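            /* mvn leaves its result in tmp2; setting val and rm here reuses
               the tmp2 writeback path below to store it into rd. */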
8660 val = 1;
8661 rm = rd;
8662 break;
8663 }
8664 if (rd != 16) {
8665 if (val) {
8666 store_reg(s, rm, tmp2);
8667 if (op != 0xf)
8668 dead_tmp(tmp);
8669 } else {
8670 store_reg(s, rd, tmp);
8671 dead_tmp(tmp2);
8672 }
8673 } else {
8674 dead_tmp(tmp);
8675 dead_tmp(tmp2);
8676 }
8677 break;
8678
8679 case 5:
8680 /* load/store register offset. */
8681 rd = insn & 7;
8682 rn = (insn >> 3) & 7;
8683 rm = (insn >> 6) & 7;
8684 op = (insn >> 9) & 7;
8685 addr = load_reg(s, rn);
8686 tmp = load_reg(s, rm);
8687 tcg_gen_add_i32(addr, addr, tmp);
8688 dead_tmp(tmp);
8689
8690 if (op < 3) /* store */
8691 tmp = load_reg(s, rd);
8692
8693 switch (op) {
8694 case 0: /* str */
8695 gen_st32(tmp, addr, IS_USER(s));
8696 break;
8697 case 1: /* strh */
8698 gen_st16(tmp, addr, IS_USER(s));
8699 break;
8700 case 2: /* strb */
8701 gen_st8(tmp, addr, IS_USER(s));
8702 break;
8703 case 3: /* ldrsb */
8704 tmp = gen_ld8s(addr, IS_USER(s));
8705 break;
8706 case 4: /* ldr */
8707 tmp = gen_ld32(addr, IS_USER(s));
8708 break;
8709 case 5: /* ldrh */
8710 tmp = gen_ld16u(addr, IS_USER(s));
8711 break;
8712 case 6: /* ldrb */
8713 tmp = gen_ld8u(addr, IS_USER(s));
8714 break;
8715 case 7: /* ldrsh */
8716 tmp = gen_ld16s(addr, IS_USER(s));
8717 break;
8718 }
8719 if (op >= 3) /* load */
8720 store_reg(s, rd, tmp);
8721 dead_tmp(addr);
8722 break;
8723
8724 case 6:
8725 /* load/store word immediate offset */
8726 rd = insn & 7;
8727 rn = (insn >> 3) & 7;
8728 addr = load_reg(s, rn);
8729 val = (insn >> 4) & 0x7c;
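            /* imm5 sits in bits 10:6 and is scaled by 4; the single
               shift-and-mask above computes ((insn >> 6) & 0x1f) * 4.  The
               halfword form below plays the same trick with a scale of 2. */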
8730 tcg_gen_addi_i32(addr, addr, val);
8731
8732 if (insn & (1 << 11)) {
8733 /* load */
8734 tmp = gen_ld32(addr, IS_USER(s));
8735 store_reg(s, rd, tmp);
8736 } else {
8737 /* store */
8738 tmp = load_reg(s, rd);
8739 gen_st32(tmp, addr, IS_USER(s));
8740 }
8741 dead_tmp(addr);
8742 break;
8743
8744 case 7:
8745 /* load/store byte immediate offset */
8746 rd = insn & 7;
8747 rn = (insn >> 3) & 7;
8748 addr = load_reg(s, rn);
8749 val = (insn >> 6) & 0x1f;
8750 tcg_gen_addi_i32(addr, addr, val);
8751
8752 if (insn & (1 << 11)) {
8753 /* load */
8754 tmp = gen_ld8u(addr, IS_USER(s));
8755 store_reg(s, rd, tmp);
8756 } else {
8757 /* store */
8758 tmp = load_reg(s, rd);
8759 gen_st8(tmp, addr, IS_USER(s));
8760 }
8761 dead_tmp(addr);
8762 break;
8763
8764 case 8:
8765 /* load/store halfword immediate offset */
8766 rd = insn & 7;
8767 rn = (insn >> 3) & 7;
8768 addr = load_reg(s, rn);
8769 val = (insn >> 5) & 0x3e;
8770 tcg_gen_addi_i32(addr, addr, val);
8771
8772 if (insn & (1 << 11)) {
8773 /* load */
8774 tmp = gen_ld16u(addr, IS_USER(s));
8775 store_reg(s, rd, tmp);
8776 } else {
8777 /* store */
8778 tmp = load_reg(s, rd);
8779 gen_st16(tmp, addr, IS_USER(s));
8780 }
8781 dead_tmp(addr);
8782 break;
8783
8784 case 9:
8785 /* load/store from stack */
8786 rd = (insn >> 8) & 7;
8787 addr = load_reg(s, 13);
8788 val = (insn & 0xff) * 4;
8789 tcg_gen_addi_i32(addr, addr, val);
8790
8791 if (insn & (1 << 11)) {
8792 /* load */
8793 tmp = gen_ld32(addr, IS_USER(s));
8794 store_reg(s, rd, tmp);
8795 } else {
8796 /* store */
8797 tmp = load_reg(s, rd);
8798 gen_st32(tmp, addr, IS_USER(s));
8799 }
8800 dead_tmp(addr);
8801 break;
8802
8803 case 10:
8804 /* add to high reg */
8805 rd = (insn >> 8) & 7;
8806 if (insn & (1 << 11)) {
8807 /* SP */
8808 tmp = load_reg(s, 13);
8809 } else {
8810             /* PC. Bit 1 is ignored. */
8811 tmp = new_tmp();
8812 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8813 }
8814 val = (insn & 0xff) * 4;
8815 tcg_gen_addi_i32(tmp, tmp, val);
8816 store_reg(s, rd, tmp);
8817 break;
8818
8819 case 11:
8820 /* misc */
8821 op = (insn >> 8) & 0xf;
8822 switch (op) {
8823 case 0:
8824 /* adjust stack pointer */
8825 tmp = load_reg(s, 13);
8826 val = (insn & 0x7f) * 4;
8827 if (insn & (1 << 7))
8828 val = -(int32_t)val;
8829 tcg_gen_addi_i32(tmp, tmp, val);
8830 store_reg(s, 13, tmp);
8831 break;
8832
8833 case 2: /* sign/zero extend. */
8834 ARCH(6);
8835 rd = insn & 7;
8836 rm = (insn >> 3) & 7;
8837 tmp = load_reg(s, rm);
8838 switch ((insn >> 6) & 3) {
8839 case 0: gen_sxth(tmp); break;
8840 case 1: gen_sxtb(tmp); break;
8841 case 2: gen_uxth(tmp); break;
8842 case 3: gen_uxtb(tmp); break;
8843 }
8844 store_reg(s, rd, tmp);
8845 break;
8846 case 4: case 5: case 0xc: case 0xd:
8847 /* push/pop */
8848 addr = load_reg(s, 13);
8849 if (insn & (1 << 8))
8850 offset = 4;
8851 else
8852 offset = 0;
8853 for (i = 0; i < 8; i++) {
8854 if (insn & (1 << i))
8855 offset += 4;
8856 }
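            /* offset now holds the total transfer size in bytes: one word
               per low register in the list, plus one more for lr/pc when
               bit 8 is set. */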
8857 if ((insn & (1 << 11)) == 0) {
8858 tcg_gen_addi_i32(addr, addr, -offset);
8859 }
8860 for (i = 0; i < 8; i++) {
8861 if (insn & (1 << i)) {
8862 if (insn & (1 << 11)) {
8863 /* pop */
8864 tmp = gen_ld32(addr, IS_USER(s));
8865 store_reg(s, i, tmp);
8866 } else {
8867 /* push */
8868 tmp = load_reg(s, i);
8869 gen_st32(tmp, addr, IS_USER(s));
8870 }
8871 /* advance to the next address. */
8872 tcg_gen_addi_i32(addr, addr, 4);
8873 }
8874 }
8875 TCGV_UNUSED(tmp);
8876 if (insn & (1 << 8)) {
8877 if (insn & (1 << 11)) {
8878 /* pop pc */
8879 tmp = gen_ld32(addr, IS_USER(s));
8880 /* don't set the pc until the rest of the instruction
8881 has completed */
8882 } else {
8883 /* push lr */
8884 tmp = load_reg(s, 14);
8885 gen_st32(tmp, addr, IS_USER(s));
8886 }
8887 tcg_gen_addi_i32(addr, addr, 4);
8888 }
8889 if ((insn & (1 << 11)) == 0) {
8890 tcg_gen_addi_i32(addr, addr, -offset);
8891 }
8892 /* write back the new stack pointer */
8893 store_reg(s, 13, addr);
8894 /* set the new PC value */
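            /* (insn & 0x0900) == 0x0900 means both the load bit (11) and
               the R bit (8) were set, i.e. a POP that included pc; gen_bx
               honours bit 0 of the loaded value for Thumb interworking. */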
8895 if ((insn & 0x0900) == 0x0900)
8896 gen_bx(s, tmp);
8897 break;
8898
8899             case 1: case 3: case 9: case 11: /* cbz/cbnz */
8900 rm = insn & 7;
8901 tmp = load_reg(s, rm);
8902 s->condlabel = gen_new_label();
8903 s->condjmp = 1;
8904 if (insn & (1 << 11))
8905 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8906 else
8907 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8908 dead_tmp(tmp);
8909                 offset = ((insn & 0xf8) >> 2) | ((insn & 0x200) >> 3);
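                /* imm5 (bits 7:3) scaled by 2, plus the i bit (bit 9) at
                   bit 6: cbz/cbnz can only branch forwards, by 0-126
                   bytes. */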
8910 val = (uint32_t)s->pc + 2;
8911 val += offset;
8912 gen_jmp(s, val);
8913 break;
8914
8915 case 15: /* IT, nop-hint. */
8916 if ((insn & 0xf) == 0) {
8917 gen_nop_hint(s, (insn >> 4) & 0xf);
8918 break;
8919 }
8920 /* If Then. */
8921 s->condexec_cond = (insn >> 4) & 0xe;
8922 s->condexec_mask = insn & 0x1f;
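                /* Only bits 3:1 of firstcond live in condexec_cond; its low
                   bit travels in the 5-bit mask state, from which the
                   per-insn condition is recovered as the block advances
                   (see the IT-state update in
                   gen_intermediate_code_internal). */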
8923             /* No actual code generated for this insn, just set up state. */
8924 break;
8925
8926 case 0xe: /* bkpt */
8927 gen_exception_insn(s, 2, EXCP_BKPT);
8928 break;
8929
8930 case 0xa: /* rev */
8931 ARCH(6);
8932 rn = (insn >> 3) & 0x7;
8933 rd = insn & 0x7;
8934 tmp = load_reg(s, rn);
8935 switch ((insn >> 6) & 3) {
8936 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8937 case 1: gen_rev16(tmp); break;
8938 case 3: gen_revsh(tmp); break;
8939 default: goto illegal_op;
8940 }
8941 store_reg(s, rd, tmp);
8942 break;
8943
8944 case 6: /* cps */
8945 ARCH(6);
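            /* CPS: change processor state.  On M profile this writes
               PRIMASK/FAULTMASK through the v7M msr helper; on A profile it
               sets (CPSID) or clears (CPSIE) the CPSR A/I/F mask bits
               selected by insn bits 2:0, shifted up to CPSR bits 8:6. */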
8946 if (IS_USER(s))
8947 break;
8948 if (IS_M(env)) {
8949 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
8950 /* PRIMASK */
8951 if (insn & 1) {
8952 addr = tcg_const_i32(16);
8953 gen_helper_v7m_msr(cpu_env, addr, tmp);
8954 tcg_temp_free_i32(addr);
8955 }
8956 /* FAULTMASK */
8957 if (insn & 2) {
8958 addr = tcg_const_i32(17);
8959 gen_helper_v7m_msr(cpu_env, addr, tmp);
8960 tcg_temp_free_i32(addr);
8961 }
8962 tcg_temp_free_i32(tmp);
8963 gen_lookup_tb(s);
8964 } else {
8965 if (insn & (1 << 4))
8966 shift = CPSR_A | CPSR_I | CPSR_F;
8967 else
8968 shift = 0;
8969 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8970 }
8971 break;
8972
8973 default:
8974 goto undef;
8975 }
8976 break;
8977
8978 case 12:
8979 /* load/store multiple */
8980 rn = (insn >> 8) & 0x7;
8981 addr = load_reg(s, rn);
8982 for (i = 0; i < 8; i++) {
8983 if (insn & (1 << i)) {
8984 if (insn & (1 << 11)) {
8985 /* load */
8986 tmp = gen_ld32(addr, IS_USER(s));
8987 store_reg(s, i, tmp);
8988 } else {
8989 /* store */
8990 tmp = load_reg(s, i);
8991 gen_st32(tmp, addr, IS_USER(s));
8992 }
8993 /* advance to the next address */
8994 tcg_gen_addi_i32(addr, addr, 4);
8995 }
8996 }
8997 /* Base register writeback. */
8998 if ((insn & (1 << rn)) == 0) {
8999 store_reg(s, rn, addr);
9000 } else {
9001 dead_tmp(addr);
9002 }
9003 break;
9004
9005 case 13:
9006 /* conditional branch or swi */
9007 cond = (insn >> 8) & 0xf;
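            /* cond 0xe is the permanently undefined encoding; 0xf encodes
               swi and is handled below. */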
9008 if (cond == 0xe)
9009 goto undef;
9010
9011 if (cond == 0xf) {
9012 /* swi */
9013 gen_set_pc_im(s->pc);
9014 s->is_jmp = DISAS_SWI;
9015 break;
9016 }
9017 /* generate a conditional jump to next instruction */
9018 s->condlabel = gen_new_label();
9019 gen_test_cc(cond ^ 1, s->condlabel);
9020 s->condjmp = 1;
9021
9022 /* jump to the offset */
9023 val = (uint32_t)s->pc + 2;
9024 offset = ((int32_t)insn << 24) >> 24;
9025 val += offset << 1;
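            /* Target = this insn's address + 4 + SignExtend(imm8) * 2; s->pc
               already points 2 bytes past the insn, hence the extra + 2
               above.  Illustrative encoding: 0xd0fe = beq with imm8 = -2,
               which branches to itself. */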
9026 gen_jmp(s, val);
9027 break;
9028
9029 case 14:
9030 if (insn & (1 << 11)) {
9031 if (disas_thumb2_insn(env, s, insn))
9032 goto undef32;
9033 break;
9034 }
9035 /* unconditional branch */
9036 val = (uint32_t)s->pc;
9037 offset = ((int32_t)insn << 21) >> 21;
9038 val += (offset << 1) + 2;
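            /* Same PC arithmetic with an 11-bit immediate; the well-known
               0xe7fe ("b .") has imm11 = -2 and loops on itself. */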
9039 gen_jmp(s, val);
9040 break;
9041
9042 case 15:
9043 if (disas_thumb2_insn(env, s, insn))
9044 goto undef32;
9045 break;
9046 }
9047 return;
9048 undef32:
9049 gen_exception_insn(s, 4, EXCP_UDEF);
9050 return;
9051 illegal_op:
9052 undef:
9053 gen_exception_insn(s, 2, EXCP_UDEF);
9054 }
9055
9056 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9057 basic block 'tb'. If search_pc is TRUE, also generate PC
9058 information for each intermediate instruction. */
9059 static inline void gen_intermediate_code_internal(CPUState *env,
9060 TranslationBlock *tb,
9061 int search_pc)
9062 {
9063 DisasContext dc1, *dc = &dc1;
9064 CPUBreakpoint *bp;
9065 uint16_t *gen_opc_end;
9066 int j, lj;
9067 target_ulong pc_start;
9068 uint32_t next_page_start;
9069 int num_insns;
9070 int max_insns;
9071
9072 /* generate intermediate code */
9073 num_temps = 0;
9074
9075 pc_start = tb->pc;
9076
9077 dc->tb = tb;
9078
9079 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9080
9081 dc->is_jmp = DISAS_NEXT;
9082 dc->pc = pc_start;
9083 dc->singlestep_enabled = env->singlestep_enabled;
9084 dc->condjmp = 0;
9085 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9086 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9087 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9088 #if !defined(CONFIG_USER_ONLY)
9089 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9090 #endif
9091 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9092 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9093 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9094 cpu_F0s = tcg_temp_new_i32();
9095 cpu_F1s = tcg_temp_new_i32();
9096 cpu_F0d = tcg_temp_new_i64();
9097 cpu_F1d = tcg_temp_new_i64();
9098 cpu_V0 = cpu_F0d;
9099 cpu_V1 = cpu_F1d;
9100 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9101 cpu_M0 = tcg_temp_new_i64();
9102 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9103 lj = -1;
9104 num_insns = 0;
9105 max_insns = tb->cflags & CF_COUNT_MASK;
9106 if (max_insns == 0)
9107 max_insns = CF_COUNT_MASK;
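    /* max_insns now holds the icount budget from tb->cflags, or the
       generic cap when none was requested. */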
9108
9109 gen_icount_start();
9110
9111 /* A note on handling of the condexec (IT) bits:
9112 *
9113 * We want to avoid the overhead of having to write the updated condexec
9114 * bits back to the CPUState for every instruction in an IT block. So:
9115 * (1) if the condexec bits are not already zero then we write
9116 * zero back into the CPUState now. This avoids complications trying
9117 * to do it at the end of the block. (For example if we don't do this
9118 * it's hard to identify whether we can safely skip writing condexec
9119 * at the end of the TB, which we definitely want to do for the case
9120 * where a TB doesn't do anything with the IT state at all.)
9121 * (2) if we are going to leave the TB then we call gen_set_condexec()
9122 * which will write the correct value into CPUState if zero is wrong.
9123 * This is done both for leaving the TB at the end, and for leaving
9124 * it because of an exception we know will happen, which is done in
9125 * gen_exception_insn(). The latter is necessary because we need to
9126 * leave the TB with the PC/IT state just prior to execution of the
9127 * instruction which caused the exception.
9128      * (3) if we leave the TB unexpectedly (e.g. a data abort on a load)
9129 * then the CPUState will be wrong and we need to reset it.
9130 * This is handled in the same way as restoration of the
9131 * PC in these situations: we will be called again with search_pc=1
9132 * and generate a mapping of the condexec bits for each PC in
9133      * gen_opc_condexec_bits[]. gen_pc_load() then uses this to restore
9134 * the condexec bits.
9135 *
9136 * Note that there are no instructions which can read the condexec
9137 * bits, and none which can write non-static values to them, so
9138 * we don't need to care about whether CPUState is correct in the
9139 * middle of a TB.
9140 */
9141
9142 /* Reset the conditional execution bits immediately. This avoids
9143 complications trying to do it at the end of the block. */
9144     if (dc->condexec_mask || dc->condexec_cond) {
9146 TCGv tmp = new_tmp();
9147 tcg_gen_movi_i32(tmp, 0);
9148 store_cpu_field(tmp, condexec_bits);
9149 }
9150 do {
9151 #ifdef CONFIG_USER_ONLY
9152 /* Intercept jump to the magic kernel page. */
9153 if (dc->pc >= 0xffff0000) {
9154             /* We always get here via a jump, so we know we are not in a
9155 conditional execution block. */
9156 gen_exception(EXCP_KERNEL_TRAP);
9157 dc->is_jmp = DISAS_UPDATE;
9158 break;
9159 }
9160 #else
9161 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9162             /* We always get here via a jump, so we know we are not in a
9163 conditional execution block. */
9164 gen_exception(EXCP_EXCEPTION_EXIT);
9165 dc->is_jmp = DISAS_UPDATE;
9166 break;
9167 }
9168 #endif
9169
9170 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9171 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9172 if (bp->pc == dc->pc) {
9173 gen_exception_insn(dc, 0, EXCP_DEBUG);
9174 /* Advance PC so that clearing the breakpoint will
9175 invalidate this TB. */
9176 dc->pc += 2;
9177 goto done_generating;
9178 break;
9179 }
9180 }
9181 }
9182 if (search_pc) {
9183 j = gen_opc_ptr - gen_opc_buf;
9184 if (lj < j) {
9185 lj++;
9186 while (lj < j)
9187 gen_opc_instr_start[lj++] = 0;
9188 }
9189 gen_opc_pc[lj] = dc->pc;
9190 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
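            /* Packed in the same <cond:4><mask:4> layout as CPUState
               condexec_bits, the inverse of the unpacking done above;
               gen_pc_load() copies it back into the CPU on restore. */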
9191 gen_opc_instr_start[lj] = 1;
9192 gen_opc_icount[lj] = num_insns;
9193 }
9194
9195 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9196 gen_io_start();
9197
9198 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9199 tcg_gen_debug_insn_start(dc->pc);
9200 }
9201
9202 if (dc->thumb) {
9203 disas_thumb_insn(env, dc);
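                /* Advance the IT state: bit 4 of the mask supplies the
                   condition LSB for the next insn, and the mask shifts left
                   one place per insn until the block is exhausted. */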
9204 if (dc->condexec_mask) {
9205 dc->condexec_cond = (dc->condexec_cond & 0xe)
9206 | ((dc->condexec_mask >> 4) & 1);
9207 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9208 if (dc->condexec_mask == 0) {
9209 dc->condexec_cond = 0;
9210 }
9211 }
9212 } else {
9213 disas_arm_insn(env, dc);
9214 }
9215 if (num_temps) {
9216 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
9217 num_temps = 0;
9218 }
9219
9220 if (dc->condjmp && !dc->is_jmp) {
9221 gen_set_label(dc->condlabel);
9222 dc->condjmp = 0;
9223 }
9224 /* Translation stops when a conditional branch is encountered.
9225 * Otherwise the subsequent code could get translated several times.
9226 * Also stop translation when a page boundary is reached. This
9227 * ensures prefetch aborts occur at the right place. */
9228         num_insns++;
9229 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9230 !env->singlestep_enabled &&
9231 !singlestep &&
9232 dc->pc < next_page_start &&
9233 num_insns < max_insns);
9234
9235 if (tb->cflags & CF_LAST_IO) {
9236 if (dc->condjmp) {
9237 /* FIXME: This can theoretically happen with self-modifying
9238 code. */
9239 cpu_abort(env, "IO on conditional branch instruction");
9240 }
9241 gen_io_end();
9242 }
9243
9244 /* At this stage dc->condjmp will only be set when the skipped
9245 instruction was a conditional branch or trap, and the PC has
9246 already been written. */
9247 if (unlikely(env->singlestep_enabled)) {
9248 /* Make sure the pc is updated, and raise a debug exception. */
9249 if (dc->condjmp) {
9250 gen_set_condexec(dc);
9251 if (dc->is_jmp == DISAS_SWI) {
9252 gen_exception(EXCP_SWI);
9253 } else {
9254 gen_exception(EXCP_DEBUG);
9255 }
9256 gen_set_label(dc->condlabel);
9257 }
9258 if (dc->condjmp || !dc->is_jmp) {
9259 gen_set_pc_im(dc->pc);
9260 dc->condjmp = 0;
9261 }
9262 gen_set_condexec(dc);
9263 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9264 gen_exception(EXCP_SWI);
9265 } else {
9266 /* FIXME: Single stepping a WFI insn will not halt
9267 the CPU. */
9268 gen_exception(EXCP_DEBUG);
9269 }
9270 } else {
9271 /* While branches must always occur at the end of an IT block,
9272 there are a few other things that can cause us to terminate
9273            the TB in the middle of an IT block:
9274 - Exception generating instructions (bkpt, swi, undefined).
9275 - Page boundaries.
9276 - Hardware watchpoints.
9277 Hardware breakpoints have already been handled and skip this code.
9278 */
9279 gen_set_condexec(dc);
9280         switch (dc->is_jmp) {
9281 case DISAS_NEXT:
9282 gen_goto_tb(dc, 1, dc->pc);
9283 break;
9284 default:
9285 case DISAS_JUMP:
9286 case DISAS_UPDATE:
9287 /* indicate that the hash table must be used to find the next TB */
9288 tcg_gen_exit_tb(0);
9289 break;
9290 case DISAS_TB_JUMP:
9291 /* nothing more to generate */
9292 break;
9293 case DISAS_WFI:
9294 gen_helper_wfi();
9295 break;
9296 case DISAS_SWI:
9297 gen_exception(EXCP_SWI);
9298 break;
9299 }
9300 if (dc->condjmp) {
9301 gen_set_label(dc->condlabel);
9302 gen_set_condexec(dc);
9303 gen_goto_tb(dc, 1, dc->pc);
9304 dc->condjmp = 0;
9305 }
9306 }
9307
9308 done_generating:
9309 gen_icount_end(tb, num_insns);
9310 *gen_opc_ptr = INDEX_op_end;
9311
9312 #ifdef DEBUG_DISAS
9313 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9314 qemu_log("----------------\n");
9315 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9316 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
9317 qemu_log("\n");
9318 }
9319 #endif
9320 if (search_pc) {
9321 j = gen_opc_ptr - gen_opc_buf;
9322 lj++;
9323 while (lj <= j)
9324 gen_opc_instr_start[lj++] = 0;
9325 } else {
9326 tb->size = dc->pc - pc_start;
9327 tb->icount = num_insns;
9328 }
9329 }
9330
9331 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
9332 {
9333 gen_intermediate_code_internal(env, tb, 0);
9334 }
9335
9336 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
9337 {
9338 gen_intermediate_code_internal(env, tb, 1);
9339 }
9340
9341 static const char *cpu_mode_names[16] = {
9342 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9343 "???", "???", "???", "und", "???", "???", "???", "sys"
9344 };
9345
9346 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
9347 int flags)
9348 {
9349 int i;
9350 #if 0
9351 union {
9352 uint32_t i;
9353 float s;
9354 } s0, s1;
9355 CPU_DoubleU d;
9356 /* ??? This assumes float64 and double have the same layout.
9357 Oh well, it's only debug dumps. */
9358 union {
9359 float64 f64;
9360 double d;
9361 } d0;
9362 #endif
9363 uint32_t psr;
9364
9365     for (i = 0; i < 16; i++) {
9366 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9367 if ((i % 4) == 3)
9368 cpu_fprintf(f, "\n");
9369 else
9370 cpu_fprintf(f, " ");
9371 }
9372 psr = cpsr_read(env);
9373 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9374 psr,
9375 psr & (1 << 31) ? 'N' : '-',
9376 psr & (1 << 30) ? 'Z' : '-',
9377 psr & (1 << 29) ? 'C' : '-',
9378 psr & (1 << 28) ? 'V' : '-',
9379 psr & CPSR_T ? 'T' : 'A',
9380 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
9381
9382 #if 0
9383 for (i = 0; i < 16; i++) {
9384 d.d = env->vfp.regs[i];
9385 s0.i = d.l.lower;
9386 s1.i = d.l.upper;
9387 d0.f64 = d.d;
9388 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9389 i * 2, (int)s0.i, s0.s,
9390 i * 2 + 1, (int)s1.i, s1.s,
9391 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
9392 d0.d);
9393 }
9394 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
9395 #endif
9396 }
9397
9398 void gen_pc_load(CPUState *env, TranslationBlock *tb,
9399 unsigned long searched_pc, int pc_pos, void *puc)
9400 {
9401 env->regs[15] = gen_opc_pc[pc_pos];
9402 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
9403 }