target-arm: Refactor to pull narrowing decode into separate function
target-arm/translate.c
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
30 #include "tcg-op.h"
31 #include "qemu-log.h"
32
33 #include "helpers.h"
34 #define GEN_HELPER 1
35 #include "helpers.h"
36
37 #define ENABLE_ARCH_5J 0
38 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
42
43 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
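     /* e.g. ARCH(6T2) branches to the decoder's illegal_op label when the
        CPU lacks ARM_FEATURE_THUMB2, rejecting the encoding. */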
44
45 /* internal defines */
46 typedef struct DisasContext {
47 target_ulong pc;
48 int is_jmp;
49 /* Nonzero if this instruction has been conditionally skipped. */
50 int condjmp;
51 /* The label that will be jumped to when the instruction is skipped. */
52 int condlabel;
53 /* Thumb-2 conditional execution bits. */
54 int condexec_mask;
55 int condexec_cond;
56 struct TranslationBlock *tb;
57 int singlestep_enabled;
58 int thumb;
59 #if !defined(CONFIG_USER_ONLY)
60 int user;
61 #endif
62 int vfp_enabled;
63 int vec_len;
64 int vec_stride;
65 } DisasContext;
66
67 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
68
69 #if defined(CONFIG_USER_ONLY)
70 #define IS_USER(s) 1
71 #else
72 #define IS_USER(s) (s->user)
73 #endif
74
75 /* These instructions trap after executing, so defer them until after the
76 conditional execution state has been updated. */
77 #define DISAS_WFI 4
78 #define DISAS_SWI 5
79
80 static TCGv_ptr cpu_env;
81 /* We reuse the same 64-bit temporaries for efficiency. */
82 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
83 static TCGv_i32 cpu_R[16];
84 static TCGv_i32 cpu_exclusive_addr;
85 static TCGv_i32 cpu_exclusive_val;
86 static TCGv_i32 cpu_exclusive_high;
87 #ifdef CONFIG_USER_ONLY
88 static TCGv_i32 cpu_exclusive_test;
89 static TCGv_i32 cpu_exclusive_info;
90 #endif
91
92 /* FIXME: These should be removed. */
93 static TCGv cpu_F0s, cpu_F1s;
94 static TCGv_i64 cpu_F0d, cpu_F1d;
95
96 #include "gen-icount.h"
97
98 static const char *regnames[] =
99 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
100 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
101
102 /* initialize TCG globals. */
103 void arm_translate_init(void)
104 {
105 int i;
106
107 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
108
109 for (i = 0; i < 16; i++) {
110 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
111 offsetof(CPUState, regs[i]),
112 regnames[i]);
113 }
114 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUState, exclusive_addr), "exclusive_addr");
116 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUState, exclusive_val), "exclusive_val");
118 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_high), "exclusive_high");
120 #ifdef CONFIG_USER_ONLY
121 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_test), "exclusive_test");
123 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
124 offsetof(CPUState, exclusive_info), "exclusive_info");
125 #endif
126
127 #define GEN_HELPER 2
128 #include "helpers.h"
129 }
130
131 static int num_temps;
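     /* num_temps counts outstanding temporaries: every new_tmp() should be
        balanced by a dead_tmp(), so a nonzero count between instructions
        indicates a temporary leak. */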
132
133 /* Allocate a temporary variable. */
134 static TCGv_i32 new_tmp(void)
135 {
136 num_temps++;
137 return tcg_temp_new_i32();
138 }
139
140 /* Release a temporary variable. */
141 static void dead_tmp(TCGv tmp)
142 {
143 tcg_temp_free(tmp);
144 num_temps--;
145 }
146
147 static inline TCGv load_cpu_offset(int offset)
148 {
149 TCGv tmp = new_tmp();
150 tcg_gen_ld_i32(tmp, cpu_env, offset);
151 return tmp;
152 }
153
154 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
155
156 static inline void store_cpu_offset(TCGv var, int offset)
157 {
158 tcg_gen_st_i32(var, cpu_env, offset);
159 dead_tmp(var);
160 }
161
162 #define store_cpu_field(var, name) \
163 store_cpu_offset(var, offsetof(CPUState, name))
164
165 /* Set a variable to the value of a CPU register. */
166 static void load_reg_var(DisasContext *s, TCGv var, int reg)
167 {
168 if (reg == 15) {
169 uint32_t addr;
170 /* normally, since we have already updated the PC, we need only add one insn */
171 if (s->thumb)
172 addr = (long)s->pc + 2;
173 else
174 addr = (long)s->pc + 4;
175 tcg_gen_movi_i32(var, addr);
176 } else {
177 tcg_gen_mov_i32(var, cpu_R[reg]);
178 }
179 }
180
181 /* Create a new temporary and set it to the value of a CPU register. */
182 static inline TCGv load_reg(DisasContext *s, int reg)
183 {
184 TCGv tmp = new_tmp();
185 load_reg_var(s, tmp, reg);
186 return tmp;
187 }
188
189 /* Set a CPU register. The source must be a temporary and will be
190 marked as dead. */
191 static void store_reg(DisasContext *s, int reg, TCGv var)
192 {
193 if (reg == 15) {
194 tcg_gen_andi_i32(var, var, ~1);
195 s->is_jmp = DISAS_JUMP;
196 }
197 tcg_gen_mov_i32(cpu_R[reg], var);
198 dead_tmp(var);
199 }
200
201 /* Value extensions. */
202 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
203 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
204 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
205 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
206
207 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
208 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
209
210
211 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
212 {
213 TCGv tmp_mask = tcg_const_i32(mask);
214 gen_helper_cpsr_write(var, tmp_mask);
215 tcg_temp_free_i32(tmp_mask);
216 }
217 /* Set NZCV flags from the high 4 bits of var. */
218 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
219
220 static void gen_exception(int excp)
221 {
222 TCGv tmp = new_tmp();
223 tcg_gen_movi_i32(tmp, excp);
224 gen_helper_exception(tmp);
225 dead_tmp(tmp);
226 }
227
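     /* Dual 16x16->32 signed multiply, operands modified in place:
        a = sxt16(a) * sxt16(b); b = (a >> 16) * (b >> 16) (using the
        original values of a and b). */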
228 static void gen_smul_dual(TCGv a, TCGv b)
229 {
230 TCGv tmp1 = new_tmp();
231 TCGv tmp2 = new_tmp();
232 tcg_gen_ext16s_i32(tmp1, a);
233 tcg_gen_ext16s_i32(tmp2, b);
234 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
235 dead_tmp(tmp2);
236 tcg_gen_sari_i32(a, a, 16);
237 tcg_gen_sari_i32(b, b, 16);
238 tcg_gen_mul_i32(b, b, a);
239 tcg_gen_mov_i32(a, tmp1);
240 dead_tmp(tmp1);
241 }
242
243 /* Byteswap each halfword. */
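     /* For example, 0x11223344 becomes 0x22114433. */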
244 static void gen_rev16(TCGv var)
245 {
246 TCGv tmp = new_tmp();
247 tcg_gen_shri_i32(tmp, var, 8);
248 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
249 tcg_gen_shli_i32(var, var, 8);
250 tcg_gen_andi_i32(var, var, 0xff00ff00);
251 tcg_gen_or_i32(var, var, tmp);
252 dead_tmp(tmp);
253 }
254
255 /* Byteswap low halfword and sign extend. */
256 static void gen_revsh(TCGv var)
257 {
258 tcg_gen_ext16u_i32(var, var);
259 tcg_gen_bswap16_i32(var, var);
260 tcg_gen_ext16s_i32(var, var);
261 }
262
263 /* Unsigned bitfield extract. */
264 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
265 {
266 if (shift)
267 tcg_gen_shri_i32(var, var, shift);
268 tcg_gen_andi_i32(var, var, mask);
269 }
270
271 /* Signed bitfield extract. */
272 static void gen_sbfx(TCGv var, int shift, int width)
273 {
274 uint32_t signbit;
275
276 if (shift)
277 tcg_gen_sari_i32(var, var, shift);
278 if (shift + width < 32) {
279 signbit = 1u << (width - 1);
280 tcg_gen_andi_i32(var, var, (1u << width) - 1);
281 tcg_gen_xori_i32(var, var, signbit);
282 tcg_gen_subi_i32(var, var, signbit);
283 }
284 }
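     /* The xor/sub pair above is the standard branch-free sign extension:
        for a width-bit value v with signbit = 1 << (width - 1),
        (v ^ signbit) - signbit propagates bit width-1 upwards, e.g. for
        width == 8, 0x80 -> 0xffffff80. */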
285
286 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
287 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
288 {
289 tcg_gen_andi_i32(val, val, mask);
290 tcg_gen_shli_i32(val, val, shift);
291 tcg_gen_andi_i32(base, base, ~(mask << shift));
292 tcg_gen_or_i32(dest, base, val);
293 }
294
295 /* Return (b << 32) + a. Mark inputs as dead. */
296 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
297 {
298 TCGv_i64 tmp64 = tcg_temp_new_i64();
299
300 tcg_gen_extu_i32_i64(tmp64, b);
301 dead_tmp(b);
302 tcg_gen_shli_i64(tmp64, tmp64, 32);
303 tcg_gen_add_i64(a, tmp64, a);
304
305 tcg_temp_free_i64(tmp64);
306 return a;
307 }
308
309 /* Return (b << 32) - a. Mark inputs as dead. */
310 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
311 {
312 TCGv_i64 tmp64 = tcg_temp_new_i64();
313
314 tcg_gen_extu_i32_i64(tmp64, b);
315 dead_tmp(b);
316 tcg_gen_shli_i64(tmp64, tmp64, 32);
317 tcg_gen_sub_i64(a, tmp64, a);
318
319 tcg_temp_free_i64(tmp64);
320 return a;
321 }
322
323 /* FIXME: Most targets have native widening multiplication.
324 It would be good to use that instead of a full wide multiply. */
325 /* 32x32->64 multiply. Marks inputs as dead. */
326 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
327 {
328 TCGv_i64 tmp1 = tcg_temp_new_i64();
329 TCGv_i64 tmp2 = tcg_temp_new_i64();
330
331 tcg_gen_extu_i32_i64(tmp1, a);
332 dead_tmp(a);
333 tcg_gen_extu_i32_i64(tmp2, b);
334 dead_tmp(b);
335 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
336 tcg_temp_free_i64(tmp2);
337 return tmp1;
338 }
339
340 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
341 {
342 TCGv_i64 tmp1 = tcg_temp_new_i64();
343 TCGv_i64 tmp2 = tcg_temp_new_i64();
344
345 tcg_gen_ext_i32_i64(tmp1, a);
346 dead_tmp(a);
347 tcg_gen_ext_i32_i64(tmp2, b);
348 dead_tmp(b);
349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
350 tcg_temp_free_i64(tmp2);
351 return tmp1;
352 }
353
354 /* Swap low and high halfwords. */
355 static void gen_swap_half(TCGv var)
356 {
357 TCGv tmp = new_tmp();
358 tcg_gen_shri_i32(tmp, var, 16);
359 tcg_gen_shli_i32(var, var, 16);
360 tcg_gen_or_i32(var, var, tmp);
361 dead_tmp(tmp);
362 }
363
364 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
365 tmp = (t0 ^ t1) & 0x8000;
366 t0 &= ~0x8000;
367 t1 &= ~0x8000;
368 t0 = (t0 + t1) ^ tmp;
369 */
370
371 static void gen_add16(TCGv t0, TCGv t1)
372 {
373 TCGv tmp = new_tmp();
374 tcg_gen_xor_i32(tmp, t0, t1);
375 tcg_gen_andi_i32(tmp, tmp, 0x8000);
376 tcg_gen_andi_i32(t0, t0, ~0x8000);
377 tcg_gen_andi_i32(t1, t1, ~0x8000);
378 tcg_gen_add_i32(t0, t0, t1);
379 tcg_gen_xor_i32(t0, t0, tmp);
380 dead_tmp(tmp);
381 dead_tmp(t1);
382 }
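     /* For example, gen_add16 of 0x0000ffff and 0x00000001 yields
        0x00000000: the carry out of the low halfword is suppressed,
        where a plain 32-bit add would give 0x00010000. */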
383
384 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
385
386 /* Set CF to the top bit of var. */
387 static void gen_set_CF_bit31(TCGv var)
388 {
389 TCGv tmp = new_tmp();
390 tcg_gen_shri_i32(tmp, var, 31);
391 gen_set_CF(tmp);
392 dead_tmp(tmp);
393 }
394
395 /* Set N and Z flags from var. */
396 static inline void gen_logic_CC(TCGv var)
397 {
398 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
399 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
400 }
401
402 /* T0 += T1 + CF. */
403 static void gen_adc(TCGv t0, TCGv t1)
404 {
405 TCGv tmp;
406 tcg_gen_add_i32(t0, t0, t1);
407 tmp = load_cpu_field(CF);
408 tcg_gen_add_i32(t0, t0, tmp);
409 dead_tmp(tmp);
410 }
411
412 /* dest = T0 + T1 + CF. */
413 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
414 {
415 TCGv tmp;
416 tcg_gen_add_i32(dest, t0, t1);
417 tmp = load_cpu_field(CF);
418 tcg_gen_add_i32(dest, dest, tmp);
419 dead_tmp(tmp);
420 }
421
422 /* dest = T0 - T1 + CF - 1. */
423 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
424 {
425 TCGv tmp;
426 tcg_gen_sub_i32(dest, t0, t1);
427 tmp = load_cpu_field(CF);
428 tcg_gen_add_i32(dest, dest, tmp);
429 tcg_gen_subi_i32(dest, dest, 1);
430 dead_tmp(tmp);
431 }
432
433 /* FIXME: Implement this natively. */
434 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
435
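     /* Set CF to bit 'shift' of var; callers pass the index of the last
        bit shifted out. */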
436 static void shifter_out_im(TCGv var, int shift)
437 {
438 TCGv tmp = new_tmp();
439 if (shift == 0) {
440 tcg_gen_andi_i32(tmp, var, 1);
441 } else {
442 tcg_gen_shri_i32(tmp, var, shift);
443 if (shift != 31)
444 tcg_gen_andi_i32(tmp, tmp, 1);
445 }
446 gen_set_CF(tmp);
447 dead_tmp(tmp);
448 }
449
450 /* Shift by immediate. Includes special handling for shift == 0. */
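     /* Note that in the ARM immediate-shift encoding a shift amount of 0 is
        special-cased: LSR #0 and ASR #0 encode shifts by 32, and ROR #0
        encodes RRX (rotate right by one through the carry flag). */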
451 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
452 {
453 switch (shiftop) {
454 case 0: /* LSL */
455 if (shift != 0) {
456 if (flags)
457 shifter_out_im(var, 32 - shift);
458 tcg_gen_shli_i32(var, var, shift);
459 }
460 break;
461 case 1: /* LSR */
462 if (shift == 0) {
463 if (flags) {
464 tcg_gen_shri_i32(var, var, 31);
465 gen_set_CF(var);
466 }
467 tcg_gen_movi_i32(var, 0);
468 } else {
469 if (flags)
470 shifter_out_im(var, shift - 1);
471 tcg_gen_shri_i32(var, var, shift);
472 }
473 break;
474 case 2: /* ASR */
475 if (shift == 0)
476 shift = 32;
477 if (flags)
478 shifter_out_im(var, shift - 1);
479 if (shift == 32)
480 shift = 31;
481 tcg_gen_sari_i32(var, var, shift);
482 break;
483 case 3: /* ROR/RRX */
484 if (shift != 0) {
485 if (flags)
486 shifter_out_im(var, shift - 1);
487                 tcg_gen_rotri_i32(var, var, shift);
488 } else {
489 TCGv tmp = load_cpu_field(CF);
490 if (flags)
491 shifter_out_im(var, 0);
492 tcg_gen_shri_i32(var, var, 1);
493 tcg_gen_shli_i32(tmp, tmp, 31);
494 tcg_gen_or_i32(var, var, tmp);
495 dead_tmp(tmp);
496 }
497 }
498 }
499
500 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
501 TCGv shift, int flags)
502 {
503 if (flags) {
504 switch (shiftop) {
505 case 0: gen_helper_shl_cc(var, var, shift); break;
506 case 1: gen_helper_shr_cc(var, var, shift); break;
507 case 2: gen_helper_sar_cc(var, var, shift); break;
508 case 3: gen_helper_ror_cc(var, var, shift); break;
509 }
510 } else {
511 switch (shiftop) {
512 case 0: gen_helper_shl(var, var, shift); break;
513 case 1: gen_helper_shr(var, var, shift); break;
514 case 2: gen_helper_sar(var, var, shift); break;
515 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
516 tcg_gen_rotr_i32(var, var, shift); break;
517 }
518 }
519 dead_tmp(shift);
520 }
521
522 #define PAS_OP(pfx) \
523 switch (op2) { \
524 case 0: gen_pas_helper(glue(pfx,add16)); break; \
525 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
526 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
527 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
528 case 4: gen_pas_helper(glue(pfx,add8)); break; \
529 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
530 }
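     /* For example, gen_arm_parallel_addsub(1, 0, a, b) expands via PAS_OP(s)
        to gen_helper_sadd16(a, a, b, tmp), with tmp pointing at the GE flags
        in CPUState. */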
531 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
532 {
533 TCGv_ptr tmp;
534
535 switch (op1) {
536 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
537 case 1:
538 tmp = tcg_temp_new_ptr();
539 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
540 PAS_OP(s)
541 tcg_temp_free_ptr(tmp);
542 break;
543 case 5:
544 tmp = tcg_temp_new_ptr();
545 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
546 PAS_OP(u)
547 tcg_temp_free_ptr(tmp);
548 break;
549 #undef gen_pas_helper
550 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
551 case 2:
552 PAS_OP(q);
553 break;
554 case 3:
555 PAS_OP(sh);
556 break;
557 case 6:
558 PAS_OP(uq);
559 break;
560 case 7:
561 PAS_OP(uh);
562 break;
563 #undef gen_pas_helper
564 }
565 }
566 #undef PAS_OP
567
568 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
569 #define PAS_OP(pfx) \
570 switch (op1) { \
571 case 0: gen_pas_helper(glue(pfx,add8)); break; \
572 case 1: gen_pas_helper(glue(pfx,add16)); break; \
573 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
574 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
575 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
576 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
577 }
578 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
579 {
580 TCGv_ptr tmp;
581
582 switch (op2) {
583 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
584 case 0:
585 tmp = tcg_temp_new_ptr();
586 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
587 PAS_OP(s)
588 tcg_temp_free_ptr(tmp);
589 break;
590 case 4:
591 tmp = tcg_temp_new_ptr();
592 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
593 PAS_OP(u)
594 tcg_temp_free_ptr(tmp);
595 break;
596 #undef gen_pas_helper
597 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
598 case 1:
599 PAS_OP(q);
600 break;
601 case 2:
602 PAS_OP(sh);
603 break;
604 case 5:
605 PAS_OP(uq);
606 break;
607 case 6:
608 PAS_OP(uh);
609 break;
610 #undef gen_pas_helper
611 }
612 }
613 #undef PAS_OP
614
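     /* Generate a branch to 'label' if condition 'cc' holds. The flags are
        kept in decomposed form: ZF is zero iff Z is set, NF and VF hold N
        and V in bit 31, and CF holds the carry as 0 or 1, hence the
        comparisons against zero below. */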
615 static void gen_test_cc(int cc, int label)
616 {
617 TCGv tmp;
618 TCGv tmp2;
619 int inv;
620
621 switch (cc) {
622 case 0: /* eq: Z */
623 tmp = load_cpu_field(ZF);
624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
625 break;
626 case 1: /* ne: !Z */
627 tmp = load_cpu_field(ZF);
628 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
629 break;
630 case 2: /* cs: C */
631 tmp = load_cpu_field(CF);
632 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
633 break;
634 case 3: /* cc: !C */
635 tmp = load_cpu_field(CF);
636 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
637 break;
638 case 4: /* mi: N */
639 tmp = load_cpu_field(NF);
640 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
641 break;
642 case 5: /* pl: !N */
643 tmp = load_cpu_field(NF);
644 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
645 break;
646 case 6: /* vs: V */
647 tmp = load_cpu_field(VF);
648 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
649 break;
650 case 7: /* vc: !V */
651 tmp = load_cpu_field(VF);
652 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
653 break;
654 case 8: /* hi: C && !Z */
655 inv = gen_new_label();
656 tmp = load_cpu_field(CF);
657 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
658 dead_tmp(tmp);
659 tmp = load_cpu_field(ZF);
660 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
661 gen_set_label(inv);
662 break;
663 case 9: /* ls: !C || Z */
664 tmp = load_cpu_field(CF);
665 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
666 dead_tmp(tmp);
667 tmp = load_cpu_field(ZF);
668 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
669 break;
670 case 10: /* ge: N == V -> N ^ V == 0 */
671 tmp = load_cpu_field(VF);
672 tmp2 = load_cpu_field(NF);
673 tcg_gen_xor_i32(tmp, tmp, tmp2);
674 dead_tmp(tmp2);
675 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
676 break;
677 case 11: /* lt: N != V -> N ^ V != 0 */
678 tmp = load_cpu_field(VF);
679 tmp2 = load_cpu_field(NF);
680 tcg_gen_xor_i32(tmp, tmp, tmp2);
681 dead_tmp(tmp2);
682 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
683 break;
684 case 12: /* gt: !Z && N == V */
685 inv = gen_new_label();
686 tmp = load_cpu_field(ZF);
687 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
688 dead_tmp(tmp);
689 tmp = load_cpu_field(VF);
690 tmp2 = load_cpu_field(NF);
691 tcg_gen_xor_i32(tmp, tmp, tmp2);
692 dead_tmp(tmp2);
693 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
694 gen_set_label(inv);
695 break;
696 case 13: /* le: Z || N != V */
697 tmp = load_cpu_field(ZF);
698 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
699 dead_tmp(tmp);
700 tmp = load_cpu_field(VF);
701 tmp2 = load_cpu_field(NF);
702 tcg_gen_xor_i32(tmp, tmp, tmp2);
703 dead_tmp(tmp2);
704 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
705 break;
706 default:
707 fprintf(stderr, "Bad condition code 0x%x\n", cc);
708 abort();
709 }
710 dead_tmp(tmp);
711 }
712
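     /* Indexed by the 4-bit data processing opcode: nonzero for the logical
        ops (AND, EOR, TST, TEQ, ORR, MOV, BIC, MVN), whose flag-setting
        forms take C from the shifter rather than from an arithmetic carry. */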
713 static const uint8_t table_logic_cc[16] = {
714 1, /* and */
715 1, /* xor */
716 0, /* sub */
717 0, /* rsb */
718 0, /* add */
719 0, /* adc */
720 0, /* sbc */
721 0, /* rsc */
722 1, /* andl */
723 1, /* xorl */
724 0, /* cmp */
725 0, /* cmn */
726 1, /* orr */
727 1, /* mov */
728 1, /* bic */
729 1, /* mvn */
730 };
731
732 /* Set PC and Thumb state from an immediate address. */
733 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
734 {
735 TCGv tmp;
736
737 s->is_jmp = DISAS_UPDATE;
738 if (s->thumb != (addr & 1)) {
739 tmp = new_tmp();
740 tcg_gen_movi_i32(tmp, addr & 1);
741 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
742 dead_tmp(tmp);
743 }
744 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
745 }
746
747 /* Set PC and Thumb state from var. var is marked as dead. */
748 static inline void gen_bx(DisasContext *s, TCGv var)
749 {
750 s->is_jmp = DISAS_UPDATE;
751 tcg_gen_andi_i32(cpu_R[15], var, ~1);
752 tcg_gen_andi_i32(var, var, 1);
753 store_cpu_field(var, thumb);
754 }
755
756 /* Variant of store_reg which uses branch&exchange logic when storing
757 to r15 in ARM architecture v7 and above. The source must be a temporary
758 and will be marked as dead. */
759 static inline void store_reg_bx(CPUState *env, DisasContext *s,
760 int reg, TCGv var)
761 {
762 if (reg == 15 && ENABLE_ARCH_7) {
763 gen_bx(s, var);
764 } else {
765 store_reg(s, reg, var);
766 }
767 }
768
769 static inline TCGv gen_ld8s(TCGv addr, int index)
770 {
771 TCGv tmp = new_tmp();
772 tcg_gen_qemu_ld8s(tmp, addr, index);
773 return tmp;
774 }
775 static inline TCGv gen_ld8u(TCGv addr, int index)
776 {
777 TCGv tmp = new_tmp();
778 tcg_gen_qemu_ld8u(tmp, addr, index);
779 return tmp;
780 }
781 static inline TCGv gen_ld16s(TCGv addr, int index)
782 {
783 TCGv tmp = new_tmp();
784 tcg_gen_qemu_ld16s(tmp, addr, index);
785 return tmp;
786 }
787 static inline TCGv gen_ld16u(TCGv addr, int index)
788 {
789 TCGv tmp = new_tmp();
790 tcg_gen_qemu_ld16u(tmp, addr, index);
791 return tmp;
792 }
793 static inline TCGv gen_ld32(TCGv addr, int index)
794 {
795 TCGv tmp = new_tmp();
796 tcg_gen_qemu_ld32u(tmp, addr, index);
797 return tmp;
798 }
799 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
800 {
801 TCGv_i64 tmp = tcg_temp_new_i64();
802 tcg_gen_qemu_ld64(tmp, addr, index);
803 return tmp;
804 }
805 static inline void gen_st8(TCGv val, TCGv addr, int index)
806 {
807 tcg_gen_qemu_st8(val, addr, index);
808 dead_tmp(val);
809 }
810 static inline void gen_st16(TCGv val, TCGv addr, int index)
811 {
812 tcg_gen_qemu_st16(val, addr, index);
813 dead_tmp(val);
814 }
815 static inline void gen_st32(TCGv val, TCGv addr, int index)
816 {
817 tcg_gen_qemu_st32(val, addr, index);
818 dead_tmp(val);
819 }
820 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
821 {
822 tcg_gen_qemu_st64(val, addr, index);
823 tcg_temp_free_i64(val);
824 }
825
826 static inline void gen_set_pc_im(uint32_t val)
827 {
828 tcg_gen_movi_i32(cpu_R[15], val);
829 }
830
831 /* Force a TB lookup after an instruction that changes the CPU state. */
832 static inline void gen_lookup_tb(DisasContext *s)
833 {
834 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
835 s->is_jmp = DISAS_UPDATE;
836 }
837
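     /* Add the offset portion of a load/store address to 'var': bit 25 of
        the instruction selects a shifted register offset over an immediate,
        and bit 23 (the U bit) selects add over subtract. */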
838 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
839 TCGv var)
840 {
841 int val, rm, shift, shiftop;
842 TCGv offset;
843
844 if (!(insn & (1 << 25))) {
845 /* immediate */
846 val = insn & 0xfff;
847 if (!(insn & (1 << 23)))
848 val = -val;
849 if (val != 0)
850 tcg_gen_addi_i32(var, var, val);
851 } else {
852 /* shift/register */
853 rm = (insn) & 0xf;
854 shift = (insn >> 7) & 0x1f;
855 shiftop = (insn >> 5) & 3;
856 offset = load_reg(s, rm);
857 gen_arm_shift_im(offset, shiftop, shift, 0);
858 if (!(insn & (1 << 23)))
859 tcg_gen_sub_i32(var, var, offset);
860 else
861 tcg_gen_add_i32(var, var, offset);
862 dead_tmp(offset);
863 }
864 }
865
866 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
867 int extra, TCGv var)
868 {
869 int val, rm;
870 TCGv offset;
871
872 if (insn & (1 << 22)) {
873 /* immediate */
874 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
875 if (!(insn & (1 << 23)))
876 val = -val;
877 val += extra;
878 if (val != 0)
879 tcg_gen_addi_i32(var, var, val);
880 } else {
881 /* register */
882 if (extra)
883 tcg_gen_addi_i32(var, var, extra);
884 rm = (insn) & 0xf;
885 offset = load_reg(s, rm);
886 if (!(insn & (1 << 23)))
887 tcg_gen_sub_i32(var, var, offset);
888 else
889 tcg_gen_add_i32(var, var, offset);
890 dead_tmp(offset);
891 }
892 }
893
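     /* Generate a two-operand VFP arithmetic op, F0 = F0 <op> F1, in double
        or single precision according to 'dp'. */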
894 #define VFP_OP2(name) \
895 static inline void gen_vfp_##name(int dp) \
896 { \
897 if (dp) \
898 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
899 else \
900 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
901 }
902
903 VFP_OP2(add)
904 VFP_OP2(sub)
905 VFP_OP2(mul)
906 VFP_OP2(div)
907
908 #undef VFP_OP2
909
910 static inline void gen_vfp_abs(int dp)
911 {
912 if (dp)
913 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
914 else
915 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
916 }
917
918 static inline void gen_vfp_neg(int dp)
919 {
920 if (dp)
921 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
922 else
923 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
924 }
925
926 static inline void gen_vfp_sqrt(int dp)
927 {
928 if (dp)
929 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
930 else
931 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
932 }
933
934 static inline void gen_vfp_cmp(int dp)
935 {
936 if (dp)
937 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
938 else
939 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
940 }
941
942 static inline void gen_vfp_cmpe(int dp)
943 {
944 if (dp)
945 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
946 else
947 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
948 }
949
950 static inline void gen_vfp_F1_ld0(int dp)
951 {
952 if (dp)
953 tcg_gen_movi_i64(cpu_F1d, 0);
954 else
955 tcg_gen_movi_i32(cpu_F1s, 0);
956 }
957
958 static inline void gen_vfp_uito(int dp)
959 {
960 if (dp)
961 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
962 else
963 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
964 }
965
966 static inline void gen_vfp_sito(int dp)
967 {
968 if (dp)
969 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
970 else
971 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
972 }
973
974 static inline void gen_vfp_toui(int dp)
975 {
976 if (dp)
977 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
978 else
979 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
980 }
981
982 static inline void gen_vfp_touiz(int dp)
983 {
984 if (dp)
985 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
986 else
987 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
988 }
989
990 static inline void gen_vfp_tosi(int dp)
991 {
992 if (dp)
993 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
994 else
995 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
996 }
997
998 static inline void gen_vfp_tosiz(int dp)
999 {
1000 if (dp)
1001 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
1002 else
1003 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1004 }
1005
1006 #define VFP_GEN_FIX(name) \
1007 static inline void gen_vfp_##name(int dp, int shift) \
1008 { \
1009 TCGv tmp_shift = tcg_const_i32(shift); \
1010 if (dp) \
1011 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
1012 else \
1013 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1014 tcg_temp_free_i32(tmp_shift); \
1015 }
1016 VFP_GEN_FIX(tosh)
1017 VFP_GEN_FIX(tosl)
1018 VFP_GEN_FIX(touh)
1019 VFP_GEN_FIX(toul)
1020 VFP_GEN_FIX(shto)
1021 VFP_GEN_FIX(slto)
1022 VFP_GEN_FIX(uhto)
1023 VFP_GEN_FIX(ulto)
1024 #undef VFP_GEN_FIX
1025
1026 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1027 {
1028 if (dp)
1029 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1030 else
1031 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1032 }
1033
1034 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1035 {
1036 if (dp)
1037 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1038 else
1039 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1040 }
1041
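     /* Byte offset of VFP register 'reg' within CPUARMState. Single
        precision registers alias the halves of the double-precision bank,
        so for dp == 0 the word within the containing double is selected by
        reg & 1. */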
1042 static inline long
1043 vfp_reg_offset (int dp, int reg)
1044 {
1045 if (dp)
1046 return offsetof(CPUARMState, vfp.regs[reg]);
1047 else if (reg & 1) {
1048 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1049 + offsetof(CPU_DoubleU, l.upper);
1050 } else {
1051 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1052 + offsetof(CPU_DoubleU, l.lower);
1053 }
1054 }
1055
1056 /* Return the offset of a 32-bit piece of a NEON register.
1057 zero is the least significant end of the register. */
1058 static inline long
1059 neon_reg_offset (int reg, int n)
1060 {
1061 int sreg;
1062 sreg = reg * 2 + n;
1063 return vfp_reg_offset(0, sreg);
1064 }
1065
1066 static TCGv neon_load_reg(int reg, int pass)
1067 {
1068 TCGv tmp = new_tmp();
1069 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1070 return tmp;
1071 }
1072
1073 static void neon_store_reg(int reg, int pass, TCGv var)
1074 {
1075 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1076 dead_tmp(var);
1077 }
1078
1079 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1080 {
1081 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1082 }
1083
1084 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1085 {
1086 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1087 }
1088
1089 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1090 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1091 #define tcg_gen_st_f32 tcg_gen_st_i32
1092 #define tcg_gen_st_f64 tcg_gen_st_i64
1093
1094 static inline void gen_mov_F0_vreg(int dp, int reg)
1095 {
1096 if (dp)
1097 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1098 else
1099 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1100 }
1101
1102 static inline void gen_mov_F1_vreg(int dp, int reg)
1103 {
1104 if (dp)
1105 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1106 else
1107 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1108 }
1109
1110 static inline void gen_mov_vreg_F0(int dp, int reg)
1111 {
1112 if (dp)
1113 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1114 else
1115 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1116 }
1117
1118 #define ARM_CP_RW_BIT (1 << 20)
1119
1120 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1121 {
1122 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1123 }
1124
1125 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1126 {
1127 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1128 }
1129
1130 static inline TCGv iwmmxt_load_creg(int reg)
1131 {
1132 TCGv var = new_tmp();
1133 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1134 return var;
1135 }
1136
1137 static inline void iwmmxt_store_creg(int reg, TCGv var)
1138 {
1139 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1140 dead_tmp(var);
1141 }
1142
1143 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1144 {
1145 iwmmxt_store_reg(cpu_M0, rn);
1146 }
1147
1148 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1149 {
1150 iwmmxt_load_reg(cpu_M0, rn);
1151 }
1152
1153 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1154 {
1155 iwmmxt_load_reg(cpu_V1, rn);
1156 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1157 }
1158
1159 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1160 {
1161 iwmmxt_load_reg(cpu_V1, rn);
1162 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1163 }
1164
1165 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1166 {
1167 iwmmxt_load_reg(cpu_V1, rn);
1168 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1169 }
1170
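     /* The macros below generate the bulk of the iwMMXt ALU ops: each
        defines a gen_op_iwmmxt_<name>_M0_wRn() that combines M0 with wRn
        through the corresponding TCG helper, with or without the env
        pointer, and IWMMXT_OP_ENV_SIZE instantiates byte/word/long forms. */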
1171 #define IWMMXT_OP(name) \
1172 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1173 { \
1174 iwmmxt_load_reg(cpu_V1, rn); \
1175 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1176 }
1177
1178 #define IWMMXT_OP_ENV(name) \
1179 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1180 { \
1181 iwmmxt_load_reg(cpu_V1, rn); \
1182 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1183 }
1184
1185 #define IWMMXT_OP_ENV_SIZE(name) \
1186 IWMMXT_OP_ENV(name##b) \
1187 IWMMXT_OP_ENV(name##w) \
1188 IWMMXT_OP_ENV(name##l)
1189
1190 #define IWMMXT_OP_ENV1(name) \
1191 static inline void gen_op_iwmmxt_##name##_M0(void) \
1192 { \
1193 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1194 }
1195
1196 IWMMXT_OP(maddsq)
1197 IWMMXT_OP(madduq)
1198 IWMMXT_OP(sadb)
1199 IWMMXT_OP(sadw)
1200 IWMMXT_OP(mulslw)
1201 IWMMXT_OP(mulshw)
1202 IWMMXT_OP(mululw)
1203 IWMMXT_OP(muluhw)
1204 IWMMXT_OP(macsw)
1205 IWMMXT_OP(macuw)
1206
1207 IWMMXT_OP_ENV_SIZE(unpackl)
1208 IWMMXT_OP_ENV_SIZE(unpackh)
1209
1210 IWMMXT_OP_ENV1(unpacklub)
1211 IWMMXT_OP_ENV1(unpackluw)
1212 IWMMXT_OP_ENV1(unpacklul)
1213 IWMMXT_OP_ENV1(unpackhub)
1214 IWMMXT_OP_ENV1(unpackhuw)
1215 IWMMXT_OP_ENV1(unpackhul)
1216 IWMMXT_OP_ENV1(unpacklsb)
1217 IWMMXT_OP_ENV1(unpacklsw)
1218 IWMMXT_OP_ENV1(unpacklsl)
1219 IWMMXT_OP_ENV1(unpackhsb)
1220 IWMMXT_OP_ENV1(unpackhsw)
1221 IWMMXT_OP_ENV1(unpackhsl)
1222
1223 IWMMXT_OP_ENV_SIZE(cmpeq)
1224 IWMMXT_OP_ENV_SIZE(cmpgtu)
1225 IWMMXT_OP_ENV_SIZE(cmpgts)
1226
1227 IWMMXT_OP_ENV_SIZE(mins)
1228 IWMMXT_OP_ENV_SIZE(minu)
1229 IWMMXT_OP_ENV_SIZE(maxs)
1230 IWMMXT_OP_ENV_SIZE(maxu)
1231
1232 IWMMXT_OP_ENV_SIZE(subn)
1233 IWMMXT_OP_ENV_SIZE(addn)
1234 IWMMXT_OP_ENV_SIZE(subu)
1235 IWMMXT_OP_ENV_SIZE(addu)
1236 IWMMXT_OP_ENV_SIZE(subs)
1237 IWMMXT_OP_ENV_SIZE(adds)
1238
1239 IWMMXT_OP_ENV(avgb0)
1240 IWMMXT_OP_ENV(avgb1)
1241 IWMMXT_OP_ENV(avgw0)
1242 IWMMXT_OP_ENV(avgw1)
1243
1244 IWMMXT_OP(msadb)
1245
1246 IWMMXT_OP_ENV(packuw)
1247 IWMMXT_OP_ENV(packul)
1248 IWMMXT_OP_ENV(packuq)
1249 IWMMXT_OP_ENV(packsw)
1250 IWMMXT_OP_ENV(packsl)
1251 IWMMXT_OP_ENV(packsq)
1252
1253 static void gen_op_iwmmxt_set_mup(void)
1254 {
1255 TCGv tmp;
1256 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1257 tcg_gen_ori_i32(tmp, tmp, 2);
1258 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1259 }
1260
1261 static void gen_op_iwmmxt_set_cup(void)
1262 {
1263 TCGv tmp;
1264 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1265 tcg_gen_ori_i32(tmp, tmp, 1);
1266 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1267 }
1268
1269 static void gen_op_iwmmxt_setpsr_nz(void)
1270 {
1271 TCGv tmp = new_tmp();
1272 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1273 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1274 }
1275
1276 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1277 {
1278 iwmmxt_load_reg(cpu_V1, rn);
1279 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1280 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1281 }
1282
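     /* Compute the address for an iwMMXt load/store into 'dest', applying
        pre- or post-indexed base register writeback as encoded. Returns
        nonzero for an invalid addressing mode. */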
1283 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1284 {
1285 int rd;
1286 uint32_t offset;
1287 TCGv tmp;
1288
1289 rd = (insn >> 16) & 0xf;
1290 tmp = load_reg(s, rd);
1291
1292 offset = (insn & 0xff) << ((insn >> 7) & 2);
1293 if (insn & (1 << 24)) {
1294 /* Pre indexed */
1295 if (insn & (1 << 23))
1296 tcg_gen_addi_i32(tmp, tmp, offset);
1297 else
1298 tcg_gen_addi_i32(tmp, tmp, -offset);
1299 tcg_gen_mov_i32(dest, tmp);
1300 if (insn & (1 << 21))
1301 store_reg(s, rd, tmp);
1302 else
1303 dead_tmp(tmp);
1304 } else if (insn & (1 << 21)) {
1305 /* Post indexed */
1306 tcg_gen_mov_i32(dest, tmp);
1307 if (insn & (1 << 23))
1308 tcg_gen_addi_i32(tmp, tmp, offset);
1309 else
1310 tcg_gen_addi_i32(tmp, tmp, -offset);
1311 store_reg(s, rd, tmp);
1312 } else if (!(insn & (1 << 23)))
1313 return 1;
1314 return 0;
1315 }
1316
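     /* Fetch the shift amount for an iwMMXt shift instruction into 'dest',
        from either a wCGR control register or the low half of a wR register,
        masked with 'mask'. Returns nonzero for an invalid encoding. */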
1317 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1318 {
1319 int rd = (insn >> 0) & 0xf;
1320 TCGv tmp;
1321
1322 if (insn & (1 << 8)) {
1323 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1324 return 1;
1325 } else {
1326 tmp = iwmmxt_load_creg(rd);
1327 }
1328 } else {
1329 tmp = new_tmp();
1330 iwmmxt_load_reg(cpu_V0, rd);
1331 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1332 }
1333 tcg_gen_andi_i32(tmp, tmp, mask);
1334 tcg_gen_mov_i32(dest, tmp);
1335 dead_tmp(tmp);
1336 return 0;
1337 }
1338
1339 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1340 (i.e. an undefined instruction). */
1341 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1342 {
1343 int rd, wrd;
1344 int rdhi, rdlo, rd0, rd1, i;
1345 TCGv addr;
1346 TCGv tmp, tmp2, tmp3;
1347
1348 if ((insn & 0x0e000e00) == 0x0c000000) {
1349 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1350 wrd = insn & 0xf;
1351 rdlo = (insn >> 12) & 0xf;
1352 rdhi = (insn >> 16) & 0xf;
1353 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1354 iwmmxt_load_reg(cpu_V0, wrd);
1355 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1356 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1357 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1358 } else { /* TMCRR */
1359 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1360 iwmmxt_store_reg(cpu_V0, wrd);
1361 gen_op_iwmmxt_set_mup();
1362 }
1363 return 0;
1364 }
1365
1366 wrd = (insn >> 12) & 0xf;
1367 addr = new_tmp();
1368 if (gen_iwmmxt_address(s, insn, addr)) {
1369 dead_tmp(addr);
1370 return 1;
1371 }
1372 if (insn & ARM_CP_RW_BIT) {
1373 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1374 tmp = new_tmp();
1375 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1376 iwmmxt_store_creg(wrd, tmp);
1377 } else {
1378 i = 1;
1379 if (insn & (1 << 8)) {
1380 if (insn & (1 << 22)) { /* WLDRD */
1381 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1382 i = 0;
1383 } else { /* WLDRW wRd */
1384 tmp = gen_ld32(addr, IS_USER(s));
1385 }
1386 } else {
1387 if (insn & (1 << 22)) { /* WLDRH */
1388 tmp = gen_ld16u(addr, IS_USER(s));
1389 } else { /* WLDRB */
1390 tmp = gen_ld8u(addr, IS_USER(s));
1391 }
1392 }
1393 if (i) {
1394 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1395 dead_tmp(tmp);
1396 }
1397 gen_op_iwmmxt_movq_wRn_M0(wrd);
1398 }
1399 } else {
1400 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1401 tmp = iwmmxt_load_creg(wrd);
1402 gen_st32(tmp, addr, IS_USER(s));
1403 } else {
1404 gen_op_iwmmxt_movq_M0_wRn(wrd);
1405 tmp = new_tmp();
1406 if (insn & (1 << 8)) {
1407 if (insn & (1 << 22)) { /* WSTRD */
1408 dead_tmp(tmp);
1409 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1410 } else { /* WSTRW wRd */
1411 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1412 gen_st32(tmp, addr, IS_USER(s));
1413 }
1414 } else {
1415 if (insn & (1 << 22)) { /* WSTRH */
1416 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1417 gen_st16(tmp, addr, IS_USER(s));
1418 } else { /* WSTRB */
1419 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1420 gen_st8(tmp, addr, IS_USER(s));
1421 }
1422 }
1423 }
1424 }
1425 dead_tmp(addr);
1426 return 0;
1427 }
1428
1429 if ((insn & 0x0f000000) != 0x0e000000)
1430 return 1;
1431
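     /* The switch key packs insn bits [23:20] into key bits [11:8] and insn
        bits [11:4] into key bits [7:0]; e.g. WOR, with both fields zero,
        decodes as case 0x000. */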
1432 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1433 case 0x000: /* WOR */
1434 wrd = (insn >> 12) & 0xf;
1435 rd0 = (insn >> 0) & 0xf;
1436 rd1 = (insn >> 16) & 0xf;
1437 gen_op_iwmmxt_movq_M0_wRn(rd0);
1438 gen_op_iwmmxt_orq_M0_wRn(rd1);
1439 gen_op_iwmmxt_setpsr_nz();
1440 gen_op_iwmmxt_movq_wRn_M0(wrd);
1441 gen_op_iwmmxt_set_mup();
1442 gen_op_iwmmxt_set_cup();
1443 break;
1444 case 0x011: /* TMCR */
1445 if (insn & 0xf)
1446 return 1;
1447 rd = (insn >> 12) & 0xf;
1448 wrd = (insn >> 16) & 0xf;
1449 switch (wrd) {
1450 case ARM_IWMMXT_wCID:
1451 case ARM_IWMMXT_wCASF:
1452 break;
1453 case ARM_IWMMXT_wCon:
1454 gen_op_iwmmxt_set_cup();
1455 /* Fall through. */
1456 case ARM_IWMMXT_wCSSF:
1457 tmp = iwmmxt_load_creg(wrd);
1458 tmp2 = load_reg(s, rd);
1459 tcg_gen_andc_i32(tmp, tmp, tmp2);
1460 dead_tmp(tmp2);
1461 iwmmxt_store_creg(wrd, tmp);
1462 break;
1463 case ARM_IWMMXT_wCGR0:
1464 case ARM_IWMMXT_wCGR1:
1465 case ARM_IWMMXT_wCGR2:
1466 case ARM_IWMMXT_wCGR3:
1467 gen_op_iwmmxt_set_cup();
1468 tmp = load_reg(s, rd);
1469 iwmmxt_store_creg(wrd, tmp);
1470 break;
1471 default:
1472 return 1;
1473 }
1474 break;
1475 case 0x100: /* WXOR */
1476 wrd = (insn >> 12) & 0xf;
1477 rd0 = (insn >> 0) & 0xf;
1478 rd1 = (insn >> 16) & 0xf;
1479 gen_op_iwmmxt_movq_M0_wRn(rd0);
1480 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1481 gen_op_iwmmxt_setpsr_nz();
1482 gen_op_iwmmxt_movq_wRn_M0(wrd);
1483 gen_op_iwmmxt_set_mup();
1484 gen_op_iwmmxt_set_cup();
1485 break;
1486 case 0x111: /* TMRC */
1487 if (insn & 0xf)
1488 return 1;
1489 rd = (insn >> 12) & 0xf;
1490 wrd = (insn >> 16) & 0xf;
1491 tmp = iwmmxt_load_creg(wrd);
1492 store_reg(s, rd, tmp);
1493 break;
1494 case 0x300: /* WANDN */
1495 wrd = (insn >> 12) & 0xf;
1496 rd0 = (insn >> 0) & 0xf;
1497 rd1 = (insn >> 16) & 0xf;
1498 gen_op_iwmmxt_movq_M0_wRn(rd0);
1499 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1500 gen_op_iwmmxt_andq_M0_wRn(rd1);
1501 gen_op_iwmmxt_setpsr_nz();
1502 gen_op_iwmmxt_movq_wRn_M0(wrd);
1503 gen_op_iwmmxt_set_mup();
1504 gen_op_iwmmxt_set_cup();
1505 break;
1506 case 0x200: /* WAND */
1507 wrd = (insn >> 12) & 0xf;
1508 rd0 = (insn >> 0) & 0xf;
1509 rd1 = (insn >> 16) & 0xf;
1510 gen_op_iwmmxt_movq_M0_wRn(rd0);
1511 gen_op_iwmmxt_andq_M0_wRn(rd1);
1512 gen_op_iwmmxt_setpsr_nz();
1513 gen_op_iwmmxt_movq_wRn_M0(wrd);
1514 gen_op_iwmmxt_set_mup();
1515 gen_op_iwmmxt_set_cup();
1516 break;
1517 case 0x810: case 0xa10: /* WMADD */
1518 wrd = (insn >> 12) & 0xf;
1519 rd0 = (insn >> 0) & 0xf;
1520 rd1 = (insn >> 16) & 0xf;
1521 gen_op_iwmmxt_movq_M0_wRn(rd0);
1522 if (insn & (1 << 21))
1523 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1524 else
1525 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1526 gen_op_iwmmxt_movq_wRn_M0(wrd);
1527 gen_op_iwmmxt_set_mup();
1528 break;
1529 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1530 wrd = (insn >> 12) & 0xf;
1531 rd0 = (insn >> 16) & 0xf;
1532 rd1 = (insn >> 0) & 0xf;
1533 gen_op_iwmmxt_movq_M0_wRn(rd0);
1534 switch ((insn >> 22) & 3) {
1535 case 0:
1536 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1537 break;
1538 case 1:
1539 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1540 break;
1541 case 2:
1542 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1543 break;
1544 case 3:
1545 return 1;
1546 }
1547 gen_op_iwmmxt_movq_wRn_M0(wrd);
1548 gen_op_iwmmxt_set_mup();
1549 gen_op_iwmmxt_set_cup();
1550 break;
1551 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1552 wrd = (insn >> 12) & 0xf;
1553 rd0 = (insn >> 16) & 0xf;
1554 rd1 = (insn >> 0) & 0xf;
1555 gen_op_iwmmxt_movq_M0_wRn(rd0);
1556 switch ((insn >> 22) & 3) {
1557 case 0:
1558 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1559 break;
1560 case 1:
1561 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1562 break;
1563 case 2:
1564 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1565 break;
1566 case 3:
1567 return 1;
1568 }
1569 gen_op_iwmmxt_movq_wRn_M0(wrd);
1570 gen_op_iwmmxt_set_mup();
1571 gen_op_iwmmxt_set_cup();
1572 break;
1573 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1574 wrd = (insn >> 12) & 0xf;
1575 rd0 = (insn >> 16) & 0xf;
1576 rd1 = (insn >> 0) & 0xf;
1577 gen_op_iwmmxt_movq_M0_wRn(rd0);
1578 if (insn & (1 << 22))
1579 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1580 else
1581 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1582 if (!(insn & (1 << 20)))
1583 gen_op_iwmmxt_addl_M0_wRn(wrd);
1584 gen_op_iwmmxt_movq_wRn_M0(wrd);
1585 gen_op_iwmmxt_set_mup();
1586 break;
1587 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1588 wrd = (insn >> 12) & 0xf;
1589 rd0 = (insn >> 16) & 0xf;
1590 rd1 = (insn >> 0) & 0xf;
1591 gen_op_iwmmxt_movq_M0_wRn(rd0);
1592 if (insn & (1 << 21)) {
1593 if (insn & (1 << 20))
1594 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1595 else
1596 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1597 } else {
1598 if (insn & (1 << 20))
1599 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1600 else
1601 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1602 }
1603 gen_op_iwmmxt_movq_wRn_M0(wrd);
1604 gen_op_iwmmxt_set_mup();
1605 break;
1606 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1607 wrd = (insn >> 12) & 0xf;
1608 rd0 = (insn >> 16) & 0xf;
1609 rd1 = (insn >> 0) & 0xf;
1610 gen_op_iwmmxt_movq_M0_wRn(rd0);
1611 if (insn & (1 << 21))
1612 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1613 else
1614 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1615 if (!(insn & (1 << 20))) {
1616 iwmmxt_load_reg(cpu_V1, wrd);
1617 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1618 }
1619 gen_op_iwmmxt_movq_wRn_M0(wrd);
1620 gen_op_iwmmxt_set_mup();
1621 break;
1622 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1623 wrd = (insn >> 12) & 0xf;
1624 rd0 = (insn >> 16) & 0xf;
1625 rd1 = (insn >> 0) & 0xf;
1626 gen_op_iwmmxt_movq_M0_wRn(rd0);
1627 switch ((insn >> 22) & 3) {
1628 case 0:
1629 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1630 break;
1631 case 1:
1632 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1633 break;
1634 case 2:
1635 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1636 break;
1637 case 3:
1638 return 1;
1639 }
1640 gen_op_iwmmxt_movq_wRn_M0(wrd);
1641 gen_op_iwmmxt_set_mup();
1642 gen_op_iwmmxt_set_cup();
1643 break;
1644 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1645 wrd = (insn >> 12) & 0xf;
1646 rd0 = (insn >> 16) & 0xf;
1647 rd1 = (insn >> 0) & 0xf;
1648 gen_op_iwmmxt_movq_M0_wRn(rd0);
1649 if (insn & (1 << 22)) {
1650 if (insn & (1 << 20))
1651 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1652 else
1653 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1654 } else {
1655 if (insn & (1 << 20))
1656 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1657 else
1658 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1659 }
1660 gen_op_iwmmxt_movq_wRn_M0(wrd);
1661 gen_op_iwmmxt_set_mup();
1662 gen_op_iwmmxt_set_cup();
1663 break;
1664 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1665 wrd = (insn >> 12) & 0xf;
1666 rd0 = (insn >> 16) & 0xf;
1667 rd1 = (insn >> 0) & 0xf;
1668 gen_op_iwmmxt_movq_M0_wRn(rd0);
1669 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1670 tcg_gen_andi_i32(tmp, tmp, 7);
1671 iwmmxt_load_reg(cpu_V1, rd1);
1672 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1673 dead_tmp(tmp);
1674 gen_op_iwmmxt_movq_wRn_M0(wrd);
1675 gen_op_iwmmxt_set_mup();
1676 break;
1677 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1678 if (((insn >> 6) & 3) == 3)
1679 return 1;
1680 rd = (insn >> 12) & 0xf;
1681 wrd = (insn >> 16) & 0xf;
1682 tmp = load_reg(s, rd);
1683 gen_op_iwmmxt_movq_M0_wRn(wrd);
1684 switch ((insn >> 6) & 3) {
1685 case 0:
1686 tmp2 = tcg_const_i32(0xff);
1687 tmp3 = tcg_const_i32((insn & 7) << 3);
1688 break;
1689 case 1:
1690 tmp2 = tcg_const_i32(0xffff);
1691 tmp3 = tcg_const_i32((insn & 3) << 4);
1692 break;
1693 case 2:
1694 tmp2 = tcg_const_i32(0xffffffff);
1695 tmp3 = tcg_const_i32((insn & 1) << 5);
1696 break;
1697 default:
1698 TCGV_UNUSED(tmp2);
1699 TCGV_UNUSED(tmp3);
1700 }
1701 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1702 tcg_temp_free(tmp3);
1703 tcg_temp_free(tmp2);
1704 dead_tmp(tmp);
1705 gen_op_iwmmxt_movq_wRn_M0(wrd);
1706 gen_op_iwmmxt_set_mup();
1707 break;
1708 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1709 rd = (insn >> 12) & 0xf;
1710 wrd = (insn >> 16) & 0xf;
1711 if (rd == 15 || ((insn >> 22) & 3) == 3)
1712 return 1;
1713 gen_op_iwmmxt_movq_M0_wRn(wrd);
1714 tmp = new_tmp();
1715 switch ((insn >> 22) & 3) {
1716 case 0:
1717 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1718 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1719 if (insn & 8) {
1720 tcg_gen_ext8s_i32(tmp, tmp);
1721 } else {
1722 tcg_gen_andi_i32(tmp, tmp, 0xff);
1723 }
1724 break;
1725 case 1:
1726 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1727 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1728 if (insn & 8) {
1729 tcg_gen_ext16s_i32(tmp, tmp);
1730 } else {
1731 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1732 }
1733 break;
1734 case 2:
1735 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1736 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1737 break;
1738 }
1739 store_reg(s, rd, tmp);
1740 break;
1741 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1742 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1743 return 1;
1744 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1745 switch ((insn >> 22) & 3) {
1746 case 0:
1747 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1748 break;
1749 case 1:
1750 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1751 break;
1752 case 2:
1753 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1754 break;
1755 }
1756 tcg_gen_shli_i32(tmp, tmp, 28);
1757 gen_set_nzcv(tmp);
1758 dead_tmp(tmp);
1759 break;
1760 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1761 if (((insn >> 6) & 3) == 3)
1762 return 1;
1763 rd = (insn >> 12) & 0xf;
1764 wrd = (insn >> 16) & 0xf;
1765 tmp = load_reg(s, rd);
1766 switch ((insn >> 6) & 3) {
1767 case 0:
1768 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1769 break;
1770 case 1:
1771 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1772 break;
1773 case 2:
1774 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1775 break;
1776 }
1777 dead_tmp(tmp);
1778 gen_op_iwmmxt_movq_wRn_M0(wrd);
1779 gen_op_iwmmxt_set_mup();
1780 break;
1781 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1782 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1783 return 1;
1784 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1785 tmp2 = new_tmp();
1786 tcg_gen_mov_i32(tmp2, tmp);
1787 switch ((insn >> 22) & 3) {
1788 case 0:
1789 for (i = 0; i < 7; i ++) {
1790 tcg_gen_shli_i32(tmp2, tmp2, 4);
1791 tcg_gen_and_i32(tmp, tmp, tmp2);
1792 }
1793 break;
1794 case 1:
1795 for (i = 0; i < 3; i ++) {
1796 tcg_gen_shli_i32(tmp2, tmp2, 8);
1797 tcg_gen_and_i32(tmp, tmp, tmp2);
1798 }
1799 break;
1800 case 2:
1801 tcg_gen_shli_i32(tmp2, tmp2, 16);
1802 tcg_gen_and_i32(tmp, tmp, tmp2);
1803 break;
1804 }
1805 gen_set_nzcv(tmp);
1806 dead_tmp(tmp2);
1807 dead_tmp(tmp);
1808 break;
1809 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1810 wrd = (insn >> 12) & 0xf;
1811 rd0 = (insn >> 16) & 0xf;
1812 gen_op_iwmmxt_movq_M0_wRn(rd0);
1813 switch ((insn >> 22) & 3) {
1814 case 0:
1815 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1816 break;
1817 case 1:
1818 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1819 break;
1820 case 2:
1821 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1822 break;
1823 case 3:
1824 return 1;
1825 }
1826 gen_op_iwmmxt_movq_wRn_M0(wrd);
1827 gen_op_iwmmxt_set_mup();
1828 break;
1829 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1830 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1831 return 1;
1832 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1833 tmp2 = new_tmp();
1834 tcg_gen_mov_i32(tmp2, tmp);
1835 switch ((insn >> 22) & 3) {
1836 case 0:
1837 for (i = 0; i < 7; i ++) {
1838 tcg_gen_shli_i32(tmp2, tmp2, 4);
1839 tcg_gen_or_i32(tmp, tmp, tmp2);
1840 }
1841 break;
1842 case 1:
1843 for (i = 0; i < 3; i ++) {
1844 tcg_gen_shli_i32(tmp2, tmp2, 8);
1845 tcg_gen_or_i32(tmp, tmp, tmp2);
1846 }
1847 break;
1848 case 2:
1849 tcg_gen_shli_i32(tmp2, tmp2, 16);
1850 tcg_gen_or_i32(tmp, tmp, tmp2);
1851 break;
1852 }
1853 gen_set_nzcv(tmp);
1854 dead_tmp(tmp2);
1855 dead_tmp(tmp);
1856 break;
1857 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1858 rd = (insn >> 12) & 0xf;
1859 rd0 = (insn >> 16) & 0xf;
1860 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1861 return 1;
1862 gen_op_iwmmxt_movq_M0_wRn(rd0);
1863 tmp = new_tmp();
1864 switch ((insn >> 22) & 3) {
1865 case 0:
1866 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1867 break;
1868 case 1:
1869 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1870 break;
1871 case 2:
1872 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1873 break;
1874 }
1875 store_reg(s, rd, tmp);
1876 break;
1877 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1878 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1879 wrd = (insn >> 12) & 0xf;
1880 rd0 = (insn >> 16) & 0xf;
1881 rd1 = (insn >> 0) & 0xf;
1882 gen_op_iwmmxt_movq_M0_wRn(rd0);
1883 switch ((insn >> 22) & 3) {
1884 case 0:
1885 if (insn & (1 << 21))
1886 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1887 else
1888 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1889 break;
1890 case 1:
1891 if (insn & (1 << 21))
1892 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1893 else
1894 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1895 break;
1896 case 2:
1897 if (insn & (1 << 21))
1898 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1899 else
1900 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1901 break;
1902 case 3:
1903 return 1;
1904 }
1905 gen_op_iwmmxt_movq_wRn_M0(wrd);
1906 gen_op_iwmmxt_set_mup();
1907 gen_op_iwmmxt_set_cup();
1908 break;
1909 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1910 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1911 wrd = (insn >> 12) & 0xf;
1912 rd0 = (insn >> 16) & 0xf;
1913 gen_op_iwmmxt_movq_M0_wRn(rd0);
1914 switch ((insn >> 22) & 3) {
1915 case 0:
1916 if (insn & (1 << 21))
1917 gen_op_iwmmxt_unpacklsb_M0();
1918 else
1919 gen_op_iwmmxt_unpacklub_M0();
1920 break;
1921 case 1:
1922 if (insn & (1 << 21))
1923 gen_op_iwmmxt_unpacklsw_M0();
1924 else
1925 gen_op_iwmmxt_unpackluw_M0();
1926 break;
1927 case 2:
1928 if (insn & (1 << 21))
1929 gen_op_iwmmxt_unpacklsl_M0();
1930 else
1931 gen_op_iwmmxt_unpacklul_M0();
1932 break;
1933 case 3:
1934 return 1;
1935 }
1936 gen_op_iwmmxt_movq_wRn_M0(wrd);
1937 gen_op_iwmmxt_set_mup();
1938 gen_op_iwmmxt_set_cup();
1939 break;
1940 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1941 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1942 wrd = (insn >> 12) & 0xf;
1943 rd0 = (insn >> 16) & 0xf;
1944 gen_op_iwmmxt_movq_M0_wRn(rd0);
1945 switch ((insn >> 22) & 3) {
1946 case 0:
1947 if (insn & (1 << 21))
1948 gen_op_iwmmxt_unpackhsb_M0();
1949 else
1950 gen_op_iwmmxt_unpackhub_M0();
1951 break;
1952 case 1:
1953 if (insn & (1 << 21))
1954 gen_op_iwmmxt_unpackhsw_M0();
1955 else
1956 gen_op_iwmmxt_unpackhuw_M0();
1957 break;
1958 case 2:
1959 if (insn & (1 << 21))
1960 gen_op_iwmmxt_unpackhsl_M0();
1961 else
1962 gen_op_iwmmxt_unpackhul_M0();
1963 break;
1964 case 3:
1965 return 1;
1966 }
1967 gen_op_iwmmxt_movq_wRn_M0(wrd);
1968 gen_op_iwmmxt_set_mup();
1969 gen_op_iwmmxt_set_cup();
1970 break;
1971 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1972 case 0x214: case 0x614: case 0xa14: case 0xe14:
1973 if (((insn >> 22) & 3) == 0)
1974 return 1;
1975 wrd = (insn >> 12) & 0xf;
1976 rd0 = (insn >> 16) & 0xf;
1977 gen_op_iwmmxt_movq_M0_wRn(rd0);
1978 tmp = new_tmp();
1979 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1980 dead_tmp(tmp);
1981 return 1;
1982 }
1983 switch ((insn >> 22) & 3) {
1984 case 1:
1985 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
1986 break;
1987 case 2:
1988 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
1989 break;
1990 case 3:
1991 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
1992 break;
1993 }
1994 dead_tmp(tmp);
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2000 case 0x014: case 0x414: case 0x814: case 0xc14:
2001 if (((insn >> 22) & 3) == 0)
2002 return 1;
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
2006 tmp = new_tmp();
2007 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2008 dead_tmp(tmp);
2009 return 1;
2010 }
2011 switch ((insn >> 22) & 3) {
2012 case 1:
2013 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2014 break;
2015 case 2:
2016 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2017 break;
2018 case 3:
2019 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2020 break;
2021 }
2022 dead_tmp(tmp);
2023 gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2026 break;
2027 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2028 case 0x114: case 0x514: case 0x914: case 0xd14:
2029 if (((insn >> 22) & 3) == 0)
2030 return 1;
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 16) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0);
2034 tmp = new_tmp();
2035 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2036 dead_tmp(tmp);
2037 return 1;
2038 }
2039 switch ((insn >> 22) & 3) {
2040 case 1:
2041 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2042 break;
2043 case 2:
2044 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2045 break;
2046 case 3:
2047 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2048 break;
2049 }
2050 dead_tmp(tmp);
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2054 break;
2055 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2056 case 0x314: case 0x714: case 0xb14: case 0xf14:
2057 if (((insn >> 22) & 3) == 0)
2058 return 1;
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
2062 tmp = new_tmp();
2063 switch ((insn >> 22) & 3) {
2064 case 1:
2065 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2066 dead_tmp(tmp);
2067 return 1;
2068 }
2069 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2070 break;
2071 case 2:
2072 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2073 dead_tmp(tmp);
2074 return 1;
2075 }
2076 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2077 break;
2078 case 3:
2079 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2080 dead_tmp(tmp);
2081 return 1;
2082 }
2083 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2084 break;
2085 }
2086 dead_tmp(tmp);
2087 gen_op_iwmmxt_movq_wRn_M0(wrd);
2088 gen_op_iwmmxt_set_mup();
2089 gen_op_iwmmxt_set_cup();
2090 break;
2091 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2092 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2093 wrd = (insn >> 12) & 0xf;
2094 rd0 = (insn >> 16) & 0xf;
2095 rd1 = (insn >> 0) & 0xf;
2096 gen_op_iwmmxt_movq_M0_wRn(rd0);
2097 switch ((insn >> 22) & 3) {
2098 case 0:
2099 if (insn & (1 << 21))
2100 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2101 else
2102 gen_op_iwmmxt_minub_M0_wRn(rd1);
2103 break;
2104 case 1:
2105 if (insn & (1 << 21))
2106 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2107 else
2108 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2109 break;
2110 case 2:
2111 if (insn & (1 << 21))
2112 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2113 else
2114 gen_op_iwmmxt_minul_M0_wRn(rd1);
2115 break;
2116 case 3:
2117 return 1;
2118 }
2119 gen_op_iwmmxt_movq_wRn_M0(wrd);
2120 gen_op_iwmmxt_set_mup();
2121 break;
2122 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2123 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2124 wrd = (insn >> 12) & 0xf;
2125 rd0 = (insn >> 16) & 0xf;
2126 rd1 = (insn >> 0) & 0xf;
2127 gen_op_iwmmxt_movq_M0_wRn(rd0);
2128 switch ((insn >> 22) & 3) {
2129 case 0:
2130 if (insn & (1 << 21))
2131 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2132 else
2133 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2134 break;
2135 case 1:
2136 if (insn & (1 << 21))
2137 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2138 else
2139 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2140 break;
2141 case 2:
2142 if (insn & (1 << 21))
2143 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2144 else
2145 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2146 break;
2147 case 3:
2148 return 1;
2149 }
2150 gen_op_iwmmxt_movq_wRn_M0(wrd);
2151 gen_op_iwmmxt_set_mup();
2152 break;
2153 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2154 case 0x402: case 0x502: case 0x602: case 0x702:
2155 wrd = (insn >> 12) & 0xf;
2156 rd0 = (insn >> 16) & 0xf;
2157 rd1 = (insn >> 0) & 0xf;
2158 gen_op_iwmmxt_movq_M0_wRn(rd0);
2159 tmp = tcg_const_i32((insn >> 20) & 3);
2160 iwmmxt_load_reg(cpu_V1, rd1);
2161 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2162 tcg_temp_free(tmp);
2163 gen_op_iwmmxt_movq_wRn_M0(wrd);
2164 gen_op_iwmmxt_set_mup();
2165 break;
2166 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2167 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2168 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2169 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2170 wrd = (insn >> 12) & 0xf;
2171 rd0 = (insn >> 16) & 0xf;
2172 rd1 = (insn >> 0) & 0xf;
2173 gen_op_iwmmxt_movq_M0_wRn(rd0);
2174 switch ((insn >> 20) & 0xf) {
2175 case 0x0:
2176 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2177 break;
2178 case 0x1:
2179 gen_op_iwmmxt_subub_M0_wRn(rd1);
2180 break;
2181 case 0x3:
2182 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2183 break;
2184 case 0x4:
2185 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2186 break;
2187 case 0x5:
2188 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2189 break;
2190 case 0x7:
2191 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2192 break;
2193 case 0x8:
2194 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2195 break;
2196 case 0x9:
2197 gen_op_iwmmxt_subul_M0_wRn(rd1);
2198 break;
2199 case 0xb:
2200 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2201 break;
2202 default:
2203 return 1;
2204 }
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 gen_op_iwmmxt_set_cup();
2208 break;
2209 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2210 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2211 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2212 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2213 wrd = (insn >> 12) & 0xf;
2214 rd0 = (insn >> 16) & 0xf;
2215 gen_op_iwmmxt_movq_M0_wRn(rd0);
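/* The WSHUFH control byte is split across insn bits [23:20] and [3:0]. */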
2216 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2217 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2218 tcg_temp_free(tmp);
2219 gen_op_iwmmxt_movq_wRn_M0(wrd);
2220 gen_op_iwmmxt_set_mup();
2221 gen_op_iwmmxt_set_cup();
2222 break;
2223 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2224 case 0x418: case 0x518: case 0x618: case 0x718:
2225 case 0x818: case 0x918: case 0xa18: case 0xb18:
2226 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2227 wrd = (insn >> 12) & 0xf;
2228 rd0 = (insn >> 16) & 0xf;
2229 rd1 = (insn >> 0) & 0xf;
2230 gen_op_iwmmxt_movq_M0_wRn(rd0);
2231 switch ((insn >> 20) & 0xf) {
2232 case 0x0:
2233 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2234 break;
2235 case 0x1:
2236 gen_op_iwmmxt_addub_M0_wRn(rd1);
2237 break;
2238 case 0x3:
2239 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2240 break;
2241 case 0x4:
2242 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2243 break;
2244 case 0x5:
2245 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2246 break;
2247 case 0x7:
2248 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2249 break;
2250 case 0x8:
2251 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2252 break;
2253 case 0x9:
2254 gen_op_iwmmxt_addul_M0_wRn(rd1);
2255 break;
2256 case 0xb:
2257 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2258 break;
2259 default:
2260 return 1;
2261 }
2262 gen_op_iwmmxt_movq_wRn_M0(wrd);
2263 gen_op_iwmmxt_set_mup();
2264 gen_op_iwmmxt_set_cup();
2265 break;
2266 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2267 case 0x408: case 0x508: case 0x608: case 0x708:
2268 case 0x808: case 0x908: case 0xa08: case 0xb08:
2269 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2270 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2271 return 1;
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
2276 switch ((insn >> 22) & 3) {
2277 case 1:
2278 if (insn & (1 << 21))
2279 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2280 else
2281 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2282 break;
2283 case 2:
2284 if (insn & (1 << 21))
2285 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2286 else
2287 gen_op_iwmmxt_packul_M0_wRn(rd1);
2288 break;
2289 case 3:
2290 if (insn & (1 << 21))
2291 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2292 else
2293 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2294 break;
2295 }
2296 gen_op_iwmmxt_movq_wRn_M0(wrd);
2297 gen_op_iwmmxt_set_mup();
2298 gen_op_iwmmxt_set_cup();
2299 break;
2300 case 0x201: case 0x203: case 0x205: case 0x207:
2301 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2302 case 0x211: case 0x213: case 0x215: case 0x217:
2303 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2304 wrd = (insn >> 5) & 0xf;
2305 rd0 = (insn >> 12) & 0xf;
2306 rd1 = (insn >> 0) & 0xf;
2307 if (rd0 == 0xf || rd1 == 0xf)
2308 return 1;
2309 gen_op_iwmmxt_movq_M0_wRn(wrd);
2310 tmp = load_reg(s, rd0);
2311 tmp2 = load_reg(s, rd1);
2312 switch ((insn >> 16) & 0xf) {
2313 case 0x0: /* TMIA */
2314 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2315 break;
2316 case 0x8: /* TMIAPH */
2317 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2318 break;
2319 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2320 if (insn & (1 << 16))
2321 tcg_gen_shri_i32(tmp, tmp, 16);
2322 if (insn & (1 << 17))
2323 tcg_gen_shri_i32(tmp2, tmp2, 16);
2324 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2325 break;
2326 default:
2327 dead_tmp(tmp2);
2328 dead_tmp(tmp);
2329 return 1;
2330 }
2331 dead_tmp(tmp2);
2332 dead_tmp(tmp);
2333 gen_op_iwmmxt_movq_wRn_M0(wrd);
2334 gen_op_iwmmxt_set_mup();
2335 break;
2336 default:
2337 return 1;
2338 }
2339
2340 return 0;
2341 }
2342
2343 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2344 (i.e. an undefined instruction). */
2345 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2346 {
2347 int acc, rd0, rd1, rdhi, rdlo;
2348 TCGv tmp, tmp2;
2349
2350 if ((insn & 0x0ff00f10) == 0x0e200010) {
2351 /* Multiply with Internal Accumulate Format */
2352 rd0 = (insn >> 12) & 0xf;
2353 rd1 = insn & 0xf;
2354 acc = (insn >> 5) & 7;
2355
2356 if (acc != 0)
2357 return 1;
2358
2359 tmp = load_reg(s, rd0);
2360 tmp2 = load_reg(s, rd1);
2361 switch ((insn >> 16) & 0xf) {
2362 case 0x0: /* MIA */
2363 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2364 break;
2365 case 0x8: /* MIAPH */
2366 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2367 break;
2368 case 0xc: /* MIABB */
2369 case 0xd: /* MIABT */
2370 case 0xe: /* MIATB */
2371 case 0xf: /* MIATT */
2372 if (insn & (1 << 16))
2373 tcg_gen_shri_i32(tmp, tmp, 16);
2374 if (insn & (1 << 17))
2375 tcg_gen_shri_i32(tmp2, tmp2, 16);
2376 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2377 break;
2378 default:
2379 return 1;
2380 }
2381 dead_tmp(tmp2);
2382 dead_tmp(tmp);
2383
2384 gen_op_iwmmxt_movq_wRn_M0(acc);
2385 return 0;
2386 }
2387
2388 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2389 /* Internal Accumulator Access Format */
2390 rdhi = (insn >> 16) & 0xf;
2391 rdlo = (insn >> 12) & 0xf;
2392 acc = insn & 7;
2393
2394 if (acc != 0)
2395 return 1;
2396
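/* The XScale accumulator is 40 bits wide: MRA returns the low 32 bits
   in RdLo and the top 8 bits, zero-extended, in RdHi.  */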
2397 if (insn & ARM_CP_RW_BIT) { /* MRA */
2398 iwmmxt_load_reg(cpu_V0, acc);
2399 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2400 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2401 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2402 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2403 } else { /* MAR */
2404 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2405 iwmmxt_store_reg(cpu_V0, acc);
2406 }
2407 return 0;
2408 }
2409
2410 return 1;
2411 }
2412
2413 /* Disassemble system coprocessor instruction. Return nonzero if
2414 the instruction is not defined. */
2415 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2416 {
2417 TCGv tmp, tmp2;
2418 uint32_t rd = (insn >> 12) & 0xf;
2419 uint32_t cp = (insn >> 8) & 0xf;
2420 if (IS_USER(s)) {
2421 return 1;
2422 }
2423
2424 if (insn & ARM_CP_RW_BIT) {
2425 if (!env->cp[cp].cp_read)
2426 return 1;
2427 gen_set_pc_im(s->pc);
2428 tmp = new_tmp();
2429 tmp2 = tcg_const_i32(insn);
2430 gen_helper_get_cp(tmp, cpu_env, tmp2);
2431 tcg_temp_free(tmp2);
2432 store_reg(s, rd, tmp);
2433 } else {
2434 if (!env->cp[cp].cp_write)
2435 return 1;
2436 gen_set_pc_im(s->pc);
2437 tmp = load_reg(s, rd);
2438 tmp2 = tcg_const_i32(insn);
2439 gen_helper_set_cp(cpu_env, tmp2, tmp);
2440 tcg_temp_free(tmp2);
2441 dead_tmp(tmp);
2442 }
2443 return 0;
2444 }
2445
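/* Check whether a cp15 access from user mode is permitted: only the
   c13 thread ID (TLS) registers and the c7 barrier encodings (ISB,
   DSB, DMB) are legal.  Note that op packs opc2 in bits [2:0] and
   opc1 in bits [5:3].  */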
2446 static int cp15_user_ok(uint32_t insn)
2447 {
2448 int cpn = (insn >> 16) & 0xf;
2449 int cpm = insn & 0xf;
2450 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2451
2452 if (cpn == 13 && cpm == 0) {
2453 /* TLS register. */
2454 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2455 return 1;
2456 }
2457 if (cpn == 7) {
2458 /* ISB, DSB, DMB. */
2459 if ((cpm == 5 && op == 4)
2460 || (cpm == 10 && (op == 4 || op == 5)))
2461 return 1;
2462 }
2463 return 0;
2464 }
2465
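/* Handle MRC/MCR accesses to the cp15 c13 software thread ID registers
   (op 2..4).  Returns nonzero if the access was handled here, zero to
   fall through to the generic cp15 code.  */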
2466 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2467 {
2468 TCGv tmp;
2469 int cpn = (insn >> 16) & 0xf;
2470 int cpm = insn & 0xf;
2471 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2472
2473 if (!arm_feature(env, ARM_FEATURE_V6K))
2474 return 0;
2475
2476 if (!(cpn == 13 && cpm == 0))
2477 return 0;
2478
2479 if (insn & ARM_CP_RW_BIT) {
2480 switch (op) {
2481 case 2:
2482 tmp = load_cpu_field(cp15.c13_tls1);
2483 break;
2484 case 3:
2485 tmp = load_cpu_field(cp15.c13_tls2);
2486 break;
2487 case 4:
2488 tmp = load_cpu_field(cp15.c13_tls3);
2489 break;
2490 default:
2491 return 0;
2492 }
2493 store_reg(s, rd, tmp);
2494
2495 } else {
2496 tmp = load_reg(s, rd);
2497 switch (op) {
2498 case 2:
2499 store_cpu_field(tmp, cp15.c13_tls1);
2500 break;
2501 case 3:
2502 store_cpu_field(tmp, cp15.c13_tls2);
2503 break;
2504 case 4:
2505 store_cpu_field(tmp, cp15.c13_tls3);
2506 break;
2507 default:
2508 dead_tmp(tmp);
2509 return 0;
2510 }
2511 }
2512 return 1;
2513 }
2514
2515 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2516 the instruction is not defined. */
2517 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2518 {
2519 uint32_t rd;
2520 TCGv tmp, tmp2;
2521
2522 /* M profile cores use memory mapped registers instead of cp15. */
2523 if (arm_feature(env, ARM_FEATURE_M))
2524 return 1;
2525
2526 if ((insn & (1 << 25)) == 0) {
2527 if (insn & (1 << 20)) {
2528 /* mrrc */
2529 return 1;
2530 }
2531 /* mcrr. Used for block cache operations, so implement as no-op. */
2532 return 0;
2533 }
2534 if ((insn & (1 << 4)) == 0) {
2535 /* cdp */
2536 return 1;
2537 }
2538 if (IS_USER(s) && !cp15_user_ok(insn)) {
2539 return 1;
2540 }
2541 if ((insn & 0x0fff0fff) == 0x0e070f90
2542 || (insn & 0x0fff0fff) == 0x0e070f58) {
2543 /* Wait for interrupt. */
2544 gen_set_pc_im(s->pc);
2545 s->is_jmp = DISAS_WFI;
2546 return 0;
2547 }
2548 rd = (insn >> 12) & 0xf;
2549
2550 if (cp15_tls_load_store(env, s, insn, rd))
2551 return 0;
2552
2553 tmp2 = tcg_const_i32(insn);
2554 if (insn & ARM_CP_RW_BIT) {
2555 tmp = new_tmp();
2556 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2557 /* If the destination register is r15 then the condition codes are set. */
2558 if (rd != 15)
2559 store_reg(s, rd, tmp);
2560 else
2561 dead_tmp(tmp);
2562 } else {
2563 tmp = load_reg(s, rd);
2564 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2565 dead_tmp(tmp);
2566 /* Normally we would always end the TB here, but Linux
2567 * arch/arm/mach-pxa/sleep.S expects two instructions following
2568 * an MMU enable to execute from cache. Imitate this behaviour. */
2569 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2570 (insn & 0x0fff0fff) != 0x0e010f10)
2571 gen_lookup_tb(s);
2572 }
2573 tcg_temp_free_i32(tmp2);
2574 return 0;
2575 }
2576
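/* VFP register numbers are encoded as a 4-bit field plus one extra bit
   held elsewhere in the instruction.  For single-precision registers
   the extra bit is the LSB of the 5-bit register number.  For
   double-precision registers it must be zero before VFP3; on VFP3 and
   later it is bit 4 of the register number, giving access to D16-D31.  */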
2577 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2578 #define VFP_SREG(insn, bigbit, smallbit) \
2579 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2580 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2581 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2582 reg = (((insn) >> (bigbit)) & 0x0f) \
2583 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2584 } else { \
2585 if (insn & (1 << (smallbit))) \
2586 return 1; \
2587 reg = ((insn) >> (bigbit)) & 0x0f; \
2588 }} while (0)
2589
2590 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2591 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2592 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2593 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2594 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2595 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2596
2597 /* Move between integer and VFP cores. */
2598 static TCGv gen_vfp_mrs(void)
2599 {
2600 TCGv tmp = new_tmp();
2601 tcg_gen_mov_i32(tmp, cpu_F0s);
2602 return tmp;
2603 }
2604
2605 static void gen_vfp_msr(TCGv tmp)
2606 {
2607 tcg_gen_mov_i32(cpu_F0s, tmp);
2608 dead_tmp(tmp);
2609 }
2610
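/* The gen_neon_dup_* helpers broadcast one byte or halfword of a 32-bit
   value across all of its lanes; they are used for VDUP and for loading
   scalar operands.  */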
2611 static void gen_neon_dup_u8(TCGv var, int shift)
2612 {
2613 TCGv tmp = new_tmp();
2614 if (shift)
2615 tcg_gen_shri_i32(var, var, shift);
2616 tcg_gen_ext8u_i32(var, var);
2617 tcg_gen_shli_i32(tmp, var, 8);
2618 tcg_gen_or_i32(var, var, tmp);
2619 tcg_gen_shli_i32(tmp, var, 16);
2620 tcg_gen_or_i32(var, var, tmp);
2621 dead_tmp(tmp);
2622 }
2623
2624 static void gen_neon_dup_low16(TCGv var)
2625 {
2626 TCGv tmp = new_tmp();
2627 tcg_gen_ext16u_i32(var, var);
2628 tcg_gen_shli_i32(tmp, var, 16);
2629 tcg_gen_or_i32(var, var, tmp);
2630 dead_tmp(tmp);
2631 }
2632
2633 static void gen_neon_dup_high16(TCGv var)
2634 {
2635 TCGv tmp = new_tmp();
2636 tcg_gen_andi_i32(var, var, 0xffff0000);
2637 tcg_gen_shri_i32(tmp, var, 16);
2638 tcg_gen_or_i32(var, var, tmp);
2639 dead_tmp(tmp);
2640 }
2641
2642 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2643 (i.e. an undefined instruction). */
2644 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2645 {
2646 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2647 int dp, veclen;
2648 TCGv addr;
2649 TCGv tmp;
2650 TCGv tmp2;
2651
2652 if (!arm_feature(env, ARM_FEATURE_VFP))
2653 return 1;
2654
2655 if (!s->vfp_enabled) {
2656 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2657 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2658 return 1;
2659 rn = (insn >> 16) & 0xf;
2660 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2661 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2662 return 1;
2663 }
2664 dp = ((insn & 0xf00) == 0xb00);
2665 switch ((insn >> 24) & 0xf) {
2666 case 0xe:
2667 if (insn & (1 << 4)) {
2668 /* single register transfer */
2669 rd = (insn >> 12) & 0xf;
2670 if (dp) {
2671 int size;
2672 int pass;
2673
2674 VFP_DREG_N(rn, insn);
2675 if (insn & 0xf)
2676 return 1;
2677 if (insn & 0x00c00060
2678 && !arm_feature(env, ARM_FEATURE_NEON))
2679 return 1;
2680
2681 pass = (insn >> 21) & 1;
2682 if (insn & (1 << 22)) {
2683 size = 0;
2684 offset = ((insn >> 5) & 3) * 8;
2685 } else if (insn & (1 << 5)) {
2686 size = 1;
2687 offset = (insn & (1 << 6)) ? 16 : 0;
2688 } else {
2689 size = 2;
2690 offset = 0;
2691 }
2692 if (insn & ARM_CP_RW_BIT) {
2693 /* vfp->arm */
2694 tmp = neon_load_reg(rn, pass);
2695 switch (size) {
2696 case 0:
2697 if (offset)
2698 tcg_gen_shri_i32(tmp, tmp, offset);
2699 if (insn & (1 << 23))
2700 gen_uxtb(tmp);
2701 else
2702 gen_sxtb(tmp);
2703 break;
2704 case 1:
2705 if (insn & (1 << 23)) {
2706 if (offset) {
2707 tcg_gen_shri_i32(tmp, tmp, 16);
2708 } else {
2709 gen_uxth(tmp);
2710 }
2711 } else {
2712 if (offset) {
2713 tcg_gen_sari_i32(tmp, tmp, 16);
2714 } else {
2715 gen_sxth(tmp);
2716 }
2717 }
2718 break;
2719 case 2:
2720 break;
2721 }
2722 store_reg(s, rd, tmp);
2723 } else {
2724 /* arm->vfp */
2725 tmp = load_reg(s, rd);
2726 if (insn & (1 << 23)) {
2727 /* VDUP */
2728 if (size == 0) {
2729 gen_neon_dup_u8(tmp, 0);
2730 } else if (size == 1) {
2731 gen_neon_dup_low16(tmp);
2732 }
2733 for (n = 0; n <= pass * 2; n++) {
2734 tmp2 = new_tmp();
2735 tcg_gen_mov_i32(tmp2, tmp);
2736 neon_store_reg(rn, n, tmp2);
2737 }
2738 neon_store_reg(rn, n, tmp);
2739 } else {
2740 /* VMOV */
2741 switch (size) {
2742 case 0:
2743 tmp2 = neon_load_reg(rn, pass);
2744 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2745 dead_tmp(tmp2);
2746 break;
2747 case 1:
2748 tmp2 = neon_load_reg(rn, pass);
2749 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2750 dead_tmp(tmp2);
2751 break;
2752 case 2:
2753 break;
2754 }
2755 neon_store_reg(rn, pass, tmp);
2756 }
2757 }
2758 } else { /* !dp */
2759 if ((insn & 0x6f) != 0x00)
2760 return 1;
2761 rn = VFP_SREG_N(insn);
2762 if (insn & ARM_CP_RW_BIT) {
2763 /* vfp->arm */
2764 if (insn & (1 << 21)) {
2765 /* system register */
2766 rn >>= 1;
2767
2768 switch (rn) {
2769 case ARM_VFP_FPSID:
2770 /* VFP2 allows access to FPSID from userspace.
2771 VFP3 restricts all id registers to privileged
2772 accesses. */
2773 if (IS_USER(s)
2774 && arm_feature(env, ARM_FEATURE_VFP3))
2775 return 1;
2776 tmp = load_cpu_field(vfp.xregs[rn]);
2777 break;
2778 case ARM_VFP_FPEXC:
2779 if (IS_USER(s))
2780 return 1;
2781 tmp = load_cpu_field(vfp.xregs[rn]);
2782 break;
2783 case ARM_VFP_FPINST:
2784 case ARM_VFP_FPINST2:
2785 /* Not present in VFP3. */
2786 if (IS_USER(s)
2787 || arm_feature(env, ARM_FEATURE_VFP3))
2788 return 1;
2789 tmp = load_cpu_field(vfp.xregs[rn]);
2790 break;
2791 case ARM_VFP_FPSCR:
2792 if (rd == 15) {
2793 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2794 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2795 } else {
2796 tmp = new_tmp();
2797 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2798 }
2799 break;
2800 case ARM_VFP_MVFR0:
2801 case ARM_VFP_MVFR1:
2802 if (IS_USER(s)
2803 || !arm_feature(env, ARM_FEATURE_VFP3))
2804 return 1;
2805 tmp = load_cpu_field(vfp.xregs[rn]);
2806 break;
2807 default:
2808 return 1;
2809 }
2810 } else {
2811 gen_mov_F0_vreg(0, rn);
2812 tmp = gen_vfp_mrs();
2813 }
2814 if (rd == 15) {
2815 /* Set the 4 flag bits in the CPSR. */
2816 gen_set_nzcv(tmp);
2817 dead_tmp(tmp);
2818 } else {
2819 store_reg(s, rd, tmp);
2820 }
2821 } else {
2822 /* arm->vfp */
2823 tmp = load_reg(s, rd);
2824 if (insn & (1 << 21)) {
2825 rn >>= 1;
2826 /* system register */
2827 switch (rn) {
2828 case ARM_VFP_FPSID:
2829 case ARM_VFP_MVFR0:
2830 case ARM_VFP_MVFR1:
2831 /* Writes are ignored. */
2832 break;
2833 case ARM_VFP_FPSCR:
2834 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2835 dead_tmp(tmp);
2836 gen_lookup_tb(s);
2837 break;
2838 case ARM_VFP_FPEXC:
2839 if (IS_USER(s))
2840 return 1;
2841 /* TODO: VFP subarchitecture support.
2842 * For now, keep the EN bit only */
2843 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2844 store_cpu_field(tmp, vfp.xregs[rn]);
2845 gen_lookup_tb(s);
2846 break;
2847 case ARM_VFP_FPINST:
2848 case ARM_VFP_FPINST2:
2849 store_cpu_field(tmp, vfp.xregs[rn]);
2850 break;
2851 default:
2852 return 1;
2853 }
2854 } else {
2855 gen_vfp_msr(tmp);
2856 gen_mov_vreg_F0(0, rn);
2857 }
2858 }
2859 }
2860 } else {
2861 /* data processing */
2862 /* The opcode is in bits 23, 21, 20 and 6. */
2863 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2864 if (dp) {
2865 if (op == 15) {
2866 /* rn is opcode */
2867 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2868 } else {
2869 /* rn is register number */
2870 VFP_DREG_N(rn, insn);
2871 }
2872
2873 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2874 /* Integer or single precision destination. */
2875 rd = VFP_SREG_D(insn);
2876 } else {
2877 VFP_DREG_D(rd, insn);
2878 }
2879 if (op == 15 &&
2880 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2881 /* VCVT from int is always from S reg regardless of the dp bit.
2882 * VCVT with immediate frac_bits has the same format as SREG_M.
2883 */
2884 rm = VFP_SREG_M(insn);
2885 } else {
2886 VFP_DREG_M(rm, insn);
2887 }
2888 } else {
2889 rn = VFP_SREG_N(insn);
2890 if (op == 15 && rn == 15) {
2891 /* Double precision destination. */
2892 VFP_DREG_D(rd, insn);
2893 } else {
2894 rd = VFP_SREG_D(insn);
2895 }
2896 /* NB that we implicitly rely on the encoding for the frac_bits
2897 * in VCVT of fixed to float being the same as that of an SREG_M
2898 */
2899 rm = VFP_SREG_M(insn);
2900 }
2901
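/* VFP short-vector handling: vec_len and vec_stride come from FPSCR.
   The register file is split into banks (4 doubles or 8 singles); an
   operation whose destination lies in the first bank executes as a
   scalar, otherwise it is repeated veclen times, stepping the register
   numbers by the stride and wrapping within the bank.  */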
2902 veclen = s->vec_len;
2903 if (op == 15 && rn > 3)
2904 veclen = 0;
2905
2906 /* Shut up compiler warnings. */
2907 delta_m = 0;
2908 delta_d = 0;
2909 bank_mask = 0;
2910
2911 if (veclen > 0) {
2912 if (dp)
2913 bank_mask = 0xc;
2914 else
2915 bank_mask = 0x18;
2916
2917 /* Figure out what type of vector operation this is. */
2918 if ((rd & bank_mask) == 0) {
2919 /* scalar */
2920 veclen = 0;
2921 } else {
2922 if (dp)
2923 delta_d = (s->vec_stride >> 1) + 1;
2924 else
2925 delta_d = s->vec_stride + 1;
2926
2927 if ((rm & bank_mask) == 0) {
2928 /* mixed scalar/vector */
2929 delta_m = 0;
2930 } else {
2931 /* vector */
2932 delta_m = delta_d;
2933 }
2934 }
2935 }
2936
2937 /* Load the initial operands. */
2938 if (op == 15) {
2939 switch (rn) {
2940 case 16:
2941 case 17:
2942 /* Integer source */
2943 gen_mov_F0_vreg(0, rm);
2944 break;
2945 case 8:
2946 case 9:
2947 /* Compare */
2948 gen_mov_F0_vreg(dp, rd);
2949 gen_mov_F1_vreg(dp, rm);
2950 break;
2951 case 10:
2952 case 11:
2953 /* Compare with zero */
2954 gen_mov_F0_vreg(dp, rd);
2955 gen_vfp_F1_ld0(dp);
2956 break;
2957 case 20:
2958 case 21:
2959 case 22:
2960 case 23:
2961 case 28:
2962 case 29:
2963 case 30:
2964 case 31:
2965 /* Source and destination the same. */
2966 gen_mov_F0_vreg(dp, rd);
2967 break;
2968 default:
2969 /* One source operand. */
2970 gen_mov_F0_vreg(dp, rm);
2971 break;
2972 }
2973 } else {
2974 /* Two source operands. */
2975 gen_mov_F0_vreg(dp, rn);
2976 gen_mov_F1_vreg(dp, rm);
2977 }
2978
2979 for (;;) {
2980 /* Perform the calculation. */
2981 switch (op) {
2982 case 0: /* mac: fd + (fn * fm) */
2983 gen_vfp_mul(dp);
2984 gen_mov_F1_vreg(dp, rd);
2985 gen_vfp_add(dp);
2986 break;
2987 case 1: /* nmac: fd - (fn * fm) */
2988 gen_vfp_mul(dp);
2989 gen_vfp_neg(dp);
2990 gen_mov_F1_vreg(dp, rd);
2991 gen_vfp_add(dp);
2992 break;
2993 case 2: /* msc: -fd + (fn * fm) */
2994 gen_vfp_mul(dp);
2995 gen_mov_F1_vreg(dp, rd);
2996 gen_vfp_sub(dp);
2997 break;
2998 case 3: /* nmsc: -fd - (fn * fm) */
2999 gen_vfp_mul(dp);
3000 gen_vfp_neg(dp);
3001 gen_mov_F1_vreg(dp, rd);
3002 gen_vfp_sub(dp);
3003 break;
3004 case 4: /* mul: fn * fm */
3005 gen_vfp_mul(dp);
3006 break;
3007 case 5: /* nmul: -(fn * fm) */
3008 gen_vfp_mul(dp);
3009 gen_vfp_neg(dp);
3010 break;
3011 case 6: /* add: fn + fm */
3012 gen_vfp_add(dp);
3013 break;
3014 case 7: /* sub: fn - fm */
3015 gen_vfp_sub(dp);
3016 break;
3017 case 8: /* div: fn / fm */
3018 gen_vfp_div(dp);
3019 break;
3020 case 14: /* fconst */
3021 if (!arm_feature(env, ARM_FEATURE_VFP3))
3022 return 1;
3023
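/* Expand the 8-bit VFP3 immediate (cf. VFPExpandImm in the ARM ARM):
   insn bit 19 is the sign, bits 18:16 and 3:0 supply the top of the
   exponent and fraction.  The rest of the significand is zero, so
   only the most significant word of the constant is built.  */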
3024 n = (insn << 12) & 0x80000000;
3025 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3026 if (dp) {
3027 if (i & 0x40)
3028 i |= 0x3f80;
3029 else
3030 i |= 0x4000;
3031 n |= i << 16;
3032 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3033 } else {
3034 if (i & 0x40)
3035 i |= 0x780;
3036 else
3037 i |= 0x800;
3038 n |= i << 19;
3039 tcg_gen_movi_i32(cpu_F0s, n);
3040 }
3041 break;
3042 case 15: /* extension space */
3043 switch (rn) {
3044 case 0: /* cpy */
3045 /* no-op */
3046 break;
3047 case 1: /* abs */
3048 gen_vfp_abs(dp);
3049 break;
3050 case 2: /* neg */
3051 gen_vfp_neg(dp);
3052 break;
3053 case 3: /* sqrt */
3054 gen_vfp_sqrt(dp);
3055 break;
3056 case 4: /* vcvtb.f32.f16 */
3057 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3058 return 1;
3059 tmp = gen_vfp_mrs();
3060 tcg_gen_ext16u_i32(tmp, tmp);
3061 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3062 dead_tmp(tmp);
3063 break;
3064 case 5: /* vcvtt.f32.f16 */
3065 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3066 return 1;
3067 tmp = gen_vfp_mrs();
3068 tcg_gen_shri_i32(tmp, tmp, 16);
3069 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3070 dead_tmp(tmp);
3071 break;
3072 case 6: /* vcvtb.f16.f32 */
3073 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3074 return 1;
3075 tmp = new_tmp();
3076 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3077 gen_mov_F0_vreg(0, rd);
3078 tmp2 = gen_vfp_mrs();
3079 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3080 tcg_gen_or_i32(tmp, tmp, tmp2);
3081 dead_tmp(tmp2);
3082 gen_vfp_msr(tmp);
3083 break;
3084 case 7: /* vcvtt.f16.f32 */
3085 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3086 return 1;
3087 tmp = new_tmp();
3088 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3089 tcg_gen_shli_i32(tmp, tmp, 16);
3090 gen_mov_F0_vreg(0, rd);
3091 tmp2 = gen_vfp_mrs();
3092 tcg_gen_ext16u_i32(tmp2, tmp2);
3093 tcg_gen_or_i32(tmp, tmp, tmp2);
3094 dead_tmp(tmp2);
3095 gen_vfp_msr(tmp);
3096 break;
3097 case 8: /* cmp */
3098 gen_vfp_cmp(dp);
3099 break;
3100 case 9: /* cmpe */
3101 gen_vfp_cmpe(dp);
3102 break;
3103 case 10: /* cmpz */
3104 gen_vfp_cmp(dp);
3105 break;
3106 case 11: /* cmpez */
3107 gen_vfp_F1_ld0(dp);
3108 gen_vfp_cmpe(dp);
3109 break;
3110 case 15: /* single<->double conversion */
3111 if (dp)
3112 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3113 else
3114 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3115 break;
3116 case 16: /* fuito */
3117 gen_vfp_uito(dp);
3118 break;
3119 case 17: /* fsito */
3120 gen_vfp_sito(dp);
3121 break;
3122 case 20: /* fshto */
3123 if (!arm_feature(env, ARM_FEATURE_VFP3))
3124 return 1;
3125 gen_vfp_shto(dp, 16 - rm);
3126 break;
3127 case 21: /* fslto */
3128 if (!arm_feature(env, ARM_FEATURE_VFP3))
3129 return 1;
3130 gen_vfp_slto(dp, 32 - rm);
3131 break;
3132 case 22: /* fuhto */
3133 if (!arm_feature(env, ARM_FEATURE_VFP3))
3134 return 1;
3135 gen_vfp_uhto(dp, 16 - rm);
3136 break;
3137 case 23: /* fulto */
3138 if (!arm_feature(env, ARM_FEATURE_VFP3))
3139 return 1;
3140 gen_vfp_ulto(dp, 32 - rm);
3141 break;
3142 case 24: /* ftoui */
3143 gen_vfp_toui(dp);
3144 break;
3145 case 25: /* ftouiz */
3146 gen_vfp_touiz(dp);
3147 break;
3148 case 26: /* ftosi */
3149 gen_vfp_tosi(dp);
3150 break;
3151 case 27: /* ftosiz */
3152 gen_vfp_tosiz(dp);
3153 break;
3154 case 28: /* ftosh */
3155 if (!arm_feature(env, ARM_FEATURE_VFP3))
3156 return 1;
3157 gen_vfp_tosh(dp, 16 - rm);
3158 break;
3159 case 29: /* ftosl */
3160 if (!arm_feature(env, ARM_FEATURE_VFP3))
3161 return 1;
3162 gen_vfp_tosl(dp, 32 - rm);
3163 break;
3164 case 30: /* ftouh */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
3167 gen_vfp_touh(dp, 16 - rm);
3168 break;
3169 case 31: /* ftoul */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
3172 gen_vfp_toul(dp, 32 - rm);
3173 break;
3174 default: /* undefined */
3175 printf ("rn:%d\n", rn);
3176 return 1;
3177 }
3178 break;
3179 default: /* undefined */
3180 printf ("op:%d\n", op);
3181 return 1;
3182 }
3183
3184 /* Write back the result. */
3185 if (op == 15 && (rn >= 8 && rn <= 11))
3186 ; /* Comparison, do nothing. */
3187 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3188 /* VCVT double to int: always integer result. */
3189 gen_mov_vreg_F0(0, rd);
3190 else if (op == 15 && rn == 15)
3191 /* conversion */
3192 gen_mov_vreg_F0(!dp, rd);
3193 else
3194 gen_mov_vreg_F0(dp, rd);
3195
3196 /* break out of the loop if we have finished */
3197 if (veclen == 0)
3198 break;
3199
3200 if (op == 15 && delta_m == 0) {
3201 /* single source, one-to-many */
3202 while (veclen--) {
3203 rd = ((rd + delta_d) & (bank_mask - 1))
3204 | (rd & bank_mask);
3205 gen_mov_vreg_F0(dp, rd);
3206 }
3207 break;
3208 }
3209 /* Set up the next operands. */
3210 veclen--;
3211 rd = ((rd + delta_d) & (bank_mask - 1))
3212 | (rd & bank_mask);
3213
3214 if (op == 15) {
3215 /* One source operand. */
3216 rm = ((rm + delta_m) & (bank_mask - 1))
3217 | (rm & bank_mask);
3218 gen_mov_F0_vreg(dp, rm);
3219 } else {
3220 /* Two source operands. */
3221 rn = ((rn + delta_d) & (bank_mask - 1))
3222 | (rn & bank_mask);
3223 gen_mov_F0_vreg(dp, rn);
3224 if (delta_m) {
3225 rm = ((rm + delta_m) & (bank_mask - 1))
3226 | (rm & bank_mask);
3227 gen_mov_F1_vreg(dp, rm);
3228 }
3229 }
3230 }
3231 }
3232 break;
3233 case 0xc:
3234 case 0xd:
3235 if (dp && (insn & 0x03e00000) == 0x00400000) {
3236 /* two-register transfer */
3237 rn = (insn >> 16) & 0xf;
3238 rd = (insn >> 12) & 0xf;
3239 if (dp) {
3240 VFP_DREG_M(rm, insn);
3241 } else {
3242 rm = VFP_SREG_M(insn);
3243 }
3244
3245 if (insn & ARM_CP_RW_BIT) {
3246 /* vfp->arm */
3247 if (dp) {
3248 gen_mov_F0_vreg(0, rm * 2);
3249 tmp = gen_vfp_mrs();
3250 store_reg(s, rd, tmp);
3251 gen_mov_F0_vreg(0, rm * 2 + 1);
3252 tmp = gen_vfp_mrs();
3253 store_reg(s, rn, tmp);
3254 } else {
3255 gen_mov_F0_vreg(0, rm);
3256 tmp = gen_vfp_mrs();
3257 store_reg(s, rn, tmp);
3258 gen_mov_F0_vreg(0, rm + 1);
3259 tmp = gen_vfp_mrs();
3260 store_reg(s, rd, tmp);
3261 }
3262 } else {
3263 /* arm->vfp */
3264 if (dp) {
3265 tmp = load_reg(s, rd);
3266 gen_vfp_msr(tmp);
3267 gen_mov_vreg_F0(0, rm * 2);
3268 tmp = load_reg(s, rn);
3269 gen_vfp_msr(tmp);
3270 gen_mov_vreg_F0(0, rm * 2 + 1);
3271 } else {
3272 tmp = load_reg(s, rn);
3273 gen_vfp_msr(tmp);
3274 gen_mov_vreg_F0(0, rm);
3275 tmp = load_reg(s, rd);
3276 gen_vfp_msr(tmp);
3277 gen_mov_vreg_F0(0, rm + 1);
3278 }
3279 }
3280 } else {
3281 /* Load/store */
3282 rn = (insn >> 16) & 0xf;
3283 if (dp)
3284 VFP_DREG_D(rd, insn);
3285 else
3286 rd = VFP_SREG_D(insn);
3287 if (s->thumb && rn == 15) {
3288 addr = new_tmp();
3289 tcg_gen_movi_i32(addr, s->pc & ~2);
3290 } else {
3291 addr = load_reg(s, rn);
3292 }
3293 if ((insn & 0x01200000) == 0x01000000) {
3294 /* Single load/store */
3295 offset = (insn & 0xff) << 2;
3296 if ((insn & (1 << 23)) == 0)
3297 offset = -offset;
3298 tcg_gen_addi_i32(addr, addr, offset);
3299 if (insn & (1 << 20)) {
3300 gen_vfp_ld(s, dp, addr);
3301 gen_mov_vreg_F0(dp, rd);
3302 } else {
3303 gen_mov_F0_vreg(dp, rd);
3304 gen_vfp_st(s, dp, addr);
3305 }
3306 dead_tmp(addr);
3307 } else {
3308 /* load/store multiple */
3309 if (dp)
3310 n = (insn >> 1) & 0x7f;
3311 else
3312 n = insn & 0xff;
3313
3314 if (insn & (1 << 24)) /* pre-decrement */
3315 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3316
3317 if (dp)
3318 offset = 8;
3319 else
3320 offset = 4;
3321 for (i = 0; i < n; i++) {
3322 if (insn & ARM_CP_RW_BIT) {
3323 /* load */
3324 gen_vfp_ld(s, dp, addr);
3325 gen_mov_vreg_F0(dp, rd + i);
3326 } else {
3327 /* store */
3328 gen_mov_F0_vreg(dp, rd + i);
3329 gen_vfp_st(s, dp, addr);
3330 }
3331 tcg_gen_addi_i32(addr, addr, offset);
3332 }
3333 if (insn & (1 << 21)) {
3334 /* writeback */
3335 if (insn & (1 << 24))
3336 offset = -offset * n;
3337 else if (dp && (insn & 1))
3338 offset = 4;
3339 else
3340 offset = 0;
3341
3342 if (offset != 0)
3343 tcg_gen_addi_i32(addr, addr, offset);
3344 store_reg(s, rn, addr);
3345 } else {
3346 dead_tmp(addr);
3347 }
3348 }
3349 }
3350 break;
3351 default:
3352 /* Should never happen. */
3353 return 1;
3354 }
3355 return 0;
3356 }
3357
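/* Emit code to jump to dest.  If the destination lies in the same guest
   page as this TB we can chain to it directly with goto_tb; otherwise
   set the PC and exit to the main loop.  */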
3358 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3359 {
3360 TranslationBlock *tb;
3361
3362 tb = s->tb;
3363 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3364 tcg_gen_goto_tb(n);
3365 gen_set_pc_im(dest);
3366 tcg_gen_exit_tb((long)tb + n);
3367 } else {
3368 gen_set_pc_im(dest);
3369 tcg_gen_exit_tb(0);
3370 }
3371 }
3372
3373 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3374 {
3375 if (unlikely(s->singlestep_enabled)) {
3376 /* An indirect jump so that we still trigger the debug exception. */
3377 if (s->thumb)
3378 dest |= 1;
3379 gen_bx_im(s, dest);
3380 } else {
3381 gen_goto_tb(s, 0, dest);
3382 s->is_jmp = DISAS_TB_JUMP;
3383 }
3384 }
3385
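/* Signed 16x16->32 multiply as used by the SMULxy/SMLAxy family:
   x and y select the top (1) or bottom (0) halfword of each operand.  */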
3386 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3387 {
3388 if (x)
3389 tcg_gen_sari_i32(t0, t0, 16);
3390 else
3391 gen_sxth(t0);
3392 if (y)
3393 tcg_gen_sari_i32(t1, t1, 16);
3394 else
3395 gen_sxth(t1);
3396 tcg_gen_mul_i32(t0, t0, t1);
3397 }
3398
3399 /* Return the mask of PSR bits set by a MSR instruction. */
3400 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3401 uint32_t mask;
3402
3403 mask = 0;
3404 if (flags & (1 << 0))
3405 mask |= 0xff;
3406 if (flags & (1 << 1))
3407 mask |= 0xff00;
3408 if (flags & (1 << 2))
3409 mask |= 0xff0000;
3410 if (flags & (1 << 3))
3411 mask |= 0xff000000;
3412
3413 /* Mask out undefined bits. */
3414 mask &= ~CPSR_RESERVED;
3415 if (!arm_feature(env, ARM_FEATURE_V6))
3416 mask &= ~(CPSR_E | CPSR_GE);
3417 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3418 mask &= ~CPSR_IT;
3419 /* Mask out execution state bits. */
3420 if (!spsr)
3421 mask &= ~CPSR_EXEC;
3422 /* Mask out privileged bits. */
3423 if (IS_USER(s))
3424 mask &= CPSR_USER;
3425 return mask;
3426 }
3427
3428 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3429 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3430 {
3431 TCGv tmp;
3432 if (spsr) {
3433 /* ??? This is also undefined in system mode. */
3434 if (IS_USER(s))
3435 return 1;
3436
3437 tmp = load_cpu_field(spsr);
3438 tcg_gen_andi_i32(tmp, tmp, ~mask);
3439 tcg_gen_andi_i32(t0, t0, mask);
3440 tcg_gen_or_i32(tmp, tmp, t0);
3441 store_cpu_field(tmp, spsr);
3442 } else {
3443 gen_set_cpsr(t0, mask);
3444 }
3445 dead_tmp(t0);
3446 gen_lookup_tb(s);
3447 return 0;
3448 }
3449
3450 /* Returns nonzero if access to the PSR is not permitted. */
3451 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3452 {
3453 TCGv tmp;
3454 tmp = new_tmp();
3455 tcg_gen_movi_i32(tmp, val);
3456 return gen_set_psr(s, mask, spsr, tmp);
3457 }
3458
3459 /* Generate an old-style exception return. Marks pc as dead. */
3460 static void gen_exception_return(DisasContext *s, TCGv pc)
3461 {
3462 TCGv tmp;
3463 store_reg(s, 15, pc);
3464 tmp = load_cpu_field(spsr);
3465 gen_set_cpsr(tmp, 0xffffffff);
3466 dead_tmp(tmp);
3467 s->is_jmp = DISAS_UPDATE;
3468 }
3469
3470 /* Generate a v6 exception return. Marks both values as dead. */
3471 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3472 {
3473 gen_set_cpsr(cpsr, 0xffffffff);
3474 dead_tmp(cpsr);
3475 store_reg(s, 15, pc);
3476 s->is_jmp = DISAS_UPDATE;
3477 }
3478
3479 static inline void
3480 gen_set_condexec (DisasContext *s)
3481 {
3482 if (s->condexec_mask) {
3483 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3484 TCGv tmp = new_tmp();
3485 tcg_gen_movi_i32(tmp, val);
3486 store_cpu_field(tmp, condexec_bits);
3487 }
3488 }
3489
3490 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3491 {
3492 gen_set_condexec(s);
3493 gen_set_pc_im(s->pc - offset);
3494 gen_exception(excp);
3495 s->is_jmp = DISAS_JUMP;
3496 }
3497
3498 static void gen_nop_hint(DisasContext *s, int val)
3499 {
3500 switch (val) {
3501 case 3: /* wfi */
3502 gen_set_pc_im(s->pc);
3503 s->is_jmp = DISAS_WFI;
3504 break;
3505 case 2: /* wfe */
3506 case 4: /* sev */
3507 /* TODO: Implement SEV and WFE. May help SMP performance. */
3508 default: /* nop */
3509 break;
3510 }
3511 }
3512
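/* Shorthand for the common three-operand form: destination cpu_V0,
   sources cpu_V0 and cpu_V1.  */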
3513 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3514
3515 static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
3516 {
3517 switch (size) {
3518 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3519 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3520 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3521 default: return 1;
3522 }
3523 return 0;
3524 }
3525
3526 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3527 {
3528 switch (size) {
3529 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3530 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3531 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3532 default: return;
3533 }
3534 }
3535
3536 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3537 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3538 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3539 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3540 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3541
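/* Invoke an elementwise Neon integer helper for the current (size, u)
   combination; the switch value (size << 1) | u selects s8/u8, s16/u16
   or s32/u32.  The _ENV variant passes cpu_env for helpers that update
   CPU state, e.g. the cumulative saturation (QC) flag.  */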
3542 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3543 switch ((size << 1) | u) { \
3544 case 0: \
3545 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3546 break; \
3547 case 1: \
3548 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3549 break; \
3550 case 2: \
3551 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3552 break; \
3553 case 3: \
3554 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3555 break; \
3556 case 4: \
3557 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3558 break; \
3559 case 5: \
3560 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3561 break; \
3562 default: return 1; \
3563 }} while (0)
3564
3565 #define GEN_NEON_INTEGER_OP(name) do { \
3566 switch ((size << 1) | u) { \
3567 case 0: \
3568 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3569 break; \
3570 case 1: \
3571 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3572 break; \
3573 case 2: \
3574 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3575 break; \
3576 case 3: \
3577 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3578 break; \
3579 case 4: \
3580 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3581 break; \
3582 case 5: \
3583 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3584 break; \
3585 default: return 1; \
3586 }} while (0)
3587
3588 static TCGv neon_load_scratch(int scratch)
3589 {
3590 TCGv tmp = new_tmp();
3591 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3592 return tmp;
3593 }
3594
3595 static void neon_store_scratch(int scratch, TCGv var)
3596 {
3597 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3598 dead_tmp(var);
3599 }
3600
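/* Load the scalar operand of a Neon "by scalar" operation into a 32-bit
   temp.  For 16-bit elements the selected halfword is duplicated into
   both halves of the temp; 32-bit scalars are used as-is.  */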
3601 static inline TCGv neon_get_scalar(int size, int reg)
3602 {
3603 TCGv tmp;
3604 if (size == 1) {
3605 tmp = neon_load_reg(reg & 7, reg >> 4);
3606 if (reg & 8) {
3607 gen_neon_dup_high16(tmp);
3608 } else {
3609 gen_neon_dup_low16(tmp);
3610 }
3611 } else {
3612 tmp = neon_load_reg(reg & 15, reg >> 4);
3613 }
3614 return tmp;
3615 }
3616
3617 static int gen_neon_unzip(int rd, int rm, int size, int q)
3618 {
3619 TCGv tmp, tmp2;
3620 if (size == 3 || (!q && size == 2)) {
3621 return 1;
3622 }
3623 tmp = tcg_const_i32(rd);
3624 tmp2 = tcg_const_i32(rm);
3625 if (q) {
3626 switch (size) {
3627 case 0:
3628 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3629 break;
3630 case 1:
3631 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3632 break;
3633 case 2:
3634 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3635 break;
3636 default:
3637 abort();
3638 }
3639 } else {
3640 switch (size) {
3641 case 0:
3642 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3643 break;
3644 case 1:
3645 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3646 break;
3647 default:
3648 abort();
3649 }
3650 }
3651 tcg_temp_free_i32(tmp);
3652 tcg_temp_free_i32(tmp2);
3653 return 0;
3654 }
3655
3656 static int gen_neon_zip(int rd, int rm, int size, int q)
3657 {
3658 TCGv tmp, tmp2;
3659 if (size == 3 || (!q && size == 2)) {
3660 return 1;
3661 }
3662 tmp = tcg_const_i32(rd);
3663 tmp2 = tcg_const_i32(rm);
3664 if (q) {
3665 switch (size) {
3666 case 0:
3667 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3668 break;
3669 case 1:
3670 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3671 break;
3672 case 2:
3673 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3674 break;
3675 default:
3676 abort();
3677 }
3678 } else {
3679 switch (size) {
3680 case 0:
3681 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3682 break;
3683 case 1:
3684 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3685 break;
3686 default:
3687 abort();
3688 }
3689 }
3690 tcg_temp_free_i32(tmp);
3691 tcg_temp_free_i32(tmp2);
3692 return 0;
3693 }
3694
3695 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3696 {
3697 TCGv rd, tmp;
3698
3699 rd = new_tmp();
3700 tmp = new_tmp();
3701
3702 tcg_gen_shli_i32(rd, t0, 8);
3703 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3704 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3705 tcg_gen_or_i32(rd, rd, tmp);
3706
3707 tcg_gen_shri_i32(t1, t1, 8);
3708 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3709 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3710 tcg_gen_or_i32(t1, t1, tmp);
3711 tcg_gen_mov_i32(t0, rd);
3712
3713 dead_tmp(tmp);
3714 dead_tmp(rd);
3715 }
3716
3717 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3718 {
3719 TCGv rd, tmp;
3720
3721 rd = new_tmp();
3722 tmp = new_tmp();
3723
3724 tcg_gen_shli_i32(rd, t0, 16);
3725 tcg_gen_andi_i32(tmp, t1, 0xffff);
3726 tcg_gen_or_i32(rd, rd, tmp);
3727 tcg_gen_shri_i32(t1, t1, 16);
3728 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3729 tcg_gen_or_i32(t1, t1, tmp);
3730 tcg_gen_mov_i32(t0, rd);
3731
3732 dead_tmp(tmp);
3733 dead_tmp(rd);
3734 }
3735
3736
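/* Register count, element interleave factor and register spacing for
   the "load/store multiple structures" forms of VLDn/VSTn, indexed by
   the op field of the instruction.  */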
3737 static struct {
3738 int nregs;
3739 int interleave;
3740 int spacing;
3741 } neon_ls_element_type[11] = {
3742 {4, 4, 1},
3743 {4, 4, 2},
3744 {4, 1, 1},
3745 {4, 2, 1},
3746 {3, 3, 1},
3747 {3, 3, 2},
3748 {3, 1, 1},
3749 {1, 1, 1},
3750 {2, 2, 1},
3751 {2, 2, 2},
3752 {2, 1, 1}
3753 };
3754
3755 /* Translate a NEON load/store element instruction. Return nonzero if the
3756 instruction is invalid. */
3757 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3758 {
3759 int rd, rn, rm;
3760 int op;
3761 int nregs;
3762 int interleave;
3763 int spacing;
3764 int stride;
3765 int size;
3766 int reg;
3767 int pass;
3768 int load;
3769 int shift;
3770 int n;
3771 TCGv addr;
3772 TCGv tmp;
3773 TCGv tmp2;
3774 TCGv_i64 tmp64;
3775
3776 if (!s->vfp_enabled)
3777 return 1;
3778 VFP_DREG_D(rd, insn);
3779 rn = (insn >> 16) & 0xf;
3780 rm = insn & 0xf;
3781 load = (insn & (1 << 21)) != 0;
3782 addr = new_tmp();
3783 if ((insn & (1 << 23)) == 0) {
3784 /* Load store all elements. */
3785 op = (insn >> 8) & 0xf;
3786 size = (insn >> 6) & 3;
3787 if (op > 10)
3788 return 1;
3789 nregs = neon_ls_element_type[op].nregs;
3790 interleave = neon_ls_element_type[op].interleave;
3791 spacing = neon_ls_element_type[op].spacing;
3792 if (size == 3 && (interleave | spacing) != 1)
3793 return 1;
3794 load_reg_var(s, addr, rn);
3795 stride = (1 << size) * interleave;
3796 for (reg = 0; reg < nregs; reg++) {
3797 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3798 load_reg_var(s, addr, rn);
3799 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3800 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3801 load_reg_var(s, addr, rn);
3802 tcg_gen_addi_i32(addr, addr, 1 << size);
3803 }
3804 if (size == 3) {
3805 if (load) {
3806 tmp64 = gen_ld64(addr, IS_USER(s));
3807 neon_store_reg64(tmp64, rd);
3808 tcg_temp_free_i64(tmp64);
3809 } else {
3810 tmp64 = tcg_temp_new_i64();
3811 neon_load_reg64(tmp64, rd);
3812 gen_st64(tmp64, addr, IS_USER(s));
3813 }
3814 tcg_gen_addi_i32(addr, addr, stride);
3815 } else {
3816 for (pass = 0; pass < 2; pass++) {
3817 if (size == 2) {
3818 if (load) {
3819 tmp = gen_ld32(addr, IS_USER(s));
3820 neon_store_reg(rd, pass, tmp);
3821 } else {
3822 tmp = neon_load_reg(rd, pass);
3823 gen_st32(tmp, addr, IS_USER(s));
3824 }
3825 tcg_gen_addi_i32(addr, addr, stride);
3826 } else if (size == 1) {
3827 if (load) {
3828 tmp = gen_ld16u(addr, IS_USER(s));
3829 tcg_gen_addi_i32(addr, addr, stride);
3830 tmp2 = gen_ld16u(addr, IS_USER(s));
3831 tcg_gen_addi_i32(addr, addr, stride);
3832 tcg_gen_shli_i32(tmp2, tmp2, 16);
3833 tcg_gen_or_i32(tmp, tmp, tmp2);
3834 dead_tmp(tmp2);
3835 neon_store_reg(rd, pass, tmp);
3836 } else {
3837 tmp = neon_load_reg(rd, pass);
3838 tmp2 = new_tmp();
3839 tcg_gen_shri_i32(tmp2, tmp, 16);
3840 gen_st16(tmp, addr, IS_USER(s));
3841 tcg_gen_addi_i32(addr, addr, stride);
3842 gen_st16(tmp2, addr, IS_USER(s));
3843 tcg_gen_addi_i32(addr, addr, stride);
3844 }
3845 } else /* size == 0 */ {
3846 if (load) {
3847 TCGV_UNUSED(tmp2);
3848 for (n = 0; n < 4; n++) {
3849 tmp = gen_ld8u(addr, IS_USER(s));
3850 tcg_gen_addi_i32(addr, addr, stride);
3851 if (n == 0) {
3852 tmp2 = tmp;
3853 } else {
3854 tcg_gen_shli_i32(tmp, tmp, n * 8);
3855 tcg_gen_or_i32(tmp2, tmp2, tmp);
3856 dead_tmp(tmp);
3857 }
3858 }
3859 neon_store_reg(rd, pass, tmp2);
3860 } else {
3861 tmp2 = neon_load_reg(rd, pass);
3862 for (n = 0; n < 4; n++) {
3863 tmp = new_tmp();
3864 if (n == 0) {
3865 tcg_gen_mov_i32(tmp, tmp2);
3866 } else {
3867 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3868 }
3869 gen_st8(tmp, addr, IS_USER(s));
3870 tcg_gen_addi_i32(addr, addr, stride);
3871 }
3872 dead_tmp(tmp2);
3873 }
3874 }
3875 }
3876 }
3877 rd += spacing;
3878 }
3879 stride = nregs * 8;
3880 } else {
3881 size = (insn >> 10) & 3;
3882 if (size == 3) {
3883 /* Load single element to all lanes. */
3884 if (!load)
3885 return 1;
3886 size = (insn >> 6) & 3;
3887 nregs = ((insn >> 8) & 3) + 1;
3888 stride = (insn & (1 << 5)) ? 2 : 1;
3889 load_reg_var(s, addr, rn);
3890 for (reg = 0; reg < nregs; reg++) {
3891 switch (size) {
3892 case 0:
3893 tmp = gen_ld8u(addr, IS_USER(s));
3894 gen_neon_dup_u8(tmp, 0);
3895 break;
3896 case 1:
3897 tmp = gen_ld16u(addr, IS_USER(s));
3898 gen_neon_dup_low16(tmp);
3899 break;
3900 case 2:
3901 tmp = gen_ld32(addr, IS_USER(s));
3902 break;
3903 case 3:
3904 return 1;
3905 default: /* Avoid compiler warnings. */
3906 abort();
3907 }
3908 tcg_gen_addi_i32(addr, addr, 1 << size);
3909 tmp2 = new_tmp();
3910 tcg_gen_mov_i32(tmp2, tmp);
3911 neon_store_reg(rd, 0, tmp2);
3912 neon_store_reg(rd, 1, tmp);
3913 rd += stride;
3914 }
3915 stride = (1 << size) * nregs;
3916 } else {
3917 /* Single element. */
3918 pass = (insn >> 7) & 1;
3919 switch (size) {
3920 case 0:
3921 shift = ((insn >> 5) & 3) * 8;
3922 stride = 1;
3923 break;
3924 case 1:
3925 shift = ((insn >> 6) & 1) * 16;
3926 stride = (insn & (1 << 5)) ? 2 : 1;
3927 break;
3928 case 2:
3929 shift = 0;
3930 stride = (insn & (1 << 6)) ? 2 : 1;
3931 break;
3932 default:
3933 abort();
3934 }
3935 nregs = ((insn >> 8) & 3) + 1;
3936 load_reg_var(s, addr, rn);
3937 for (reg = 0; reg < nregs; reg++) {
3938 if (load) {
3939 switch (size) {
3940 case 0:
3941 tmp = gen_ld8u(addr, IS_USER(s));
3942 break;
3943 case 1:
3944 tmp = gen_ld16u(addr, IS_USER(s));
3945 break;
3946 case 2:
3947 tmp = gen_ld32(addr, IS_USER(s));
3948 break;
3949 default: /* Avoid compiler warnings. */
3950 abort();
3951 }
3952 if (size != 2) {
3953 tmp2 = neon_load_reg(rd, pass);
3954 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3955 dead_tmp(tmp2);
3956 }
3957 neon_store_reg(rd, pass, tmp);
3958 } else { /* Store */
3959 tmp = neon_load_reg(rd, pass);
3960 if (shift)
3961 tcg_gen_shri_i32(tmp, tmp, shift);
3962 switch (size) {
3963 case 0:
3964 gen_st8(tmp, addr, IS_USER(s));
3965 break;
3966 case 1:
3967 gen_st16(tmp, addr, IS_USER(s));
3968 break;
3969 case 2:
3970 gen_st32(tmp, addr, IS_USER(s));
3971 break;
3972 }
3973 }
3974 rd += stride;
3975 tcg_gen_addi_i32(addr, addr, 1 << size);
3976 }
3977 stride = nregs * (1 << size);
3978 }
3979 }
3980 dead_tmp(addr);
3981 if (rm != 15) {
3982 TCGv base;
3983
3984 base = load_reg(s, rn);
3985 if (rm == 13) {
3986 tcg_gen_addi_i32(base, base, stride);
3987 } else {
3988 TCGv index;
3989 index = load_reg(s, rm);
3990 tcg_gen_add_i32(base, base, index);
3991 dead_tmp(index);
3992 }
3993 store_reg(s, rn, base);
3994 }
3995 return 0;
3996 }
3997
3998 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3999 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4000 {
4001 tcg_gen_and_i32(t, t, c);
4002 tcg_gen_andc_i32(f, f, c);
4003 tcg_gen_or_i32(dest, t, f);
4004 }
4005
4006 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4007 {
4008 switch (size) {
4009 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4010 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4011 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4012 default: abort();
4013 }
4014 }
4015
4016 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4017 {
4018 switch (size) {
4019 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4020 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4021 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4022 default: abort();
4023 }
4024 }
4025
4026 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4027 {
4028 switch (size) {
4029 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4030 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4031 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4032 default: abort();
4033 }
4034 }
4035
4036 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4037 {
4038 switch (size) {
4039 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4040 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4041 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4042 default: abort();
4043 }
4044 }
4045
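/* Shift helper for the narrowing shifts: q selects the rounding
   variant and u the unsigned one.  Only 16- and 32-bit element sizes
   are valid here.  */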
4046 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4047 int q, int u)
4048 {
4049 if (q) {
4050 if (u) {
4051 switch (size) {
4052 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4053 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4054 default: abort();
4055 }
4056 } else {
4057 switch (size) {
4058 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4059 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4060 default: abort();
4061 }
4062 }
4063 } else {
4064 if (u) {
4065 switch (size) {
4066 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4067 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4068 default: abort();
4069 }
4070 } else {
4071 switch (size) {
4072 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4073 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4074 default: abort();
4075 }
4076 }
4077 }
4078 }
4079
4080 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4081 {
4082 if (u) {
4083 switch (size) {
4084 case 0: gen_helper_neon_widen_u8(dest, src); break;
4085 case 1: gen_helper_neon_widen_u16(dest, src); break;
4086 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4087 default: abort();
4088 }
4089 } else {
4090 switch (size) {
4091 case 0: gen_helper_neon_widen_s8(dest, src); break;
4092 case 1: gen_helper_neon_widen_s16(dest, src); break;
4093 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4094 default: abort();
4095 }
4096 }
4097 dead_tmp(src);
4098 }
4099
4100 static inline void gen_neon_addl(int size)
4101 {
4102 switch (size) {
4103 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4104 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4105 case 2: tcg_gen_add_i64(CPU_V001); break;
4106 default: abort();
4107 }
4108 }
4109
4110 static inline void gen_neon_subl(int size)
4111 {
4112 switch (size) {
4113 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4114 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4115 case 2: tcg_gen_sub_i64(CPU_V001); break;
4116 default: abort();
4117 }
4118 }
4119
4120 static inline void gen_neon_negl(TCGv_i64 var, int size)
4121 {
4122 switch (size) {
4123 case 0: gen_helper_neon_negl_u16(var, var); break;
4124 case 1: gen_helper_neon_negl_u32(var, var); break;
4125 case 2: gen_helper_neon_negl_u64(var, var); break;
4126 default: abort();
4127 }
4128 }
4129
4130 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4131 {
4132 switch (size) {
4133 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4134 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4135 default: abort();
4136 }
4137 }
4138
4139 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4140 {
4141 TCGv_i64 tmp;
4142
4143 switch ((size << 1) | u) {
4144 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4145 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4146 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4147 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4148 case 4:
4149 tmp = gen_muls_i64_i32(a, b);
4150 tcg_gen_mov_i64(dest, tmp);
4151 break;
4152 case 5:
4153 tmp = gen_mulu_i64_i32(a, b);
4154 tcg_gen_mov_i64(dest, tmp);
4155 break;
4156 default: abort();
4157 }
4158
4159 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4160 Don't forget to free them here. */
4161 if (size < 2) {
4162 dead_tmp(a);
4163 dead_tmp(b);
4164 }
4165 }
4166
4167 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4168 {
4169 if (op) {
4170 if (u) {
4171 gen_neon_unarrow_sats(size, dest, src);
4172 } else {
4173 gen_neon_narrow(size, dest, src);
4174 }
4175 } else {
4176 if (u) {
4177 gen_neon_narrow_satu(size, dest, src);
4178 } else {
4179 gen_neon_narrow_sats(size, dest, src);
4180 }
4181 }
4182 }
4183
4184 /* Translate a NEON data processing instruction. Return nonzero if the
4185 instruction is invalid.
4186 We process data in a mixture of 32-bit and 64-bit chunks.
4187 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4188
4189 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4190 {
4191 int op;
4192 int q;
4193 int rd, rn, rm;
4194 int size;
4195 int shift;
4196 int pass;
4197 int count;
4198 int pairwise;
4199 int u;
4200 int n;
4201 uint32_t imm, mask;
4202 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4203 TCGv_i64 tmp64;
4204
4205 if (!s->vfp_enabled)
4206 return 1;
4207 q = (insn & (1 << 6)) != 0;
4208 u = (insn >> 24) & 1;
4209 VFP_DREG_D(rd, insn);
4210 VFP_DREG_N(rn, insn);
4211 VFP_DREG_M(rm, insn);
4212 size = (insn >> 20) & 3;
4213 if ((insn & (1 << 23)) == 0) {
4214 /* Three register same length. */
4215 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4216 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4217 || op == 10 || op == 11 || op == 16)) {
4218 /* 64-bit element instructions. */
4219 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4220 neon_load_reg64(cpu_V0, rn + pass);
4221 neon_load_reg64(cpu_V1, rm + pass);
4222 switch (op) {
4223 case 1: /* VQADD */
4224 if (u) {
4225 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4226 cpu_V0, cpu_V1);
4227 } else {
4228 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4229 cpu_V0, cpu_V1);
4230 }
4231 break;
4232 case 5: /* VQSUB */
4233 if (u) {
4234 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4235 cpu_V0, cpu_V1);
4236 } else {
4237 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4238 cpu_V0, cpu_V1);
4239 }
4240 break;
4241 case 8: /* VSHL */
4242 if (u) {
4243 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4244 } else {
4245 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4246 }
4247 break;
4248 case 9: /* VQSHL */
4249 if (u) {
4250 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4251 cpu_V1, cpu_V0);
4252 } else {
4253 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4254 cpu_V1, cpu_V0);
4255 }
4256 break;
4257 case 10: /* VRSHL */
4258 if (u) {
4259 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4260 } else {
4261 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4262 }
4263 break;
4264 case 11: /* VQRSHL */
4265 if (u) {
4266 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4267 cpu_V1, cpu_V0);
4268 } else {
4269 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4270 cpu_V1, cpu_V0);
4271 }
4272 break;
4273 case 16:
4274 if (u) {
4275 tcg_gen_sub_i64(CPU_V001);
4276 } else {
4277 tcg_gen_add_i64(CPU_V001);
4278 }
4279 break;
4280 default:
4281 abort();
4282 }
4283 neon_store_reg64(cpu_V0, rd + pass);
4284 }
4285 return 0;
4286 }
4287 switch (op) {
4288 case 8: /* VSHL */
4289 case 9: /* VQSHL */
4290 case 10: /* VRSHL */
4291 case 11: /* VQRSHL */
4292 {
4293 int rtmp;
4294 /* Shift instruction operands are reversed. */
4295 rtmp = rn;
4296 rn = rm;
4297 rm = rtmp;
4298 pairwise = 0;
4299 }
4300 break;
4301 case 20: /* VPMAX */
4302 case 21: /* VPMIN */
4303 case 23: /* VPADD */
4304 pairwise = 1;
4305 break;
4306 case 26: /* VPADD (float) */
4307 pairwise = (u && size < 2);
4308 break;
4309 case 30: /* VPMIN/VPMAX (float) */
4310 pairwise = u;
4311 break;
4312 default:
4313 pairwise = 0;
4314 break;
4315 }
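/* Note on the pairwise handling in the loop below: result lanes are
 * built first from adjacent pairs of rn, then from adjacent pairs
 * of rm. E.g. with q set, passes 0-1 consume rn elements (0,1) and
 * (2,3), and passes 2-3 consume the same pairs of rm. */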
4316
4317 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4318
4319 if (pairwise) {
4320 /* Pairwise. */
4321 if (q)
4322 n = (pass & 1) * 2;
4323 else
4324 n = 0;
4325 if (pass < q + 1) {
4326 tmp = neon_load_reg(rn, n);
4327 tmp2 = neon_load_reg(rn, n + 1);
4328 } else {
4329 tmp = neon_load_reg(rm, n);
4330 tmp2 = neon_load_reg(rm, n + 1);
4331 }
4332 } else {
4333 /* Elementwise. */
4334 tmp = neon_load_reg(rn, pass);
4335 tmp2 = neon_load_reg(rm, pass);
4336 }
4337 switch (op) {
4338 case 0: /* VHADD */
4339 GEN_NEON_INTEGER_OP(hadd);
4340 break;
4341 case 1: /* VQADD */
4342 GEN_NEON_INTEGER_OP_ENV(qadd);
4343 break;
4344 case 2: /* VRHADD */
4345 GEN_NEON_INTEGER_OP(rhadd);
4346 break;
4347 case 3: /* Logic ops. */
4348 switch ((u << 2) | size) {
4349 case 0: /* VAND */
4350 tcg_gen_and_i32(tmp, tmp, tmp2);
4351 break;
4352 case 1: /* BIC */
4353 tcg_gen_andc_i32(tmp, tmp, tmp2);
4354 break;
4355 case 2: /* VORR */
4356 tcg_gen_or_i32(tmp, tmp, tmp2);
4357 break;
4358 case 3: /* VORN */
4359 tcg_gen_orc_i32(tmp, tmp, tmp2);
4360 break;
4361 case 4: /* VEOR */
4362 tcg_gen_xor_i32(tmp, tmp, tmp2);
4363 break;
4364 case 5: /* VBSL */
4365 tmp3 = neon_load_reg(rd, pass);
4366 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4367 dead_tmp(tmp3);
4368 break;
4369 case 6: /* VBIT */
4370 tmp3 = neon_load_reg(rd, pass);
4371 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4372 dead_tmp(tmp3);
4373 break;
4374 case 7: /* VBIF */
4375 tmp3 = neon_load_reg(rd, pass);
4376 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4377 dead_tmp(tmp3);
4378 break;
4379 }
4380 break;
4381 case 4: /* VHSUB */
4382 GEN_NEON_INTEGER_OP(hsub);
4383 break;
4384 case 5: /* VQSUB */
4385 GEN_NEON_INTEGER_OP_ENV(qsub);
4386 break;
4387 case 6: /* VCGT */
4388 GEN_NEON_INTEGER_OP(cgt);
4389 break;
4390 case 7: /* VCGE */
4391 GEN_NEON_INTEGER_OP(cge);
4392 break;
4393 case 8: /* VSHL */
4394 GEN_NEON_INTEGER_OP(shl);
4395 break;
4396 case 9: /* VQSHL */
4397 GEN_NEON_INTEGER_OP_ENV(qshl);
4398 break;
4399 case 10: /* VRSHL */
4400 GEN_NEON_INTEGER_OP(rshl);
4401 break;
4402 case 11: /* VQRSHL */
4403 GEN_NEON_INTEGER_OP_ENV(qrshl);
4404 break;
4405 case 12: /* VMAX */
4406 GEN_NEON_INTEGER_OP(max);
4407 break;
4408 case 13: /* VMIN */
4409 GEN_NEON_INTEGER_OP(min);
4410 break;
4411 case 14: /* VABD */
4412 GEN_NEON_INTEGER_OP(abd);
4413 break;
4414 case 15: /* VABA */
4415 GEN_NEON_INTEGER_OP(abd);
4416 dead_tmp(tmp2);
4417 tmp2 = neon_load_reg(rd, pass);
4418 gen_neon_add(size, tmp, tmp2);
4419 break;
4420 case 16:
4421 if (!u) { /* VADD */
4422 if (gen_neon_add(size, tmp, tmp2))
4423 return 1;
4424 } else { /* VSUB */
4425 switch (size) {
4426 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4427 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4428 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4429 default: return 1;
4430 }
4431 }
4432 break;
4433 case 17:
4434 if (!u) { /* VTST */
4435 switch (size) {
4436 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4437 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4438 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4439 default: return 1;
4440 }
4441 } else { /* VCEQ */
4442 switch (size) {
4443 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4444 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4445 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4446 default: return 1;
4447 }
4448 }
4449 break;
4450 case 18: /* Multiply. */
4451 switch (size) {
4452 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4453 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4454 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4455 default: return 1;
4456 }
4457 dead_tmp(tmp2);
4458 tmp2 = neon_load_reg(rd, pass);
4459 if (u) { /* VMLS */
4460 gen_neon_rsb(size, tmp, tmp2);
4461 } else { /* VMLA */
4462 gen_neon_add(size, tmp, tmp2);
4463 }
4464 break;
4465 case 19: /* VMUL */
4466 if (u) { /* polynomial */
4467 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4468 } else { /* Integer */
4469 switch (size) {
4470 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4471 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4472 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4473 default: return 1;
4474 }
4475 }
4476 break;
4477 case 20: /* VPMAX */
4478 GEN_NEON_INTEGER_OP(pmax);
4479 break;
4480 case 21: /* VPMIN */
4481 GEN_NEON_INTEGER_OP(pmin);
4482 break;
4483 case 22: /* Multiply high. */
4484 if (!u) { /* VQDMULH */
4485 switch (size) {
4486 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4487 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4488 default: return 1;
4489 }
4490 } else { /* VQRDMULH */
4491 switch (size) {
4492 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4493 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4494 default: return 1;
4495 }
4496 }
4497 break;
4498 case 23: /* VPADD */
4499 if (u)
4500 return 1;
4501 switch (size) {
4502 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4503 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4504 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4505 default: return 1;
4506 }
4507 break;
4508 case 26: /* Floating point arithmetic. */
4509 switch ((u << 2) | size) {
4510 case 0: /* VADD */
4511 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4512 break;
4513 case 2: /* VSUB */
4514 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4515 break;
4516 case 4: /* VPADD */
4517 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4518 break;
4519 case 6: /* VABD */
4520 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4521 break;
4522 default:
4523 return 1;
4524 }
4525 break;
4526 case 27: /* Float multiply. */
4527 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4528 if (!u) {
4529 dead_tmp(tmp2);
4530 tmp2 = neon_load_reg(rd, pass);
4531 if (size == 0) {
4532 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4533 } else {
4534 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4535 }
4536 }
4537 break;
4538 case 28: /* Float compare. */
4539 if (!u) {
4540 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4541 } else {
4542 if (size == 0)
4543 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4544 else
4545 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4546 }
4547 break;
4548 case 29: /* Float compare absolute. */
4549 if (!u)
4550 return 1;
4551 if (size == 0)
4552 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4553 else
4554 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4555 break;
4556 case 30: /* Float min/max. */
4557 if (size == 0)
4558 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4559 else
4560 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4561 break;
4562 case 31:
4563 if (size == 0)
4564 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4565 else
4566 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4567 break;
4568 default:
4569 abort();
4570 }
4571 dead_tmp(tmp2);
4572
4573 /* Save the result. For elementwise operations we can put it
4574 straight into the destination register. For pairwise operations
4575 we have to be careful to avoid clobbering the source operands. */
4576 if (pairwise && rd == rm) {
4577 neon_store_scratch(pass, tmp);
4578 } else {
4579 neon_store_reg(rd, pass, tmp);
4580 }
4581
4582 } /* for pass */
4583 if (pairwise && rd == rm) {
4584 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4585 tmp = neon_load_scratch(pass);
4586 neon_store_reg(rd, pass, tmp);
4587 }
4588 }
4589 /* End of 3 register same size operations. */
4590 } else if (insn & (1 << 4)) {
4591 if ((insn & 0x00380080) != 0) {
4592 /* Two registers and shift. */
4593 op = (insn >> 8) & 0xf;
4594 if (insn & (1 << 7)) {
4595 /* 64-bit shift. */
4596 size = 3;
4597 } else {
4598 size = 2;
4599 while ((insn & (1 << (size + 19))) == 0)
4600 size--;
4601 }
4602 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4603 /* To avoid excessive duplication of ops we implement shift
4604 by immediate using the variable shift operations. */
4605 if (op < 8) {
4606 /* Shift by immediate:
4607 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4608 /* Right shifts are encoded as N - shift, where N is the
4609 element size in bits. */
4610 if (op <= 4)
4611 shift = shift - (1 << (size + 3));
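/* E.g. for size == 0 (8-bit elements) an encoded field of 5
 * becomes 5 - 8 = -3; the variable shift helpers treat a
 * negative count as a right shift, here by 3 bits. */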
4612 if (size == 3) {
4613 count = q + 1;
4614 } else {
4615 count = q ? 4: 2;
4616 }
4617 switch (size) {
4618 case 0:
4619 imm = (uint8_t) shift;
4620 imm |= imm << 8;
4621 imm |= imm << 16;
4622 break;
4623 case 1:
4624 imm = (uint16_t) shift;
4625 imm |= imm << 16;
4626 break;
4627 case 2:
4628 case 3:
4629 imm = shift;
4630 break;
4631 default:
4632 abort();
4633 }
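/* The (possibly negative) count has been replicated into every
 * lane so the variable shift helpers see it per element; e.g.
 * size == 0 with shift == -3 gives imm == 0xfdfdfdfd
 * (0xfd == -3 in each byte). */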
4634
4635 for (pass = 0; pass < count; pass++) {
4636 if (size == 3) {
4637 neon_load_reg64(cpu_V0, rm + pass);
4638 tcg_gen_movi_i64(cpu_V1, imm);
4639 switch (op) {
4640 case 0: /* VSHR */
4641 case 1: /* VSRA */
4642 if (u)
4643 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4644 else
4645 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4646 break;
4647 case 2: /* VRSHR */
4648 case 3: /* VRSRA */
4649 if (u)
4650 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4651 else
4652 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4653 break;
4654 case 4: /* VSRI */
4655 if (!u)
4656 return 1;
4657 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4658 break;
4659 case 5: /* VSHL, VSLI */
4660 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4661 break;
4662 case 6: /* VQSHLU */
4663 if (u) {
4664 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4665 cpu_V0, cpu_V1);
4666 } else {
4667 return 1;
4668 }
4669 break;
4670 case 7: /* VQSHL */
4671 if (u) {
4672 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4673 cpu_V0, cpu_V1);
4674 } else {
4675 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4676 cpu_V0, cpu_V1);
4677 }
4678 break;
4679 }
4680 if (op == 1 || op == 3) {
4681 /* Accumulate. */
4682 neon_load_reg64(cpu_V1, rd + pass);
4683 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4684 } else if (op == 4 || (op == 5 && u)) {
4685 /* Insert */
4686 neon_load_reg64(cpu_V1, rd + pass);
4687 uint64_t mask;
4688 if (shift < -63 || shift > 63) {
4689 mask = 0;
4690 } else {
4691 if (op == 4) {
4692 mask = 0xffffffffffffffffull >> -shift;
4693 } else {
4694 mask = 0xffffffffffffffffull << shift;
4695 }
4696 }
4697 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4698 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
4699 }
4700 neon_store_reg64(cpu_V0, rd + pass);
4701 } else { /* size < 3 */
4702 /* Operands in tmp and tmp2. */
4703 tmp = neon_load_reg(rm, pass);
4704 tmp2 = new_tmp();
4705 tcg_gen_movi_i32(tmp2, imm);
4706 switch (op) {
4707 case 0: /* VSHR */
4708 case 1: /* VSRA */
4709 GEN_NEON_INTEGER_OP(shl);
4710 break;
4711 case 2: /* VRSHR */
4712 case 3: /* VRSRA */
4713 GEN_NEON_INTEGER_OP(rshl);
4714 break;
4715 case 4: /* VSRI */
4716 if (!u)
4717 return 1;
4718 GEN_NEON_INTEGER_OP(shl);
4719 break;
4720 case 5: /* VSHL, VSLI */
4721 switch (size) {
4722 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4723 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4724 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4725 default: return 1;
4726 }
4727 break;
4728 case 6: /* VQSHLU */
4729 if (!u) {
4730 return 1;
4731 }
4732 switch (size) {
4733 case 0:
4734 gen_helper_neon_qshlu_s8(tmp, cpu_env,
4735 tmp, tmp2);
4736 break;
4737 case 1:
4738 gen_helper_neon_qshlu_s16(tmp, cpu_env,
4739 tmp, tmp2);
4740 break;
4741 case 2:
4742 gen_helper_neon_qshlu_s32(tmp, cpu_env,
4743 tmp, tmp2);
4744 break;
4745 default:
4746 return 1;
4747 }
4748 break;
4749 case 7: /* VQSHL */
4750 GEN_NEON_INTEGER_OP_ENV(qshl);
4751 break;
4752 }
4753 dead_tmp(tmp2);
4754
4755 if (op == 1 || op == 3) {
4756 /* Accumulate. */
4757 tmp2 = neon_load_reg(rd, pass);
4758 gen_neon_add(size, tmp, tmp2);
4759 dead_tmp(tmp2);
4760 } else if (op == 4 || (op == 5 && u)) {
4761 /* Insert */
4762 switch (size) {
4763 case 0:
4764 if (op == 4)
4765 mask = 0xff >> -shift;
4766 else
4767 mask = (uint8_t)(0xff << shift);
4768 mask |= mask << 8;
4769 mask |= mask << 16;
4770 break;
4771 case 1:
4772 if (op == 4)
4773 mask = 0xffff >> -shift;
4774 else
4775 mask = (uint16_t)(0xffff << shift);
4776 mask |= mask << 16;
4777 break;
4778 case 2:
4779 if (shift < -31 || shift > 31) {
4780 mask = 0;
4781 } else {
4782 if (op == 4)
4783 mask = 0xffffffffu >> -shift;
4784 else
4785 mask = 0xffffffffu << shift;
4786 }
4787 break;
4788 default:
4789 abort();
4790 }
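/* mask covers the bits occupied by the shifted-in value, e.g.
 * VSRI with size == 0 and shift == -3 gives mask == 0x1f1f1f1f;
 * the destination register keeps all the bits outside it. */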
4791 tmp2 = neon_load_reg(rd, pass);
4792 tcg_gen_andi_i32(tmp, tmp, mask);
4793 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
4794 tcg_gen_or_i32(tmp, tmp, tmp2);
4795 dead_tmp(tmp2);
4796 }
4797 neon_store_reg(rd, pass, tmp);
4798 }
4799 } /* for pass */
4800 } else if (op < 10) {
4801 /* Shift by immediate and narrow:
4802 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4803 int input_unsigned = (op == 8) ? !u : u;
4804
4805 shift = shift - (1 << (size + 3));
4806 size++;
4807 switch (size) {
4808 case 1:
4809 imm = (uint16_t)shift;
4810 imm |= imm << 16;
4811 tmp2 = tcg_const_i32(imm);
4812 TCGV_UNUSED_I64(tmp64);
4813 break;
4814 case 2:
4815 imm = (uint32_t)shift;
4816 tmp2 = tcg_const_i32(imm);
4817 TCGV_UNUSED_I64(tmp64);
4818 break;
4819 case 3:
4820 tmp64 = tcg_const_i64(shift);
4821 TCGV_UNUSED(tmp2);
4822 break;
4823 default:
4824 abort();
4825 }
4826
4827 for (pass = 0; pass < 2; pass++) {
4828 if (size == 3) {
4829 neon_load_reg64(cpu_V0, rm + pass);
4830 if (q) {
4831 if (input_unsigned) {
4832 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0,
4833 tmp64);
4834 } else {
4835 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0,
4836 tmp64);
4837 }
4838 } else {
4839 if (input_unsigned) {
4840 gen_helper_neon_shl_u64(cpu_V0, cpu_V0,
4841 tmp64);
4842 } else {
4843 gen_helper_neon_shl_s64(cpu_V0, cpu_V0,
4844 tmp64);
4845 }
4846 }
4847 } else {
4848 tmp = neon_load_reg(rm + pass, 0);
4849 gen_neon_shift_narrow(size, tmp, tmp2, q,
4850 input_unsigned);
4851 tmp3 = neon_load_reg(rm + pass, 1);
4852 gen_neon_shift_narrow(size, tmp3, tmp2, q,
4853 input_unsigned);
4854 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4855 dead_tmp(tmp);
4856 dead_tmp(tmp3);
4857 }
4858 tmp = new_tmp();
4859 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4860 neon_store_reg(rd, pass, tmp);
4861 } /* for pass */
4862 if (size == 3) {
4863 tcg_temp_free_i64(tmp64);
4864 } else {
4865 tcg_temp_free_i32(tmp2);
4866 }
4867 } else if (op == 10) {
4868 /* VSHLL */
4869 if (q || size == 3)
4870 return 1;
4871 tmp = neon_load_reg(rm, 0);
4872 tmp2 = neon_load_reg(rm, 1);
4873 for (pass = 0; pass < 2; pass++) {
4874 if (pass == 1)
4875 tmp = tmp2;
4876
4877 gen_neon_widen(cpu_V0, tmp, size, u);
4878
4879 if (shift != 0) {
4880 /* The shift is less than the width of the source
4881 type, so we can just shift the whole register. */
4882 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4883 /* After the shift we need to clear
4884 * the potential overflow bits resulting from
4885 * left bits of the narrow input appearing as
4886 * right bits of the left neighbour narrow
4887 * input. */
4888 if (size < 2 || !u) {
4889 uint64_t imm64;
4890 if (size == 0) {
4891 imm = (0xffu >> (8 - shift));
4892 imm |= imm << 16;
4893 } else if (size == 1) {
4894 imm = 0xffff >> (16 - shift);
4895 } else {
4896 /* size == 2 */
4897 imm = 0xffffffff >> (32 - shift);
4898 }
4899 if (size < 2) {
4900 imm64 = imm | (((uint64_t)imm) << 32);
4901 } else {
4902 imm64 = imm;
4903 }
4904 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4905 }
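/* Worked example: size == 0, shift == 2 gives
 * imm64 == 0x0003000300030003, clearing the two low bits of each
 * 16-bit lane, which is where bits shifted out of the left
 * neighbour's lane land. */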
4906 }
4907 neon_store_reg64(cpu_V0, rd + pass);
4908 }
4909 } else if (op >= 14) {
4910 /* VCVT fixed-point. */
4911 /* We have already masked out the must-be-1 top bit of imm6,
4912 * hence this 32-shift where the ARM ARM has 64-imm6.
4913 */
4914 shift = 32 - shift;
4915 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4916 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4917 if (!(op & 1)) {
4918 if (u)
4919 gen_vfp_ulto(0, shift);
4920 else
4921 gen_vfp_slto(0, shift);
4922 } else {
4923 if (u)
4924 gen_vfp_toul(0, shift);
4925 else
4926 gen_vfp_tosl(0, shift);
4927 }
4928 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4929 }
4930 } else {
4931 return 1;
4932 }
4933 } else { /* (insn & 0x00380080) == 0 */
4934 int invert;
4935
4936 op = (insn >> 8) & 0xf;
4937 /* One register and immediate. */
4938 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4939 invert = (insn & (1 << 5)) != 0;
4940 switch (op) {
4941 case 0: case 1:
4942 /* no-op */
4943 break;
4944 case 2: case 3:
4945 imm <<= 8;
4946 break;
4947 case 4: case 5:
4948 imm <<= 16;
4949 break;
4950 case 6: case 7:
4951 imm <<= 24;
4952 break;
4953 case 8: case 9:
4954 imm |= imm << 16;
4955 break;
4956 case 10: case 11:
4957 imm = (imm << 8) | (imm << 24);
4958 break;
4959 case 12:
4960 imm = (imm << 8) | 0xff;
4961 break;
4962 case 13:
4963 imm = (imm << 16) | 0xffff;
4964 break;
4965 case 14:
4966 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4967 if (invert)
4968 imm = ~imm;
4969 break;
4970 case 15:
4971 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4972 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4973 break;
4974 }
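/* These cases implement the NEON "expand immediate" patterns,
 * e.g. op == 12 turns 0xab into 0x0000abff (ones fill) and
 * op == 14 replicates the byte into all four byte lanes. */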
4975 if (invert)
4976 imm = ~imm;
4977
4978 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4979 if (op & 1 && op < 12) {
4980 tmp = neon_load_reg(rd, pass);
4981 if (invert) {
4982 /* The immediate value has already been inverted, so
4983 BIC becomes AND. */
4984 tcg_gen_andi_i32(tmp, tmp, imm);
4985 } else {
4986 tcg_gen_ori_i32(tmp, tmp, imm);
4987 }
4988 } else {
4989 /* VMOV, VMVN. */
4990 tmp = new_tmp();
4991 if (op == 14 && invert) {
4992 uint32_t val;
4993 val = 0;
4994 for (n = 0; n < 4; n++) {
4995 if (imm & (1 << (n + (pass & 1) * 4)))
4996 val |= 0xff << (n * 8);
4997 }
4998 tcg_gen_movi_i32(tmp, val);
4999 } else {
5000 tcg_gen_movi_i32(tmp, imm);
5001 }
5002 }
5003 neon_store_reg(rd, pass, tmp);
5004 }
5005 }
5006 } else { /* ((insn & 0x00800010) == 0x00800000) */
5007 if (size != 3) {
5008 op = (insn >> 8) & 0xf;
5009 if ((insn & (1 << 6)) == 0) {
5010 /* Three registers of different lengths. */
5011 int src1_wide;
5012 int src2_wide;
5013 int prewiden;
5014 /* prewiden, src1_wide, src2_wide */
5015 static const int neon_3reg_wide[16][3] = {
5016 {1, 0, 0}, /* VADDL */
5017 {1, 1, 0}, /* VADDW */
5018 {1, 0, 0}, /* VSUBL */
5019 {1, 1, 0}, /* VSUBW */
5020 {0, 1, 1}, /* VADDHN */
5021 {0, 0, 0}, /* VABAL */
5022 {0, 1, 1}, /* VSUBHN */
5023 {0, 0, 0}, /* VABDL */
5024 {0, 0, 0}, /* VMLAL */
5025 {0, 0, 0}, /* VQDMLAL */
5026 {0, 0, 0}, /* VMLSL */
5027 {0, 0, 0}, /* VQDMLSL */
5028 {0, 0, 0}, /* Integer VMULL */
5029 {0, 0, 0}, /* VQDMULL */
5030 {0, 0, 0} /* Polynomial VMULL */
5031 };
5032
5033 prewiden = neon_3reg_wide[op][0];
5034 src1_wide = neon_3reg_wide[op][1];
5035 src2_wide = neon_3reg_wide[op][2];
5036
5037 if (size == 0 && (op == 9 || op == 11 || op == 13))
5038 return 1;
5039
5040 /* Avoid overlapping operands. Wide source operands are
5041 always aligned so will never overlap with wide
5042 destinations in problematic ways. */
5043 if (rd == rm && !src2_wide) {
5044 tmp = neon_load_reg(rm, 1);
5045 neon_store_scratch(2, tmp);
5046 } else if (rd == rn && !src1_wide) {
5047 tmp = neon_load_reg(rn, 1);
5048 neon_store_scratch(2, tmp);
5049 }
5050 TCGV_UNUSED(tmp3);
5051 for (pass = 0; pass < 2; pass++) {
5052 if (src1_wide) {
5053 neon_load_reg64(cpu_V0, rn + pass);
5054 TCGV_UNUSED(tmp);
5055 } else {
5056 if (pass == 1 && rd == rn) {
5057 tmp = neon_load_scratch(2);
5058 } else {
5059 tmp = neon_load_reg(rn, pass);
5060 }
5061 if (prewiden) {
5062 gen_neon_widen(cpu_V0, tmp, size, u);
5063 }
5064 }
5065 if (src2_wide) {
5066 neon_load_reg64(cpu_V1, rm + pass);
5067 TCGV_UNUSED(tmp2);
5068 } else {
5069 if (pass == 1 && rd == rm) {
5070 tmp2 = neon_load_scratch(2);
5071 } else {
5072 tmp2 = neon_load_reg(rm, pass);
5073 }
5074 if (prewiden) {
5075 gen_neon_widen(cpu_V1, tmp2, size, u);
5076 }
5077 }
5078 switch (op) {
5079 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5080 gen_neon_addl(size);
5081 break;
5082 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5083 gen_neon_subl(size);
5084 break;
5085 case 5: case 7: /* VABAL, VABDL */
5086 switch ((size << 1) | u) {
5087 case 0:
5088 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5089 break;
5090 case 1:
5091 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5092 break;
5093 case 2:
5094 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5095 break;
5096 case 3:
5097 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5098 break;
5099 case 4:
5100 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5101 break;
5102 case 5:
5103 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5104 break;
5105 default: abort();
5106 }
5107 dead_tmp(tmp2);
5108 dead_tmp(tmp);
5109 break;
5110 case 8: case 9: case 10: case 11: case 12: case 13:
5111 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5112 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5113 break;
5114 case 14: /* Polynomial VMULL */
5115 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5116 dead_tmp(tmp2);
5117 dead_tmp(tmp);
5118 break;
5119 default: /* 15 is RESERVED. */
5120 return 1;
5121 }
5122 if (op == 13) {
5123 /* VQDMULL */
5124 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5125 neon_store_reg64(cpu_V0, rd + pass);
5126 } else if (op == 5 || (op >= 8 && op <= 11)) {
5127 /* Accumulate. */
5128 neon_load_reg64(cpu_V1, rd + pass);
5129 switch (op) {
5130 case 10: /* VMLSL */
5131 gen_neon_negl(cpu_V0, size);
5132 /* Fall through */
5133 case 5: case 8: /* VABAL, VMLAL */
5134 gen_neon_addl(size);
5135 break;
5136 case 9: case 11: /* VQDMLAL, VQDMLSL */
5137 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5138 if (op == 11) {
5139 gen_neon_negl(cpu_V0, size);
5140 }
5141 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5142 break;
5143 default:
5144 abort();
5145 }
5146 neon_store_reg64(cpu_V0, rd + pass);
5147 } else if (op == 4 || op == 6) {
5148 /* Narrowing operation. */
5149 tmp = new_tmp();
5150 if (!u) {
5151 switch (size) {
5152 case 0:
5153 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5154 break;
5155 case 1:
5156 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5157 break;
5158 case 2:
5159 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5160 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5161 break;
5162 default: abort();
5163 }
5164 } else {
5165 switch (size) {
5166 case 0:
5167 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5168 break;
5169 case 1:
5170 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5171 break;
5172 case 2:
5173 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5174 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5175 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5176 break;
5177 default: abort();
5178 }
5179 }
5180 if (pass == 0) {
5181 tmp3 = tmp;
5182 } else {
5183 neon_store_reg(rd, 0, tmp3);
5184 neon_store_reg(rd, 1, tmp);
5185 }
5186 } else {
5187 /* Write back the result. */
5188 neon_store_reg64(cpu_V0, rd + pass);
5189 }
5190 }
5191 } else {
5192 /* Two registers and a scalar. */
5193 switch (op) {
5194 case 0: /* Integer VMLA scalar */
5195 case 1: /* Float VMLA scalar */
5196 case 4: /* Integer VMLS scalar */
5197 case 5: /* Floating point VMLS scalar */
5198 case 8: /* Integer VMUL scalar */
5199 case 9: /* Floating point VMUL scalar */
5200 case 12: /* VQDMULH scalar */
5201 case 13: /* VQRDMULH scalar */
5202 tmp = neon_get_scalar(size, rm);
5203 neon_store_scratch(0, tmp);
5204 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5205 tmp = neon_load_scratch(0);
5206 tmp2 = neon_load_reg(rn, pass);
5207 if (op == 12) {
5208 if (size == 1) {
5209 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5210 } else {
5211 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5212 }
5213 } else if (op == 13) {
5214 if (size == 1) {
5215 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5216 } else {
5217 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5218 }
5219 } else if (op & 1) {
5220 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5221 } else {
5222 switch (size) {
5223 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5224 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5225 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5226 default: return 1;
5227 }
5228 }
5229 dead_tmp(tmp2);
5230 if (op < 8) {
5231 /* Accumulate. */
5232 tmp2 = neon_load_reg(rd, pass);
5233 switch (op) {
5234 case 0:
5235 gen_neon_add(size, tmp, tmp2);
5236 break;
5237 case 1:
5238 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5239 break;
5240 case 4:
5241 gen_neon_rsb(size, tmp, tmp2);
5242 break;
5243 case 5:
5244 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5245 break;
5246 default:
5247 abort();
5248 }
5249 dead_tmp(tmp2);
5250 }
5251 neon_store_reg(rd, pass, tmp);
5252 }
5253 break;
5254 case 2: /* VMLAL scalar */
5255 case 3: /* VQDMLAL scalar */
5256 case 6: /* VMLSL scalar */
5257 case 7: /* VQDMLSL scalar */
5258 case 10: /* VMULL scalar */
5259 case 11: /* VQDMULL scalar */
5260 if (size == 0 && (op == 3 || op == 7 || op == 11))
5261 return 1;
5262
5263 tmp2 = neon_get_scalar(size, rm);
5264 /* We need a copy of tmp2 because gen_neon_mull
5265 * deletes it during pass 0. */
5266 tmp4 = new_tmp();
5267 tcg_gen_mov_i32(tmp4, tmp2);
5268 tmp3 = neon_load_reg(rn, 1);
5269
5270 for (pass = 0; pass < 2; pass++) {
5271 if (pass == 0) {
5272 tmp = neon_load_reg(rn, 0);
5273 } else {
5274 tmp = tmp3;
5275 tmp2 = tmp4;
5276 }
5277 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5278 if (op != 11) {
5279 neon_load_reg64(cpu_V1, rd + pass);
5280 }
5281 switch (op) {
5282 case 6:
5283 gen_neon_negl(cpu_V0, size);
5284 /* Fall through */
5285 case 2:
5286 gen_neon_addl(size);
5287 break;
5288 case 3: case 7:
5289 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5290 if (op == 7) {
5291 gen_neon_negl(cpu_V0, size);
5292 }
5293 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5294 break;
5295 case 10:
5296 /* no-op */
5297 break;
5298 case 11:
5299 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5300 break;
5301 default:
5302 abort();
5303 }
5304 neon_store_reg64(cpu_V0, rd + pass);
5305 }
5306
5307
5308 break;
5309 default: /* 14 and 15 are RESERVED */
5310 return 1;
5311 }
5312 }
5313 } else { /* size == 3 */
5314 if (!u) {
5315 /* Extract. */
5316 imm = (insn >> 8) & 0xf;
5317
5318 if (imm > 7 && !q)
5319 return 1;
5320
5321 if (imm == 0) {
5322 neon_load_reg64(cpu_V0, rn);
5323 if (q) {
5324 neon_load_reg64(cpu_V1, rn + 1);
5325 }
5326 } else if (imm == 8) {
5327 neon_load_reg64(cpu_V0, rn + 1);
5328 if (q) {
5329 neon_load_reg64(cpu_V1, rm);
5330 }
5331 } else if (q) {
5332 tmp64 = tcg_temp_new_i64();
5333 if (imm < 8) {
5334 neon_load_reg64(cpu_V0, rn);
5335 neon_load_reg64(tmp64, rn + 1);
5336 } else {
5337 neon_load_reg64(cpu_V0, rn + 1);
5338 neon_load_reg64(tmp64, rm);
5339 }
5340 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5341 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5342 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5343 if (imm < 8) {
5344 neon_load_reg64(cpu_V1, rm);
5345 } else {
5346 neon_load_reg64(cpu_V1, rm + 1);
5347 imm -= 8;
5348 }
5349 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5350 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5351 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5352 tcg_temp_free_i64(tmp64);
5353 } else {
5354 /* BUGFIX */
5355 neon_load_reg64(cpu_V0, rn);
5356 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5357 neon_load_reg64(cpu_V1, rm);
5358 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5359 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5360 }
5361 neon_store_reg64(cpu_V0, rd);
5362 if (q) {
5363 neon_store_reg64(cpu_V1, rd + 1);
5364 }
5365 } else if ((insn & (1 << 11)) == 0) {
5366 /* Two register misc. */
5367 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5368 size = (insn >> 18) & 3;
5369 switch (op) {
5370 case 0: /* VREV64 */
5371 if (size == 3)
5372 return 1;
5373 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5374 tmp = neon_load_reg(rm, pass * 2);
5375 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5376 switch (size) {
5377 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5378 case 1: gen_swap_half(tmp); break;
5379 case 2: /* no-op */ break;
5380 default: abort();
5381 }
5382 neon_store_reg(rd, pass * 2 + 1, tmp);
5383 if (size == 2) {
5384 neon_store_reg(rd, pass * 2, tmp2);
5385 } else {
5386 switch (size) {
5387 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5388 case 1: gen_swap_half(tmp2); break;
5389 default: abort();
5390 }
5391 neon_store_reg(rd, pass * 2, tmp2);
5392 }
5393 }
5394 break;
5395 case 4: case 5: /* VPADDL */
5396 case 12: case 13: /* VPADAL */
5397 if (size == 3)
5398 return 1;
5399 for (pass = 0; pass < q + 1; pass++) {
5400 tmp = neon_load_reg(rm, pass * 2);
5401 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5402 tmp = neon_load_reg(rm, pass * 2 + 1);
5403 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5404 switch (size) {
5405 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5406 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5407 case 2: tcg_gen_add_i64(CPU_V001); break;
5408 default: abort();
5409 }
5410 if (op >= 12) {
5411 /* Accumulate. */
5412 neon_load_reg64(cpu_V1, rd + pass);
5413 gen_neon_addl(size);
5414 }
5415 neon_store_reg64(cpu_V0, rd + pass);
5416 }
5417 break;
5418 case 33: /* VTRN */
5419 if (size == 2) {
5420 for (n = 0; n < (q ? 4 : 2); n += 2) {
5421 tmp = neon_load_reg(rm, n);
5422 tmp2 = neon_load_reg(rd, n + 1);
5423 neon_store_reg(rm, n, tmp2);
5424 neon_store_reg(rd, n + 1, tmp);
5425 }
5426 } else {
5427 goto elementwise;
5428 }
5429 break;
5430 case 34: /* VUZP */
5431 if (gen_neon_unzip(rd, rm, size, q)) {
5432 return 1;
5433 }
5434 break;
5435 case 35: /* VZIP */
5436 if (gen_neon_zip(rd, rm, size, q)) {
5437 return 1;
5438 }
5439 break;
5440 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5441 if (size == 3)
5442 return 1;
5443 TCGV_UNUSED(tmp2);
5444 for (pass = 0; pass < 2; pass++) {
5445 neon_load_reg64(cpu_V0, rm + pass);
5446 tmp = new_tmp();
5447 gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
5448 if (pass == 0) {
5449 tmp2 = tmp;
5450 } else {
5451 neon_store_reg(rd, 0, tmp2);
5452 neon_store_reg(rd, 1, tmp);
5453 }
5454 }
5455 break;
5456 case 38: /* VSHLL */
5457 if (q || size == 3)
5458 return 1;
5459 tmp = neon_load_reg(rm, 0);
5460 tmp2 = neon_load_reg(rm, 1);
5461 for (pass = 0; pass < 2; pass++) {
5462 if (pass == 1)
5463 tmp = tmp2;
5464 gen_neon_widen(cpu_V0, tmp, size, 1);
5465 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5466 neon_store_reg64(cpu_V0, rd + pass);
5467 }
5468 break;
5469 case 44: /* VCVT.F16.F32 */
5470 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5471 return 1;
5472 tmp = new_tmp();
5473 tmp2 = new_tmp();
5474 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5475 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5476 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5477 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5478 tcg_gen_shli_i32(tmp2, tmp2, 16);
5479 tcg_gen_or_i32(tmp2, tmp2, tmp);
5480 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5481 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5482 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5483 neon_store_reg(rd, 0, tmp2);
5484 tmp2 = new_tmp();
5485 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5486 tcg_gen_shli_i32(tmp2, tmp2, 16);
5487 tcg_gen_or_i32(tmp2, tmp2, tmp);
5488 neon_store_reg(rd, 1, tmp2);
5489 dead_tmp(tmp);
5490 break;
5491 case 46: /* VCVT.F32.F16 */
5492 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5493 return 1;
5494 tmp3 = new_tmp();
5495 tmp = neon_load_reg(rm, 0);
5496 tmp2 = neon_load_reg(rm, 1);
5497 tcg_gen_ext16u_i32(tmp3, tmp);
5498 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5499 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5500 tcg_gen_shri_i32(tmp3, tmp, 16);
5501 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5502 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5503 dead_tmp(tmp);
5504 tcg_gen_ext16u_i32(tmp3, tmp2);
5505 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5506 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5507 tcg_gen_shri_i32(tmp3, tmp2, 16);
5508 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5509 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5510 dead_tmp(tmp2);
5511 dead_tmp(tmp3);
5512 break;
5513 default:
5514 elementwise:
5515 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5516 if (op == 30 || op == 31 || op >= 58) {
5517 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5518 neon_reg_offset(rm, pass));
5519 TCGV_UNUSED(tmp);
5520 } else {
5521 tmp = neon_load_reg(rm, pass);
5522 }
5523 switch (op) {
5524 case 1: /* VREV32 */
5525 switch (size) {
5526 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5527 case 1: gen_swap_half(tmp); break;
5528 default: return 1;
5529 }
5530 break;
5531 case 2: /* VREV16 */
5532 if (size != 0)
5533 return 1;
5534 gen_rev16(tmp);
5535 break;
5536 case 8: /* CLS */
5537 switch (size) {
5538 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5539 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5540 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5541 default: return 1;
5542 }
5543 break;
5544 case 9: /* CLZ */
5545 switch (size) {
5546 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5547 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5548 case 2: gen_helper_clz(tmp, tmp); break;
5549 default: return 1;
5550 }
5551 break;
5552 case 10: /* CNT */
5553 if (size != 0)
5554 return 1;
5555 gen_helper_neon_cnt_u8(tmp, tmp);
5556 break;
5557 case 11: /* VNOT */
5558 if (size != 0)
5559 return 1;
5560 tcg_gen_not_i32(tmp, tmp);
5561 break;
5562 case 14: /* VQABS */
5563 switch (size) {
5564 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5565 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5566 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
5567 default: return 1;
5568 }
5569 break;
5570 case 15: /* VQNEG */
5571 switch (size) {
5572 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5573 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5574 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
5575 default: return 1;
5576 }
5577 break;
5578 case 16: case 19: /* VCGT #0, VCLE #0 */
5579 tmp2 = tcg_const_i32(0);
5580 switch(size) {
5581 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5582 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5583 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5584 default: return 1;
5585 }
5586 tcg_temp_free(tmp2);
5587 if (op == 19)
5588 tcg_gen_not_i32(tmp, tmp);
5589 break;
5590 case 17: case 20: /* VCGE #0, VCLT #0 */
5591 tmp2 = tcg_const_i32(0);
5592 switch(size) {
5593 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5594 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5595 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5596 default: return 1;
5597 }
5598 tcg_temp_free(tmp2);
5599 if (op == 20)
5600 tcg_gen_not_i32(tmp, tmp);
5601 break;
5602 case 18: /* VCEQ #0 */
5603 tmp2 = tcg_const_i32(0);
5604 switch(size) {
5605 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5606 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5607 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5608 default: return 1;
5609 }
5610 tcg_temp_free(tmp2);
5611 break;
5612 case 22: /* VABS */
5613 switch(size) {
5614 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5615 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5616 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5617 default: return 1;
5618 }
5619 break;
5620 case 23: /* VNEG */
5621 if (size == 3)
5622 return 1;
5623 tmp2 = tcg_const_i32(0);
5624 gen_neon_rsb(size, tmp, tmp2);
5625 tcg_temp_free(tmp2);
5626 break;
5627 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5628 tmp2 = tcg_const_i32(0);
5629 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5630 tcg_temp_free(tmp2);
5631 if (op == 27)
5632 tcg_gen_not_i32(tmp, tmp);
5633 break;
5634 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5635 tmp2 = tcg_const_i32(0);
5636 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5637 tcg_temp_free(tmp2);
5638 if (op == 28)
5639 tcg_gen_not_i32(tmp, tmp);
5640 break;
5641 case 26: /* Float VCEQ #0 */
5642 tmp2 = tcg_const_i32(0);
5643 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5644 tcg_temp_free(tmp2);
5645 break;
5646 case 30: /* Float VABS */
5647 gen_vfp_abs(0);
5648 break;
5649 case 31: /* Float VNEG */
5650 gen_vfp_neg(0);
5651 break;
5652 case 32: /* VSWP */
5653 tmp2 = neon_load_reg(rd, pass);
5654 neon_store_reg(rm, pass, tmp2);
5655 break;
5656 case 33: /* VTRN */
5657 tmp2 = neon_load_reg(rd, pass);
5658 switch (size) {
5659 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5660 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5661 case 2: abort();
5662 default: return 1;
5663 }
5664 neon_store_reg(rm, pass, tmp2);
5665 break;
5666 case 56: /* Integer VRECPE */
5667 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5668 break;
5669 case 57: /* Integer VRSQRTE */
5670 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5671 break;
5672 case 58: /* Float VRECPE */
5673 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5674 break;
5675 case 59: /* Float VRSQRTE */
5676 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5677 break;
5678 case 60: /* VCVT.F32.S32 */
5679 gen_vfp_sito(0);
5680 break;
5681 case 61: /* VCVT.F32.U32 */
5682 gen_vfp_uito(0);
5683 break;
5684 case 62: /* VCVT.S32.F32 */
5685 gen_vfp_tosiz(0);
5686 break;
5687 case 63: /* VCVT.U32.F32 */
5688 gen_vfp_touiz(0);
5689 break;
5690 default:
5691 /* Reserved: 21, 29, 39-55 */
5692 return 1;
5693 }
5694 if (op == 30 || op == 31 || op >= 58) {
5695 tcg_gen_st_f32(cpu_F0s, cpu_env,
5696 neon_reg_offset(rd, pass));
5697 } else {
5698 neon_store_reg(rd, pass, tmp);
5699 }
5700 }
5701 break;
5702 }
5703 } else if ((insn & (1 << 10)) == 0) {
5704 /* VTBL, VTBX. */
5705 n = ((insn >> 5) & 0x18) + 8;
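/* n is the table length in bytes: bits [9:8] of the insn encode
 * 1-4 source registers of 8 bytes each. */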
5706 if (insn & (1 << 6)) {
5707 tmp = neon_load_reg(rd, 0);
5708 } else {
5709 tmp = new_tmp();
5710 tcg_gen_movi_i32(tmp, 0);
5711 }
5712 tmp2 = neon_load_reg(rm, 0);
5713 tmp4 = tcg_const_i32(rn);
5714 tmp5 = tcg_const_i32(n);
5715 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5716 dead_tmp(tmp);
5717 if (insn & (1 << 6)) {
5718 tmp = neon_load_reg(rd, 1);
5719 } else {
5720 tmp = new_tmp();
5721 tcg_gen_movi_i32(tmp, 0);
5722 }
5723 tmp3 = neon_load_reg(rm, 1);
5724 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5725 tcg_temp_free_i32(tmp5);
5726 tcg_temp_free_i32(tmp4);
5727 neon_store_reg(rd, 0, tmp2);
5728 neon_store_reg(rd, 1, tmp3);
5729 dead_tmp(tmp);
5730 } else if ((insn & 0x380) == 0) {
5731 /* VDUP */
5732 if (insn & (1 << 19)) {
5733 tmp = neon_load_reg(rm, 1);
5734 } else {
5735 tmp = neon_load_reg(rm, 0);
5736 }
5737 if (insn & (1 << 16)) {
5738 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5739 } else if (insn & (1 << 17)) {
5740 if ((insn >> 18) & 1)
5741 gen_neon_dup_high16(tmp);
5742 else
5743 gen_neon_dup_low16(tmp);
5744 }
5745 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5746 tmp2 = new_tmp();
5747 tcg_gen_mov_i32(tmp2, tmp);
5748 neon_store_reg(rd, pass, tmp2);
5749 }
5750 dead_tmp(tmp);
5751 } else {
5752 return 1;
5753 }
5754 }
5755 }
5756 return 0;
5757 }
5758
5759 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5760 {
5761 int crn = (insn >> 16) & 0xf;
5762 int crm = insn & 0xf;
5763 int op1 = (insn >> 21) & 7;
5764 int op2 = (insn >> 5) & 7;
5765 int rt = (insn >> 12) & 0xf;
5766 TCGv tmp;
5767
5768 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5769 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5770 /* TEECR */
5771 if (IS_USER(s))
5772 return 1;
5773 tmp = load_cpu_field(teecr);
5774 store_reg(s, rt, tmp);
5775 return 0;
5776 }
5777 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5778 /* TEEHBR */
5779 if (IS_USER(s) && (env->teecr & 1))
5780 return 1;
5781 tmp = load_cpu_field(teehbr);
5782 store_reg(s, rt, tmp);
5783 return 0;
5784 }
5785 }
5786 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5787 op1, crn, crm, op2);
5788 return 1;
5789 }
5790
5791 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5792 {
5793 int crn = (insn >> 16) & 0xf;
5794 int crm = insn & 0xf;
5795 int op1 = (insn >> 21) & 7;
5796 int op2 = (insn >> 5) & 7;
5797 int rt = (insn >> 12) & 0xf;
5798 TCGv tmp;
5799
5800 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5801 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5802 /* TEECR */
5803 if (IS_USER(s))
5804 return 1;
5805 tmp = load_reg(s, rt);
5806 gen_helper_set_teecr(cpu_env, tmp);
5807 dead_tmp(tmp);
5808 return 0;
5809 }
5810 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5811 /* TEEHBR */
5812 if (IS_USER(s) && (env->teecr & 1))
5813 return 1;
5814 tmp = load_reg(s, rt);
5815 store_cpu_field(tmp, teehbr);
5816 return 0;
5817 }
5818 }
5819 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5820 op1, crn, crm, op2);
5821 return 1;
5822 }
5823
5824 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5825 {
5826 int cpnum;
5827
5828 cpnum = (insn >> 8) & 0xf;
5829 if (arm_feature(env, ARM_FEATURE_XSCALE)
5830 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5831 return 1;
5832
5833 switch (cpnum) {
5834 case 0:
5835 case 1:
5836 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5837 return disas_iwmmxt_insn(env, s, insn);
5838 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5839 return disas_dsp_insn(env, s, insn);
5840 }
5841 return 1;
5842 case 10:
5843 case 11:
5844 return disas_vfp_insn (env, s, insn);
5845 case 14:
5846 /* Coprocessors 7-15 are architecturally reserved by ARM.
5847 Unfortunately Intel decided to ignore this. */
5848 if (arm_feature(env, ARM_FEATURE_XSCALE))
5849 goto board;
5850 if (insn & (1 << 20))
5851 return disas_cp14_read(env, s, insn);
5852 else
5853 return disas_cp14_write(env, s, insn);
5854 case 15:
5855 return disas_cp15_insn (env, s, insn);
5856 default:
5857 board:
5858 /* Unknown coprocessor. See if the board has hooked it. */
5859 return disas_cp_insn (env, s, insn);
5860 }
5861 }
5862
5863
5864 /* Store a 64-bit value to a register pair. Clobbers val. */
5865 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5866 {
5867 TCGv tmp;
5868 tmp = new_tmp();
5869 tcg_gen_trunc_i64_i32(tmp, val);
5870 store_reg(s, rlow, tmp);
5871 tmp = new_tmp();
5872 tcg_gen_shri_i64(val, val, 32);
5873 tcg_gen_trunc_i64_i32(tmp, val);
5874 store_reg(s, rhigh, tmp);
5875 }
5876
5877 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5878 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5879 {
5880 TCGv_i64 tmp;
5881 TCGv tmp2;
5882
5883 /* Load value and extend to 64 bits. */
5884 tmp = tcg_temp_new_i64();
5885 tmp2 = load_reg(s, rlow);
5886 tcg_gen_extu_i32_i64(tmp, tmp2);
5887 dead_tmp(tmp2);
5888 tcg_gen_add_i64(val, val, tmp);
5889 tcg_temp_free_i64(tmp);
5890 }
5891
5892 /* load and add a 64-bit value from a register pair. */
5893 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5894 {
5895 TCGv_i64 tmp;
5896 TCGv tmpl;
5897 TCGv tmph;
5898
5899 /* Load 64-bit value rd:rn. */
5900 tmpl = load_reg(s, rlow);
5901 tmph = load_reg(s, rhigh);
5902 tmp = tcg_temp_new_i64();
5903 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5904 dead_tmp(tmpl);
5905 dead_tmp(tmph);
5906 tcg_gen_add_i64(val, val, tmp);
5907 tcg_temp_free_i64(tmp);
5908 }
5909
5910 /* Set N and Z flags from a 64-bit value. */
5911 static void gen_logicq_cc(TCGv_i64 val)
5912 {
5913 TCGv tmp = new_tmp();
5914 gen_helper_logicq_cc(tmp, val);
5915 gen_logic_CC(tmp);
5916 dead_tmp(tmp);
5917 }
5918
5919 /* Load/Store exclusive instructions are implemented by remembering
5920 the value/address loaded, and seeing if these are the same
5921 when the store is performed. This should be sufficient to implement
5922 the architecturally mandated semantics, and avoids having to monitor
5923 regular stores.
5924
5925 In system emulation mode only one CPU will be running at once, so
5926 this sequence is effectively atomic. In user emulation mode we
5927 throw an exception and handle the atomic operation elsewhere. */
5928 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5929 TCGv addr, int size)
5930 {
5931 TCGv tmp;
5932
5933 switch (size) {
5934 case 0:
5935 tmp = gen_ld8u(addr, IS_USER(s));
5936 break;
5937 case 1:
5938 tmp = gen_ld16u(addr, IS_USER(s));
5939 break;
5940 case 2:
5941 case 3:
5942 tmp = gen_ld32(addr, IS_USER(s));
5943 break;
5944 default:
5945 abort();
5946 }
5947 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5948 store_reg(s, rt, tmp);
5949 if (size == 3) {
5950 TCGv tmp2 = new_tmp();
5951 tcg_gen_addi_i32(tmp2, addr, 4);
5952 tmp = gen_ld32(tmp2, IS_USER(s));
5953 dead_tmp(tmp2);
5954 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5955 store_reg(s, rt2, tmp);
5956 }
5957 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5958 }
5959
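/* -1 can never match a real load-exclusive address, so it acts
 * as the "no outstanding exclusive access" sentinel tested in
 * gen_store_exclusive() below. */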
5960 static void gen_clrex(DisasContext *s)
5961 {
5962 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5963 }
5964
5965 #ifdef CONFIG_USER_ONLY
5966 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5967 TCGv addr, int size)
5968 {
5969 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5970 tcg_gen_movi_i32(cpu_exclusive_info,
5971 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5972 gen_exception_insn(s, 4, EXCP_STREX);
5973 }
5974 #else
5975 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5976 TCGv addr, int size)
5977 {
5978 TCGv tmp;
5979 int done_label;
5980 int fail_label;
5981
5982 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
5983 [addr] = {Rt};
5984 {Rd} = 0;
5985 } else {
5986 {Rd} = 1;
5987 } */
5988 fail_label = gen_new_label();
5989 done_label = gen_new_label();
5990 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
5991 switch (size) {
5992 case 0:
5993 tmp = gen_ld8u(addr, IS_USER(s));
5994 break;
5995 case 1:
5996 tmp = gen_ld16u(addr, IS_USER(s));
5997 break;
5998 case 2:
5999 case 3:
6000 tmp = gen_ld32(addr, IS_USER(s));
6001 break;
6002 default:
6003 abort();
6004 }
6005 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6006 dead_tmp(tmp);
6007 if (size == 3) {
6008 TCGv tmp2 = new_tmp();
6009 tcg_gen_addi_i32(tmp2, addr, 4);
6010 tmp = gen_ld32(tmp2, IS_USER(s));
6011 dead_tmp(tmp2);
6012 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6013 dead_tmp(tmp);
6014 }
6015 tmp = load_reg(s, rt);
6016 switch (size) {
6017 case 0:
6018 gen_st8(tmp, addr, IS_USER(s));
6019 break;
6020 case 1:
6021 gen_st16(tmp, addr, IS_USER(s));
6022 break;
6023 case 2:
6024 case 3:
6025 gen_st32(tmp, addr, IS_USER(s));
6026 break;
6027 default:
6028 abort();
6029 }
6030 if (size == 3) {
6031 tcg_gen_addi_i32(addr, addr, 4);
6032 tmp = load_reg(s, rt2);
6033 gen_st32(tmp, addr, IS_USER(s));
6034 }
6035 tcg_gen_movi_i32(cpu_R[rd], 0);
6036 tcg_gen_br(done_label);
6037 gen_set_label(fail_label);
6038 tcg_gen_movi_i32(cpu_R[rd], 1);
6039 gen_set_label(done_label);
6040 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6041 }
6042 #endif
6043
6044 static void disas_arm_insn(CPUState * env, DisasContext *s)
6045 {
6046 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6047 TCGv tmp;
6048 TCGv tmp2;
6049 TCGv tmp3;
6050 TCGv addr;
6051 TCGv_i64 tmp64;
6052
6053 insn = ldl_code(s->pc);
6054 s->pc += 4;
6055
6056 /* M variants do not implement ARM mode. */
6057 if (IS_M(env))
6058 goto illegal_op;
6059 cond = insn >> 28;
6060 if (cond == 0xf) {
6061 /* Unconditional instructions. */
6062 if (((insn >> 25) & 7) == 1) {
6063 /* NEON Data processing. */
6064 if (!arm_feature(env, ARM_FEATURE_NEON))
6065 goto illegal_op;
6066
6067 if (disas_neon_data_insn(env, s, insn))
6068 goto illegal_op;
6069 return;
6070 }
6071 if ((insn & 0x0f100000) == 0x04000000) {
6072 /* NEON load/store. */
6073 if (!arm_feature(env, ARM_FEATURE_NEON))
6074 goto illegal_op;
6075
6076 if (disas_neon_ls_insn(env, s, insn))
6077 goto illegal_op;
6078 return;
6079 }
6080 if (((insn & 0x0f30f000) == 0x0510f000) ||
6081 ((insn & 0x0f30f010) == 0x0710f000)) {
6082 if ((insn & (1 << 22)) == 0) {
6083 /* PLDW; v7MP */
6084 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6085 goto illegal_op;
6086 }
6087 }
6088 /* Otherwise PLD; v5TE+ */
6089 return;
6090 }
6091 if (((insn & 0x0f70f000) == 0x0450f000) ||
6092 ((insn & 0x0f70f010) == 0x0650f000)) {
6093 ARCH(7);
6094 return; /* PLI; V7 */
6095 }
6096 if (((insn & 0x0f700000) == 0x04100000) ||
6097 ((insn & 0x0f700010) == 0x06100000)) {
6098 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6099 goto illegal_op;
6100 }
6101 return; /* v7MP: Unallocated memory hint: must NOP */
6102 }
6103
6104 if ((insn & 0x0ffffdff) == 0x01010000) {
6105 ARCH(6);
6106 /* setend */
6107 if (insn & (1 << 9)) {
6108 /* BE8 mode not implemented. */
6109 goto illegal_op;
6110 }
6111 return;
6112 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6113 switch ((insn >> 4) & 0xf) {
6114 case 1: /* clrex */
6115 ARCH(6K);
6116 gen_clrex(s);
6117 return;
6118 case 4: /* dsb */
6119 case 5: /* dmb */
6120 case 6: /* isb */
6121 ARCH(7);
6122 /* We don't emulate caches so these are a no-op. */
6123 return;
6124 default:
6125 goto illegal_op;
6126 }
6127 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6128 /* srs */
6129 int32_t offset;
6130 if (IS_USER(s))
6131 goto illegal_op;
6132 ARCH(6);
6133 op1 = (insn & 0x1f);
6134 addr = new_tmp();
6135 tmp = tcg_const_i32(op1);
6136 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6137 tcg_temp_free_i32(tmp);
6138 i = (insn >> 23) & 3;
6139 switch (i) {
6140 case 0: offset = -4; break; /* DA */
6141 case 1: offset = 0; break; /* IA */
6142 case 2: offset = -8; break; /* DB */
6143 case 3: offset = 4; break; /* IB */
6144 default: abort();
6145 }
6146 if (offset)
6147 tcg_gen_addi_i32(addr, addr, offset);
6148 tmp = load_reg(s, 14);
6149 gen_st32(tmp, addr, 0);
6150 tmp = load_cpu_field(spsr);
6151 tcg_gen_addi_i32(addr, addr, 4);
6152 gen_st32(tmp, addr, 0);
6153 if (insn & (1 << 21)) {
6154 /* Base writeback. */
6155 switch (i) {
6156 case 0: offset = -8; break;
6157 case 1: offset = 4; break;
6158 case 2: offset = -4; break;
6159 case 3: offset = 0; break;
6160 default: abort();
6161 }
6162 if (offset)
6163 tcg_gen_addi_i32(addr, addr, offset);
6164 tmp = tcg_const_i32(op1);
6165 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6166 tcg_temp_free_i32(tmp);
6167 dead_tmp(addr);
6168 } else {
6169 dead_tmp(addr);
6170 }
6171 return;
6172 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6173 /* rfe */
6174 int32_t offset;
6175 if (IS_USER(s))
6176 goto illegal_op;
6177 ARCH(6);
6178 rn = (insn >> 16) & 0xf;
6179 addr = load_reg(s, rn);
6180 i = (insn >> 23) & 3;
6181 switch (i) {
6182 case 0: offset = -4; break; /* DA */
6183 case 1: offset = 0; break; /* IA */
6184 case 2: offset = -8; break; /* DB */
6185 case 3: offset = 4; break; /* IB */
6186 default: abort();
6187 }
6188 if (offset)
6189 tcg_gen_addi_i32(addr, addr, offset);
6190 /* Load PC into tmp and CPSR into tmp2. */
6191 tmp = gen_ld32(addr, 0);
6192 tcg_gen_addi_i32(addr, addr, 4);
6193 tmp2 = gen_ld32(addr, 0);
6194 if (insn & (1 << 21)) {
6195 /* Base writeback. */
6196 switch (i) {
6197 case 0: offset = -8; break;
6198 case 1: offset = 4; break;
6199 case 2: offset = -4; break;
6200 case 3: offset = 0; break;
6201 default: abort();
6202 }
6203 if (offset)
6204 tcg_gen_addi_i32(addr, addr, offset);
6205 store_reg(s, rn, addr);
6206 } else {
6207 dead_tmp(addr);
6208 }
6209 gen_rfe(s, tmp, tmp2);
6210 return;
6211 } else if ((insn & 0x0e000000) == 0x0a000000) {
6212 /* branch link and change to thumb (blx <offset>) */
6213 int32_t offset;
6214
6215 val = (uint32_t)s->pc;
6216 tmp = new_tmp();
6217 tcg_gen_movi_i32(tmp, val);
6218 store_reg(s, 14, tmp);
6219 /* Sign-extend the 24-bit offset */
6220 offset = (((int32_t)insn) << 8) >> 8;
6221 /* offset * 4 + bit24 * 2 + (thumb bit) */
6222 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6223 /* pipeline offset */
6224 val += 4;
6225 gen_bx_im(s, val);
6226 return;
6227 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6228 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6229 /* iWMMXt register transfer. */
6230 if (env->cp15.c15_cpar & (1 << 1))
6231 if (!disas_iwmmxt_insn(env, s, insn))
6232 return;
6233 }
6234 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6235 /* Coprocessor double register transfer. */
6236 } else if ((insn & 0x0f000010) == 0x0e000010) {
6237 /* Additional coprocessor register transfer. */
6238 } else if ((insn & 0x0ff10020) == 0x01000000) {
6239 uint32_t mask;
6240 uint32_t val;
6241 /* cps (privileged) */
6242 if (IS_USER(s))
6243 return;
6244 mask = val = 0;
6245 if (insn & (1 << 19)) {
6246 if (insn & (1 << 8))
6247 mask |= CPSR_A;
6248 if (insn & (1 << 7))
6249 mask |= CPSR_I;
6250 if (insn & (1 << 6))
6251 mask |= CPSR_F;
6252 if (insn & (1 << 18))
6253 val |= mask;
6254 }
6255 if (insn & (1 << 17)) {
6256 mask |= CPSR_M;
6257 val |= (insn & 0x1f);
6258 }
6259 if (mask) {
6260 gen_set_psr_im(s, mask, 0, val);
6261 }
6262 return;
6263 }
6264 goto illegal_op;
6265 }
6266 if (cond != 0xe) {
6267 /* If the condition is not "always", generate a conditional
6268 jump to the next instruction. */
6269 s->condlabel = gen_new_label();
6270 gen_test_cc(cond ^ 1, s->condlabel);
6271 s->condjmp = 1;
6272 }
6273 if ((insn & 0x0f900000) == 0x03000000) {
6274 if ((insn & (1 << 21)) == 0) {
6275 ARCH(6T2);
6276 rd = (insn >> 12) & 0xf;
6277 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6278 if ((insn & (1 << 22)) == 0) {
6279 /* MOVW */
6280 tmp = new_tmp();
6281 tcg_gen_movi_i32(tmp, val);
6282 } else {
6283 /* MOVT */
6284 tmp = load_reg(s, rd);
6285 tcg_gen_ext16u_i32(tmp, tmp);
6286 tcg_gen_ori_i32(tmp, tmp, val << 16);
6287 }
6288 store_reg(s, rd, tmp);
6289 } else {
6290 if (((insn >> 12) & 0xf) != 0xf)
6291 goto illegal_op;
6292 if (((insn >> 16) & 0xf) == 0) {
6293 gen_nop_hint(s, insn & 0xff);
6294 } else {
6295 /* CPSR = immediate */
6296 val = insn & 0xff;
6297 shift = ((insn >> 8) & 0xf) * 2;
6298 if (shift)
6299 val = (val >> shift) | (val << (32 - shift));
6300 i = ((insn & (1 << 22)) != 0);
6301 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6302 goto illegal_op;
6303 }
6304 }
6305 } else if ((insn & 0x0f900000) == 0x01000000
6306 && (insn & 0x00000090) != 0x00000090) {
6307 /* miscellaneous instructions */
6308 op1 = (insn >> 21) & 3;
6309 sh = (insn >> 4) & 0xf;
6310 rm = insn & 0xf;
6311 switch (sh) {
6312 case 0x0: /* move program status register */
6313 if (op1 & 1) {
6314 /* PSR = reg */
6315 tmp = load_reg(s, rm);
6316 i = ((op1 & 2) != 0);
6317 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6318 goto illegal_op;
6319 } else {
6320 /* reg = PSR */
6321 rd = (insn >> 12) & 0xf;
6322 if (op1 & 2) {
6323 if (IS_USER(s))
6324 goto illegal_op;
6325 tmp = load_cpu_field(spsr);
6326 } else {
6327 tmp = new_tmp();
6328 gen_helper_cpsr_read(tmp);
6329 }
6330 store_reg(s, rd, tmp);
6331 }
6332 break;
6333 case 0x1:
6334 if (op1 == 1) {
6335 /* branch/exchange thumb (bx). */
6336 tmp = load_reg(s, rm);
6337 gen_bx(s, tmp);
6338 } else if (op1 == 3) {
6339 /* clz */
6340 rd = (insn >> 12) & 0xf;
6341 tmp = load_reg(s, rm);
6342 gen_helper_clz(tmp, tmp);
6343 store_reg(s, rd, tmp);
6344 } else {
6345 goto illegal_op;
6346 }
6347 break;
6348 case 0x2:
6349 if (op1 == 1) {
6350 ARCH(5J); /* bxj */
6351 /* Trivial implementation equivalent to bx. */
6352 tmp = load_reg(s, rm);
6353 gen_bx(s, tmp);
6354 } else {
6355 goto illegal_op;
6356 }
6357 break;
6358 case 0x3:
6359 if (op1 != 1)
6360 goto illegal_op;
6361
6362 /* branch link/exchange thumb (blx) */
6363 tmp = load_reg(s, rm);
6364 tmp2 = new_tmp();
6365 tcg_gen_movi_i32(tmp2, s->pc);
6366 store_reg(s, 14, tmp2);
6367 gen_bx(s, tmp);
6368 break;
6369 case 0x5: /* saturating add/subtract */
6370 rd = (insn >> 12) & 0xf;
6371 rn = (insn >> 16) & 0xf;
6372 tmp = load_reg(s, rm);
6373 tmp2 = load_reg(s, rn);
6374 if (op1 & 2)
6375 gen_helper_double_saturate(tmp2, tmp2);
6376 if (op1 & 1)
6377 gen_helper_sub_saturate(tmp, tmp, tmp2);
6378 else
6379 gen_helper_add_saturate(tmp, tmp, tmp2);
6380 dead_tmp(tmp2);
6381 store_reg(s, rd, tmp);
6382 break;
6383 case 7:
6384 /* SMC instruction (op1 == 3)
6385 and undefined instructions (op1 == 0 || op1 == 2)
6386 will trap */
6387 if (op1 != 1) {
6388 goto illegal_op;
6389 }
6390 /* bkpt */
6391 gen_exception_insn(s, 4, EXCP_BKPT);
6392 break;
6393 case 0x8: /* signed multiply */
6394 case 0xa:
6395 case 0xc:
6396 case 0xe:
6397 rs = (insn >> 8) & 0xf;
6398 rn = (insn >> 12) & 0xf;
6399 rd = (insn >> 16) & 0xf;
6400 if (op1 == 1) {
6401 /* (32 * 16) >> 16 */
6402 tmp = load_reg(s, rm);
6403 tmp2 = load_reg(s, rs);
6404 if (sh & 4)
6405 tcg_gen_sari_i32(tmp2, tmp2, 16);
6406 else
6407 gen_sxth(tmp2);
6408 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6409 tcg_gen_shri_i64(tmp64, tmp64, 16);
6410 tmp = new_tmp();
6411 tcg_gen_trunc_i64_i32(tmp, tmp64);
6412 tcg_temp_free_i64(tmp64);
6413 if ((sh & 2) == 0) {
6414 tmp2 = load_reg(s, rn);
6415 gen_helper_add_setq(tmp, tmp, tmp2);
6416 dead_tmp(tmp2);
6417 }
6418 store_reg(s, rd, tmp);
6419 } else {
6420 /* 16 * 16 */
6421 tmp = load_reg(s, rm);
6422 tmp2 = load_reg(s, rs);
6423 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6424 dead_tmp(tmp2);
6425 if (op1 == 2) {
6426 tmp64 = tcg_temp_new_i64();
6427 tcg_gen_ext_i32_i64(tmp64, tmp);
6428 dead_tmp(tmp);
6429 gen_addq(s, tmp64, rn, rd);
6430 gen_storeq_reg(s, rn, rd, tmp64);
6431 tcg_temp_free_i64(tmp64);
6432 } else {
6433 if (op1 == 0) {
6434 tmp2 = load_reg(s, rn);
6435 gen_helper_add_setq(tmp, tmp, tmp2);
6436 dead_tmp(tmp2);
6437 }
6438 store_reg(s, rd, tmp);
6439 }
6440 }
6441 break;
6442 default:
6443 goto illegal_op;
6444 }
6445 } else if (((insn & 0x0e000000) == 0 &&
6446 (insn & 0x00000090) != 0x90) ||
6447 ((insn & 0x0e000000) == (1 << 25))) {
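/* This arm of the decoder covers the data processing space: bits
   27:25 == 000 with bits 7 and 4 not both set (that pattern is the
   multiplies and extra load/stores handled elsewhere), or bits
   27:25 == 001 for the immediate forms. */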
6448 int set_cc, logic_cc, shiftop;
6449
6450 op1 = (insn >> 21) & 0xf;
6451 set_cc = (insn >> 20) & 1;
6452 logic_cc = table_logic_cc[op1] & set_cc;
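/* logic_cc is true only for the logical ops (AND/EOR/TST/TEQ/ORR/
   MOV/BIC/MVN) with the S bit set; for these the carry flag comes
   from the shifter output rather than from an arithmetic result. */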
6453
6454 /* data processing instruction */
6455 if (insn & (1 << 25)) {
6456 /* immediate operand */
6457 val = insn & 0xff;
6458 shift = ((insn >> 8) & 0xf) * 2;
6459 if (shift) {
6460 val = (val >> shift) | (val << (32 - shift));
6461 }
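/* The A32 immediate is imm8 rotated right by twice the 4-bit rotate
   field; e.g. imm8 = 0xff with rotate = 4 gives shift = 8 and
   val = 0xff000000. */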
6462 tmp2 = new_tmp();
6463 tcg_gen_movi_i32(tmp2, val);
6464 if (logic_cc && shift) {
6465 gen_set_CF_bit31(tmp2);
6466 }
6467 } else {
6468 /* register */
6469 rm = (insn) & 0xf;
6470 tmp2 = load_reg(s, rm);
6471 shiftop = (insn >> 5) & 3;
6472 if (!(insn & (1 << 4))) {
6473 shift = (insn >> 7) & 0x1f;
6474 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6475 } else {
6476 rs = (insn >> 8) & 0xf;
6477 tmp = load_reg(s, rs);
6478 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6479 }
6480 }
6481 if (op1 != 0x0f && op1 != 0x0d) {
6482 rn = (insn >> 16) & 0xf;
6483 tmp = load_reg(s, rn);
6484 } else {
6485 TCGV_UNUSED(tmp);
6486 }
6487 rd = (insn >> 12) & 0xf;
6488 switch(op1) {
6489 case 0x00:
6490 tcg_gen_and_i32(tmp, tmp, tmp2);
6491 if (logic_cc) {
6492 gen_logic_CC(tmp);
6493 }
6494 store_reg_bx(env, s, rd, tmp);
6495 break;
6496 case 0x01:
6497 tcg_gen_xor_i32(tmp, tmp, tmp2);
6498 if (logic_cc) {
6499 gen_logic_CC(tmp);
6500 }
6501 store_reg_bx(env, s, rd, tmp);
6502 break;
6503 case 0x02:
6504 if (set_cc && rd == 15) {
6505 /* SUBS r15, ... is used for exception return. */
6506 if (IS_USER(s)) {
6507 goto illegal_op;
6508 }
6509 gen_helper_sub_cc(tmp, tmp, tmp2);
6510 gen_exception_return(s, tmp);
6511 } else {
6512 if (set_cc) {
6513 gen_helper_sub_cc(tmp, tmp, tmp2);
6514 } else {
6515 tcg_gen_sub_i32(tmp, tmp, tmp2);
6516 }
6517 store_reg_bx(env, s, rd, tmp);
6518 }
6519 break;
6520 case 0x03:
6521 if (set_cc) {
6522 gen_helper_sub_cc(tmp, tmp2, tmp);
6523 } else {
6524 tcg_gen_sub_i32(tmp, tmp2, tmp);
6525 }
6526 store_reg_bx(env, s, rd, tmp);
6527 break;
6528 case 0x04:
6529 if (set_cc) {
6530 gen_helper_add_cc(tmp, tmp, tmp2);
6531 } else {
6532 tcg_gen_add_i32(tmp, tmp, tmp2);
6533 }
6534 store_reg_bx(env, s, rd, tmp);
6535 break;
6536 case 0x05:
6537 if (set_cc) {
6538 gen_helper_adc_cc(tmp, tmp, tmp2);
6539 } else {
6540 gen_add_carry(tmp, tmp, tmp2);
6541 }
6542 store_reg_bx(env, s, rd, tmp);
6543 break;
6544 case 0x06:
6545 if (set_cc) {
6546 gen_helper_sbc_cc(tmp, tmp, tmp2);
6547 } else {
6548 gen_sub_carry(tmp, tmp, tmp2);
6549 }
6550 store_reg_bx(env, s, rd, tmp);
6551 break;
6552 case 0x07:
6553 if (set_cc) {
6554 gen_helper_sbc_cc(tmp, tmp2, tmp);
6555 } else {
6556 gen_sub_carry(tmp, tmp2, tmp);
6557 }
6558 store_reg_bx(env, s, rd, tmp);
6559 break;
6560 case 0x08:
6561 if (set_cc) {
6562 tcg_gen_and_i32(tmp, tmp, tmp2);
6563 gen_logic_CC(tmp);
6564 }
6565 dead_tmp(tmp);
6566 break;
6567 case 0x09:
6568 if (set_cc) {
6569 tcg_gen_xor_i32(tmp, tmp, tmp2);
6570 gen_logic_CC(tmp);
6571 }
6572 dead_tmp(tmp);
6573 break;
6574 case 0x0a:
6575 if (set_cc) {
6576 gen_helper_sub_cc(tmp, tmp, tmp2);
6577 }
6578 dead_tmp(tmp);
6579 break;
6580 case 0x0b:
6581 if (set_cc) {
6582 gen_helper_add_cc(tmp, tmp, tmp2);
6583 }
6584 dead_tmp(tmp);
6585 break;
6586 case 0x0c:
6587 tcg_gen_or_i32(tmp, tmp, tmp2);
6588 if (logic_cc) {
6589 gen_logic_CC(tmp);
6590 }
6591 store_reg_bx(env, s, rd, tmp);
6592 break;
6593 case 0x0d:
6594 if (logic_cc && rd == 15) {
6595 /* MOVS r15, ... is used for exception return. */
6596 if (IS_USER(s)) {
6597 goto illegal_op;
6598 }
6599 gen_exception_return(s, tmp2);
6600 } else {
6601 if (logic_cc) {
6602 gen_logic_CC(tmp2);
6603 }
6604 store_reg_bx(env, s, rd, tmp2);
6605 }
6606 break;
6607 case 0x0e:
6608 tcg_gen_andc_i32(tmp, tmp, tmp2);
6609 if (logic_cc) {
6610 gen_logic_CC(tmp);
6611 }
6612 store_reg_bx(env, s, rd, tmp);
6613 break;
6614 default:
6615 case 0x0f:
6616 tcg_gen_not_i32(tmp2, tmp2);
6617 if (logic_cc) {
6618 gen_logic_CC(tmp2);
6619 }
6620 store_reg_bx(env, s, rd, tmp2);
6621 break;
6622 }
6623 if (op1 != 0x0f && op1 != 0x0d) {
6624 dead_tmp(tmp2);
6625 }
6626 } else {
6627 /* other instructions */
6628 op1 = (insn >> 24) & 0xf;
6629 switch(op1) {
6630 case 0x0:
6631 case 0x1:
6632 /* multiplies, extra load/stores */
6633 sh = (insn >> 5) & 3;
6634 if (sh == 0) {
6635 if (op1 == 0x0) {
6636 rd = (insn >> 16) & 0xf;
6637 rn = (insn >> 12) & 0xf;
6638 rs = (insn >> 8) & 0xf;
6639 rm = (insn) & 0xf;
6640 op1 = (insn >> 20) & 0xf;
6641 switch (op1) {
6642 case 0: case 1: case 2: case 3: case 6:
6643 /* 32 bit mul */
6644 tmp = load_reg(s, rs);
6645 tmp2 = load_reg(s, rm);
6646 tcg_gen_mul_i32(tmp, tmp, tmp2);
6647 dead_tmp(tmp2);
6648 if (insn & (1 << 22)) {
6649 /* Subtract (mls) */
6650 ARCH(6T2);
6651 tmp2 = load_reg(s, rn);
6652 tcg_gen_sub_i32(tmp, tmp2, tmp);
6653 dead_tmp(tmp2);
6654 } else if (insn & (1 << 21)) {
6655 /* Add */
6656 tmp2 = load_reg(s, rn);
6657 tcg_gen_add_i32(tmp, tmp, tmp2);
6658 dead_tmp(tmp2);
6659 }
6660 if (insn & (1 << 20))
6661 gen_logic_CC(tmp);
6662 store_reg(s, rd, tmp);
6663 break;
6664 case 4:
6665 /* 64 bit mul double accumulate (UMAAL) */
6666 ARCH(6);
6667 tmp = load_reg(s, rs);
6668 tmp2 = load_reg(s, rm);
6669 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6670 gen_addq_lo(s, tmp64, rn);
6671 gen_addq_lo(s, tmp64, rd);
6672 gen_storeq_reg(s, rn, rd, tmp64);
6673 tcg_temp_free_i64(tmp64);
6674 break;
6675 case 8: case 9: case 10: case 11:
6676 case 12: case 13: case 14: case 15:
6677 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6678 tmp = load_reg(s, rs);
6679 tmp2 = load_reg(s, rm);
6680 if (insn & (1 << 22)) {
6681 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6682 } else {
6683 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6684 }
6685 if (insn & (1 << 21)) { /* mult accumulate */
6686 gen_addq(s, tmp64, rn, rd);
6687 }
6688 if (insn & (1 << 20)) {
6689 gen_logicq_cc(tmp64);
6690 }
6691 gen_storeq_reg(s, rn, rd, tmp64);
6692 tcg_temp_free_i64(tmp64);
6693 break;
6694 default:
6695 goto illegal_op;
6696 }
6697 } else {
6698 rn = (insn >> 16) & 0xf;
6699 rd = (insn >> 12) & 0xf;
6700 if (insn & (1 << 23)) {
6701 /* load/store exclusive */
6702 op1 = (insn >> 21) & 0x3;
6703 if (op1)
6704 ARCH(6K);
6705 else
6706 ARCH(6);
6707 addr = tcg_temp_local_new_i32();
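/* A local temp is used here because gen_store_exclusive emits
   branches internally, and ordinary TCG temps do not keep their
   value across a branch. */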
6708 load_reg_var(s, addr, rn);
6709 if (insn & (1 << 20)) {
6710 switch (op1) {
6711 case 0: /* ldrex */
6712 gen_load_exclusive(s, rd, 15, addr, 2);
6713 break;
6714 case 1: /* ldrexd */
6715 gen_load_exclusive(s, rd, rd + 1, addr, 3);
6716 break;
6717 case 2: /* ldrexb */
6718 gen_load_exclusive(s, rd, 15, addr, 0);
6719 break;
6720 case 3: /* ldrexh */
6721 gen_load_exclusive(s, rd, 15, addr, 1);
6722 break;
6723 default:
6724 abort();
6725 }
6726 } else {
6727 rm = insn & 0xf;
6728 switch (op1) {
6729 case 0: /* strex */
6730 gen_store_exclusive(s, rd, rm, 15, addr, 2);
6731 break;
6732 case 1: /* strexd */
6733 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
6734 break;
6735 case 2: /* strexb */
6736 gen_store_exclusive(s, rd, rm, 15, addr, 0);
6737 break;
6738 case 3: /* strexh */
6739 gen_store_exclusive(s, rd, rm, 15, addr, 1);
6740 break;
6741 default:
6742 abort();
6743 }
6744 }
6745 tcg_temp_free(addr);
6746 } else {
6747 /* SWP instruction */
6748 rm = (insn) & 0xf;
6749
6750 /* ??? This is not really atomic. However, we know
6751 we never have multiple CPUs running in parallel,
6752 so it is good enough. */
6753 addr = load_reg(s, rn);
6754 tmp = load_reg(s, rm);
6755 if (insn & (1 << 22)) {
6756 tmp2 = gen_ld8u(addr, IS_USER(s));
6757 gen_st8(tmp, addr, IS_USER(s));
6758 } else {
6759 tmp2 = gen_ld32(addr, IS_USER(s));
6760 gen_st32(tmp, addr, IS_USER(s));
6761 }
6762 dead_tmp(addr);
6763 store_reg(s, rd, tmp2);
6764 }
6765 }
6766 } else {
6767 int address_offset;
6768 int load;
6769 /* Misc load/store */
6770 rn = (insn >> 16) & 0xf;
6771 rd = (insn >> 12) & 0xf;
6772 addr = load_reg(s, rn);
6773 if (insn & (1 << 24))
6774 gen_add_datah_offset(s, insn, 0, addr);
6775 address_offset = 0;
6776 if (insn & (1 << 20)) {
6777 /* load */
6778 switch(sh) {
6779 case 1:
6780 tmp = gen_ld16u(addr, IS_USER(s));
6781 break;
6782 case 2:
6783 tmp = gen_ld8s(addr, IS_USER(s));
6784 break;
6785 default:
6786 case 3:
6787 tmp = gen_ld16s(addr, IS_USER(s));
6788 break;
6789 }
6790 load = 1;
6791 } else if (sh & 2) {
6792 /* doubleword */
6793 if (sh & 1) {
6794 /* store */
6795 tmp = load_reg(s, rd);
6796 gen_st32(tmp, addr, IS_USER(s));
6797 tcg_gen_addi_i32(addr, addr, 4);
6798 tmp = load_reg(s, rd + 1);
6799 gen_st32(tmp, addr, IS_USER(s));
6800 load = 0;
6801 } else {
6802 /* load */
6803 tmp = gen_ld32(addr, IS_USER(s));
6804 store_reg(s, rd, tmp);
6805 tcg_gen_addi_i32(addr, addr, 4);
6806 tmp = gen_ld32(addr, IS_USER(s));
6807 rd++;
6808 load = 1;
6809 }
6810 address_offset = -4;
6811 } else {
6812 /* store */
6813 tmp = load_reg(s, rd);
6814 gen_st16(tmp, addr, IS_USER(s));
6815 load = 0;
6816 }
6817 /* Perform base writeback before the loaded value to
6818 ensure correct behavior with overlapping index registers.
6819 ldrd with base writeback is undefined if the
6820 destination and index registers overlap. */
6821 if (!(insn & (1 << 24))) {
6822 gen_add_datah_offset(s, insn, address_offset, addr);
6823 store_reg(s, rn, addr);
6824 } else if (insn & (1 << 21)) {
6825 if (address_offset)
6826 tcg_gen_addi_i32(addr, addr, address_offset);
6827 store_reg(s, rn, addr);
6828 } else {
6829 dead_tmp(addr);
6830 }
6831 if (load) {
6832 /* Complete the load. */
6833 store_reg(s, rd, tmp);
6834 }
6835 }
6836 break;
6837 case 0x4:
6838 case 0x5:
6839 goto do_ldst;
6840 case 0x6:
6841 case 0x7:
6842 if (insn & (1 << 4)) {
6843 ARCH(6);
6844 /* ARMv6 media instructions. */
6845 rm = insn & 0xf;
6846 rn = (insn >> 16) & 0xf;
6847 rd = (insn >> 12) & 0xf;
6848 rs = (insn >> 8) & 0xf;
6849 switch ((insn >> 23) & 3) {
6850 case 0: /* Parallel add/subtract. */
6851 op1 = (insn >> 20) & 7;
6852 tmp = load_reg(s, rn);
6853 tmp2 = load_reg(s, rm);
6854 sh = (insn >> 5) & 7;
6855 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6856 goto illegal_op;
6857 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6858 dead_tmp(tmp2);
6859 store_reg(s, rd, tmp);
6860 break;
6861 case 1:
6862 if ((insn & 0x00700020) == 0) {
6863 /* Halfword pack. */
6864 tmp = load_reg(s, rn);
6865 tmp2 = load_reg(s, rm);
6866 shift = (insn >> 7) & 0x1f;
6867 if (insn & (1 << 6)) {
6868 /* pkhtb */
6869 if (shift == 0)
6870 shift = 31;
6871 tcg_gen_sari_i32(tmp2, tmp2, shift);
6872 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6873 tcg_gen_ext16u_i32(tmp2, tmp2);
6874 } else {
6875 /* pkhbt */
6876 if (shift)
6877 tcg_gen_shli_i32(tmp2, tmp2, shift);
6878 tcg_gen_ext16u_i32(tmp, tmp);
6879 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6880 }
6881 tcg_gen_or_i32(tmp, tmp, tmp2);
6882 dead_tmp(tmp2);
6883 store_reg(s, rd, tmp);
6884 } else if ((insn & 0x00200020) == 0x00200000) {
6885 /* [us]sat */
6886 tmp = load_reg(s, rm);
6887 shift = (insn >> 7) & 0x1f;
6888 if (insn & (1 << 6)) {
6889 if (shift == 0)
6890 shift = 31;
6891 tcg_gen_sari_i32(tmp, tmp, shift);
6892 } else {
6893 tcg_gen_shli_i32(tmp, tmp, shift);
6894 }
6895 sh = (insn >> 16) & 0x1f;
6896 tmp2 = tcg_const_i32(sh);
6897 if (insn & (1 << 22))
6898 gen_helper_usat(tmp, tmp, tmp2);
6899 else
6900 gen_helper_ssat(tmp, tmp, tmp2);
6901 tcg_temp_free_i32(tmp2);
6902 store_reg(s, rd, tmp);
6903 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6904 /* [us]sat16 */
6905 tmp = load_reg(s, rm);
6906 sh = (insn >> 16) & 0x1f;
6907 tmp2 = tcg_const_i32(sh);
6908 if (insn & (1 << 22))
6909 gen_helper_usat16(tmp, tmp, tmp2);
6910 else
6911 gen_helper_ssat16(tmp, tmp, tmp2);
6912 tcg_temp_free_i32(tmp2);
6913 store_reg(s, rd, tmp);
6914 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6915 /* Select bytes. */
6916 tmp = load_reg(s, rn);
6917 tmp2 = load_reg(s, rm);
6918 tmp3 = new_tmp();
6919 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6920 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6921 dead_tmp(tmp3);
6922 dead_tmp(tmp2);
6923 store_reg(s, rd, tmp);
6924 } else if ((insn & 0x000003e0) == 0x00000060) {
6925 tmp = load_reg(s, rm);
6926 shift = (insn >> 10) & 3;
6927 /* ??? In many cases it's not necessary to do a
6928 rotate; a shift is sufficient. */
6929 if (shift != 0)
6930 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
6931 op1 = (insn >> 20) & 7;
6932 switch (op1) {
6933 case 0: gen_sxtb16(tmp); break;
6934 case 2: gen_sxtb(tmp); break;
6935 case 3: gen_sxth(tmp); break;
6936 case 4: gen_uxtb16(tmp); break;
6937 case 6: gen_uxtb(tmp); break;
6938 case 7: gen_uxth(tmp); break;
6939 default: goto illegal_op;
6940 }
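/* With rn == 15 these are the plain extend ops (SXTB16, SXTB, SXTH,
   UXTB16, UXTB, UXTH); otherwise the rn operand is added just below,
   giving the extend-and-add forms (SXTAB16, SXTAB, SXTAH, UXTAB16,
   UXTAB, UXTAH). */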
6941 if (rn != 15) {
6942 tmp2 = load_reg(s, rn);
6943 if ((op1 & 3) == 0) {
6944 gen_add16(tmp, tmp2);
6945 } else {
6946 tcg_gen_add_i32(tmp, tmp, tmp2);
6947 dead_tmp(tmp2);
6948 }
6949 }
6950 store_reg(s, rd, tmp);
6951 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6952 /* rev */
6953 tmp = load_reg(s, rm);
6954 if (insn & (1 << 22)) {
6955 if (insn & (1 << 7)) {
6956 gen_revsh(tmp);
6957 } else {
6958 ARCH(6T2);
6959 gen_helper_rbit(tmp, tmp);
6960 }
6961 } else {
6962 if (insn & (1 << 7))
6963 gen_rev16(tmp);
6964 else
6965 tcg_gen_bswap32_i32(tmp, tmp);
6966 }
6967 store_reg(s, rd, tmp);
6968 } else {
6969 goto illegal_op;
6970 }
6971 break;
6972 case 2: /* Multiplies (Type 3). */
6973 tmp = load_reg(s, rm);
6974 tmp2 = load_reg(s, rs);
6975 if (insn & (1 << 20)) {
6976 /* Signed multiply most significant [accumulate].
6977 (SMMUL, SMMLA, SMMLS) */
6978 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6979
6980 if (rd != 15) {
6981 tmp = load_reg(s, rd);
6982 if (insn & (1 << 6)) {
6983 tmp64 = gen_subq_msw(tmp64, tmp);
6984 } else {
6985 tmp64 = gen_addq_msw(tmp64, tmp);
6986 }
6987 }
6988 if (insn & (1 << 5)) {
6989 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6990 }
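/* Bit 5 is the R (round) bit: adding 0x80000000 before taking the
   top 32 bits rounds the result to nearest instead of truncating
   (SMMULR/SMMLAR/SMMLSR). */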
6991 tcg_gen_shri_i64(tmp64, tmp64, 32);
6992 tmp = new_tmp();
6993 tcg_gen_trunc_i64_i32(tmp, tmp64);
6994 tcg_temp_free_i64(tmp64);
6995 store_reg(s, rn, tmp);
6996 } else {
6997 if (insn & (1 << 5))
6998 gen_swap_half(tmp2);
6999 gen_smul_dual(tmp, tmp2);
7000 /* This addition cannot overflow. */
7001 if (insn & (1 << 6)) {
7002 tcg_gen_sub_i32(tmp, tmp, tmp2);
7003 } else {
7004 tcg_gen_add_i32(tmp, tmp, tmp2);
7005 }
7006 dead_tmp(tmp2);
7007 if (insn & (1 << 22)) {
7008 /* smlald, smlsld */
7009 tmp64 = tcg_temp_new_i64();
7010 tcg_gen_ext_i32_i64(tmp64, tmp);
7011 dead_tmp(tmp);
7012 gen_addq(s, tmp64, rd, rn);
7013 gen_storeq_reg(s, rd, rn, tmp64);
7014 tcg_temp_free_i64(tmp64);
7015 } else {
7016 /* smuad, smusd, smlad, smlsd */
7017 if (rd != 15)
7018 {
7019 tmp2 = load_reg(s, rd);
7020 gen_helper_add_setq(tmp, tmp, tmp2);
7021 dead_tmp(tmp2);
7022 }
7023 store_reg(s, rn, tmp);
7024 }
7025 }
7026 break;
7027 case 3:
7028 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7029 switch (op1) {
7030 case 0: /* Unsigned sum of absolute differences. */
7031 ARCH(6);
7032 tmp = load_reg(s, rm);
7033 tmp2 = load_reg(s, rs);
7034 gen_helper_usad8(tmp, tmp, tmp2);
7035 dead_tmp(tmp2);
7036 if (rd != 15) {
7037 tmp2 = load_reg(s, rd);
7038 tcg_gen_add_i32(tmp, tmp, tmp2);
7039 dead_tmp(tmp2);
7040 }
7041 store_reg(s, rn, tmp);
7042 break;
7043 case 0x20: case 0x24: case 0x28: case 0x2c:
7044 /* Bitfield insert/clear. */
7045 ARCH(6T2);
7046 shift = (insn >> 7) & 0x1f;
7047 i = (insn >> 16) & 0x1f;
7048 i = i + 1 - shift;
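/* insn[20:16] is the field MSB and insn[11:7] the LSB, so i is now
   the field width; e.g. msb = 15, lsb = 8 gives an 8-bit field and
   mask 0xff.  rm == 15 encodes BFC, which inserts zeros. */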
7049 if (rm == 15) {
7050 tmp = new_tmp();
7051 tcg_gen_movi_i32(tmp, 0);
7052 } else {
7053 tmp = load_reg(s, rm);
7054 }
7055 if (i != 32) {
7056 tmp2 = load_reg(s, rd);
7057 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7058 dead_tmp(tmp2);
7059 }
7060 store_reg(s, rd, tmp);
7061 break;
7062 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7063 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7064 ARCH(6T2);
7065 tmp = load_reg(s, rm);
7066 shift = (insn >> 7) & 0x1f;
7067 i = ((insn >> 16) & 0x1f) + 1;
7068 if (shift + i > 32)
7069 goto illegal_op;
7070 if (i < 32) {
7071 if (op1 & 0x20) {
7072 gen_ubfx(tmp, shift, (1u << i) - 1);
7073 } else {
7074 gen_sbfx(tmp, shift, i);
7075 }
7076 }
7077 store_reg(s, rd, tmp);
7078 break;
7079 default:
7080 goto illegal_op;
7081 }
7082 break;
7083 }
7084 break;
7085 }
7086 do_ldst:
7087 /* Check for undefined extension instructions
7088 * per the ARM Bible, i.e.:
7089 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7090 */
7091 sh = (0xf << 20) | (0xf << 4);
7092 if (op1 == 0x7 && ((insn & sh) == sh))
7093 {
7094 goto illegal_op;
7095 }
7096 /* load/store byte/word */
7097 rn = (insn >> 16) & 0xf;
7098 rd = (insn >> 12) & 0xf;
7099 tmp2 = load_reg(s, rn);
7100 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
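/* P == 0 with W == 1 (mask 0x01200000 matching 0x00200000) encodes
   the unprivileged LDRT/LDRBT/STRT/STRBT forms, which always use a
   user-mode access. */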
7101 if (insn & (1 << 24))
7102 gen_add_data_offset(s, insn, tmp2);
7103 if (insn & (1 << 20)) {
7104 /* load */
7105 if (insn & (1 << 22)) {
7106 tmp = gen_ld8u(tmp2, i);
7107 } else {
7108 tmp = gen_ld32(tmp2, i);
7109 }
7110 } else {
7111 /* store */
7112 tmp = load_reg(s, rd);
7113 if (insn & (1 << 22))
7114 gen_st8(tmp, tmp2, i);
7115 else
7116 gen_st32(tmp, tmp2, i);
7117 }
7118 if (!(insn & (1 << 24))) {
7119 gen_add_data_offset(s, insn, tmp2);
7120 store_reg(s, rn, tmp2);
7121 } else if (insn & (1 << 21)) {
7122 store_reg(s, rn, tmp2);
7123 } else {
7124 dead_tmp(tmp2);
7125 }
7126 if (insn & (1 << 20)) {
7127 /* Complete the load. */
7128 if (rd == 15)
7129 gen_bx(s, tmp);
7130 else
7131 store_reg(s, rd, tmp);
7132 }
7133 break;
7134 case 0x08:
7135 case 0x09:
7136 {
7137 int j, n, user, loaded_base;
7138 TCGv loaded_var;
7139 /* load/store multiple words */
7140 /* XXX: store correct base if write back */
7141 user = 0;
7142 if (insn & (1 << 22)) {
7143 if (IS_USER(s))
7144 goto illegal_op; /* only usable in supervisor mode */
7145
7146 if ((insn & (1 << 15)) == 0)
7147 user = 1;
7148 }
7149 rn = (insn >> 16) & 0xf;
7150 addr = load_reg(s, rn);
7151
7152 /* compute total size */
7153 loaded_base = 0;
7154 TCGV_UNUSED(loaded_var);
7155 n = 0;
7156 for (i = 0; i < 16; i++) {
7157 if (insn & (1 << i))
7158 n++;
7159 }
7160 /* XXX: test invalid n == 0 case ? */
7161 if (insn & (1 << 23)) {
7162 if (insn & (1 << 24)) {
7163 /* pre increment */
7164 tcg_gen_addi_i32(addr, addr, 4);
7165 } else {
7166 /* post increment */
7167 }
7168 } else {
7169 if (insn & (1 << 24)) {
7170 /* pre decrement */
7171 tcg_gen_addi_i32(addr, addr, -(n * 4));
7172 } else {
7173 /* post decrement */
7174 if (n != 1)
7175 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7176 }
7177 }
7178 j = 0;
7179 for (i = 0; i < 16; i++) {
7180 if (insn & (1 << i)) {
7181 if (insn & (1 << 20)) {
7182 /* load */
7183 tmp = gen_ld32(addr, IS_USER(s));
7184 if (i == 15) {
7185 gen_bx(s, tmp);
7186 } else if (user) {
7187 tmp2 = tcg_const_i32(i);
7188 gen_helper_set_user_reg(tmp2, tmp);
7189 tcg_temp_free_i32(tmp2);
7190 dead_tmp(tmp);
7191 } else if (i == rn) {
7192 loaded_var = tmp;
7193 loaded_base = 1;
7194 } else {
7195 store_reg(s, i, tmp);
7196 }
7197 } else {
7198 /* store */
7199 if (i == 15) {
7200 /* special case: r15 = PC + 8; s->pc is already 4 past this insn */
7201 val = (long)s->pc + 4;
7202 tmp = new_tmp();
7203 tcg_gen_movi_i32(tmp, val);
7204 } else if (user) {
7205 tmp = new_tmp();
7206 tmp2 = tcg_const_i32(i);
7207 gen_helper_get_user_reg(tmp, tmp2);
7208 tcg_temp_free_i32(tmp2);
7209 } else {
7210 tmp = load_reg(s, i);
7211 }
7212 gen_st32(tmp, addr, IS_USER(s));
7213 }
7214 j++;
7215 /* no need to add after the last transfer */
7216 if (j != n)
7217 tcg_gen_addi_i32(addr, addr, 4);
7218 }
7219 }
7220 if (insn & (1 << 21)) {
7221 /* write back */
7222 if (insn & (1 << 23)) {
7223 if (insn & (1 << 24)) {
7224 /* pre increment */
7225 } else {
7226 /* post increment */
7227 tcg_gen_addi_i32(addr, addr, 4);
7228 }
7229 } else {
7230 if (insn & (1 << 24)) {
7231 /* pre decrement */
7232 if (n != 1)
7233 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7234 } else {
7235 /* post decrement */
7236 tcg_gen_addi_i32(addr, addr, -(n * 4));
7237 }
7238 }
7239 store_reg(s, rn, addr);
7240 } else {
7241 dead_tmp(addr);
7242 }
7243 if (loaded_base) {
7244 store_reg(s, rn, loaded_var);
7245 }
7246 if ((insn & (1 << 22)) && !user) {
7247 /* Restore CPSR from SPSR. */
7248 tmp = load_cpu_field(spsr);
7249 gen_set_cpsr(tmp, 0xffffffff);
7250 dead_tmp(tmp);
7251 s->is_jmp = DISAS_UPDATE;
7252 }
7253 }
7254 break;
7255 case 0xa:
7256 case 0xb:
7257 {
7258 int32_t offset;
7259
7260 /* branch (and link) */
7261 val = (int32_t)s->pc;
7262 if (insn & (1 << 24)) {
7263 tmp = new_tmp();
7264 tcg_gen_movi_i32(tmp, val);
7265 store_reg(s, 14, tmp);
7266 }
7267 offset = (((int32_t)insn << 8) >> 8);
7268 val += (offset << 2) + 4;
7269 gen_jmp(s, val);
7270 }
7271 break;
7272 case 0xc:
7273 case 0xd:
7274 case 0xe:
7275 /* Coprocessor. */
7276 if (disas_coproc_insn(env, s, insn))
7277 goto illegal_op;
7278 break;
7279 case 0xf:
7280 /* swi */
7281 gen_set_pc_im(s->pc);
7282 s->is_jmp = DISAS_SWI;
7283 break;
7284 default:
7285 illegal_op:
7286 gen_exception_insn(s, 4, EXCP_UDEF);
7287 break;
7288 }
7289 }
7290 }
7291
7292 /* Return true if this is a Thumb-2 logical op. */
7293 static int
7294 thumb2_logic_op(int op)
7295 {
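/* Ops 0-4 are AND, BIC, ORR, ORN and EOR; 5-7 also fall in this
   range but are rejected as invalid by gen_thumb2_data_op. */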
7296 return (op < 8);
7297 }
7298
7299 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7300 then set condition code flags based on the result of the operation.
7301 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7302 to the high bit of T1.
7303 Returns zero if the opcode is valid. */
7304
7305 static int
7306 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7307 {
7308 int logic_cc;
7309
7310 logic_cc = 0;
7311 switch (op) {
7312 case 0: /* and */
7313 tcg_gen_and_i32(t0, t0, t1);
7314 logic_cc = conds;
7315 break;
7316 case 1: /* bic */
7317 tcg_gen_andc_i32(t0, t0, t1);
7318 logic_cc = conds;
7319 break;
7320 case 2: /* orr */
7321 tcg_gen_or_i32(t0, t0, t1);
7322 logic_cc = conds;
7323 break;
7324 case 3: /* orn */
7325 tcg_gen_not_i32(t1, t1);
7326 tcg_gen_or_i32(t0, t0, t1);
7327 logic_cc = conds;
7328 break;
7329 case 4: /* eor */
7330 tcg_gen_xor_i32(t0, t0, t1);
7331 logic_cc = conds;
7332 break;
7333 case 8: /* add */
7334 if (conds)
7335 gen_helper_add_cc(t0, t0, t1);
7336 else
7337 tcg_gen_add_i32(t0, t0, t1);
7338 break;
7339 case 10: /* adc */
7340 if (conds)
7341 gen_helper_adc_cc(t0, t0, t1);
7342 else
7343 gen_adc(t0, t1);
7344 break;
7345 case 11: /* sbc */
7346 if (conds)
7347 gen_helper_sbc_cc(t0, t0, t1);
7348 else
7349 gen_sub_carry(t0, t0, t1);
7350 break;
7351 case 13: /* sub */
7352 if (conds)
7353 gen_helper_sub_cc(t0, t0, t1);
7354 else
7355 tcg_gen_sub_i32(t0, t0, t1);
7356 break;
7357 case 14: /* rsb */
7358 if (conds)
7359 gen_helper_sub_cc(t0, t1, t0);
7360 else
7361 tcg_gen_sub_i32(t0, t1, t0);
7362 break;
7363 default: /* 5, 6, 7, 9, 12, 15. */
7364 return 1;
7365 }
7366 if (logic_cc) {
7367 gen_logic_CC(t0);
7368 if (shifter_out)
7369 gen_set_CF_bit31(t1);
7370 }
7371 return 0;
7372 }
7373
7374 /* Translate a 32-bit Thumb instruction. Returns nonzero if the instruction
7375 is not legal. */
7376 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7377 {
7378 uint32_t insn, imm, shift, offset;
7379 uint32_t rd, rn, rm, rs;
7380 TCGv tmp;
7381 TCGv tmp2;
7382 TCGv tmp3;
7383 TCGv addr;
7384 TCGv_i64 tmp64;
7385 int op;
7386 int shiftop;
7387 int conds;
7388 int logic_cc;
7389
7390 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7391 || arm_feature(env, ARM_FEATURE_M))) {
7392 /* Thumb-1 cores may need to treat bl and blx as a pair of
7393 16-bit instructions to get correct prefetch abort behavior. */
7394 insn = insn_hw1;
7395 if ((insn & (1 << 12)) == 0) {
7396 /* Second half of blx. */
7397 offset = ((insn & 0x7ff) << 1);
7398 tmp = load_reg(s, 14);
7399 tcg_gen_addi_i32(tmp, tmp, offset);
7400 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7401
7402 tmp2 = new_tmp();
7403 tcg_gen_movi_i32(tmp2, s->pc | 1);
7404 store_reg(s, 14, tmp2);
7405 gen_bx(s, tmp);
7406 return 0;
7407 }
7408 if (insn & (1 << 11)) {
7409 /* Second half of bl. */
7410 offset = ((insn & 0x7ff) << 1) | 1;
7411 tmp = load_reg(s, 14);
7412 tcg_gen_addi_i32(tmp, tmp, offset);
7413
7414 tmp2 = new_tmp();
7415 tcg_gen_movi_i32(tmp2, s->pc | 1);
7416 store_reg(s, 14, tmp2);
7417 gen_bx(s, tmp);
7418 return 0;
7419 }
7420 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7421 /* Instruction spans a page boundary. Implement it as two
7422 16-bit instructions in case the second half causes a
7423 prefetch abort. */
7424 offset = ((int32_t)insn << 21) >> 9;
7425 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7426 return 0;
7427 }
7428 /* Fall through to 32-bit decode. */
7429 }
7430
7431 insn = lduw_code(s->pc);
7432 s->pc += 2;
7433 insn |= (uint32_t)insn_hw1 << 16;
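/* The first halfword ends up in the high half, so bit numbers used
   below refer to hw1 as insn[31:16] and hw2 as insn[15:0]. */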
7434
7435 if ((insn & 0xf800e800) != 0xf000e800) {
7436 ARCH(6T2);
7437 }
7438
7439 rn = (insn >> 16) & 0xf;
7440 rs = (insn >> 12) & 0xf;
7441 rd = (insn >> 8) & 0xf;
7442 rm = insn & 0xf;
7443 switch ((insn >> 25) & 0xf) {
7444 case 0: case 1: case 2: case 3:
7445 /* 16-bit instructions. Should never happen. */
7446 abort();
7447 case 4:
7448 if (insn & (1 << 22)) {
7449 /* Other load/store, table branch. */
7450 if (insn & 0x01200000) {
7451 /* Load/store doubleword. */
7452 if (rn == 15) {
7453 addr = new_tmp();
7454 tcg_gen_movi_i32(addr, s->pc & ~3);
7455 } else {
7456 addr = load_reg(s, rn);
7457 }
7458 offset = (insn & 0xff) * 4;
7459 if ((insn & (1 << 23)) == 0)
7460 offset = -offset;
7461 if (insn & (1 << 24)) {
7462 tcg_gen_addi_i32(addr, addr, offset);
7463 offset = 0;
7464 }
7465 if (insn & (1 << 20)) {
7466 /* ldrd */
7467 tmp = gen_ld32(addr, IS_USER(s));
7468 store_reg(s, rs, tmp);
7469 tcg_gen_addi_i32(addr, addr, 4);
7470 tmp = gen_ld32(addr, IS_USER(s));
7471 store_reg(s, rd, tmp);
7472 } else {
7473 /* strd */
7474 tmp = load_reg(s, rs);
7475 gen_st32(tmp, addr, IS_USER(s));
7476 tcg_gen_addi_i32(addr, addr, 4);
7477 tmp = load_reg(s, rd);
7478 gen_st32(tmp, addr, IS_USER(s));
7479 }
7480 if (insn & (1 << 21)) {
7481 /* Base writeback. */
7482 if (rn == 15)
7483 goto illegal_op;
7484 tcg_gen_addi_i32(addr, addr, offset - 4);
7485 store_reg(s, rn, addr);
7486 } else {
7487 dead_tmp(addr);
7488 }
7489 } else if ((insn & (1 << 23)) == 0) {
7490 /* Load/store exclusive word. */
7491 addr = tcg_temp_local_new();
7492 load_reg_var(s, addr, rn);
7493 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7494 if (insn & (1 << 20)) {
7495 gen_load_exclusive(s, rs, 15, addr, 2);
7496 } else {
7497 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7498 }
7499 tcg_temp_free(addr);
7500 } else if ((insn & (1 << 6)) == 0) {
7501 /* Table Branch. */
7502 if (rn == 15) {
7503 addr = new_tmp();
7504 tcg_gen_movi_i32(addr, s->pc);
7505 } else {
7506 addr = load_reg(s, rn);
7507 }
7508 tmp = load_reg(s, rm);
7509 tcg_gen_add_i32(addr, addr, tmp);
7510 if (insn & (1 << 4)) {
7511 /* tbh */
7512 tcg_gen_add_i32(addr, addr, tmp);
7513 dead_tmp(tmp);
7514 tmp = gen_ld16u(addr, IS_USER(s));
7515 } else { /* tbb */
7516 dead_tmp(tmp);
7517 tmp = gen_ld8u(addr, IS_USER(s));
7518 }
7519 dead_tmp(addr);
7520 tcg_gen_shli_i32(tmp, tmp, 1);
7521 tcg_gen_addi_i32(tmp, tmp, s->pc);
7522 store_reg(s, 15, tmp);
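/* The loaded table entry is in units of halfwords: the branch
   target is the architectural PC (s->pc here, this insn's address
   plus 4) plus twice the entry; e.g. a tbb entry of 0x20 branches
   to pc + 0x40. */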
7523 } else {
7524 /* Load/store exclusive byte/halfword/doubleword. */
7525 ARCH(7);
7526 op = (insn >> 4) & 0x3;
7527 if (op == 2) {
7528 goto illegal_op;
7529 }
7530 addr = tcg_temp_local_new();
7531 load_reg_var(s, addr, rn);
7532 if (insn & (1 << 20)) {
7533 gen_load_exclusive(s, rs, rd, addr, op);
7534 } else {
7535 gen_store_exclusive(s, rm, rs, rd, addr, op);
7536 }
7537 tcg_temp_free(addr);
7538 }
7539 } else {
7540 /* Load/store multiple, RFE, SRS. */
7541 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
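/* Bits [24:23] equal (00 or 11) can only be SRS/RFE here; normal
   T2 LDM/STM use increment-after (01) or decrement-before (10). */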
7542 /* Not available in user mode. */
7543 if (IS_USER(s))
7544 goto illegal_op;
7545 if (insn & (1 << 20)) {
7546 /* rfe */
7547 addr = load_reg(s, rn);
7548 if ((insn & (1 << 24)) == 0)
7549 tcg_gen_addi_i32(addr, addr, -8);
7550 /* Load PC into tmp and CPSR into tmp2. */
7551 tmp = gen_ld32(addr, 0);
7552 tcg_gen_addi_i32(addr, addr, 4);
7553 tmp2 = gen_ld32(addr, 0);
7554 if (insn & (1 << 21)) {
7555 /* Base writeback. */
7556 if (insn & (1 << 24)) {
7557 tcg_gen_addi_i32(addr, addr, 4);
7558 } else {
7559 tcg_gen_addi_i32(addr, addr, -4);
7560 }
7561 store_reg(s, rn, addr);
7562 } else {
7563 dead_tmp(addr);
7564 }
7565 gen_rfe(s, tmp, tmp2);
7566 } else {
7567 /* srs */
7568 op = (insn & 0x1f);
7569 addr = new_tmp();
7570 tmp = tcg_const_i32(op);
7571 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7572 tcg_temp_free_i32(tmp);
7573 if ((insn & (1 << 24)) == 0) {
7574 tcg_gen_addi_i32(addr, addr, -8);
7575 }
7576 tmp = load_reg(s, 14);
7577 gen_st32(tmp, addr, 0);
7578 tcg_gen_addi_i32(addr, addr, 4);
7579 tmp = new_tmp();
7580 gen_helper_cpsr_read(tmp);
7581 gen_st32(tmp, addr, 0);
7582 if (insn & (1 << 21)) {
7583 if ((insn & (1 << 24)) == 0) {
7584 tcg_gen_addi_i32(addr, addr, -4);
7585 } else {
7586 tcg_gen_addi_i32(addr, addr, 4);
7587 }
7588 tmp = tcg_const_i32(op);
7589 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7590 tcg_temp_free_i32(tmp);
7591 } else {
7592 dead_tmp(addr);
7593 }
7594 }
7595 } else {
7596 int i;
7597 /* Load/store multiple. */
7598 addr = load_reg(s, rn);
7599 offset = 0;
7600 for (i = 0; i < 16; i++) {
7601 if (insn & (1 << i))
7602 offset += 4;
7603 }
7604 if (insn & (1 << 24)) {
7605 tcg_gen_addi_i32(addr, addr, -offset);
7606 }
7607
7608 for (i = 0; i < 16; i++) {
7609 if ((insn & (1 << i)) == 0)
7610 continue;
7611 if (insn & (1 << 20)) {
7612 /* Load. */
7613 tmp = gen_ld32(addr, IS_USER(s));
7614 if (i == 15) {
7615 gen_bx(s, tmp);
7616 } else {
7617 store_reg(s, i, tmp);
7618 }
7619 } else {
7620 /* Store. */
7621 tmp = load_reg(s, i);
7622 gen_st32(tmp, addr, IS_USER(s));
7623 }
7624 tcg_gen_addi_i32(addr, addr, 4);
7625 }
7626 if (insn & (1 << 21)) {
7627 /* Base register writeback. */
7628 if (insn & (1 << 24)) {
7629 tcg_gen_addi_i32(addr, addr, -offset);
7630 }
7631 /* Fault if writeback register is in register list. */
7632 if (insn & (1 << rn))
7633 goto illegal_op;
7634 store_reg(s, rn, addr);
7635 } else {
7636 dead_tmp(addr);
7637 }
7638 }
7639 }
7640 break;
7641 case 5:
7642
7643 op = (insn >> 21) & 0xf;
7644 if (op == 6) {
7645 /* Halfword pack. */
7646 tmp = load_reg(s, rn);
7647 tmp2 = load_reg(s, rm);
7648 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7649 if (insn & (1 << 5)) {
7650 /* pkhtb */
7651 if (shift == 0)
7652 shift = 31;
7653 tcg_gen_sari_i32(tmp2, tmp2, shift);
7654 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7655 tcg_gen_ext16u_i32(tmp2, tmp2);
7656 } else {
7657 /* pkhbt */
7658 if (shift)
7659 tcg_gen_shli_i32(tmp2, tmp2, shift);
7660 tcg_gen_ext16u_i32(tmp, tmp);
7661 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7662 }
7663 tcg_gen_or_i32(tmp, tmp, tmp2);
7664 dead_tmp(tmp2);
7665 store_reg(s, rd, tmp);
7666 } else {
7667 /* Data processing register constant shift. */
7668 if (rn == 15) {
7669 tmp = new_tmp();
7670 tcg_gen_movi_i32(tmp, 0);
7671 } else {
7672 tmp = load_reg(s, rn);
7673 }
7674 tmp2 = load_reg(s, rm);
7675
7676 shiftop = (insn >> 4) & 3;
7677 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7678 conds = (insn & (1 << 20)) != 0;
7679 logic_cc = (conds && thumb2_logic_op(op));
7680 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7681 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7682 goto illegal_op;
7683 dead_tmp(tmp2);
7684 if (rd != 15) {
7685 store_reg(s, rd, tmp);
7686 } else {
7687 dead_tmp(tmp);
7688 }
7689 }
7690 break;
7691 case 13: /* Misc data processing. */
7692 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7693 if (op < 4 && (insn & 0xf000) != 0xf000)
7694 goto illegal_op;
7695 switch (op) {
7696 case 0: /* Register controlled shift. */
7697 tmp = load_reg(s, rn);
7698 tmp2 = load_reg(s, rm);
7699 if ((insn & 0x70) != 0)
7700 goto illegal_op;
7701 op = (insn >> 21) & 3;
7702 logic_cc = (insn & (1 << 20)) != 0;
7703 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7704 if (logic_cc)
7705 gen_logic_CC(tmp);
7706 store_reg_bx(env, s, rd, tmp);
7707 break;
7708 case 1: /* Sign/zero extend. */
7709 tmp = load_reg(s, rm);
7710 shift = (insn >> 4) & 3;
7711 /* ??? In many cases it's not necessary to do a
7712 rotate; a shift is sufficient. */
7713 if (shift != 0)
7714 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7715 op = (insn >> 20) & 7;
7716 switch (op) {
7717 case 0: gen_sxth(tmp); break;
7718 case 1: gen_uxth(tmp); break;
7719 case 2: gen_sxtb16(tmp); break;
7720 case 3: gen_uxtb16(tmp); break;
7721 case 4: gen_sxtb(tmp); break;
7722 case 5: gen_uxtb(tmp); break;
7723 default: goto illegal_op;
7724 }
7725 if (rn != 15) {
7726 tmp2 = load_reg(s, rn);
7727 if ((op >> 1) == 1) {
7728 gen_add16(tmp, tmp2);
7729 } else {
7730 tcg_gen_add_i32(tmp, tmp, tmp2);
7731 dead_tmp(tmp2);
7732 }
7733 }
7734 store_reg(s, rd, tmp);
7735 break;
7736 case 2: /* SIMD add/subtract. */
7737 op = (insn >> 20) & 7;
7738 shift = (insn >> 4) & 7;
7739 if ((op & 3) == 3 || (shift & 3) == 3)
7740 goto illegal_op;
7741 tmp = load_reg(s, rn);
7742 tmp2 = load_reg(s, rm);
7743 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7744 dead_tmp(tmp2);
7745 store_reg(s, rd, tmp);
7746 break;
7747 case 3: /* Other data processing. */
7748 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7749 if (op < 4) {
7750 /* Saturating add/subtract. */
7751 tmp = load_reg(s, rn);
7752 tmp2 = load_reg(s, rm);
7753 if (op & 1)
7754 gen_helper_double_saturate(tmp, tmp);
7755 if (op & 2)
7756 gen_helper_sub_saturate(tmp, tmp2, tmp);
7757 else
7758 gen_helper_add_saturate(tmp, tmp, tmp2);
7759 dead_tmp(tmp2);
7760 } else {
7761 tmp = load_reg(s, rn);
7762 switch (op) {
7763 case 0x0a: /* rbit */
7764 gen_helper_rbit(tmp, tmp);
7765 break;
7766 case 0x08: /* rev */
7767 tcg_gen_bswap32_i32(tmp, tmp);
7768 break;
7769 case 0x09: /* rev16 */
7770 gen_rev16(tmp);
7771 break;
7772 case 0x0b: /* revsh */
7773 gen_revsh(tmp);
7774 break;
7775 case 0x10: /* sel */
7776 tmp2 = load_reg(s, rm);
7777 tmp3 = new_tmp();
7778 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7779 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7780 dead_tmp(tmp3);
7781 dead_tmp(tmp2);
7782 break;
7783 case 0x18: /* clz */
7784 gen_helper_clz(tmp, tmp);
7785 break;
7786 default:
7787 goto illegal_op;
7788 }
7789 }
7790 store_reg(s, rd, tmp);
7791 break;
7792 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7793 op = (insn >> 4) & 0xf;
7794 tmp = load_reg(s, rn);
7795 tmp2 = load_reg(s, rm);
7796 switch ((insn >> 20) & 7) {
7797 case 0: /* 32 x 32 -> 32 */
7798 tcg_gen_mul_i32(tmp, tmp, tmp2);
7799 dead_tmp(tmp2);
7800 if (rs != 15) {
7801 tmp2 = load_reg(s, rs);
7802 if (op)
7803 tcg_gen_sub_i32(tmp, tmp2, tmp);
7804 else
7805 tcg_gen_add_i32(tmp, tmp, tmp2);
7806 dead_tmp(tmp2);
7807 }
7808 break;
7809 case 1: /* 16 x 16 -> 32 */
7810 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7811 dead_tmp(tmp2);
7812 if (rs != 15) {
7813 tmp2 = load_reg(s, rs);
7814 gen_helper_add_setq(tmp, tmp, tmp2);
7815 dead_tmp(tmp2);
7816 }
7817 break;
7818 case 2: /* Dual multiply add. */
7819 case 4: /* Dual multiply subtract. */
7820 if (op)
7821 gen_swap_half(tmp2);
7822 gen_smul_dual(tmp, tmp2);
7823 /* This addition cannot overflow. */
7824 if (insn & (1 << 22)) {
7825 tcg_gen_sub_i32(tmp, tmp, tmp2);
7826 } else {
7827 tcg_gen_add_i32(tmp, tmp, tmp2);
7828 }
7829 dead_tmp(tmp2);
7830 if (rs != 15)
7831 {
7832 tmp2 = load_reg(s, rs);
7833 gen_helper_add_setq(tmp, tmp, tmp2);
7834 dead_tmp(tmp2);
7835 }
7836 break;
7837 case 3: /* 32 * 16 -> 32msb */
7838 if (op)
7839 tcg_gen_sari_i32(tmp2, tmp2, 16);
7840 else
7841 gen_sxth(tmp2);
7842 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7843 tcg_gen_shri_i64(tmp64, tmp64, 16);
7844 tmp = new_tmp();
7845 tcg_gen_trunc_i64_i32(tmp, tmp64);
7846 tcg_temp_free_i64(tmp64);
7847 if (rs != 15)
7848 {
7849 tmp2 = load_reg(s, rs);
7850 gen_helper_add_setq(tmp, tmp, tmp2);
7851 dead_tmp(tmp2);
7852 }
7853 break;
7854 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7855 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7856 if (rs != 15) {
7857 tmp = load_reg(s, rs);
7858 if (insn & (1 << 20)) {
7859 tmp64 = gen_addq_msw(tmp64, tmp);
7860 } else {
7861 tmp64 = gen_subq_msw(tmp64, tmp);
7862 }
7863 }
7864 if (insn & (1 << 4)) {
7865 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7866 }
7867 tcg_gen_shri_i64(tmp64, tmp64, 32);
7868 tmp = new_tmp();
7869 tcg_gen_trunc_i64_i32(tmp, tmp64);
7870 tcg_temp_free_i64(tmp64);
7871 break;
7872 case 7: /* Unsigned sum of absolute differences. */
7873 gen_helper_usad8(tmp, tmp, tmp2);
7874 dead_tmp(tmp2);
7875 if (rs != 15) {
7876 tmp2 = load_reg(s, rs);
7877 tcg_gen_add_i32(tmp, tmp, tmp2);
7878 dead_tmp(tmp2);
7879 }
7880 break;
7881 }
7882 store_reg(s, rd, tmp);
7883 break;
7884 case 6: case 7: /* 64-bit multiply, Divide. */
7885 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7886 tmp = load_reg(s, rn);
7887 tmp2 = load_reg(s, rm);
7888 if ((op & 0x50) == 0x10) {
7889 /* sdiv, udiv */
7890 if (!arm_feature(env, ARM_FEATURE_DIV))
7891 goto illegal_op;
7892 if (op & 0x20)
7893 gen_helper_udiv(tmp, tmp, tmp2);
7894 else
7895 gen_helper_sdiv(tmp, tmp, tmp2);
7896 dead_tmp(tmp2);
7897 store_reg(s, rd, tmp);
7898 } else if ((op & 0xe) == 0xc) {
7899 /* Dual multiply accumulate long. */
7900 if (op & 1)
7901 gen_swap_half(tmp2);
7902 gen_smul_dual(tmp, tmp2);
7903 if (op & 0x10) {
7904 tcg_gen_sub_i32(tmp, tmp, tmp2);
7905 } else {
7906 tcg_gen_add_i32(tmp, tmp, tmp2);
7907 }
7908 dead_tmp(tmp2);
7909 /* BUGFIX */
7910 tmp64 = tcg_temp_new_i64();
7911 tcg_gen_ext_i32_i64(tmp64, tmp);
7912 dead_tmp(tmp);
7913 gen_addq(s, tmp64, rs, rd);
7914 gen_storeq_reg(s, rs, rd, tmp64);
7915 tcg_temp_free_i64(tmp64);
7916 } else {
7917 if (op & 0x20) {
7918 /* Unsigned 64-bit multiply */
7919 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7920 } else {
7921 if (op & 8) {
7922 /* smlalxy */
7923 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7924 dead_tmp(tmp2);
7925 tmp64 = tcg_temp_new_i64();
7926 tcg_gen_ext_i32_i64(tmp64, tmp);
7927 dead_tmp(tmp);
7928 } else {
7929 /* Signed 64-bit multiply */
7930 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7931 }
7932 }
7933 if (op & 4) {
7934 /* umaal */
7935 gen_addq_lo(s, tmp64, rs);
7936 gen_addq_lo(s, tmp64, rd);
7937 } else if (op & 0x40) {
7938 /* 64-bit accumulate. */
7939 gen_addq(s, tmp64, rs, rd);
7940 }
7941 gen_storeq_reg(s, rs, rd, tmp64);
7942 tcg_temp_free_i64(tmp64);
7943 }
7944 break;
7945 }
7946 break;
7947 case 6: case 7: case 14: case 15:
7948 /* Coprocessor. */
7949 if (((insn >> 24) & 3) == 3) {
7950 /* Translate into the equivalent ARM encoding. */
7951 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7952 if (disas_neon_data_insn(env, s, insn))
7953 goto illegal_op;
7954 } else {
7955 if (insn & (1 << 28))
7956 goto illegal_op;
7957 if (disas_coproc_insn(env, s, insn))
7958 goto illegal_op;
7959 }
7960 break;
7961 case 8: case 9: case 10: case 11:
7962 if (insn & (1 << 15)) {
7963 /* Branches, misc control. */
7964 if (insn & 0x5000) {
7965 /* Unconditional branch. */
7966 /* signextend(hw1[10:0]) -> offset[:12]. */
7967 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7968 /* hw1[10:0] -> offset[11:1]. */
7969 offset |= (insn & 0x7ff) << 1;
7970 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7971 offset[24:22] already have the same value because of the
7972 sign extension above. */
7973 offset ^= ((~insn) & (1 << 13)) << 10;
7974 offset ^= ((~insn) & (1 << 11)) << 11;
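/* This implements I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S) from
   the BL/BLX encoding: the sign extension above seeded offset bits
   23:22 with S, and XORing with the inverted J1/J2 bits (hw2 bits
   13 and 11) produces I1:I2 in place. */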
7975
7976 if (insn & (1 << 14)) {
7977 /* Branch and link. */
7978 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
7979 }
7980
7981 offset += s->pc;
7982 if (insn & (1 << 12)) {
7983 /* b/bl */
7984 gen_jmp(s, offset);
7985 } else {
7986 /* blx */
7987 offset &= ~(uint32_t)2;
7988 gen_bx_im(s, offset);
7989 }
7990 } else if (((insn >> 23) & 7) == 7) {
7991 /* Misc control */
7992 if (insn & (1 << 13))
7993 goto illegal_op;
7994
7995 if (insn & (1 << 26)) {
7996 /* Secure monitor call (v6Z) */
7997 goto illegal_op; /* not implemented. */
7998 } else {
7999 op = (insn >> 20) & 7;
8000 switch (op) {
8001 case 0: /* msr cpsr. */
8002 if (IS_M(env)) {
8003 tmp = load_reg(s, rn);
8004 addr = tcg_const_i32(insn & 0xff);
8005 gen_helper_v7m_msr(cpu_env, addr, tmp);
8006 tcg_temp_free_i32(addr);
8007 dead_tmp(tmp);
8008 gen_lookup_tb(s);
8009 break;
8010 }
8011 /* fall through */
8012 case 1: /* msr spsr. */
8013 if (IS_M(env))
8014 goto illegal_op;
8015 tmp = load_reg(s, rn);
8016 if (gen_set_psr(s,
8017 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8018 op == 1, tmp))
8019 goto illegal_op;
8020 break;
8021 case 2: /* cps, nop-hint. */
8022 if (((insn >> 8) & 7) == 0) {
8023 gen_nop_hint(s, insn & 0xff);
8024 }
8025 /* Implemented as NOP in user mode. */
8026 if (IS_USER(s))
8027 break;
8028 offset = 0;
8029 imm = 0;
8030 if (insn & (1 << 10)) {
8031 if (insn & (1 << 7))
8032 offset |= CPSR_A;
8033 if (insn & (1 << 6))
8034 offset |= CPSR_I;
8035 if (insn & (1 << 5))
8036 offset |= CPSR_F;
8037 if (insn & (1 << 9))
8038 imm = CPSR_A | CPSR_I | CPSR_F;
8039 }
8040 if (insn & (1 << 8)) {
8041 offset |= 0x1f;
8042 imm |= (insn & 0x1f);
8043 }
8044 if (offset) {
8045 gen_set_psr_im(s, offset, 0, imm);
8046 }
8047 break;
8048 case 3: /* Special control operations. */
8049 ARCH(7);
8050 op = (insn >> 4) & 0xf;
8051 switch (op) {
8052 case 2: /* clrex */
8053 gen_clrex(s);
8054 break;
8055 case 4: /* dsb */
8056 case 5: /* dmb */
8057 case 6: /* isb */
8058 /* These execute as NOPs. */
8059 break;
8060 default:
8061 goto illegal_op;
8062 }
8063 break;
8064 case 4: /* bxj */
8065 /* Trivial implementation equivalent to bx. */
8066 tmp = load_reg(s, rn);
8067 gen_bx(s, tmp);
8068 break;
8069 case 5: /* Exception return. */
8070 if (IS_USER(s)) {
8071 goto illegal_op;
8072 }
8073 if (rn != 14 || rd != 15) {
8074 goto illegal_op;
8075 }
8076 tmp = load_reg(s, rn);
8077 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8078 gen_exception_return(s, tmp);
8079 break;
8080 case 6: /* mrs cpsr. */
8081 tmp = new_tmp();
8082 if (IS_M(env)) {
8083 addr = tcg_const_i32(insn & 0xff);
8084 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8085 tcg_temp_free_i32(addr);
8086 } else {
8087 gen_helper_cpsr_read(tmp);
8088 }
8089 store_reg(s, rd, tmp);
8090 break;
8091 case 7: /* mrs spsr. */
8092 /* Not accessible in user mode. */
8093 if (IS_USER(s) || IS_M(env))
8094 goto illegal_op;
8095 tmp = load_cpu_field(spsr);
8096 store_reg(s, rd, tmp);
8097 break;
8098 }
8099 }
8100 } else {
8101 /* Conditional branch. */
8102 op = (insn >> 22) & 0xf;
8103 /* Generate a conditional jump to next instruction. */
8104 s->condlabel = gen_new_label();
8105 gen_test_cc(op ^ 1, s->condlabel);
8106 s->condjmp = 1;
8107
8108 /* offset[11:1] = insn[10:0] */
8109 offset = (insn & 0x7ff) << 1;
8110 /* offset[17:12] = insn[21:16]. */
8111 offset |= (insn & 0x003f0000) >> 4;
8112 /* offset[31:20] = insn[26]. */
8113 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8114 /* offset[18] = insn[13]. */
8115 offset |= (insn & (1 << 13)) << 5;
8116 /* offset[19] = insn[11]. */
8117 offset |= (insn & (1 << 11)) << 8;
8118
8119 /* jump to the offset */
8120 gen_jmp(s, s->pc + offset);
8121 }
8122 } else {
8123 /* Data processing immediate. */
8124 if (insn & (1 << 25)) {
8125 if (insn & (1 << 24)) {
8126 if (insn & (1 << 20))
8127 goto illegal_op;
8128 /* Bitfield/Saturate. */
8129 op = (insn >> 21) & 7;
8130 imm = insn & 0x1f;
8131 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8132 if (rn == 15) {
8133 tmp = new_tmp();
8134 tcg_gen_movi_i32(tmp, 0);
8135 } else {
8136 tmp = load_reg(s, rn);
8137 }
8138 switch (op) {
8139 case 2: /* Signed bitfield extract. */
8140 imm++;
8141 if (shift + imm > 32)
8142 goto illegal_op;
8143 if (imm < 32)
8144 gen_sbfx(tmp, shift, imm);
8145 break;
8146 case 6: /* Unsigned bitfield extract. */
8147 imm++;
8148 if (shift + imm > 32)
8149 goto illegal_op;
8150 if (imm < 32)
8151 gen_ubfx(tmp, shift, (1u << imm) - 1);
8152 break;
8153 case 3: /* Bitfield insert/clear. */
8154 if (imm < shift)
8155 goto illegal_op;
8156 imm = imm + 1 - shift;
8157 if (imm != 32) {
8158 tmp2 = load_reg(s, rd);
8159 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8160 dead_tmp(tmp2);
8161 }
8162 break;
8163 case 7:
8164 goto illegal_op;
8165 default: /* Saturate. */
8166 if (shift) {
8167 if (op & 1)
8168 tcg_gen_sari_i32(tmp, tmp, shift);
8169 else
8170 tcg_gen_shli_i32(tmp, tmp, shift);
8171 }
8172 tmp2 = tcg_const_i32(imm);
8173 if (op & 4) {
8174 /* Unsigned. */
8175 if ((op & 1) && shift == 0)
8176 gen_helper_usat16(tmp, tmp, tmp2);
8177 else
8178 gen_helper_usat(tmp, tmp, tmp2);
8179 } else {
8180 /* Signed. */
8181 if ((op & 1) && shift == 0)
8182 gen_helper_ssat16(tmp, tmp, tmp2);
8183 else
8184 gen_helper_ssat(tmp, tmp, tmp2);
8185 }
8186 tcg_temp_free_i32(tmp2);
8187 break;
8188 }
8189 store_reg(s, rd, tmp);
8190 } else {
8191 imm = ((insn & 0x04000000) >> 15)
8192 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8193 if (insn & (1 << 22)) {
8194 /* 16-bit immediate. */
8195 imm |= (insn >> 4) & 0xf000;
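/* For movw/movt the full 16-bit value is imm4:i:imm3:imm8; the
   imm4 field comes from insn[19:16], the rest was assembled above. */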
8196 if (insn & (1 << 23)) {
8197 /* movt */
8198 tmp = load_reg(s, rd);
8199 tcg_gen_ext16u_i32(tmp, tmp);
8200 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8201 } else {
8202 /* movw */
8203 tmp = new_tmp();
8204 tcg_gen_movi_i32(tmp, imm);
8205 }
8206 } else {
8207 /* Add/sub 12-bit immediate. */
8208 if (rn == 15) {
8209 offset = s->pc & ~(uint32_t)3;
8210 if (insn & (1 << 23))
8211 offset -= imm;
8212 else
8213 offset += imm;
8214 tmp = new_tmp();
8215 tcg_gen_movi_i32(tmp, offset);
8216 } else {
8217 tmp = load_reg(s, rn);
8218 if (insn & (1 << 23))
8219 tcg_gen_subi_i32(tmp, tmp, imm);
8220 else
8221 tcg_gen_addi_i32(tmp, tmp, imm);
8222 }
8223 }
8224 store_reg(s, rd, tmp);
8225 }
8226 } else {
8227 int shifter_out = 0;
8228 /* modified 12-bit immediate. */
8229 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8230 imm = (insn & 0xff);
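/* Thumb-2 modified immediate: 'shift' here holds i:imm3.  Values
   0-3 replicate imm8 across the word (XY, 00XY00XY, XY00XY00,
   XYXYXYXY); 4-15 encode (0x80 | imm8[6:0]) rotated right by
   i:imm3:imm8[7].  E.g. i:imm3 = 0100 with imm8 = 0 gives
   0x80 ror 8 = 0x80000000, and shifter_out is set so logical ops
   take C from bit 31. */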
8231 switch (shift) {
8232 case 0: /* XY */
8233 /* Nothing to do. */
8234 break;
8235 case 1: /* 00XY00XY */
8236 imm |= imm << 16;
8237 break;
8238 case 2: /* XY00XY00 */
8239 imm |= imm << 16;
8240 imm <<= 8;
8241 break;
8242 case 3: /* XYXYXYXY */
8243 imm |= imm << 16;
8244 imm |= imm << 8;
8245 break;
8246 default: /* Rotated constant. */
8247 shift = (shift << 1) | (imm >> 7);
8248 imm |= 0x80;
8249 imm = imm << (32 - shift);
8250 shifter_out = 1;
8251 break;
8252 }
8253 tmp2 = new_tmp();
8254 tcg_gen_movi_i32(tmp2, imm);
8255 rn = (insn >> 16) & 0xf;
8256 if (rn == 15) {
8257 tmp = new_tmp();
8258 tcg_gen_movi_i32(tmp, 0);
8259 } else {
8260 tmp = load_reg(s, rn);
8261 }
8262 op = (insn >> 21) & 0xf;
8263 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8264 shifter_out, tmp, tmp2))
8265 goto illegal_op;
8266 dead_tmp(tmp2);
8267 rd = (insn >> 8) & 0xf;
8268 if (rd != 15) {
8269 store_reg(s, rd, tmp);
8270 } else {
8271 dead_tmp(tmp);
8272 }
8273 }
8274 }
8275 break;
8276 case 12: /* Load/store single data item. */
8277 {
8278 int postinc = 0;
8279 int writeback = 0;
8280 int user;
8281 if ((insn & 0x01100000) == 0x01000000) {
8282 if (disas_neon_ls_insn(env, s, insn))
8283 goto illegal_op;
8284 break;
8285 }
8286 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
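/* op[1:0] is the access size (0 = byte, 1 = halfword, 2 = word) and
   op[2] selects a sign-extending load, matching the switches below. */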
8287 if (rs == 15) {
8288 if (!(insn & (1 << 20))) {
8289 goto illegal_op;
8290 }
8291 if (op != 2) {
8292 /* Byte or halfword load space with dest == r15: memory hints.
8293 * Catch them early so we don't emit pointless addressing code.
8294 * This space is a mix of:
8295 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8296 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8297 * cores)
8298 * unallocated hints, which must be treated as NOPs
8299 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8300 * which is easiest for the decoding logic
8301 * Some space which must UNDEF
8302 */
8303 int op1 = (insn >> 23) & 3;
8304 int op2 = (insn >> 6) & 0x3f;
8305 if (op & 2) {
8306 goto illegal_op;
8307 }
8308 if (rn == 15) {
8309 /* UNPREDICTABLE or unallocated hint */
8310 return 0;
8311 }
8312 if (op1 & 1) {
8313 return 0; /* PLD* or unallocated hint */
8314 }
8315 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8316 return 0; /* PLD* or unallocated hint */
8317 }
8318 /* UNDEF space, or an UNPREDICTABLE */
8319 return 1;
8320 }
8321 }
8322 user = IS_USER(s);
8323 if (rn == 15) {
8324 addr = new_tmp();
8325 /* PC relative. */
8326 /* s->pc has already been incremented by 4. */
8327 imm = s->pc & 0xfffffffc;
8328 if (insn & (1 << 23))
8329 imm += insn & 0xfff;
8330 else
8331 imm -= insn & 0xfff;
8332 tcg_gen_movi_i32(addr, imm);
8333 } else {
8334 addr = load_reg(s, rn);
8335 if (insn & (1 << 23)) {
8336 /* Positive offset. */
8337 imm = insn & 0xfff;
8338 tcg_gen_addi_i32(addr, addr, imm);
8339 } else {
8340 imm = insn & 0xff;
8341 switch ((insn >> 8) & 7) {
8342 case 0: case 8: /* Shifted Register. */
8343 shift = (insn >> 4) & 0xf;
8344 if (shift > 3)
8345 goto illegal_op;
8346 tmp = load_reg(s, rm);
8347 if (shift)
8348 tcg_gen_shli_i32(tmp, tmp, shift);
8349 tcg_gen_add_i32(addr, addr, tmp);
8350 dead_tmp(tmp);
8351 break;
8352 case 4: /* Negative offset. */
8353 tcg_gen_addi_i32(addr, addr, -imm);
8354 break;
8355 case 6: /* User privilege. */
8356 tcg_gen_addi_i32(addr, addr, imm);
8357 user = 1;
8358 break;
8359 case 1: /* Post-decrement. */
8360 imm = -imm;
8361 /* Fall through. */
8362 case 3: /* Post-increment. */
8363 postinc = 1;
8364 writeback = 1;
8365 break;
8366 case 5: /* Pre-decrement. */
8367 imm = -imm;
8368 /* Fall through. */
8369 case 7: /* Pre-increment. */
8370 tcg_gen_addi_i32(addr, addr, imm);
8371 writeback = 1;
8372 break;
8373 default:
8374 goto illegal_op;
8375 }
8376 }
8377 }
8378 if (insn & (1 << 20)) {
8379 /* Load. */
8380 switch (op) {
8381 case 0: tmp = gen_ld8u(addr, user); break;
8382 case 4: tmp = gen_ld8s(addr, user); break;
8383 case 1: tmp = gen_ld16u(addr, user); break;
8384 case 5: tmp = gen_ld16s(addr, user); break;
8385 case 2: tmp = gen_ld32(addr, user); break;
8386 default: goto illegal_op;
8387 }
8388 if (rs == 15) {
8389 gen_bx(s, tmp);
8390 } else {
8391 store_reg(s, rs, tmp);
8392 }
8393 } else {
8394 /* Store. */
8395 tmp = load_reg(s, rs);
8396 switch (op) {
8397 case 0: gen_st8(tmp, addr, user); break;
8398 case 1: gen_st16(tmp, addr, user); break;
8399 case 2: gen_st32(tmp, addr, user); break;
8400 default: goto illegal_op;
8401 }
8402 }
8403 if (postinc)
8404 tcg_gen_addi_i32(addr, addr, imm);
8405 if (writeback) {
8406 store_reg(s, rn, addr);
8407 } else {
8408 dead_tmp(addr);
8409 }
8410 }
8411 break;
8412 default:
8413 goto illegal_op;
8414 }
8415 return 0;
8416 illegal_op:
8417 return 1;
8418 }
8419
8420 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8421 {
8422 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8423 int32_t offset;
8424 int i;
8425 TCGv tmp;
8426 TCGv tmp2;
8427 TCGv addr;
8428
8429 if (s->condexec_mask) {
8430 cond = s->condexec_cond;
8431 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8432 s->condlabel = gen_new_label();
8433 gen_test_cc(cond ^ 1, s->condlabel);
8434 s->condjmp = 1;
8435 }
8436 }
8437
8438 insn = lduw_code(s->pc);
8439 s->pc += 2;
8440
8441 switch (insn >> 12) {
8442 case 0: case 1:
8443
8444 rd = insn & 7;
8445 op = (insn >> 11) & 3;
8446 if (op == 3) {
8447 /* add/subtract */
8448 rn = (insn >> 3) & 7;
8449 tmp = load_reg(s, rn);
8450 if (insn & (1 << 10)) {
8451 /* immediate */
8452 tmp2 = new_tmp();
8453 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8454 } else {
8455 /* reg */
8456 rm = (insn >> 6) & 7;
8457 tmp2 = load_reg(s, rm);
8458 }
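/* Inside an IT block (condexec_mask != 0) these 16-bit add/sub
   encodings do not set the flags, so the plain TCG ops are used. */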
8459 if (insn & (1 << 9)) {
8460 if (s->condexec_mask)
8461 tcg_gen_sub_i32(tmp, tmp, tmp2);
8462 else
8463 gen_helper_sub_cc(tmp, tmp, tmp2);
8464 } else {
8465 if (s->condexec_mask)
8466 tcg_gen_add_i32(tmp, tmp, tmp2);
8467 else
8468 gen_helper_add_cc(tmp, tmp, tmp2);
8469 }
8470 dead_tmp(tmp2);
8471 store_reg(s, rd, tmp);
8472 } else {
8473 /* shift immediate */
8474 rm = (insn >> 3) & 7;
8475 shift = (insn >> 6) & 0x1f;
8476 tmp = load_reg(s, rm);
8477 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8478 if (!s->condexec_mask)
8479 gen_logic_CC(tmp);
8480 store_reg(s, rd, tmp);
8481 }
8482 break;
8483 case 2: case 3:
8484 /* arithmetic large immediate */
8485 op = (insn >> 11) & 3;
8486 rd = (insn >> 8) & 0x7;
8487 if (op == 0) { /* mov */
8488 tmp = new_tmp();
8489 tcg_gen_movi_i32(tmp, insn & 0xff);
8490 if (!s->condexec_mask)
8491 gen_logic_CC(tmp);
8492 store_reg(s, rd, tmp);
8493 } else {
8494 tmp = load_reg(s, rd);
8495 tmp2 = new_tmp();
8496 tcg_gen_movi_i32(tmp2, insn & 0xff);
8497 switch (op) {
8498 case 1: /* cmp */
8499 gen_helper_sub_cc(tmp, tmp, tmp2);
8500 dead_tmp(tmp);
8501 dead_tmp(tmp2);
8502 break;
8503 case 2: /* add */
8504 if (s->condexec_mask)
8505 tcg_gen_add_i32(tmp, tmp, tmp2);
8506 else
8507 gen_helper_add_cc(tmp, tmp, tmp2);
8508 dead_tmp(tmp2);
8509 store_reg(s, rd, tmp);
8510 break;
8511 case 3: /* sub */
8512 if (s->condexec_mask)
8513 tcg_gen_sub_i32(tmp, tmp, tmp2);
8514 else
8515 gen_helper_sub_cc(tmp, tmp, tmp2);
8516 dead_tmp(tmp2);
8517 store_reg(s, rd, tmp);
8518 break;
8519 }
8520 }
8521 break;
8522 case 4:
8523 if (insn & (1 << 11)) {
8524 rd = (insn >> 8) & 7;
8525 /* load pc-relative. Bit 1 of PC is ignored. */
8526 val = s->pc + 2 + ((insn & 0xff) * 4);
8527 val &= ~(uint32_t)2;
8528 addr = new_tmp();
8529 tcg_gen_movi_i32(addr, val);
8530 tmp = gen_ld32(addr, IS_USER(s));
8531 dead_tmp(addr);
8532 store_reg(s, rd, tmp);
8533 break;
8534 }
8535 if (insn & (1 << 10)) {
8536 /* data processing extended or blx */
8537 rd = (insn & 7) | ((insn >> 4) & 8);
8538 rm = (insn >> 3) & 0xf;
8539 op = (insn >> 8) & 3;
8540 switch (op) {
8541 case 0: /* add */
8542 tmp = load_reg(s, rd);
8543 tmp2 = load_reg(s, rm);
8544 tcg_gen_add_i32(tmp, tmp, tmp2);
8545 dead_tmp(tmp2);
8546 store_reg(s, rd, tmp);
8547 break;
8548 case 1: /* cmp */
8549 tmp = load_reg(s, rd);
8550 tmp2 = load_reg(s, rm);
8551 gen_helper_sub_cc(tmp, tmp, tmp2);
8552 dead_tmp(tmp2);
8553 dead_tmp(tmp);
8554 break;
8555 case 2: /* mov/cpy */
8556 tmp = load_reg(s, rm);
8557 store_reg(s, rd, tmp);
8558 break;
8559 case 3:/* branch [and link] exchange thumb register */
8560 tmp = load_reg(s, rm);
8561 if (insn & (1 << 7)) {
8562 val = (uint32_t)s->pc | 1;
8563 tmp2 = new_tmp();
8564 tcg_gen_movi_i32(tmp2, val);
8565 store_reg(s, 14, tmp2);
8566 }
8567 gen_bx(s, tmp);
8568 break;
8569 }
8570 break;
8571 }
8572
8573 /* data processing register */
8574 rd = insn & 7;
8575 rm = (insn >> 3) & 7;
8576 op = (insn >> 6) & 0xf;
8577 if (op == 2 || op == 3 || op == 4 || op == 7) {
8578 /* the shift/rotate ops want the operands backwards */
8579 val = rm;
8580 rm = rd;
8581 rd = val;
8582 val = 1;
8583 } else {
8584 val = 0;
8585 }
8586
8587 if (op == 9) { /* neg */
8588 tmp = new_tmp();
8589 tcg_gen_movi_i32(tmp, 0);
8590 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8591 tmp = load_reg(s, rd);
8592 } else {
8593 TCGV_UNUSED(tmp);
8594 }
8595
8596 tmp2 = load_reg(s, rm);
8597 switch (op) {
8598 case 0x0: /* and */
8599 tcg_gen_and_i32(tmp, tmp, tmp2);
8600 if (!s->condexec_mask)
8601 gen_logic_CC(tmp);
8602 break;
8603 case 0x1: /* eor */
8604 tcg_gen_xor_i32(tmp, tmp, tmp2);
8605 if (!s->condexec_mask)
8606 gen_logic_CC(tmp);
8607 break;
8608 case 0x2: /* lsl */
8609 if (s->condexec_mask) {
8610 gen_helper_shl(tmp2, tmp2, tmp);
8611 } else {
8612 gen_helper_shl_cc(tmp2, tmp2, tmp);
8613 gen_logic_CC(tmp2);
8614 }
8615 break;
8616 case 0x3: /* lsr */
8617 if (s->condexec_mask) {
8618 gen_helper_shr(tmp2, tmp2, tmp);
8619 } else {
8620 gen_helper_shr_cc(tmp2, tmp2, tmp);
8621 gen_logic_CC(tmp2);
8622 }
8623 break;
8624 case 0x4: /* asr */
8625 if (s->condexec_mask) {
8626 gen_helper_sar(tmp2, tmp2, tmp);
8627 } else {
8628 gen_helper_sar_cc(tmp2, tmp2, tmp);
8629 gen_logic_CC(tmp2);
8630 }
8631 break;
8632 case 0x5: /* adc */
8633 if (s->condexec_mask)
8634 gen_adc(tmp, tmp2);
8635 else
8636 gen_helper_adc_cc(tmp, tmp, tmp2);
8637 break;
8638 case 0x6: /* sbc */
8639 if (s->condexec_mask)
8640 gen_sub_carry(tmp, tmp, tmp2);
8641 else
8642 gen_helper_sbc_cc(tmp, tmp, tmp2);
8643 break;
8644 case 0x7: /* ror */
8645 if (s->condexec_mask) {
8646 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8647 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
8648 } else {
8649 gen_helper_ror_cc(tmp2, tmp2, tmp);
8650 gen_logic_CC(tmp2);
8651 }
8652 break;
8653 case 0x8: /* tst */
8654 tcg_gen_and_i32(tmp, tmp, tmp2);
8655 gen_logic_CC(tmp);
8656 rd = 16;
8657 break;
8658 case 0x9: /* neg */
8659 if (s->condexec_mask)
8660 tcg_gen_neg_i32(tmp, tmp2);
8661 else
8662 gen_helper_sub_cc(tmp, tmp, tmp2);
8663 break;
8664 case 0xa: /* cmp */
8665 gen_helper_sub_cc(tmp, tmp, tmp2);
8666 rd = 16;
8667 break;
8668 case 0xb: /* cmn */
8669 gen_helper_add_cc(tmp, tmp, tmp2);
8670 rd = 16;
8671 break;
8672 case 0xc: /* orr */
8673 tcg_gen_or_i32(tmp, tmp, tmp2);
8674 if (!s->condexec_mask)
8675 gen_logic_CC(tmp);
8676 break;
8677 case 0xd: /* mul */
8678 tcg_gen_mul_i32(tmp, tmp, tmp2);
8679 if (!s->condexec_mask)
8680 gen_logic_CC(tmp);
8681 break;
8682 case 0xe: /* bic */
8683 tcg_gen_andc_i32(tmp, tmp, tmp2);
8684 if (!s->condexec_mask)
8685 gen_logic_CC(tmp);
8686 break;
8687 case 0xf: /* mvn */
8688 tcg_gen_not_i32(tmp2, tmp2);
8689 if (!s->condexec_mask)
8690 gen_logic_CC(tmp2);
8691 val = 1;
8692 rm = rd;
8693 break;
8694 }
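        /* Writeback summary (illustrative): rd == 16 marks the
         * flag-setting compares (tst/cmp/cmn), which produce no register
         * result; val != 0 means the result was computed into tmp2. The
         * shift/rotate cases put the destination index into rm via the
         * swap above, and mvn does the same in its case arm. */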
8695 if (rd != 16) {
8696 if (val) {
8697 store_reg(s, rm, tmp2);
8698 if (op != 0xf)
8699 dead_tmp(tmp);
8700 } else {
8701 store_reg(s, rd, tmp);
8702 dead_tmp(tmp2);
8703 }
8704 } else {
8705 dead_tmp(tmp);
8706 dead_tmp(tmp2);
8707 }
8708 break;
8709
8710 case 5:
8711 /* load/store register offset. */
8712 rd = insn & 7;
8713 rn = (insn >> 3) & 7;
8714 rm = (insn >> 6) & 7;
8715 op = (insn >> 9) & 7;
8716 addr = load_reg(s, rn);
8717 tmp = load_reg(s, rm);
8718 tcg_gen_add_i32(addr, addr, tmp);
8719 dead_tmp(tmp);
8720
8721 if (op < 3) /* store */
8722 tmp = load_reg(s, rd);
8723
8724 switch (op) {
8725 case 0: /* str */
8726 gen_st32(tmp, addr, IS_USER(s));
8727 break;
8728 case 1: /* strh */
8729 gen_st16(tmp, addr, IS_USER(s));
8730 break;
8731 case 2: /* strb */
8732 gen_st8(tmp, addr, IS_USER(s));
8733 break;
8734 case 3: /* ldrsb */
8735 tmp = gen_ld8s(addr, IS_USER(s));
8736 break;
8737 case 4: /* ldr */
8738 tmp = gen_ld32(addr, IS_USER(s));
8739 break;
8740 case 5: /* ldrh */
8741 tmp = gen_ld16u(addr, IS_USER(s));
8742 break;
8743 case 6: /* ldrb */
8744 tmp = gen_ld8u(addr, IS_USER(s));
8745 break;
8746 case 7: /* ldrsh */
8747 tmp = gen_ld16s(addr, IS_USER(s));
8748 break;
8749 }
8750 if (op >= 3) /* load */
8751 store_reg(s, rd, tmp);
8752 dead_tmp(addr);
8753 break;
8754
8755 case 6:
8756 /* load/store word immediate offset */
8757 rd = insn & 7;
8758 rn = (insn >> 3) & 7;
8759 addr = load_reg(s, rn);
8760 val = (insn >> 4) & 0x7c;
8761 tcg_gen_addi_i32(addr, addr, val);
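        /* Offset decode (illustrative): the word form keeps imm5 in
         * insn[10:6] scaled by 4, so (insn >> 4) & 0x7c is just
         * ((insn >> 6) & 0x1f) << 2 done in one step; e.g. imm5 == 3
         * yields val = 12. The byte form below shifts by 6 with no
         * scaling, the halfword form by 5 with a scale of 2. */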
8762
8763 if (insn & (1 << 11)) {
8764 /* load */
8765 tmp = gen_ld32(addr, IS_USER(s));
8766 store_reg(s, rd, tmp);
8767 } else {
8768 /* store */
8769 tmp = load_reg(s, rd);
8770 gen_st32(tmp, addr, IS_USER(s));
8771 }
8772 dead_tmp(addr);
8773 break;
8774
8775 case 7:
8776 /* load/store byte immediate offset */
8777 rd = insn & 7;
8778 rn = (insn >> 3) & 7;
8779 addr = load_reg(s, rn);
8780 val = (insn >> 6) & 0x1f;
8781 tcg_gen_addi_i32(addr, addr, val);
8782
8783 if (insn & (1 << 11)) {
8784 /* load */
8785 tmp = gen_ld8u(addr, IS_USER(s));
8786 store_reg(s, rd, tmp);
8787 } else {
8788 /* store */
8789 tmp = load_reg(s, rd);
8790 gen_st8(tmp, addr, IS_USER(s));
8791 }
8792 dead_tmp(addr);
8793 break;
8794
8795 case 8:
8796 /* load/store halfword immediate offset */
8797 rd = insn & 7;
8798 rn = (insn >> 3) & 7;
8799 addr = load_reg(s, rn);
8800 val = (insn >> 5) & 0x3e;
8801 tcg_gen_addi_i32(addr, addr, val);
8802
8803 if (insn & (1 << 11)) {
8804 /* load */
8805 tmp = gen_ld16u(addr, IS_USER(s));
8806 store_reg(s, rd, tmp);
8807 } else {
8808 /* store */
8809 tmp = load_reg(s, rd);
8810 gen_st16(tmp, addr, IS_USER(s));
8811 }
8812 dead_tmp(addr);
8813 break;
8814
8815 case 9:
8816 /* load/store from stack */
8817 rd = (insn >> 8) & 7;
8818 addr = load_reg(s, 13);
8819 val = (insn & 0xff) * 4;
8820 tcg_gen_addi_i32(addr, addr, val);
8821
8822 if (insn & (1 << 11)) {
8823 /* load */
8824 tmp = gen_ld32(addr, IS_USER(s));
8825 store_reg(s, rd, tmp);
8826 } else {
8827 /* store */
8828 tmp = load_reg(s, rd);
8829 gen_st32(tmp, addr, IS_USER(s));
8830 }
8831 dead_tmp(addr);
8832 break;
8833
8834 case 10:
8835 /* add to high reg */
8836 rd = (insn >> 8) & 7;
8837 if (insn & (1 << 11)) {
8838 /* SP */
8839 tmp = load_reg(s, 13);
8840 } else {
8841 /* PC. bit 1 is ignored. */
8842 tmp = new_tmp();
8843 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8844 }
8845 val = (insn & 0xff) * 4;
8846 tcg_gen_addi_i32(tmp, tmp, val);
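        /* Worked example (illustrative): for ADR r0, <label> at 0x1000
         * with imm8 == 2, s->pc is already 0x1002, the base is
         * (0x1002 + 2) & ~2 = 0x1004, and r0 gets 0x1004 + 8 = 0x100c,
         * matching Align(PC + 4, 4) + imm8 * 4. */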
8847 store_reg(s, rd, tmp);
8848 break;
8849
8850 case 11:
8851 /* misc */
8852 op = (insn >> 8) & 0xf;
8853 switch (op) {
8854 case 0:
8855 /* adjust stack pointer */
8856 tmp = load_reg(s, 13);
8857 val = (insn & 0x7f) * 4;
8858 if (insn & (1 << 7))
8859 val = -(int32_t)val;
8860 tcg_gen_addi_i32(tmp, tmp, val);
8861 store_reg(s, 13, tmp);
8862 break;
8863
8864 case 2: /* sign/zero extend. */
8865 ARCH(6);
8866 rd = insn & 7;
8867 rm = (insn >> 3) & 7;
8868 tmp = load_reg(s, rm);
8869 switch ((insn >> 6) & 3) {
8870 case 0: gen_sxth(tmp); break;
8871 case 1: gen_sxtb(tmp); break;
8872 case 2: gen_uxth(tmp); break;
8873 case 3: gen_uxtb(tmp); break;
8874 }
8875 store_reg(s, rd, tmp);
8876 break;
8877 case 4: case 5: case 0xc: case 0xd:
8878 /* push/pop */
8879 addr = load_reg(s, 13);
8880 if (insn & (1 << 8))
8881 offset = 4;
8882 else
8883 offset = 0;
8884 for (i = 0; i < 8; i++) {
8885 if (insn & (1 << i))
8886 offset += 4;
8887 }
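            /* The transfer size is precomputed so that a push can
             * pre-decrement the base before any store. Illustrative
             * walkthrough: push {r0, r1, lr} gives offset = 12; stores
             * land at SP-12, SP-8 and SP-4, addr ends back at the old
             * SP, and the final addr -= offset below re-derives SP-12
             * as the new stack pointer. */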
8888 if ((insn & (1 << 11)) == 0) {
8889 tcg_gen_addi_i32(addr, addr, -offset);
8890 }
8891 for (i = 0; i < 8; i++) {
8892 if (insn & (1 << i)) {
8893 if (insn & (1 << 11)) {
8894 /* pop */
8895 tmp = gen_ld32(addr, IS_USER(s));
8896 store_reg(s, i, tmp);
8897 } else {
8898 /* push */
8899 tmp = load_reg(s, i);
8900 gen_st32(tmp, addr, IS_USER(s));
8901 }
8902 /* advance to the next address. */
8903 tcg_gen_addi_i32(addr, addr, 4);
8904 }
8905 }
8906 TCGV_UNUSED(tmp);
8907 if (insn & (1 << 8)) {
8908 if (insn & (1 << 11)) {
8909 /* pop pc */
8910 tmp = gen_ld32(addr, IS_USER(s));
8911 /* don't set the pc until the rest of the instruction
8912 has completed */
8913 } else {
8914 /* push lr */
8915 tmp = load_reg(s, 14);
8916 gen_st32(tmp, addr, IS_USER(s));
8917 }
8918 tcg_gen_addi_i32(addr, addr, 4);
8919 }
8920 if ((insn & (1 << 11)) == 0) {
8921 tcg_gen_addi_i32(addr, addr, -offset);
8922 }
8923 /* write back the new stack pointer */
8924 store_reg(s, 13, addr);
8925 /* set the new PC value */
8926 if ((insn & 0x0900) == 0x0900)
8927 gen_bx(s, tmp);
8928 break;
8929
8930 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8931 rm = insn & 7;
8932 tmp = load_reg(s, rm);
8933 s->condlabel = gen_new_label();
8934 s->condjmp = 1;
8935 if (insn & (1 << 11))
8936 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8937 else
8938 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8939 dead_tmp(tmp);
8940 offset = ((insn & 0xf8) >> 2) | ((insn & 0x200) >> 3);
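            /* Offset decode (illustrative): the branch offset is
             * i:imm5:'0', so imm5 from insn[7:3] shifted left once is
             * (insn & 0xf8) >> 2, and the i bit at insn[9] contributes
             * bit 6 via (insn & 0x200) >> 3; e.g. imm5 == 2 with i == 0
             * branches to this insn's address + 4 + 4. */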
8941 val = (uint32_t)s->pc + 2;
8942 val += offset;
8943 gen_jmp(s, val);
8944 break;
8945
8946 case 15: /* IT, nop-hint. */
8947 if ((insn & 0xf) == 0) {
8948 gen_nop_hint(s, (insn >> 4) & 0xf);
8949 break;
8950 }
8951 /* If Then. */
8952 s->condexec_cond = (insn >> 4) & 0xe;
8953 s->condexec_mask = insn & 0x1f;
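            /* Illustrative note: the low bit of firstcond is
             * deliberately folded into bit 4 of condexec_mask
             * (insn & 0x1f keeps insn[4]), so the per-insn advance in
             * gen_intermediate_code_internal() can recover each step's
             * condition just by shifting the mask; see the walkthrough
             * there. */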
8954 /* No actual code generated for this insn, just setup state. */
8955 break;
8956
8957 case 0xe: /* bkpt */
8958 gen_exception_insn(s, 2, EXCP_BKPT);
8959 break;
8960
8961 case 0xa: /* rev */
8962 ARCH(6);
8963 rn = (insn >> 3) & 0x7;
8964 rd = insn & 0x7;
8965 tmp = load_reg(s, rn);
8966 switch ((insn >> 6) & 3) {
8967 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8968 case 1: gen_rev16(tmp); break;
8969 case 3: gen_revsh(tmp); break;
8970 default: goto illegal_op;
8971 }
8972 store_reg(s, rd, tmp);
8973 break;
8974
8975 case 6: /* cps */
8976 ARCH(6);
8977 if (IS_USER(s))
8978 break;
8979 if (IS_M(env)) {
8980 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
8981 /* PRIMASK */
8982 if (insn & 1) {
8983 addr = tcg_const_i32(16);
8984 gen_helper_v7m_msr(cpu_env, addr, tmp);
8985 tcg_temp_free_i32(addr);
8986 }
8987 /* FAULTMASK */
8988 if (insn & 2) {
8989 addr = tcg_const_i32(17);
8990 gen_helper_v7m_msr(cpu_env, addr, tmp);
8991 tcg_temp_free_i32(addr);
8992 }
8993 tcg_temp_free_i32(tmp);
8994 gen_lookup_tb(s);
8995 } else {
8996 if (insn & (1 << 4))
8997 shift = CPSR_A | CPSR_I | CPSR_F;
8998 else
8999 shift = 0;
9000 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9001 }
9002 break;
9003
9004 default:
9005 goto undef;
9006 }
9007 break;
9008
9009 case 12:
9010 /* load/store multiple */
9011 rn = (insn >> 8) & 0x7;
9012 addr = load_reg(s, rn);
9013 for (i = 0; i < 8; i++) {
9014 if (insn & (1 << i)) {
9015 if (insn & (1 << 11)) {
9016 /* load */
9017 tmp = gen_ld32(addr, IS_USER(s));
9018 store_reg(s, i, tmp);
9019 } else {
9020 /* store */
9021 tmp = load_reg(s, i);
9022 gen_st32(tmp, addr, IS_USER(s));
9023 }
9024 /* advance to the next address */
9025 tcg_gen_addi_i32(addr, addr, 4);
9026 }
9027 }
9028 /* Base register writeback. */
9029 if ((insn & (1 << rn)) == 0) {
9030 store_reg(s, rn, addr);
9031 } else {
9032 dead_tmp(addr);
9033 }
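        /* Writeback rule (illustrative): the base is updated only when
         * rn is absent from the register list, so e.g. ldmia r0!,
         * {r1, r2} advances r0 by 8, while ldmia r0, {r0, r1} keeps the
         * loaded r0 and drops the incremented address. */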
9034 break;
9035
9036 case 13:
9037 /* conditional branch or swi */
9038 cond = (insn >> 8) & 0xf;
9039 if (cond == 0xe)
9040 goto undef;
9041
9042 if (cond == 0xf) {
9043 /* swi */
9044 gen_set_pc_im(s->pc);
9045 s->is_jmp = DISAS_SWI;
9046 break;
9047 }
9048 /* generate a conditional jump to next instruction */
9049 s->condlabel = gen_new_label();
9050 gen_test_cc(cond ^ 1, s->condlabel);
9051 s->condjmp = 1;
9052
9053 /* jump to the offset */
9054 val = (uint32_t)s->pc + 2;
9055 offset = ((int32_t)insn << 24) >> 24;
9056 val += offset << 1;
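            /* Sign extension by shift pair (illustrative): imm8 == 0xfe
             * becomes offset = -2 after the << 24 / >> 24 pair, so with
             * val = insn address + 4 the target is the insn address
             * itself, i.e. a branch back to this instruction (a tight
             * poll loop). */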
9057 gen_jmp(s, val);
9058 break;
9059
9060 case 14:
9061 if (insn & (1 << 11)) {
9062 if (disas_thumb2_insn(env, s, insn))
9063 goto undef32;
9064 break;
9065 }
9066 /* unconditional branch */
9067 val = (uint32_t)s->pc;
9068 offset = ((int32_t)insn << 21) >> 21;
9069 val += (offset << 1) + 2;
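            /* Same shift-pair trick as the conditional form above, here
             * sign-extending an 11-bit immediate: ((int32_t)insn << 21)
             * >> 21 keeps insn[10:0] with its sign, and the target works
             * out to the insn address + 4 + imm11 * 2 (illustrative
             * note). */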
9070 gen_jmp(s, val);
9071 break;
9072
9073 case 15:
9074 if (disas_thumb2_insn(env, s, insn))
9075 goto undef32;
9076 break;
9077 }
9078 return;
9079 undef32:
9080 gen_exception_insn(s, 4, EXCP_UDEF);
9081 return;
9082 illegal_op:
9083 undef:
9084 gen_exception_insn(s, 2, EXCP_UDEF);
9085 }
9086
9087 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9088 basic block 'tb'. If search_pc is TRUE, also generate PC
9089 information for each intermediate instruction. */
9090 static inline void gen_intermediate_code_internal(CPUState *env,
9091 TranslationBlock *tb,
9092 int search_pc)
9093 {
9094 DisasContext dc1, *dc = &dc1;
9095 CPUBreakpoint *bp;
9096 uint16_t *gen_opc_end;
9097 int j, lj;
9098 target_ulong pc_start;
9099 uint32_t next_page_start;
9100 int num_insns;
9101 int max_insns;
9102
9103 /* generate intermediate code */
9104 num_temps = 0;
9105
9106 pc_start = tb->pc;
9107
9108 dc->tb = tb;
9109
9110 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9111
9112 dc->is_jmp = DISAS_NEXT;
9113 dc->pc = pc_start;
9114 dc->singlestep_enabled = env->singlestep_enabled;
9115 dc->condjmp = 0;
9116 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9117 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9118 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9119 #if !defined(CONFIG_USER_ONLY)
9120 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9121 #endif
9122 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9123 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9124 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9125 cpu_F0s = tcg_temp_new_i32();
9126 cpu_F1s = tcg_temp_new_i32();
9127 cpu_F0d = tcg_temp_new_i64();
9128 cpu_F1d = tcg_temp_new_i64();
9129 cpu_V0 = cpu_F0d;
9130 cpu_V1 = cpu_F1d;
9131 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9132 cpu_M0 = tcg_temp_new_i64();
9133 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9134 lj = -1;
9135 num_insns = 0;
9136 max_insns = tb->cflags & CF_COUNT_MASK;
9137 if (max_insns == 0)
9138 max_insns = CF_COUNT_MASK;
9139
9140 gen_icount_start();
9141
9142 /* A note on handling of the condexec (IT) bits:
9143 *
9144 * We want to avoid the overhead of having to write the updated condexec
9145 * bits back to the CPUState for every instruction in an IT block. So:
9146 * (1) if the condexec bits are not already zero then we write
9147 * zero back into the CPUState now. This avoids complications trying
9148 * to do it at the end of the block. (For example if we don't do this
9149 * it's hard to identify whether we can safely skip writing condexec
9150 * at the end of the TB, which we definitely want to do for the case
9151 * where a TB doesn't do anything with the IT state at all.)
9152 * (2) if we are going to leave the TB then we call gen_set_condexec()
9153 * which will write the correct value into CPUState if zero is wrong.
9154 * This is done both for leaving the TB at the end, and for leaving
9155 * it because of an exception we know will happen, which is done in
9156 * gen_exception_insn(). The latter is necessary because we need to
9157 * leave the TB with the PC/IT state just prior to execution of the
9158 * instruction which caused the exception.
9159 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9160 * then the CPUState will be wrong and we need to reset it.
9161 * This is handled in the same way as restoration of the
9162 * PC in these situations: we will be called again with search_pc=1
9163 * and generate a mapping of the condexec bits for each PC in
9164 * gen_opc_condexec_bits[]. gen_pc_load() then uses this to restore
9165 * the condexec bits.
9166 *
9167 * Note that there are no instructions which can read the condexec
9168 * bits, and none which can write non-static values to them, so
9169 * we don't need to care about whether CPUState is correct in the
9170 * middle of a TB.
9171 */
9172
9173 /* Reset the conditional execution bits immediately. This avoids
9174 complications trying to do it at the end of the block. */
9175 if (dc->condexec_mask || dc->condexec_cond)
9176 {
9177 TCGv tmp = new_tmp();
9178 tcg_gen_movi_i32(tmp, 0);
9179 store_cpu_field(tmp, condexec_bits);
9180 }
9181 do {
9182 #ifdef CONFIG_USER_ONLY
9183 /* Intercept jump to the magic kernel page. */
9184 if (dc->pc >= 0xffff0000) {
9185 /* We always get here via a jump, so we know we are not in a
9186 conditional execution block. */
9187 gen_exception(EXCP_KERNEL_TRAP);
9188 dc->is_jmp = DISAS_UPDATE;
9189 break;
9190 }
9191 #else
9192 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9193 /* We always get here via a jump, so we know we are not in a
9194 conditional execution block. */
9195 gen_exception(EXCP_EXCEPTION_EXIT);
9196 dc->is_jmp = DISAS_UPDATE;
9197 break;
9198 }
9199 #endif
9200
9201 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9202 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9203 if (bp->pc == dc->pc) {
9204 gen_exception_insn(dc, 0, EXCP_DEBUG);
9205 /* Advance PC so that clearing the breakpoint will
9206 invalidate this TB. */
9207 dc->pc += 2;
9208 goto done_generating;
9209 break;
9210 }
9211 }
9212 }
9213 if (search_pc) {
9214 j = gen_opc_ptr - gen_opc_buf;
9215 if (lj < j) {
9216 lj++;
9217 while (lj < j)
9218 gen_opc_instr_start[lj++] = 0;
9219 }
9220 gen_opc_pc[lj] = dc->pc;
9221 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9222 gen_opc_instr_start[lj] = 1;
9223 gen_opc_icount[lj] = num_insns;
9224 }
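            /* Bookkeeping sketch (illustrative): for each guest insn the
             * index of its first TCG op is flagged in
             * gen_opc_instr_start[] and its pc recorded in gen_opc_pc[];
             * the condexec packing above mirrors the TB-flags layout
             * decoded at the top of this function, so gen_pc_load() can
             * restore both pc and IT state after a fault mid-TB. */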
9225
9226 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9227 gen_io_start();
9228
9229 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9230 tcg_gen_debug_insn_start(dc->pc);
9231 }
9232
9233 if (dc->thumb) {
9234 disas_thumb_insn(env, dc);
9235 if (dc->condexec_mask) {
9236 dc->condexec_cond = (dc->condexec_cond & 0xe)
9237 | ((dc->condexec_mask >> 4) & 1);
9238 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9239 if (dc->condexec_mask == 0) {
9240 dc->condexec_cond = 0;
9241 }
9242 }
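            /* Walkthrough (illustrative): for ITE EQ the IT insn stores
             * cond = 0b0000 & 0xe and mask = 0b01100 (firstcond[0] in
             * bit 4). Successive advances here yield cond = EQ with
             * mask = 0b11000 for the first insn in the block, then
             * cond = NE with mask = 0b10000 for the second, then
             * mask = 0, which clears cond and ends the block. */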
9243 } else {
9244 disas_arm_insn(env, dc);
9245 }
9246 if (num_temps) {
9247 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
9248 num_temps = 0;
9249 }
9250
9251 if (dc->condjmp && !dc->is_jmp) {
9252 gen_set_label(dc->condlabel);
9253 dc->condjmp = 0;
9254 }
9255 /* Translation stops when a conditional branch is encountered.
9256 * Otherwise the subsequent code could get translated several times.
9257 * Also stop translation when a page boundary is reached. This
9258 * ensures prefetch aborts occur at the right place. */
9259 num_insns++;
9260 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9261 !env->singlestep_enabled &&
9262 !singlestep &&
9263 dc->pc < next_page_start &&
9264 num_insns < max_insns);
9265
9266 if (tb->cflags & CF_LAST_IO) {
9267 if (dc->condjmp) {
9268 /* FIXME: This can theoretically happen with self-modifying
9269 code. */
9270 cpu_abort(env, "IO on conditional branch instruction");
9271 }
9272 gen_io_end();
9273 }
9274
9275 /* At this stage dc->condjmp will only be set when the skipped
9276 instruction was a conditional branch or trap, and the PC has
9277 already been written. */
9278 if (unlikely(env->singlestep_enabled)) {
9279 /* Make sure the pc is updated, and raise a debug exception. */
9280 if (dc->condjmp) {
9281 gen_set_condexec(dc);
9282 if (dc->is_jmp == DISAS_SWI) {
9283 gen_exception(EXCP_SWI);
9284 } else {
9285 gen_exception(EXCP_DEBUG);
9286 }
9287 gen_set_label(dc->condlabel);
9288 }
9289 if (dc->condjmp || !dc->is_jmp) {
9290 gen_set_pc_im(dc->pc);
9291 dc->condjmp = 0;
9292 }
9293 gen_set_condexec(dc);
9294 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9295 gen_exception(EXCP_SWI);
9296 } else {
9297 /* FIXME: Single stepping a WFI insn will not halt
9298 the CPU. */
9299 gen_exception(EXCP_DEBUG);
9300 }
9301 } else {
9302 /* While branches must always occur at the end of an IT block,
9303 there are a few other things that can cause us to terminate
9304 the TB in the middle of an IT block:
9305 - Exception generating instructions (bkpt, swi, undefined).
9306 - Page boundaries.
9307 - Hardware watchpoints.
9308 Hardware breakpoints have already been handled and skip this code.
9309 */
9310 gen_set_condexec(dc);
9311 switch (dc->is_jmp) {
9312 case DISAS_NEXT:
9313 gen_goto_tb(dc, 1, dc->pc);
9314 break;
9315 default:
9316 case DISAS_JUMP:
9317 case DISAS_UPDATE:
9318 /* indicate that the hash table must be used to find the next TB */
9319 tcg_gen_exit_tb(0);
9320 break;
9321 case DISAS_TB_JUMP:
9322 /* nothing more to generate */
9323 break;
9324 case DISAS_WFI:
9325 gen_helper_wfi();
9326 break;
9327 case DISAS_SWI:
9328 gen_exception(EXCP_SWI);
9329 break;
9330 }
9331 if (dc->condjmp) {
9332 gen_set_label(dc->condlabel);
9333 gen_set_condexec(dc);
9334 gen_goto_tb(dc, 1, dc->pc);
9335 dc->condjmp = 0;
9336 }
9337 }
9338
9339 done_generating:
9340 gen_icount_end(tb, num_insns);
9341 *gen_opc_ptr = INDEX_op_end;
9342
9343 #ifdef DEBUG_DISAS
9344 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9345 qemu_log("----------------\n");
9346 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9347 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
9348 qemu_log("\n");
9349 }
9350 #endif
9351 if (search_pc) {
9352 j = gen_opc_ptr - gen_opc_buf;
9353 lj++;
9354 while (lj <= j)
9355 gen_opc_instr_start[lj++] = 0;
9356 } else {
9357 tb->size = dc->pc - pc_start;
9358 tb->icount = num_insns;
9359 }
9360 }
9361
9362 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
9363 {
9364 gen_intermediate_code_internal(env, tb, 0);
9365 }
9366
9367 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
9368 {
9369 gen_intermediate_code_internal(env, tb, 1);
9370 }
9371
9372 static const char *cpu_mode_names[16] = {
9373 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9374 "???", "???", "???", "und", "???", "???", "???", "sys"
9375 };
9376
9377 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
9378 int flags)
9379 {
9380 int i;
9381 #if 0
9382 union {
9383 uint32_t i;
9384 float s;
9385 } s0, s1;
9386 CPU_DoubleU d;
9387 /* ??? This assumes float64 and double have the same layout.
9388 Oh well, it's only debug dumps. */
9389 union {
9390 float64 f64;
9391 double d;
9392 } d0;
9393 #endif
9394 uint32_t psr;
9395
9396 for (i = 0; i < 16; i++) {
9397 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9398 if ((i % 4) == 3)
9399 cpu_fprintf(f, "\n");
9400 else
9401 cpu_fprintf(f, " ");
9402 }
9403 psr = cpsr_read(env);
9404 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9405 psr,
9406 psr & (1 << 31) ? 'N' : '-',
9407 psr & (1 << 30) ? 'Z' : '-',
9408 psr & (1 << 29) ? 'C' : '-',
9409 psr & (1 << 28) ? 'V' : '-',
9410 psr & CPSR_T ? 'T' : 'A',
9411 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
9412
9413 #if 0
9414 for (i = 0; i < 16; i++) {
9415 d.d = env->vfp.regs[i];
9416 s0.i = d.l.lower;
9417 s1.i = d.l.upper;
9418 d0.f64 = d.d;
9419 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9420 i * 2, (int)s0.i, s0.s,
9421 i * 2 + 1, (int)s1.i, s1.s,
9422 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
9423 d0.d);
9424 }
9425 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
9426 #endif
9427 }
9428
9429 void gen_pc_load(CPUState *env, TranslationBlock *tb,
9430 unsigned long searched_pc, int pc_pos, void *puc)
9431 {
9432 env->regs[15] = gen_opc_pc[pc_pos];
9433 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
9434 }