target-arm/translate.c
target-arm: Fix shift by immediate and narrow where src, dest overlap
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
30 #include "tcg-op.h"
31 #include "qemu-log.h"
32
33 #include "helpers.h"
34 #define GEN_HELPER 1
35 #include "helpers.h"
36
37 #define ENABLE_ARCH_5J 0
38 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
42
43 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
44
45 /* internal defines */
46 typedef struct DisasContext {
47 target_ulong pc;
48 int is_jmp;
49 /* Nonzero if this instruction has been conditionally skipped. */
50 int condjmp;
51 /* The label that will be jumped to when the instruction is skipped. */
52 int condlabel;
54 /* Thumb-2 conditional execution bits. */
54 int condexec_mask;
55 int condexec_cond;
56 struct TranslationBlock *tb;
57 int singlestep_enabled;
58 int thumb;
59 #if !defined(CONFIG_USER_ONLY)
60 int user;
61 #endif
62 int vfp_enabled;
63 int vec_len;
64 int vec_stride;
65 } DisasContext;
66
67 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
68
69 #if defined(CONFIG_USER_ONLY)
70 #define IS_USER(s) 1
71 #else
72 #define IS_USER(s) (s->user)
73 #endif
74
75 /* These instructions trap after executing, so defer them until after the
76 conditional execution state has been updated. */
77 #define DISAS_WFI 4
78 #define DISAS_SWI 5
79
80 static TCGv_ptr cpu_env;
81 /* We reuse the same 64-bit temporaries for efficiency. */
82 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
83 static TCGv_i32 cpu_R[16];
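/* State backing the exclusive monitor used by ldrex/strex. */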
84 static TCGv_i32 cpu_exclusive_addr;
85 static TCGv_i32 cpu_exclusive_val;
86 static TCGv_i32 cpu_exclusive_high;
87 #ifdef CONFIG_USER_ONLY
88 static TCGv_i32 cpu_exclusive_test;
89 static TCGv_i32 cpu_exclusive_info;
90 #endif
91
92 /* FIXME: These should be removed. */
93 static TCGv cpu_F0s, cpu_F1s;
94 static TCGv_i64 cpu_F0d, cpu_F1d;
95
96 #include "gen-icount.h"
97
98 static const char *regnames[] =
99 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
100 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
101
102 /* Initialize TCG globals. */
103 void arm_translate_init(void)
104 {
105 int i;
106
107 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
108
109 for (i = 0; i < 16; i++) {
110 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
111 offsetof(CPUState, regs[i]),
112 regnames[i]);
113 }
114 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUState, exclusive_addr), "exclusive_addr");
116 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUState, exclusive_val), "exclusive_val");
118 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_high), "exclusive_high");
120 #ifdef CONFIG_USER_ONLY
121 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_test), "exclusive_test");
123 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
124 offsetof(CPUState, exclusive_info), "exclusive_info");
125 #endif
126
127 #define GEN_HELPER 2
128 #include "helpers.h"
129 }
130
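/* Count of live temporaries, used to catch temporaries leaked while
   translating an instruction. */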
131 static int num_temps;
132
133 /* Allocate a temporary variable. */
134 static TCGv_i32 new_tmp(void)
135 {
136 num_temps++;
137 return tcg_temp_new_i32();
138 }
139
140 /* Release a temporary variable. */
141 static void dead_tmp(TCGv tmp)
142 {
143 tcg_temp_free(tmp);
144 num_temps--;
145 }
146
147 static inline TCGv load_cpu_offset(int offset)
148 {
149 TCGv tmp = new_tmp();
150 tcg_gen_ld_i32(tmp, cpu_env, offset);
151 return tmp;
152 }
153
154 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
155
156 static inline void store_cpu_offset(TCGv var, int offset)
157 {
158 tcg_gen_st_i32(var, cpu_env, offset);
159 dead_tmp(var);
160 }
161
162 #define store_cpu_field(var, name) \
163 store_cpu_offset(var, offsetof(CPUState, name))
164
165 /* Set a variable to the value of a CPU register. */
166 static void load_reg_var(DisasContext *s, TCGv var, int reg)
167 {
168 if (reg == 15) {
169 uint32_t addr;
170 /* normally, since we have updated the PC, we need only add one insn */
171 if (s->thumb)
172 addr = (long)s->pc + 2;
173 else
174 addr = (long)s->pc + 4;
175 tcg_gen_movi_i32(var, addr);
176 } else {
177 tcg_gen_mov_i32(var, cpu_R[reg]);
178 }
179 }
180
181 /* Create a new temporary and set it to the value of a CPU register. */
182 static inline TCGv load_reg(DisasContext *s, int reg)
183 {
184 TCGv tmp = new_tmp();
185 load_reg_var(s, tmp, reg);
186 return tmp;
187 }
188
189 /* Set a CPU register. The source must be a temporary and will be
190 marked as dead. */
191 static void store_reg(DisasContext *s, int reg, TCGv var)
192 {
193 if (reg == 15) {
194 tcg_gen_andi_i32(var, var, ~1);
195 s->is_jmp = DISAS_JUMP;
196 }
197 tcg_gen_mov_i32(cpu_R[reg], var);
198 dead_tmp(var);
199 }
200
201 /* Value extensions. */
202 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
203 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
204 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
205 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
206
207 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
208 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
209
210
211 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
212 {
213 TCGv tmp_mask = tcg_const_i32(mask);
214 gen_helper_cpsr_write(var, tmp_mask);
215 tcg_temp_free_i32(tmp_mask);
216 }
217 /* Set NZCV flags from the high 4 bits of var. */
218 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
219
220 static void gen_exception(int excp)
221 {
222 TCGv tmp = new_tmp();
223 tcg_gen_movi_i32(tmp, excp);
224 gen_helper_exception(tmp);
225 dead_tmp(tmp);
226 }
227
228 static void gen_smul_dual(TCGv a, TCGv b)
229 {
230 TCGv tmp1 = new_tmp();
231 TCGv tmp2 = new_tmp();
232 tcg_gen_ext16s_i32(tmp1, a);
233 tcg_gen_ext16s_i32(tmp2, b);
234 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
235 dead_tmp(tmp2);
236 tcg_gen_sari_i32(a, a, 16);
237 tcg_gen_sari_i32(b, b, 16);
238 tcg_gen_mul_i32(b, b, a);
239 tcg_gen_mov_i32(a, tmp1);
240 dead_tmp(tmp1);
241 }
242
243 /* Byteswap each halfword. */
244 static void gen_rev16(TCGv var)
245 {
246 TCGv tmp = new_tmp();
247 tcg_gen_shri_i32(tmp, var, 8);
248 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
249 tcg_gen_shli_i32(var, var, 8);
250 tcg_gen_andi_i32(var, var, 0xff00ff00);
251 tcg_gen_or_i32(var, var, tmp);
252 dead_tmp(tmp);
253 }
254
255 /* Byteswap low halfword and sign extend. */
256 static void gen_revsh(TCGv var)
257 {
258 tcg_gen_ext16u_i32(var, var);
259 tcg_gen_bswap16_i32(var, var);
260 tcg_gen_ext16s_i32(var, var);
261 }
262
263 /* Unsigned bitfield extract. */
264 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
265 {
266 if (shift)
267 tcg_gen_shri_i32(var, var, shift);
268 tcg_gen_andi_i32(var, var, mask);
269 }
270
271 /* Signed bitfield extract. */
272 static void gen_sbfx(TCGv var, int shift, int width)
273 {
274 uint32_t signbit;
275
276 if (shift)
277 tcg_gen_sari_i32(var, var, shift);
278 if (shift + width < 32) {
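        /* Mask to the field width, then sign-extend using the identity
           (x ^ signbit) - signbit, which flips the sign bit and then
           subtracts it back out. */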
279 signbit = 1u << (width - 1);
280 tcg_gen_andi_i32(var, var, (1u << width) - 1);
281 tcg_gen_xori_i32(var, var, signbit);
282 tcg_gen_subi_i32(var, var, signbit);
283 }
284 }
285
286 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
287 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
288 {
289 tcg_gen_andi_i32(val, val, mask);
290 tcg_gen_shli_i32(val, val, shift);
291 tcg_gen_andi_i32(base, base, ~(mask << shift));
292 tcg_gen_or_i32(dest, base, val);
293 }
294
295 /* Return (b << 32) + a. Mark inputs as dead. */
296 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
297 {
298 TCGv_i64 tmp64 = tcg_temp_new_i64();
299
300 tcg_gen_extu_i32_i64(tmp64, b);
301 dead_tmp(b);
302 tcg_gen_shli_i64(tmp64, tmp64, 32);
303 tcg_gen_add_i64(a, tmp64, a);
304
305 tcg_temp_free_i64(tmp64);
306 return a;
307 }
308
309 /* Return (b << 32) - a. Mark inputs as dead. */
310 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
311 {
312 TCGv_i64 tmp64 = tcg_temp_new_i64();
313
314 tcg_gen_extu_i32_i64(tmp64, b);
315 dead_tmp(b);
316 tcg_gen_shli_i64(tmp64, tmp64, 32);
317 tcg_gen_sub_i64(a, tmp64, a);
318
319 tcg_temp_free_i64(tmp64);
320 return a;
321 }
322
323 /* FIXME: Most targets have native widening multiplication.
324 It would be good to use that instead of a full wide multiply. */
325 /* 32x32->64 multiply. Marks inputs as dead. */
326 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
327 {
328 TCGv_i64 tmp1 = tcg_temp_new_i64();
329 TCGv_i64 tmp2 = tcg_temp_new_i64();
330
331 tcg_gen_extu_i32_i64(tmp1, a);
332 dead_tmp(a);
333 tcg_gen_extu_i32_i64(tmp2, b);
334 dead_tmp(b);
335 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
336 tcg_temp_free_i64(tmp2);
337 return tmp1;
338 }
339
340 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
341 {
342 TCGv_i64 tmp1 = tcg_temp_new_i64();
343 TCGv_i64 tmp2 = tcg_temp_new_i64();
344
345 tcg_gen_ext_i32_i64(tmp1, a);
346 dead_tmp(a);
347 tcg_gen_ext_i32_i64(tmp2, b);
348 dead_tmp(b);
349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
350 tcg_temp_free_i64(tmp2);
351 return tmp1;
352 }
353
354 /* Swap low and high halfwords. */
355 static void gen_swap_half(TCGv var)
356 {
357 TCGv tmp = new_tmp();
358 tcg_gen_shri_i32(tmp, var, 16);
359 tcg_gen_shli_i32(var, var, 16);
360 tcg_gen_or_i32(var, var, tmp);
361 dead_tmp(tmp);
362 }
363
364 /* Dual 16-bit add. The result is placed in t0; t1 is marked as dead.
365 tmp = (t0 ^ t1) & 0x8000;
366 t0 &= ~0x8000;
367 t1 &= ~0x8000;
368 t0 = (t0 + t1) ^ tmp;
369 */
370
371 static void gen_add16(TCGv t0, TCGv t1)
372 {
373 TCGv tmp = new_tmp();
374 tcg_gen_xor_i32(tmp, t0, t1);
375 tcg_gen_andi_i32(tmp, tmp, 0x8000);
376 tcg_gen_andi_i32(t0, t0, ~0x8000);
377 tcg_gen_andi_i32(t1, t1, ~0x8000);
378 tcg_gen_add_i32(t0, t0, t1);
379 tcg_gen_xor_i32(t0, t0, tmp);
380 dead_tmp(tmp);
381 dead_tmp(t1);
382 }
383
384 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
385
386 /* Set CF to the top bit of var. */
387 static void gen_set_CF_bit31(TCGv var)
388 {
389 TCGv tmp = new_tmp();
390 tcg_gen_shri_i32(tmp, var, 31);
391 gen_set_CF(tmp);
392 dead_tmp(tmp);
393 }
394
395 /* Set N and Z flags from var. */
396 static inline void gen_logic_CC(TCGv var)
397 {
398 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
399 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
400 }
401
402 /* T0 += T1 + CF. */
403 static void gen_adc(TCGv t0, TCGv t1)
404 {
405 TCGv tmp;
406 tcg_gen_add_i32(t0, t0, t1);
407 tmp = load_cpu_field(CF);
408 tcg_gen_add_i32(t0, t0, tmp);
409 dead_tmp(tmp);
410 }
411
412 /* dest = T0 + T1 + CF. */
413 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
414 {
415 TCGv tmp;
416 tcg_gen_add_i32(dest, t0, t1);
417 tmp = load_cpu_field(CF);
418 tcg_gen_add_i32(dest, dest, tmp);
419 dead_tmp(tmp);
420 }
421
422 /* dest = T0 - T1 + CF - 1. */
423 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
424 {
425 TCGv tmp;
426 tcg_gen_sub_i32(dest, t0, t1);
427 tmp = load_cpu_field(CF);
428 tcg_gen_add_i32(dest, dest, tmp);
429 tcg_gen_subi_i32(dest, dest, 1);
430 dead_tmp(tmp);
431 }
432
433 /* FIXME: Implement this natively. */
434 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
435
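/* Set the carry flag to bit (shift) of var, i.e. the last bit shifted
   out by an immediate shift. shift == 0 is used for the RRX case. */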
436 static void shifter_out_im(TCGv var, int shift)
437 {
438 TCGv tmp = new_tmp();
439 if (shift == 0) {
440 tcg_gen_andi_i32(tmp, var, 1);
441 } else {
442 tcg_gen_shri_i32(tmp, var, shift);
443 if (shift != 31)
444 tcg_gen_andi_i32(tmp, tmp, 1);
445 }
446 gen_set_CF(tmp);
447 dead_tmp(tmp);
448 }
449
450 /* Shift by immediate. Includes special handling for shift == 0. */
451 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
452 {
453 switch (shiftop) {
454 case 0: /* LSL */
455 if (shift != 0) {
456 if (flags)
457 shifter_out_im(var, 32 - shift);
458 tcg_gen_shli_i32(var, var, shift);
459 }
460 break;
461 case 1: /* LSR */
462 if (shift == 0) {
463 if (flags) {
464 tcg_gen_shri_i32(var, var, 31);
465 gen_set_CF(var);
466 }
467 tcg_gen_movi_i32(var, 0);
468 } else {
469 if (flags)
470 shifter_out_im(var, shift - 1);
471 tcg_gen_shri_i32(var, var, shift);
472 }
473 break;
474 case 2: /* ASR */
475 if (shift == 0)
476 shift = 32;
477 if (flags)
478 shifter_out_im(var, shift - 1);
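        /* TCG only defines shifts by 0..31, but an arithmetic shift by 31
           gives the same result as one by 32: every bit becomes a copy of
           the sign bit. */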
479 if (shift == 32)
480 shift = 31;
481 tcg_gen_sari_i32(var, var, shift);
482 break;
483 case 3: /* ROR/RRX */
484 if (shift != 0) {
485 if (flags)
486 shifter_out_im(var, shift - 1);
487 tcg_gen_rotri_i32(var, var, shift); break;
488 } else {
489 TCGv tmp = load_cpu_field(CF);
490 if (flags)
491 shifter_out_im(var, 0);
492 tcg_gen_shri_i32(var, var, 1);
493 tcg_gen_shli_i32(tmp, tmp, 31);
494 tcg_gen_or_i32(var, var, tmp);
495 dead_tmp(tmp);
496 }
497 }
498 }
499
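/* Shift by register. ARM uses the low byte of the shift register and
   defines results for shift counts of 32 and above, which plain TCG
   shift ops do not, so these cases go through helpers. */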
500 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
501 TCGv shift, int flags)
502 {
503 if (flags) {
504 switch (shiftop) {
505 case 0: gen_helper_shl_cc(var, var, shift); break;
506 case 1: gen_helper_shr_cc(var, var, shift); break;
507 case 2: gen_helper_sar_cc(var, var, shift); break;
508 case 3: gen_helper_ror_cc(var, var, shift); break;
509 }
510 } else {
511 switch (shiftop) {
512 case 0: gen_helper_shl(var, var, shift); break;
513 case 1: gen_helper_shr(var, var, shift); break;
514 case 2: gen_helper_sar(var, var, shift); break;
515 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
516 tcg_gen_rotr_i32(var, var, shift); break;
517 }
518 }
519 dead_tmp(shift);
520 }
521
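/* ARM parallel add/subtract. op1 selects the variant: 1 = signed
   (sets GE flags), 2 = signed saturating, 3 = signed halving,
   5 = unsigned (sets GE flags), 6 = unsigned saturating,
   7 = unsigned halving. op2 selects the operation. */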
522 #define PAS_OP(pfx) \
523 switch (op2) { \
524 case 0: gen_pas_helper(glue(pfx,add16)); break; \
525 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
526 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
527 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
528 case 4: gen_pas_helper(glue(pfx,add8)); break; \
529 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
530 }
531 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
532 {
533 TCGv_ptr tmp;
534
535 switch (op1) {
536 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
537 case 1:
538 tmp = tcg_temp_new_ptr();
539 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
540 PAS_OP(s)
541 tcg_temp_free_ptr(tmp);
542 break;
543 case 5:
544 tmp = tcg_temp_new_ptr();
545 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
546 PAS_OP(u)
547 tcg_temp_free_ptr(tmp);
548 break;
549 #undef gen_pas_helper
550 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
551 case 2:
552 PAS_OP(q);
553 break;
554 case 3:
555 PAS_OP(sh);
556 break;
557 case 6:
558 PAS_OP(uq);
559 break;
560 case 7:
561 PAS_OP(uh);
562 break;
563 #undef gen_pas_helper
564 }
565 }
566 #undef PAS_OP
567
568 /* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings. */
569 #define PAS_OP(pfx) \
570 switch (op1) { \
571 case 0: gen_pas_helper(glue(pfx,add8)); break; \
572 case 1: gen_pas_helper(glue(pfx,add16)); break; \
573 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
574 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
575 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
576 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
577 }
578 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
579 {
580 TCGv_ptr tmp;
581
582 switch (op2) {
583 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
584 case 0:
585 tmp = tcg_temp_new_ptr();
586 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
587 PAS_OP(s)
588 tcg_temp_free_ptr(tmp);
589 break;
590 case 4:
591 tmp = tcg_temp_new_ptr();
592 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
593 PAS_OP(u)
594 tcg_temp_free_ptr(tmp);
595 break;
596 #undef gen_pas_helper
597 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
598 case 1:
599 PAS_OP(q);
600 break;
601 case 2:
602 PAS_OP(sh);
603 break;
604 case 5:
605 PAS_OP(uq);
606 break;
607 case 6:
608 PAS_OP(uh);
609 break;
610 #undef gen_pas_helper
611 }
612 }
613 #undef PAS_OP
614
615 static void gen_test_cc(int cc, int label)
616 {
617 TCGv tmp;
618 TCGv tmp2;
619 int inv;
620
621 switch (cc) {
622 case 0: /* eq: Z */
623 tmp = load_cpu_field(ZF);
624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
625 break;
626 case 1: /* ne: !Z */
627 tmp = load_cpu_field(ZF);
628 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
629 break;
630 case 2: /* cs: C */
631 tmp = load_cpu_field(CF);
632 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
633 break;
634 case 3: /* cc: !C */
635 tmp = load_cpu_field(CF);
636 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
637 break;
638 case 4: /* mi: N */
639 tmp = load_cpu_field(NF);
640 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
641 break;
642 case 5: /* pl: !N */
643 tmp = load_cpu_field(NF);
644 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
645 break;
646 case 6: /* vs: V */
647 tmp = load_cpu_field(VF);
648 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
649 break;
650 case 7: /* vc: !V */
651 tmp = load_cpu_field(VF);
652 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
653 break;
654 case 8: /* hi: C && !Z */
655 inv = gen_new_label();
656 tmp = load_cpu_field(CF);
657 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
658 dead_tmp(tmp);
659 tmp = load_cpu_field(ZF);
660 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
661 gen_set_label(inv);
662 break;
663 case 9: /* ls: !C || Z */
664 tmp = load_cpu_field(CF);
665 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
666 dead_tmp(tmp);
667 tmp = load_cpu_field(ZF);
668 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
669 break;
670 case 10: /* ge: N == V -> N ^ V == 0 */
671 tmp = load_cpu_field(VF);
672 tmp2 = load_cpu_field(NF);
673 tcg_gen_xor_i32(tmp, tmp, tmp2);
674 dead_tmp(tmp2);
675 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
676 break;
677 case 11: /* lt: N != V -> N ^ V != 0 */
678 tmp = load_cpu_field(VF);
679 tmp2 = load_cpu_field(NF);
680 tcg_gen_xor_i32(tmp, tmp, tmp2);
681 dead_tmp(tmp2);
682 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
683 break;
684 case 12: /* gt: !Z && N == V */
685 inv = gen_new_label();
686 tmp = load_cpu_field(ZF);
687 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
688 dead_tmp(tmp);
689 tmp = load_cpu_field(VF);
690 tmp2 = load_cpu_field(NF);
691 tcg_gen_xor_i32(tmp, tmp, tmp2);
692 dead_tmp(tmp2);
693 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
694 gen_set_label(inv);
695 break;
696 case 13: /* le: Z || N != V */
697 tmp = load_cpu_field(ZF);
698 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
699 dead_tmp(tmp);
700 tmp = load_cpu_field(VF);
701 tmp2 = load_cpu_field(NF);
702 tcg_gen_xor_i32(tmp, tmp, tmp2);
703 dead_tmp(tmp2);
704 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
705 break;
706 default:
707 fprintf(stderr, "Bad condition code 0x%x\n", cc);
708 abort();
709 }
710 dead_tmp(tmp);
711 }
712
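/* For each ARM data processing op, whether it is a logic op that sets
   the N and Z flags directly from the result. */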
713 static const uint8_t table_logic_cc[16] = {
714 1, /* and */
715 1, /* xor */
716 0, /* sub */
717 0, /* rsb */
718 0, /* add */
719 0, /* adc */
720 0, /* sbc */
721 0, /* rsc */
722 1, /* andl */
723 1, /* xorl */
724 0, /* cmp */
725 0, /* cmn */
726 1, /* orr */
727 1, /* mov */
728 1, /* bic */
729 1, /* mvn */
730 };
731
732 /* Set PC and Thumb state from an immediate address. */
733 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
734 {
735 TCGv tmp;
736
737 s->is_jmp = DISAS_UPDATE;
738 if (s->thumb != (addr & 1)) {
739 tmp = new_tmp();
740 tcg_gen_movi_i32(tmp, addr & 1);
741 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
742 dead_tmp(tmp);
743 }
744 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
745 }
746
747 /* Set PC and Thumb state from var. var is marked as dead. */
748 static inline void gen_bx(DisasContext *s, TCGv var)
749 {
750 s->is_jmp = DISAS_UPDATE;
751 tcg_gen_andi_i32(cpu_R[15], var, ~1);
752 tcg_gen_andi_i32(var, var, 1);
753 store_cpu_field(var, thumb);
754 }
755
756 /* Variant of store_reg which uses branch&exchange logic when storing
757 to r15 in ARM architecture v7 and above. The source must be a temporary
758 and will be marked as dead. */
759 static inline void store_reg_bx(CPUState *env, DisasContext *s,
760 int reg, TCGv var)
761 {
762 if (reg == 15 && ENABLE_ARCH_7) {
763 gen_bx(s, var);
764 } else {
765 store_reg(s, reg, var);
766 }
767 }
768
769 static inline TCGv gen_ld8s(TCGv addr, int index)
770 {
771 TCGv tmp = new_tmp();
772 tcg_gen_qemu_ld8s(tmp, addr, index);
773 return tmp;
774 }
775 static inline TCGv gen_ld8u(TCGv addr, int index)
776 {
777 TCGv tmp = new_tmp();
778 tcg_gen_qemu_ld8u(tmp, addr, index);
779 return tmp;
780 }
781 static inline TCGv gen_ld16s(TCGv addr, int index)
782 {
783 TCGv tmp = new_tmp();
784 tcg_gen_qemu_ld16s(tmp, addr, index);
785 return tmp;
786 }
787 static inline TCGv gen_ld16u(TCGv addr, int index)
788 {
789 TCGv tmp = new_tmp();
790 tcg_gen_qemu_ld16u(tmp, addr, index);
791 return tmp;
792 }
793 static inline TCGv gen_ld32(TCGv addr, int index)
794 {
795 TCGv tmp = new_tmp();
796 tcg_gen_qemu_ld32u(tmp, addr, index);
797 return tmp;
798 }
799 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
800 {
801 TCGv_i64 tmp = tcg_temp_new_i64();
802 tcg_gen_qemu_ld64(tmp, addr, index);
803 return tmp;
804 }
805 static inline void gen_st8(TCGv val, TCGv addr, int index)
806 {
807 tcg_gen_qemu_st8(val, addr, index);
808 dead_tmp(val);
809 }
810 static inline void gen_st16(TCGv val, TCGv addr, int index)
811 {
812 tcg_gen_qemu_st16(val, addr, index);
813 dead_tmp(val);
814 }
815 static inline void gen_st32(TCGv val, TCGv addr, int index)
816 {
817 tcg_gen_qemu_st32(val, addr, index);
818 dead_tmp(val);
819 }
820 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
821 {
822 tcg_gen_qemu_st64(val, addr, index);
823 tcg_temp_free_i64(val);
824 }
825
826 static inline void gen_set_pc_im(uint32_t val)
827 {
828 tcg_gen_movi_i32(cpu_R[15], val);
829 }
830
831 /* Force a TB lookup after an instruction that changes the CPU state. */
832 static inline void gen_lookup_tb(DisasContext *s)
833 {
834 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
835 s->is_jmp = DISAS_UPDATE;
836 }
837
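/* Add the offset of an ARM "addressing mode 2" (load/store word or
   unsigned byte) instruction to var. Bit 25 selects a shifted register
   offset over an immediate; bit 23 selects add over subtract. */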
838 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
839 TCGv var)
840 {
841 int val, rm, shift, shiftop;
842 TCGv offset;
843
844 if (!(insn & (1 << 25))) {
845 /* immediate */
846 val = insn & 0xfff;
847 if (!(insn & (1 << 23)))
848 val = -val;
849 if (val != 0)
850 tcg_gen_addi_i32(var, var, val);
851 } else {
852 /* shift/register */
853 rm = (insn) & 0xf;
854 shift = (insn >> 7) & 0x1f;
855 shiftop = (insn >> 5) & 3;
856 offset = load_reg(s, rm);
857 gen_arm_shift_im(offset, shiftop, shift, 0);
858 if (!(insn & (1 << 23)))
859 tcg_gen_sub_i32(var, var, offset);
860 else
861 tcg_gen_add_i32(var, var, offset);
862 dead_tmp(offset);
863 }
864 }
865
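/* Likewise for "addressing mode 3" (halfword and signed transfers).
   Bit 22 selects an 8-bit immediate, split across two nibbles, over a
   register offset; the extra argument is added to the offset in either
   case. */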
866 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
867 int extra, TCGv var)
868 {
869 int val, rm;
870 TCGv offset;
871
872 if (insn & (1 << 22)) {
873 /* immediate */
874 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
875 if (!(insn & (1 << 23)))
876 val = -val;
877 val += extra;
878 if (val != 0)
879 tcg_gen_addi_i32(var, var, val);
880 } else {
881 /* register */
882 if (extra)
883 tcg_gen_addi_i32(var, var, extra);
884 rm = (insn) & 0xf;
885 offset = load_reg(s, rm);
886 if (!(insn & (1 << 23)))
887 tcg_gen_sub_i32(var, var, offset);
888 else
889 tcg_gen_add_i32(var, var, offset);
890 dead_tmp(offset);
891 }
892 }
893
894 #define VFP_OP2(name) \
895 static inline void gen_vfp_##name(int dp) \
896 { \
897 if (dp) \
898 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
899 else \
900 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
901 }
902
903 VFP_OP2(add)
904 VFP_OP2(sub)
905 VFP_OP2(mul)
906 VFP_OP2(div)
907
908 #undef VFP_OP2
909
910 static inline void gen_vfp_abs(int dp)
911 {
912 if (dp)
913 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
914 else
915 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
916 }
917
918 static inline void gen_vfp_neg(int dp)
919 {
920 if (dp)
921 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
922 else
923 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
924 }
925
926 static inline void gen_vfp_sqrt(int dp)
927 {
928 if (dp)
929 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
930 else
931 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
932 }
933
934 static inline void gen_vfp_cmp(int dp)
935 {
936 if (dp)
937 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
938 else
939 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
940 }
941
942 static inline void gen_vfp_cmpe(int dp)
943 {
944 if (dp)
945 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
946 else
947 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
948 }
949
950 static inline void gen_vfp_F1_ld0(int dp)
951 {
952 if (dp)
953 tcg_gen_movi_i64(cpu_F1d, 0);
954 else
955 tcg_gen_movi_i32(cpu_F1s, 0);
956 }
957
958 static inline void gen_vfp_uito(int dp)
959 {
960 if (dp)
961 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
962 else
963 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
964 }
965
966 static inline void gen_vfp_sito(int dp)
967 {
968 if (dp)
969 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
970 else
971 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
972 }
973
974 static inline void gen_vfp_toui(int dp)
975 {
976 if (dp)
977 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
978 else
979 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
980 }
981
982 static inline void gen_vfp_touiz(int dp)
983 {
984 if (dp)
985 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
986 else
987 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
988 }
989
990 static inline void gen_vfp_tosi(int dp)
991 {
992 if (dp)
993 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
994 else
995 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
996 }
997
998 static inline void gen_vfp_tosiz(int dp)
999 {
1000 if (dp)
1001 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
1002 else
1003 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1004 }
1005
1006 #define VFP_GEN_FIX(name) \
1007 static inline void gen_vfp_##name(int dp, int shift) \
1008 { \
1009 TCGv tmp_shift = tcg_const_i32(shift); \
1010 if (dp) \
1011 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
1012 else \
1013 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1014 tcg_temp_free_i32(tmp_shift); \
1015 }
1016 VFP_GEN_FIX(tosh)
1017 VFP_GEN_FIX(tosl)
1018 VFP_GEN_FIX(touh)
1019 VFP_GEN_FIX(toul)
1020 VFP_GEN_FIX(shto)
1021 VFP_GEN_FIX(slto)
1022 VFP_GEN_FIX(uhto)
1023 VFP_GEN_FIX(ulto)
1024 #undef VFP_GEN_FIX
1025
1026 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1027 {
1028 if (dp)
1029 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1030 else
1031 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1032 }
1033
1034 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1035 {
1036 if (dp)
1037 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1038 else
1039 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1040 }
1041
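/* Byte offset of a VFP register within CPUARMState. Single-precision
   registers live in the halves of the double-precision registers:
   even-numbered sregs in the less significant half (l.lower),
   odd-numbered sregs in l.upper. */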
1042 static inline long
1043 vfp_reg_offset (int dp, int reg)
1044 {
1045 if (dp)
1046 return offsetof(CPUARMState, vfp.regs[reg]);
1047 else if (reg & 1) {
1048 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1049 + offsetof(CPU_DoubleU, l.upper);
1050 } else {
1051 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1052 + offsetof(CPU_DoubleU, l.lower);
1053 }
1054 }
1055
1056 /* Return the offset of a 32-bit piece of a NEON register.
1057 zero is the least significant end of the register. */
1058 static inline long
1059 neon_reg_offset (int reg, int n)
1060 {
1061 int sreg;
1062 sreg = reg * 2 + n;
1063 return vfp_reg_offset(0, sreg);
1064 }
1065
1066 static TCGv neon_load_reg(int reg, int pass)
1067 {
1068 TCGv tmp = new_tmp();
1069 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1070 return tmp;
1071 }
1072
1073 static void neon_store_reg(int reg, int pass, TCGv var)
1074 {
1075 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1076 dead_tmp(var);
1077 }
1078
1079 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1080 {
1081 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1082 }
1083
1084 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1085 {
1086 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1087 }
1088
1089 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1090 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1091 #define tcg_gen_st_f32 tcg_gen_st_i32
1092 #define tcg_gen_st_f64 tcg_gen_st_i64
1093
1094 static inline void gen_mov_F0_vreg(int dp, int reg)
1095 {
1096 if (dp)
1097 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1098 else
1099 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1100 }
1101
1102 static inline void gen_mov_F1_vreg(int dp, int reg)
1103 {
1104 if (dp)
1105 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1106 else
1107 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1108 }
1109
1110 static inline void gen_mov_vreg_F0(int dp, int reg)
1111 {
1112 if (dp)
1113 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1114 else
1115 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1116 }
1117
1118 #define ARM_CP_RW_BIT (1 << 20)
1119
1120 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1121 {
1122 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1123 }
1124
1125 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1126 {
1127 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1128 }
1129
1130 static inline TCGv iwmmxt_load_creg(int reg)
1131 {
1132 TCGv var = new_tmp();
1133 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1134 return var;
1135 }
1136
1137 static inline void iwmmxt_store_creg(int reg, TCGv var)
1138 {
1139 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1140 dead_tmp(var);
1141 }
1142
1143 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1144 {
1145 iwmmxt_store_reg(cpu_M0, rn);
1146 }
1147
1148 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1149 {
1150 iwmmxt_load_reg(cpu_M0, rn);
1151 }
1152
1153 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1154 {
1155 iwmmxt_load_reg(cpu_V1, rn);
1156 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1157 }
1158
1159 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1160 {
1161 iwmmxt_load_reg(cpu_V1, rn);
1162 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1163 }
1164
1165 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1166 {
1167 iwmmxt_load_reg(cpu_V1, rn);
1168 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1169 }
1170
1171 #define IWMMXT_OP(name) \
1172 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1173 { \
1174 iwmmxt_load_reg(cpu_V1, rn); \
1175 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1176 }
1177
1178 #define IWMMXT_OP_ENV(name) \
1179 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1180 { \
1181 iwmmxt_load_reg(cpu_V1, rn); \
1182 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1183 }
1184
1185 #define IWMMXT_OP_ENV_SIZE(name) \
1186 IWMMXT_OP_ENV(name##b) \
1187 IWMMXT_OP_ENV(name##w) \
1188 IWMMXT_OP_ENV(name##l)
1189
1190 #define IWMMXT_OP_ENV1(name) \
1191 static inline void gen_op_iwmmxt_##name##_M0(void) \
1192 { \
1193 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1194 }
1195
1196 IWMMXT_OP(maddsq)
1197 IWMMXT_OP(madduq)
1198 IWMMXT_OP(sadb)
1199 IWMMXT_OP(sadw)
1200 IWMMXT_OP(mulslw)
1201 IWMMXT_OP(mulshw)
1202 IWMMXT_OP(mululw)
1203 IWMMXT_OP(muluhw)
1204 IWMMXT_OP(macsw)
1205 IWMMXT_OP(macuw)
1206
1207 IWMMXT_OP_ENV_SIZE(unpackl)
1208 IWMMXT_OP_ENV_SIZE(unpackh)
1209
1210 IWMMXT_OP_ENV1(unpacklub)
1211 IWMMXT_OP_ENV1(unpackluw)
1212 IWMMXT_OP_ENV1(unpacklul)
1213 IWMMXT_OP_ENV1(unpackhub)
1214 IWMMXT_OP_ENV1(unpackhuw)
1215 IWMMXT_OP_ENV1(unpackhul)
1216 IWMMXT_OP_ENV1(unpacklsb)
1217 IWMMXT_OP_ENV1(unpacklsw)
1218 IWMMXT_OP_ENV1(unpacklsl)
1219 IWMMXT_OP_ENV1(unpackhsb)
1220 IWMMXT_OP_ENV1(unpackhsw)
1221 IWMMXT_OP_ENV1(unpackhsl)
1222
1223 IWMMXT_OP_ENV_SIZE(cmpeq)
1224 IWMMXT_OP_ENV_SIZE(cmpgtu)
1225 IWMMXT_OP_ENV_SIZE(cmpgts)
1226
1227 IWMMXT_OP_ENV_SIZE(mins)
1228 IWMMXT_OP_ENV_SIZE(minu)
1229 IWMMXT_OP_ENV_SIZE(maxs)
1230 IWMMXT_OP_ENV_SIZE(maxu)
1231
1232 IWMMXT_OP_ENV_SIZE(subn)
1233 IWMMXT_OP_ENV_SIZE(addn)
1234 IWMMXT_OP_ENV_SIZE(subu)
1235 IWMMXT_OP_ENV_SIZE(addu)
1236 IWMMXT_OP_ENV_SIZE(subs)
1237 IWMMXT_OP_ENV_SIZE(adds)
1238
1239 IWMMXT_OP_ENV(avgb0)
1240 IWMMXT_OP_ENV(avgb1)
1241 IWMMXT_OP_ENV(avgw0)
1242 IWMMXT_OP_ENV(avgw1)
1243
1244 IWMMXT_OP(msadb)
1245
1246 IWMMXT_OP_ENV(packuw)
1247 IWMMXT_OP_ENV(packul)
1248 IWMMXT_OP_ENV(packuq)
1249 IWMMXT_OP_ENV(packsw)
1250 IWMMXT_OP_ENV(packsl)
1251 IWMMXT_OP_ENV(packsq)
1252
1253 static void gen_op_iwmmxt_set_mup(void)
1254 {
1255 TCGv tmp;
1256 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1257 tcg_gen_ori_i32(tmp, tmp, 2);
1258 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1259 }
1260
1261 static void gen_op_iwmmxt_set_cup(void)
1262 {
1263 TCGv tmp;
1264 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1265 tcg_gen_ori_i32(tmp, tmp, 1);
1266 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1267 }
1268
1269 static void gen_op_iwmmxt_setpsr_nz(void)
1270 {
1271 TCGv tmp = new_tmp();
1272 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1273 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1274 }
1275
1276 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1277 {
1278 iwmmxt_load_reg(cpu_V1, rn);
1279 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1280 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1281 }
1282
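/* Compute the address for an iwMMXt load/store, performing any base
   register writeback. Bit 24 selects pre- over post-indexing, bit 23
   add over subtract, bit 21 writeback. Returns nonzero for an invalid
   addressing mode. */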
1283 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1284 {
1285 int rd;
1286 uint32_t offset;
1287 TCGv tmp;
1288
1289 rd = (insn >> 16) & 0xf;
1290 tmp = load_reg(s, rd);
1291
1292 offset = (insn & 0xff) << ((insn >> 7) & 2);
1293 if (insn & (1 << 24)) {
1294 /* Pre indexed */
1295 if (insn & (1 << 23))
1296 tcg_gen_addi_i32(tmp, tmp, offset);
1297 else
1298 tcg_gen_addi_i32(tmp, tmp, -offset);
1299 tcg_gen_mov_i32(dest, tmp);
1300 if (insn & (1 << 21))
1301 store_reg(s, rd, tmp);
1302 else
1303 dead_tmp(tmp);
1304 } else if (insn & (1 << 21)) {
1305 /* Post indexed */
1306 tcg_gen_mov_i32(dest, tmp);
1307 if (insn & (1 << 23))
1308 tcg_gen_addi_i32(tmp, tmp, offset);
1309 else
1310 tcg_gen_addi_i32(tmp, tmp, -offset);
1311 store_reg(s, rd, tmp);
1312 } else if (!(insn & (1 << 23)))
1313 return 1;
1314 return 0;
1315 }
1316
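/* Load the shift amount for an iwMMXt shift into dest: either a
   wCGRn control register (bit 8 set) or the low 32 bits of a wRd
   register, masked to the given range. Returns nonzero on an invalid
   source register. */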
1317 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1318 {
1319 int rd = (insn >> 0) & 0xf;
1320 TCGv tmp;
1321
1322 if (insn & (1 << 8)) {
1323 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1324 return 1;
1325 } else {
1326 tmp = iwmmxt_load_creg(rd);
1327 }
1328 } else {
1329 tmp = new_tmp();
1330 iwmmxt_load_reg(cpu_V0, rd);
1331 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1332 }
1333 tcg_gen_andi_i32(tmp, tmp, mask);
1334 tcg_gen_mov_i32(dest, tmp);
1335 dead_tmp(tmp);
1336 return 0;
1337 }
1338
1339 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1340 (i.e. an undefined instruction). */
1341 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1342 {
1343 int rd, wrd;
1344 int rdhi, rdlo, rd0, rd1, i;
1345 TCGv addr;
1346 TCGv tmp, tmp2, tmp3;
1347
1348 if ((insn & 0x0e000e00) == 0x0c000000) {
1349 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1350 wrd = insn & 0xf;
1351 rdlo = (insn >> 12) & 0xf;
1352 rdhi = (insn >> 16) & 0xf;
1353 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1354 iwmmxt_load_reg(cpu_V0, wrd);
1355 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1356 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1357 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1358 } else { /* TMCRR */
1359 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1360 iwmmxt_store_reg(cpu_V0, wrd);
1361 gen_op_iwmmxt_set_mup();
1362 }
1363 return 0;
1364 }
1365
1366 wrd = (insn >> 12) & 0xf;
1367 addr = new_tmp();
1368 if (gen_iwmmxt_address(s, insn, addr)) {
1369 dead_tmp(addr);
1370 return 1;
1371 }
1372 if (insn & ARM_CP_RW_BIT) {
1373 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1374 tmp = new_tmp();
1375 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1376 iwmmxt_store_creg(wrd, tmp);
1377 } else {
1378 i = 1;
1379 if (insn & (1 << 8)) {
1380 if (insn & (1 << 22)) { /* WLDRD */
1381 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1382 i = 0;
1383 } else { /* WLDRW wRd */
1384 tmp = gen_ld32(addr, IS_USER(s));
1385 }
1386 } else {
1387 if (insn & (1 << 22)) { /* WLDRH */
1388 tmp = gen_ld16u(addr, IS_USER(s));
1389 } else { /* WLDRB */
1390 tmp = gen_ld8u(addr, IS_USER(s));
1391 }
1392 }
1393 if (i) {
1394 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1395 dead_tmp(tmp);
1396 }
1397 gen_op_iwmmxt_movq_wRn_M0(wrd);
1398 }
1399 } else {
1400 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1401 tmp = iwmmxt_load_creg(wrd);
1402 gen_st32(tmp, addr, IS_USER(s));
1403 } else {
1404 gen_op_iwmmxt_movq_M0_wRn(wrd);
1405 tmp = new_tmp();
1406 if (insn & (1 << 8)) {
1407 if (insn & (1 << 22)) { /* WSTRD */
1408 dead_tmp(tmp);
1409 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1410 } else { /* WSTRW wRd */
1411 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1412 gen_st32(tmp, addr, IS_USER(s));
1413 }
1414 } else {
1415 if (insn & (1 << 22)) { /* WSTRH */
1416 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1417 gen_st16(tmp, addr, IS_USER(s));
1418 } else { /* WSTRB */
1419 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1420 gen_st8(tmp, addr, IS_USER(s));
1421 }
1422 }
1423 }
1424 }
1425 dead_tmp(addr);
1426 return 0;
1427 }
1428
1429 if ((insn & 0x0f000000) != 0x0e000000)
1430 return 1;
1431
1432 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1433 case 0x000: /* WOR */
1434 wrd = (insn >> 12) & 0xf;
1435 rd0 = (insn >> 0) & 0xf;
1436 rd1 = (insn >> 16) & 0xf;
1437 gen_op_iwmmxt_movq_M0_wRn(rd0);
1438 gen_op_iwmmxt_orq_M0_wRn(rd1);
1439 gen_op_iwmmxt_setpsr_nz();
1440 gen_op_iwmmxt_movq_wRn_M0(wrd);
1441 gen_op_iwmmxt_set_mup();
1442 gen_op_iwmmxt_set_cup();
1443 break;
1444 case 0x011: /* TMCR */
1445 if (insn & 0xf)
1446 return 1;
1447 rd = (insn >> 12) & 0xf;
1448 wrd = (insn >> 16) & 0xf;
1449 switch (wrd) {
1450 case ARM_IWMMXT_wCID:
1451 case ARM_IWMMXT_wCASF:
1452 break;
1453 case ARM_IWMMXT_wCon:
1454 gen_op_iwmmxt_set_cup();
1455 /* Fall through. */
1456 case ARM_IWMMXT_wCSSF:
1457 tmp = iwmmxt_load_creg(wrd);
1458 tmp2 = load_reg(s, rd);
1459 tcg_gen_andc_i32(tmp, tmp, tmp2);
1460 dead_tmp(tmp2);
1461 iwmmxt_store_creg(wrd, tmp);
1462 break;
1463 case ARM_IWMMXT_wCGR0:
1464 case ARM_IWMMXT_wCGR1:
1465 case ARM_IWMMXT_wCGR2:
1466 case ARM_IWMMXT_wCGR3:
1467 gen_op_iwmmxt_set_cup();
1468 tmp = load_reg(s, rd);
1469 iwmmxt_store_creg(wrd, tmp);
1470 break;
1471 default:
1472 return 1;
1473 }
1474 break;
1475 case 0x100: /* WXOR */
1476 wrd = (insn >> 12) & 0xf;
1477 rd0 = (insn >> 0) & 0xf;
1478 rd1 = (insn >> 16) & 0xf;
1479 gen_op_iwmmxt_movq_M0_wRn(rd0);
1480 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1481 gen_op_iwmmxt_setpsr_nz();
1482 gen_op_iwmmxt_movq_wRn_M0(wrd);
1483 gen_op_iwmmxt_set_mup();
1484 gen_op_iwmmxt_set_cup();
1485 break;
1486 case 0x111: /* TMRC */
1487 if (insn & 0xf)
1488 return 1;
1489 rd = (insn >> 12) & 0xf;
1490 wrd = (insn >> 16) & 0xf;
1491 tmp = iwmmxt_load_creg(wrd);
1492 store_reg(s, rd, tmp);
1493 break;
1494 case 0x300: /* WANDN */
1495 wrd = (insn >> 12) & 0xf;
1496 rd0 = (insn >> 0) & 0xf;
1497 rd1 = (insn >> 16) & 0xf;
1498 gen_op_iwmmxt_movq_M0_wRn(rd0);
1499 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1500 gen_op_iwmmxt_andq_M0_wRn(rd1);
1501 gen_op_iwmmxt_setpsr_nz();
1502 gen_op_iwmmxt_movq_wRn_M0(wrd);
1503 gen_op_iwmmxt_set_mup();
1504 gen_op_iwmmxt_set_cup();
1505 break;
1506 case 0x200: /* WAND */
1507 wrd = (insn >> 12) & 0xf;
1508 rd0 = (insn >> 0) & 0xf;
1509 rd1 = (insn >> 16) & 0xf;
1510 gen_op_iwmmxt_movq_M0_wRn(rd0);
1511 gen_op_iwmmxt_andq_M0_wRn(rd1);
1512 gen_op_iwmmxt_setpsr_nz();
1513 gen_op_iwmmxt_movq_wRn_M0(wrd);
1514 gen_op_iwmmxt_set_mup();
1515 gen_op_iwmmxt_set_cup();
1516 break;
1517 case 0x810: case 0xa10: /* WMADD */
1518 wrd = (insn >> 12) & 0xf;
1519 rd0 = (insn >> 0) & 0xf;
1520 rd1 = (insn >> 16) & 0xf;
1521 gen_op_iwmmxt_movq_M0_wRn(rd0);
1522 if (insn & (1 << 21))
1523 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1524 else
1525 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1526 gen_op_iwmmxt_movq_wRn_M0(wrd);
1527 gen_op_iwmmxt_set_mup();
1528 break;
1529 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1530 wrd = (insn >> 12) & 0xf;
1531 rd0 = (insn >> 16) & 0xf;
1532 rd1 = (insn >> 0) & 0xf;
1533 gen_op_iwmmxt_movq_M0_wRn(rd0);
1534 switch ((insn >> 22) & 3) {
1535 case 0:
1536 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1537 break;
1538 case 1:
1539 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1540 break;
1541 case 2:
1542 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1543 break;
1544 case 3:
1545 return 1;
1546 }
1547 gen_op_iwmmxt_movq_wRn_M0(wrd);
1548 gen_op_iwmmxt_set_mup();
1549 gen_op_iwmmxt_set_cup();
1550 break;
1551 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1552 wrd = (insn >> 12) & 0xf;
1553 rd0 = (insn >> 16) & 0xf;
1554 rd1 = (insn >> 0) & 0xf;
1555 gen_op_iwmmxt_movq_M0_wRn(rd0);
1556 switch ((insn >> 22) & 3) {
1557 case 0:
1558 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1559 break;
1560 case 1:
1561 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1562 break;
1563 case 2:
1564 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1565 break;
1566 case 3:
1567 return 1;
1568 }
1569 gen_op_iwmmxt_movq_wRn_M0(wrd);
1570 gen_op_iwmmxt_set_mup();
1571 gen_op_iwmmxt_set_cup();
1572 break;
1573 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1574 wrd = (insn >> 12) & 0xf;
1575 rd0 = (insn >> 16) & 0xf;
1576 rd1 = (insn >> 0) & 0xf;
1577 gen_op_iwmmxt_movq_M0_wRn(rd0);
1578 if (insn & (1 << 22))
1579 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1580 else
1581 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1582 if (!(insn & (1 << 20)))
1583 gen_op_iwmmxt_addl_M0_wRn(wrd);
1584 gen_op_iwmmxt_movq_wRn_M0(wrd);
1585 gen_op_iwmmxt_set_mup();
1586 break;
1587 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1588 wrd = (insn >> 12) & 0xf;
1589 rd0 = (insn >> 16) & 0xf;
1590 rd1 = (insn >> 0) & 0xf;
1591 gen_op_iwmmxt_movq_M0_wRn(rd0);
1592 if (insn & (1 << 21)) {
1593 if (insn & (1 << 20))
1594 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1595 else
1596 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1597 } else {
1598 if (insn & (1 << 20))
1599 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1600 else
1601 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1602 }
1603 gen_op_iwmmxt_movq_wRn_M0(wrd);
1604 gen_op_iwmmxt_set_mup();
1605 break;
1606 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1607 wrd = (insn >> 12) & 0xf;
1608 rd0 = (insn >> 16) & 0xf;
1609 rd1 = (insn >> 0) & 0xf;
1610 gen_op_iwmmxt_movq_M0_wRn(rd0);
1611 if (insn & (1 << 21))
1612 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1613 else
1614 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1615 if (!(insn & (1 << 20))) {
1616 iwmmxt_load_reg(cpu_V1, wrd);
1617 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1618 }
1619 gen_op_iwmmxt_movq_wRn_M0(wrd);
1620 gen_op_iwmmxt_set_mup();
1621 break;
1622 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1623 wrd = (insn >> 12) & 0xf;
1624 rd0 = (insn >> 16) & 0xf;
1625 rd1 = (insn >> 0) & 0xf;
1626 gen_op_iwmmxt_movq_M0_wRn(rd0);
1627 switch ((insn >> 22) & 3) {
1628 case 0:
1629 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1630 break;
1631 case 1:
1632 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1633 break;
1634 case 2:
1635 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1636 break;
1637 case 3:
1638 return 1;
1639 }
1640 gen_op_iwmmxt_movq_wRn_M0(wrd);
1641 gen_op_iwmmxt_set_mup();
1642 gen_op_iwmmxt_set_cup();
1643 break;
1644 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1645 wrd = (insn >> 12) & 0xf;
1646 rd0 = (insn >> 16) & 0xf;
1647 rd1 = (insn >> 0) & 0xf;
1648 gen_op_iwmmxt_movq_M0_wRn(rd0);
1649 if (insn & (1 << 22)) {
1650 if (insn & (1 << 20))
1651 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1652 else
1653 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1654 } else {
1655 if (insn & (1 << 20))
1656 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1657 else
1658 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1659 }
1660 gen_op_iwmmxt_movq_wRn_M0(wrd);
1661 gen_op_iwmmxt_set_mup();
1662 gen_op_iwmmxt_set_cup();
1663 break;
1664 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1665 wrd = (insn >> 12) & 0xf;
1666 rd0 = (insn >> 16) & 0xf;
1667 rd1 = (insn >> 0) & 0xf;
1668 gen_op_iwmmxt_movq_M0_wRn(rd0);
1669 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1670 tcg_gen_andi_i32(tmp, tmp, 7);
1671 iwmmxt_load_reg(cpu_V1, rd1);
1672 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1673 dead_tmp(tmp);
1674 gen_op_iwmmxt_movq_wRn_M0(wrd);
1675 gen_op_iwmmxt_set_mup();
1676 break;
1677 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1678 if (((insn >> 6) & 3) == 3)
1679 return 1;
1680 rd = (insn >> 12) & 0xf;
1681 wrd = (insn >> 16) & 0xf;
1682 tmp = load_reg(s, rd);
1683 gen_op_iwmmxt_movq_M0_wRn(wrd);
1684 switch ((insn >> 6) & 3) {
1685 case 0:
1686 tmp2 = tcg_const_i32(0xff);
1687 tmp3 = tcg_const_i32((insn & 7) << 3);
1688 break;
1689 case 1:
1690 tmp2 = tcg_const_i32(0xffff);
1691 tmp3 = tcg_const_i32((insn & 3) << 4);
1692 break;
1693 case 2:
1694 tmp2 = tcg_const_i32(0xffffffff);
1695 tmp3 = tcg_const_i32((insn & 1) << 5);
1696 break;
1697 default:
1698 TCGV_UNUSED(tmp2);
1699 TCGV_UNUSED(tmp3);
1700 }
1701 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1702 tcg_temp_free(tmp3);
1703 tcg_temp_free(tmp2);
1704 dead_tmp(tmp);
1705 gen_op_iwmmxt_movq_wRn_M0(wrd);
1706 gen_op_iwmmxt_set_mup();
1707 break;
1708 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1709 rd = (insn >> 12) & 0xf;
1710 wrd = (insn >> 16) & 0xf;
1711 if (rd == 15 || ((insn >> 22) & 3) == 3)
1712 return 1;
1713 gen_op_iwmmxt_movq_M0_wRn(wrd);
1714 tmp = new_tmp();
1715 switch ((insn >> 22) & 3) {
1716 case 0:
1717 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1718 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1719 if (insn & 8) {
1720 tcg_gen_ext8s_i32(tmp, tmp);
1721 } else {
1722 tcg_gen_andi_i32(tmp, tmp, 0xff);
1723 }
1724 break;
1725 case 1:
1726 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1727 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1728 if (insn & 8) {
1729 tcg_gen_ext16s_i32(tmp, tmp);
1730 } else {
1731 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1732 }
1733 break;
1734 case 2:
1735 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1736 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1737 break;
1738 }
1739 store_reg(s, rd, tmp);
1740 break;
1741 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1742 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1743 return 1;
1744 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1745 switch ((insn >> 22) & 3) {
1746 case 0:
1747 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1748 break;
1749 case 1:
1750 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1751 break;
1752 case 2:
1753 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1754 break;
1755 }
1756 tcg_gen_shli_i32(tmp, tmp, 28);
1757 gen_set_nzcv(tmp);
1758 dead_tmp(tmp);
1759 break;
1760 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1761 if (((insn >> 6) & 3) == 3)
1762 return 1;
1763 rd = (insn >> 12) & 0xf;
1764 wrd = (insn >> 16) & 0xf;
1765 tmp = load_reg(s, rd);
1766 switch ((insn >> 6) & 3) {
1767 case 0:
1768 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1769 break;
1770 case 1:
1771 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1772 break;
1773 case 2:
1774 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1775 break;
1776 }
1777 dead_tmp(tmp);
1778 gen_op_iwmmxt_movq_wRn_M0(wrd);
1779 gen_op_iwmmxt_set_mup();
1780 break;
1781 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1782 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1783 return 1;
1784 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1785 tmp2 = new_tmp();
1786 tcg_gen_mov_i32(tmp2, tmp);
1787 switch ((insn >> 22) & 3) {
1788 case 0:
1789 for (i = 0; i < 7; i ++) {
1790 tcg_gen_shli_i32(tmp2, tmp2, 4);
1791 tcg_gen_and_i32(tmp, tmp, tmp2);
1792 }
1793 break;
1794 case 1:
1795 for (i = 0; i < 3; i ++) {
1796 tcg_gen_shli_i32(tmp2, tmp2, 8);
1797 tcg_gen_and_i32(tmp, tmp, tmp2);
1798 }
1799 break;
1800 case 2:
1801 tcg_gen_shli_i32(tmp2, tmp2, 16);
1802 tcg_gen_and_i32(tmp, tmp, tmp2);
1803 break;
1804 }
1805 gen_set_nzcv(tmp);
1806 dead_tmp(tmp2);
1807 dead_tmp(tmp);
1808 break;
1809 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1810 wrd = (insn >> 12) & 0xf;
1811 rd0 = (insn >> 16) & 0xf;
1812 gen_op_iwmmxt_movq_M0_wRn(rd0);
1813 switch ((insn >> 22) & 3) {
1814 case 0:
1815 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1816 break;
1817 case 1:
1818 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1819 break;
1820 case 2:
1821 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1822 break;
1823 case 3:
1824 return 1;
1825 }
1826 gen_op_iwmmxt_movq_wRn_M0(wrd);
1827 gen_op_iwmmxt_set_mup();
1828 break;
1829 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1830 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1831 return 1;
1832 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1833 tmp2 = new_tmp();
1834 tcg_gen_mov_i32(tmp2, tmp);
1835 switch ((insn >> 22) & 3) {
1836 case 0:
1837 for (i = 0; i < 7; i ++) {
1838 tcg_gen_shli_i32(tmp2, tmp2, 4);
1839 tcg_gen_or_i32(tmp, tmp, tmp2);
1840 }
1841 break;
1842 case 1:
1843 for (i = 0; i < 3; i ++) {
1844 tcg_gen_shli_i32(tmp2, tmp2, 8);
1845 tcg_gen_or_i32(tmp, tmp, tmp2);
1846 }
1847 break;
1848 case 2:
1849 tcg_gen_shli_i32(tmp2, tmp2, 16);
1850 tcg_gen_or_i32(tmp, tmp, tmp2);
1851 break;
1852 }
1853 gen_set_nzcv(tmp);
1854 dead_tmp(tmp2);
1855 dead_tmp(tmp);
1856 break;
1857 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1858 rd = (insn >> 12) & 0xf;
1859 rd0 = (insn >> 16) & 0xf;
1860 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1861 return 1;
1862 gen_op_iwmmxt_movq_M0_wRn(rd0);
1863 tmp = new_tmp();
1864 switch ((insn >> 22) & 3) {
1865 case 0:
1866 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1867 break;
1868 case 1:
1869 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1870 break;
1871 case 2:
1872 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1873 break;
1874 }
1875 store_reg(s, rd, tmp);
1876 break;
1877 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1878 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1879 wrd = (insn >> 12) & 0xf;
1880 rd0 = (insn >> 16) & 0xf;
1881 rd1 = (insn >> 0) & 0xf;
1882 gen_op_iwmmxt_movq_M0_wRn(rd0);
1883 switch ((insn >> 22) & 3) {
1884 case 0:
1885 if (insn & (1 << 21))
1886 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1887 else
1888 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1889 break;
1890 case 1:
1891 if (insn & (1 << 21))
1892 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1893 else
1894 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1895 break;
1896 case 2:
1897 if (insn & (1 << 21))
1898 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1899 else
1900 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1901 break;
1902 case 3:
1903 return 1;
1904 }
1905 gen_op_iwmmxt_movq_wRn_M0(wrd);
1906 gen_op_iwmmxt_set_mup();
1907 gen_op_iwmmxt_set_cup();
1908 break;
1909 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1910 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1911 wrd = (insn >> 12) & 0xf;
1912 rd0 = (insn >> 16) & 0xf;
1913 gen_op_iwmmxt_movq_M0_wRn(rd0);
1914 switch ((insn >> 22) & 3) {
1915 case 0:
1916 if (insn & (1 << 21))
1917 gen_op_iwmmxt_unpacklsb_M0();
1918 else
1919 gen_op_iwmmxt_unpacklub_M0();
1920 break;
1921 case 1:
1922 if (insn & (1 << 21))
1923 gen_op_iwmmxt_unpacklsw_M0();
1924 else
1925 gen_op_iwmmxt_unpackluw_M0();
1926 break;
1927 case 2:
1928 if (insn & (1 << 21))
1929 gen_op_iwmmxt_unpacklsl_M0();
1930 else
1931 gen_op_iwmmxt_unpacklul_M0();
1932 break;
1933 case 3:
1934 return 1;
1935 }
1936 gen_op_iwmmxt_movq_wRn_M0(wrd);
1937 gen_op_iwmmxt_set_mup();
1938 gen_op_iwmmxt_set_cup();
1939 break;
1940 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1941 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1942 wrd = (insn >> 12) & 0xf;
1943 rd0 = (insn >> 16) & 0xf;
1944 gen_op_iwmmxt_movq_M0_wRn(rd0);
1945 switch ((insn >> 22) & 3) {
1946 case 0:
1947 if (insn & (1 << 21))
1948 gen_op_iwmmxt_unpackhsb_M0();
1949 else
1950 gen_op_iwmmxt_unpackhub_M0();
1951 break;
1952 case 1:
1953 if (insn & (1 << 21))
1954 gen_op_iwmmxt_unpackhsw_M0();
1955 else
1956 gen_op_iwmmxt_unpackhuw_M0();
1957 break;
1958 case 2:
1959 if (insn & (1 << 21))
1960 gen_op_iwmmxt_unpackhsl_M0();
1961 else
1962 gen_op_iwmmxt_unpackhul_M0();
1963 break;
1964 case 3:
1965 return 1;
1966 }
1967 gen_op_iwmmxt_movq_wRn_M0(wrd);
1968 gen_op_iwmmxt_set_mup();
1969 gen_op_iwmmxt_set_cup();
1970 break;
1971 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1972 case 0x214: case 0x614: case 0xa14: case 0xe14:
1973 if (((insn >> 22) & 3) == 0)
1974 return 1;
1975 wrd = (insn >> 12) & 0xf;
1976 rd0 = (insn >> 16) & 0xf;
1977 gen_op_iwmmxt_movq_M0_wRn(rd0);
1978 tmp = new_tmp();
1979 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1980 dead_tmp(tmp);
1981 return 1;
1982 }
1983 switch ((insn >> 22) & 3) {
1984 case 1:
1985 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
1986 break;
1987 case 2:
1988 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
1989 break;
1990 case 3:
1991 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
1992 break;
1993 }
1994 dead_tmp(tmp);
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2000 case 0x014: case 0x414: case 0x814: case 0xc14:
2001 if (((insn >> 22) & 3) == 0)
2002 return 1;
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
2006 tmp = new_tmp();
2007 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2008 dead_tmp(tmp);
2009 return 1;
2010 }
2011 switch ((insn >> 22) & 3) {
2012 case 1:
2013 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2014 break;
2015 case 2:
2016 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2017 break;
2018 case 3:
2019 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2020 break;
2021 }
2022 dead_tmp(tmp);
2023 gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2026 break;
2027 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2028 case 0x114: case 0x514: case 0x914: case 0xd14:
2029 if (((insn >> 22) & 3) == 0)
2030 return 1;
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 16) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0);
2034 tmp = new_tmp();
2035 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2036 dead_tmp(tmp);
2037 return 1;
2038 }
2039 switch ((insn >> 22) & 3) {
2040 case 1:
2041 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2042 break;
2043 case 2:
2044 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2045 break;
2046 case 3:
2047 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2048 break;
2049 }
2050 dead_tmp(tmp);
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2054 break;
2055 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2056 case 0x314: case 0x714: case 0xb14: case 0xf14:
2057 if (((insn >> 22) & 3) == 0)
2058 return 1;
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
2062 tmp = new_tmp();
2063 switch ((insn >> 22) & 3) {
2064 case 1:
2065 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2066 dead_tmp(tmp);
2067 return 1;
2068 }
2069 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2070 break;
2071 case 2:
2072 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2073 dead_tmp(tmp);
2074 return 1;
2075 }
2076 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2077 break;
2078 case 3:
2079 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2080 dead_tmp(tmp);
2081 return 1;
2082 }
2083 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2084 break;
2085 }
2086 dead_tmp(tmp);
2087 gen_op_iwmmxt_movq_wRn_M0(wrd);
2088 gen_op_iwmmxt_set_mup();
2089 gen_op_iwmmxt_set_cup();
2090 break;
2091 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2092 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2093 wrd = (insn >> 12) & 0xf;
2094 rd0 = (insn >> 16) & 0xf;
2095 rd1 = (insn >> 0) & 0xf;
2096 gen_op_iwmmxt_movq_M0_wRn(rd0);
2097 switch ((insn >> 22) & 3) {
2098 case 0:
2099 if (insn & (1 << 21))
2100 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2101 else
2102 gen_op_iwmmxt_minub_M0_wRn(rd1);
2103 break;
2104 case 1:
2105 if (insn & (1 << 21))
2106 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2107 else
2108 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2109 break;
2110 case 2:
2111 if (insn & (1 << 21))
2112 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2113 else
2114 gen_op_iwmmxt_minul_M0_wRn(rd1);
2115 break;
2116 case 3:
2117 return 1;
2118 }
2119 gen_op_iwmmxt_movq_wRn_M0(wrd);
2120 gen_op_iwmmxt_set_mup();
2121 break;
2122 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2123 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2124 wrd = (insn >> 12) & 0xf;
2125 rd0 = (insn >> 16) & 0xf;
2126 rd1 = (insn >> 0) & 0xf;
2127 gen_op_iwmmxt_movq_M0_wRn(rd0);
2128 switch ((insn >> 22) & 3) {
2129 case 0:
2130 if (insn & (1 << 21))
2131 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2132 else
2133 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2134 break;
2135 case 1:
2136 if (insn & (1 << 21))
2137 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2138 else
2139 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2140 break;
2141 case 2:
2142 if (insn & (1 << 21))
2143 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2144 else
2145 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2146 break;
2147 case 3:
2148 return 1;
2149 }
2150 gen_op_iwmmxt_movq_wRn_M0(wrd);
2151 gen_op_iwmmxt_set_mup();
2152 break;
2153 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2154 case 0x402: case 0x502: case 0x602: case 0x702:
2155 wrd = (insn >> 12) & 0xf;
2156 rd0 = (insn >> 16) & 0xf;
2157 rd1 = (insn >> 0) & 0xf;
2158 gen_op_iwmmxt_movq_M0_wRn(rd0);
2159 tmp = tcg_const_i32((insn >> 20) & 3);
2160 iwmmxt_load_reg(cpu_V1, rd1);
2161 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2162 tcg_temp_free(tmp);
2163 gen_op_iwmmxt_movq_wRn_M0(wrd);
2164 gen_op_iwmmxt_set_mup();
2165 break;
2166 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2167 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2168 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2169 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2170 wrd = (insn >> 12) & 0xf;
2171 rd0 = (insn >> 16) & 0xf;
2172 rd1 = (insn >> 0) & 0xf;
2173 gen_op_iwmmxt_movq_M0_wRn(rd0);
2174 switch ((insn >> 20) & 0xf) {
2175 case 0x0:
2176 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2177 break;
2178 case 0x1:
2179 gen_op_iwmmxt_subub_M0_wRn(rd1);
2180 break;
2181 case 0x3:
2182 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2183 break;
2184 case 0x4:
2185 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2186 break;
2187 case 0x5:
2188 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2189 break;
2190 case 0x7:
2191 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2192 break;
2193 case 0x8:
2194 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2195 break;
2196 case 0x9:
2197 gen_op_iwmmxt_subul_M0_wRn(rd1);
2198 break;
2199 case 0xb:
2200 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2201 break;
2202 default:
2203 return 1;
2204 }
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 gen_op_iwmmxt_set_cup();
2208 break;
2209 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2210 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2211 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2212 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2213 wrd = (insn >> 12) & 0xf;
2214 rd0 = (insn >> 16) & 0xf;
2215 gen_op_iwmmxt_movq_M0_wRn(rd0);
2216 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2217 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2218 tcg_temp_free(tmp);
2219 gen_op_iwmmxt_movq_wRn_M0(wrd);
2220 gen_op_iwmmxt_set_mup();
2221 gen_op_iwmmxt_set_cup();
2222 break;
2223 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2224 case 0x418: case 0x518: case 0x618: case 0x718:
2225 case 0x818: case 0x918: case 0xa18: case 0xb18:
2226 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2227 wrd = (insn >> 12) & 0xf;
2228 rd0 = (insn >> 16) & 0xf;
2229 rd1 = (insn >> 0) & 0xf;
2230 gen_op_iwmmxt_movq_M0_wRn(rd0);
2231 switch ((insn >> 20) & 0xf) {
2232 case 0x0:
2233 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2234 break;
2235 case 0x1:
2236 gen_op_iwmmxt_addub_M0_wRn(rd1);
2237 break;
2238 case 0x3:
2239 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2240 break;
2241 case 0x4:
2242 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2243 break;
2244 case 0x5:
2245 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2246 break;
2247 case 0x7:
2248 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2249 break;
2250 case 0x8:
2251 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2252 break;
2253 case 0x9:
2254 gen_op_iwmmxt_addul_M0_wRn(rd1);
2255 break;
2256 case 0xb:
2257 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2258 break;
2259 default:
2260 return 1;
2261 }
2262 gen_op_iwmmxt_movq_wRn_M0(wrd);
2263 gen_op_iwmmxt_set_mup();
2264 gen_op_iwmmxt_set_cup();
2265 break;
2266 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2267 case 0x408: case 0x508: case 0x608: case 0x708:
2268 case 0x808: case 0x908: case 0xa08: case 0xb08:
2269 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2270 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2271 return 1;
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
2276 switch ((insn >> 22) & 3) {
2277 case 1:
2278 if (insn & (1 << 21))
2279 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2280 else
2281 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2282 break;
2283 case 2:
2284 if (insn & (1 << 21))
2285 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2286 else
2287 gen_op_iwmmxt_packul_M0_wRn(rd1);
2288 break;
2289 case 3:
2290 if (insn & (1 << 21))
2291 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2292 else
2293 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2294 break;
2295 }
2296 gen_op_iwmmxt_movq_wRn_M0(wrd);
2297 gen_op_iwmmxt_set_mup();
2298 gen_op_iwmmxt_set_cup();
2299 break;
2300 case 0x201: case 0x203: case 0x205: case 0x207:
2301 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2302 case 0x211: case 0x213: case 0x215: case 0x217:
2303 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2304 wrd = (insn >> 5) & 0xf;
2305 rd0 = (insn >> 12) & 0xf;
2306 rd1 = (insn >> 0) & 0xf;
2307 if (rd0 == 0xf || rd1 == 0xf)
2308 return 1;
2309 gen_op_iwmmxt_movq_M0_wRn(wrd);
2310 tmp = load_reg(s, rd0);
2311 tmp2 = load_reg(s, rd1);
2312 switch ((insn >> 16) & 0xf) {
2313 case 0x0: /* TMIA */
2314 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2315 break;
2316 case 0x8: /* TMIAPH */
2317 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2318 break;
2319 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2320 if (insn & (1 << 16))
2321 tcg_gen_shri_i32(tmp, tmp, 16);
2322 if (insn & (1 << 17))
2323 tcg_gen_shri_i32(tmp2, tmp2, 16);
2324 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2325 break;
2326 default:
2327 dead_tmp(tmp2);
2328 dead_tmp(tmp);
2329 return 1;
2330 }
2331 dead_tmp(tmp2);
2332 dead_tmp(tmp);
2333 gen_op_iwmmxt_movq_wRn_M0(wrd);
2334 gen_op_iwmmxt_set_mup();
2335 break;
2336 default:
2337 return 1;
2338 }
2339
2340 return 0;
2341 }
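
/* Host-value sketch of one 16-bit lane of the WSRL case above; the
 * name is illustrative and the zeroing for counts >= the lane width is
 * the architectural behaviour, not a quote of the helper's code.
 * gen_iwmmxt_shift has already limited the count to 0..255. */
static inline uint16_t wsrlh_lane_ref(uint16_t lane, uint32_t count)
{
    return count < 16 ? (uint16_t)(lane >> count) : 0;
}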
2342
2343 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
2344 (ie. an undefined instruction). */
2345 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2346 {
2347 int acc, rd0, rd1, rdhi, rdlo;
2348 TCGv tmp, tmp2;
2349
2350 if ((insn & 0x0ff00f10) == 0x0e200010) {
2351 /* Multiply with Internal Accumulate Format */
2352 rd0 = (insn >> 12) & 0xf;
2353 rd1 = insn & 0xf;
2354 acc = (insn >> 5) & 7;
2355
2356 if (acc != 0)
2357 return 1;
2358
2359 tmp = load_reg(s, rd0);
2360 tmp2 = load_reg(s, rd1);
2361 switch ((insn >> 16) & 0xf) {
2362 case 0x0: /* MIA */
2363 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2364 break;
2365 case 0x8: /* MIAPH */
2366 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2367 break;
2368 case 0xc: /* MIABB */
2369 case 0xd: /* MIABT */
2370 case 0xe: /* MIATB */
2371 case 0xf: /* MIATT */
2372 if (insn & (1 << 16))
2373 tcg_gen_shri_i32(tmp, tmp, 16);
2374 if (insn & (1 << 17))
2375 tcg_gen_shri_i32(tmp2, tmp2, 16);
2376 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2377 break;
2378 default:
2379 return 1;
2380 }
2381 dead_tmp(tmp2);
2382 dead_tmp(tmp);
2383
2384 gen_op_iwmmxt_movq_wRn_M0(acc);
2385 return 0;
2386 }
2387
2388 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2389 /* Internal Accumulator Access Format */
2390 rdhi = (insn >> 16) & 0xf;
2391 rdlo = (insn >> 12) & 0xf;
2392 acc = insn & 7;
2393
2394 if (acc != 0)
2395 return 1;
2396
2397 if (insn & ARM_CP_RW_BIT) { /* MRA */
2398 iwmmxt_load_reg(cpu_V0, acc);
2399 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2400 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2401 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2402 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2403 } else { /* MAR */
2404 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2405 iwmmxt_store_reg(cpu_V0, acc);
2406 }
2407 return 0;
2408 }
2409
2410 return 1;
2411 }
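
/* The XScale accumulator is 40 bits wide: MRA above returns bits 31:0
 * in rdlo and bits 39:32 in rdhi, which is why rdhi is masked with
 * (1 << (40 - 32)) - 1, i.e. 0xff. */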
2412
2413 /* Disassemble a system coprocessor instruction.  Return nonzero if the
2414 instruction is not defined. */
2415 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2416 {
2417 TCGv tmp, tmp2;
2418 uint32_t rd = (insn >> 12) & 0xf;
2419 uint32_t cp = (insn >> 8) & 0xf;
2420 if (IS_USER(s)) {
2421 return 1;
2422 }
2423
2424 if (insn & ARM_CP_RW_BIT) {
2425 if (!env->cp[cp].cp_read)
2426 return 1;
2427 gen_set_pc_im(s->pc);
2428 tmp = new_tmp();
2429 tmp2 = tcg_const_i32(insn);
2430 gen_helper_get_cp(tmp, cpu_env, tmp2);
2431 tcg_temp_free(tmp2);
2432 store_reg(s, rd, tmp);
2433 } else {
2434 if (!env->cp[cp].cp_write)
2435 return 1;
2436 gen_set_pc_im(s->pc);
2437 tmp = load_reg(s, rd);
2438 tmp2 = tcg_const_i32(insn);
2439 gen_helper_set_cp(cpu_env, tmp2, tmp);
2440 tcg_temp_free(tmp2);
2441 dead_tmp(tmp);
2442 }
2443 return 0;
2444 }
2445
2446 static int cp15_user_ok(uint32_t insn)
2447 {
2448 int cpn = (insn >> 16) & 0xf;
2449 int cpm = insn & 0xf;
2450 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2451
2452 if (cpn == 13 && cpm == 0) {
2453 /* TLS register. */
2454 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2455 return 1;
2456 }
2457 if (cpn == 7) {
2458 /* ISB, DSB, DMB. */
2459 if ((cpm == 5 && op == 4)
2460 || (cpm == 10 && (op == 4 || op == 5)))
2461 return 1;
2462 }
2463 return 0;
2464 }
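
/* The encodings accepted above are the user-visible cp15 registers:
 * c13,c0,{2,3} are TPIDRURW (user read/write) and TPIDRURO (user
 * read-only, hence the ARM_CP_RW_BIT check), and c7,c5,4 / c7,c10,4 /
 * c7,c10,5 are ISB, DSB and DMB. */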
2465
2466 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2467 {
2468 TCGv tmp;
2469 int cpn = (insn >> 16) & 0xf;
2470 int cpm = insn & 0xf;
2471 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2472
2473 if (!arm_feature(env, ARM_FEATURE_V6K))
2474 return 0;
2475
2476 if (!(cpn == 13 && cpm == 0))
2477 return 0;
2478
2479 if (insn & ARM_CP_RW_BIT) {
2480 switch (op) {
2481 case 2:
2482 tmp = load_cpu_field(cp15.c13_tls1);
2483 break;
2484 case 3:
2485 tmp = load_cpu_field(cp15.c13_tls2);
2486 break;
2487 case 4:
2488 tmp = load_cpu_field(cp15.c13_tls3);
2489 break;
2490 default:
2491 return 0;
2492 }
2493 store_reg(s, rd, tmp);
2494
2495 } else {
2496 tmp = load_reg(s, rd);
2497 switch (op) {
2498 case 2:
2499 store_cpu_field(tmp, cp15.c13_tls1);
2500 break;
2501 case 3:
2502 store_cpu_field(tmp, cp15.c13_tls2);
2503 break;
2504 case 4:
2505 store_cpu_field(tmp, cp15.c13_tls3);
2506 break;
2507 default:
2508 dead_tmp(tmp);
2509 return 0;
2510 }
2511 }
2512 return 1;
2513 }
2514
2515 /* Disassemble a system coprocessor (cp15) instruction.  Return nonzero if
2516 the instruction is not defined. */
2517 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2518 {
2519 uint32_t rd;
2520 TCGv tmp, tmp2;
2521
2522 /* M profile cores use memory mapped registers instead of cp15. */
2523 if (arm_feature(env, ARM_FEATURE_M))
2524 return 1;
2525
2526 if ((insn & (1 << 25)) == 0) {
2527 if (insn & (1 << 20)) {
2528 /* mrrc */
2529 return 1;
2530 }
2531 /* mcrr. Used for block cache operations, so implement as no-op. */
2532 return 0;
2533 }
2534 if ((insn & (1 << 4)) == 0) {
2535 /* cdp */
2536 return 1;
2537 }
2538 if (IS_USER(s) && !cp15_user_ok(insn)) {
2539 return 1;
2540 }
2541 if ((insn & 0x0fff0fff) == 0x0e070f90
2542 || (insn & 0x0fff0fff) == 0x0e070f58) {
2543 /* Wait for interrupt. */
2544 gen_set_pc_im(s->pc);
2545 s->is_jmp = DISAS_WFI;
2546 return 0;
2547 }
2548 rd = (insn >> 12) & 0xf;
2549
2550 if (cp15_tls_load_store(env, s, insn, rd))
2551 return 0;
2552
2553 tmp2 = tcg_const_i32(insn);
2554 if (insn & ARM_CP_RW_BIT) {
2555 tmp = new_tmp();
2556 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2557 /* If the destination register is r15 then the condition codes are set. */
2558 if (rd != 15)
2559 store_reg(s, rd, tmp);
2560 else
2561 dead_tmp(tmp);
2562 } else {
2563 tmp = load_reg(s, rd);
2564 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2565 dead_tmp(tmp);
2566 /* Normally we would always end the TB here, but Linux
2567 * arch/arm/mach-pxa/sleep.S expects two instructions following
2568 * an MMU enable to execute from cache. Imitate this behaviour. */
2569 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2570 (insn & 0x0fff0fff) != 0x0e010f10)
2571 gen_lookup_tb(s);
2572 }
2573 tcg_temp_free_i32(tmp2);
2574 return 0;
2575 }
2576
2577 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2578 #define VFP_SREG(insn, bigbit, smallbit) \
2579 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2580 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2581 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2582 reg = (((insn) >> (bigbit)) & 0x0f) \
2583 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2584 } else { \
2585 if (insn & (1 << (smallbit))) \
2586 return 1; \
2587 reg = ((insn) >> (bigbit)) & 0x0f; \
2588 }} while (0)
2589
2590 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2591 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2592 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2593 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2594 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2595 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
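
/* Worked reference for the register-number macros above (the helper
 * names are illustrative, not part of the translator): a
 * single-precision register number is the 4-bit field with the "small"
 * bit appended as LSB; a VFP3 double gets the extra bit as MSB. */
static inline int vfp_sreg_ref(int field4, int small)
{
    return (field4 << 1) | small;   /* S0..S31 */
}
static inline int vfp_dreg_ref(int field4, int small)
{
    return field4 | (small << 4);   /* D0..D31 on VFP3 */
}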
2596
2597 /* Move between integer and VFP cores. */
2598 static TCGv gen_vfp_mrs(void)
2599 {
2600 TCGv tmp = new_tmp();
2601 tcg_gen_mov_i32(tmp, cpu_F0s);
2602 return tmp;
2603 }
2604
2605 static void gen_vfp_msr(TCGv tmp)
2606 {
2607 tcg_gen_mov_i32(cpu_F0s, tmp);
2608 dead_tmp(tmp);
2609 }
2610
2611 static void gen_neon_dup_u8(TCGv var, int shift)
2612 {
2613 TCGv tmp = new_tmp();
2614 if (shift)
2615 tcg_gen_shri_i32(var, var, shift);
2616 tcg_gen_ext8u_i32(var, var);
2617 tcg_gen_shli_i32(tmp, var, 8);
2618 tcg_gen_or_i32(var, var, tmp);
2619 tcg_gen_shli_i32(tmp, var, 16);
2620 tcg_gen_or_i32(var, var, tmp);
2621 dead_tmp(tmp);
2622 }
2623
2624 static void gen_neon_dup_low16(TCGv var)
2625 {
2626 TCGv tmp = new_tmp();
2627 tcg_gen_ext16u_i32(var, var);
2628 tcg_gen_shli_i32(tmp, var, 16);
2629 tcg_gen_or_i32(var, var, tmp);
2630 dead_tmp(tmp);
2631 }
2632
2633 static void gen_neon_dup_high16(TCGv var)
2634 {
2635 TCGv tmp = new_tmp();
2636 tcg_gen_andi_i32(var, var, 0xffff0000);
2637 tcg_gen_shri_i32(tmp, var, 16);
2638 tcg_gen_or_i32(var, var, tmp);
2639 dead_tmp(tmp);
2640 }
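
/* Host-value reference for the dup helpers above (illustrative names):
 * replicating one byte or halfword across a 32-bit element is a
 * multiply by a repeating-ones constant. */
static inline uint32_t neon_dup_u8_ref(uint32_t v, int shift)
{
    return ((v >> shift) & 0xff) * 0x01010101u;
}
static inline uint32_t neon_dup_u16_ref(uint32_t v)
{
    return (v & 0xffff) * 0x00010001u;
}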
2641
2642 /* Disassemble a VFP instruction.  Returns nonzero if an error occurred
2643 (i.e. an undefined instruction). */
2644 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2645 {
2646 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2647 int dp, veclen;
2648 TCGv addr;
2649 TCGv tmp;
2650 TCGv tmp2;
2651
2652 if (!arm_feature(env, ARM_FEATURE_VFP))
2653 return 1;
2654
2655 if (!s->vfp_enabled) {
2656 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2657 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2658 return 1;
2659 rn = (insn >> 16) & 0xf;
2660 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2661 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2662 return 1;
2663 }
2664 dp = ((insn & 0xf00) == 0xb00);
2665 switch ((insn >> 24) & 0xf) {
2666 case 0xe:
2667 if (insn & (1 << 4)) {
2668 /* single register transfer */
2669 rd = (insn >> 12) & 0xf;
2670 if (dp) {
2671 int size;
2672 int pass;
2673
2674 VFP_DREG_N(rn, insn);
2675 if (insn & 0xf)
2676 return 1;
2677 if (insn & 0x00c00060
2678 && !arm_feature(env, ARM_FEATURE_NEON))
2679 return 1;
2680
2681 pass = (insn >> 21) & 1;
2682 if (insn & (1 << 22)) {
2683 size = 0;
2684 offset = ((insn >> 5) & 3) * 8;
2685 } else if (insn & (1 << 5)) {
2686 size = 1;
2687 offset = (insn & (1 << 6)) ? 16 : 0;
2688 } else {
2689 size = 2;
2690 offset = 0;
2691 }
2692 if (insn & ARM_CP_RW_BIT) {
2693 /* vfp->arm */
2694 tmp = neon_load_reg(rn, pass);
2695 switch (size) {
2696 case 0:
2697 if (offset)
2698 tcg_gen_shri_i32(tmp, tmp, offset);
2699 if (insn & (1 << 23))
2700 gen_uxtb(tmp);
2701 else
2702 gen_sxtb(tmp);
2703 break;
2704 case 1:
2705 if (insn & (1 << 23)) {
2706 if (offset) {
2707 tcg_gen_shri_i32(tmp, tmp, 16);
2708 } else {
2709 gen_uxth(tmp);
2710 }
2711 } else {
2712 if (offset) {
2713 tcg_gen_sari_i32(tmp, tmp, 16);
2714 } else {
2715 gen_sxth(tmp);
2716 }
2717 }
2718 break;
2719 case 2:
2720 break;
2721 }
2722 store_reg(s, rd, tmp);
2723 } else {
2724 /* arm->vfp */
2725 tmp = load_reg(s, rd);
2726 if (insn & (1 << 23)) {
2727 /* VDUP */
2728 if (size == 0) {
2729 gen_neon_dup_u8(tmp, 0);
2730 } else if (size == 1) {
2731 gen_neon_dup_low16(tmp);
2732 }
2733 for (n = 0; n <= pass * 2; n++) {
2734 tmp2 = new_tmp();
2735 tcg_gen_mov_i32(tmp2, tmp);
2736 neon_store_reg(rn, n, tmp2);
2737 }
2738 neon_store_reg(rn, n, tmp);
2739 } else {
2740 /* VMOV */
2741 switch (size) {
2742 case 0:
2743 tmp2 = neon_load_reg(rn, pass);
2744 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2745 dead_tmp(tmp2);
2746 break;
2747 case 1:
2748 tmp2 = neon_load_reg(rn, pass);
2749 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2750 dead_tmp(tmp2);
2751 break;
2752 case 2:
2753 break;
2754 }
2755 neon_store_reg(rn, pass, tmp);
2756 }
2757 }
2758 } else { /* !dp */
2759 if ((insn & 0x6f) != 0x00)
2760 return 1;
2761 rn = VFP_SREG_N(insn);
2762 if (insn & ARM_CP_RW_BIT) {
2763 /* vfp->arm */
2764 if (insn & (1 << 21)) {
2765 /* system register */
2766 rn >>= 1;
2767
2768 switch (rn) {
2769 case ARM_VFP_FPSID:
2770 /* VFP2 allows access to FPSID from userspace.
2771 VFP3 restricts all id registers to privileged
2772 accesses. */
2773 if (IS_USER(s)
2774 && arm_feature(env, ARM_FEATURE_VFP3))
2775 return 1;
2776 tmp = load_cpu_field(vfp.xregs[rn]);
2777 break;
2778 case ARM_VFP_FPEXC:
2779 if (IS_USER(s))
2780 return 1;
2781 tmp = load_cpu_field(vfp.xregs[rn]);
2782 break;
2783 case ARM_VFP_FPINST:
2784 case ARM_VFP_FPINST2:
2785 /* Not present in VFP3. */
2786 if (IS_USER(s)
2787 || arm_feature(env, ARM_FEATURE_VFP3))
2788 return 1;
2789 tmp = load_cpu_field(vfp.xregs[rn]);
2790 break;
2791 case ARM_VFP_FPSCR:
2792 if (rd == 15) {
2793 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2794 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2795 } else {
2796 tmp = new_tmp();
2797 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2798 }
2799 break;
2800 case ARM_VFP_MVFR0:
2801 case ARM_VFP_MVFR1:
2802 if (IS_USER(s)
2803 || !arm_feature(env, ARM_FEATURE_VFP3))
2804 return 1;
2805 tmp = load_cpu_field(vfp.xregs[rn]);
2806 break;
2807 default:
2808 return 1;
2809 }
2810 } else {
2811 gen_mov_F0_vreg(0, rn);
2812 tmp = gen_vfp_mrs();
2813 }
2814 if (rd == 15) {
2815 /* Set the 4 flag bits in the CPSR. */
2816 gen_set_nzcv(tmp);
2817 dead_tmp(tmp);
2818 } else {
2819 store_reg(s, rd, tmp);
2820 }
2821 } else {
2822 /* arm->vfp */
2823 tmp = load_reg(s, rd);
2824 if (insn & (1 << 21)) {
2825 rn >>= 1;
2826 /* system register */
2827 switch (rn) {
2828 case ARM_VFP_FPSID:
2829 case ARM_VFP_MVFR0:
2830 case ARM_VFP_MVFR1:
2831 /* Writes are ignored. */
2832 break;
2833 case ARM_VFP_FPSCR:
2834 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2835 dead_tmp(tmp);
2836 gen_lookup_tb(s);
2837 break;
2838 case ARM_VFP_FPEXC:
2839 if (IS_USER(s))
2840 return 1;
2841 /* TODO: VFP subarchitecture support.
2842 * For now, keep the EN bit only */
2843 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2844 store_cpu_field(tmp, vfp.xregs[rn]);
2845 gen_lookup_tb(s);
2846 break;
2847 case ARM_VFP_FPINST:
2848 case ARM_VFP_FPINST2:
2849 store_cpu_field(tmp, vfp.xregs[rn]);
2850 break;
2851 default:
2852 return 1;
2853 }
2854 } else {
2855 gen_vfp_msr(tmp);
2856 gen_mov_vreg_F0(0, rn);
2857 }
2858 }
2859 }
2860 } else {
2861 /* data processing */
2862 /* The opcode is in bits 23, 21, 20 and 6. */
2863 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2864 if (dp) {
2865 if (op == 15) {
2866 /* rn is opcode */
2867 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2868 } else {
2869 /* rn is register number */
2870 VFP_DREG_N(rn, insn);
2871 }
2872
2873 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2874 /* Integer or single precision destination. */
2875 rd = VFP_SREG_D(insn);
2876 } else {
2877 VFP_DREG_D(rd, insn);
2878 }
2879 if (op == 15 &&
2880 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2881 /* VCVT from int is always from S reg regardless of dp bit.
2882 * VCVT with immediate frac_bits has same format as SREG_M
2883 */
2884 rm = VFP_SREG_M(insn);
2885 } else {
2886 VFP_DREG_M(rm, insn);
2887 }
2888 } else {
2889 rn = VFP_SREG_N(insn);
2890 if (op == 15 && rn == 15) {
2891 /* Double precision destination. */
2892 VFP_DREG_D(rd, insn);
2893 } else {
2894 rd = VFP_SREG_D(insn);
2895 }
2896 /* NB that we implicitly rely on the encoding for the frac_bits
2897 * in VCVT of fixed to float being the same as that of an SREG_M
2898 */
2899 rm = VFP_SREG_M(insn);
2900 }
2901
2902 veclen = s->vec_len;
2903 if (op == 15 && rn > 3)
2904 veclen = 0;
2905
2906 /* Shut up compiler warnings. */
2907 delta_m = 0;
2908 delta_d = 0;
2909 bank_mask = 0;
2910
2911 if (veclen > 0) {
2912 if (dp)
2913 bank_mask = 0xc;
2914 else
2915 bank_mask = 0x18;
2916
2917 /* Figure out what type of vector operation this is. */
2918 if ((rd & bank_mask) == 0) {
2919 /* scalar */
2920 veclen = 0;
2921 } else {
2922 if (dp)
2923 delta_d = (s->vec_stride >> 1) + 1;
2924 else
2925 delta_d = s->vec_stride + 1;
2926
2927 if ((rm & bank_mask) == 0) {
2928 /* mixed scalar/vector */
2929 delta_m = 0;
2930 } else {
2931 /* vector */
2932 delta_m = delta_d;
2933 }
2934 }
2935 }
2936
2937 /* Load the initial operands. */
2938 if (op == 15) {
2939 switch (rn) {
2940 case 16:
2941 case 17:
2942 /* Integer source */
2943 gen_mov_F0_vreg(0, rm);
2944 break;
2945 case 8:
2946 case 9:
2947 /* Compare */
2948 gen_mov_F0_vreg(dp, rd);
2949 gen_mov_F1_vreg(dp, rm);
2950 break;
2951 case 10:
2952 case 11:
2953 /* Compare with zero */
2954 gen_mov_F0_vreg(dp, rd);
2955 gen_vfp_F1_ld0(dp);
2956 break;
2957 case 20:
2958 case 21:
2959 case 22:
2960 case 23:
2961 case 28:
2962 case 29:
2963 case 30:
2964 case 31:
2965 /* Source and destination the same. */
2966 gen_mov_F0_vreg(dp, rd);
2967 break;
2968 default:
2969 /* One source operand. */
2970 gen_mov_F0_vreg(dp, rm);
2971 break;
2972 }
2973 } else {
2974 /* Two source operands. */
2975 gen_mov_F0_vreg(dp, rn);
2976 gen_mov_F1_vreg(dp, rm);
2977 }
2978
2979 for (;;) {
2980 /* Perform the calculation. */
2981 switch (op) {
2982 case 0: /* mac: fd + (fn * fm) */
2983 gen_vfp_mul(dp);
2984 gen_mov_F1_vreg(dp, rd);
2985 gen_vfp_add(dp);
2986 break;
2987 case 1: /* nmac: fd - (fn * fm) */
2988 gen_vfp_mul(dp);
2989 gen_vfp_neg(dp);
2990 gen_mov_F1_vreg(dp, rd);
2991 gen_vfp_add(dp);
2992 break;
2993 case 2: /* msc: -fd + (fn * fm) */
2994 gen_vfp_mul(dp);
2995 gen_mov_F1_vreg(dp, rd);
2996 gen_vfp_sub(dp);
2997 break;
2998 case 3: /* nmsc: -fd - (fn * fm) */
2999 gen_vfp_mul(dp);
3000 gen_vfp_neg(dp);
3001 gen_mov_F1_vreg(dp, rd);
3002 gen_vfp_sub(dp);
3003 break;
3004 case 4: /* mul: fn * fm */
3005 gen_vfp_mul(dp);
3006 break;
3007 case 5: /* nmul: -(fn * fm) */
3008 gen_vfp_mul(dp);
3009 gen_vfp_neg(dp);
3010 break;
3011 case 6: /* add: fn + fm */
3012 gen_vfp_add(dp);
3013 break;
3014 case 7: /* sub: fn - fm */
3015 gen_vfp_sub(dp);
3016 break;
3017 case 8: /* div: fn / fm */
3018 gen_vfp_div(dp);
3019 break;
3020 case 14: /* fconst */
3021 if (!arm_feature(env, ARM_FEATURE_VFP3))
3022 return 1;
3023
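/* The fconst imm8 abcdefgh expands to sign = a, exponent =
 * NOT(b):b..b:cd and fraction = efgh0..0; e.g. imm8 0x70 yields
 * 0x3f800000 (1.0f) in single precision or 0x3ff0000000000000
 * (1.0) in double precision. */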
3024 n = (insn << 12) & 0x80000000;
3025 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3026 if (dp) {
3027 if (i & 0x40)
3028 i |= 0x3f80;
3029 else
3030 i |= 0x4000;
3031 n |= i << 16;
3032 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3033 } else {
3034 if (i & 0x40)
3035 i |= 0x780;
3036 else
3037 i |= 0x800;
3038 n |= i << 19;
3039 tcg_gen_movi_i32(cpu_F0s, n);
3040 }
3041 break;
3042 case 15: /* extension space */
3043 switch (rn) {
3044 case 0: /* cpy */
3045 /* no-op */
3046 break;
3047 case 1: /* abs */
3048 gen_vfp_abs(dp);
3049 break;
3050 case 2: /* neg */
3051 gen_vfp_neg(dp);
3052 break;
3053 case 3: /* sqrt */
3054 gen_vfp_sqrt(dp);
3055 break;
3056 case 4: /* vcvtb.f32.f16 */
3057 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3058 return 1;
3059 tmp = gen_vfp_mrs();
3060 tcg_gen_ext16u_i32(tmp, tmp);
3061 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3062 dead_tmp(tmp);
3063 break;
3064 case 5: /* vcvtt.f32.f16 */
3065 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3066 return 1;
3067 tmp = gen_vfp_mrs();
3068 tcg_gen_shri_i32(tmp, tmp, 16);
3069 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3070 dead_tmp(tmp);
3071 break;
3072 case 6: /* vcvtb.f16.f32 */
3073 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3074 return 1;
3075 tmp = new_tmp();
3076 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3077 gen_mov_F0_vreg(0, rd);
3078 tmp2 = gen_vfp_mrs();
3079 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3080 tcg_gen_or_i32(tmp, tmp, tmp2);
3081 dead_tmp(tmp2);
3082 gen_vfp_msr(tmp);
3083 break;
3084 case 7: /* vcvtt.f16.f32 */
3085 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3086 return 1;
3087 tmp = new_tmp();
3088 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3089 tcg_gen_shli_i32(tmp, tmp, 16);
3090 gen_mov_F0_vreg(0, rd);
3091 tmp2 = gen_vfp_mrs();
3092 tcg_gen_ext16u_i32(tmp2, tmp2);
3093 tcg_gen_or_i32(tmp, tmp, tmp2);
3094 dead_tmp(tmp2);
3095 gen_vfp_msr(tmp);
3096 break;
3097 case 8: /* cmp */
3098 gen_vfp_cmp(dp);
3099 break;
3100 case 9: /* cmpe */
3101 gen_vfp_cmpe(dp);
3102 break;
3103 case 10: /* cmpz */
3104 gen_vfp_cmp(dp);
3105 break;
3106 case 11: /* cmpez */
3107 gen_vfp_F1_ld0(dp);
3108 gen_vfp_cmpe(dp);
3109 break;
3110 case 15: /* single<->double conversion */
3111 if (dp)
3112 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3113 else
3114 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3115 break;
3116 case 16: /* fuito */
3117 gen_vfp_uito(dp);
3118 break;
3119 case 17: /* fsito */
3120 gen_vfp_sito(dp);
3121 break;
3122 case 20: /* fshto */
3123 if (!arm_feature(env, ARM_FEATURE_VFP3))
3124 return 1;
3125 gen_vfp_shto(dp, 16 - rm);
3126 break;
3127 case 21: /* fslto */
3128 if (!arm_feature(env, ARM_FEATURE_VFP3))
3129 return 1;
3130 gen_vfp_slto(dp, 32 - rm);
3131 break;
3132 case 22: /* fuhto */
3133 if (!arm_feature(env, ARM_FEATURE_VFP3))
3134 return 1;
3135 gen_vfp_uhto(dp, 16 - rm);
3136 break;
3137 case 23: /* fulto */
3138 if (!arm_feature(env, ARM_FEATURE_VFP3))
3139 return 1;
3140 gen_vfp_ulto(dp, 32 - rm);
3141 break;
3142 case 24: /* ftoui */
3143 gen_vfp_toui(dp);
3144 break;
3145 case 25: /* ftouiz */
3146 gen_vfp_touiz(dp);
3147 break;
3148 case 26: /* ftosi */
3149 gen_vfp_tosi(dp);
3150 break;
3151 case 27: /* ftosiz */
3152 gen_vfp_tosiz(dp);
3153 break;
3154 case 28: /* ftosh */
3155 if (!arm_feature(env, ARM_FEATURE_VFP3))
3156 return 1;
3157 gen_vfp_tosh(dp, 16 - rm);
3158 break;
3159 case 29: /* ftosl */
3160 if (!arm_feature(env, ARM_FEATURE_VFP3))
3161 return 1;
3162 gen_vfp_tosl(dp, 32 - rm);
3163 break;
3164 case 30: /* ftouh */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
3167 gen_vfp_touh(dp, 16 - rm);
3168 break;
3169 case 31: /* ftoul */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
3172 gen_vfp_toul(dp, 32 - rm);
3173 break;
3174 default: /* undefined */
3175 printf ("rn:%d\n", rn);
3176 return 1;
3177 }
3178 break;
3179 default: /* undefined */
3180 printf ("op:%d\n", op);
3181 return 1;
3182 }
3183
3184 /* Write back the result. */
3185 if (op == 15 && (rn >= 8 && rn <= 11))
3186 ; /* Comparison, do nothing. */
3187 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3188 /* VCVT double to int: always integer result. */
3189 gen_mov_vreg_F0(0, rd);
3190 else if (op == 15 && rn == 15)
3191 /* conversion */
3192 gen_mov_vreg_F0(!dp, rd);
3193 else
3194 gen_mov_vreg_F0(dp, rd);
3195
3196 /* break out of the loop if we have finished */
3197 if (veclen == 0)
3198 break;
3199
3200 if (op == 15 && delta_m == 0) {
3201 /* single source one-many */
3202 while (veclen--) {
3203 rd = ((rd + delta_d) & (bank_mask - 1))
3204 | (rd & bank_mask);
3205 gen_mov_vreg_F0(dp, rd);
3206 }
3207 break;
3208 }
3209 /* Set up the next operands. */
3210 veclen--;
3211 rd = ((rd + delta_d) & (bank_mask - 1))
3212 | (rd & bank_mask);
3213
3214 if (op == 15) {
3215 /* One source operand. */
3216 rm = ((rm + delta_m) & (bank_mask - 1))
3217 | (rm & bank_mask);
3218 gen_mov_F0_vreg(dp, rm);
3219 } else {
3220 /* Two source operands. */
3221 rn = ((rn + delta_d) & (bank_mask - 1))
3222 | (rn & bank_mask);
3223 gen_mov_F0_vreg(dp, rn);
3224 if (delta_m) {
3225 rm = ((rm + delta_m) & (bank_mask - 1))
3226 | (rm & bank_mask);
3227 gen_mov_F1_vreg(dp, rm);
3228 }
3229 }
3230 }
3231 }
3232 break;
3233 case 0xc:
3234 case 0xd:
3235 if (dp && (insn & 0x03e00000) == 0x00400000) {
3236 /* two-register transfer */
3237 rn = (insn >> 16) & 0xf;
3238 rd = (insn >> 12) & 0xf;
3239 if (dp) {
3240 VFP_DREG_M(rm, insn);
3241 } else {
3242 rm = VFP_SREG_M(insn);
3243 }
3244
3245 if (insn & ARM_CP_RW_BIT) {
3246 /* vfp->arm */
3247 if (dp) {
3248 gen_mov_F0_vreg(0, rm * 2);
3249 tmp = gen_vfp_mrs();
3250 store_reg(s, rd, tmp);
3251 gen_mov_F0_vreg(0, rm * 2 + 1);
3252 tmp = gen_vfp_mrs();
3253 store_reg(s, rn, tmp);
3254 } else {
3255 gen_mov_F0_vreg(0, rm);
3256 tmp = gen_vfp_mrs();
3257 store_reg(s, rn, tmp);
3258 gen_mov_F0_vreg(0, rm + 1);
3259 tmp = gen_vfp_mrs();
3260 store_reg(s, rd, tmp);
3261 }
3262 } else {
3263 /* arm->vfp */
3264 if (dp) {
3265 tmp = load_reg(s, rd);
3266 gen_vfp_msr(tmp);
3267 gen_mov_vreg_F0(0, rm * 2);
3268 tmp = load_reg(s, rn);
3269 gen_vfp_msr(tmp);
3270 gen_mov_vreg_F0(0, rm * 2 + 1);
3271 } else {
3272 tmp = load_reg(s, rn);
3273 gen_vfp_msr(tmp);
3274 gen_mov_vreg_F0(0, rm);
3275 tmp = load_reg(s, rd);
3276 gen_vfp_msr(tmp);
3277 gen_mov_vreg_F0(0, rm + 1);
3278 }
3279 }
3280 } else {
3281 /* Load/store */
3282 rn = (insn >> 16) & 0xf;
3283 if (dp)
3284 VFP_DREG_D(rd, insn);
3285 else
3286 rd = VFP_SREG_D(insn);
3287 if (s->thumb && rn == 15) {
3288 addr = new_tmp();
3289 tcg_gen_movi_i32(addr, s->pc & ~2);
3290 } else {
3291 addr = load_reg(s, rn);
3292 }
3293 if ((insn & 0x01200000) == 0x01000000) {
3294 /* Single load/store */
3295 offset = (insn & 0xff) << 2;
3296 if ((insn & (1 << 23)) == 0)
3297 offset = -offset;
3298 tcg_gen_addi_i32(addr, addr, offset);
3299 if (insn & (1 << 20)) {
3300 gen_vfp_ld(s, dp, addr);
3301 gen_mov_vreg_F0(dp, rd);
3302 } else {
3303 gen_mov_F0_vreg(dp, rd);
3304 gen_vfp_st(s, dp, addr);
3305 }
3306 dead_tmp(addr);
3307 } else {
3308 /* load/store multiple */
3309 if (dp)
3310 n = (insn >> 1) & 0x7f;
3311 else
3312 n = insn & 0xff;
3313
3314 if (insn & (1 << 24)) /* pre-decrement */
3315 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3316
3317 if (dp)
3318 offset = 8;
3319 else
3320 offset = 4;
3321 for (i = 0; i < n; i++) {
3322 if (insn & ARM_CP_RW_BIT) {
3323 /* load */
3324 gen_vfp_ld(s, dp, addr);
3325 gen_mov_vreg_F0(dp, rd + i);
3326 } else {
3327 /* store */
3328 gen_mov_F0_vreg(dp, rd + i);
3329 gen_vfp_st(s, dp, addr);
3330 }
3331 tcg_gen_addi_i32(addr, addr, offset);
3332 }
3333 if (insn & (1 << 21)) {
3334 /* writeback */
3335 if (insn & (1 << 24))
3336 offset = -offset * n;
3337 else if (dp && (insn & 1))
3338 offset = 4;
3339 else
3340 offset = 0;
3341
3342 if (offset != 0)
3343 tcg_gen_addi_i32(addr, addr, offset);
3344 store_reg(s, rn, addr);
3345 } else {
3346 dead_tmp(addr);
3347 }
3348 }
3349 }
3350 break;
3351 default:
3352 /* Should never happen. */
3353 return 1;
3354 }
3355 return 0;
3356 }
3357
3358 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3359 {
3360 TranslationBlock *tb;
3361
3362 tb = s->tb;
3363 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3364 tcg_gen_goto_tb(n);
3365 gen_set_pc_im(dest);
3366 tcg_gen_exit_tb((long)tb + n);
3367 } else {
3368 gen_set_pc_im(dest);
3369 tcg_gen_exit_tb(0);
3370 }
3371 }
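
/* Direct block chaining (tcg_gen_goto_tb) is only safe when the
 * destination is on the same guest page as this TB, since other pages
 * may be remapped; otherwise we exit to the main loop, which looks the
 * target up afresh. */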
3372
3373 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3374 {
3375 if (unlikely(s->singlestep_enabled)) {
3376 /* An indirect jump so that we still trigger the debug exception. */
3377 if (s->thumb)
3378 dest |= 1;
3379 gen_bx_im(s, dest);
3380 } else {
3381 gen_goto_tb(s, 0, dest);
3382 s->is_jmp = DISAS_TB_JUMP;
3383 }
3384 }
3385
3386 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3387 {
3388 if (x)
3389 tcg_gen_sari_i32(t0, t0, 16);
3390 else
3391 gen_sxth(t0);
3392 if (y)
3393 tcg_gen_sari_i32(t1, t1, 16);
3394 else
3395 gen_sxth(t1);
3396 tcg_gen_mul_i32(t0, t0, t1);
3397 }
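
/* Host-value reference for gen_mulxy (illustrative, not used by the
 * translator): x and y select the top or bottom signed halfword of
 * each operand for the 16x16->32 multiply. */
static inline int32_t mulxy_ref(int32_t t0, int32_t t1, int x, int y)
{
    int16_t a = x ? (int16_t)(t0 >> 16) : (int16_t)t0;
    int16_t b = y ? (int16_t)(t1 >> 16) : (int16_t)t1;
    return (int32_t)a * b;
}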
3398
3399 /* Return the mask of PSR bits set by a MSR instruction. */
3400 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3401 uint32_t mask;
3402
3403 mask = 0;
3404 if (flags & (1 << 0))
3405 mask |= 0xff;
3406 if (flags & (1 << 1))
3407 mask |= 0xff00;
3408 if (flags & (1 << 2))
3409 mask |= 0xff0000;
3410 if (flags & (1 << 3))
3411 mask |= 0xff000000;
3412
3413 /* Mask out undefined bits. */
3414 mask &= ~CPSR_RESERVED;
3415 if (!arm_feature(env, ARM_FEATURE_V6))
3416 mask &= ~(CPSR_E | CPSR_GE);
3417 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3418 mask &= ~CPSR_IT;
3419 /* Mask out execution state bits. */
3420 if (!spsr)
3421 mask &= ~CPSR_EXEC;
3422 /* Mask out privileged bits. */
3423 if (IS_USER(s))
3424 mask &= CPSR_USER;
3425 return mask;
3426 }
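
/* flags is the MSR field mask: bit 0 = c (control, 0x000000ff),
 * bit 1 = x (extension, 0x0000ff00), bit 2 = s (status, 0x00ff0000),
 * bit 3 = f (flags, 0xff000000).  E.g. "msr cpsr_fc, rN" gives a raw
 * mask of 0xff0000ff before the feature and privilege masking above. */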
3427
3428 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3429 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3430 {
3431 TCGv tmp;
3432 if (spsr) {
3433 /* ??? This is also undefined in system mode. */
3434 if (IS_USER(s))
3435 return 1;
3436
3437 tmp = load_cpu_field(spsr);
3438 tcg_gen_andi_i32(tmp, tmp, ~mask);
3439 tcg_gen_andi_i32(t0, t0, mask);
3440 tcg_gen_or_i32(tmp, tmp, t0);
3441 store_cpu_field(tmp, spsr);
3442 } else {
3443 gen_set_cpsr(t0, mask);
3444 }
3445 dead_tmp(t0);
3446 gen_lookup_tb(s);
3447 return 0;
3448 }
3449
3450 /* Returns nonzero if access to the PSR is not permitted. */
3451 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3452 {
3453 TCGv tmp;
3454 tmp = new_tmp();
3455 tcg_gen_movi_i32(tmp, val);
3456 return gen_set_psr(s, mask, spsr, tmp);
3457 }
3458
3459 /* Generate an old-style exception return. Marks pc as dead. */
3460 static void gen_exception_return(DisasContext *s, TCGv pc)
3461 {
3462 TCGv tmp;
3463 store_reg(s, 15, pc);
3464 tmp = load_cpu_field(spsr);
3465 gen_set_cpsr(tmp, 0xffffffff);
3466 dead_tmp(tmp);
3467 s->is_jmp = DISAS_UPDATE;
3468 }
3469
3470 /* Generate a v6 exception return. Marks both values as dead. */
3471 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3472 {
3473 gen_set_cpsr(cpsr, 0xffffffff);
3474 dead_tmp(cpsr);
3475 store_reg(s, 15, pc);
3476 s->is_jmp = DISAS_UPDATE;
3477 }
3478
3479 static inline void
3480 gen_set_condexec (DisasContext *s)
3481 {
3482 if (s->condexec_mask) {
3483 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3484 TCGv tmp = new_tmp();
3485 tcg_gen_movi_i32(tmp, val);
3486 store_cpu_field(tmp, condexec_bits);
3487 }
3488 }
3489
3490 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3491 {
3492 gen_set_condexec(s);
3493 gen_set_pc_im(s->pc - offset);
3494 gen_exception(excp);
3495 s->is_jmp = DISAS_JUMP;
3496 }
3497
3498 static void gen_nop_hint(DisasContext *s, int val)
3499 {
3500 switch (val) {
3501 case 3: /* wfi */
3502 gen_set_pc_im(s->pc);
3503 s->is_jmp = DISAS_WFI;
3504 break;
3505 case 2: /* wfe */
3506 case 4: /* sev */
3507 /* TODO: Implement SEV and WFE. May help SMP performance. */
3508 default: /* nop */
3509 break;
3510 }
3511 }
3512
3513 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3514
3515 static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
3516 {
3517 switch (size) {
3518 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3519 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3520 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3521 default: return 1;
3522 }
3523 return 0;
3524 }
3525
3526 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3527 {
3528 switch (size) {
3529 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3530 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3531 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3532 default: return;
3533 }
3534 }
3535
3536 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3537 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3538 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3539 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3540 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3541
3542 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3543 switch ((size << 1) | u) { \
3544 case 0: \
3545 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3546 break; \
3547 case 1: \
3548 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3549 break; \
3550 case 2: \
3551 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3552 break; \
3553 case 3: \
3554 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3555 break; \
3556 case 4: \
3557 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3558 break; \
3559 case 5: \
3560 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3561 break; \
3562 default: return 1; \
3563 }} while (0)
3564
3565 #define GEN_NEON_INTEGER_OP(name) do { \
3566 switch ((size << 1) | u) { \
3567 case 0: \
3568 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3569 break; \
3570 case 1: \
3571 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3572 break; \
3573 case 2: \
3574 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3575 break; \
3576 case 3: \
3577 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3578 break; \
3579 case 4: \
3580 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3581 break; \
3582 case 5: \
3583 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3584 break; \
3585 default: return 1; \
3586 }} while (0)
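
/* Example expansion: with size == 1 and u == 0 in the enclosing
 * function, GEN_NEON_INTEGER_OP(hadd) emits
 * gen_helper_neon_hadd_s16(tmp, tmp, tmp2). */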
3587
3588 static TCGv neon_load_scratch(int scratch)
3589 {
3590 TCGv tmp = new_tmp();
3591 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3592 return tmp;
3593 }
3594
3595 static void neon_store_scratch(int scratch, TCGv var)
3596 {
3597 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3598 dead_tmp(var);
3599 }
3600
3601 static inline TCGv neon_get_scalar(int size, int reg)
3602 {
3603 TCGv tmp;
3604 if (size == 1) {
3605 tmp = neon_load_reg(reg & 7, reg >> 4);
3606 if (reg & 8) {
3607 gen_neon_dup_high16(tmp);
3608 } else {
3609 gen_neon_dup_low16(tmp);
3610 }
3611 } else {
3612 tmp = neon_load_reg(reg & 15, reg >> 4);
3613 }
3614 return tmp;
3615 }
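
/* Scalar operand encoding: for 16-bit scalars reg & 7 is the D
 * register, reg >> 4 selects its 32-bit half and reg & 8 the halfword
 * within that half (hence the dup); for 32-bit scalars reg & 15 is the
 * D register and reg >> 4 the word. */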
3616
3617 static int gen_neon_unzip(int rd, int rm, int size, int q)
3618 {
3619 TCGv tmp, tmp2;
3620 if (size == 3 || (!q && size == 2)) {
3621 return 1;
3622 }
3623 tmp = tcg_const_i32(rd);
3624 tmp2 = tcg_const_i32(rm);
3625 if (q) {
3626 switch (size) {
3627 case 0:
3628 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3629 break;
3630 case 1:
3631 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3632 break;
3633 case 2:
3634 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3635 break;
3636 default:
3637 abort();
3638 }
3639 } else {
3640 switch (size) {
3641 case 0:
3642 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3643 break;
3644 case 1:
3645 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3646 break;
3647 default:
3648 abort();
3649 }
3650 }
3651 tcg_temp_free_i32(tmp);
3652 tcg_temp_free_i32(tmp2);
3653 return 0;
3654 }
3655
3656 static int gen_neon_zip(int rd, int rm, int size, int q)
3657 {
3658 TCGv tmp, tmp2;
3659 if (size == 3 || (!q && size == 2)) {
3660 return 1;
3661 }
3662 tmp = tcg_const_i32(rd);
3663 tmp2 = tcg_const_i32(rm);
3664 if (q) {
3665 switch (size) {
3666 case 0:
3667 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3668 break;
3669 case 1:
3670 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3671 break;
3672 case 2:
3673 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3674 break;
3675 default:
3676 abort();
3677 }
3678 } else {
3679 switch (size) {
3680 case 0:
3681 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3682 break;
3683 case 1:
3684 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3685 break;
3686 default:
3687 abort();
3688 }
3689 }
3690 tcg_temp_free_i32(tmp);
3691 tcg_temp_free_i32(tmp2);
3692 return 0;
3693 }
3694
3695 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3696 {
3697 TCGv rd, tmp;
3698
3699 rd = new_tmp();
3700 tmp = new_tmp();
3701
3702 tcg_gen_shli_i32(rd, t0, 8);
3703 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3704 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3705 tcg_gen_or_i32(rd, rd, tmp);
3706
3707 tcg_gen_shri_i32(t1, t1, 8);
3708 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3709 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3710 tcg_gen_or_i32(t1, t1, tmp);
3711 tcg_gen_mov_i32(t0, rd);
3712
3713 dead_tmp(tmp);
3714 dead_tmp(rd);
3715 }
3716
3717 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3718 {
3719 TCGv rd, tmp;
3720
3721 rd = new_tmp();
3722 tmp = new_tmp();
3723
3724 tcg_gen_shli_i32(rd, t0, 16);
3725 tcg_gen_andi_i32(tmp, t1, 0xffff);
3726 tcg_gen_or_i32(rd, rd, tmp);
3727 tcg_gen_shri_i32(t1, t1, 16);
3728 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3729 tcg_gen_or_i32(t1, t1, tmp);
3730 tcg_gen_mov_i32(t0, rd);
3731
3732 dead_tmp(tmp);
3733 dead_tmp(rd);
3734 }
3735
3736
3737 static struct {
3738 int nregs;
3739 int interleave;
3740 int spacing;
3741 } neon_ls_element_type[11] = {
3742 {4, 4, 1},
3743 {4, 4, 2},
3744 {4, 1, 1},
3745 {4, 2, 1},
3746 {3, 3, 1},
3747 {3, 3, 2},
3748 {3, 1, 1},
3749 {1, 1, 1},
3750 {2, 2, 1},
3751 {2, 2, 2},
3752 {2, 1, 1}
3753 };
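
/* Indexed by the op field of the "load/store all elements" forms:
 * nregs is the number of D registers transferred, interleave the
 * element interleave factor (the n of VLDn/VSTn) and spacing the
 * register stride.  E.g. op == 7, {1, 1, 1}, is single-register
 * VLD1/VST1. */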
3754
3755 /* Translate a NEON load/store element instruction. Return nonzero if the
3756 instruction is invalid. */
3757 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3758 {
3759 int rd, rn, rm;
3760 int op;
3761 int nregs;
3762 int interleave;
3763 int spacing;
3764 int stride;
3765 int size;
3766 int reg;
3767 int pass;
3768 int load;
3769 int shift;
3770 int n;
3771 TCGv addr;
3772 TCGv tmp;
3773 TCGv tmp2;
3774 TCGv_i64 tmp64;
3775
3776 if (!s->vfp_enabled)
3777 return 1;
3778 VFP_DREG_D(rd, insn);
3779 rn = (insn >> 16) & 0xf;
3780 rm = insn & 0xf;
3781 load = (insn & (1 << 21)) != 0;
3782 addr = new_tmp();
3783 if ((insn & (1 << 23)) == 0) {
3784 /* Load store all elements. */
3785 op = (insn >> 8) & 0xf;
3786 size = (insn >> 6) & 3;
3787 if (op > 10)
3788 return 1;
3789 nregs = neon_ls_element_type[op].nregs;
3790 interleave = neon_ls_element_type[op].interleave;
3791 spacing = neon_ls_element_type[op].spacing;
3792 if (size == 3 && (interleave | spacing) != 1)
3793 return 1;
3794 load_reg_var(s, addr, rn);
3795 stride = (1 << size) * interleave;
3796 for (reg = 0; reg < nregs; reg++) {
3797 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3798 load_reg_var(s, addr, rn);
3799 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3800 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3801 load_reg_var(s, addr, rn);
3802 tcg_gen_addi_i32(addr, addr, 1 << size);
3803 }
3804 if (size == 3) {
3805 if (load) {
3806 tmp64 = gen_ld64(addr, IS_USER(s));
3807 neon_store_reg64(tmp64, rd);
3808 tcg_temp_free_i64(tmp64);
3809 } else {
3810 tmp64 = tcg_temp_new_i64();
3811 neon_load_reg64(tmp64, rd);
3812 gen_st64(tmp64, addr, IS_USER(s));
3813 }
3814 tcg_gen_addi_i32(addr, addr, stride);
3815 } else {
3816 for (pass = 0; pass < 2; pass++) {
3817 if (size == 2) {
3818 if (load) {
3819 tmp = gen_ld32(addr, IS_USER(s));
3820 neon_store_reg(rd, pass, tmp);
3821 } else {
3822 tmp = neon_load_reg(rd, pass);
3823 gen_st32(tmp, addr, IS_USER(s));
3824 }
3825 tcg_gen_addi_i32(addr, addr, stride);
3826 } else if (size == 1) {
3827 if (load) {
3828 tmp = gen_ld16u(addr, IS_USER(s));
3829 tcg_gen_addi_i32(addr, addr, stride);
3830 tmp2 = gen_ld16u(addr, IS_USER(s));
3831 tcg_gen_addi_i32(addr, addr, stride);
3832 tcg_gen_shli_i32(tmp2, tmp2, 16);
3833 tcg_gen_or_i32(tmp, tmp, tmp2);
3834 dead_tmp(tmp2);
3835 neon_store_reg(rd, pass, tmp);
3836 } else {
3837 tmp = neon_load_reg(rd, pass);
3838 tmp2 = new_tmp();
3839 tcg_gen_shri_i32(tmp2, tmp, 16);
3840 gen_st16(tmp, addr, IS_USER(s));
3841 tcg_gen_addi_i32(addr, addr, stride);
3842 gen_st16(tmp2, addr, IS_USER(s));
3843 tcg_gen_addi_i32(addr, addr, stride);
3844 }
3845 } else /* size == 0 */ {
3846 if (load) {
3847 TCGV_UNUSED(tmp2);
3848 for (n = 0; n < 4; n++) {
3849 tmp = gen_ld8u(addr, IS_USER(s));
3850 tcg_gen_addi_i32(addr, addr, stride);
3851 if (n == 0) {
3852 tmp2 = tmp;
3853 } else {
3854 tcg_gen_shli_i32(tmp, tmp, n * 8);
3855 tcg_gen_or_i32(tmp2, tmp2, tmp);
3856 dead_tmp(tmp);
3857 }
3858 }
3859 neon_store_reg(rd, pass, tmp2);
3860 } else {
3861 tmp2 = neon_load_reg(rd, pass);
3862 for (n = 0; n < 4; n++) {
3863 tmp = new_tmp();
3864 if (n == 0) {
3865 tcg_gen_mov_i32(tmp, tmp2);
3866 } else {
3867 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3868 }
3869 gen_st8(tmp, addr, IS_USER(s));
3870 tcg_gen_addi_i32(addr, addr, stride);
3871 }
3872 dead_tmp(tmp2);
3873 }
3874 }
3875 }
3876 }
3877 rd += spacing;
3878 }
3879 stride = nregs * 8;
3880 } else {
3881 size = (insn >> 10) & 3;
3882 if (size == 3) {
3883 /* Load single element to all lanes. */
3884 if (!load)
3885 return 1;
3886 size = (insn >> 6) & 3;
3887 nregs = ((insn >> 8) & 3) + 1;
3888 stride = (insn & (1 << 5)) ? 2 : 1;
3889 load_reg_var(s, addr, rn);
3890 for (reg = 0; reg < nregs; reg++) {
3891 switch (size) {
3892 case 0:
3893 tmp = gen_ld8u(addr, IS_USER(s));
3894 gen_neon_dup_u8(tmp, 0);
3895 break;
3896 case 1:
3897 tmp = gen_ld16u(addr, IS_USER(s));
3898 gen_neon_dup_low16(tmp);
3899 break;
3900 case 2:
3901 tmp = gen_ld32(addr, IS_USER(s));
3902 break;
3903 case 3:
3904 return 1;
3905 default: /* Avoid compiler warnings. */
3906 abort();
3907 }
3908 tcg_gen_addi_i32(addr, addr, 1 << size);
3909 tmp2 = new_tmp();
3910 tcg_gen_mov_i32(tmp2, tmp);
3911 neon_store_reg(rd, 0, tmp2);
3912 neon_store_reg(rd, 1, tmp);
3913 rd += stride;
3914 }
3915 stride = (1 << size) * nregs;
3916 } else {
3917 /* Single element. */
3918 pass = (insn >> 7) & 1;
3919 switch (size) {
3920 case 0:
3921 shift = ((insn >> 5) & 3) * 8;
3922 stride = 1;
3923 break;
3924 case 1:
3925 shift = ((insn >> 6) & 1) * 16;
3926 stride = (insn & (1 << 5)) ? 2 : 1;
3927 break;
3928 case 2:
3929 shift = 0;
3930 stride = (insn & (1 << 6)) ? 2 : 1;
3931 break;
3932 default:
3933 abort();
3934 }
3935 nregs = ((insn >> 8) & 3) + 1;
3936 load_reg_var(s, addr, rn);
3937 for (reg = 0; reg < nregs; reg++) {
3938 if (load) {
3939 switch (size) {
3940 case 0:
3941 tmp = gen_ld8u(addr, IS_USER(s));
3942 break;
3943 case 1:
3944 tmp = gen_ld16u(addr, IS_USER(s));
3945 break;
3946 case 2:
3947 tmp = gen_ld32(addr, IS_USER(s));
3948 break;
3949 default: /* Avoid compiler warnings. */
3950 abort();
3951 }
3952 if (size != 2) {
3953 tmp2 = neon_load_reg(rd, pass);
3954 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3955 dead_tmp(tmp2);
3956 }
3957 neon_store_reg(rd, pass, tmp);
3958 } else { /* Store */
3959 tmp = neon_load_reg(rd, pass);
3960 if (shift)
3961 tcg_gen_shri_i32(tmp, tmp, shift);
3962 switch (size) {
3963 case 0:
3964 gen_st8(tmp, addr, IS_USER(s));
3965 break;
3966 case 1:
3967 gen_st16(tmp, addr, IS_USER(s));
3968 break;
3969 case 2:
3970 gen_st32(tmp, addr, IS_USER(s));
3971 break;
3972 }
3973 }
3974 rd += stride;
3975 tcg_gen_addi_i32(addr, addr, 1 << size);
3976 }
3977 stride = nregs * (1 << size);
3978 }
3979 }
3980 dead_tmp(addr);
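/* Post-index writeback: rm == 15 means none, rm == 13 advances the
   base by the amount transferred, any other rm adds that register. */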
3981 if (rm != 15) {
3982 TCGv base;
3983
3984 base = load_reg(s, rn);
3985 if (rm == 13) {
3986 tcg_gen_addi_i32(base, base, stride);
3987 } else {
3988 TCGv index;
3989 index = load_reg(s, rm);
3990 tcg_gen_add_i32(base, base, index);
3991 dead_tmp(index);
3992 }
3993 store_reg(s, rn, base);
3994 }
3995 return 0;
3996 }
3997
3998 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3999 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4000 {
4001 tcg_gen_and_i32(t, t, c);
4002 tcg_gen_andc_i32(f, f, c);
4003 tcg_gen_or_i32(dest, t, f);
4004 }
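
/* Host-value reference for the select above (illustrative name):
 * dest takes t where the control bit is set and f where it is clear. */
static inline uint32_t neon_bsl_ref(uint32_t c, uint32_t t, uint32_t f)
{
    return (t & c) | (f & ~c);
}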
4005
4006 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4007 {
4008 switch (size) {
4009 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4010 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4011 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4012 default: abort();
4013 }
4014 }
4015
4016 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4017 {
4018 switch (size) {
4019 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4020 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4021 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4022 default: abort();
4023 }
4024 }
4025
4026 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4027 {
4028 switch (size) {
4029 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4030 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4031 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4032 default: abort();
4033 }
4034 }
4035
4036 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4037 {
4038 switch (size) {
4039 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4040 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4041 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4042 default: abort();
4043 }
4044 }
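
/* The narrow helpers above halve the element width of a 64-bit
 * operand: gen_neon_narrow truncates, _sats saturates signed->signed,
 * _satu unsigned->unsigned, and unarrow_sats saturates a signed input
 * into the unsigned range (VQMOVUN). */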
4045
4046 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4047 int q, int u)
4048 {
4049 if (q) {
4050 if (u) {
4051 switch (size) {
4052 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4053 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4054 default: abort();
4055 }
4056 } else {
4057 switch (size) {
4058 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4059 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4060 default: abort();
4061 }
4062 }
4063 } else {
4064 if (u) {
4065 switch (size) {
4066 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4067 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4068 default: abort();
4069 }
4070 } else {
4071 switch (size) {
4072 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4073 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4074 default: abort();
4075 }
4076 }
4077 }
4078 }
4079
4080 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4081 {
4082 if (u) {
4083 switch (size) {
4084 case 0: gen_helper_neon_widen_u8(dest, src); break;
4085 case 1: gen_helper_neon_widen_u16(dest, src); break;
4086 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4087 default: abort();
4088 }
4089 } else {
4090 switch (size) {
4091 case 0: gen_helper_neon_widen_s8(dest, src); break;
4092 case 1: gen_helper_neon_widen_s16(dest, src); break;
4093 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4094 default: abort();
4095 }
4096 }
4097 dead_tmp(src);
4098 }
4099
4100 static inline void gen_neon_addl(int size)
4101 {
4102 switch (size) {
4103 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4104 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4105 case 2: tcg_gen_add_i64(CPU_V001); break;
4106 default: abort();
4107 }
4108 }
4109
4110 static inline void gen_neon_subl(int size)
4111 {
4112 switch (size) {
4113 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4114 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4115 case 2: tcg_gen_sub_i64(CPU_V001); break;
4116 default: abort();
4117 }
4118 }
4119
4120 static inline void gen_neon_negl(TCGv_i64 var, int size)
4121 {
4122 switch (size) {
4123 case 0: gen_helper_neon_negl_u16(var, var); break;
4124 case 1: gen_helper_neon_negl_u32(var, var); break;
4125 case 2: gen_helper_neon_negl_u64(var, var); break;
4126 default: abort();
4127 }
4128 }
4129
4130 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4131 {
4132 switch (size) {
4133 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4134 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4135 default: abort();
4136 }
4137 }
4138
4139 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4140 {
4141 TCGv_i64 tmp;
4142
4143 switch ((size << 1) | u) {
4144 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4145 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4146 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4147 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4148 case 4:
4149 tmp = gen_muls_i64_i32(a, b);
4150 tcg_gen_mov_i64(dest, tmp);
4151 break;
4152 case 5:
4153 tmp = gen_mulu_i64_i32(a, b);
4154 tcg_gen_mov_i64(dest, tmp);
4155 break;
4156 default: abort();
4157 }
4158
4159 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters,
4160 so free them here. */
4161 if (size < 2) {
4162 dead_tmp(a);
4163 dead_tmp(b);
4164 }
4165 }
4166
4167 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4168 {
4169 if (op) {
4170 if (u) {
4171 gen_neon_unarrow_sats(size, dest, src);
4172 } else {
4173 gen_neon_narrow(size, dest, src);
4174 }
4175 } else {
4176 if (u) {
4177 gen_neon_narrow_satu(size, dest, src);
4178 } else {
4179 gen_neon_narrow_sats(size, dest, src);
4180 }
4181 }
4182 }
4183
4184 /* Translate a NEON data processing instruction. Return nonzero if the
4185 instruction is invalid.
4186 We process data in a mixture of 32-bit and 64-bit chunks.
4187 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4188
4189 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4190 {
4191 int op;
4192 int q;
4193 int rd, rn, rm;
4194 int size;
4195 int shift;
4196 int pass;
4197 int count;
4198 int pairwise;
4199 int u;
4200 int n;
4201 uint32_t imm, mask;
4202 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4203 TCGv_i64 tmp64;
4204
4205 if (!s->vfp_enabled)
4206 return 1;
4207 q = (insn & (1 << 6)) != 0;
4208 u = (insn >> 24) & 1;
4209 VFP_DREG_D(rd, insn);
4210 VFP_DREG_N(rn, insn);
4211 VFP_DREG_M(rm, insn);
4212 size = (insn >> 20) & 3;
4213 if ((insn & (1 << 23)) == 0) {
4214 /* Three register same length. */
4215 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4216 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4217 || op == 10 || op == 11 || op == 16)) {
4218 /* 64-bit element instructions. */
4219 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4220 neon_load_reg64(cpu_V0, rn + pass);
4221 neon_load_reg64(cpu_V1, rm + pass);
4222 switch (op) {
4223 case 1: /* VQADD */
4224 if (u) {
4225 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4226 cpu_V0, cpu_V1);
4227 } else {
4228 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4229 cpu_V0, cpu_V1);
4230 }
4231 break;
4232 case 5: /* VQSUB */
4233 if (u) {
4234 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4235 cpu_V0, cpu_V1);
4236 } else {
4237 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4238 cpu_V0, cpu_V1);
4239 }
4240 break;
4241 case 8: /* VSHL */
4242 if (u) {
4243 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4244 } else {
4245 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4246 }
4247 break;
4248 case 9: /* VQSHL */
4249 if (u) {
4250 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4251 cpu_V1, cpu_V0);
4252 } else {
4253 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4254 cpu_V1, cpu_V0);
4255 }
4256 break;
4257 case 10: /* VRSHL */
4258 if (u) {
4259 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4260 } else {
4261 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4262 }
4263 break;
4264 case 11: /* VQRSHL */
4265 if (u) {
4266 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4267 cpu_V1, cpu_V0);
4268 } else {
4269 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4270 cpu_V1, cpu_V0);
4271 }
4272 break;
4273 case 16:
4274 if (u) {
4275 tcg_gen_sub_i64(CPU_V001);
4276 } else {
4277 tcg_gen_add_i64(CPU_V001);
4278 }
4279 break;
4280 default:
4281 abort();
4282 }
4283 neon_store_reg64(cpu_V0, rd + pass);
4284 }
4285 return 0;
4286 }
4287 switch (op) {
4288 case 8: /* VSHL */
4289 case 9: /* VQSHL */
4290 case 10: /* VRSHL */
4291 case 11: /* VQRSHL */
4292 {
4293 int rtmp;
4294 /* Shift instruction operands are reversed. */
4295 rtmp = rn;
4296 rn = rm;
4297 rm = rtmp;
4298 pairwise = 0;
4299 }
4300 break;
4301 case 20: /* VPMAX */
4302 case 21: /* VPMIN */
4303 case 23: /* VPADD */
4304 pairwise = 1;
4305 break;
4306 case 26: /* VPADD (float) */
4307 pairwise = (u && size < 2);
4308 break;
4309 case 30: /* VPMIN/VPMAX (float) */
4310 pairwise = u;
4311 break;
4312 default:
4313 pairwise = 0;
4314 break;
4315 }
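 /* For pairwise ops the low elements of the result come from adjacent
    pairs of rn and the high elements from pairs of rm, hence the split
    of passes between the two source registers below. */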
4316
4317 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4318
4319 if (pairwise) {
4320 /* Pairwise. */
4321 if (q)
4322 n = (pass & 1) * 2;
4323 else
4324 n = 0;
4325 if (pass < q + 1) {
4326 tmp = neon_load_reg(rn, n);
4327 tmp2 = neon_load_reg(rn, n + 1);
4328 } else {
4329 tmp = neon_load_reg(rm, n);
4330 tmp2 = neon_load_reg(rm, n + 1);
4331 }
4332 } else {
4333 /* Elementwise. */
4334 tmp = neon_load_reg(rn, pass);
4335 tmp2 = neon_load_reg(rm, pass);
4336 }
4337 switch (op) {
4338 case 0: /* VHADD */
4339 GEN_NEON_INTEGER_OP(hadd);
4340 break;
4341 case 1: /* VQADD */
4342 GEN_NEON_INTEGER_OP_ENV(qadd);
4343 break;
4344 case 2: /* VRHADD */
4345 GEN_NEON_INTEGER_OP(rhadd);
4346 break;
4347 case 3: /* Logic ops. */
4348 switch ((u << 2) | size) {
4349 case 0: /* VAND */
4350 tcg_gen_and_i32(tmp, tmp, tmp2);
4351 break;
4352 case 1: /* BIC */
4353 tcg_gen_andc_i32(tmp, tmp, tmp2);
4354 break;
4355 case 2: /* VORR */
4356 tcg_gen_or_i32(tmp, tmp, tmp2);
4357 break;
4358 case 3: /* VORN */
4359 tcg_gen_orc_i32(tmp, tmp, tmp2);
4360 break;
4361 case 4: /* VEOR */
4362 tcg_gen_xor_i32(tmp, tmp, tmp2);
4363 break;
4364 case 5: /* VBSL */
4365 tmp3 = neon_load_reg(rd, pass);
4366 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4367 dead_tmp(tmp3);
4368 break;
4369 case 6: /* VBIT */
4370 tmp3 = neon_load_reg(rd, pass);
4371 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4372 dead_tmp(tmp3);
4373 break;
4374 case 7: /* VBIF */
4375 tmp3 = neon_load_reg(rd, pass);
4376 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4377 dead_tmp(tmp3);
4378 break;
4379 }
4380 break;
4381 case 4: /* VHSUB */
4382 GEN_NEON_INTEGER_OP(hsub);
4383 break;
4384 case 5: /* VQSUB */
4385 GEN_NEON_INTEGER_OP_ENV(qsub);
4386 break;
4387 case 6: /* VCGT */
4388 GEN_NEON_INTEGER_OP(cgt);
4389 break;
4390 case 7: /* VCGE */
4391 GEN_NEON_INTEGER_OP(cge);
4392 break;
4393 case 8: /* VSHL */
4394 GEN_NEON_INTEGER_OP(shl);
4395 break;
4396 case 9: /* VQSHL */
4397 GEN_NEON_INTEGER_OP_ENV(qshl);
4398 break;
4399 case 10: /* VRSHL */
4400 GEN_NEON_INTEGER_OP(rshl);
4401 break;
4402 case 11: /* VQRSHL */
4403 GEN_NEON_INTEGER_OP_ENV(qrshl);
4404 break;
4405 case 12: /* VMAX */
4406 GEN_NEON_INTEGER_OP(max);
4407 break;
4408 case 13: /* VMIN */
4409 GEN_NEON_INTEGER_OP(min);
4410 break;
4411 case 14: /* VABD */
4412 GEN_NEON_INTEGER_OP(abd);
4413 break;
4414 case 15: /* VABA */
4415 GEN_NEON_INTEGER_OP(abd);
4416 dead_tmp(tmp2);
4417 tmp2 = neon_load_reg(rd, pass);
4418 gen_neon_add(size, tmp, tmp2);
4419 break;
4420 case 16:
4421 if (!u) { /* VADD */
4422 if (gen_neon_add(size, tmp, tmp2))
4423 return 1;
4424 } else { /* VSUB */
4425 switch (size) {
4426 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4427 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4428 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4429 default: return 1;
4430 }
4431 }
4432 break;
4433 case 17:
4434 if (!u) { /* VTST */
4435 switch (size) {
4436 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4437 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4438 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4439 default: return 1;
4440 }
4441 } else { /* VCEQ */
4442 switch (size) {
4443 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4444 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4445 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4446 default: return 1;
4447 }
4448 }
4449 break;
4450 case 18: /* Multiply. */
4451 switch (size) {
4452 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4453 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4454 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4455 default: return 1;
4456 }
4457 dead_tmp(tmp2);
4458 tmp2 = neon_load_reg(rd, pass);
4459 if (u) { /* VMLS */
4460 gen_neon_rsb(size, tmp, tmp2);
4461 } else { /* VMLA */
4462 gen_neon_add(size, tmp, tmp2);
4463 }
4464 break;
4465 case 19: /* VMUL */
4466 if (u) { /* polynomial */
4467 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4468 } else { /* Integer */
4469 switch (size) {
4470 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4471 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4472 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4473 default: return 1;
4474 }
4475 }
4476 break;
4477 case 20: /* VPMAX */
4478 GEN_NEON_INTEGER_OP(pmax);
4479 break;
4480 case 21: /* VPMIN */
4481 GEN_NEON_INTEGER_OP(pmin);
4482 break;
4483 case 22: /* Multiply high. */
4484 if (!u) { /* VQDMULH */
4485 switch (size) {
4486 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4487 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4488 default: return 1;
4489 }
4490 } else { /* VQRDMULH */
4491 switch (size) {
4492 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4493 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4494 default: return 1;
4495 }
4496 }
4497 break;
4498 case 23: /* VPADD */
4499 if (u)
4500 return 1;
4501 switch (size) {
4502 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4503 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4504 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4505 default: return 1;
4506 }
4507 break;
4508 case 26: /* Floating point arithmetic. */
4509 switch ((u << 2) | size) {
4510 case 0: /* VADD */
4511 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4512 break;
4513 case 2: /* VSUB */
4514 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4515 break;
4516 case 4: /* VPADD */
4517 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4518 break;
4519 case 6: /* VABD */
4520 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4521 break;
4522 default:
4523 return 1;
4524 }
4525 break;
4526 case 27: /* Float multiply. */
4527 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4528 if (!u) {
4529 dead_tmp(tmp2);
4530 tmp2 = neon_load_reg(rd, pass);
4531 if (size == 0) {
4532 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4533 } else {
4534 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4535 }
4536 }
4537 break;
4538 case 28: /* Float compare. */
4539 if (!u) {
4540 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4541 } else {
4542 if (size == 0)
4543 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4544 else
4545 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4546 }
4547 break;
4548 case 29: /* Float compare absolute. */
4549 if (!u)
4550 return 1;
4551 if (size == 0)
4552 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4553 else
4554 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4555 break;
4556 case 30: /* Float min/max. */
4557 if (size == 0)
4558 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4559 else
4560 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4561 break;
4562 case 31:
4563 if (size == 0)
4564 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4565 else
4566 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4567 break;
4568 default:
4569 abort();
4570 }
4571 dead_tmp(tmp2);
4572
4573 /* Save the result. For elementwise operations we can put it
4574 straight into the destination register. For pairwise operations
4575 we have to be careful to avoid clobbering the source operands. */
4576 if (pairwise && rd == rm) {
4577 neon_store_scratch(pass, tmp);
4578 } else {
4579 neon_store_reg(rd, pass, tmp);
4580 }
4581
4582 } /* for pass */
4583 if (pairwise && rd == rm) {
4584 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4585 tmp = neon_load_scratch(pass);
4586 neon_store_reg(rd, pass, tmp);
4587 }
4588 }
4589 /* End of 3 register same size operations. */
4590 } else if (insn & (1 << 4)) {
4591 if ((insn & 0x00380080) != 0) {
4592 /* Two registers and shift. */
4593 op = (insn >> 8) & 0xf;
4594 if (insn & (1 << 7)) {
4595 /* 64-bit shift. */
4596 size = 3;
4597 } else {
4598 size = 2;
4599 while ((insn & (1 << (size + 19))) == 0)
4600 size--;
4601 }
4602 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4603 /* To avoid excessive duplication of ops we implement shift
4604 by immediate using the variable shift operations. */
4605 if (op < 8) {
4606 /* Shift by immediate:
4607 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4608 /* Right shifts are encoded as N - shift, where N is the
4609 element size in bits. */
4610 if (op <= 4)
4611 shift = shift - (1 << (size + 3));
4612 if (size == 3) {
4613 count = q + 1;
4614 } else {
4615 count = q ? 4: 2;
4616 }
4617 switch (size) {
4618 case 0:
4619 imm = (uint8_t) shift;
4620 imm |= imm << 8;
4621 imm |= imm << 16;
4622 break;
4623 case 1:
4624 imm = (uint16_t) shift;
4625 imm |= imm << 16;
4626 break;
4627 case 2:
4628 case 3:
4629 imm = shift;
4630 break;
4631 default:
4632 abort();
4633 }
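 /* Worked example (illustrative): an 8-bit VSHR #3 is encoded with a
    shift field of 5, which the adjustment above turns into shift = -3;
    the replication then yields imm = 0xfdfdfdfd, so every byte lane of
    the variable-shift helpers sees a count of -3, i.e. a right shift
    by 3. */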
4634
4635 for (pass = 0; pass < count; pass++) {
4636 if (size == 3) {
4637 neon_load_reg64(cpu_V0, rm + pass);
4638 tcg_gen_movi_i64(cpu_V1, imm);
4639 switch (op) {
4640 case 0: /* VSHR */
4641 case 1: /* VSRA */
4642 if (u)
4643 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4644 else
4645 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4646 break;
4647 case 2: /* VRSHR */
4648 case 3: /* VRSRA */
4649 if (u)
4650 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4651 else
4652 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4653 break;
4654 case 4: /* VSRI */
4655 if (!u)
4656 return 1;
4657 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4658 break;
4659 case 5: /* VSHL, VSLI */
4660 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4661 break;
4662 case 6: /* VQSHLU */
4663 if (u) {
4664 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4665 cpu_V0, cpu_V1);
4666 } else {
4667 return 1;
4668 }
4669 break;
4670 case 7: /* VQSHL */
4671 if (u) {
4672 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4673 cpu_V0, cpu_V1);
4674 } else {
4675 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4676 cpu_V0, cpu_V1);
4677 }
4678 break;
4679 }
4680 if (op == 1 || op == 3) {
4681 /* Accumulate. */
4682 neon_load_reg64(cpu_V1, rd + pass);
4683 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4684 } else if (op == 4 || (op == 5 && u)) {
4685 /* Insert */
4686 uint64_t mask;
4687 neon_load_reg64(cpu_V1, rd + pass);
4688 if (shift < -63 || shift > 63) {
4689 mask = 0;
4690 } else {
4691 if (op == 4) {
4692 mask = 0xffffffffffffffffull >> -shift;
4693 } else {
4694 mask = 0xffffffffffffffffull << shift;
4695 }
4696 }
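 /* The mask covers the bits supplied by the shifted source; the
    destination keeps the remainder: its top -shift bits for VSRI
    and its low shift bits for VSLI. */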
4697 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4698 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
4699 }
4700 neon_store_reg64(cpu_V0, rd + pass);
4701 } else { /* size < 3 */
4702 /* Operands in tmp and tmp2. */
4703 tmp = neon_load_reg(rm, pass);
4704 tmp2 = new_tmp();
4705 tcg_gen_movi_i32(tmp2, imm);
4706 switch (op) {
4707 case 0: /* VSHR */
4708 case 1: /* VSRA */
4709 GEN_NEON_INTEGER_OP(shl);
4710 break;
4711 case 2: /* VRSHR */
4712 case 3: /* VRSRA */
4713 GEN_NEON_INTEGER_OP(rshl);
4714 break;
4715 case 4: /* VSRI */
4716 if (!u)
4717 return 1;
4718 GEN_NEON_INTEGER_OP(shl);
4719 break;
4720 case 5: /* VSHL, VSLI */
4721 switch (size) {
4722 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4723 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4724 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4725 default: return 1;
4726 }
4727 break;
4728 case 6: /* VQSHLU */
4729 if (!u) {
4730 return 1;
4731 }
4732 switch (size) {
4733 case 0:
4734 gen_helper_neon_qshlu_s8(tmp, cpu_env,
4735 tmp, tmp2);
4736 break;
4737 case 1:
4738 gen_helper_neon_qshlu_s16(tmp, cpu_env,
4739 tmp, tmp2);
4740 break;
4741 case 2:
4742 gen_helper_neon_qshlu_s32(tmp, cpu_env,
4743 tmp, tmp2);
4744 break;
4745 default:
4746 return 1;
4747 }
4748 break;
4749 case 7: /* VQSHL */
4750 GEN_NEON_INTEGER_OP_ENV(qshl);
4751 break;
4752 }
4753 dead_tmp(tmp2);
4754
4755 if (op == 1 || op == 3) {
4756 /* Accumulate. */
4757 tmp2 = neon_load_reg(rd, pass);
4758 gen_neon_add(size, tmp, tmp2);
4759 dead_tmp(tmp2);
4760 } else if (op == 4 || (op == 5 && u)) {
4761 /* Insert */
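 /* e.g. (illustrative) an 8-bit VSRI #3 reaches this point with
    shift = -3, so mask = 0xff >> 3 = 0x1f replicated to 0x1f1f1f1f:
    each byte lane takes its low five bits from the shifted source
    and keeps its top three bits from the destination. */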
4762 switch (size) {
4763 case 0:
4764 if (op == 4)
4765 mask = 0xff >> -shift;
4766 else
4767 mask = (uint8_t)(0xff << shift);
4768 mask |= mask << 8;
4769 mask |= mask << 16;
4770 break;
4771 case 1:
4772 if (op == 4)
4773 mask = 0xffff >> -shift;
4774 else
4775 mask = (uint16_t)(0xffff << shift);
4776 mask |= mask << 16;
4777 break;
4778 case 2:
4779 if (shift < -31 || shift > 31) {
4780 mask = 0;
4781 } else {
4782 if (op == 4)
4783 mask = 0xffffffffu >> -shift;
4784 else
4785 mask = 0xffffffffu << shift;
4786 }
4787 break;
4788 default:
4789 abort();
4790 }
4791 tmp2 = neon_load_reg(rd, pass);
4792 tcg_gen_andi_i32(tmp, tmp, mask);
4793 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
4794 tcg_gen_or_i32(tmp, tmp, tmp2);
4795 dead_tmp(tmp2);
4796 }
4797 neon_store_reg(rd, pass, tmp);
4798 }
4799 } /* for pass */
4800 } else if (op < 10) {
4801 /* Shift by immediate and narrow:
4802 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4803 int input_unsigned = (op == 8) ? !u : u;
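 /* For op 8 the U bit selects the VQSHRUN/VQRSHRUN forms, whose
    input is signed even though the result saturates to unsigned,
    hence the inverted sense of u here (our reading of the
    encoding). */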
4804
4805 shift = shift - (1 << (size + 3));
4806 size++;
4807 if (size == 3) {
4808 tmp64 = tcg_const_i64(shift);
4809 neon_load_reg64(cpu_V0, rm);
4810 neon_load_reg64(cpu_V1, rm + 1);
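 /* Both source halves are loaded before the loop because pass 0
    stores to rd, which may overlap rm + 1. */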
4811 for (pass = 0; pass < 2; pass++) {
4812 TCGv_i64 in;
4813 if (pass == 0) {
4814 in = cpu_V0;
4815 } else {
4816 in = cpu_V1;
4817 }
4818 if (q) {
4819 if (input_unsigned) {
4820 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
4821 } else {
4822 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
4823 }
4824 } else {
4825 if (input_unsigned) {
4826 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
4827 } else {
4828 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
4829 }
4830 }
4831 tmp = new_tmp();
4832 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4833 neon_store_reg(rd, pass, tmp);
4834 } /* for pass */
4835 tcg_temp_free_i64(tmp64);
4836 } else {
4837 if (size == 1) {
4838 imm = (uint16_t)shift;
4839 imm |= imm << 16;
4840 } else {
4841 /* size == 2 */
4842 imm = (uint32_t)shift;
4843 }
4844 tmp2 = tcg_const_i32(imm);
4845 tmp4 = neon_load_reg(rm + 1, 0);
4846 tmp5 = neon_load_reg(rm + 1, 1);
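 /* As in the 64-bit case, both words of rm + 1 are fetched into
    tmp4/tmp5 before pass 0 writes rd, so a destination that
    overlaps the high source register still reads the original
    values. */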
4847 for (pass = 0; pass < 2; pass++) {
4848 if (pass == 0) {
4849 tmp = neon_load_reg(rm, 0);
4850 } else {
4851 tmp = tmp4;
4852 }
4853 gen_neon_shift_narrow(size, tmp, tmp2, q,
4854 input_unsigned);
4855 if (pass == 0) {
4856 tmp3 = neon_load_reg(rm, 1);
4857 } else {
4858 tmp3 = tmp5;
4859 }
4860 gen_neon_shift_narrow(size, tmp3, tmp2, q,
4861 input_unsigned);
4862 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4863 dead_tmp(tmp);
4864 dead_tmp(tmp3);
4865 tmp = new_tmp();
4866 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4867 neon_store_reg(rd, pass, tmp);
4868 } /* for pass */
4869 tcg_temp_free_i32(tmp2);
4870 }
4871 } else if (op == 10) {
4872 /* VSHLL */
4873 if (q || size == 3)
4874 return 1;
4875 tmp = neon_load_reg(rm, 0);
4876 tmp2 = neon_load_reg(rm, 1);
4877 for (pass = 0; pass < 2; pass++) {
4878 if (pass == 1)
4879 tmp = tmp2;
4880
4881 gen_neon_widen(cpu_V0, tmp, size, u);
4882
4883 if (shift != 0) {
4884 /* The shift is less than the width of the source
4885 type, so we can just shift the whole register. */
4886 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4887 /* Widen the result of shift: we need to clear
4888 * the potential overflow bits resulting from
4889 * left bits of the narrow input appearing as
4890 * right bits of the left neighbour narrow
4891 * input. */
4892 if (size < 2 || !u) {
4893 uint64_t imm64;
4894 if (size == 0) {
4895 imm = (0xffu >> (8 - shift));
4896 imm |= imm << 16;
4897 } else if (size == 1) {
4898 imm = 0xffff >> (16 - shift);
4899 } else {
4900 /* size == 2 */
4901 imm = 0xffffffff >> (32 - shift);
4902 }
4903 if (size < 2) {
4904 imm64 = imm | (((uint64_t)imm) << 32);
4905 } else {
4906 imm64 = imm;
4907 }
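 /* e.g. (illustrative): size == 0 with shift == 2 gives
    imm64 = 0x0003000300030003, clearing the two low bits of each
    16-bit lane where bits of the neighbouring narrow input landed. */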
4908 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4909 }
4910 }
4911 neon_store_reg64(cpu_V0, rd + pass);
4912 }
4913 } else if (op >= 14) {
4914 /* VCVT fixed-point. */
4915 /* We have already masked out the must-be-1 top bit of imm6,
4916 * hence this 32-shift where the ARM ARM has 64-imm6.
4917 */
4918 shift = 32 - shift;
4919 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4920 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4921 if (!(op & 1)) {
4922 if (u)
4923 gen_vfp_ulto(0, shift);
4924 else
4925 gen_vfp_slto(0, shift);
4926 } else {
4927 if (u)
4928 gen_vfp_toul(0, shift);
4929 else
4930 gen_vfp_tosl(0, shift);
4931 }
4932 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4933 }
4934 } else {
4935 return 1;
4936 }
4937 } else { /* (insn & 0x00380080) == 0 */
4938 int invert;
4939
4940 op = (insn >> 8) & 0xf;
4941 /* One register and immediate. */
4942 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4943 invert = (insn & (1 << 5)) != 0;
4944 switch (op) {
4945 case 0: case 1:
4946 /* no-op */
4947 break;
4948 case 2: case 3:
4949 imm <<= 8;
4950 break;
4951 case 4: case 5:
4952 imm <<= 16;
4953 break;
4954 case 6: case 7:
4955 imm <<= 24;
4956 break;
4957 case 8: case 9:
4958 imm |= imm << 16;
4959 break;
4960 case 10: case 11:
4961 imm = (imm << 8) | (imm << 24);
4962 break;
4963 case 12:
4964 imm = (imm << 8) | 0xff;
4965 break;
4966 case 13:
4967 imm = (imm << 16) | 0xffff;
4968 break;
4969 case 14:
4970 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4971 if (invert)
4972 imm = ~imm;
4973 break;
4974 case 15:
4975 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4976 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4977 break;
4978 }
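 /* e.g. (illustrative): op 12 is the "shifted ones" form, so an
    8-bit immediate of 0xab expands to the 32-bit pattern
    0x0000abff. */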
4979 if (invert)
4980 imm = ~imm;
4981
4982 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4983 if ((op & 1) && op < 12) {
4984 tmp = neon_load_reg(rd, pass);
4985 if (invert) {
4986 /* The immediate value has already been inverted, so
4987 BIC becomes AND. */
4988 tcg_gen_andi_i32(tmp, tmp, imm);
4989 } else {
4990 tcg_gen_ori_i32(tmp, tmp, imm);
4991 }
4992 } else {
4993 /* VMOV, VMVN. */
4994 tmp = new_tmp();
4995 if (op == 14 && invert) {
4996 uint32_t val;
4997 val = 0;
4998 for (n = 0; n < 4; n++) {
4999 if (imm & (1 << (n + (pass & 1) * 4)))
5000 val |= 0xff << (n * 8);
5001 }
5002 tcg_gen_movi_i32(tmp, val);
5003 } else {
5004 tcg_gen_movi_i32(tmp, imm);
5005 }
5006 }
5007 neon_store_reg(rd, pass, tmp);
5008 }
5009 }
5010 } else { /* (insn & 0x00800010 == 0x00800000) */
5011 if (size != 3) {
5012 op = (insn >> 8) & 0xf;
5013 if ((insn & (1 << 6)) == 0) {
5014 /* Three registers of different lengths. */
5015 int src1_wide;
5016 int src2_wide;
5017 int prewiden;
5018 /* prewiden, src1_wide, src2_wide */
5019 static const int neon_3reg_wide[16][3] = {
5020 {1, 0, 0}, /* VADDL */
5021 {1, 1, 0}, /* VADDW */
5022 {1, 0, 0}, /* VSUBL */
5023 {1, 1, 0}, /* VSUBW */
5024 {0, 1, 1}, /* VADDHN */
5025 {0, 0, 0}, /* VABAL */
5026 {0, 1, 1}, /* VSUBHN */
5027 {0, 0, 0}, /* VABDL */
5028 {0, 0, 0}, /* VMLAL */
5029 {0, 0, 0}, /* VQDMLAL */
5030 {0, 0, 0}, /* VMLSL */
5031 {0, 0, 0}, /* VQDMLSL */
5032 {0, 0, 0}, /* Integer VMULL */
5033 {0, 0, 0}, /* VQDMULL */
5034 {0, 0, 0} /* Polynomial VMULL */
5035 };
5036
5037 prewiden = neon_3reg_wide[op][0];
5038 src1_wide = neon_3reg_wide[op][1];
5039 src2_wide = neon_3reg_wide[op][2];
5040
5041 if (size == 0 && (op == 9 || op == 11 || op == 13))
5042 return 1;
5043
5044 /* Avoid overlapping operands. Wide source operands are
5045 always aligned so will never overlap with wide
5046 destinations in problematic ways. */
5047 if (rd == rm && !src2_wide) {
5048 tmp = neon_load_reg(rm, 1);
5049 neon_store_scratch(2, tmp);
5050 } else if (rd == rn && !src1_wide) {
5051 tmp = neon_load_reg(rn, 1);
5052 neon_store_scratch(2, tmp);
5053 }
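 /* e.g. a VADDL with rd == rn: pass 0 writes a 64-bit result over
    rd, so the second narrow half of rn is stashed in scratch slot 2
    for pass 1 to read. */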
5054 TCGV_UNUSED(tmp3);
5055 for (pass = 0; pass < 2; pass++) {
5056 if (src1_wide) {
5057 neon_load_reg64(cpu_V0, rn + pass);
5058 TCGV_UNUSED(tmp);
5059 } else {
5060 if (pass == 1 && rd == rn) {
5061 tmp = neon_load_scratch(2);
5062 } else {
5063 tmp = neon_load_reg(rn, pass);
5064 }
5065 if (prewiden) {
5066 gen_neon_widen(cpu_V0, tmp, size, u);
5067 }
5068 }
5069 if (src2_wide) {
5070 neon_load_reg64(cpu_V1, rm + pass);
5071 TCGV_UNUSED(tmp2);
5072 } else {
5073 if (pass == 1 && rd == rm) {
5074 tmp2 = neon_load_scratch(2);
5075 } else {
5076 tmp2 = neon_load_reg(rm, pass);
5077 }
5078 if (prewiden) {
5079 gen_neon_widen(cpu_V1, tmp2, size, u);
5080 }
5081 }
5082 switch (op) {
5083 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5084 gen_neon_addl(size);
5085 break;
5086 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5087 gen_neon_subl(size);
5088 break;
5089 case 5: case 7: /* VABAL, VABDL */
5090 switch ((size << 1) | u) {
5091 case 0:
5092 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5093 break;
5094 case 1:
5095 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5096 break;
5097 case 2:
5098 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5099 break;
5100 case 3:
5101 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5102 break;
5103 case 4:
5104 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5105 break;
5106 case 5:
5107 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5108 break;
5109 default: abort();
5110 }
5111 dead_tmp(tmp2);
5112 dead_tmp(tmp);
5113 break;
5114 case 8: case 9: case 10: case 11: case 12: case 13:
5115 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5116 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5117 break;
5118 case 14: /* Polynomial VMULL */
5119 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5120 dead_tmp(tmp2);
5121 dead_tmp(tmp);
5122 break;
5123 default: /* 15 is RESERVED. */
5124 return 1;
5125 }
5126 if (op == 13) {
5127 /* VQDMULL */
5128 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5129 neon_store_reg64(cpu_V0, rd + pass);
5130 } else if (op == 5 || (op >= 8 && op <= 11)) {
5131 /* Accumulate. */
5132 neon_load_reg64(cpu_V1, rd + pass);
5133 switch (op) {
5134 case 10: /* VMLSL */
5135 gen_neon_negl(cpu_V0, size);
5136 /* Fall through */
5137 case 5: case 8: /* VABAL, VMLAL */
5138 gen_neon_addl(size);
5139 break;
5140 case 9: case 11: /* VQDMLAL, VQDMLSL */
5141 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5142 if (op == 11) {
5143 gen_neon_negl(cpu_V0, size);
5144 }
5145 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5146 break;
5147 default:
5148 abort();
5149 }
5150 neon_store_reg64(cpu_V0, rd + pass);
5151 } else if (op == 4 || op == 6) {
5152 /* Narrowing operation. */
5153 tmp = new_tmp();
5154 if (!u) {
5155 switch (size) {
5156 case 0:
5157 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5158 break;
5159 case 1:
5160 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5161 break;
5162 case 2:
5163 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5164 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5165 break;
5166 default: abort();
5167 }
5168 } else {
5169 switch (size) {
5170 case 0:
5171 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5172 break;
5173 case 1:
5174 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5175 break;
5176 case 2:
5177 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5178 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5179 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5180 break;
5181 default: abort();
5182 }
5183 }
5184 if (pass == 0) {
5185 tmp3 = tmp;
5186 } else {
5187 neon_store_reg(rd, 0, tmp3);
5188 neon_store_reg(rd, 1, tmp);
5189 }
5190 } else {
5191 /* Write back the result. */
5192 neon_store_reg64(cpu_V0, rd + pass);
5193 }
5194 }
5195 } else {
5196 /* Two registers and a scalar. */
5197 switch (op) {
5198 case 0: /* Integer VMLA scalar */
5199 case 1: /* Float VMLA scalar */
5200 case 4: /* Integer VMLS scalar */
5201 case 5: /* Floating point VMLS scalar */
5202 case 8: /* Integer VMUL scalar */
5203 case 9: /* Floating point VMUL scalar */
5204 case 12: /* VQDMULH scalar */
5205 case 13: /* VQRDMULH scalar */
5206 tmp = neon_get_scalar(size, rm);
5207 neon_store_scratch(0, tmp);
5208 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5209 tmp = neon_load_scratch(0);
5210 tmp2 = neon_load_reg(rn, pass);
5211 if (op == 12) {
5212 if (size == 1) {
5213 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5214 } else {
5215 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5216 }
5217 } else if (op == 13) {
5218 if (size == 1) {
5219 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5220 } else {
5221 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5222 }
5223 } else if (op & 1) {
5224 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5225 } else {
5226 switch (size) {
5227 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5228 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5229 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5230 default: return 1;
5231 }
5232 }
5233 dead_tmp(tmp2);
5234 if (op < 8) {
5235 /* Accumulate. */
5236 tmp2 = neon_load_reg(rd, pass);
5237 switch (op) {
5238 case 0:
5239 gen_neon_add(size, tmp, tmp2);
5240 break;
5241 case 1:
5242 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5243 break;
5244 case 4:
5245 gen_neon_rsb(size, tmp, tmp2);
5246 break;
5247 case 5:
5248 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5249 break;
5250 default:
5251 abort();
5252 }
5253 dead_tmp(tmp2);
5254 }
5255 neon_store_reg(rd, pass, tmp);
5256 }
5257 break;
5258 case 2: /* VMLAL scalar */
5259 case 3: /* VQDMLAL scalar */
5260 case 6: /* VMLSL scalar */
5261 case 7: /* VQDMLSL scalar */
5262 case 10: /* VMULL scalar */
5263 case 11: /* VQDMULL scalar */
5264 if (size == 0 && (op == 3 || op == 7 || op == 11))
5265 return 1;
5266
5267 tmp2 = neon_get_scalar(size, rm);
5268 /* We need a copy of tmp2 because gen_neon_mull
5269 * deletes it during pass 0. */
5270 tmp4 = new_tmp();
5271 tcg_gen_mov_i32(tmp4, tmp2);
5272 tmp3 = neon_load_reg(rn, 1);
5273
5274 for (pass = 0; pass < 2; pass++) {
5275 if (pass == 0) {
5276 tmp = neon_load_reg(rn, 0);
5277 } else {
5278 tmp = tmp3;
5279 tmp2 = tmp4;
5280 }
5281 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5282 if (op != 11) {
5283 neon_load_reg64(cpu_V1, rd + pass);
5284 }
5285 switch (op) {
5286 case 6:
5287 gen_neon_negl(cpu_V0, size);
5288 /* Fall through */
5289 case 2:
5290 gen_neon_addl(size);
5291 break;
5292 case 3: case 7:
5293 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5294 if (op == 7) {
5295 gen_neon_negl(cpu_V0, size);
5296 }
5297 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5298 break;
5299 case 10:
5300 /* no-op */
5301 break;
5302 case 11:
5303 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5304 break;
5305 default:
5306 abort();
5307 }
5308 neon_store_reg64(cpu_V0, rd + pass);
5309 }
5310
5311
5312 break;
5313 default: /* 14 and 15 are RESERVED */
5314 return 1;
5315 }
5316 }
5317 } else { /* size == 3 */
5318 if (!u) {
5319 /* Extract. */
5320 imm = (insn >> 8) & 0xf;
5321
5322 if (imm > 7 && !q)
5323 return 1;
5324
5325 if (imm == 0) {
5326 neon_load_reg64(cpu_V0, rn);
5327 if (q) {
5328 neon_load_reg64(cpu_V1, rn + 1);
5329 }
5330 } else if (imm == 8) {
5331 neon_load_reg64(cpu_V0, rn + 1);
5332 if (q) {
5333 neon_load_reg64(cpu_V1, rm);
5334 }
5335 } else if (q) {
5336 tmp64 = tcg_temp_new_i64();
5337 if (imm < 8) {
5338 neon_load_reg64(cpu_V0, rn);
5339 neon_load_reg64(tmp64, rn + 1);
5340 } else {
5341 neon_load_reg64(cpu_V0, rn + 1);
5342 neon_load_reg64(tmp64, rm);
5343 }
5344 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5345 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5346 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5347 if (imm < 8) {
5348 neon_load_reg64(cpu_V1, rm);
5349 } else {
5350 neon_load_reg64(cpu_V1, rm + 1);
5351 imm -= 8;
5352 }
5353 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5354 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5355 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5356 tcg_temp_free_i64(tmp64);
5357 } else {
5358 /* BUGFIX */
5359 neon_load_reg64(cpu_V0, rn);
5360 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5361 neon_load_reg64(cpu_V1, rm);
5362 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5363 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5364 }
5365 neon_store_reg64(cpu_V0, rd);
5366 if (q) {
5367 neon_store_reg64(cpu_V1, rd + 1);
5368 }
5369 } else if ((insn & (1 << 11)) == 0) {
5370 /* Two register misc. */
5371 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5372 size = (insn >> 18) & 3;
5373 switch (op) {
5374 case 0: /* VREV64 */
5375 if (size == 3)
5376 return 1;
5377 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5378 tmp = neon_load_reg(rm, pass * 2);
5379 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5380 switch (size) {
5381 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5382 case 1: gen_swap_half(tmp); break;
5383 case 2: /* no-op */ break;
5384 default: abort();
5385 }
5386 neon_store_reg(rd, pass * 2 + 1, tmp);
5387 if (size == 2) {
5388 neon_store_reg(rd, pass * 2, tmp2);
5389 } else {
5390 switch (size) {
5391 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5392 case 1: gen_swap_half(tmp2); break;
5393 default: abort();
5394 }
5395 neon_store_reg(rd, pass * 2, tmp2);
5396 }
5397 }
5398 break;
5399 case 4: case 5: /* VPADDL */
5400 case 12: case 13: /* VPADAL */
5401 if (size == 3)
5402 return 1;
5403 for (pass = 0; pass < q + 1; pass++) {
5404 tmp = neon_load_reg(rm, pass * 2);
5405 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5406 tmp = neon_load_reg(rm, pass * 2 + 1);
5407 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5408 switch (size) {
5409 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5410 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5411 case 2: tcg_gen_add_i64(CPU_V001); break;
5412 default: abort();
5413 }
5414 if (op >= 12) {
5415 /* Accumulate. */
5416 neon_load_reg64(cpu_V1, rd + pass);
5417 gen_neon_addl(size);
5418 }
5419 neon_store_reg64(cpu_V0, rd + pass);
5420 }
5421 break;
5422 case 33: /* VTRN */
5423 if (size == 2) {
5424 for (n = 0; n < (q ? 4 : 2); n += 2) {
5425 tmp = neon_load_reg(rm, n);
5426 tmp2 = neon_load_reg(rd, n + 1);
5427 neon_store_reg(rm, n, tmp2);
5428 neon_store_reg(rd, n + 1, tmp);
5429 }
5430 } else {
5431 goto elementwise;
5432 }
5433 break;
5434 case 34: /* VUZP */
5435 if (gen_neon_unzip(rd, rm, size, q)) {
5436 return 1;
5437 }
5438 break;
5439 case 35: /* VZIP */
5440 if (gen_neon_zip(rd, rm, size, q)) {
5441 return 1;
5442 }
5443 break;
5444 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5445 if (size == 3)
5446 return 1;
5447 TCGV_UNUSED(tmp2);
5448 for (pass = 0; pass < 2; pass++) {
5449 neon_load_reg64(cpu_V0, rm + pass);
5450 tmp = new_tmp();
5451 gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
5452 if (pass == 0) {
5453 tmp2 = tmp;
5454 } else {
5455 neon_store_reg(rd, 0, tmp2);
5456 neon_store_reg(rd, 1, tmp);
5457 }
5458 }
5459 break;
5460 case 38: /* VSHLL */
5461 if (q || size == 3)
5462 return 1;
5463 tmp = neon_load_reg(rm, 0);
5464 tmp2 = neon_load_reg(rm, 1);
5465 for (pass = 0; pass < 2; pass++) {
5466 if (pass == 1)
5467 tmp = tmp2;
5468 gen_neon_widen(cpu_V0, tmp, size, 1);
5469 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5470 neon_store_reg64(cpu_V0, rd + pass);
5471 }
5472 break;
5473 case 44: /* VCVT.F16.F32 */
5474 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5475 return 1;
5476 tmp = new_tmp();
5477 tmp2 = new_tmp();
5478 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5479 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5480 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5481 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5482 tcg_gen_shli_i32(tmp2, tmp2, 16);
5483 tcg_gen_or_i32(tmp2, tmp2, tmp);
5484 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5485 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5486 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5487 neon_store_reg(rd, 0, tmp2);
5488 tmp2 = new_tmp();
5489 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5490 tcg_gen_shli_i32(tmp2, tmp2, 16);
5491 tcg_gen_or_i32(tmp2, tmp2, tmp);
5492 neon_store_reg(rd, 1, tmp2);
5493 dead_tmp(tmp);
5494 break;
5495 case 46: /* VCVT.F32.F16 */
5496 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5497 return 1;
5498 tmp3 = new_tmp();
5499 tmp = neon_load_reg(rm, 0);
5500 tmp2 = neon_load_reg(rm, 1);
5501 tcg_gen_ext16u_i32(tmp3, tmp);
5502 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5503 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5504 tcg_gen_shri_i32(tmp3, tmp, 16);
5505 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5506 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5507 dead_tmp(tmp);
5508 tcg_gen_ext16u_i32(tmp3, tmp2);
5509 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5510 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5511 tcg_gen_shri_i32(tmp3, tmp2, 16);
5512 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5513 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5514 dead_tmp(tmp2);
5515 dead_tmp(tmp3);
5516 break;
5517 default:
5518 elementwise:
5519 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5520 if (op == 30 || op == 31 || op >= 58) {
5521 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5522 neon_reg_offset(rm, pass));
5523 TCGV_UNUSED(tmp);
5524 } else {
5525 tmp = neon_load_reg(rm, pass);
5526 }
5527 switch (op) {
5528 case 1: /* VREV32 */
5529 switch (size) {
5530 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5531 case 1: gen_swap_half(tmp); break;
5532 default: return 1;
5533 }
5534 break;
5535 case 2: /* VREV16 */
5536 if (size != 0)
5537 return 1;
5538 gen_rev16(tmp);
5539 break;
5540 case 8: /* CLS */
5541 switch (size) {
5542 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5543 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5544 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5545 default: return 1;
5546 }
5547 break;
5548 case 9: /* CLZ */
5549 switch (size) {
5550 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5551 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5552 case 2: gen_helper_clz(tmp, tmp); break;
5553 default: return 1;
5554 }
5555 break;
5556 case 10: /* CNT */
5557 if (size != 0)
5558 return 1;
5559 gen_helper_neon_cnt_u8(tmp, tmp);
5560 break;
5561 case 11: /* VNOT */
5562 if (size != 0)
5563 return 1;
5564 tcg_gen_not_i32(tmp, tmp);
5565 break;
5566 case 14: /* VQABS */
5567 switch (size) {
5568 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5569 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5570 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
5571 default: return 1;
5572 }
5573 break;
5574 case 15: /* VQNEG */
5575 switch (size) {
5576 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5577 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5578 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
5579 default: return 1;
5580 }
5581 break;
5582 case 16: case 19: /* VCGT #0, VCLE #0 */
5583 tmp2 = tcg_const_i32(0);
5584 switch(size) {
5585 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5586 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5587 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5588 default: return 1;
5589 }
5590 tcg_temp_free(tmp2);
5591 if (op == 19)
5592 tcg_gen_not_i32(tmp, tmp);
5593 break;
5594 case 17: case 20: /* VCGE #0, VCLT #0 */
5595 tmp2 = tcg_const_i32(0);
5596 switch(size) {
5597 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5598 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5599 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5600 default: return 1;
5601 }
5602 tcg_temp_free(tmp2);
5603 if (op == 20)
5604 tcg_gen_not_i32(tmp, tmp);
5605 break;
5606 case 18: /* VCEQ #0 */
5607 tmp2 = tcg_const_i32(0);
5608 switch(size) {
5609 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5610 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5611 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5612 default: return 1;
5613 }
5614 tcg_temp_free(tmp2);
5615 break;
5616 case 22: /* VABS */
5617 switch(size) {
5618 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5619 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5620 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5621 default: return 1;
5622 }
5623 break;
5624 case 23: /* VNEG */
5625 if (size == 3)
5626 return 1;
5627 tmp2 = tcg_const_i32(0);
5628 gen_neon_rsb(size, tmp, tmp2);
5629 tcg_temp_free(tmp2);
5630 break;
5631 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5632 tmp2 = tcg_const_i32(0);
5633 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5634 tcg_temp_free(tmp2);
5635 if (op == 27)
5636 tcg_gen_not_i32(tmp, tmp);
5637 break;
5638 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5639 tmp2 = tcg_const_i32(0);
5640 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5641 tcg_temp_free(tmp2);
5642 if (op == 28)
5643 tcg_gen_not_i32(tmp, tmp);
5644 break;
5645 case 26: /* Float VCEQ #0 */
5646 tmp2 = tcg_const_i32(0);
5647 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5648 tcg_temp_free(tmp2);
5649 break;
5650 case 30: /* Float VABS */
5651 gen_vfp_abs(0);
5652 break;
5653 case 31: /* Float VNEG */
5654 gen_vfp_neg(0);
5655 break;
5656 case 32: /* VSWP */
5657 tmp2 = neon_load_reg(rd, pass);
5658 neon_store_reg(rm, pass, tmp2);
5659 break;
5660 case 33: /* VTRN */
5661 tmp2 = neon_load_reg(rd, pass);
5662 switch (size) {
5663 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5664 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5665 case 2: abort();
5666 default: return 1;
5667 }
5668 neon_store_reg(rm, pass, tmp2);
5669 break;
5670 case 56: /* Integer VRECPE */
5671 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5672 break;
5673 case 57: /* Integer VRSQRTE */
5674 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5675 break;
5676 case 58: /* Float VRECPE */
5677 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5678 break;
5679 case 59: /* Float VRSQRTE */
5680 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5681 break;
5682 case 60: /* VCVT.F32.S32 */
5683 gen_vfp_sito(0);
5684 break;
5685 case 61: /* VCVT.F32.U32 */
5686 gen_vfp_uito(0);
5687 break;
5688 case 62: /* VCVT.S32.F32 */
5689 gen_vfp_tosiz(0);
5690 break;
5691 case 63: /* VCVT.U32.F32 */
5692 gen_vfp_touiz(0);
5693 break;
5694 default:
5695 /* Reserved: 21, 29, 39-56 */
5696 return 1;
5697 }
5698 if (op == 30 || op == 31 || op >= 58) {
5699 tcg_gen_st_f32(cpu_F0s, cpu_env,
5700 neon_reg_offset(rd, pass));
5701 } else {
5702 neon_store_reg(rd, pass, tmp);
5703 }
5704 }
5705 break;
5706 }
5707 } else if ((insn & (1 << 10)) == 0) {
5708 /* VTBL, VTBX. */
5709 n = ((insn >> 5) & 0x18) + 8;
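 /* n is the table length in bytes: (len + 1) * 8. */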
5710 if (insn & (1 << 6)) {
5711 tmp = neon_load_reg(rd, 0);
5712 } else {
5713 tmp = new_tmp();
5714 tcg_gen_movi_i32(tmp, 0);
5715 }
5716 tmp2 = neon_load_reg(rm, 0);
5717 tmp4 = tcg_const_i32(rn);
5718 tmp5 = tcg_const_i32(n);
5719 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5720 dead_tmp(tmp);
5721 if (insn & (1 << 6)) {
5722 tmp = neon_load_reg(rd, 1);
5723 } else {
5724 tmp = new_tmp();
5725 tcg_gen_movi_i32(tmp, 0);
5726 }
5727 tmp3 = neon_load_reg(rm, 1);
5728 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5729 tcg_temp_free_i32(tmp5);
5730 tcg_temp_free_i32(tmp4);
5731 neon_store_reg(rd, 0, tmp2);
5732 neon_store_reg(rd, 1, tmp3);
5733 dead_tmp(tmp);
5734 } else if ((insn & 0x380) == 0) {
5735 /* VDUP */
5736 if (insn & (1 << 19)) {
5737 tmp = neon_load_reg(rm, 1);
5738 } else {
5739 tmp = neon_load_reg(rm, 0);
5740 }
5741 if (insn & (1 << 16)) {
5742 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5743 } else if (insn & (1 << 17)) {
5744 if ((insn >> 18) & 1)
5745 gen_neon_dup_high16(tmp);
5746 else
5747 gen_neon_dup_low16(tmp);
5748 }
5749 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5750 tmp2 = new_tmp();
5751 tcg_gen_mov_i32(tmp2, tmp);
5752 neon_store_reg(rd, pass, tmp2);
5753 }
5754 dead_tmp(tmp);
5755 } else {
5756 return 1;
5757 }
5758 }
5759 }
5760 return 0;
5761 }
5762
5763 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5764 {
5765 int crn = (insn >> 16) & 0xf;
5766 int crm = insn & 0xf;
5767 int op1 = (insn >> 21) & 7;
5768 int op2 = (insn >> 5) & 7;
5769 int rt = (insn >> 12) & 0xf;
5770 TCGv tmp;
5771
5772 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5773 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5774 /* TEECR */
5775 if (IS_USER(s))
5776 return 1;
5777 tmp = load_cpu_field(teecr);
5778 store_reg(s, rt, tmp);
5779 return 0;
5780 }
5781 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5782 /* TEEHBR */
5783 if (IS_USER(s) && (env->teecr & 1))
5784 return 1;
5785 tmp = load_cpu_field(teehbr);
5786 store_reg(s, rt, tmp);
5787 return 0;
5788 }
5789 }
5790 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5791 op1, crn, crm, op2);
5792 return 1;
5793 }
5794
5795 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5796 {
5797 int crn = (insn >> 16) & 0xf;
5798 int crm = insn & 0xf;
5799 int op1 = (insn >> 21) & 7;
5800 int op2 = (insn >> 5) & 7;
5801 int rt = (insn >> 12) & 0xf;
5802 TCGv tmp;
5803
5804 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5805 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5806 /* TEECR */
5807 if (IS_USER(s))
5808 return 1;
5809 tmp = load_reg(s, rt);
5810 gen_helper_set_teecr(cpu_env, tmp);
5811 dead_tmp(tmp);
5812 return 0;
5813 }
5814 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5815 /* TEEHBR */
5816 if (IS_USER(s) && (env->teecr & 1))
5817 return 1;
5818 tmp = load_reg(s, rt);
5819 store_cpu_field(tmp, teehbr);
5820 return 0;
5821 }
5822 }
5823 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5824 op1, crn, crm, op2);
5825 return 1;
5826 }
5827
5828 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5829 {
5830 int cpnum;
5831
5832 cpnum = (insn >> 8) & 0xf;
5833 if (arm_feature(env, ARM_FEATURE_XSCALE)
5834 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5835 return 1;
5836
5837 switch (cpnum) {
5838 case 0:
5839 case 1:
5840 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5841 return disas_iwmmxt_insn(env, s, insn);
5842 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5843 return disas_dsp_insn(env, s, insn);
5844 }
5845 return 1;
5846 case 10:
5847 case 11:
5848 return disas_vfp_insn (env, s, insn);
5849 case 14:
5850 /* Coprocessors 7-15 are architecturally reserved by ARM.
5851 Unfortunately Intel decided to ignore this. */
5852 if (arm_feature(env, ARM_FEATURE_XSCALE))
5853 goto board;
5854 if (insn & (1 << 20))
5855 return disas_cp14_read(env, s, insn);
5856 else
5857 return disas_cp14_write(env, s, insn);
5858 case 15:
5859 return disas_cp15_insn (env, s, insn);
5860 default:
5861 board:
5862 /* Unknown coprocessor. See if the board has hooked it. */
5863 return disas_cp_insn (env, s, insn);
5864 }
5865 }
5866
5867
5868 /* Store a 64-bit value to a register pair. Clobbers val. */
5869 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5870 {
5871 TCGv tmp;
5872 tmp = new_tmp();
5873 tcg_gen_trunc_i64_i32(tmp, val);
5874 store_reg(s, rlow, tmp);
5875 tmp = new_tmp();
5876 tcg_gen_shri_i64(val, val, 32);
5877 tcg_gen_trunc_i64_i32(tmp, val);
5878 store_reg(s, rhigh, tmp);
5879 }
5880
5881 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5882 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5883 {
5884 TCGv_i64 tmp;
5885 TCGv tmp2;
5886
5887 /* Load value and extend to 64 bits. */
5888 tmp = tcg_temp_new_i64();
5889 tmp2 = load_reg(s, rlow);
5890 tcg_gen_extu_i32_i64(tmp, tmp2);
5891 dead_tmp(tmp2);
5892 tcg_gen_add_i64(val, val, tmp);
5893 tcg_temp_free_i64(tmp);
5894 }
5895
5896 /* load and add a 64-bit value from a register pair. */
5897 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5898 {
5899 TCGv_i64 tmp;
5900 TCGv tmpl;
5901 TCGv tmph;
5902
5903 /* Load 64-bit value rd:rn. */
5904 tmpl = load_reg(s, rlow);
5905 tmph = load_reg(s, rhigh);
5906 tmp = tcg_temp_new_i64();
5907 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5908 dead_tmp(tmpl);
5909 dead_tmp(tmph);
5910 tcg_gen_add_i64(val, val, tmp);
5911 tcg_temp_free_i64(tmp);
5912 }
5913
5914 /* Set N and Z flags from a 64-bit value. */
5915 static void gen_logicq_cc(TCGv_i64 val)
5916 {
5917 TCGv tmp = new_tmp();
5918 gen_helper_logicq_cc(tmp, val);
5919 gen_logic_CC(tmp);
5920 dead_tmp(tmp);
5921 }
5922
5923 /* Load/Store exclusive instructions are implemented by remembering
5924 the value/address loaded, and seeing if these are the same
5925 when the store is performed. This should be sufficient to implement
5926 the architecturally mandated semantics, and avoids having to monitor
5927 regular stores.
5928
5929 In system emulation mode only one CPU will be running at once, so
5930 this sequence is effectively atomic. In user emulation mode we
5931 throw an exception and handle the atomic operation elsewhere. */
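 /* A minimal guest sequence this models (illustrative only):
  *     ldrex r1, [r0]
  *     add r1, r1, #1
  *     strex r2, r1, [r0] @ r2 == 0 iff the store succeeded
  */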
5932 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5933 TCGv addr, int size)
5934 {
5935 TCGv tmp;
5936
5937 switch (size) {
5938 case 0:
5939 tmp = gen_ld8u(addr, IS_USER(s));
5940 break;
5941 case 1:
5942 tmp = gen_ld16u(addr, IS_USER(s));
5943 break;
5944 case 2:
5945 case 3:
5946 tmp = gen_ld32(addr, IS_USER(s));
5947 break;
5948 default:
5949 abort();
5950 }
5951 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5952 store_reg(s, rt, tmp);
5953 if (size == 3) {
5954 TCGv tmp2 = new_tmp();
5955 tcg_gen_addi_i32(tmp2, addr, 4);
5956 tmp = gen_ld32(tmp2, IS_USER(s));
5957 dead_tmp(tmp2);
5958 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5959 store_reg(s, rt2, tmp);
5960 }
5961 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5962 }
5963
5964 static void gen_clrex(DisasContext *s)
5965 {
5966 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5967 }
5968
5969 #ifdef CONFIG_USER_ONLY
5970 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5971 TCGv addr, int size)
5972 {
5973 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5974 tcg_gen_movi_i32(cpu_exclusive_info,
5975 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5976 gen_exception_insn(s, 4, EXCP_STREX);
5977 }
5978 #else
5979 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5980 TCGv addr, int size)
5981 {
5982 TCGv tmp;
5983 int done_label;
5984 int fail_label;
5985
5986 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
5987 [addr] = {Rt};
5988 {Rd} = 0;
5989 } else {
5990 {Rd} = 1;
5991 } */
5992 fail_label = gen_new_label();
5993 done_label = gen_new_label();
5994 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
5995 switch (size) {
5996 case 0:
5997 tmp = gen_ld8u(addr, IS_USER(s));
5998 break;
5999 case 1:
6000 tmp = gen_ld16u(addr, IS_USER(s));
6001 break;
6002 case 2:
6003 case 3:
6004 tmp = gen_ld32(addr, IS_USER(s));
6005 break;
6006 default:
6007 abort();
6008 }
6009 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6010 dead_tmp(tmp);
6011 if (size == 3) {
6012 TCGv tmp2 = new_tmp();
6013 tcg_gen_addi_i32(tmp2, addr, 4);
6014 tmp = gen_ld32(tmp2, IS_USER(s));
6015 dead_tmp(tmp2);
6016 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6017 dead_tmp(tmp);
6018 }
6019 tmp = load_reg(s, rt);
6020 switch (size) {
6021 case 0:
6022 gen_st8(tmp, addr, IS_USER(s));
6023 break;
6024 case 1:
6025 gen_st16(tmp, addr, IS_USER(s));
6026 break;
6027 case 2:
6028 case 3:
6029 gen_st32(tmp, addr, IS_USER(s));
6030 break;
6031 default:
6032 abort();
6033 }
6034 if (size == 3) {
6035 tcg_gen_addi_i32(addr, addr, 4);
6036 tmp = load_reg(s, rt2);
6037 gen_st32(tmp, addr, IS_USER(s));
6038 }
6039 tcg_gen_movi_i32(cpu_R[rd], 0);
6040 tcg_gen_br(done_label);
6041 gen_set_label(fail_label);
6042 tcg_gen_movi_i32(cpu_R[rd], 1);
6043 gen_set_label(done_label);
6044 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6045 }
6046 #endif
6047
6048 static void disas_arm_insn(CPUState * env, DisasContext *s)
6049 {
6050 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6051 TCGv tmp;
6052 TCGv tmp2;
6053 TCGv tmp3;
6054 TCGv addr;
6055 TCGv_i64 tmp64;
6056
6057 insn = ldl_code(s->pc);
6058 s->pc += 4;
6059
6060 /* M variants do not implement ARM mode. */
6061 if (IS_M(env))
6062 goto illegal_op;
6063 cond = insn >> 28;
6064 if (cond == 0xf) {
6065 /* Unconditional instructions. */
6066 if (((insn >> 25) & 7) == 1) {
6067 /* NEON Data processing. */
6068 if (!arm_feature(env, ARM_FEATURE_NEON))
6069 goto illegal_op;
6070
6071 if (disas_neon_data_insn(env, s, insn))
6072 goto illegal_op;
6073 return;
6074 }
6075 if ((insn & 0x0f100000) == 0x04000000) {
6076 /* NEON load/store. */
6077 if (!arm_feature(env, ARM_FEATURE_NEON))
6078 goto illegal_op;
6079
6080 if (disas_neon_ls_insn(env, s, insn))
6081 goto illegal_op;
6082 return;
6083 }
6084 if (((insn & 0x0f30f000) == 0x0510f000) ||
6085 ((insn & 0x0f30f010) == 0x0710f000)) {
6086 if ((insn & (1 << 22)) == 0) {
6087 /* PLDW; v7MP */
6088 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6089 goto illegal_op;
6090 }
6091 }
6092 /* Otherwise PLD; v5TE+ */
6093 return;
6094 }
6095 if (((insn & 0x0f70f000) == 0x0450f000) ||
6096 ((insn & 0x0f70f010) == 0x0650f000)) {
6097 ARCH(7);
6098 return; /* PLI; V7 */
6099 }
6100 if (((insn & 0x0f700000) == 0x04100000) ||
6101 ((insn & 0x0f700010) == 0x06100000)) {
6102 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6103 goto illegal_op;
6104 }
6105 return; /* v7MP: Unallocated memory hint: must NOP */
6106 }
6107
6108 if ((insn & 0x0ffffdff) == 0x01010000) {
6109 ARCH(6);
6110 /* setend */
6111 if (insn & (1 << 9)) {
6112 /* BE8 mode not implemented. */
6113 goto illegal_op;
6114 }
6115 return;
6116 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6117 switch ((insn >> 4) & 0xf) {
6118 case 1: /* clrex */
6119 ARCH(6K);
6120 gen_clrex(s);
6121 return;
6122 case 4: /* dsb */
6123 case 5: /* dmb */
6124 case 6: /* isb */
6125 ARCH(7);
6126 /* We don't emulate caches so these are a no-op. */
6127 return;
6128 default:
6129 goto illegal_op;
6130 }
6131 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6132 /* srs */
6133 int32_t offset;
6134 if (IS_USER(s))
6135 goto illegal_op;
6136 ARCH(6);
6137 op1 = (insn & 0x1f);
6138 addr = new_tmp();
6139 tmp = tcg_const_i32(op1);
6140 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6141 tcg_temp_free_i32(tmp);
6142 i = (insn >> 23) & 3;
6143 switch (i) {
6144 case 0: offset = -4; break; /* DA */
6145 case 1: offset = 0; break; /* IA */
6146 case 2: offset = -8; break; /* DB */
6147 case 3: offset = 4; break; /* IB */
6148 default: abort();
6149 }
6150 if (offset)
6151 tcg_gen_addi_i32(addr, addr, offset);
6152 tmp = load_reg(s, 14);
6153 gen_st32(tmp, addr, 0);
6154 tmp = load_cpu_field(spsr);
6155 tcg_gen_addi_i32(addr, addr, 4);
6156 gen_st32(tmp, addr, 0);
6157 if (insn & (1 << 21)) {
6158 /* Base writeback. */
6159 switch (i) {
6160 case 0: offset = -8; break;
6161 case 1: offset = 4; break;
6162 case 2: offset = -4; break;
6163 case 3: offset = 0; break;
6164 default: abort();
6165 }
6166 if (offset)
6167 tcg_gen_addi_i32(addr, addr, offset);
6168 tmp = tcg_const_i32(op1);
6169 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6170 tcg_temp_free_i32(tmp);
6171 dead_tmp(addr);
6172 } else {
6173 dead_tmp(addr);
6174 }
6175 return;
6176 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6177 /* rfe */
6178 int32_t offset;
6179 if (IS_USER(s))
6180 goto illegal_op;
6181 ARCH(6);
6182 rn = (insn >> 16) & 0xf;
6183 addr = load_reg(s, rn);
6184 i = (insn >> 23) & 3;
6185 switch (i) {
6186 case 0: offset = -4; break; /* DA */
6187 case 1: offset = 0; break; /* IA */
6188 case 2: offset = -8; break; /* DB */
6189 case 3: offset = 4; break; /* IB */
6190 default: abort();
6191 }
6192 if (offset)
6193 tcg_gen_addi_i32(addr, addr, offset);
6194 /* Load PC into tmp and CPSR into tmp2. */
6195 tmp = gen_ld32(addr, 0);
6196 tcg_gen_addi_i32(addr, addr, 4);
6197 tmp2 = gen_ld32(addr, 0);
6198 if (insn & (1 << 21)) {
6199 /* Base writeback. */
6200 switch (i) {
6201 case 0: offset = -8; break;
6202 case 1: offset = 4; break;
6203 case 2: offset = -4; break;
6204 case 3: offset = 0; break;
6205 default: abort();
6206 }
6207 if (offset)
6208 tcg_gen_addi_i32(addr, addr, offset);
6209 store_reg(s, rn, addr);
6210 } else {
6211 dead_tmp(addr);
6212 }
6213 gen_rfe(s, tmp, tmp2);
6214 return;
6215 } else if ((insn & 0x0e000000) == 0x0a000000) {
6216 /* branch link and change to thumb (blx <offset>) */
6217 int32_t offset;
6218
6219 val = (uint32_t)s->pc;
6220 tmp = new_tmp();
6221 tcg_gen_movi_i32(tmp, val);
6222 store_reg(s, 14, tmp);
6223 /* Sign-extend the 24-bit offset */
6224 offset = (((int32_t)insn) << 8) >> 8;
6225 /* offset * 4 + bit24 * 2 + (thumb bit) */
6226 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6227 /* pipeline offset */
6228 val += 4;
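 /* Bit 24 of the instruction is the H bit supplying the extra
    halfword of offset, and bit 0 is set because blx <offset>
    always switches to Thumb. */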
6229 gen_bx_im(s, val);
6230 return;
6231 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6232 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6233 /* iWMMXt register transfer. */
6234 if (env->cp15.c15_cpar & (1 << 1))
6235 if (!disas_iwmmxt_insn(env, s, insn))
6236 return;
6237 }
6238 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6239 /* Coprocessor double register transfer. */
6240 } else if ((insn & 0x0f000010) == 0x0e000010) {
6241 /* Additional coprocessor register transfer. */
6242 } else if ((insn & 0x0ff10020) == 0x01000000) {
6243 uint32_t mask;
6244 uint32_t val;
6245 /* cps (privileged) */
6246 if (IS_USER(s))
6247 return;
6248 mask = val = 0;
6249 if (insn & (1 << 19)) {
6250 if (insn & (1 << 8))
6251 mask |= CPSR_A;
6252 if (insn & (1 << 7))
6253 mask |= CPSR_I;
6254 if (insn & (1 << 6))
6255 mask |= CPSR_F;
6256 if (insn & (1 << 18))
6257 val |= mask;
6258 }
6259 if (insn & (1 << 17)) {
6260 mask |= CPSR_M;
6261 val |= (insn & 0x1f);
6262 }
6263 if (mask) {
6264 gen_set_psr_im(s, mask, 0, val);
6265 }
6266 return;
6267 }
6268 goto illegal_op;
6269 }
6270 if (cond != 0xe) {
6271 /* If the condition is not "always", generate a conditional jump
6272 to the next instruction. */
6273 s->condlabel = gen_new_label();
6274 gen_test_cc(cond ^ 1, s->condlabel);
6275 s->condjmp = 1;
6276 }
6277 if ((insn & 0x0f900000) == 0x03000000) {
6278 if ((insn & (1 << 21)) == 0) {
6279 ARCH(6T2);
6280 rd = (insn >> 12) & 0xf;
6281 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6282 if ((insn & (1 << 22)) == 0) {
6283 /* MOVW */
6284 tmp = new_tmp();
6285 tcg_gen_movi_i32(tmp, val);
6286 } else {
6287 /* MOVT */
6288 tmp = load_reg(s, rd);
6289 tcg_gen_ext16u_i32(tmp, tmp);
6290 tcg_gen_ori_i32(tmp, tmp, val << 16);
6291 }
6292 store_reg(s, rd, tmp);
6293 } else {
6294 if (((insn >> 12) & 0xf) != 0xf)
6295 goto illegal_op;
6296 if (((insn >> 16) & 0xf) == 0) {
6297 gen_nop_hint(s, insn & 0xff);
6298 } else {
6299 /* CPSR = immediate */
6300 val = insn & 0xff;
6301 shift = ((insn >> 8) & 0xf) * 2;
6302 if (shift)
6303 val = (val >> shift) | (val << (32 - shift));
6304 i = ((insn & (1 << 22)) != 0);
6305 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6306 goto illegal_op;
6307 }
6308 }
6309 } else if ((insn & 0x0f900000) == 0x01000000
6310 && (insn & 0x00000090) != 0x00000090) {
6311 /* miscellaneous instructions */
6312 op1 = (insn >> 21) & 3;
6313 sh = (insn >> 4) & 0xf;
6314 rm = insn & 0xf;
6315 switch (sh) {
6316 case 0x0: /* move program status register */
6317 if (op1 & 1) {
6318 /* PSR = reg */
6319 tmp = load_reg(s, rm);
6320 i = ((op1 & 2) != 0);
6321 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6322 goto illegal_op;
6323 } else {
6324 /* reg = PSR */
6325 rd = (insn >> 12) & 0xf;
6326 if (op1 & 2) {
6327 if (IS_USER(s))
6328 goto illegal_op;
6329 tmp = load_cpu_field(spsr);
6330 } else {
6331 tmp = new_tmp();
6332 gen_helper_cpsr_read(tmp);
6333 }
6334 store_reg(s, rd, tmp);
6335 }
6336 break;
6337 case 0x1:
6338 if (op1 == 1) {
6339 /* branch/exchange thumb (bx). */
6340 tmp = load_reg(s, rm);
6341 gen_bx(s, tmp);
6342 } else if (op1 == 3) {
6343 /* clz */
6344 rd = (insn >> 12) & 0xf;
6345 tmp = load_reg(s, rm);
6346 gen_helper_clz(tmp, tmp);
6347 store_reg(s, rd, tmp);
6348 } else {
6349 goto illegal_op;
6350 }
6351 break;
6352 case 0x2:
6353 if (op1 == 1) {
6354 ARCH(5J); /* bxj */
6355 /* Trivial implementation equivalent to bx. */
6356 tmp = load_reg(s, rm);
6357 gen_bx(s, tmp);
6358 } else {
6359 goto illegal_op;
6360 }
6361 break;
6362 case 0x3:
6363 if (op1 != 1)
6364 goto illegal_op;
6365
6366 /* branch link/exchange thumb (blx) */
6367 tmp = load_reg(s, rm);
6368 tmp2 = new_tmp();
6369 tcg_gen_movi_i32(tmp2, s->pc);
6370 store_reg(s, 14, tmp2);
6371 gen_bx(s, tmp);
6372 break;
6373 case 0x5: /* saturating add/subtract */
6374 rd = (insn >> 12) & 0xf;
6375 rn = (insn >> 16) & 0xf;
6376 tmp = load_reg(s, rm);
6377 tmp2 = load_reg(s, rn);
6378 if (op1 & 2)
6379 gen_helper_double_saturate(tmp2, tmp2);
6380 if (op1 & 1)
6381 gen_helper_sub_saturate(tmp, tmp, tmp2);
6382 else
6383 gen_helper_add_saturate(tmp, tmp, tmp2);
6384 dead_tmp(tmp2);
6385 store_reg(s, rd, tmp);
6386 break;
6387 case 7:
6388 /* SMC instruction (op1 == 3)
6389 and undefined instructions (op1 == 0 || op1 == 2)
6390 will trap */
6391 if (op1 != 1) {
6392 goto illegal_op;
6393 }
6394 /* bkpt */
6395 gen_exception_insn(s, 4, EXCP_BKPT);
6396 break;
6397 case 0x8: /* signed multiply */
6398 case 0xa:
6399 case 0xc:
6400 case 0xe:
6401 rs = (insn >> 8) & 0xf;
6402 rn = (insn >> 12) & 0xf;
6403 rd = (insn >> 16) & 0xf;
6404 if (op1 == 1) {
6405 /* (32 * 16) >> 16 */
6406 tmp = load_reg(s, rm);
6407 tmp2 = load_reg(s, rs);
6408 if (sh & 4)
6409 tcg_gen_sari_i32(tmp2, tmp2, 16);
6410 else
6411 gen_sxth(tmp2);
6412 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6413 tcg_gen_shri_i64(tmp64, tmp64, 16);
6414 tmp = new_tmp();
6415 tcg_gen_trunc_i64_i32(tmp, tmp64);
6416 tcg_temp_free_i64(tmp64);
6417 if ((sh & 2) == 0) {
6418 tmp2 = load_reg(s, rn);
6419 gen_helper_add_setq(tmp, tmp, tmp2);
6420 dead_tmp(tmp2);
6421 }
6422 store_reg(s, rd, tmp);
6423 } else {
6424 /* 16 * 16 */
6425 tmp = load_reg(s, rm);
6426 tmp2 = load_reg(s, rs);
6427 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6428 dead_tmp(tmp2);
6429 if (op1 == 2) {
6430 tmp64 = tcg_temp_new_i64();
6431 tcg_gen_ext_i32_i64(tmp64, tmp);
6432 dead_tmp(tmp);
6433 gen_addq(s, tmp64, rn, rd);
6434 gen_storeq_reg(s, rn, rd, tmp64);
6435 tcg_temp_free_i64(tmp64);
6436 } else {
6437 if (op1 == 0) {
6438 tmp2 = load_reg(s, rn);
6439 gen_helper_add_setq(tmp, tmp, tmp2);
6440 dead_tmp(tmp2);
6441 }
6442 store_reg(s, rd, tmp);
6443 }
6444 }
6445 break;
6446 default:
6447 goto illegal_op;
6448 }
6449 } else if (((insn & 0x0e000000) == 0 &&
6450 (insn & 0x00000090) != 0x90) ||
6451 ((insn & 0x0e000000) == (1 << 25))) {
6452 int set_cc, logic_cc, shiftop;
6453
6454 op1 = (insn >> 21) & 0xf;
6455 set_cc = (insn >> 20) & 1;
6456 logic_cc = table_logic_cc[op1] & set_cc;
6457
6458 /* data processing instruction */
6459 if (insn & (1 << 25)) {
6460 /* immediate operand */
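/* Standard ARM rotated immediate: an 8-bit value rotated right by
   twice the 4-bit rotate field.  E.g. imm8 = 0xff with rotate = 1
   gives shift = 2, so val becomes 0xc000003f. */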
6461 val = insn & 0xff;
6462 shift = ((insn >> 8) & 0xf) * 2;
6463 if (shift) {
6464 val = (val >> shift) | (val << (32 - shift));
6465 }
6466 tmp2 = new_tmp();
6467 tcg_gen_movi_i32(tmp2, val);
6468 if (logic_cc && shift) {
6469 gen_set_CF_bit31(tmp2);
6470 }
6471 } else {
6472 /* register */
6473 rm = (insn) & 0xf;
6474 tmp2 = load_reg(s, rm);
6475 shiftop = (insn >> 5) & 3;
6476 if (!(insn & (1 << 4))) {
6477 shift = (insn >> 7) & 0x1f;
6478 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6479 } else {
6480 rs = (insn >> 8) & 0xf;
6481 tmp = load_reg(s, rs);
6482 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6483 }
6484 }
6485 if (op1 != 0x0f && op1 != 0x0d) {
6486 rn = (insn >> 16) & 0xf;
6487 tmp = load_reg(s, rn);
6488 } else {
6489 TCGV_UNUSED(tmp);
6490 }
6491 rd = (insn >> 12) & 0xf;
6492 switch(op1) {
6493 case 0x00:
6494 tcg_gen_and_i32(tmp, tmp, tmp2);
6495 if (logic_cc) {
6496 gen_logic_CC(tmp);
6497 }
6498 store_reg_bx(env, s, rd, tmp);
6499 break;
6500 case 0x01:
6501 tcg_gen_xor_i32(tmp, tmp, tmp2);
6502 if (logic_cc) {
6503 gen_logic_CC(tmp);
6504 }
6505 store_reg_bx(env, s, rd, tmp);
6506 break;
6507 case 0x02:
6508 if (set_cc && rd == 15) {
6509 /* SUBS r15, ... is used for exception return. */
6510 if (IS_USER(s)) {
6511 goto illegal_op;
6512 }
6513 gen_helper_sub_cc(tmp, tmp, tmp2);
6514 gen_exception_return(s, tmp);
6515 } else {
6516 if (set_cc) {
6517 gen_helper_sub_cc(tmp, tmp, tmp2);
6518 } else {
6519 tcg_gen_sub_i32(tmp, tmp, tmp2);
6520 }
6521 store_reg_bx(env, s, rd, tmp);
6522 }
6523 break;
6524 case 0x03:
6525 if (set_cc) {
6526 gen_helper_sub_cc(tmp, tmp2, tmp);
6527 } else {
6528 tcg_gen_sub_i32(tmp, tmp2, tmp);
6529 }
6530 store_reg_bx(env, s, rd, tmp);
6531 break;
6532 case 0x04:
6533 if (set_cc) {
6534 gen_helper_add_cc(tmp, tmp, tmp2);
6535 } else {
6536 tcg_gen_add_i32(tmp, tmp, tmp2);
6537 }
6538 store_reg_bx(env, s, rd, tmp);
6539 break;
6540 case 0x05:
6541 if (set_cc) {
6542 gen_helper_adc_cc(tmp, tmp, tmp2);
6543 } else {
6544 gen_add_carry(tmp, tmp, tmp2);
6545 }
6546 store_reg_bx(env, s, rd, tmp);
6547 break;
6548 case 0x06:
6549 if (set_cc) {
6550 gen_helper_sbc_cc(tmp, tmp, tmp2);
6551 } else {
6552 gen_sub_carry(tmp, tmp, tmp2);
6553 }
6554 store_reg_bx(env, s, rd, tmp);
6555 break;
6556 case 0x07:
6557 if (set_cc) {
6558 gen_helper_sbc_cc(tmp, tmp2, tmp);
6559 } else {
6560 gen_sub_carry(tmp, tmp2, tmp);
6561 }
6562 store_reg_bx(env, s, rd, tmp);
6563 break;
6564 case 0x08:
6565 if (set_cc) {
6566 tcg_gen_and_i32(tmp, tmp, tmp2);
6567 gen_logic_CC(tmp);
6568 }
6569 dead_tmp(tmp);
6570 break;
6571 case 0x09:
6572 if (set_cc) {
6573 tcg_gen_xor_i32(tmp, tmp, tmp2);
6574 gen_logic_CC(tmp);
6575 }
6576 dead_tmp(tmp);
6577 break;
6578 case 0x0a:
6579 if (set_cc) {
6580 gen_helper_sub_cc(tmp, tmp, tmp2);
6581 }
6582 dead_tmp(tmp);
6583 break;
6584 case 0x0b:
6585 if (set_cc) {
6586 gen_helper_add_cc(tmp, tmp, tmp2);
6587 }
6588 dead_tmp(tmp);
6589 break;
6590 case 0x0c:
6591 tcg_gen_or_i32(tmp, tmp, tmp2);
6592 if (logic_cc) {
6593 gen_logic_CC(tmp);
6594 }
6595 store_reg_bx(env, s, rd, tmp);
6596 break;
6597 case 0x0d:
6598 if (logic_cc && rd == 15) {
6599 /* MOVS r15, ... is used for exception return. */
6600 if (IS_USER(s)) {
6601 goto illegal_op;
6602 }
6603 gen_exception_return(s, tmp2);
6604 } else {
6605 if (logic_cc) {
6606 gen_logic_CC(tmp2);
6607 }
6608 store_reg_bx(env, s, rd, tmp2);
6609 }
6610 break;
6611 case 0x0e:
6612 tcg_gen_andc_i32(tmp, tmp, tmp2);
6613 if (logic_cc) {
6614 gen_logic_CC(tmp);
6615 }
6616 store_reg_bx(env, s, rd, tmp);
6617 break;
6618 default:
6619 case 0x0f:
6620 tcg_gen_not_i32(tmp2, tmp2);
6621 if (logic_cc) {
6622 gen_logic_CC(tmp2);
6623 }
6624 store_reg_bx(env, s, rd, tmp2);
6625 break;
6626 }
6627 if (op1 != 0x0f && op1 != 0x0d) {
6628 dead_tmp(tmp2);
6629 }
6630 } else {
6631 /* other instructions */
6632 op1 = (insn >> 24) & 0xf;
6633 switch(op1) {
6634 case 0x0:
6635 case 0x1:
6636 /* multiplies, extra load/stores */
6637 sh = (insn >> 5) & 3;
6638 if (sh == 0) {
6639 if (op1 == 0x0) {
6640 rd = (insn >> 16) & 0xf;
6641 rn = (insn >> 12) & 0xf;
6642 rs = (insn >> 8) & 0xf;
6643 rm = (insn) & 0xf;
6644 op1 = (insn >> 20) & 0xf;
6645 switch (op1) {
6646 case 0: case 1: case 2: case 3: case 6:
6647 /* 32 bit mul */
6648 tmp = load_reg(s, rs);
6649 tmp2 = load_reg(s, rm);
6650 tcg_gen_mul_i32(tmp, tmp, tmp2);
6651 dead_tmp(tmp2);
6652 if (insn & (1 << 22)) {
6653 /* Subtract (mls) */
6654 ARCH(6T2);
6655 tmp2 = load_reg(s, rn);
6656 tcg_gen_sub_i32(tmp, tmp2, tmp);
6657 dead_tmp(tmp2);
6658 } else if (insn & (1 << 21)) {
6659 /* Add */
6660 tmp2 = load_reg(s, rn);
6661 tcg_gen_add_i32(tmp, tmp, tmp2);
6662 dead_tmp(tmp2);
6663 }
6664 if (insn & (1 << 20))
6665 gen_logic_CC(tmp);
6666 store_reg(s, rd, tmp);
6667 break;
6668 case 4:
6669 /* 64 bit mul double accumulate (UMAAL) */
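/* UMAAL: RdHi:RdLo = Rm * Rs + RdLo + RdHi.  Adding both halves of
   the destination as separate 32-bit values cannot overflow 64 bits,
   since (2^32-1)^2 + 2*(2^32-1) == 2^64 - 1. */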
6670 ARCH(6);
6671 tmp = load_reg(s, rs);
6672 tmp2 = load_reg(s, rm);
6673 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6674 gen_addq_lo(s, tmp64, rn);
6675 gen_addq_lo(s, tmp64, rd);
6676 gen_storeq_reg(s, rn, rd, tmp64);
6677 tcg_temp_free_i64(tmp64);
6678 break;
6679 case 8: case 9: case 10: case 11:
6680 case 12: case 13: case 14: case 15:
6681 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6682 tmp = load_reg(s, rs);
6683 tmp2 = load_reg(s, rm);
6684 if (insn & (1 << 22)) {
6685 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6686 } else {
6687 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6688 }
6689 if (insn & (1 << 21)) { /* mult accumulate */
6690 gen_addq(s, tmp64, rn, rd);
6691 }
6692 if (insn & (1 << 20)) {
6693 gen_logicq_cc(tmp64);
6694 }
6695 gen_storeq_reg(s, rn, rd, tmp64);
6696 tcg_temp_free_i64(tmp64);
6697 break;
6698 default:
6699 goto illegal_op;
6700 }
6701 } else {
6702 rn = (insn >> 16) & 0xf;
6703 rd = (insn >> 12) & 0xf;
6704 if (insn & (1 << 23)) {
6705 /* load/store exclusive */
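/* LDREX/STREX family.  Judging by the call sites below, the last
   argument of the helpers selects the access size (0 = byte,
   1 = halfword, 2 = word, 3 = doubleword), and the non-doubleword
   forms pass 15 for the unused second register. */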
6706 op1 = (insn >> 21) & 0x3;
6707 if (op1)
6708 ARCH(6K);
6709 else
6710 ARCH(6);
6711 addr = tcg_temp_local_new_i32();
6712 load_reg_var(s, addr, rn);
6713 if (insn & (1 << 20)) {
6714 switch (op1) {
6715 case 0: /* ldrex */
6716 gen_load_exclusive(s, rd, 15, addr, 2);
6717 break;
6718 case 1: /* ldrexd */
6719 gen_load_exclusive(s, rd, rd + 1, addr, 3);
6720 break;
6721 case 2: /* ldrexb */
6722 gen_load_exclusive(s, rd, 15, addr, 0);
6723 break;
6724 case 3: /* ldrexh */
6725 gen_load_exclusive(s, rd, 15, addr, 1);
6726 break;
6727 default:
6728 abort();
6729 }
6730 } else {
6731 rm = insn & 0xf;
6732 switch (op1) {
6733 case 0: /* strex */
6734 gen_store_exclusive(s, rd, rm, 15, addr, 2);
6735 break;
6736 case 1: /* strexd */
6737 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
6738 break;
6739 case 2: /* strexb */
6740 gen_store_exclusive(s, rd, rm, 15, addr, 0);
6741 break;
6742 case 3: /* strexh */
6743 gen_store_exclusive(s, rd, rm, 15, addr, 1);
6744 break;
6745 default:
6746 abort();
6747 }
6748 }
6749 tcg_temp_free(addr);
6750 } else {
6751 /* SWP instruction */
6752 rm = (insn) & 0xf;
6753
6754 /* ??? This is not really atomic. However we know
6755 we never have multiple CPUs running in parallel,
6756 so it is good enough. */
6757 addr = load_reg(s, rn);
6758 tmp = load_reg(s, rm);
6759 if (insn & (1 << 22)) {
6760 tmp2 = gen_ld8u(addr, IS_USER(s));
6761 gen_st8(tmp, addr, IS_USER(s));
6762 } else {
6763 tmp2 = gen_ld32(addr, IS_USER(s));
6764 gen_st32(tmp, addr, IS_USER(s));
6765 }
6766 dead_tmp(addr);
6767 store_reg(s, rd, tmp2);
6768 }
6769 }
6770 } else {
6771 int address_offset;
6772 int load;
6773 /* Misc load/store */
6774 rn = (insn >> 16) & 0xf;
6775 rd = (insn >> 12) & 0xf;
6776 addr = load_reg(s, rn);
6777 if (insn & (1 << 24))
6778 gen_add_datah_offset(s, insn, 0, addr);
6779 address_offset = 0;
6780 if (insn & (1 << 20)) {
6781 /* load */
6782 switch(sh) {
6783 case 1:
6784 tmp = gen_ld16u(addr, IS_USER(s));
6785 break;
6786 case 2:
6787 tmp = gen_ld8s(addr, IS_USER(s));
6788 break;
6789 default:
6790 case 3:
6791 tmp = gen_ld16s(addr, IS_USER(s));
6792 break;
6793 }
6794 load = 1;
6795 } else if (sh & 2) {
6796 /* doubleword */
6797 if (sh & 1) {
6798 /* store */
6799 tmp = load_reg(s, rd);
6800 gen_st32(tmp, addr, IS_USER(s));
6801 tcg_gen_addi_i32(addr, addr, 4);
6802 tmp = load_reg(s, rd + 1);
6803 gen_st32(tmp, addr, IS_USER(s));
6804 load = 0;
6805 } else {
6806 /* load */
6807 tmp = gen_ld32(addr, IS_USER(s));
6808 store_reg(s, rd, tmp);
6809 tcg_gen_addi_i32(addr, addr, 4);
6810 tmp = gen_ld32(addr, IS_USER(s));
6811 rd++;
6812 load = 1;
6813 }
6814 address_offset = -4;
6815 } else {
6816 /* store */
6817 tmp = load_reg(s, rd);
6818 gen_st16(tmp, addr, IS_USER(s));
6819 load = 0;
6820 }
6821 /* Perform base writeback before writing back the loaded value, to
6822 ensure correct behavior with overlapping index registers;
6823 ldrd with base writeback is undefined if the
6824 destination and index registers overlap. */
6825 if (!(insn & (1 << 24))) {
6826 gen_add_datah_offset(s, insn, address_offset, addr);
6827 store_reg(s, rn, addr);
6828 } else if (insn & (1 << 21)) {
6829 if (address_offset)
6830 tcg_gen_addi_i32(addr, addr, address_offset);
6831 store_reg(s, rn, addr);
6832 } else {
6833 dead_tmp(addr);
6834 }
6835 if (load) {
6836 /* Complete the load. */
6837 store_reg(s, rd, tmp);
6838 }
6839 }
6840 break;
6841 case 0x4:
6842 case 0x5:
6843 goto do_ldst;
6844 case 0x6:
6845 case 0x7:
6846 if (insn & (1 << 4)) {
6847 ARCH(6);
6848 /* Armv6 Media instructions. */
6849 rm = insn & 0xf;
6850 rn = (insn >> 16) & 0xf;
6851 rd = (insn >> 12) & 0xf;
6852 rs = (insn >> 8) & 0xf;
6853 switch ((insn >> 23) & 3) {
6854 case 0: /* Parallel add/subtract. */
6855 op1 = (insn >> 20) & 7;
6856 tmp = load_reg(s, rn);
6857 tmp2 = load_reg(s, rm);
6858 sh = (insn >> 5) & 7;
6859 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6860 goto illegal_op;
6861 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6862 dead_tmp(tmp2);
6863 store_reg(s, rd, tmp);
6864 break;
6865 case 1:
6866 if ((insn & 0x00700020) == 0) {
6867 /* Halfword pack. */
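/* PKHBT: Rd = Rn[15:0] | (Rm LSL shift)[31:16].
   PKHTB: Rd = Rn[31:16] | (Rm ASR shift)[15:0]; a zero shift field
   encodes ASR #32, and shifting by 31 instead yields the same low
   halfword (both just replicate the sign bit of Rm). */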
6868 tmp = load_reg(s, rn);
6869 tmp2 = load_reg(s, rm);
6870 shift = (insn >> 7) & 0x1f;
6871 if (insn & (1 << 6)) {
6872 /* pkhtb */
6873 if (shift == 0)
6874 shift = 31;
6875 tcg_gen_sari_i32(tmp2, tmp2, shift);
6876 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6877 tcg_gen_ext16u_i32(tmp2, tmp2);
6878 } else {
6879 /* pkhbt */
6880 if (shift)
6881 tcg_gen_shli_i32(tmp2, tmp2, shift);
6882 tcg_gen_ext16u_i32(tmp, tmp);
6883 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6884 }
6885 tcg_gen_or_i32(tmp, tmp, tmp2);
6886 dead_tmp(tmp2);
6887 store_reg(s, rd, tmp);
6888 } else if ((insn & 0x00200020) == 0x00200000) {
6889 /* [us]sat */
6890 tmp = load_reg(s, rm);
6891 shift = (insn >> 7) & 0x1f;
6892 if (insn & (1 << 6)) {
6893 if (shift == 0)
6894 shift = 31;
6895 tcg_gen_sari_i32(tmp, tmp, shift);
6896 } else {
6897 tcg_gen_shli_i32(tmp, tmp, shift);
6898 }
6899 sh = (insn >> 16) & 0x1f;
6900 tmp2 = tcg_const_i32(sh);
6901 if (insn & (1 << 22))
6902 gen_helper_usat(tmp, tmp, tmp2);
6903 else
6904 gen_helper_ssat(tmp, tmp, tmp2);
6905 tcg_temp_free_i32(tmp2);
6906 store_reg(s, rd, tmp);
6907 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6908 /* [us]sat16 */
6909 tmp = load_reg(s, rm);
6910 sh = (insn >> 16) & 0x1f;
6911 tmp2 = tcg_const_i32(sh);
6912 if (insn & (1 << 22))
6913 gen_helper_usat16(tmp, tmp, tmp2);
6914 else
6915 gen_helper_ssat16(tmp, tmp, tmp2);
6916 tcg_temp_free_i32(tmp2);
6917 store_reg(s, rd, tmp);
6918 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6919 /* Select bytes. */
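/* SEL: each result byte is taken from Rn if the corresponding
   GE[3:0] flag (set by earlier parallel add/subtract instructions)
   is 1, else from Rm. */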
6920 tmp = load_reg(s, rn);
6921 tmp2 = load_reg(s, rm);
6922 tmp3 = new_tmp();
6923 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6924 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6925 dead_tmp(tmp3);
6926 dead_tmp(tmp2);
6927 store_reg(s, rd, tmp);
6928 } else if ((insn & 0x000003e0) == 0x00000060) {
6929 tmp = load_reg(s, rm);
6930 shift = (insn >> 10) & 3;
6931 /* ??? In many cases it's not necessary to do a
6932 rotate; a shift is sufficient. */
6933 if (shift != 0)
6934 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
6935 op1 = (insn >> 20) & 7;
6936 switch (op1) {
6937 case 0: gen_sxtb16(tmp); break;
6938 case 2: gen_sxtb(tmp); break;
6939 case 3: gen_sxth(tmp); break;
6940 case 4: gen_uxtb16(tmp); break;
6941 case 6: gen_uxtb(tmp); break;
6942 case 7: gen_uxth(tmp); break;
6943 default: goto illegal_op;
6944 }
6945 if (rn != 15) {
6946 tmp2 = load_reg(s, rn);
6947 if ((op1 & 3) == 0) {
6948 gen_add16(tmp, tmp2);
6949 } else {
6950 tcg_gen_add_i32(tmp, tmp, tmp2);
6951 dead_tmp(tmp2);
6952 }
6953 }
6954 store_reg(s, rd, tmp);
6955 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6956 /* rev */
6957 tmp = load_reg(s, rm);
6958 if (insn & (1 << 22)) {
6959 if (insn & (1 << 7)) {
6960 gen_revsh(tmp);
6961 } else {
6962 ARCH(6T2);
6963 gen_helper_rbit(tmp, tmp);
6964 }
6965 } else {
6966 if (insn & (1 << 7))
6967 gen_rev16(tmp);
6968 else
6969 tcg_gen_bswap32_i32(tmp, tmp);
6970 }
6971 store_reg(s, rd, tmp);
6972 } else {
6973 goto illegal_op;
6974 }
6975 break;
6976 case 2: /* Multiplies (Type 3). */
6977 tmp = load_reg(s, rm);
6978 tmp2 = load_reg(s, rs);
6979 if (insn & (1 << 20)) {
6980 /* Signed multiply most significant [accumulate].
6981 (SMMUL, SMMLA, SMMLS) */
6982 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6983
6984 if (rd != 15) {
6985 tmp = load_reg(s, rd);
6986 if (insn & (1 << 6)) {
6987 tmp64 = gen_subq_msw(tmp64, tmp);
6988 } else {
6989 tmp64 = gen_addq_msw(tmp64, tmp);
6990 }
6991 }
6992 if (insn & (1 << 5)) {
6993 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6994 }
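/* Adding 0x80000000 before taking the high word implements the
   rounding (SMMULR/SMMLAR/SMMLSR) variants: the 64-bit product is
   rounded to the nearest multiple of 2^32 instead of truncated. */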
6995 tcg_gen_shri_i64(tmp64, tmp64, 32);
6996 tmp = new_tmp();
6997 tcg_gen_trunc_i64_i32(tmp, tmp64);
6998 tcg_temp_free_i64(tmp64);
6999 store_reg(s, rn, tmp);
7000 } else {
7001 if (insn & (1 << 5))
7002 gen_swap_half(tmp2);
7003 gen_smul_dual(tmp, tmp2);
7004 /* This addition cannot overflow. */
7005 if (insn & (1 << 6)) {
7006 tcg_gen_sub_i32(tmp, tmp, tmp2);
7007 } else {
7008 tcg_gen_add_i32(tmp, tmp, tmp2);
7009 }
7010 dead_tmp(tmp2);
7011 if (insn & (1 << 22)) {
7012 /* smlald, smlsld */
7013 tmp64 = tcg_temp_new_i64();
7014 tcg_gen_ext_i32_i64(tmp64, tmp);
7015 dead_tmp(tmp);
7016 gen_addq(s, tmp64, rd, rn);
7017 gen_storeq_reg(s, rd, rn, tmp64);
7018 tcg_temp_free_i64(tmp64);
7019 } else {
7020 /* smuad, smusd, smlad, smlsd */
7021 if (rd != 15)
7022 {
7023 tmp2 = load_reg(s, rd);
7024 gen_helper_add_setq(tmp, tmp, tmp2);
7025 dead_tmp(tmp2);
7026 }
7027 store_reg(s, rn, tmp);
7028 }
7029 }
7030 break;
7031 case 3:
7032 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7033 switch (op1) {
7034 case 0: /* Unsigned sum of absolute differences. */
7035 ARCH(6);
7036 tmp = load_reg(s, rm);
7037 tmp2 = load_reg(s, rs);
7038 gen_helper_usad8(tmp, tmp, tmp2);
7039 dead_tmp(tmp2);
7040 if (rd != 15) {
7041 tmp2 = load_reg(s, rd);
7042 tcg_gen_add_i32(tmp, tmp, tmp2);
7043 dead_tmp(tmp2);
7044 }
7045 store_reg(s, rn, tmp);
7046 break;
7047 case 0x20: case 0x24: case 0x28: case 0x2c:
7048 /* Bitfield insert/clear. */
7049 ARCH(6T2);
7050 shift = (insn >> 7) & 0x1f;
7051 i = (insn >> 16) & 0x1f;
7052 i = i + 1 - shift;
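/* insn[20:16] is the MSB and shift (insn[11:7]) the LSB of the
   field, so i is now the field width; rm == 15 encodes bfc
   (insert zeroes).  A width of 32 replaces the whole register,
   so no masking is needed. */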
7053 if (rm == 15) {
7054 tmp = new_tmp();
7055 tcg_gen_movi_i32(tmp, 0);
7056 } else {
7057 tmp = load_reg(s, rm);
7058 }
7059 if (i != 32) {
7060 tmp2 = load_reg(s, rd);
7061 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7062 dead_tmp(tmp2);
7063 }
7064 store_reg(s, rd, tmp);
7065 break;
7066 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7067 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7068 ARCH(6T2);
7069 tmp = load_reg(s, rm);
7070 shift = (insn >> 7) & 0x1f;
7071 i = ((insn >> 16) & 0x1f) + 1;
7072 if (shift + i > 32)
7073 goto illegal_op;
7074 if (i < 32) {
7075 if (op1 & 0x20) {
7076 gen_ubfx(tmp, shift, (1u << i) - 1);
7077 } else {
7078 gen_sbfx(tmp, shift, i);
7079 }
7080 }
7081 store_reg(s, rd, tmp);
7082 break;
7083 default:
7084 goto illegal_op;
7085 }
7086 break;
7087 }
7088 break;
7089 }
7090 do_ldst:
7091 /* Check for undefined extension instructions
7092 * per the ARM Bible, i.e.:
7093 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7094 */
7095 sh = (0xf << 20) | (0xf << 4);
7096 if (op1 == 0x7 && ((insn & sh) == sh))
7097 {
7098 goto illegal_op;
7099 }
7100 /* load/store byte/word */
7101 rn = (insn >> 16) & 0xf;
7102 rd = (insn >> 12) & 0xf;
7103 tmp2 = load_reg(s, rn);
7104 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7105 if (insn & (1 << 24))
7106 gen_add_data_offset(s, insn, tmp2);
7107 if (insn & (1 << 20)) {
7108 /* load */
7109 if (insn & (1 << 22)) {
7110 tmp = gen_ld8u(tmp2, i);
7111 } else {
7112 tmp = gen_ld32(tmp2, i);
7113 }
7114 } else {
7115 /* store */
7116 tmp = load_reg(s, rd);
7117 if (insn & (1 << 22))
7118 gen_st8(tmp, tmp2, i);
7119 else
7120 gen_st32(tmp, tmp2, i);
7121 }
7122 if (!(insn & (1 << 24))) {
7123 gen_add_data_offset(s, insn, tmp2);
7124 store_reg(s, rn, tmp2);
7125 } else if (insn & (1 << 21)) {
7126 store_reg(s, rn, tmp2);
7127 } else {
7128 dead_tmp(tmp2);
7129 }
7130 if (insn & (1 << 20)) {
7131 /* Complete the load. */
7132 if (rd == 15)
7133 gen_bx(s, tmp);
7134 else
7135 store_reg(s, rd, tmp);
7136 }
7137 break;
7138 case 0x08:
7139 case 0x09:
7140 {
7141 int j, n, user, loaded_base;
7142 TCGv loaded_var;
7143 /* load/store multiple words */
7144 /* XXX: store correct base if write back */
7145 user = 0;
7146 if (insn & (1 << 22)) {
7147 if (IS_USER(s))
7148 goto illegal_op; /* only usable in supervisor mode */
7149
7150 if ((insn & (1 << 15)) == 0)
7151 user = 1;
7152 }
7153 rn = (insn >> 16) & 0xf;
7154 addr = load_reg(s, rn);
7155
7156 /* compute total size */
7157 loaded_base = 0;
7158 TCGV_UNUSED(loaded_var);
7159 n = 0;
7160 for(i=0;i<16;i++) {
7161 if (insn & (1 << i))
7162 n++;
7163 }
7164 /* XXX: test the invalid n == 0 case? */
7165 if (insn & (1 << 23)) {
7166 if (insn & (1 << 24)) {
7167 /* pre increment */
7168 tcg_gen_addi_i32(addr, addr, 4);
7169 } else {
7170 /* post increment */
7171 }
7172 } else {
7173 if (insn & (1 << 24)) {
7174 /* pre decrement */
7175 tcg_gen_addi_i32(addr, addr, -(n * 4));
7176 } else {
7177 /* post decrement */
7178 if (n != 1)
7179 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7180 }
7181 }
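/* All four addressing modes then walk upwards through memory; only
   the start address differs.  E.g. with n == 3 and base r:
   ia accesses r, r+4, r+8; ib: r+4..r+12; da: r-8..r;
   db: r-12..r-4. */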
7182 j = 0;
7183 for(i=0;i<16;i++) {
7184 if (insn & (1 << i)) {
7185 if (insn & (1 << 20)) {
7186 /* load */
7187 tmp = gen_ld32(addr, IS_USER(s));
7188 if (i == 15) {
7189 gen_bx(s, tmp);
7190 } else if (user) {
7191 tmp2 = tcg_const_i32(i);
7192 gen_helper_set_user_reg(tmp2, tmp);
7193 tcg_temp_free_i32(tmp2);
7194 dead_tmp(tmp);
7195 } else if (i == rn) {
7196 loaded_var = tmp;
7197 loaded_base = 1;
7198 } else {
7199 store_reg(s, i, tmp);
7200 }
7201 } else {
7202 /* store */
7203 if (i == 15) {
7204 /* special case: r15 = PC + 8 */
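/* s->pc already points 4 bytes past the current instruction,
   so the +4 below yields the architectural "address of
   insn + 8". */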
7205 val = (long)s->pc + 4;
7206 tmp = new_tmp();
7207 tcg_gen_movi_i32(tmp, val);
7208 } else if (user) {
7209 tmp = new_tmp();
7210 tmp2 = tcg_const_i32(i);
7211 gen_helper_get_user_reg(tmp, tmp2);
7212 tcg_temp_free_i32(tmp2);
7213 } else {
7214 tmp = load_reg(s, i);
7215 }
7216 gen_st32(tmp, addr, IS_USER(s));
7217 }
7218 j++;
7219 /* no need to add after the last transfer */
7220 if (j != n)
7221 tcg_gen_addi_i32(addr, addr, 4);
7222 }
7223 }
7224 if (insn & (1 << 21)) {
7225 /* write back */
7226 if (insn & (1 << 23)) {
7227 if (insn & (1 << 24)) {
7228 /* pre increment */
7229 } else {
7230 /* post increment */
7231 tcg_gen_addi_i32(addr, addr, 4);
7232 }
7233 } else {
7234 if (insn & (1 << 24)) {
7235 /* pre decrement */
7236 if (n != 1)
7237 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7238 } else {
7239 /* post decrement */
7240 tcg_gen_addi_i32(addr, addr, -(n * 4));
7241 }
7242 }
7243 store_reg(s, rn, addr);
7244 } else {
7245 dead_tmp(addr);
7246 }
7247 if (loaded_base) {
7248 store_reg(s, rn, loaded_var);
7249 }
7250 if ((insn & (1 << 22)) && !user) {
7251 /* Restore CPSR from SPSR. */
7252 tmp = load_cpu_field(spsr);
7253 gen_set_cpsr(tmp, 0xffffffff);
7254 dead_tmp(tmp);
7255 s->is_jmp = DISAS_UPDATE;
7256 }
7257 }
7258 break;
7259 case 0xa:
7260 case 0xb:
7261 {
7262 int32_t offset;
7263
7264 /* branch (and link) */
7265 val = (int32_t)s->pc;
7266 if (insn & (1 << 24)) {
7267 tmp = new_tmp();
7268 tcg_gen_movi_i32(tmp, val);
7269 store_reg(s, 14, tmp);
7270 }
7271 offset = (((int32_t)insn << 8) >> 8);
7272 val += (offset << 2) + 4;
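/* The sign-extended 24-bit field counts words; the extra +4 is
   because val (s->pc) is insn + 4 while the target is relative to
   the architectural PC, insn + 8. */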
7273 gen_jmp(s, val);
7274 }
7275 break;
7276 case 0xc:
7277 case 0xd:
7278 case 0xe:
7279 /* Coprocessor. */
7280 if (disas_coproc_insn(env, s, insn))
7281 goto illegal_op;
7282 break;
7283 case 0xf:
7284 /* swi */
7285 gen_set_pc_im(s->pc);
7286 s->is_jmp = DISAS_SWI;
7287 break;
7288 default:
7289 illegal_op:
7290 gen_exception_insn(s, 4, EXCP_UDEF);
7291 break;
7292 }
7293 }
7294 }
7295
7296 /* Return true if this is a Thumb-2 logical op. */
7297 static int
7298 thumb2_logic_op(int op)
7299 {
7300 return (op < 8);
7301 }
7302
7303 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7304 then set condition code flags based on the result of the operation.
7305 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7306 to the high bit of T1.
7307 Returns zero if the opcode is valid. */
7308
7309 static int
7310 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7311 {
7312 int logic_cc;
7313
7314 logic_cc = 0;
7315 switch (op) {
7316 case 0: /* and */
7317 tcg_gen_and_i32(t0, t0, t1);
7318 logic_cc = conds;
7319 break;
7320 case 1: /* bic */
7321 tcg_gen_andc_i32(t0, t0, t1);
7322 logic_cc = conds;
7323 break;
7324 case 2: /* orr */
7325 tcg_gen_or_i32(t0, t0, t1);
7326 logic_cc = conds;
7327 break;
7328 case 3: /* orn */
7329 tcg_gen_not_i32(t1, t1);
7330 tcg_gen_or_i32(t0, t0, t1);
7331 logic_cc = conds;
7332 break;
7333 case 4: /* eor */
7334 tcg_gen_xor_i32(t0, t0, t1);
7335 logic_cc = conds;
7336 break;
7337 case 8: /* add */
7338 if (conds)
7339 gen_helper_add_cc(t0, t0, t1);
7340 else
7341 tcg_gen_add_i32(t0, t0, t1);
7342 break;
7343 case 10: /* adc */
7344 if (conds)
7345 gen_helper_adc_cc(t0, t0, t1);
7346 else
7347 gen_adc(t0, t1);
7348 break;
7349 case 11: /* sbc */
7350 if (conds)
7351 gen_helper_sbc_cc(t0, t0, t1);
7352 else
7353 gen_sub_carry(t0, t0, t1);
7354 break;
7355 case 13: /* sub */
7356 if (conds)
7357 gen_helper_sub_cc(t0, t0, t1);
7358 else
7359 tcg_gen_sub_i32(t0, t0, t1);
7360 break;
7361 case 14: /* rsb */
7362 if (conds)
7363 gen_helper_sub_cc(t0, t1, t0);
7364 else
7365 tcg_gen_sub_i32(t0, t1, t0);
7366 break;
7367 default: /* 5, 6, 7, 9, 12, 15. */
7368 return 1;
7369 }
7370 if (logic_cc) {
7371 gen_logic_CC(t0);
7372 if (shifter_out)
7373 gen_set_CF_bit31(t1);
7374 }
7375 return 0;
7376 }
7377
7378 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7379 is not legal. */
7380 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7381 {
7382 uint32_t insn, imm, shift, offset;
7383 uint32_t rd, rn, rm, rs;
7384 TCGv tmp;
7385 TCGv tmp2;
7386 TCGv tmp3;
7387 TCGv addr;
7388 TCGv_i64 tmp64;
7389 int op;
7390 int shiftop;
7391 int conds;
7392 int logic_cc;
7393
7394 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7395 || arm_feature (env, ARM_FEATURE_M))) {
7396 /* Thumb-1 cores may need to treat bl and blx as a pair of
7397 16-bit instructions to get correct prefetch abort behavior. */
7398 insn = insn_hw1;
7399 if ((insn & (1 << 12)) == 0) {
7400 /* Second half of blx. */
7401 offset = ((insn & 0x7ff) << 1);
7402 tmp = load_reg(s, 14);
7403 tcg_gen_addi_i32(tmp, tmp, offset);
7404 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7405
7406 tmp2 = new_tmp();
7407 tcg_gen_movi_i32(tmp2, s->pc | 1);
7408 store_reg(s, 14, tmp2);
7409 gen_bx(s, tmp);
7410 return 0;
7411 }
7412 if (insn & (1 << 11)) {
7413 /* Second half of bl. */
7414 offset = ((insn & 0x7ff) << 1) | 1;
7415 tmp = load_reg(s, 14);
7416 tcg_gen_addi_i32(tmp, tmp, offset);
7417
7418 tmp2 = new_tmp();
7419 tcg_gen_movi_i32(tmp2, s->pc | 1);
7420 store_reg(s, 14, tmp2);
7421 gen_bx(s, tmp);
7422 return 0;
7423 }
7424 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7425 /* Instruction spans a page boundary. Implement it as two
7426 16-bit instructions in case the second half causes a
7427 prefetch abort. */
7428 offset = ((int32_t)insn << 21) >> 9;
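/* hw1[10:0] is the upper part of the branch offset: this sets LR to
   PC + sign_extend(imm11) << 12, and the second 16-bit half
   (handled above) later adds offset[11:1] and branches. */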
7429 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7430 return 0;
7431 }
7432 /* Fall through to 32-bit decode. */
7433 }
7434
7435 insn = lduw_code(s->pc);
7436 s->pc += 2;
7437 insn |= (uint32_t)insn_hw1 << 16;
7438
7439 if ((insn & 0xf800e800) != 0xf000e800) {
7440 ARCH(6T2);
7441 }
7442
7443 rn = (insn >> 16) & 0xf;
7444 rs = (insn >> 12) & 0xf;
7445 rd = (insn >> 8) & 0xf;
7446 rm = insn & 0xf;
7447 switch ((insn >> 25) & 0xf) {
7448 case 0: case 1: case 2: case 3:
7449 /* 16-bit instructions. Should never happen. */
7450 abort();
7451 case 4:
7452 if (insn & (1 << 22)) {
7453 /* Other load/store, table branch. */
7454 if (insn & 0x01200000) {
7455 /* Load/store doubleword. */
7456 if (rn == 15) {
7457 addr = new_tmp();
7458 tcg_gen_movi_i32(addr, s->pc & ~3);
7459 } else {
7460 addr = load_reg(s, rn);
7461 }
7462 offset = (insn & 0xff) * 4;
7463 if ((insn & (1 << 23)) == 0)
7464 offset = -offset;
7465 if (insn & (1 << 24)) {
7466 tcg_gen_addi_i32(addr, addr, offset);
7467 offset = 0;
7468 }
7469 if (insn & (1 << 20)) {
7470 /* ldrd */
7471 tmp = gen_ld32(addr, IS_USER(s));
7472 store_reg(s, rs, tmp);
7473 tcg_gen_addi_i32(addr, addr, 4);
7474 tmp = gen_ld32(addr, IS_USER(s));
7475 store_reg(s, rd, tmp);
7476 } else {
7477 /* strd */
7478 tmp = load_reg(s, rs);
7479 gen_st32(tmp, addr, IS_USER(s));
7480 tcg_gen_addi_i32(addr, addr, 4);
7481 tmp = load_reg(s, rd);
7482 gen_st32(tmp, addr, IS_USER(s));
7483 }
7484 if (insn & (1 << 21)) {
7485 /* Base writeback. */
7486 if (rn == 15)
7487 goto illegal_op;
7488 tcg_gen_addi_i32(addr, addr, offset - 4);
7489 store_reg(s, rn, addr);
7490 } else {
7491 dead_tmp(addr);
7492 }
7493 } else if ((insn & (1 << 23)) == 0) {
7494 /* Load/store exclusive word. */
7495 addr = tcg_temp_local_new();
7496 load_reg_var(s, addr, rn);
7497 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7498 if (insn & (1 << 20)) {
7499 gen_load_exclusive(s, rs, 15, addr, 2);
7500 } else {
7501 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7502 }
7503 tcg_temp_free(addr);
7504 } else if ((insn & (1 << 6)) == 0) {
7505 /* Table Branch. */
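/* TBB/TBH: fetch a byte at Rn + Rm (tbb) or a halfword at
   Rn + 2*Rm (tbh), then PC := (this insn + 4) + 2 * entry --
   a compact jump table of unsigned forward offsets. */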
7506 if (rn == 15) {
7507 addr = new_tmp();
7508 tcg_gen_movi_i32(addr, s->pc);
7509 } else {
7510 addr = load_reg(s, rn);
7511 }
7512 tmp = load_reg(s, rm);
7513 tcg_gen_add_i32(addr, addr, tmp);
7514 if (insn & (1 << 4)) {
7515 /* tbh */
7516 tcg_gen_add_i32(addr, addr, tmp);
7517 dead_tmp(tmp);
7518 tmp = gen_ld16u(addr, IS_USER(s));
7519 } else { /* tbb */
7520 dead_tmp(tmp);
7521 tmp = gen_ld8u(addr, IS_USER(s));
7522 }
7523 dead_tmp(addr);
7524 tcg_gen_shli_i32(tmp, tmp, 1);
7525 tcg_gen_addi_i32(tmp, tmp, s->pc);
7526 store_reg(s, 15, tmp);
7527 } else {
7528 /* Load/store exclusive byte/halfword/doubleword. */
7529 ARCH(7);
7530 op = (insn >> 4) & 0x3;
7531 if (op == 2) {
7532 goto illegal_op;
7533 }
7534 addr = tcg_temp_local_new();
7535 load_reg_var(s, addr, rn);
7536 if (insn & (1 << 20)) {
7537 gen_load_exclusive(s, rs, rd, addr, op);
7538 } else {
7539 gen_store_exclusive(s, rm, rs, rd, addr, op);
7540 }
7541 tcg_temp_free(addr);
7542 }
7543 } else {
7544 /* Load/store multiple, RFE, SRS. */
7545 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7546 /* Not available in user mode. */
7547 if (IS_USER(s))
7548 goto illegal_op;
7549 if (insn & (1 << 20)) {
7550 /* rfe */
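/* RFE: return from exception -- reload PC and CPSR from two
   consecutive words at the address in Rn, the counterpart of the
   srs store below. */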
7551 addr = load_reg(s, rn);
7552 if ((insn & (1 << 24)) == 0)
7553 tcg_gen_addi_i32(addr, addr, -8);
7554 /* Load PC into tmp and CPSR into tmp2. */
7555 tmp = gen_ld32(addr, 0);
7556 tcg_gen_addi_i32(addr, addr, 4);
7557 tmp2 = gen_ld32(addr, 0);
7558 if (insn & (1 << 21)) {
7559 /* Base writeback. */
7560 if (insn & (1 << 24)) {
7561 tcg_gen_addi_i32(addr, addr, 4);
7562 } else {
7563 tcg_gen_addi_i32(addr, addr, -4);
7564 }
7565 store_reg(s, rn, addr);
7566 } else {
7567 dead_tmp(addr);
7568 }
7569 gen_rfe(s, tmp, tmp2);
7570 } else {
7571 /* srs */
7572 op = (insn & 0x1f);
7573 addr = new_tmp();
7574 tmp = tcg_const_i32(op);
7575 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7576 tcg_temp_free_i32(tmp);
7577 if ((insn & (1 << 24)) == 0) {
7578 tcg_gen_addi_i32(addr, addr, -8);
7579 }
7580 tmp = load_reg(s, 14);
7581 gen_st32(tmp, addr, 0);
7582 tcg_gen_addi_i32(addr, addr, 4);
7583 tmp = new_tmp();
7584 gen_helper_cpsr_read(tmp);
7585 gen_st32(tmp, addr, 0);
7586 if (insn & (1 << 21)) {
7587 if ((insn & (1 << 24)) == 0) {
7588 tcg_gen_addi_i32(addr, addr, -4);
7589 } else {
7590 tcg_gen_addi_i32(addr, addr, 4);
7591 }
7592 tmp = tcg_const_i32(op);
7593 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7594 tcg_temp_free_i32(tmp);
7595 } else {
7596 dead_tmp(addr);
7597 }
7598 }
7599 } else {
7600 int i;
7601 /* Load/store multiple. */
7602 addr = load_reg(s, rn);
7603 offset = 0;
7604 for (i = 0; i < 16; i++) {
7605 if (insn & (1 << i))
7606 offset += 4;
7607 }
7608 if (insn & (1 << 24)) {
7609 tcg_gen_addi_i32(addr, addr, -offset);
7610 }
7611
7612 for (i = 0; i < 16; i++) {
7613 if ((insn & (1 << i)) == 0)
7614 continue;
7615 if (insn & (1 << 20)) {
7616 /* Load. */
7617 tmp = gen_ld32(addr, IS_USER(s));
7618 if (i == 15) {
7619 gen_bx(s, tmp);
7620 } else {
7621 store_reg(s, i, tmp);
7622 }
7623 } else {
7624 /* Store. */
7625 tmp = load_reg(s, i);
7626 gen_st32(tmp, addr, IS_USER(s));
7627 }
7628 tcg_gen_addi_i32(addr, addr, 4);
7629 }
7630 if (insn & (1 << 21)) {
7631 /* Base register writeback. */
7632 if (insn & (1 << 24)) {
7633 tcg_gen_addi_i32(addr, addr, -offset);
7634 }
7635 /* Fault if writeback register is in register list. */
7636 if (insn & (1 << rn))
7637 goto illegal_op;
7638 store_reg(s, rn, addr);
7639 } else {
7640 dead_tmp(addr);
7641 }
7642 }
7643 }
7644 break;
7645 case 5:
7646
7647 op = (insn >> 21) & 0xf;
7648 if (op == 6) {
7649 /* Halfword pack. */
7650 tmp = load_reg(s, rn);
7651 tmp2 = load_reg(s, rm);
7652 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7653 if (insn & (1 << 5)) {
7654 /* pkhtb */
7655 if (shift == 0)
7656 shift = 31;
7657 tcg_gen_sari_i32(tmp2, tmp2, shift);
7658 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7659 tcg_gen_ext16u_i32(tmp2, tmp2);
7660 } else {
7661 /* pkhbt */
7662 if (shift)
7663 tcg_gen_shli_i32(tmp2, tmp2, shift);
7664 tcg_gen_ext16u_i32(tmp, tmp);
7665 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7666 }
7667 tcg_gen_or_i32(tmp, tmp, tmp2);
7668 dead_tmp(tmp2);
7669 store_reg(s, rd, tmp);
7670 } else {
7671 /* Data processing register constant shift. */
7672 if (rn == 15) {
7673 tmp = new_tmp();
7674 tcg_gen_movi_i32(tmp, 0);
7675 } else {
7676 tmp = load_reg(s, rn);
7677 }
7678 tmp2 = load_reg(s, rm);
7679
7680 shiftop = (insn >> 4) & 3;
7681 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7682 conds = (insn & (1 << 20)) != 0;
7683 logic_cc = (conds && thumb2_logic_op(op));
7684 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7685 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7686 goto illegal_op;
7687 dead_tmp(tmp2);
7688 if (rd != 15) {
7689 store_reg(s, rd, tmp);
7690 } else {
7691 dead_tmp(tmp);
7692 }
7693 }
7694 break;
7695 case 13: /* Misc data processing. */
7696 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7697 if (op < 4 && (insn & 0xf000) != 0xf000)
7698 goto illegal_op;
7699 switch (op) {
7700 case 0: /* Register controlled shift. */
7701 tmp = load_reg(s, rn);
7702 tmp2 = load_reg(s, rm);
7703 if ((insn & 0x70) != 0)
7704 goto illegal_op;
7705 op = (insn >> 21) & 3;
7706 logic_cc = (insn & (1 << 20)) != 0;
7707 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7708 if (logic_cc)
7709 gen_logic_CC(tmp);
7710 store_reg_bx(env, s, rd, tmp);
7711 break;
7712 case 1: /* Sign/zero extend. */
7713 tmp = load_reg(s, rm);
7714 shift = (insn >> 4) & 3;
7715 /* ??? In many cases it's not necessary to do a
7716 rotate; a shift is sufficient. */
7717 if (shift != 0)
7718 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7719 op = (insn >> 20) & 7;
7720 switch (op) {
7721 case 0: gen_sxth(tmp); break;
7722 case 1: gen_uxth(tmp); break;
7723 case 2: gen_sxtb16(tmp); break;
7724 case 3: gen_uxtb16(tmp); break;
7725 case 4: gen_sxtb(tmp); break;
7726 case 5: gen_uxtb(tmp); break;
7727 default: goto illegal_op;
7728 }
7729 if (rn != 15) {
7730 tmp2 = load_reg(s, rn);
7731 if ((op >> 1) == 1) {
7732 gen_add16(tmp, tmp2);
7733 } else {
7734 tcg_gen_add_i32(tmp, tmp, tmp2);
7735 dead_tmp(tmp2);
7736 }
7737 }
7738 store_reg(s, rd, tmp);
7739 break;
7740 case 2: /* SIMD add/subtract. */
7741 op = (insn >> 20) & 7;
7742 shift = (insn >> 4) & 7;
7743 if ((op & 3) == 3 || (shift & 3) == 3)
7744 goto illegal_op;
7745 tmp = load_reg(s, rn);
7746 tmp2 = load_reg(s, rm);
7747 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7748 dead_tmp(tmp2);
7749 store_reg(s, rd, tmp);
7750 break;
7751 case 3: /* Other data processing. */
7752 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7753 if (op < 4) {
7754 /* Saturating add/subtract. */
7755 tmp = load_reg(s, rn);
7756 tmp2 = load_reg(s, rm);
7757 if (op & 1)
7758 gen_helper_double_saturate(tmp, tmp);
7759 if (op & 2)
7760 gen_helper_sub_saturate(tmp, tmp2, tmp);
7761 else
7762 gen_helper_add_saturate(tmp, tmp, tmp2);
7763 dead_tmp(tmp2);
7764 } else {
7765 tmp = load_reg(s, rn);
7766 switch (op) {
7767 case 0x0a: /* rbit */
7768 gen_helper_rbit(tmp, tmp);
7769 break;
7770 case 0x08: /* rev */
7771 tcg_gen_bswap32_i32(tmp, tmp);
7772 break;
7773 case 0x09: /* rev16 */
7774 gen_rev16(tmp);
7775 break;
7776 case 0x0b: /* revsh */
7777 gen_revsh(tmp);
7778 break;
7779 case 0x10: /* sel */
7780 tmp2 = load_reg(s, rm);
7781 tmp3 = new_tmp();
7782 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7783 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7784 dead_tmp(tmp3);
7785 dead_tmp(tmp2);
7786 break;
7787 case 0x18: /* clz */
7788 gen_helper_clz(tmp, tmp);
7789 break;
7790 default:
7791 goto illegal_op;
7792 }
7793 }
7794 store_reg(s, rd, tmp);
7795 break;
7796 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7797 op = (insn >> 4) & 0xf;
7798 tmp = load_reg(s, rn);
7799 tmp2 = load_reg(s, rm);
7800 switch ((insn >> 20) & 7) {
7801 case 0: /* 32 x 32 -> 32 */
7802 tcg_gen_mul_i32(tmp, tmp, tmp2);
7803 dead_tmp(tmp2);
7804 if (rs != 15) {
7805 tmp2 = load_reg(s, rs);
7806 if (op)
7807 tcg_gen_sub_i32(tmp, tmp2, tmp);
7808 else
7809 tcg_gen_add_i32(tmp, tmp, tmp2);
7810 dead_tmp(tmp2);
7811 }
7812 break;
7813 case 1: /* 16 x 16 -> 32 */
7814 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7815 dead_tmp(tmp2);
7816 if (rs != 15) {
7817 tmp2 = load_reg(s, rs);
7818 gen_helper_add_setq(tmp, tmp, tmp2);
7819 dead_tmp(tmp2);
7820 }
7821 break;
7822 case 2: /* Dual multiply add. */
7823 case 4: /* Dual multiply subtract. */
7824 if (op)
7825 gen_swap_half(tmp2);
7826 gen_smul_dual(tmp, tmp2);
7827 /* This addition cannot overflow. */
7828 if (insn & (1 << 22)) {
7829 tcg_gen_sub_i32(tmp, tmp, tmp2);
7830 } else {
7831 tcg_gen_add_i32(tmp, tmp, tmp2);
7832 }
7833 dead_tmp(tmp2);
7834 if (rs != 15)
7835 {
7836 tmp2 = load_reg(s, rs);
7837 gen_helper_add_setq(tmp, tmp, tmp2);
7838 dead_tmp(tmp2);
7839 }
7840 break;
7841 case 3: /* 32 * 16 -> 32msb */
7842 if (op)
7843 tcg_gen_sari_i32(tmp2, tmp2, 16);
7844 else
7845 gen_sxth(tmp2);
7846 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7847 tcg_gen_shri_i64(tmp64, tmp64, 16);
7848 tmp = new_tmp();
7849 tcg_gen_trunc_i64_i32(tmp, tmp64);
7850 tcg_temp_free_i64(tmp64);
7851 if (rs != 15)
7852 {
7853 tmp2 = load_reg(s, rs);
7854 gen_helper_add_setq(tmp, tmp, tmp2);
7855 dead_tmp(tmp2);
7856 }
7857 break;
7858 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7859 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7860 if (rs != 15) {
7861 tmp = load_reg(s, rs);
7862 if (insn & (1 << 20)) {
7863 tmp64 = gen_addq_msw(tmp64, tmp);
7864 } else {
7865 tmp64 = gen_subq_msw(tmp64, tmp);
7866 }
7867 }
7868 if (insn & (1 << 4)) {
7869 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7870 }
7871 tcg_gen_shri_i64(tmp64, tmp64, 32);
7872 tmp = new_tmp();
7873 tcg_gen_trunc_i64_i32(tmp, tmp64);
7874 tcg_temp_free_i64(tmp64);
7875 break;
7876 case 7: /* Unsigned sum of absolute differences. */
7877 gen_helper_usad8(tmp, tmp, tmp2);
7878 dead_tmp(tmp2);
7879 if (rs != 15) {
7880 tmp2 = load_reg(s, rs);
7881 tcg_gen_add_i32(tmp, tmp, tmp2);
7882 dead_tmp(tmp2);
7883 }
7884 break;
7885 }
7886 store_reg(s, rd, tmp);
7887 break;
7888 case 6: case 7: /* 64-bit multiply, Divide. */
7889 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7890 tmp = load_reg(s, rn);
7891 tmp2 = load_reg(s, rm);
7892 if ((op & 0x50) == 0x10) {
7893 /* sdiv, udiv */
7894 if (!arm_feature(env, ARM_FEATURE_DIV))
7895 goto illegal_op;
7896 if (op & 0x20)
7897 gen_helper_udiv(tmp, tmp, tmp2);
7898 else
7899 gen_helper_sdiv(tmp, tmp, tmp2);
7900 dead_tmp(tmp2);
7901 store_reg(s, rd, tmp);
7902 } else if ((op & 0xe) == 0xc) {
7903 /* Dual multiply accumulate long. */
7904 if (op & 1)
7905 gen_swap_half(tmp2);
7906 gen_smul_dual(tmp, tmp2);
7907 if (op & 0x10) {
7908 tcg_gen_sub_i32(tmp, tmp, tmp2);
7909 } else {
7910 tcg_gen_add_i32(tmp, tmp, tmp2);
7911 }
7912 dead_tmp(tmp2);
7913 /* BUGFIX */
7914 tmp64 = tcg_temp_new_i64();
7915 tcg_gen_ext_i32_i64(tmp64, tmp);
7916 dead_tmp(tmp);
7917 gen_addq(s, tmp64, rs, rd);
7918 gen_storeq_reg(s, rs, rd, tmp64);
7919 tcg_temp_free_i64(tmp64);
7920 } else {
7921 if (op & 0x20) {
7922 /* Unsigned 64-bit multiply */
7923 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7924 } else {
7925 if (op & 8) {
7926 /* smlalxy */
7927 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7928 dead_tmp(tmp2);
7929 tmp64 = tcg_temp_new_i64();
7930 tcg_gen_ext_i32_i64(tmp64, tmp);
7931 dead_tmp(tmp);
7932 } else {
7933 /* Signed 64-bit multiply */
7934 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7935 }
7936 }
7937 if (op & 4) {
7938 /* umaal */
7939 gen_addq_lo(s, tmp64, rs);
7940 gen_addq_lo(s, tmp64, rd);
7941 } else if (op & 0x40) {
7942 /* 64-bit accumulate. */
7943 gen_addq(s, tmp64, rs, rd);
7944 }
7945 gen_storeq_reg(s, rs, rd, tmp64);
7946 tcg_temp_free_i64(tmp64);
7947 }
7948 break;
7949 }
7950 break;
7951 case 6: case 7: case 14: case 15:
7952 /* Coprocessor. */
7953 if (((insn >> 24) & 3) == 3) {
7954 /* Translate into the equivalent ARM encoding. */
7955 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7956 if (disas_neon_data_insn(env, s, insn))
7957 goto illegal_op;
7958 } else {
7959 if (insn & (1 << 28))
7960 goto illegal_op;
7961 if (disas_coproc_insn (env, s, insn))
7962 goto illegal_op;
7963 }
7964 break;
7965 case 8: case 9: case 10: case 11:
7966 if (insn & (1 << 15)) {
7967 /* Branches, misc control. */
7968 if (insn & 0x5000) {
7969 /* Unconditional branch. */
7970 /* signextend(hw1[10:0]) -> offset[:12]. */
7971 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7972 /* hw2[10:0] -> offset[11:1]. */
7973 offset |= (insn & 0x7ff) << 1;
7974 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7975 offset[24:22] already have the same value because of the
7976 sign extension above. */
7977 offset ^= ((~insn) & (1 << 13)) << 10;
7978 offset ^= ((~insn) & (1 << 11)) << 11;
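/* This completes the T4 offset S:I1:I2:imm10:imm11:0 with
   I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S): bits 23 and 22
   currently hold copies of S, and XORing them with ~J1/~J2
   yields exactly NOT(J XOR S). */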
7979
7980 if (insn & (1 << 14)) {
7981 /* Branch and link. */
7982 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
7983 }
7984
7985 offset += s->pc;
7986 if (insn & (1 << 12)) {
7987 /* b/bl */
7988 gen_jmp(s, offset);
7989 } else {
7990 /* blx */
7991 offset &= ~(uint32_t)2;
7992 gen_bx_im(s, offset);
7993 }
7994 } else if (((insn >> 23) & 7) == 7) {
7995 /* Misc control */
7996 if (insn & (1 << 13))
7997 goto illegal_op;
7998
7999 if (insn & (1 << 26)) {
8000 /* Secure monitor call (v6Z) */
8001 goto illegal_op; /* not implemented. */
8002 } else {
8003 op = (insn >> 20) & 7;
8004 switch (op) {
8005 case 0: /* msr cpsr. */
8006 if (IS_M(env)) {
8007 tmp = load_reg(s, rn);
8008 addr = tcg_const_i32(insn & 0xff);
8009 gen_helper_v7m_msr(cpu_env, addr, tmp);
8010 tcg_temp_free_i32(addr);
8011 dead_tmp(tmp);
8012 gen_lookup_tb(s);
8013 break;
8014 }
8015 /* fall through */
8016 case 1: /* msr spsr. */
8017 if (IS_M(env))
8018 goto illegal_op;
8019 tmp = load_reg(s, rn);
8020 if (gen_set_psr(s,
8021 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8022 op == 1, tmp))
8023 goto illegal_op;
8024 break;
8025 case 2: /* cps, nop-hint. */
8026 if (((insn >> 8) & 7) == 0) {
8027 gen_nop_hint(s, insn & 0xff);
8028 }
8029 /* Implemented as NOP in user mode. */
8030 if (IS_USER(s))
8031 break;
8032 offset = 0;
8033 imm = 0;
8034 if (insn & (1 << 10)) {
8035 if (insn & (1 << 7))
8036 offset |= CPSR_A;
8037 if (insn & (1 << 6))
8038 offset |= CPSR_I;
8039 if (insn & (1 << 5))
8040 offset |= CPSR_F;
8041 if (insn & (1 << 9))
8042 imm = CPSR_A | CPSR_I | CPSR_F;
8043 }
8044 if (insn & (1 << 8)) {
8045 offset |= 0x1f;
8046 imm |= (insn & 0x1f);
8047 }
8048 if (offset) {
8049 gen_set_psr_im(s, offset, 0, imm);
8050 }
8051 break;
8052 case 3: /* Special control operations. */
8053 ARCH(7);
8054 op = (insn >> 4) & 0xf;
8055 switch (op) {
8056 case 2: /* clrex */
8057 gen_clrex(s);
8058 break;
8059 case 4: /* dsb */
8060 case 5: /* dmb */
8061 case 6: /* isb */
8062 /* These execute as NOPs. */
8063 break;
8064 default:
8065 goto illegal_op;
8066 }
8067 break;
8068 case 4: /* bxj */
8069 /* Trivial implementation equivalent to bx. */
8070 tmp = load_reg(s, rn);
8071 gen_bx(s, tmp);
8072 break;
8073 case 5: /* Exception return. */
8074 if (IS_USER(s)) {
8075 goto illegal_op;
8076 }
8077 if (rn != 14 || rd != 15) {
8078 goto illegal_op;
8079 }
8080 tmp = load_reg(s, rn);
8081 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8082 gen_exception_return(s, tmp);
8083 break;
8084 case 6: /* mrs cpsr. */
8085 tmp = new_tmp();
8086 if (IS_M(env)) {
8087 addr = tcg_const_i32(insn & 0xff);
8088 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8089 tcg_temp_free_i32(addr);
8090 } else {
8091 gen_helper_cpsr_read(tmp);
8092 }
8093 store_reg(s, rd, tmp);
8094 break;
8095 case 7: /* mrs spsr. */
8096 /* Not accessible in user mode. */
8097 if (IS_USER(s) || IS_M(env))
8098 goto illegal_op;
8099 tmp = load_cpu_field(spsr);
8100 store_reg(s, rd, tmp);
8101 break;
8102 }
8103 }
8104 } else {
8105 /* Conditional branch. */
8106 op = (insn >> 22) & 0xf;
8107 /* Generate a conditional jump to next instruction. */
8108 s->condlabel = gen_new_label();
8109 gen_test_cc(op ^ 1, s->condlabel);
8110 s->condjmp = 1;
8111
8112 /* offset[11:1] = insn[10:0] */
8113 offset = (insn & 0x7ff) << 1;
8114 /* offset[17:12] = insn[21:16]. */
8115 offset |= (insn & 0x003f0000) >> 4;
8116 /* offset[31:20] = insn[26]. */
8117 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8118 /* offset[18] = insn[13]. */
8119 offset |= (insn & (1 << 13)) << 5;
8120 /* offset[19] = insn[11]. */
8121 offset |= (insn & (1 << 11)) << 8;
8122
8123 /* jump to the offset */
8124 gen_jmp(s, s->pc + offset);
8125 }
8126 } else {
8127 /* Data processing immediate. */
8128 if (insn & (1 << 25)) {
8129 if (insn & (1 << 24)) {
8130 if (insn & (1 << 20))
8131 goto illegal_op;
8132 /* Bitfield/Saturate. */
8133 op = (insn >> 21) & 7;
8134 imm = insn & 0x1f;
8135 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8136 if (rn == 15) {
8137 tmp = new_tmp();
8138 tcg_gen_movi_i32(tmp, 0);
8139 } else {
8140 tmp = load_reg(s, rn);
8141 }
8142 switch (op) {
8143 case 2: /* Signed bitfield extract. */
8144 imm++;
8145 if (shift + imm > 32)
8146 goto illegal_op;
8147 if (imm < 32)
8148 gen_sbfx(tmp, shift, imm);
8149 break;
8150 case 6: /* Unsigned bitfield extract. */
8151 imm++;
8152 if (shift + imm > 32)
8153 goto illegal_op;
8154 if (imm < 32)
8155 gen_ubfx(tmp, shift, (1u << imm) - 1);
8156 break;
8157 case 3: /* Bitfield insert/clear. */
8158 if (imm < shift)
8159 goto illegal_op;
8160 imm = imm + 1 - shift;
8161 if (imm != 32) {
8162 tmp2 = load_reg(s, rd);
8163 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8164 dead_tmp(tmp2);
8165 }
8166 break;
8167 case 7:
8168 goto illegal_op;
8169 default: /* Saturate. */
8170 if (shift) {
8171 if (op & 1)
8172 tcg_gen_sari_i32(tmp, tmp, shift);
8173 else
8174 tcg_gen_shli_i32(tmp, tmp, shift);
8175 }
8176 tmp2 = tcg_const_i32(imm);
8177 if (op & 4) {
8178 /* Unsigned. */
8179 if ((op & 1) && shift == 0)
8180 gen_helper_usat16(tmp, tmp, tmp2);
8181 else
8182 gen_helper_usat(tmp, tmp, tmp2);
8183 } else {
8184 /* Signed. */
8185 if ((op & 1) && shift == 0)
8186 gen_helper_ssat16(tmp, tmp, tmp2);
8187 else
8188 gen_helper_ssat(tmp, tmp, tmp2);
8189 }
8190 tcg_temp_free_i32(tmp2);
8191 break;
8192 }
8193 store_reg(s, rd, tmp);
8194 } else {
8195 imm = ((insn & 0x04000000) >> 15)
8196 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8197 if (insn & (1 << 22)) {
8198 /* 16-bit immediate. */
8199 imm |= (insn >> 4) & 0xf000;
8200 if (insn & (1 << 23)) {
8201 /* movt */
8202 tmp = load_reg(s, rd);
8203 tcg_gen_ext16u_i32(tmp, tmp);
8204 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8205 } else {
8206 /* movw */
8207 tmp = new_tmp();
8208 tcg_gen_movi_i32(tmp, imm);
8209 }
8210 } else {
8211 /* Add/sub 12-bit immediate. */
8212 if (rn == 15) {
8213 offset = s->pc & ~(uint32_t)3;
8214 if (insn & (1 << 23))
8215 offset -= imm;
8216 else
8217 offset += imm;
8218 tmp = new_tmp();
8219 tcg_gen_movi_i32(tmp, offset);
8220 } else {
8221 tmp = load_reg(s, rn);
8222 if (insn & (1 << 23))
8223 tcg_gen_subi_i32(tmp, tmp, imm);
8224 else
8225 tcg_gen_addi_i32(tmp, tmp, imm);
8226 }
8227 }
8228 store_reg(s, rd, tmp);
8229 }
8230 } else {
8231 int shifter_out = 0;
8232 /* modified 12-bit immediate. */
8233 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8234 imm = (insn & 0xff);
8235 switch (shift) {
8236 case 0: /* XY */
8237 /* Nothing to do. */
8238 break;
8239 case 1: /* 00XY00XY */
8240 imm |= imm << 16;
8241 break;
8242 case 2: /* XY00XY00 */
8243 imm |= imm << 16;
8244 imm <<= 8;
8245 break;
8246 case 3: /* XYXYXYXY */
8247 imm |= imm << 16;
8248 imm |= imm << 8;
8249 break;
8250 default: /* Rotated constant. */
8251 shift = (shift << 1) | (imm >> 7);
8252 imm |= 0x80;
8253 imm = imm << (32 - shift);
8254 shifter_out = 1;
8255 break;
8256 }
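/* Thumb-2 "modified immediate": patterns 0-3 replicate the byte
   into selected lanes (e.g. imm8 = 0xab with pattern 3 yields
   0xabababab); otherwise the byte gets a forced leading 1 and is
   rotated right by 8..31 bits. */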
8257 tmp2 = new_tmp();
8258 tcg_gen_movi_i32(tmp2, imm);
8259 rn = (insn >> 16) & 0xf;
8260 if (rn == 15) {
8261 tmp = new_tmp();
8262 tcg_gen_movi_i32(tmp, 0);
8263 } else {
8264 tmp = load_reg(s, rn);
8265 }
8266 op = (insn >> 21) & 0xf;
8267 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8268 shifter_out, tmp, tmp2))
8269 goto illegal_op;
8270 dead_tmp(tmp2);
8271 rd = (insn >> 8) & 0xf;
8272 if (rd != 15) {
8273 store_reg(s, rd, tmp);
8274 } else {
8275 dead_tmp(tmp);
8276 }
8277 }
8278 }
8279 break;
8280 case 12: /* Load/store single data item. */
8281 {
8282 int postinc = 0;
8283 int writeback = 0;
8284 int user;
8285 if ((insn & 0x01100000) == 0x01000000) {
8286 if (disas_neon_ls_insn(env, s, insn))
8287 goto illegal_op;
8288 break;
8289 }
8290 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8291 if (rs == 15) {
8292 if (!(insn & (1 << 20))) {
8293 goto illegal_op;
8294 }
8295 if (op != 2) {
8296 /* Byte or halfword load space with dest == r15: memory hints.
8297 * Catch them early so we don't emit pointless addressing code.
8298 * This space is a mix of:
8299 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8300 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8301 * cores)
8302 * unallocated hints, which must be treated as NOPs
8303 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8304 * which is easiest for the decoding logic
8305 * Some space which must UNDEF
8306 */
8307 int op1 = (insn >> 23) & 3;
8308 int op2 = (insn >> 6) & 0x3f;
8309 if (op & 2) {
8310 goto illegal_op;
8311 }
8312 if (rn == 15) {
8313 /* UNPREDICTABLE or unallocated hint */
8314 return 0;
8315 }
8316 if (op1 & 1) {
8317 return 0; /* PLD* or unallocated hint */
8318 }
8319 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8320 return 0; /* PLD* or unallocated hint */
8321 }
8322 /* UNDEF space, or an UNPREDICTABLE */
8323 return 1;
8324 }
8325 }
8326 user = IS_USER(s);
8327 if (rn == 15) {
8328 addr = new_tmp();
8329 /* PC relative. */
8330 /* s->pc has already been incremented by 4. */
8331 imm = s->pc & 0xfffffffc;
8332 if (insn & (1 << 23))
8333 imm += insn & 0xfff;
8334 else
8335 imm -= insn & 0xfff;
8336 tcg_gen_movi_i32(addr, imm);
8337 } else {
8338 addr = load_reg(s, rn);
8339 if (insn & (1 << 23)) {
8340 /* Positive offset. */
8341 imm = insn & 0xfff;
8342 tcg_gen_addi_i32(addr, addr, imm);
8343 } else {
8344 imm = insn & 0xff;
8345 switch ((insn >> 8) & 7) {
8346 case 0: case 8: /* Shifted Register. */
8347 shift = (insn >> 4) & 0xf;
8348 if (shift > 3)
8349 goto illegal_op;
8350 tmp = load_reg(s, rm);
8351 if (shift)
8352 tcg_gen_shli_i32(tmp, tmp, shift);
8353 tcg_gen_add_i32(addr, addr, tmp);
8354 dead_tmp(tmp);
8355 break;
8356 case 4: /* Negative offset. */
8357 tcg_gen_addi_i32(addr, addr, -imm);
8358 break;
8359 case 6: /* User privilege. */
8360 tcg_gen_addi_i32(addr, addr, imm);
8361 user = 1;
8362 break;
8363 case 1: /* Post-decrement. */
8364 imm = -imm;
8365 /* Fall through. */
8366 case 3: /* Post-increment. */
8367 postinc = 1;
8368 writeback = 1;
8369 break;
8370 case 5: /* Pre-decrement. */
8371 imm = -imm;
8372 /* Fall through. */
8373 case 7: /* Pre-increment. */
8374 tcg_gen_addi_i32(addr, addr, imm);
8375 writeback = 1;
8376 break;
8377 default:
8378 goto illegal_op;
8379 }
8380 }
8381 }
8382 if (insn & (1 << 20)) {
8383 /* Load. */
8384 switch (op) {
8385 case 0: tmp = gen_ld8u(addr, user); break;
8386 case 4: tmp = gen_ld8s(addr, user); break;
8387 case 1: tmp = gen_ld16u(addr, user); break;
8388 case 5: tmp = gen_ld16s(addr, user); break;
8389 case 2: tmp = gen_ld32(addr, user); break;
8390 default: goto illegal_op;
8391 }
8392 if (rs == 15) {
8393 gen_bx(s, tmp);
8394 } else {
8395 store_reg(s, rs, tmp);
8396 }
8397 } else {
8398 /* Store. */
8399 tmp = load_reg(s, rs);
8400 switch (op) {
8401 case 0: gen_st8(tmp, addr, user); break;
8402 case 1: gen_st16(tmp, addr, user); break;
8403 case 2: gen_st32(tmp, addr, user); break;
8404 default: goto illegal_op;
8405 }
8406 }
8407 if (postinc)
8408 tcg_gen_addi_i32(addr, addr, imm);
8409 if (writeback) {
8410 store_reg(s, rn, addr);
8411 } else {
8412 dead_tmp(addr);
8413 }
8414 }
8415 break;
8416 default:
8417 goto illegal_op;
8418 }
8419 return 0;
8420 illegal_op:
8421 return 1;
8422 }
8423
8424 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8425 {
8426 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8427 int32_t offset;
8428 int i;
8429 TCGv tmp;
8430 TCGv tmp2;
8431 TCGv addr;
8432
8433 if (s->condexec_mask) {
8434 cond = s->condexec_cond;
8435 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8436 s->condlabel = gen_new_label();
8437 gen_test_cc(cond ^ 1, s->condlabel);
8438 s->condjmp = 1;
8439 }
8440 }
8441
8442 insn = lduw_code(s->pc);
8443 s->pc += 2;
8444
8445 switch (insn >> 12) {
8446 case 0: case 1:
8447
8448 rd = insn & 7;
8449 op = (insn >> 11) & 3;
8450 if (op == 3) {
8451 /* add/subtract */
8452 rn = (insn >> 3) & 7;
8453 tmp = load_reg(s, rn);
8454 if (insn & (1 << 10)) {
8455 /* immediate */
8456 tmp2 = new_tmp();
8457 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8458 } else {
8459 /* reg */
8460 rm = (insn >> 6) & 7;
8461 tmp2 = load_reg(s, rm);
8462 }
8463 if (insn & (1 << 9)) {
8464 if (s->condexec_mask)
8465 tcg_gen_sub_i32(tmp, tmp, tmp2);
8466 else
8467 gen_helper_sub_cc(tmp, tmp, tmp2);
8468 } else {
8469 if (s->condexec_mask)
8470 tcg_gen_add_i32(tmp, tmp, tmp2);
8471 else
8472 gen_helper_add_cc(tmp, tmp, tmp2);
8473 }
8474 dead_tmp(tmp2);
8475 store_reg(s, rd, tmp);
8476 } else {
8477 /* shift immediate */
8478 rm = (insn >> 3) & 7;
8479 shift = (insn >> 6) & 0x1f;
8480 tmp = load_reg(s, rm);
8481 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8482 if (!s->condexec_mask)
8483 gen_logic_CC(tmp);
8484 store_reg(s, rd, tmp);
8485 }
8486 break;
8487 case 2: case 3:
8488 /* arithmetic large immediate */
8489 op = (insn >> 11) & 3;
8490 rd = (insn >> 8) & 0x7;
8491 if (op == 0) { /* mov */
8492 tmp = new_tmp();
8493 tcg_gen_movi_i32(tmp, insn & 0xff);
8494 if (!s->condexec_mask)
8495 gen_logic_CC(tmp);
8496 store_reg(s, rd, tmp);
8497 } else {
8498 tmp = load_reg(s, rd);
8499 tmp2 = new_tmp();
8500 tcg_gen_movi_i32(tmp2, insn & 0xff);
8501 switch (op) {
8502 case 1: /* cmp */
8503 gen_helper_sub_cc(tmp, tmp, tmp2);
8504 dead_tmp(tmp);
8505 dead_tmp(tmp2);
8506 break;
8507 case 2: /* add */
8508 if (s->condexec_mask)
8509 tcg_gen_add_i32(tmp, tmp, tmp2);
8510 else
8511 gen_helper_add_cc(tmp, tmp, tmp2);
8512 dead_tmp(tmp2);
8513 store_reg(s, rd, tmp);
8514 break;
8515 case 3: /* sub */
8516 if (s->condexec_mask)
8517 tcg_gen_sub_i32(tmp, tmp, tmp2);
8518 else
8519 gen_helper_sub_cc(tmp, tmp, tmp2);
8520 dead_tmp(tmp2);
8521 store_reg(s, rd, tmp);
8522 break;
8523 }
8524 }
8525 break;
8526 case 4:
8527 if (insn & (1 << 11)) {
8528 rd = (insn >> 8) & 7;
8529 /* load pc-relative. Bit 1 of PC is ignored. */
8530 val = s->pc + 2 + ((insn & 0xff) * 4);
8531 val &= ~(uint32_t)2;
8532 addr = new_tmp();
8533 tcg_gen_movi_i32(addr, val);
8534 tmp = gen_ld32(addr, IS_USER(s));
8535 dead_tmp(addr);
8536 store_reg(s, rd, tmp);
8537 break;
8538 }
8539 if (insn & (1 << 10)) {
8540 /* data processing extended or blx */
8541 rd = (insn & 7) | ((insn >> 4) & 8);
8542 rm = (insn >> 3) & 0xf;
8543 op = (insn >> 8) & 3;
8544 switch (op) {
8545 case 0: /* add */
8546 tmp = load_reg(s, rd);
8547 tmp2 = load_reg(s, rm);
8548 tcg_gen_add_i32(tmp, tmp, tmp2);
8549 dead_tmp(tmp2);
8550 store_reg(s, rd, tmp);
8551 break;
8552 case 1: /* cmp */
8553 tmp = load_reg(s, rd);
8554 tmp2 = load_reg(s, rm);
8555 gen_helper_sub_cc(tmp, tmp, tmp2);
8556 dead_tmp(tmp2);
8557 dead_tmp(tmp);
8558 break;
8559 case 2: /* mov/cpy */
8560 tmp = load_reg(s, rm);
8561 store_reg(s, rd, tmp);
8562 break;
8563 case 3:/* branch [and link] exchange thumb register */
8564 tmp = load_reg(s, rm);
8565 if (insn & (1 << 7)) {
8566 val = (uint32_t)s->pc | 1;
8567 tmp2 = new_tmp();
8568 tcg_gen_movi_i32(tmp2, val);
8569 store_reg(s, 14, tmp2);
8570 }
8571 gen_bx(s, tmp);
8572 break;
8573 }
8574 break;
8575 }
8576
8577 /* data processing register */
8578 rd = insn & 7;
8579 rm = (insn >> 3) & 7;
8580 op = (insn >> 6) & 0xf;
8581 if (op == 2 || op == 3 || op == 4 || op == 7) {
8582 /* the shift/rotate ops want the operands backwards */
8583 val = rm;
8584 rm = rd;
8585 rd = val;
8586 val = 1;
8587 } else {
8588 val = 0;
8589 }
8590
8591 if (op == 9) { /* neg */
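/* Thumb neg is rsbs rd, rm, #0, so seed the first operand with zero. */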
8592 tmp = new_tmp();
8593 tcg_gen_movi_i32(tmp, 0);
8594 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8595 tmp = load_reg(s, rd);
8596 } else {
8597 TCGV_UNUSED(tmp);
8598 }
8599
8600 tmp2 = load_reg(s, rm);
8601 switch (op) {
8602 case 0x0: /* and */
8603 tcg_gen_and_i32(tmp, tmp, tmp2);
8604 if (!s->condexec_mask)
8605 gen_logic_CC(tmp);
8606 break;
8607 case 0x1: /* eor */
8608 tcg_gen_xor_i32(tmp, tmp, tmp2);
8609 if (!s->condexec_mask)
8610 gen_logic_CC(tmp);
8611 break;
8612 case 0x2: /* lsl */
8613 if (s->condexec_mask) {
8614 gen_helper_shl(tmp2, tmp2, tmp);
8615 } else {
8616 gen_helper_shl_cc(tmp2, tmp2, tmp);
8617 gen_logic_CC(tmp2);
8618 }
8619 break;
8620 case 0x3: /* lsr */
8621 if (s->condexec_mask) {
8622 gen_helper_shr(tmp2, tmp2, tmp);
8623 } else {
8624 gen_helper_shr_cc(tmp2, tmp2, tmp);
8625 gen_logic_CC(tmp2);
8626 }
8627 break;
8628 case 0x4: /* asr */
8629 if (s->condexec_mask) {
8630 gen_helper_sar(tmp2, tmp2, tmp);
8631 } else {
8632 gen_helper_sar_cc(tmp2, tmp2, tmp);
8633 gen_logic_CC(tmp2);
8634 }
8635 break;
8636 case 0x5: /* adc */
8637 if (s->condexec_mask)
8638 gen_adc(tmp, tmp2);
8639 else
8640 gen_helper_adc_cc(tmp, tmp, tmp2);
8641 break;
8642 case 0x6: /* sbc */
8643 if (s->condexec_mask)
8644 gen_sub_carry(tmp, tmp, tmp2);
8645 else
8646 gen_helper_sbc_cc(tmp, tmp, tmp2);
8647 break;
8648 case 0x7: /* ror */
8649 if (s->condexec_mask) {
8650 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8651 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
8652 } else {
8653 gen_helper_ror_cc(tmp2, tmp2, tmp);
8654 gen_logic_CC(tmp2);
8655 }
8656 break;
8657 case 0x8: /* tst */
8658 tcg_gen_and_i32(tmp, tmp, tmp2);
8659 gen_logic_CC(tmp);
8660 rd = 16;
8661 break;
8662 case 0x9: /* neg */
8663 if (s->condexec_mask)
8664 tcg_gen_neg_i32(tmp, tmp2);
8665 else
8666 gen_helper_sub_cc(tmp, tmp, tmp2);
8667 break;
8668 case 0xa: /* cmp */
8669 gen_helper_sub_cc(tmp, tmp, tmp2);
8670 rd = 16;
8671 break;
8672 case 0xb: /* cmn */
8673 gen_helper_add_cc(tmp, tmp, tmp2);
8674 rd = 16;
8675 break;
8676 case 0xc: /* orr */
8677 tcg_gen_or_i32(tmp, tmp, tmp2);
8678 if (!s->condexec_mask)
8679 gen_logic_CC(tmp);
8680 break;
8681 case 0xd: /* mul */
8682 tcg_gen_mul_i32(tmp, tmp, tmp2);
8683 if (!s->condexec_mask)
8684 gen_logic_CC(tmp);
8685 break;
8686 case 0xe: /* bic */
8687 tcg_gen_andc_i32(tmp, tmp, tmp2);
8688 if (!s->condexec_mask)
8689 gen_logic_CC(tmp);
8690 break;
8691 case 0xf: /* mvn */
8692 tcg_gen_not_i32(tmp2, tmp2);
8693 if (!s->condexec_mask)
8694 gen_logic_CC(tmp2);
8695 val = 1;
8696 rm = rd;
8697 break;
8698 }
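            /* Writeback: rd == 16 marks compare/test ops with no destination.
               When val is set the result was computed into tmp2 (the shift,
               rotate and mvn cases), so it is stored to rm, which at this
               point holds the architectural Rd. */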
8699 if (rd != 16) {
8700 if (val) {
8701 store_reg(s, rm, tmp2);
8702 if (op != 0xf)
8703 dead_tmp(tmp);
8704 } else {
8705 store_reg(s, rd, tmp);
8706 dead_tmp(tmp2);
8707 }
8708 } else {
8709 dead_tmp(tmp);
8710 dead_tmp(tmp2);
8711 }
8712 break;
8713
8714 case 5:
8715 /* load/store register offset. */
8716 rd = insn & 7;
8717 rn = (insn >> 3) & 7;
8718 rm = (insn >> 6) & 7;
8719 op = (insn >> 9) & 7;
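            /* op (insn bits [11:9]): 0=str 1=strh 2=strb 3=ldrsb
               4=ldr 5=ldrh 6=ldrb 7=ldrsh; values >= 3 are loads. */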
8720 addr = load_reg(s, rn);
8721 tmp = load_reg(s, rm);
8722 tcg_gen_add_i32(addr, addr, tmp);
8723 dead_tmp(tmp);
8724
8725 if (op < 3) /* store */
8726 tmp = load_reg(s, rd);
8727
8728 switch (op) {
8729 case 0: /* str */
8730 gen_st32(tmp, addr, IS_USER(s));
8731 break;
8732 case 1: /* strh */
8733 gen_st16(tmp, addr, IS_USER(s));
8734 break;
8735 case 2: /* strb */
8736 gen_st8(tmp, addr, IS_USER(s));
8737 break;
8738 case 3: /* ldrsb */
8739 tmp = gen_ld8s(addr, IS_USER(s));
8740 break;
8741 case 4: /* ldr */
8742 tmp = gen_ld32(addr, IS_USER(s));
8743 break;
8744 case 5: /* ldrh */
8745 tmp = gen_ld16u(addr, IS_USER(s));
8746 break;
8747 case 6: /* ldrb */
8748 tmp = gen_ld8u(addr, IS_USER(s));
8749 break;
8750 case 7: /* ldrsh */
8751 tmp = gen_ld16s(addr, IS_USER(s));
8752 break;
8753 }
8754 if (op >= 3) /* load */
8755 store_reg(s, rd, tmp);
8756 dead_tmp(addr);
8757 break;
8758
8759 case 6:
8760 /* load/store word immediate offset */
8761 rd = insn & 7;
8762 rn = (insn >> 3) & 7;
8763 addr = load_reg(s, rn);
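            /* imm5 lives in insn bits [10:6] and is scaled by the access
               size, so "(insn >> 4) & 0x7c" extracts and multiplies by 4 in
               one step (cases 7 and 8 below use the same trick with scale
               factors of 1 and 2). */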
8764 val = (insn >> 4) & 0x7c;
8765 tcg_gen_addi_i32(addr, addr, val);
8766
8767 if (insn & (1 << 11)) {
8768 /* load */
8769 tmp = gen_ld32(addr, IS_USER(s));
8770 store_reg(s, rd, tmp);
8771 } else {
8772 /* store */
8773 tmp = load_reg(s, rd);
8774 gen_st32(tmp, addr, IS_USER(s));
8775 }
8776 dead_tmp(addr);
8777 break;
8778
8779 case 7:
8780 /* load/store byte immediate offset */
8781 rd = insn & 7;
8782 rn = (insn >> 3) & 7;
8783 addr = load_reg(s, rn);
8784 val = (insn >> 6) & 0x1f;
8785 tcg_gen_addi_i32(addr, addr, val);
8786
8787 if (insn & (1 << 11)) {
8788 /* load */
8789 tmp = gen_ld8u(addr, IS_USER(s));
8790 store_reg(s, rd, tmp);
8791 } else {
8792 /* store */
8793 tmp = load_reg(s, rd);
8794 gen_st8(tmp, addr, IS_USER(s));
8795 }
8796 dead_tmp(addr);
8797 break;
8798
8799 case 8:
8800 /* load/store halfword immediate offset */
8801 rd = insn & 7;
8802 rn = (insn >> 3) & 7;
8803 addr = load_reg(s, rn);
8804 val = (insn >> 5) & 0x3e;
8805 tcg_gen_addi_i32(addr, addr, val);
8806
8807 if (insn & (1 << 11)) {
8808 /* load */
8809 tmp = gen_ld16u(addr, IS_USER(s));
8810 store_reg(s, rd, tmp);
8811 } else {
8812 /* store */
8813 tmp = load_reg(s, rd);
8814 gen_st16(tmp, addr, IS_USER(s));
8815 }
8816 dead_tmp(addr);
8817 break;
8818
8819 case 9:
8820 /* load/store from stack */
8821 rd = (insn >> 8) & 7;
8822 addr = load_reg(s, 13);
8823 val = (insn & 0xff) * 4;
8824 tcg_gen_addi_i32(addr, addr, val);
8825
8826 if (insn & (1 << 11)) {
8827 /* load */
8828 tmp = gen_ld32(addr, IS_USER(s));
8829 store_reg(s, rd, tmp);
8830 } else {
8831 /* store */
8832 tmp = load_reg(s, rd);
8833 gen_st32(tmp, addr, IS_USER(s));
8834 }
8835 dead_tmp(addr);
8836 break;
8837
8838 case 10:
8839 /* add to high reg */
8840 rd = (insn >> 8) & 7;
8841 if (insn & (1 << 11)) {
8842 /* SP */
8843 tmp = load_reg(s, 13);
8844 } else {
8845 /* PC. bit 1 is ignored. */
8846 tmp = new_tmp();
8847 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8848 }
8849 val = (insn & 0xff) * 4;
8850 tcg_gen_addi_i32(tmp, tmp, val);
8851 store_reg(s, rd, tmp);
8852 break;
8853
8854 case 11:
8855 /* misc */
8856 op = (insn >> 8) & 0xf;
8857 switch (op) {
8858 case 0:
8859 /* adjust stack pointer */
8860 tmp = load_reg(s, 13);
8861 val = (insn & 0x7f) * 4;
8862 if (insn & (1 << 7))
8863 val = -(int32_t)val;
8864 tcg_gen_addi_i32(tmp, tmp, val);
8865 store_reg(s, 13, tmp);
8866 break;
8867
8868 case 2: /* sign/zero extend. */
8869 ARCH(6);
8870 rd = insn & 7;
8871 rm = (insn >> 3) & 7;
8872 tmp = load_reg(s, rm);
8873 switch ((insn >> 6) & 3) {
8874 case 0: gen_sxth(tmp); break;
8875 case 1: gen_sxtb(tmp); break;
8876 case 2: gen_uxth(tmp); break;
8877 case 3: gen_uxtb(tmp); break;
8878 }
8879 store_reg(s, rd, tmp);
8880 break;
8881 case 4: case 5: case 0xc: case 0xd:
8882 /* push/pop */
8883 addr = load_reg(s, 13);
8884 if (insn & (1 << 8))
8885 offset = 4;
8886 else
8887 offset = 0;
8888 for (i = 0; i < 8; i++) {
8889 if (insn & (1 << i))
8890 offset += 4;
8891 }
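                /* offset now holds the total transfer size in bytes,
                   including the extra PC/LR slot when bit 8 is set. For push
                   (bit 11 clear) addr is pre-decremented by offset, climbs
                   back to the original SP during the stores, and is lowered
                   by offset again before the writeback below; for pop it
                   simply ends up offset bytes above the original SP. */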
8892 if ((insn & (1 << 11)) == 0) {
8893 tcg_gen_addi_i32(addr, addr, -offset);
8894 }
8895 for (i = 0; i < 8; i++) {
8896 if (insn & (1 << i)) {
8897 if (insn & (1 << 11)) {
8898 /* pop */
8899 tmp = gen_ld32(addr, IS_USER(s));
8900 store_reg(s, i, tmp);
8901 } else {
8902 /* push */
8903 tmp = load_reg(s, i);
8904 gen_st32(tmp, addr, IS_USER(s));
8905 }
8906 /* advance to the next address. */
8907 tcg_gen_addi_i32(addr, addr, 4);
8908 }
8909 }
8910 TCGV_UNUSED(tmp);
8911 if (insn & (1 << 8)) {
8912 if (insn & (1 << 11)) {
8913 /* pop pc */
8914 tmp = gen_ld32(addr, IS_USER(s));
8915 /* don't set the pc until the rest of the instruction
8916 has completed */
8917 } else {
8918 /* push lr */
8919 tmp = load_reg(s, 14);
8920 gen_st32(tmp, addr, IS_USER(s));
8921 }
8922 tcg_gen_addi_i32(addr, addr, 4);
8923 }
8924 if ((insn & (1 << 11)) == 0) {
8925 tcg_gen_addi_i32(addr, addr, -offset);
8926 }
8927 /* write back the new stack pointer */
8928 store_reg(s, 13, addr);
8929 /* set the new PC value */
8930 if ((insn & 0x0900) == 0x0900)
8931 gen_bx(s, tmp);
8932 break;
8933
8934             case 1: case 3: case 9: case 11: /* cbz/cbnz */
8935 rm = insn & 7;
8936 tmp = load_reg(s, rm);
8937 s->condlabel = gen_new_label();
8938 s->condjmp = 1;
8939 if (insn & (1 << 11))
8940 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8941 else
8942 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8943 dead_tmp(tmp);
8944                 offset = ((insn & 0xf8) >> 2) | ((insn & 0x200) >> 3);
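                /* insn bit 9 is i and bits [7:3] are imm5; the masks above
                   rebuild the branch offset i:imm5:'0' (illustrative:
                   i = 0, imm5 = 1 gives offset = 2, i.e. one halfword). */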
8945 val = (uint32_t)s->pc + 2;
8946 val += offset;
8947 gen_jmp(s, val);
8948 break;
8949
8950 case 15: /* IT, nop-hint. */
8951 if ((insn & 0xf) == 0) {
8952 gen_nop_hint(s, (insn >> 4) & 0xf);
8953 break;
8954 }
8955 /* If Then. */
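                /* condexec_cond keeps firstcond with its LSB cleared;
                   condexec_mask gets the 5 bits firstcond[0]:mask[3:0],
                   which the main loop in gen_intermediate_code_internal()
                   shifts left once per executed instruction to feed the
                   next condition LSB. */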
8956 s->condexec_cond = (insn >> 4) & 0xe;
8957 s->condexec_mask = insn & 0x1f;
8958 /* No actual code generated for this insn, just setup state. */
8959 break;
8960
8961 case 0xe: /* bkpt */
8962 gen_exception_insn(s, 2, EXCP_BKPT);
8963 break;
8964
8965 case 0xa: /* rev */
8966 ARCH(6);
8967 rn = (insn >> 3) & 0x7;
8968 rd = insn & 0x7;
8969 tmp = load_reg(s, rn);
8970 switch ((insn >> 6) & 3) {
8971 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8972 case 1: gen_rev16(tmp); break;
8973 case 3: gen_revsh(tmp); break;
8974 default: goto illegal_op;
8975 }
8976 store_reg(s, rd, tmp);
8977 break;
8978
8979 case 6: /* cps */
8980 ARCH(6);
8981 if (IS_USER(s))
8982 break;
8983 if (IS_M(env)) {
8984 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
8985 /* PRIMASK */
8986 if (insn & 1) {
8987 addr = tcg_const_i32(16);
8988 gen_helper_v7m_msr(cpu_env, addr, tmp);
8989 tcg_temp_free_i32(addr);
8990 }
8991 /* FAULTMASK */
8992 if (insn & 2) {
8993 addr = tcg_const_i32(17);
8994 gen_helper_v7m_msr(cpu_env, addr, tmp);
8995 tcg_temp_free_i32(addr);
8996 }
8997 tcg_temp_free_i32(tmp);
8998 gen_lookup_tb(s);
8999 } else {
9000 if (insn & (1 << 4))
9001 shift = CPSR_A | CPSR_I | CPSR_F;
9002 else
9003 shift = 0;
9004 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9005 }
9006 break;
9007
9008 default:
9009 goto undef;
9010 }
9011 break;
9012
9013 case 12:
9014 /* load/store multiple */
9015 rn = (insn >> 8) & 0x7;
9016 addr = load_reg(s, rn);
9017 for (i = 0; i < 8; i++) {
9018 if (insn & (1 << i)) {
9019 if (insn & (1 << 11)) {
9020 /* load */
9021 tmp = gen_ld32(addr, IS_USER(s));
9022 store_reg(s, i, tmp);
9023 } else {
9024 /* store */
9025 tmp = load_reg(s, i);
9026 gen_st32(tmp, addr, IS_USER(s));
9027 }
9028 /* advance to the next address */
9029 tcg_gen_addi_i32(addr, addr, 4);
9030 }
9031 }
9032         /* Base register writeback (suppressed when rn appears in the
             register list). */
9033 if ((insn & (1 << rn)) == 0) {
9034 store_reg(s, rn, addr);
9035 } else {
9036 dead_tmp(addr);
9037 }
9038 break;
9039
9040 case 13:
9041 /* conditional branch or swi */
9042 cond = (insn >> 8) & 0xf;
9043 if (cond == 0xe)
9044 goto undef;
9045
9046 if (cond == 0xf) {
9047 /* swi */
9048 gen_set_pc_im(s->pc);
9049 s->is_jmp = DISAS_SWI;
9050 break;
9051 }
9052 /* generate a conditional jump to next instruction */
9053 s->condlabel = gen_new_label();
9054 gen_test_cc(cond ^ 1, s->condlabel);
9055 s->condjmp = 1;
9056
9057 /* jump to the offset */
9058 val = (uint32_t)s->pc + 2;
9059 offset = ((int32_t)insn << 24) >> 24;
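            /* "((int32_t)insn << 24) >> 24" sign-extends the low 8 bits,
               relying on arithmetic right shift of signed values as QEMU
               does throughout; case 14 below reuses the idiom for an 11-bit
               immediate. The offset counts halfwords, hence the << 1 below. */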
9060 val += offset << 1;
9061 gen_jmp(s, val);
9062 break;
9063
9064 case 14:
9065 if (insn & (1 << 11)) {
9066 if (disas_thumb2_insn(env, s, insn))
9067 goto undef32;
9068 break;
9069 }
9070 /* unconditional branch */
9071 val = (uint32_t)s->pc;
9072 offset = ((int32_t)insn << 21) >> 21;
9073 val += (offset << 1) + 2;
9074 gen_jmp(s, val);
9075 break;
9076
9077 case 15:
9078 if (disas_thumb2_insn(env, s, insn))
9079 goto undef32;
9080 break;
9081 }
9082 return;
9083 undef32:
9084 gen_exception_insn(s, 4, EXCP_UDEF);
9085 return;
9086 illegal_op:
9087 undef:
9088 gen_exception_insn(s, 2, EXCP_UDEF);
9089 }
9090
9091 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9092 basic block 'tb'. If search_pc is TRUE, also generate PC
9093 information for each intermediate instruction. */
9094 static inline void gen_intermediate_code_internal(CPUState *env,
9095 TranslationBlock *tb,
9096 int search_pc)
9097 {
9098 DisasContext dc1, *dc = &dc1;
9099 CPUBreakpoint *bp;
9100 uint16_t *gen_opc_end;
9101 int j, lj;
9102 target_ulong pc_start;
9103 uint32_t next_page_start;
9104 int num_insns;
9105 int max_insns;
9106
9107 /* generate intermediate code */
9108 num_temps = 0;
9109
9110 pc_start = tb->pc;
9111
9112 dc->tb = tb;
9113
9114 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9115
9116 dc->is_jmp = DISAS_NEXT;
9117 dc->pc = pc_start;
9118 dc->singlestep_enabled = env->singlestep_enabled;
9119 dc->condjmp = 0;
9120 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9121 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9122 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9123 #if !defined(CONFIG_USER_ONLY)
9124 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9125 #endif
9126 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9127 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9128 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9129 cpu_F0s = tcg_temp_new_i32();
9130 cpu_F1s = tcg_temp_new_i32();
9131 cpu_F0d = tcg_temp_new_i64();
9132 cpu_F1d = tcg_temp_new_i64();
9133 cpu_V0 = cpu_F0d;
9134 cpu_V1 = cpu_F1d;
9135 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9136 cpu_M0 = tcg_temp_new_i64();
9137 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9138 lj = -1;
9139 num_insns = 0;
9140 max_insns = tb->cflags & CF_COUNT_MASK;
9141 if (max_insns == 0)
9142 max_insns = CF_COUNT_MASK;
9143
9144 gen_icount_start();
9145
9146 /* A note on handling of the condexec (IT) bits:
9147 *
9148 * We want to avoid the overhead of having to write the updated condexec
9149 * bits back to the CPUState for every instruction in an IT block. So:
9150 * (1) if the condexec bits are not already zero then we write
9151 * zero back into the CPUState now. This avoids complications trying
9152 * to do it at the end of the block. (For example if we don't do this
9153 * it's hard to identify whether we can safely skip writing condexec
9154 * at the end of the TB, which we definitely want to do for the case
9155 * where a TB doesn't do anything with the IT state at all.)
9156 * (2) if we are going to leave the TB then we call gen_set_condexec()
9157 * which will write the correct value into CPUState if zero is wrong.
9158 * This is done both for leaving the TB at the end, and for leaving
9159 * it because of an exception we know will happen, which is done in
9160 * gen_exception_insn(). The latter is necessary because we need to
9161 * leave the TB with the PC/IT state just prior to execution of the
9162 * instruction which caused the exception.
9163 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9164 * then the CPUState will be wrong and we need to reset it.
9165 * This is handled in the same way as restoration of the
9166 * PC in these situations: we will be called again with search_pc=1
9167 * and generate a mapping of the condexec bits for each PC in
9168      * gen_opc_condexec_bits[]. gen_pc_load() then uses this to restore
9169 * the condexec bits.
9170 *
9171 * Note that there are no instructions which can read the condexec
9172 * bits, and none which can write non-static values to them, so
9173 * we don't need to care about whether CPUState is correct in the
9174 * middle of a TB.
9175 */
9176
9177     /* Write zero into the condexec bits now, per note (1) in the
9178        comment above. */
9179 if (dc->condexec_mask || dc->condexec_cond)
9180 {
9181 TCGv tmp = new_tmp();
9182 tcg_gen_movi_i32(tmp, 0);
9183 store_cpu_field(tmp, condexec_bits);
9184 }
9185 do {
9186 #ifdef CONFIG_USER_ONLY
9187 /* Intercept jump to the magic kernel page. */
9188 if (dc->pc >= 0xffff0000) {
9189         /* We always get here via a jump, so we know we are not in a
9190 conditional execution block. */
9191 gen_exception(EXCP_KERNEL_TRAP);
9192 dc->is_jmp = DISAS_UPDATE;
9193 break;
9194 }
9195 #else
9196 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9197         if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9198             /* We always get here via a jump, so we know we are not in a
9198 conditional execution block. */
9199 gen_exception(EXCP_EXCEPTION_EXIT);
9200 dc->is_jmp = DISAS_UPDATE;
9201 break;
9202 }
9203 #endif
9204
9205 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9206 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9207 if (bp->pc == dc->pc) {
9208 gen_exception_insn(dc, 0, EXCP_DEBUG);
9209 /* Advance PC so that clearing the breakpoint will
9210 invalidate this TB. */
9211 dc->pc += 2;
9212 goto done_generating;
9213 break;
9214 }
9215 }
9216 }
9217 if (search_pc) {
9218 j = gen_opc_ptr - gen_opc_buf;
9219 if (lj < j) {
9220 lj++;
9221 while (lj < j)
9222 gen_opc_instr_start[lj++] = 0;
9223 }
9224 gen_opc_pc[lj] = dc->pc;
9225 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
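            /* This packs the IT state back into the CPUState layout
               (cond in bits [7:4], mask in bits [3:0]); the >> 1 drops the
               extra low bit that was appended when dc->condexec_mask was
               initialised from tb->flags above. */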
9226 gen_opc_instr_start[lj] = 1;
9227 gen_opc_icount[lj] = num_insns;
9228 }
9229
9230 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9231 gen_io_start();
9232
9233 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9234 tcg_gen_debug_insn_start(dc->pc);
9235 }
9236
9237 if (dc->thumb) {
9238 disas_thumb_insn(env, dc);
9239 if (dc->condexec_mask) {
9240 dc->condexec_cond = (dc->condexec_cond & 0xe)
9241 | ((dc->condexec_mask >> 4) & 1);
9242 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
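                /* condexec_mask works as a shift register of upcoming
                   condition LSBs (padded with a trailing zero), so shifting
                   left once per instruction exposes the next bit through
                   condexec_cond; an all-zero mask means the IT block ended. */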
9243 if (dc->condexec_mask == 0) {
9244 dc->condexec_cond = 0;
9245 }
9246 }
9247 } else {
9248 disas_arm_insn(env, dc);
9249 }
9250 if (num_temps) {
9251 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
9252 num_temps = 0;
9253 }
9254
9255 if (dc->condjmp && !dc->is_jmp) {
9256 gen_set_label(dc->condlabel);
9257 dc->condjmp = 0;
9258 }
9259 /* Translation stops when a conditional branch is encountered.
9260 * Otherwise the subsequent code could get translated several times.
9261 * Also stop translation when a page boundary is reached. This
9262 * ensures prefetch aborts occur at the right place. */
9263         num_insns++;
9264 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9265 !env->singlestep_enabled &&
9266 !singlestep &&
9267 dc->pc < next_page_start &&
9268 num_insns < max_insns);
9269
9270 if (tb->cflags & CF_LAST_IO) {
9271 if (dc->condjmp) {
9272 /* FIXME: This can theoretically happen with self-modifying
9273 code. */
9274 cpu_abort(env, "IO on conditional branch instruction");
9275 }
9276 gen_io_end();
9277 }
9278
9279 /* At this stage dc->condjmp will only be set when the skipped
9280 instruction was a conditional branch or trap, and the PC has
9281 already been written. */
9282 if (unlikely(env->singlestep_enabled)) {
9283 /* Make sure the pc is updated, and raise a debug exception. */
9284 if (dc->condjmp) {
9285 gen_set_condexec(dc);
9286 if (dc->is_jmp == DISAS_SWI) {
9287 gen_exception(EXCP_SWI);
9288 } else {
9289 gen_exception(EXCP_DEBUG);
9290 }
9291 gen_set_label(dc->condlabel);
9292 }
9293 if (dc->condjmp || !dc->is_jmp) {
9294 gen_set_pc_im(dc->pc);
9295 dc->condjmp = 0;
9296 }
9297 gen_set_condexec(dc);
9298 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9299 gen_exception(EXCP_SWI);
9300 } else {
9301 /* FIXME: Single stepping a WFI insn will not halt
9302 the CPU. */
9303 gen_exception(EXCP_DEBUG);
9304 }
9305 } else {
9306 /* While branches must always occur at the end of an IT block,
9307 there are a few other things that can cause us to terminate
9308            the TB in the middle of an IT block:
9309 - Exception generating instructions (bkpt, swi, undefined).
9310 - Page boundaries.
9311 - Hardware watchpoints.
9312 Hardware breakpoints have already been handled and skip this code.
9313 */
9314 gen_set_condexec(dc);
9315         switch (dc->is_jmp) {
9316 case DISAS_NEXT:
9317 gen_goto_tb(dc, 1, dc->pc);
9318 break;
9319 default:
9320 case DISAS_JUMP:
9321 case DISAS_UPDATE:
9322 /* indicate that the hash table must be used to find the next TB */
9323 tcg_gen_exit_tb(0);
9324 break;
9325 case DISAS_TB_JUMP:
9326 /* nothing more to generate */
9327 break;
9328 case DISAS_WFI:
9329 gen_helper_wfi();
9330 break;
9331 case DISAS_SWI:
9332 gen_exception(EXCP_SWI);
9333 break;
9334 }
9335 if (dc->condjmp) {
9336 gen_set_label(dc->condlabel);
9337 gen_set_condexec(dc);
9338 gen_goto_tb(dc, 1, dc->pc);
9339 dc->condjmp = 0;
9340 }
9341 }
9342
9343 done_generating:
9344 gen_icount_end(tb, num_insns);
9345 *gen_opc_ptr = INDEX_op_end;
9346
9347 #ifdef DEBUG_DISAS
9348 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9349 qemu_log("----------------\n");
9350 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9351 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
9352 qemu_log("\n");
9353 }
9354 #endif
9355 if (search_pc) {
9356 j = gen_opc_ptr - gen_opc_buf;
9357 lj++;
9358 while (lj <= j)
9359 gen_opc_instr_start[lj++] = 0;
9360 } else {
9361 tb->size = dc->pc - pc_start;
9362 tb->icount = num_insns;
9363 }
9364 }
9365
9366 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
9367 {
9368 gen_intermediate_code_internal(env, tb, 0);
9369 }
9370
9371 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
9372 {
9373 gen_intermediate_code_internal(env, tb, 1);
9374 }
9375
9376 static const char *cpu_mode_names[16] = {
9377 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9378 "???", "???", "???", "und", "???", "???", "???", "sys"
9379 };
9380
9381 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
9382 int flags)
9383 {
9384 int i;
9385 #if 0
9386 union {
9387 uint32_t i;
9388 float s;
9389 } s0, s1;
9390 CPU_DoubleU d;
9391 /* ??? This assumes float64 and double have the same layout.
9392 Oh well, it's only debug dumps. */
9393 union {
9394 float64 f64;
9395 double d;
9396 } d0;
9397 #endif
9398 uint32_t psr;
9399
9400     for (i = 0; i < 16; i++) {
9401 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9402 if ((i % 4) == 3)
9403 cpu_fprintf(f, "\n");
9404 else
9405 cpu_fprintf(f, " ");
9406 }
9407 psr = cpsr_read(env);
9408 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9409 psr,
9410 psr & (1 << 31) ? 'N' : '-',
9411 psr & (1 << 30) ? 'Z' : '-',
9412 psr & (1 << 29) ? 'C' : '-',
9413 psr & (1 << 28) ? 'V' : '-',
9414 psr & CPSR_T ? 'T' : 'A',
9415 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
9416
9417 #if 0
9418 for (i = 0; i < 16; i++) {
9419 d.d = env->vfp.regs[i];
9420 s0.i = d.l.lower;
9421 s1.i = d.l.upper;
9422 d0.f64 = d.d;
9423 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9424 i * 2, (int)s0.i, s0.s,
9425 i * 2 + 1, (int)s1.i, s1.s,
9426 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
9427 d0.d);
9428 }
9429 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
9430 #endif
9431 }
9432
9433 void gen_pc_load(CPUState *env, TranslationBlock *tb,
9434 unsigned long searched_pc, int pc_pos, void *puc)
9435 {
9436 env->regs[15] = gen_opc_pc[pc_pos];
9437 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
9438 }