/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"
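/* helpers.h is included twice deliberately: the first include declares the
 * helper prototypes, and with GEN_HELPER defined to 1 the second include
 * expands the same DEF_HELPER macros into the static inline gen_helper_*
 * wrappers that emit TCG call ops.  (arm_translate_init() below includes it
 * a third time with GEN_HELPER set to 2, for the helper registration pass.)
 * One macro list, several expansions: the classic C "x-macro" idiom. */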

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
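/* ARCH() can only be used inside the instruction decoders, which provide
 * the illegal_op label it jumps to; code reached through that label raises
 * an undefined-instruction exception. */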

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}

static int num_temps;
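/* num_temps tracks TCG temporaries allocated via new_tmp() but not yet
 * released via dead_tmp(), so that a leaked temporary can be detected and
 * reported close to the instruction that leaked it instead of surfacing as
 * an obscure TCG failure much later. */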

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
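/* Architecturally, reading r15 yields the address of the current
 * instruction plus 8 in ARM state or plus 4 in Thumb state (the historical
 * pipeline offset).  s->pc has already been advanced past the current
 * instruction here, so only one more instruction length is added: for an
 * ARM insn at 0x8000, s->pc is 0x8004 and r15 reads as 0x8008. */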
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* Normally, since we updated PC, we need only add one more insn's
           length.  */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

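/* Dual signed 16x16->32 multiply: on return, a holds the product of the two
 * low halfwords and b holds the product of the two high halfwords.  This is
 * the building block shared by the dual-multiply instructions (SMUAD,
 * SMUSD and relatives), which then add or subtract the two products. */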
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
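/* E.g. 0xAABBCCDD becomes 0xBBAADDCC: the two AND masks isolate the odd and
   even bytes after the opposing 8-bit shifts, and the OR recombines them
   with the bytes of each 16-bit lane swapped independently. */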
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
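/* The sign extension below uses the classic xor/sub identity: for an n-bit
   field with sign bit s = 1 << (n - 1), (v ^ s) - s equals v when the sign
   bit is clear and v - 2^n when it is set.  E.g. for a 4-bit field holding
   0xE: (0xE ^ 0x8) - 0x8 = -2, the correct signed value. */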
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Return (b << 32) + a.  Mark inputs as dead.  */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    dead_tmp(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a.  Mark inputs as dead.  */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    dead_tmp(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
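/* Clearing bit 15 of both operands before the 32-bit add stops any carry
   from the low halfword leaking into the high one; the final xor with the
   saved bit-15 difference restores what that bit of each sum should be.
   The net effect is two independent 16-bit adds in one 32-bit add. */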

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
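/* In the ARM immediate-shift encoding a count of zero is reused for the
   otherwise-redundant cases: LSR #0 encodes LSR #32, ASR #0 encodes ASR #32
   (fill with the sign bit), and ROR #0 encodes RRX, a one-bit rotate
   through the carry flag.  Only LSL #0 really means "no shift". */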
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
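
/* In this decoder and the Thumb-2 one below, one field selects the
   operation and operand size (add16, addsubx, subaddx, sub16, add8, sub8)
   while the other selects the variant prefix: the plain modular forms
   (s and u), which also set the GE flags and therefore get a pointer to
   them; the saturating forms (q, uq); and the halving forms (sh, uh). */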

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

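/* Emit a branch to "label" if condition "cc" holds.  The flags are kept in
   dedicated CPUState fields: ZF holds the last result, so Z is set iff
   ZF == 0; NF and VF hold N and V in their bit 31; CF holds C as 0 or 1.
   This is why the sign-based TCG conditions (LT/GE against zero) can test
   N and V directly below. */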
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
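/* Together with gen_bx_im above this implements BX-style interworking:
   bit 0 of the target address selects the instruction set (1 = Thumb,
   0 = ARM) and is cleared before being written to the PC, so a branch to
   0x8001 enters Thumb state at address 0x8000. */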
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a temporary
   and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

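/* Apply the addressing-mode offset of a load/store instruction to "var".
   Bit 25 of the insn selects an immediate (clear) or shifted-register (set)
   offset, and bit 23 selects whether it is added or subtracted, so e.g.
   "LDR r0, [r1, -r2, LSL #2]" subtracts the shifted value of r2 from the
   base in r1. */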
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

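/* Return the byte offset of a VFP register within CPUARMState.  The VFP
   register bank is architecturally aliased: single-precision registers
   s[2n] and s[2n+1] are the low and high 32-bit halves of double-precision
   register d[n], which is why the single-precision case indexes
   vfp.regs[reg >> 1] plus the appropriate half of a CPU_DoubleU. */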
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = new_tmp();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    dead_tmp(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

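/* Decode the addressing mode of an iwMMXt load/store and leave the
   effective address in "dest", performing any base-register writeback.
   Bit 24 (P) selects pre- vs post-indexing, bit 23 (U) the offset
   direction and bit 21 (W) writeback; the unindexed form with U clear is
   rejected as invalid.  Returns nonzero on an invalid addressing mode. */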
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = new_tmp();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    dead_tmp(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {  /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {  /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = new_tmp();
        if (gen_iwmmxt_address(s, insn, addr)) {
            dead_tmp(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {  /* WLDRW wCx */
                tmp = new_tmp();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {  /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {  /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {  /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {  /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {  /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {  /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {  /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {  /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        dead_tmp(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

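    /* The case values below are ((insn bits [23:20]) << 8) | (insn bits
     * [11:4]), i.e. the coprocessor opcode fields with the register
     * numbers masked out. */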
    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:  /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:  /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:  /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:  /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:  /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:  /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:  /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:  /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:  /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:  /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:  /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:  /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:  /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:  /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:  /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:  /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:  /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:  /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        dead_tmp(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:  /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:  /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:  /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:  /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:  /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:  /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:  /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:  /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:  /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:  /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:  /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:  /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:  /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:  /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:  /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:  /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:  /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
2221 gen_op_iwmmxt_set_cup();
2222 break;
2223 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2224 case 0x418: case 0x518: case 0x618: case 0x718:
2225 case 0x818: case 0x918: case 0xa18: case 0xb18:
2226 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2227 wrd = (insn >> 12) & 0xf;
2228 rd0 = (insn >> 16) & 0xf;
2229 rd1 = (insn >> 0) & 0xf;
2230 gen_op_iwmmxt_movq_M0_wRn(rd0);
2231 switch ((insn >> 20) & 0xf) {
2232 case 0x0:
2233 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2234 break;
2235 case 0x1:
2236 gen_op_iwmmxt_addub_M0_wRn(rd1);
2237 break;
2238 case 0x3:
2239 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2240 break;
2241 case 0x4:
2242 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2243 break;
2244 case 0x5:
2245 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2246 break;
2247 case 0x7:
2248 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2249 break;
2250 case 0x8:
2251 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2252 break;
2253 case 0x9:
2254 gen_op_iwmmxt_addul_M0_wRn(rd1);
2255 break;
2256 case 0xb:
2257 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2258 break;
2259 default:
2260 return 1;
2261 }
2262 gen_op_iwmmxt_movq_wRn_M0(wrd);
2263 gen_op_iwmmxt_set_mup();
2264 gen_op_iwmmxt_set_cup();
2265 break;
2266 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2267 case 0x408: case 0x508: case 0x608: case 0x708:
2268 case 0x808: case 0x908: case 0xa08: case 0xb08:
2269 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2270 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2271 return 1;
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
2276 switch ((insn >> 22) & 3) {
2277 case 1:
2278 if (insn & (1 << 21))
2279 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2280 else
2281 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2282 break;
2283 case 2:
2284 if (insn & (1 << 21))
2285 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2286 else
2287 gen_op_iwmmxt_packul_M0_wRn(rd1);
2288 break;
2289 case 3:
2290 if (insn & (1 << 21))
2291 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2292 else
2293 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2294 break;
2295 }
2296 gen_op_iwmmxt_movq_wRn_M0(wrd);
2297 gen_op_iwmmxt_set_mup();
2298 gen_op_iwmmxt_set_cup();
2299 break;
2300 case 0x201: case 0x203: case 0x205: case 0x207:
2301 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2302 case 0x211: case 0x213: case 0x215: case 0x217:
2303 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2304 wrd = (insn >> 5) & 0xf;
2305 rd0 = (insn >> 12) & 0xf;
2306 rd1 = (insn >> 0) & 0xf;
2307 if (rd0 == 0xf || rd1 == 0xf)
2308 return 1;
2309 gen_op_iwmmxt_movq_M0_wRn(wrd);
2310 tmp = load_reg(s, rd0);
2311 tmp2 = load_reg(s, rd1);
2312 switch ((insn >> 16) & 0xf) {
2313 case 0x0: /* TMIA */
2314 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2315 break;
2316 case 0x8: /* TMIAPH */
2317 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2318 break;
2319 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2320 if (insn & (1 << 16))
2321 tcg_gen_shri_i32(tmp, tmp, 16);
2322 if (insn & (1 << 17))
2323 tcg_gen_shri_i32(tmp2, tmp2, 16);
2324 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2325 break;
2326 default:
2327 dead_tmp(tmp2);
2328 dead_tmp(tmp);
2329 return 1;
2330 }
2331 dead_tmp(tmp2);
2332 dead_tmp(tmp);
2333 gen_op_iwmmxt_movq_wRn_M0(wrd);
2334 gen_op_iwmmxt_set_mup();
2335 break;
2336 default:
2337 return 1;
2338 }
2339
2340 return 0;
2341 }
2342
2343 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2344 (i.e. an undefined instruction). */
2345 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2346 {
2347 int acc, rd0, rd1, rdhi, rdlo;
2348 TCGv tmp, tmp2;
2349
2350 if ((insn & 0x0ff00f10) == 0x0e200010) {
2351 /* Multiply with Internal Accumulate Format */
2352 rd0 = (insn >> 12) & 0xf;
2353 rd1 = insn & 0xf;
2354 acc = (insn >> 5) & 7;
2355
2356 if (acc != 0)
2357 return 1;
2358
2359 tmp = load_reg(s, rd0);
2360 tmp2 = load_reg(s, rd1);
2361 switch ((insn >> 16) & 0xf) {
2362 case 0x0: /* MIA */
2363 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2364 break;
2365 case 0x8: /* MIAPH */
2366 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2367 break;
2368 case 0xc: /* MIABB */
2369 case 0xd: /* MIABT */
2370 case 0xe: /* MIATB */
2371 case 0xf: /* MIATT */
2372 if (insn & (1 << 16))
2373 tcg_gen_shri_i32(tmp, tmp, 16);
2374 if (insn & (1 << 17))
2375 tcg_gen_shri_i32(tmp2, tmp2, 16);
2376 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2377 break;
2378 default:
2379 return 1;
2380 }
2381 dead_tmp(tmp2);
2382 dead_tmp(tmp);
2383
2384 gen_op_iwmmxt_movq_wRn_M0(acc);
2385 return 0;
2386 }
2387
2388 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2389 /* Internal Accumulator Access Format */
2390 rdhi = (insn >> 16) & 0xf;
2391 rdlo = (insn >> 12) & 0xf;
2392 acc = insn & 7;
2393
2394 if (acc != 0)
2395 return 1;
2396
2397 if (insn & ARM_CP_RW_BIT) { /* MRA */
2398 iwmmxt_load_reg(cpu_V0, acc);
2399 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2400 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2401 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2402 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2403 } else { /* MAR */
2404 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2405 iwmmxt_store_reg(cpu_V0, acc);
2406 }
2407 return 0;
2408 }
2409
2410 return 1;
2411 }
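
/* Illustrative sketch (not part of the translator): the XScale DSP
 * accumulator acc0 is 40 bits wide, which is why the MRA path above
 * masks rdhi with (1 << (40 - 32)) - 1. A plain C model of the split,
 * using hypothetical names:
 */
#if 0
static void mra_model(uint64_t acc0, uint32_t *rdlo, uint32_t *rdhi)
{
    *rdlo = (uint32_t)acc0;                /* low 32 bits */
    *rdhi = (uint32_t)(acc0 >> 32) & 0xff; /* top 8 of the 40 bits */
}
#endif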
2412
2413 /* Disassemble system coprocessor instruction. Return nonzero if the
2414 instruction is not defined. */
2415 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2416 {
2417 TCGv tmp, tmp2;
2418 uint32_t rd = (insn >> 12) & 0xf;
2419 uint32_t cp = (insn >> 8) & 0xf;
2420 if (IS_USER(s)) {
2421 return 1;
2422 }
2423
2424 if (insn & ARM_CP_RW_BIT) {
2425 if (!env->cp[cp].cp_read)
2426 return 1;
2427 gen_set_pc_im(s->pc);
2428 tmp = new_tmp();
2429 tmp2 = tcg_const_i32(insn);
2430 gen_helper_get_cp(tmp, cpu_env, tmp2);
2431 tcg_temp_free(tmp2);
2432 store_reg(s, rd, tmp);
2433 } else {
2434 if (!env->cp[cp].cp_write)
2435 return 1;
2436 gen_set_pc_im(s->pc);
2437 tmp = load_reg(s, rd);
2438 tmp2 = tcg_const_i32(insn);
2439 gen_helper_set_cp(cpu_env, tmp2, tmp);
2440 tcg_temp_free(tmp2);
2441 dead_tmp(tmp);
2442 }
2443 return 0;
2444 }
2445
2446 static int cp15_user_ok(uint32_t insn)
2447 {
2448 int cpn = (insn >> 16) & 0xf;
2449 int cpm = insn & 0xf;
2450 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2451
2452 if (cpn == 13 && cpm == 0) {
2453 /* TLS register. */
2454 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2455 return 1;
2456 }
2457 if (cpn == 7) {
2458 /* ISB, DSB, DMB. */
2459 if ((cpm == 5 && op == 4)
2460 || (cpm == 10 && (op == 4 || op == 5)))
2461 return 1;
2462 }
2463 return 0;
2464 }
2465
2466 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2467 {
2468 TCGv tmp;
2469 int cpn = (insn >> 16) & 0xf;
2470 int cpm = insn & 0xf;
2471 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2472
2473 if (!arm_feature(env, ARM_FEATURE_V6K))
2474 return 0;
2475
2476 if (!(cpn == 13 && cpm == 0))
2477 return 0;
2478
2479 if (insn & ARM_CP_RW_BIT) {
2480 switch (op) {
2481 case 2:
2482 tmp = load_cpu_field(cp15.c13_tls1);
2483 break;
2484 case 3:
2485 tmp = load_cpu_field(cp15.c13_tls2);
2486 break;
2487 case 4:
2488 tmp = load_cpu_field(cp15.c13_tls3);
2489 break;
2490 default:
2491 return 0;
2492 }
2493 store_reg(s, rd, tmp);
2494
2495 } else {
2496 tmp = load_reg(s, rd);
2497 switch (op) {
2498 case 2:
2499 store_cpu_field(tmp, cp15.c13_tls1);
2500 break;
2501 case 3:
2502 store_cpu_field(tmp, cp15.c13_tls2);
2503 break;
2504 case 4:
2505 store_cpu_field(tmp, cp15.c13_tls3);
2506 break;
2507 default:
2508 dead_tmp(tmp);
2509 return 0;
2510 }
2511 }
2512 return 1;
2513 }
2514
2515 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if the
2516 instruction is not defined. */
2517 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2518 {
2519 uint32_t rd;
2520 TCGv tmp, tmp2;
2521
2522 /* M profile cores use memory mapped registers instead of cp15. */
2523 if (arm_feature(env, ARM_FEATURE_M))
2524 return 1;
2525
2526 if ((insn & (1 << 25)) == 0) {
2527 if (insn & (1 << 20)) {
2528 /* mrrc */
2529 return 1;
2530 }
2531 /* mcrr. Used for block cache operations, so implement as no-op. */
2532 return 0;
2533 }
2534 if ((insn & (1 << 4)) == 0) {
2535 /* cdp */
2536 return 1;
2537 }
2538 if (IS_USER(s) && !cp15_user_ok(insn)) {
2539 return 1;
2540 }
2541 if ((insn & 0x0fff0fff) == 0x0e070f90
2542 || (insn & 0x0fff0fff) == 0x0e070f58) {
2543 /* Wait for interrupt. */
2544 gen_set_pc_im(s->pc);
2545 s->is_jmp = DISAS_WFI;
2546 return 0;
2547 }
2548 rd = (insn >> 12) & 0xf;
2549
2550 if (cp15_tls_load_store(env, s, insn, rd))
2551 return 0;
2552
2553 tmp2 = tcg_const_i32(insn);
2554 if (insn & ARM_CP_RW_BIT) {
2555 tmp = new_tmp();
2556 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2557 /* If the destination register is r15 then set the condition codes. */
2558 if (rd != 15)
2559 store_reg(s, rd, tmp);
2560 else
2561 dead_tmp(tmp);
2562 } else {
2563 tmp = load_reg(s, rd);
2564 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2565 dead_tmp(tmp);
2566 /* Normally we would always end the TB here, but Linux
2567 * arch/arm/mach-pxa/sleep.S expects two instructions following
2568 * an MMU enable to execute from cache. Imitate this behaviour. */
2569 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2570 (insn & 0x0fff0fff) != 0x0e010f10)
2571 gen_lookup_tb(s);
2572 }
2573 tcg_temp_free_i32(tmp2);
2574 return 0;
2575 }
2576
2577 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2578 #define VFP_SREG(insn, bigbit, smallbit) \
2579 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2580 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2581 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2582 reg = (((insn) >> (bigbit)) & 0x0f) \
2583 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2584 } else { \
2585 if (insn & (1 << (smallbit))) \
2586 return 1; \
2587 reg = ((insn) >> (bigbit)) & 0x0f; \
2588 }} while (0)
2589
2590 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2591 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2592 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2593 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2594 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2595 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
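
/* Worked example (illustrative, not used by the translator): a
 * single-precision register number is 5 bits, stored as a 4-bit field
 * plus one extra low bit, whereas VFP3 double-precision numbers put the
 * extra bit on top. VFP_SREG_D thus combines insn[15:12] and insn[22];
 * a hypothetical standalone model:
 */
#if 0
static unsigned vfp_sreg_d_model(uint32_t insn)
{
    return (((insn >> 12) & 0xf) << 1) | ((insn >> 22) & 1);
}
#endif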
2596
2597 /* Move between integer and VFP cores. */
2598 static TCGv gen_vfp_mrs(void)
2599 {
2600 TCGv tmp = new_tmp();
2601 tcg_gen_mov_i32(tmp, cpu_F0s);
2602 return tmp;
2603 }
2604
2605 static void gen_vfp_msr(TCGv tmp)
2606 {
2607 tcg_gen_mov_i32(cpu_F0s, tmp);
2608 dead_tmp(tmp);
2609 }
2610
2611 static void gen_neon_dup_u8(TCGv var, int shift)
2612 {
2613 TCGv tmp = new_tmp();
2614 if (shift)
2615 tcg_gen_shri_i32(var, var, shift);
2616 tcg_gen_ext8u_i32(var, var);
2617 tcg_gen_shli_i32(tmp, var, 8);
2618 tcg_gen_or_i32(var, var, tmp);
2619 tcg_gen_shli_i32(tmp, var, 16);
2620 tcg_gen_or_i32(var, var, tmp);
2621 dead_tmp(tmp);
2622 }
2623
2624 static void gen_neon_dup_low16(TCGv var)
2625 {
2626 TCGv tmp = new_tmp();
2627 tcg_gen_ext16u_i32(var, var);
2628 tcg_gen_shli_i32(tmp, var, 16);
2629 tcg_gen_or_i32(var, var, tmp);
2630 dead_tmp(tmp);
2631 }
2632
2633 static void gen_neon_dup_high16(TCGv var)
2634 {
2635 TCGv tmp = new_tmp();
2636 tcg_gen_andi_i32(var, var, 0xffff0000);
2637 tcg_gen_shri_i32(tmp, var, 16);
2638 tcg_gen_or_i32(var, var, tmp);
2639 dead_tmp(tmp);
2640 }
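
/* Illustrative sketch (not used by the translator): the dup helpers above
 * replicate one element across a 32-bit word. On plain uint32_t values the
 * same results are, with hypothetical names:
 */
#if 0
static uint32_t neon_dup_u8_model(uint32_t v, int shift)
{
    return ((v >> shift) & 0xff) * 0x01010101u;
}

static uint32_t neon_dup_low16_model(uint32_t v)
{
    return (v & 0xffff) * 0x00010001u;
}

static uint32_t neon_dup_high16_model(uint32_t v)
{
    uint32_t hi = v & 0xffff0000u;
    return hi | (hi >> 16);
}
#endif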
2641
2642 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2643 (i.e. an undefined instruction). */
2644 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2645 {
2646 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2647 int dp, veclen;
2648 TCGv addr;
2649 TCGv tmp;
2650 TCGv tmp2;
2651
2652 if (!arm_feature(env, ARM_FEATURE_VFP))
2653 return 1;
2654
2655 if (!s->vfp_enabled) {
2656 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2657 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2658 return 1;
2659 rn = (insn >> 16) & 0xf;
2660 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2661 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2662 return 1;
2663 }
2664 dp = ((insn & 0xf00) == 0xb00);
2665 switch ((insn >> 24) & 0xf) {
2666 case 0xe:
2667 if (insn & (1 << 4)) {
2668 /* single register transfer */
2669 rd = (insn >> 12) & 0xf;
2670 if (dp) {
2671 int size;
2672 int pass;
2673
2674 VFP_DREG_N(rn, insn);
2675 if (insn & 0xf)
2676 return 1;
2677 if (insn & 0x00c00060
2678 && !arm_feature(env, ARM_FEATURE_NEON))
2679 return 1;
2680
2681 pass = (insn >> 21) & 1;
2682 if (insn & (1 << 22)) {
2683 size = 0;
2684 offset = ((insn >> 5) & 3) * 8;
2685 } else if (insn & (1 << 5)) {
2686 size = 1;
2687 offset = (insn & (1 << 6)) ? 16 : 0;
2688 } else {
2689 size = 2;
2690 offset = 0;
2691 }
2692 if (insn & ARM_CP_RW_BIT) {
2693 /* vfp->arm */
2694 tmp = neon_load_reg(rn, pass);
2695 switch (size) {
2696 case 0:
2697 if (offset)
2698 tcg_gen_shri_i32(tmp, tmp, offset);
2699 if (insn & (1 << 23))
2700 gen_uxtb(tmp);
2701 else
2702 gen_sxtb(tmp);
2703 break;
2704 case 1:
2705 if (insn & (1 << 23)) {
2706 if (offset) {
2707 tcg_gen_shri_i32(tmp, tmp, 16);
2708 } else {
2709 gen_uxth(tmp);
2710 }
2711 } else {
2712 if (offset) {
2713 tcg_gen_sari_i32(tmp, tmp, 16);
2714 } else {
2715 gen_sxth(tmp);
2716 }
2717 }
2718 break;
2719 case 2:
2720 break;
2721 }
2722 store_reg(s, rd, tmp);
2723 } else {
2724 /* arm->vfp */
2725 tmp = load_reg(s, rd);
2726 if (insn & (1 << 23)) {
2727 /* VDUP */
2728 if (size == 0) {
2729 gen_neon_dup_u8(tmp, 0);
2730 } else if (size == 1) {
2731 gen_neon_dup_low16(tmp);
2732 }
2733 for (n = 0; n <= pass * 2; n++) {
2734 tmp2 = new_tmp();
2735 tcg_gen_mov_i32(tmp2, tmp);
2736 neon_store_reg(rn, n, tmp2);
2737 }
2738 neon_store_reg(rn, n, tmp);
2739 } else {
2740 /* VMOV */
2741 switch (size) {
2742 case 0:
2743 tmp2 = neon_load_reg(rn, pass);
2744 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2745 dead_tmp(tmp2);
2746 break;
2747 case 1:
2748 tmp2 = neon_load_reg(rn, pass);
2749 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2750 dead_tmp(tmp2);
2751 break;
2752 case 2:
2753 break;
2754 }
2755 neon_store_reg(rn, pass, tmp);
2756 }
2757 }
2758 } else { /* !dp */
2759 if ((insn & 0x6f) != 0x00)
2760 return 1;
2761 rn = VFP_SREG_N(insn);
2762 if (insn & ARM_CP_RW_BIT) {
2763 /* vfp->arm */
2764 if (insn & (1 << 21)) {
2765 /* system register */
2766 rn >>= 1;
2767
2768 switch (rn) {
2769 case ARM_VFP_FPSID:
2770 /* VFP2 allows access to FPSID from userspace.
2771 VFP3 restricts all id registers to privileged
2772 accesses. */
2773 if (IS_USER(s)
2774 && arm_feature(env, ARM_FEATURE_VFP3))
2775 return 1;
2776 tmp = load_cpu_field(vfp.xregs[rn]);
2777 break;
2778 case ARM_VFP_FPEXC:
2779 if (IS_USER(s))
2780 return 1;
2781 tmp = load_cpu_field(vfp.xregs[rn]);
2782 break;
2783 case ARM_VFP_FPINST:
2784 case ARM_VFP_FPINST2:
2785 /* Not present in VFP3. */
2786 if (IS_USER(s)
2787 || arm_feature(env, ARM_FEATURE_VFP3))
2788 return 1;
2789 tmp = load_cpu_field(vfp.xregs[rn]);
2790 break;
2791 case ARM_VFP_FPSCR:
2792 if (rd == 15) {
2793 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2794 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2795 } else {
2796 tmp = new_tmp();
2797 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2798 }
2799 break;
2800 case ARM_VFP_MVFR0:
2801 case ARM_VFP_MVFR1:
2802 if (IS_USER(s)
2803 || !arm_feature(env, ARM_FEATURE_VFP3))
2804 return 1;
2805 tmp = load_cpu_field(vfp.xregs[rn]);
2806 break;
2807 default:
2808 return 1;
2809 }
2810 } else {
2811 gen_mov_F0_vreg(0, rn);
2812 tmp = gen_vfp_mrs();
2813 }
2814 if (rd == 15) {
2815 /* Set the 4 flag bits in the CPSR. */
2816 gen_set_nzcv(tmp);
2817 dead_tmp(tmp);
2818 } else {
2819 store_reg(s, rd, tmp);
2820 }
2821 } else {
2822 /* arm->vfp */
2823 tmp = load_reg(s, rd);
2824 if (insn & (1 << 21)) {
2825 rn >>= 1;
2826 /* system register */
2827 switch (rn) {
2828 case ARM_VFP_FPSID:
2829 case ARM_VFP_MVFR0:
2830 case ARM_VFP_MVFR1:
2831 /* Writes are ignored. */
2832 break;
2833 case ARM_VFP_FPSCR:
2834 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2835 dead_tmp(tmp);
2836 gen_lookup_tb(s);
2837 break;
2838 case ARM_VFP_FPEXC:
2839 if (IS_USER(s))
2840 return 1;
2841 /* TODO: VFP subarchitecture support.
2842 * For now, keep the EN bit only. */
2843 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2844 store_cpu_field(tmp, vfp.xregs[rn]);
2845 gen_lookup_tb(s);
2846 break;
2847 case ARM_VFP_FPINST:
2848 case ARM_VFP_FPINST2:
2849 store_cpu_field(tmp, vfp.xregs[rn]);
2850 break;
2851 default:
2852 return 1;
2853 }
2854 } else {
2855 gen_vfp_msr(tmp);
2856 gen_mov_vreg_F0(0, rn);
2857 }
2858 }
2859 }
2860 } else {
2861 /* data processing */
2862 /* The opcode is in bits 23, 21, 20 and 6. */
2863 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2864 if (dp) {
2865 if (op == 15) {
2866 /* rn is opcode */
2867 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2868 } else {
2869 /* rn is register number */
2870 VFP_DREG_N(rn, insn);
2871 }
2872
2873 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2874 /* Integer or single precision destination. */
2875 rd = VFP_SREG_D(insn);
2876 } else {
2877 VFP_DREG_D(rd, insn);
2878 }
2879 if (op == 15 &&
2880 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2881 /* VCVT from int is always from S reg regardless of dp bit.
2882 * VCVT with immediate frac_bits has the same format as SREG_M
2883 */
2884 rm = VFP_SREG_M(insn);
2885 } else {
2886 VFP_DREG_M(rm, insn);
2887 }
2888 } else {
2889 rn = VFP_SREG_N(insn);
2890 if (op == 15 && rn == 15) {
2891 /* Double precision destination. */
2892 VFP_DREG_D(rd, insn);
2893 } else {
2894 rd = VFP_SREG_D(insn);
2895 }
2896 /* NB that we implicitly rely on the encoding for the frac_bits
2897 * in VCVT of fixed to float being the same as that of an SREG_M
2898 */
2899 rm = VFP_SREG_M(insn);
2900 }
2901
2902 veclen = s->vec_len;
2903 if (op == 15 && rn > 3)
2904 veclen = 0;
2905
2906 /* Shut up compiler warnings. */
2907 delta_m = 0;
2908 delta_d = 0;
2909 bank_mask = 0;
2910
2911 if (veclen > 0) {
2912 if (dp)
2913 bank_mask = 0xc;
2914 else
2915 bank_mask = 0x18;
2916
2917 /* Figure out what type of vector operation this is. */
2918 if ((rd & bank_mask) == 0) {
2919 /* scalar */
2920 veclen = 0;
2921 } else {
2922 if (dp)
2923 delta_d = (s->vec_stride >> 1) + 1;
2924 else
2925 delta_d = s->vec_stride + 1;
2926
2927 if ((rm & bank_mask) == 0) {
2928 /* mixed scalar/vector */
2929 delta_m = 0;
2930 } else {
2931 /* vector */
2932 delta_m = delta_d;
2933 }
2934 }
2935 }
2936
2937 /* Load the initial operands. */
2938 if (op == 15) {
2939 switch (rn) {
2940 case 16:
2941 case 17:
2942 /* Integer source */
2943 gen_mov_F0_vreg(0, rm);
2944 break;
2945 case 8:
2946 case 9:
2947 /* Compare */
2948 gen_mov_F0_vreg(dp, rd);
2949 gen_mov_F1_vreg(dp, rm);
2950 break;
2951 case 10:
2952 case 11:
2953 /* Compare with zero */
2954 gen_mov_F0_vreg(dp, rd);
2955 gen_vfp_F1_ld0(dp);
2956 break;
2957 case 20:
2958 case 21:
2959 case 22:
2960 case 23:
2961 case 28:
2962 case 29:
2963 case 30:
2964 case 31:
2965 /* Source and destination the same. */
2966 gen_mov_F0_vreg(dp, rd);
2967 break;
2968 default:
2969 /* One source operand. */
2970 gen_mov_F0_vreg(dp, rm);
2971 break;
2972 }
2973 } else {
2974 /* Two source operands. */
2975 gen_mov_F0_vreg(dp, rn);
2976 gen_mov_F1_vreg(dp, rm);
2977 }
2978
2979 for (;;) {
2980 /* Perform the calculation. */
2981 switch (op) {
2982 case 0: /* mac: fd + (fn * fm) */
2983 gen_vfp_mul(dp);
2984 gen_mov_F1_vreg(dp, rd);
2985 gen_vfp_add(dp);
2986 break;
2987 case 1: /* nmac: fd - (fn * fm) */
2988 gen_vfp_mul(dp);
2989 gen_vfp_neg(dp);
2990 gen_mov_F1_vreg(dp, rd);
2991 gen_vfp_add(dp);
2992 break;
2993 case 2: /* msc: -fd + (fn * fm) */
2994 gen_vfp_mul(dp);
2995 gen_mov_F1_vreg(dp, rd);
2996 gen_vfp_sub(dp);
2997 break;
2998 case 3: /* nmsc: -fd - (fn * fm) */
2999 gen_vfp_mul(dp);
3000 gen_vfp_neg(dp);
3001 gen_mov_F1_vreg(dp, rd);
3002 gen_vfp_sub(dp);
3003 break;
3004 case 4: /* mul: fn * fm */
3005 gen_vfp_mul(dp);
3006 break;
3007 case 5: /* nmul: -(fn * fm) */
3008 gen_vfp_mul(dp);
3009 gen_vfp_neg(dp);
3010 break;
3011 case 6: /* add: fn + fm */
3012 gen_vfp_add(dp);
3013 break;
3014 case 7: /* sub: fn - fm */
3015 gen_vfp_sub(dp);
3016 break;
3017 case 8: /* div: fn / fm */
3018 gen_vfp_div(dp);
3019 break;
3020 case 14: /* fconst */
3021 if (!arm_feature(env, ARM_FEATURE_VFP3))
3022 return 1;
3023
3024 n = (insn << 12) & 0x80000000;
3025 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3026 if (dp) {
3027 if (i & 0x40)
3028 i |= 0x3f80;
3029 else
3030 i |= 0x4000;
3031 n |= i << 16;
3032 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3033 } else {
3034 if (i & 0x40)
3035 i |= 0x780;
3036 else
3037 i |= 0x800;
3038 n |= i << 19;
3039 tcg_gen_movi_i32(cpu_F0s, n);
3040 }
3041 break;
3042 case 15: /* extension space */
3043 switch (rn) {
3044 case 0: /* cpy */
3045 /* no-op */
3046 break;
3047 case 1: /* abs */
3048 gen_vfp_abs(dp);
3049 break;
3050 case 2: /* neg */
3051 gen_vfp_neg(dp);
3052 break;
3053 case 3: /* sqrt */
3054 gen_vfp_sqrt(dp);
3055 break;
3056 case 4: /* vcvtb.f32.f16 */
3057 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3058 return 1;
3059 tmp = gen_vfp_mrs();
3060 tcg_gen_ext16u_i32(tmp, tmp);
3061 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3062 dead_tmp(tmp);
3063 break;
3064 case 5: /* vcvtt.f32.f16 */
3065 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3066 return 1;
3067 tmp = gen_vfp_mrs();
3068 tcg_gen_shri_i32(tmp, tmp, 16);
3069 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3070 dead_tmp(tmp);
3071 break;
3072 case 6: /* vcvtb.f16.f32 */
3073 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3074 return 1;
3075 tmp = new_tmp();
3076 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3077 gen_mov_F0_vreg(0, rd);
3078 tmp2 = gen_vfp_mrs();
3079 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3080 tcg_gen_or_i32(tmp, tmp, tmp2);
3081 dead_tmp(tmp2);
3082 gen_vfp_msr(tmp);
3083 break;
3084 case 7: /* vcvtt.f16.f32 */
3085 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3086 return 1;
3087 tmp = new_tmp();
3088 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3089 tcg_gen_shli_i32(tmp, tmp, 16);
3090 gen_mov_F0_vreg(0, rd);
3091 tmp2 = gen_vfp_mrs();
3092 tcg_gen_ext16u_i32(tmp2, tmp2);
3093 tcg_gen_or_i32(tmp, tmp, tmp2);
3094 dead_tmp(tmp2);
3095 gen_vfp_msr(tmp);
3096 break;
3097 case 8: /* cmp */
3098 gen_vfp_cmp(dp);
3099 break;
3100 case 9: /* cmpe */
3101 gen_vfp_cmpe(dp);
3102 break;
3103 case 10: /* cmpz */
3104 gen_vfp_cmp(dp);
3105 break;
3106 case 11: /* cmpez */
3107 gen_vfp_F1_ld0(dp);
3108 gen_vfp_cmpe(dp);
3109 break;
3110 case 15: /* single<->double conversion */
3111 if (dp)
3112 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3113 else
3114 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3115 break;
3116 case 16: /* fuito */
3117 gen_vfp_uito(dp);
3118 break;
3119 case 17: /* fsito */
3120 gen_vfp_sito(dp);
3121 break;
3122 case 20: /* fshto */
3123 if (!arm_feature(env, ARM_FEATURE_VFP3))
3124 return 1;
3125 gen_vfp_shto(dp, 16 - rm);
3126 break;
3127 case 21: /* fslto */
3128 if (!arm_feature(env, ARM_FEATURE_VFP3))
3129 return 1;
3130 gen_vfp_slto(dp, 32 - rm);
3131 break;
3132 case 22: /* fuhto */
3133 if (!arm_feature(env, ARM_FEATURE_VFP3))
3134 return 1;
3135 gen_vfp_uhto(dp, 16 - rm);
3136 break;
3137 case 23: /* fulto */
3138 if (!arm_feature(env, ARM_FEATURE_VFP3))
3139 return 1;
3140 gen_vfp_ulto(dp, 32 - rm);
3141 break;
3142 case 24: /* ftoui */
3143 gen_vfp_toui(dp);
3144 break;
3145 case 25: /* ftouiz */
3146 gen_vfp_touiz(dp);
3147 break;
3148 case 26: /* ftosi */
3149 gen_vfp_tosi(dp);
3150 break;
3151 case 27: /* ftosiz */
3152 gen_vfp_tosiz(dp);
3153 break;
3154 case 28: /* ftosh */
3155 if (!arm_feature(env, ARM_FEATURE_VFP3))
3156 return 1;
3157 gen_vfp_tosh(dp, 16 - rm);
3158 break;
3159 case 29: /* ftosl */
3160 if (!arm_feature(env, ARM_FEATURE_VFP3))
3161 return 1;
3162 gen_vfp_tosl(dp, 32 - rm);
3163 break;
3164 case 30: /* ftouh */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
3167 gen_vfp_touh(dp, 16 - rm);
3168 break;
3169 case 31: /* ftoul */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
3172 gen_vfp_toul(dp, 32 - rm);
3173 break;
3174 default: /* undefined */
3175 printf ("rn:%d\n", rn);
3176 return 1;
3177 }
3178 break;
3179 default: /* undefined */
3180 printf ("op:%d\n", op);
3181 return 1;
3182 }
3183
3184 /* Write back the result. */
3185 if (op == 15 && (rn >= 8 && rn <= 11))
3186 ; /* Comparison, do nothing. */
3187 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3188 /* VCVT double to int: always integer result. */
3189 gen_mov_vreg_F0(0, rd);
3190 else if (op == 15 && rn == 15)
3191 /* conversion */
3192 gen_mov_vreg_F0(!dp, rd);
3193 else
3194 gen_mov_vreg_F0(dp, rd);
3195
3196 /* break out of the loop if we have finished */
3197 if (veclen == 0)
3198 break;
3199
3200 if (op == 15 && delta_m == 0) {
3201 /* single source one-many */
3202 while (veclen--) {
3203 rd = ((rd + delta_d) & (bank_mask - 1))
3204 | (rd & bank_mask);
3205 gen_mov_vreg_F0(dp, rd);
3206 }
3207 break;
3208 }
3209 /* Set up the next operands. */
3210 veclen--;
3211 rd = ((rd + delta_d) & (bank_mask - 1))
3212 | (rd & bank_mask);
3213
3214 if (op == 15) {
3215 /* One source operand. */
3216 rm = ((rm + delta_m) & (bank_mask - 1))
3217 | (rm & bank_mask);
3218 gen_mov_F0_vreg(dp, rm);
3219 } else {
3220 /* Two source operands. */
3221 rn = ((rn + delta_d) & (bank_mask - 1))
3222 | (rn & bank_mask);
3223 gen_mov_F0_vreg(dp, rn);
3224 if (delta_m) {
3225 rm = ((rm + delta_m) & (bank_mask - 1))
3226 | (rm & bank_mask);
3227 gen_mov_F1_vreg(dp, rm);
3228 }
3229 }
3230 }
3231 }
3232 break;
3233 case 0xc:
3234 case 0xd:
3235 if ((insn & 0x03e00000) == 0x00400000) {
3236 /* two-register transfer */
3237 rn = (insn >> 16) & 0xf;
3238 rd = (insn >> 12) & 0xf;
3239 if (dp) {
3240 VFP_DREG_M(rm, insn);
3241 } else {
3242 rm = VFP_SREG_M(insn);
3243 }
3244
3245 if (insn & ARM_CP_RW_BIT) {
3246 /* vfp->arm */
3247 if (dp) {
3248 gen_mov_F0_vreg(0, rm * 2);
3249 tmp = gen_vfp_mrs();
3250 store_reg(s, rd, tmp);
3251 gen_mov_F0_vreg(0, rm * 2 + 1);
3252 tmp = gen_vfp_mrs();
3253 store_reg(s, rn, tmp);
3254 } else {
3255 gen_mov_F0_vreg(0, rm);
3256 tmp = gen_vfp_mrs();
3257 store_reg(s, rn, tmp);
3258 gen_mov_F0_vreg(0, rm + 1);
3259 tmp = gen_vfp_mrs();
3260 store_reg(s, rd, tmp);
3261 }
3262 } else {
3263 /* arm->vfp */
3264 if (dp) {
3265 tmp = load_reg(s, rd);
3266 gen_vfp_msr(tmp);
3267 gen_mov_vreg_F0(0, rm * 2);
3268 tmp = load_reg(s, rn);
3269 gen_vfp_msr(tmp);
3270 gen_mov_vreg_F0(0, rm * 2 + 1);
3271 } else {
3272 tmp = load_reg(s, rn);
3273 gen_vfp_msr(tmp);
3274 gen_mov_vreg_F0(0, rm);
3275 tmp = load_reg(s, rd);
3276 gen_vfp_msr(tmp);
3277 gen_mov_vreg_F0(0, rm + 1);
3278 }
3279 }
3280 } else {
3281 /* Load/store */
3282 rn = (insn >> 16) & 0xf;
3283 if (dp)
3284 VFP_DREG_D(rd, insn);
3285 else
3286 rd = VFP_SREG_D(insn);
3287 if (s->thumb && rn == 15) {
3288 addr = new_tmp();
3289 tcg_gen_movi_i32(addr, s->pc & ~2);
3290 } else {
3291 addr = load_reg(s, rn);
3292 }
3293 if ((insn & 0x01200000) == 0x01000000) {
3294 /* Single load/store */
3295 offset = (insn & 0xff) << 2;
3296 if ((insn & (1 << 23)) == 0)
3297 offset = -offset;
3298 tcg_gen_addi_i32(addr, addr, offset);
3299 if (insn & (1 << 20)) {
3300 gen_vfp_ld(s, dp, addr);
3301 gen_mov_vreg_F0(dp, rd);
3302 } else {
3303 gen_mov_F0_vreg(dp, rd);
3304 gen_vfp_st(s, dp, addr);
3305 }
3306 dead_tmp(addr);
3307 } else {
3308 /* load/store multiple */
3309 if (dp)
3310 n = (insn >> 1) & 0x7f;
3311 else
3312 n = insn & 0xff;
3313
3314 if (insn & (1 << 24)) /* pre-decrement */
3315 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3316
3317 if (dp)
3318 offset = 8;
3319 else
3320 offset = 4;
3321 for (i = 0; i < n; i++) {
3322 if (insn & ARM_CP_RW_BIT) {
3323 /* load */
3324 gen_vfp_ld(s, dp, addr);
3325 gen_mov_vreg_F0(dp, rd + i);
3326 } else {
3327 /* store */
3328 gen_mov_F0_vreg(dp, rd + i);
3329 gen_vfp_st(s, dp, addr);
3330 }
3331 tcg_gen_addi_i32(addr, addr, offset);
3332 }
3333 if (insn & (1 << 21)) {
3334 /* writeback */
3335 if (insn & (1 << 24))
3336 offset = -offset * n;
3337 else if (dp && (insn & 1))
3338 offset = 4;
3339 else
3340 offset = 0;
3341
3342 if (offset != 0)
3343 tcg_gen_addi_i32(addr, addr, offset);
3344 store_reg(s, rn, addr);
3345 } else {
3346 dead_tmp(addr);
3347 }
3348 }
3349 }
3350 break;
3351 default:
3352 /* Should never happen. */
3353 return 1;
3354 }
3355 return 0;
3356 }
3357
3358 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3359 {
3360 TranslationBlock *tb;
3361
3362 tb = s->tb;
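/* Direct chaining is only used when the branch stays on the same guest
   page; cross-page jumps exit to the main loop so the destination is
   looked up afresh. */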
3363 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3364 tcg_gen_goto_tb(n);
3365 gen_set_pc_im(dest);
3366 tcg_gen_exit_tb((long)tb + n);
3367 } else {
3368 gen_set_pc_im(dest);
3369 tcg_gen_exit_tb(0);
3370 }
3371 }
3372
3373 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3374 {
3375 if (unlikely(s->singlestep_enabled)) {
3376 /* An indirect jump so that we still trigger the debug exception. */
3377 if (s->thumb)
3378 dest |= 1;
3379 gen_bx_im(s, dest);
3380 } else {
3381 gen_goto_tb(s, 0, dest);
3382 s->is_jmp = DISAS_TB_JUMP;
3383 }
3384 }
3385
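/* Signed 16x16->32 multiply, taking the top (x/y = 1) or bottom (x/y = 0)
   halfword of each operand; used by the SMULxy/SMLAxy-style patterns. */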
3386 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3387 {
3388 if (x)
3389 tcg_gen_sari_i32(t0, t0, 16);
3390 else
3391 gen_sxth(t0);
3392 if (y)
3393 tcg_gen_sari_i32(t1, t1, 16);
3394 else
3395 gen_sxth(t1);
3396 tcg_gen_mul_i32(t0, t0, t1);
3397 }
3398
3399 /* Return the mask of PSR bits set by a MSR instruction. */
3400 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3401 uint32_t mask;
3402
3403 mask = 0;
3404 if (flags & (1 << 0))
3405 mask |= 0xff;
3406 if (flags & (1 << 1))
3407 mask |= 0xff00;
3408 if (flags & (1 << 2))
3409 mask |= 0xff0000;
3410 if (flags & (1 << 3))
3411 mask |= 0xff000000;
3412
3413 /* Mask out undefined bits. */
3414 mask &= ~CPSR_RESERVED;
3415 if (!arm_feature(env, ARM_FEATURE_V6))
3416 mask &= ~(CPSR_E | CPSR_GE);
3417 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3418 mask &= ~CPSR_IT;
3419 /* Mask out execution state bits. */
3420 if (!spsr)
3421 mask &= ~CPSR_EXEC;
3422 /* Mask out privileged bits. */
3423 if (IS_USER(s))
3424 mask &= CPSR_USER;
3425 return mask;
3426 }
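
/* Worked example (illustrative, not used by the code): an MSR targeting
 * the "c" and "f" fields has flags = 0x9, giving a mask of 0xff0000ff
 * before the feature-dependent bits are cleared above. A model of the
 * field expansion, with a hypothetical name:
 */
#if 0
static uint32_t msr_field_mask_model(int flags)
{
    uint32_t mask = 0;
    int i;

    for (i = 0; i < 4; i++) {
        if (flags & (1 << i))
            mask |= 0xffu << (8 * i);
    }
    return mask; /* e.g. flags = 0x9 -> 0xff0000ff */
}
#endif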
3427
3428 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3429 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3430 {
3431 TCGv tmp;
3432 if (spsr) {
3433 /* ??? This is also undefined in system mode. */
3434 if (IS_USER(s))
3435 return 1;
3436
3437 tmp = load_cpu_field(spsr);
3438 tcg_gen_andi_i32(tmp, tmp, ~mask);
3439 tcg_gen_andi_i32(t0, t0, mask);
3440 tcg_gen_or_i32(tmp, tmp, t0);
3441 store_cpu_field(tmp, spsr);
3442 } else {
3443 gen_set_cpsr(t0, mask);
3444 }
3445 dead_tmp(t0);
3446 gen_lookup_tb(s);
3447 return 0;
3448 }
3449
3450 /* Returns nonzero if access to the PSR is not permitted. */
3451 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3452 {
3453 TCGv tmp;
3454 tmp = new_tmp();
3455 tcg_gen_movi_i32(tmp, val);
3456 return gen_set_psr(s, mask, spsr, tmp);
3457 }
3458
3459 /* Generate an old-style exception return. Marks pc as dead. */
3460 static void gen_exception_return(DisasContext *s, TCGv pc)
3461 {
3462 TCGv tmp;
3463 store_reg(s, 15, pc);
3464 tmp = load_cpu_field(spsr);
3465 gen_set_cpsr(tmp, 0xffffffff);
3466 dead_tmp(tmp);
3467 s->is_jmp = DISAS_UPDATE;
3468 }
3469
3470 /* Generate a v6 exception return. Marks both values as dead. */
3471 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3472 {
3473 gen_set_cpsr(cpsr, 0xffffffff);
3474 dead_tmp(cpsr);
3475 store_reg(s, 15, pc);
3476 s->is_jmp = DISAS_UPDATE;
3477 }
3478
3479 static inline void
3480 gen_set_condexec (DisasContext *s)
3481 {
3482 if (s->condexec_mask) {
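/* Pack the condition into bits [7:4] and the remaining execution
   mask into bits [3:0]. */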
3483 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3484 TCGv tmp = new_tmp();
3485 tcg_gen_movi_i32(tmp, val);
3486 store_cpu_field(tmp, condexec_bits);
3487 }
3488 }
3489
3490 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3491 {
3492 gen_set_condexec(s);
3493 gen_set_pc_im(s->pc - offset);
3494 gen_exception(excp);
3495 s->is_jmp = DISAS_JUMP;
3496 }
3497
3498 static void gen_nop_hint(DisasContext *s, int val)
3499 {
3500 switch (val) {
3501 case 3: /* wfi */
3502 gen_set_pc_im(s->pc);
3503 s->is_jmp = DISAS_WFI;
3504 break;
3505 case 2: /* wfe */
3506 case 4: /* sev */
3507 /* TODO: Implement SEV and WFE. May help SMP performance. */
3508 default: /* nop */
3509 break;
3510 }
3511 }
3512
3513 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3514
3515 static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
3516 {
3517 switch (size) {
3518 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3519 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3520 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3521 default: return 1;
3522 }
3523 return 0;
3524 }
3525
3526 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3527 {
3528 switch (size) {
3529 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3530 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3531 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3532 default: return;
3533 }
3534 }
3535
3536 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3537 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3538 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3539 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3540 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3541
3542 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3543 switch ((size << 1) | u) { \
3544 case 0: \
3545 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3546 break; \
3547 case 1: \
3548 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3549 break; \
3550 case 2: \
3551 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3552 break; \
3553 case 3: \
3554 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3555 break; \
3556 case 4: \
3557 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3558 break; \
3559 case 5: \
3560 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3561 break; \
3562 default: return 1; \
3563 }} while (0)
3564
3565 #define GEN_NEON_INTEGER_OP(name) do { \
3566 switch ((size << 1) | u) { \
3567 case 0: \
3568 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3569 break; \
3570 case 1: \
3571 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3572 break; \
3573 case 2: \
3574 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3575 break; \
3576 case 3: \
3577 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3578 break; \
3579 case 4: \
3580 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3581 break; \
3582 case 5: \
3583 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3584 break; \
3585 default: return 1; \
3586 }} while (0)
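
/* Illustrative expansion: with size == 1 and u == 0, (size << 1) | u == 2,
 * so GEN_NEON_INTEGER_OP(hadd) reduces to
 *     gen_helper_neon_hadd_s16(tmp, tmp, tmp2);
 * i.e. the macros just dispatch on element width and signedness. */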
3587
3588 static TCGv neon_load_scratch(int scratch)
3589 {
3590 TCGv tmp = new_tmp();
3591 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3592 return tmp;
3593 }
3594
3595 static void neon_store_scratch(int scratch, TCGv var)
3596 {
3597 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3598 dead_tmp(var);
3599 }
3600
3601 static inline TCGv neon_get_scalar(int size, int reg)
3602 {
3603 TCGv tmp;
3604 if (size == 1) {
3605 tmp = neon_load_reg(reg & 7, reg >> 4);
3606 if (reg & 8) {
3607 gen_neon_dup_high16(tmp);
3608 } else {
3609 gen_neon_dup_low16(tmp);
3610 }
3611 } else {
3612 tmp = neon_load_reg(reg & 15, reg >> 4);
3613 }
3614 return tmp;
3615 }
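
/* (The scalar index above is pre-packed by the caller: for 16-bit scalars
   bits [2:0] pick the register, bit 3 the halfword within the 32-bit chunk
   and bit 4 the pass; for 32-bit scalars bits [3:0] pick the register.) */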
3616
3617 static int gen_neon_unzip(int rd, int rm, int size, int q)
3618 {
3619 TCGv tmp, tmp2;
3620 if (size == 3 || (!q && size == 2)) {
3621 return 1;
3622 }
3623 tmp = tcg_const_i32(rd);
3624 tmp2 = tcg_const_i32(rm);
3625 if (q) {
3626 switch (size) {
3627 case 0:
3628 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3629 break;
3630 case 1:
3631 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3632 break;
3633 case 2:
3634 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3635 break;
3636 default:
3637 abort();
3638 }
3639 } else {
3640 switch (size) {
3641 case 0:
3642 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3643 break;
3644 case 1:
3645 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3646 break;
3647 default:
3648 abort();
3649 }
3650 }
3651 tcg_temp_free_i32(tmp);
3652 tcg_temp_free_i32(tmp2);
3653 return 0;
3654 }
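
/* Illustrative sketch (not used by the code): VUZP de-interleaves. Over two
 * 32-bit words of bytes, inputs (a3 a2 a1 a0) and (b3 b2 b1 b0) become the
 * even elements (b2 b0 a2 a0) and the odd elements (b3 b1 a3 a1). A plain C
 * model with hypothetical names:
 */
#if 0
static void neon_unzip8_model(uint32_t *t0, uint32_t *t1)
{
    uint32_t a = *t0, b = *t1, even = 0, odd = 0;
    int i;

    for (i = 0; i < 4; i++) {
        uint32_t src = (i < 2) ? a : b;
        int base = (i & 1) * 16;       /* which byte pair of src */
        even |= ((src >> base) & 0xff) << (8 * i);
        odd  |= ((src >> (base + 8)) & 0xff) << (8 * i);
    }
    *t0 = even;
    *t1 = odd;
}
#endif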
3655
3656 static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3657 {
3658 TCGv rd, rm, tmp;
3659
3660 rd = new_tmp();
3661 rm = new_tmp();
3662 tmp = new_tmp();
3663
3664 tcg_gen_andi_i32(rd, t0, 0xff);
3665 tcg_gen_shli_i32(tmp, t1, 8);
3666 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3667 tcg_gen_or_i32(rd, rd, tmp);
3668 tcg_gen_shli_i32(tmp, t0, 8);
3669 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3670 tcg_gen_or_i32(rd, rd, tmp);
3671 tcg_gen_shli_i32(tmp, t1, 16);
3672 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3673 tcg_gen_or_i32(rd, rd, tmp);
3674
3675 tcg_gen_andi_i32(rm, t1, 0xff000000);
3676 tcg_gen_shri_i32(tmp, t0, 8);
3677 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3678 tcg_gen_or_i32(rm, rm, tmp);
3679 tcg_gen_shri_i32(tmp, t1, 8);
3680 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3681 tcg_gen_or_i32(rm, rm, tmp);
3682 tcg_gen_shri_i32(tmp, t0, 16);
3683 tcg_gen_andi_i32(tmp, tmp, 0xff);
3684 tcg_gen_or_i32(t1, rm, tmp);
3685 tcg_gen_mov_i32(t0, rd);
3686
3687 dead_tmp(tmp);
3688 dead_tmp(rm);
3689 dead_tmp(rd);
3690 }
3691
3692 static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3693 {
3694 TCGv tmp, tmp2;
3695
3696 tmp = new_tmp();
3697 tmp2 = new_tmp();
3698
3699 tcg_gen_andi_i32(tmp, t0, 0xffff);
3700 tcg_gen_shli_i32(tmp2, t1, 16);
3701 tcg_gen_or_i32(tmp, tmp, tmp2);
3702 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3703 tcg_gen_shri_i32(tmp2, t0, 16);
3704 tcg_gen_or_i32(t1, t1, tmp2);
3705 tcg_gen_mov_i32(t0, tmp);
3706
3707 dead_tmp(tmp2);
3708 dead_tmp(tmp);
3709 }
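
/* Illustrative sketch (not used by the code): VZIP interleaves, so over two
 * 32-bit words of bytes t0 = (a3 a2 a1 a0) and t1 = (b3 b2 b1 b0) the
 * results are t0' = (b1 a1 b0 a0) and t1' = (b3 a3 b2 a2), which is what
 * gen_neon_zip_u8 above open-codes. A plain C model, hypothetical names:
 */
#if 0
static void neon_zip8_model(uint32_t *t0, uint32_t *t1)
{
    uint32_t a = *t0, b = *t1, lo = 0, hi = 0;
    int i;

    for (i = 0; i < 2; i++) {
        lo |= ((a >> (8 * i)) & 0xff) << (16 * i);
        lo |= ((b >> (8 * i)) & 0xff) << (16 * i + 8);
        hi |= ((a >> (8 * (i + 2))) & 0xff) << (16 * i);
        hi |= ((b >> (8 * (i + 2))) & 0xff) << (16 * i + 8);
    }
    *t0 = lo;
    *t1 = hi;
}
#endif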
3710
3711 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3712 {
3713 TCGv rd, tmp;
3714
3715 rd = new_tmp();
3716 tmp = new_tmp();
3717
3718 tcg_gen_shli_i32(rd, t0, 8);
3719 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3720 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3721 tcg_gen_or_i32(rd, rd, tmp);
3722
3723 tcg_gen_shri_i32(t1, t1, 8);
3724 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3725 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3726 tcg_gen_or_i32(t1, t1, tmp);
3727 tcg_gen_mov_i32(t0, rd);
3728
3729 dead_tmp(tmp);
3730 dead_tmp(rd);
3731 }
3732
3733 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3734 {
3735 TCGv rd, tmp;
3736
3737 rd = new_tmp();
3738 tmp = new_tmp();
3739
3740 tcg_gen_shli_i32(rd, t0, 16);
3741 tcg_gen_andi_i32(tmp, t1, 0xffff);
3742 tcg_gen_or_i32(rd, rd, tmp);
3743 tcg_gen_shri_i32(t1, t1, 16);
3744 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3745 tcg_gen_or_i32(t1, t1, tmp);
3746 tcg_gen_mov_i32(t0, rd);
3747
3748 dead_tmp(tmp);
3749 dead_tmp(rd);
3750 }
3751
3752
3753 static struct {
3754 int nregs;
3755 int interleave;
3756 int spacing;
3757 } neon_ls_element_type[11] = {
3758 {4, 4, 1},
3759 {4, 4, 2},
3760 {4, 1, 1},
3761 {4, 2, 1},
3762 {3, 3, 1},
3763 {3, 3, 2},
3764 {3, 1, 1},
3765 {1, 1, 1},
3766 {2, 2, 1},
3767 {2, 2, 2},
3768 {2, 1, 1}
3769 };
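
/* nregs is the number of registers transferred, interleave how many of them
   are interleaved in memory, and spacing the register-number stride: e.g.
   entry 0 ({4, 4, 1}) is the fully interleaved 4-register form and entry 7
   ({1, 1, 1}) the plain single-register VLD1/VST1 case. */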
3770
3771 /* Translate a NEON load/store element instruction. Return nonzero if the
3772 instruction is invalid. */
3773 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3774 {
3775 int rd, rn, rm;
3776 int op;
3777 int nregs;
3778 int interleave;
3779 int spacing;
3780 int stride;
3781 int size;
3782 int reg;
3783 int pass;
3784 int load;
3785 int shift;
3786 int n;
3787 TCGv addr;
3788 TCGv tmp;
3789 TCGv tmp2;
3790 TCGv_i64 tmp64;
3791
3792 if (!s->vfp_enabled)
3793 return 1;
3794 VFP_DREG_D(rd, insn);
3795 rn = (insn >> 16) & 0xf;
3796 rm = insn & 0xf;
3797 load = (insn & (1 << 21)) != 0;
3798 addr = new_tmp();
3799 if ((insn & (1 << 23)) == 0) {
3800 /* Load/store all elements. */
3801 op = (insn >> 8) & 0xf;
3802 size = (insn >> 6) & 3;
3803 if (op > 10)
3804 return 1;
3805 nregs = neon_ls_element_type[op].nregs;
3806 interleave = neon_ls_element_type[op].interleave;
3807 spacing = neon_ls_element_type[op].spacing;
3808 if (size == 3 && (interleave | spacing) != 1)
3809 return 1;
3810 load_reg_var(s, addr, rn);
3811 stride = (1 << size) * interleave;
3812 for (reg = 0; reg < nregs; reg++) {
3813 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3814 load_reg_var(s, addr, rn);
3815 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3816 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3817 load_reg_var(s, addr, rn);
3818 tcg_gen_addi_i32(addr, addr, 1 << size);
3819 }
3820 if (size == 3) {
3821 if (load) {
3822 tmp64 = gen_ld64(addr, IS_USER(s));
3823 neon_store_reg64(tmp64, rd);
3824 tcg_temp_free_i64(tmp64);
3825 } else {
3826 tmp64 = tcg_temp_new_i64();
3827 neon_load_reg64(tmp64, rd);
3828 gen_st64(tmp64, addr, IS_USER(s));
3829 }
3830 tcg_gen_addi_i32(addr, addr, stride);
3831 } else {
3832 for (pass = 0; pass < 2; pass++) {
3833 if (size == 2) {
3834 if (load) {
3835 tmp = gen_ld32(addr, IS_USER(s));
3836 neon_store_reg(rd, pass, tmp);
3837 } else {
3838 tmp = neon_load_reg(rd, pass);
3839 gen_st32(tmp, addr, IS_USER(s));
3840 }
3841 tcg_gen_addi_i32(addr, addr, stride);
3842 } else if (size == 1) {
3843 if (load) {
3844 tmp = gen_ld16u(addr, IS_USER(s));
3845 tcg_gen_addi_i32(addr, addr, stride);
3846 tmp2 = gen_ld16u(addr, IS_USER(s));
3847 tcg_gen_addi_i32(addr, addr, stride);
3848 tcg_gen_shli_i32(tmp2, tmp2, 16);
3849 tcg_gen_or_i32(tmp, tmp, tmp2);
3850 dead_tmp(tmp2);
3851 neon_store_reg(rd, pass, tmp);
3852 } else {
3853 tmp = neon_load_reg(rd, pass);
3854 tmp2 = new_tmp();
3855 tcg_gen_shri_i32(tmp2, tmp, 16);
3856 gen_st16(tmp, addr, IS_USER(s));
3857 tcg_gen_addi_i32(addr, addr, stride);
3858 gen_st16(tmp2, addr, IS_USER(s));
3859 tcg_gen_addi_i32(addr, addr, stride);
3860 }
3861 } else /* size == 0 */ {
3862 if (load) {
3863 TCGV_UNUSED(tmp2);
3864 for (n = 0; n < 4; n++) {
3865 tmp = gen_ld8u(addr, IS_USER(s));
3866 tcg_gen_addi_i32(addr, addr, stride);
3867 if (n == 0) {
3868 tmp2 = tmp;
3869 } else {
3870 tcg_gen_shli_i32(tmp, tmp, n * 8);
3871 tcg_gen_or_i32(tmp2, tmp2, tmp);
3872 dead_tmp(tmp);
3873 }
3874 }
3875 neon_store_reg(rd, pass, tmp2);
3876 } else {
3877 tmp2 = neon_load_reg(rd, pass);
3878 for (n = 0; n < 4; n++) {
3879 tmp = new_tmp();
3880 if (n == 0) {
3881 tcg_gen_mov_i32(tmp, tmp2);
3882 } else {
3883 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3884 }
3885 gen_st8(tmp, addr, IS_USER(s));
3886 tcg_gen_addi_i32(addr, addr, stride);
3887 }
3888 dead_tmp(tmp2);
3889 }
3890 }
3891 }
3892 }
3893 rd += spacing;
3894 }
3895 stride = nregs * 8;
3896 } else {
3897 size = (insn >> 10) & 3;
3898 if (size == 3) {
3899 /* Load single element to all lanes. */
3900 if (!load)
3901 return 1;
3902 size = (insn >> 6) & 3;
3903 nregs = ((insn >> 8) & 3) + 1;
3904 stride = (insn & (1 << 5)) ? 2 : 1;
3905 load_reg_var(s, addr, rn);
3906 for (reg = 0; reg < nregs; reg++) {
3907 switch (size) {
3908 case 0:
3909 tmp = gen_ld8u(addr, IS_USER(s));
3910 gen_neon_dup_u8(tmp, 0);
3911 break;
3912 case 1:
3913 tmp = gen_ld16u(addr, IS_USER(s));
3914 gen_neon_dup_low16(tmp);
3915 break;
3916 case 2:
3917 tmp = gen_ld32(addr, IS_USER(s));
3918 break;
3919 case 3:
3920 return 1;
3921 default: /* Avoid compiler warnings. */
3922 abort();
3923 }
3924 tcg_gen_addi_i32(addr, addr, 1 << size);
3925 tmp2 = new_tmp();
3926 tcg_gen_mov_i32(tmp2, tmp);
3927 neon_store_reg(rd, 0, tmp2);
3928 neon_store_reg(rd, 1, tmp);
3929 rd += stride;
3930 }
3931 stride = (1 << size) * nregs;
3932 } else {
3933 /* Single element. */
3934 pass = (insn >> 7) & 1;
3935 switch (size) {
3936 case 0:
3937 shift = ((insn >> 5) & 3) * 8;
3938 stride = 1;
3939 break;
3940 case 1:
3941 shift = ((insn >> 6) & 1) * 16;
3942 stride = (insn & (1 << 5)) ? 2 : 1;
3943 break;
3944 case 2:
3945 shift = 0;
3946 stride = (insn & (1 << 6)) ? 2 : 1;
3947 break;
3948 default:
3949 abort();
3950 }
3951 nregs = ((insn >> 8) & 3) + 1;
3952 load_reg_var(s, addr, rn);
3953 for (reg = 0; reg < nregs; reg++) {
3954 if (load) {
3955 switch (size) {
3956 case 0:
3957 tmp = gen_ld8u(addr, IS_USER(s));
3958 break;
3959 case 1:
3960 tmp = gen_ld16u(addr, IS_USER(s));
3961 break;
3962 case 2:
3963 tmp = gen_ld32(addr, IS_USER(s));
3964 break;
3965 default: /* Avoid compiler warnings. */
3966 abort();
3967 }
3968 if (size != 2) {
3969 tmp2 = neon_load_reg(rd, pass);
3970 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3971 dead_tmp(tmp2);
3972 }
3973 neon_store_reg(rd, pass, tmp);
3974 } else { /* Store */
3975 tmp = neon_load_reg(rd, pass);
3976 if (shift)
3977 tcg_gen_shri_i32(tmp, tmp, shift);
3978 switch (size) {
3979 case 0:
3980 gen_st8(tmp, addr, IS_USER(s));
3981 break;
3982 case 1:
3983 gen_st16(tmp, addr, IS_USER(s));
3984 break;
3985 case 2:
3986 gen_st32(tmp, addr, IS_USER(s));
3987 break;
3988 }
3989 }
3990 rd += stride;
3991 tcg_gen_addi_i32(addr, addr, 1 << size);
3992 }
3993 stride = nregs * (1 << size);
3994 }
3995 }
3996 dead_tmp(addr);
3997 if (rm != 15) {
3998 TCGv base;
3999
4000 base = load_reg(s, rn);
4001 if (rm == 13) {
4002 tcg_gen_addi_i32(base, base, stride);
4003 } else {
4004 TCGv index;
4005 index = load_reg(s, rm);
4006 tcg_gen_add_i32(base, base, index);
4007 dead_tmp(index);
4008 }
4009 store_reg(s, rn, base);
4010 }
4011 return 0;
4012 }
4013
4014 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4015 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4016 {
4017 tcg_gen_and_i32(t, t, c);
4018 tcg_gen_andc_i32(f, f, c);
4019 tcg_gen_or_i32(dest, t, f);
4020 }
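
/* Illustrative note: on plain integers the select above is simply
 *     dest = (t & c) | (f & ~c);
 * each result bit comes from t where the control bit is set, else from f. */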
4021
4022 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4023 {
4024 switch (size) {
4025 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4026 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4027 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4028 default: abort();
4029 }
4030 }
4031
4032 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4033 {
4034 switch (size) {
4035 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4036 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4037 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4038 default: abort();
4039 }
4040 }
4041
4042 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4043 {
4044 switch (size) {
4045 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4046 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4047 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4048 default: abort();
4049 }
4050 }
4051
4052 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4053 {
4054 switch (size) {
4055 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4056 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4057 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4058 default: abort();
4059 }
4060 }
4061
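/* Narrowing-shift helper: q selects rounding shifts (the rshl helpers)
   and u unsigned element handling. */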
4062 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4063 int q, int u)
4064 {
4065 if (q) {
4066 if (u) {
4067 switch (size) {
4068 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4069 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4070 default: abort();
4071 }
4072 } else {
4073 switch (size) {
4074 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4075 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4076 default: abort();
4077 }
4078 }
4079 } else {
4080 if (u) {
4081 switch (size) {
4082 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4083 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4084 default: abort();
4085 }
4086 } else {
4087 switch (size) {
4088 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4089 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4090 default: abort();
4091 }
4092 }
4093 }
4094 }
4095
4096 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4097 {
4098 if (u) {
4099 switch (size) {
4100 case 0: gen_helper_neon_widen_u8(dest, src); break;
4101 case 1: gen_helper_neon_widen_u16(dest, src); break;
4102 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4103 default: abort();
4104 }
4105 } else {
4106 switch (size) {
4107 case 0: gen_helper_neon_widen_s8(dest, src); break;
4108 case 1: gen_helper_neon_widen_s16(dest, src); break;
4109 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4110 default: abort();
4111 }
4112 }
4113 dead_tmp(src);
4114 }
4115
4116 static inline void gen_neon_addl(int size)
4117 {
4118 switch (size) {
4119 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4120 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4121 case 2: tcg_gen_add_i64(CPU_V001); break;
4122 default: abort();
4123 }
4124 }
4125
4126 static inline void gen_neon_subl(int size)
4127 {
4128 switch (size) {
4129 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4130 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4131 case 2: tcg_gen_sub_i64(CPU_V001); break;
4132 default: abort();
4133 }
4134 }
4135
4136 static inline void gen_neon_negl(TCGv_i64 var, int size)
4137 {
4138 switch (size) {
4139 case 0: gen_helper_neon_negl_u16(var, var); break;
4140 case 1: gen_helper_neon_negl_u32(var, var); break;
4141 case 2: gen_helper_neon_negl_u64(var, var); break;
4142 default: abort();
4143 }
4144 }
4145
4146 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4147 {
4148 switch (size) {
4149 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4150 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4151 default: abort();
4152 }
4153 }
4154
4155 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4156 {
4157 TCGv_i64 tmp;
4158
4159 switch ((size << 1) | u) {
4160 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4161 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4162 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4163 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4164 case 4:
4165 tmp = gen_muls_i64_i32(a, b);
4166 tcg_gen_mov_i64(dest, tmp);
4167 break;
4168 case 5:
4169 tmp = gen_mulu_i64_i32(a, b);
4170 tcg_gen_mov_i64(dest, tmp);
4171 break;
4172 default: abort();
4173 }
4174
4175 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters,
4176 so free them here. */
4177 if (size < 2) {
4178 dead_tmp(a);
4179 dead_tmp(b);
4180 }
4181 }
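
/* Illustrative note: for size == 2 the work is done by the generic
   gen_muls_i64_i32/gen_mulu_i64_i32 host multiplies, which do consume
   their operands -- hence the size < 2 guard on the frees above. */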
4182
4183 /* Translate a NEON data processing instruction. Return nonzero if the
4184 instruction is invalid.
4185 We process data in a mixture of 32-bit and 64-bit chunks.
4186 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4187
4188 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4189 {
4190 int op;
4191 int q;
4192 int rd, rn, rm;
4193 int size;
4194 int shift;
4195 int pass;
4196 int count;
4197 int pairwise;
4198 int u;
4199 int n;
4200 uint32_t imm, mask;
4201 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4202 TCGv_i64 tmp64;
4203
4204 if (!s->vfp_enabled)
4205 return 1;
4206 q = (insn & (1 << 6)) != 0;
4207 u = (insn >> 24) & 1;
4208 VFP_DREG_D(rd, insn);
4209 VFP_DREG_N(rn, insn);
4210 VFP_DREG_M(rm, insn);
4211 size = (insn >> 20) & 3;
4212 if ((insn & (1 << 23)) == 0) {
4213 /* Three register same length. */
4214 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4215 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4216 || op == 10 || op == 11 || op == 16)) {
4217 /* 64-bit element instructions. */
4218 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4219 neon_load_reg64(cpu_V0, rn + pass);
4220 neon_load_reg64(cpu_V1, rm + pass);
4221 switch (op) {
4222 case 1: /* VQADD */
4223 if (u) {
4224 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4225 cpu_V0, cpu_V1);
4226 } else {
4227 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4228 cpu_V0, cpu_V1);
4229 }
4230 break;
4231 case 5: /* VQSUB */
4232 if (u) {
4233 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4234 cpu_V0, cpu_V1);
4235 } else {
4236 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4237 cpu_V0, cpu_V1);
4238 }
4239 break;
4240 case 8: /* VSHL */
4241 if (u) {
4242 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4243 } else {
4244 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4245 }
4246 break;
4247 case 9: /* VQSHL */
4248 if (u) {
4249 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4250 cpu_V1, cpu_V0);
4251 } else {
4252 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4253 cpu_V1, cpu_V0);
4254 }
4255 break;
4256 case 10: /* VRSHL */
4257 if (u) {
4258 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4259 } else {
4260 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4261 }
4262 break;
4263 case 11: /* VQRSHL */
4264 if (u) {
4265 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4266 cpu_V1, cpu_V0);
4267 } else {
4268 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4269 cpu_V1, cpu_V0);
4270 }
4271 break;
4272 case 16:
4273 if (u) {
4274 tcg_gen_sub_i64(CPU_V001);
4275 } else {
4276 tcg_gen_add_i64(CPU_V001);
4277 }
4278 break;
4279 default:
4280 abort();
4281 }
4282 neon_store_reg64(cpu_V0, rd + pass);
4283 }
4284 return 0;
4285 }
4286 switch (op) {
4287 case 8: /* VSHL */
4288 case 9: /* VQSHL */
4289 case 10: /* VRSHL */
4290 case 11: /* VQRSHL */
4291 {
4292 int rtmp;
4293 /* Shift instruction operands are reversed. */
4294 rtmp = rn;
4295 rn = rm;
4296 rm = rtmp;
4297 pairwise = 0;
4298 }
4299 break;
4300 case 20: /* VPMAX */
4301 case 21: /* VPMIN */
4302 case 23: /* VPADD */
4303 pairwise = 1;
4304 break;
4305 case 26: /* VPADD (float) */
4306 pairwise = (u && size < 2);
4307 break;
4308 case 30: /* VPMIN/VPMAX (float) */
4309 pairwise = u;
4310 break;
4311 default:
4312 pairwise = 0;
4313 break;
4314 }
4315
4316 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4317
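    /* Pairwise operations combine adjacent element pairs: the low half
       of the result comes from pairs of Rn elements and the high half
       from pairs of Rm elements, so the source register is chosen by
       pass number below. */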
4318 if (pairwise) {
4319 /* Pairwise. */
4320 if (q)
4321 n = (pass & 1) * 2;
4322 else
4323 n = 0;
4324 if (pass < q + 1) {
4325 tmp = neon_load_reg(rn, n);
4326 tmp2 = neon_load_reg(rn, n + 1);
4327 } else {
4328 tmp = neon_load_reg(rm, n);
4329 tmp2 = neon_load_reg(rm, n + 1);
4330 }
4331 } else {
4332 /* Elementwise. */
4333 tmp = neon_load_reg(rn, pass);
4334 tmp2 = neon_load_reg(rm, pass);
4335 }
4336 switch (op) {
4337 case 0: /* VHADD */
4338 GEN_NEON_INTEGER_OP(hadd);
4339 break;
4340 case 1: /* VQADD */
4341 GEN_NEON_INTEGER_OP_ENV(qadd);
4342 break;
4343 case 2: /* VRHADD */
4344 GEN_NEON_INTEGER_OP(rhadd);
4345 break;
4346 case 3: /* Logic ops. */
4347 switch ((u << 2) | size) {
4348 case 0: /* VAND */
4349 tcg_gen_and_i32(tmp, tmp, tmp2);
4350 break;
4351 case 1: /* BIC */
4352 tcg_gen_andc_i32(tmp, tmp, tmp2);
4353 break;
4354 case 2: /* VORR */
4355 tcg_gen_or_i32(tmp, tmp, tmp2);
4356 break;
4357 case 3: /* VORN */
4358 tcg_gen_orc_i32(tmp, tmp, tmp2);
4359 break;
4360 case 4: /* VEOR */
4361 tcg_gen_xor_i32(tmp, tmp, tmp2);
4362 break;
4363 case 5: /* VBSL */
4364 tmp3 = neon_load_reg(rd, pass);
4365 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4366 dead_tmp(tmp3);
4367 break;
4368 case 6: /* VBIT */
4369 tmp3 = neon_load_reg(rd, pass);
4370 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4371 dead_tmp(tmp3);
4372 break;
4373 case 7: /* VBIF */
4374 tmp3 = neon_load_reg(rd, pass);
4375 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4376 dead_tmp(tmp3);
4377 break;
4378 }
4379 break;
4380 case 4: /* VHSUB */
4381 GEN_NEON_INTEGER_OP(hsub);
4382 break;
4383 case 5: /* VQSUB */
4384 GEN_NEON_INTEGER_OP_ENV(qsub);
4385 break;
4386 case 6: /* VCGT */
4387 GEN_NEON_INTEGER_OP(cgt);
4388 break;
4389 case 7: /* VCGE */
4390 GEN_NEON_INTEGER_OP(cge);
4391 break;
4392 case 8: /* VSHL */
4393 GEN_NEON_INTEGER_OP(shl);
4394 break;
4395 case 9: /* VQSHL */
4396 GEN_NEON_INTEGER_OP_ENV(qshl);
4397 break;
4398 case 10: /* VRSHL */
4399 GEN_NEON_INTEGER_OP(rshl);
4400 break;
4401 case 11: /* VQRSHL */
4402 GEN_NEON_INTEGER_OP_ENV(qrshl);
4403 break;
4404 case 12: /* VMAX */
4405 GEN_NEON_INTEGER_OP(max);
4406 break;
4407 case 13: /* VMIN */
4408 GEN_NEON_INTEGER_OP(min);
4409 break;
4410 case 14: /* VABD */
4411 GEN_NEON_INTEGER_OP(abd);
4412 break;
4413 case 15: /* VABA */
4414 GEN_NEON_INTEGER_OP(abd);
4415 dead_tmp(tmp2);
4416 tmp2 = neon_load_reg(rd, pass);
4417 gen_neon_add(size, tmp, tmp2);
4418 break;
4419 case 16:
4420 if (!u) { /* VADD */
4421 if (gen_neon_add(size, tmp, tmp2))
4422 return 1;
4423 } else { /* VSUB */
4424 switch (size) {
4425 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4426 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4427 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4428 default: return 1;
4429 }
4430 }
4431 break;
4432 case 17:
4433 if (!u) { /* VTST */
4434 switch (size) {
4435 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4436 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4437 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4438 default: return 1;
4439 }
4440 } else { /* VCEQ */
4441 switch (size) {
4442 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4443 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4444 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4445 default: return 1;
4446 }
4447 }
4448 break;
4449 case 18: /* Multiply. */
4450 switch (size) {
4451 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4452 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4453 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4454 default: return 1;
4455 }
4456 dead_tmp(tmp2);
4457 tmp2 = neon_load_reg(rd, pass);
4458 if (u) { /* VMLS */
4459 gen_neon_rsb(size, tmp, tmp2);
4460 } else { /* VMLA */
4461 gen_neon_add(size, tmp, tmp2);
4462 }
4463 break;
4464 case 19: /* VMUL */
4465 if (u) { /* polynomial */
4466 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4467 } else { /* Integer */
4468 switch (size) {
4469 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4470 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4471 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4472 default: return 1;
4473 }
4474 }
4475 break;
4476 case 20: /* VPMAX */
4477 GEN_NEON_INTEGER_OP(pmax);
4478 break;
4479 case 21: /* VPMIN */
4480 GEN_NEON_INTEGER_OP(pmin);
4481 break;
4482 case 22: /* Multiply high. */
4483 if (!u) { /* VQDMULH */
4484 switch (size) {
4485 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4486 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4487 default: return 1;
4488 }
4489 } else { /* VQRDMULH */
4490 switch (size) {
4491 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4492 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4493 default: return 1;
4494 }
4495 }
4496 break;
4497 case 23: /* VPADD */
4498 if (u)
4499 return 1;
4500 switch (size) {
4501 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4502 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4503 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4504 default: return 1;
4505 }
4506 break;
4507 case 26: /* Floating point arithmetic. */
4508 switch ((u << 2) | size) {
4509 case 0: /* VADD */
4510 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4511 break;
4512 case 2: /* VSUB */
4513 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4514 break;
4515 case 4: /* VPADD */
4516 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4517 break;
4518 case 6: /* VABD */
4519 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4520 break;
4521 default:
4522 return 1;
4523 }
4524 break;
4525 case 27: /* Float multiply. */
4526 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4527 if (!u) {
4528 dead_tmp(tmp2);
4529 tmp2 = neon_load_reg(rd, pass);
4530 if (size == 0) {
4531 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4532 } else {
4533 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4534 }
4535 }
4536 break;
4537 case 28: /* Float compare. */
4538 if (!u) {
4539 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4540 } else {
4541 if (size == 0)
4542 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4543 else
4544 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4545 }
4546 break;
4547 case 29: /* Float compare absolute. */
4548 if (!u)
4549 return 1;
4550 if (size == 0)
4551 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4552 else
4553 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4554 break;
4555 case 30: /* Float min/max. */
4556 if (size == 0)
4557 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4558 else
4559 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4560 break;
4561 case 31:
4562 if (size == 0)
4563 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4564 else
4565 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4566 break;
4567 default:
4568 abort();
4569 }
4570 dead_tmp(tmp2);
4571
4572 /* Save the result. For elementwise operations we can put it
4573 straight into the destination register. For pairwise operations
4574 we have to be careful to avoid clobbering the source operands. */
4575 if (pairwise && rd == rm) {
4576 neon_store_scratch(pass, tmp);
4577 } else {
4578 neon_store_reg(rd, pass, tmp);
4579 }
4580
4581 } /* for pass */
4582 if (pairwise && rd == rm) {
4583 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4584 tmp = neon_load_scratch(pass);
4585 neon_store_reg(rd, pass, tmp);
4586 }
4587 }
4588 /* End of 3 register same size operations. */
4589 } else if (insn & (1 << 4)) {
4590 if ((insn & 0x00380080) != 0) {
4591 /* Two registers and shift. */
4592 op = (insn >> 8) & 0xf;
4593 if (insn & (1 << 7)) {
4594 /* 64-bit shift. */
4595 size = 3;
4596 } else {
4597 size = 2;
4598 while ((insn & (1 << (size + 19))) == 0)
4599 size--;
4600 }
4601 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4602 /* To avoid excessive duplication of ops we implement shift
4603 by immediate using the variable shift operations. */
4604 if (op < 8) {
4605 /* Shift by immediate:
4606 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4607 /* Right shifts are encoded as N - shift, where N is the
4608 element size in bits. */
4609 if (op <= 4)
4610 shift = shift - (1 << (size + 3));
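    /* Worked example: for 8-bit elements a right shift by n is encoded
       as 8 - n, so an encoded value of 1 becomes shift = 1 - 8 = -7
       here, and the variable-shift helpers interpret the negative
       count as a right shift by 7. */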
4611 if (size == 3) {
4612 count = q + 1;
4613 } else {
4614 count = q ? 4 : 2;
4615 }
4616 switch (size) {
4617 case 0:
4618 imm = (uint8_t) shift;
4619 imm |= imm << 8;
4620 imm |= imm << 16;
4621 break;
4622 case 1:
4623 imm = (uint16_t) shift;
4624 imm |= imm << 16;
4625 break;
4626 case 2:
4627 case 3:
4628 imm = shift;
4629 break;
4630 default:
4631 abort();
4632 }
4633
4634 for (pass = 0; pass < count; pass++) {
4635 if (size == 3) {
4636 neon_load_reg64(cpu_V0, rm + pass);
4637 tcg_gen_movi_i64(cpu_V1, imm);
4638 switch (op) {
4639 case 0: /* VSHR */
4640 case 1: /* VSRA */
4641 if (u)
4642 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4643 else
4644 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4645 break;
4646 case 2: /* VRSHR */
4647 case 3: /* VRSRA */
4648 if (u)
4649 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4650 else
4651 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4652 break;
4653 case 4: /* VSRI */
4654 if (!u)
4655 return 1;
4656 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4657 break;
4658 case 5: /* VSHL, VSLI */
4659 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4660 break;
4661 case 6: /* VQSHLU */
4662 if (u) {
4663 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4664 cpu_V0, cpu_V1);
4665 } else {
4666 return 1;
4667 }
4668 break;
4669 case 7: /* VQSHL */
4670 if (u) {
4671 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4672 cpu_V0, cpu_V1);
4673 } else {
4674 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4675 cpu_V0, cpu_V1);
4676 }
4677 break;
4678 }
4679 if (op == 1 || op == 3) {
4680 /* Accumulate. */
4681 neon_load_reg64(cpu_V1, rd + pass);
4682 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4683 } else if (op == 4 || (op == 5 && u)) {
4684 /* Insert */
4685 neon_load_reg64(cpu_V1, rd + pass);
4686 uint64_t mask;
4687 if (shift < -63 || shift > 63) {
4688 mask = 0;
4689 } else {
4690 if (op == 4) {
4691 mask = 0xffffffffffffffffull >> -shift;
4692 } else {
4693 mask = 0xffffffffffffffffull << shift;
4694 }
4695 }
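    /* Only the bits produced by the shift are inserted: the mask
       covers the low 64 - n bits for VSRI (right shift by n) or the
       high 64 - n bits for VSLI (left shift by n); all other
       destination bits are preserved. */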
4696 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4697 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
4698 }
4699 neon_store_reg64(cpu_V0, rd + pass);
4700 } else { /* size < 3 */
4701 /* Operands are in tmp and tmp2. */
4702 tmp = neon_load_reg(rm, pass);
4703 tmp2 = new_tmp();
4704 tcg_gen_movi_i32(tmp2, imm);
4705 switch (op) {
4706 case 0: /* VSHR */
4707 case 1: /* VSRA */
4708 GEN_NEON_INTEGER_OP(shl);
4709 break;
4710 case 2: /* VRSHR */
4711 case 3: /* VRSRA */
4712 GEN_NEON_INTEGER_OP(rshl);
4713 break;
4714 case 4: /* VSRI */
4715 if (!u)
4716 return 1;
4717 GEN_NEON_INTEGER_OP(shl);
4718 break;
4719 case 5: /* VSHL, VSLI */
4720 switch (size) {
4721 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4722 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4723 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4724 default: return 1;
4725 }
4726 break;
4727 case 6: /* VQSHLU */
4728 if (!u) {
4729 return 1;
4730 }
4731 switch (size) {
4732 case 0:
4733 gen_helper_neon_qshlu_s8(tmp, cpu_env,
4734 tmp, tmp2);
4735 break;
4736 case 1:
4737 gen_helper_neon_qshlu_s16(tmp, cpu_env,
4738 tmp, tmp2);
4739 break;
4740 case 2:
4741 gen_helper_neon_qshlu_s32(tmp, cpu_env,
4742 tmp, tmp2);
4743 break;
4744 default:
4745 return 1;
4746 }
4747 break;
4748 case 7: /* VQSHL */
4749 GEN_NEON_INTEGER_OP_ENV(qshl);
4750 break;
4751 }
4752 dead_tmp(tmp2);
4753
4754 if (op == 1 || op == 3) {
4755 /* Accumulate. */
4756 tmp2 = neon_load_reg(rd, pass);
4757 gen_neon_add(size, tmp, tmp2);
4758 dead_tmp(tmp2);
4759 } else if (op == 4 || (op == 5 && u)) {
4760 /* Insert */
4761 switch (size) {
4762 case 0:
4763 if (op == 4)
4764 mask = 0xff >> -shift;
4765 else
4766 mask = (uint8_t)(0xff << shift);
4767 mask |= mask << 8;
4768 mask |= mask << 16;
4769 break;
4770 case 1:
4771 if (op == 4)
4772 mask = 0xffff >> -shift;
4773 else
4774 mask = (uint16_t)(0xffff << shift);
4775 mask |= mask << 16;
4776 break;
4777 case 2:
4778 if (shift < -31 || shift > 31) {
4779 mask = 0;
4780 } else {
4781 if (op == 4)
4782 mask = 0xffffffffu >> -shift;
4783 else
4784 mask = 0xffffffffu << shift;
4785 }
4786 break;
4787 default:
4788 abort();
4789 }
4790 tmp2 = neon_load_reg(rd, pass);
4791 tcg_gen_andi_i32(tmp, tmp, mask);
4792 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
4793 tcg_gen_or_i32(tmp, tmp, tmp2);
4794 dead_tmp(tmp2);
4795 }
4796 neon_store_reg(rd, pass, tmp);
4797 }
4798 } /* for pass */
4799 } else if (op < 10) {
4800 /* Shift by immediate and narrow:
4801 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4802 shift = shift - (1 << (size + 3));
4803 size++;
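    /* size is incremented because the source elements of a narrowing
       shift are twice the width of the results; from here on it
       describes the input elements. */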
4804 switch (size) {
4805 case 1:
4806 imm = (uint16_t)shift;
4807 imm |= imm << 16;
4808 tmp2 = tcg_const_i32(imm);
4809 TCGV_UNUSED_I64(tmp64);
4810 break;
4811 case 2:
4812 imm = (uint32_t)shift;
4813 tmp2 = tcg_const_i32(imm);
4814 TCGV_UNUSED_I64(tmp64);
4815 break;
4816 case 3:
4817 tmp64 = tcg_const_i64(shift);
4818 TCGV_UNUSED(tmp2);
4819 break;
4820 default:
4821 abort();
4822 }
4823
4824 for (pass = 0; pass < 2; pass++) {
4825 if (size == 3) {
4826 neon_load_reg64(cpu_V0, rm + pass);
4827 if (q) {
4828 if (u)
4829 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
4830 else
4831 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
4832 } else {
4833 if (u)
4834 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
4835 else
4836 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
4837 }
4838 } else {
4839 tmp = neon_load_reg(rm + pass, 0);
4840 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4841 tmp3 = neon_load_reg(rm + pass, 1);
4842 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4843 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4844 dead_tmp(tmp);
4845 dead_tmp(tmp3);
4846 }
4847 tmp = new_tmp();
4848 if (op == 8 && !u) {
4849 gen_neon_narrow(size - 1, tmp, cpu_V0);
4850 } else {
4851 if (op == 8)
4852 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4853 else
4854 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4855 }
4856 neon_store_reg(rd, pass, tmp);
4857 } /* for pass */
4858 if (size == 3) {
4859 tcg_temp_free_i64(tmp64);
4860 } else {
4861 tcg_temp_free_i32(tmp2);
4862 }
4863 } else if (op == 10) {
4864 /* VSHLL */
4865 if (q || size == 3)
4866 return 1;
4867 tmp = neon_load_reg(rm, 0);
4868 tmp2 = neon_load_reg(rm, 1);
4869 for (pass = 0; pass < 2; pass++) {
4870 if (pass == 1)
4871 tmp = tmp2;
4872
4873 gen_neon_widen(cpu_V0, tmp, size, u);
4874
4875 if (shift != 0) {
4876 /* The shift is less than the width of the source
4877 type, so we can just shift the whole register. */
4878 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4879 /* Widen the result of the shift: we need to clear
4880 * the potential overflow bits resulting from
4881 * left bits of the narrow input appearing as
4882 * right bits of the left-hand neighbouring narrow
4883 * input. */
4884 if (size < 2 || !u) {
4885 uint64_t imm64;
4886 if (size == 0) {
4887 imm = (0xffu >> (8 - shift));
4888 imm |= imm << 16;
4889 } else if (size == 1) {
4890 imm = 0xffff >> (16 - shift);
4891 } else {
4892 /* size == 2 */
4893 imm = 0xffffffff >> (32 - shift);
4894 }
4895 if (size < 2) {
4896 imm64 = imm | (((uint64_t)imm) << 32);
4897 } else {
4898 imm64 = imm;
4899 }
4900 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4901 }
4902 }
4903 neon_store_reg64(cpu_V0, rd + pass);
4904 }
4905 } else if (op >= 14) {
4906 /* VCVT fixed-point. */
4907 /* We have already masked out the must-be-1 top bit of imm6,
4908 * hence this 32-shift where the ARM ARM has 64-imm6.
4909 */
4910 shift = 32 - shift;
4911 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4912 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4913 if (!(op & 1)) {
4914 if (u)
4915 gen_vfp_ulto(0, shift);
4916 else
4917 gen_vfp_slto(0, shift);
4918 } else {
4919 if (u)
4920 gen_vfp_toul(0, shift);
4921 else
4922 gen_vfp_tosl(0, shift);
4923 }
4924 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4925 }
4926 } else {
4927 return 1;
4928 }
4929 } else { /* (insn & 0x00380080) == 0 */
4930 int invert;
4931
4932 op = (insn >> 8) & 0xf;
4933 /* One register and immediate. */
4934 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4935 invert = (insn & (1 << 5)) != 0;
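    /* The cases below expand the 8-bit immediate following the AdvSIMD
       modified-immediate ("cmode") rules: a byte placed in one of the
       four byte positions of a word, halfword replication, the
       ones-extended forms (ops 12 and 13) and a 32-bit float constant
       assembled from sign, exponent and fraction fields (op 15). */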
4936 switch (op) {
4937 case 0: case 1:
4938 /* no-op */
4939 break;
4940 case 2: case 3:
4941 imm <<= 8;
4942 break;
4943 case 4: case 5:
4944 imm <<= 16;
4945 break;
4946 case 6: case 7:
4947 imm <<= 24;
4948 break;
4949 case 8: case 9:
4950 imm |= imm << 16;
4951 break;
4952 case 10: case 11:
4953 imm = (imm << 8) | (imm << 24);
4954 break;
4955 case 12:
4956 imm = (imm << 8) | 0xff;
4957 break;
4958 case 13:
4959 imm = (imm << 16) | 0xffff;
4960 break;
4961 case 14:
4962 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4963 if (invert)
4964 imm = ~imm;
4965 break;
4966 case 15:
4967 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4968 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4969 break;
4970 }
4971 if (invert)
4972 imm = ~imm;
4973
4974 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4975 if (op & 1 && op < 12) {
4976 tmp = neon_load_reg(rd, pass);
4977 if (invert) {
4978 /* The immediate value has already been inverted, so
4979 BIC becomes AND. */
4980 tcg_gen_andi_i32(tmp, tmp, imm);
4981 } else {
4982 tcg_gen_ori_i32(tmp, tmp, imm);
4983 }
4984 } else {
4985 /* VMOV, VMVN. */
4986 tmp = new_tmp();
4987 if (op == 14 && invert) {
4988 uint32_t val;
4989 val = 0;
4990 for (n = 0; n < 4; n++) {
4991 if (imm & (1 << (n + (pass & 1) * 4)))
4992 val |= 0xff << (n * 8);
4993 }
4994 tcg_gen_movi_i32(tmp, val);
4995 } else {
4996 tcg_gen_movi_i32(tmp, imm);
4997 }
4998 }
4999 neon_store_reg(rd, pass, tmp);
5000 }
5001 }
5002 } else { /* (insn & 0x00800010 == 0x00800000) */
5003 if (size != 3) {
5004 op = (insn >> 8) & 0xf;
5005 if ((insn & (1 << 6)) == 0) {
5006 /* Three registers of different lengths. */
5007 int src1_wide;
5008 int src2_wide;
5009 int prewiden;
5010 /* prewiden, src1_wide, src2_wide */
5011 static const int neon_3reg_wide[16][3] = {
5012 {1, 0, 0}, /* VADDL */
5013 {1, 1, 0}, /* VADDW */
5014 {1, 0, 0}, /* VSUBL */
5015 {1, 1, 0}, /* VSUBW */
5016 {0, 1, 1}, /* VADDHN */
5017 {0, 0, 0}, /* VABAL */
5018 {0, 1, 1}, /* VSUBHN */
5019 {0, 0, 0}, /* VABDL */
5020 {0, 0, 0}, /* VMLAL */
5021 {0, 0, 0}, /* VQDMLAL */
5022 {0, 0, 0}, /* VMLSL */
5023 {0, 0, 0}, /* VQDMLSL */
5024 {0, 0, 0}, /* Integer VMULL */
5025 {0, 0, 0}, /* VQDMULL */
5026 {0, 0, 0} /* Polynomial VMULL */
5027 };
5028
5029 prewiden = neon_3reg_wide[op][0];
5030 src1_wide = neon_3reg_wide[op][1];
5031 src2_wide = neon_3reg_wide[op][2];
5032
5033 if (size == 0 && (op == 9 || op == 11 || op == 13))
5034 return 1;
5035
5036 /* Avoid overlapping operands. Wide source operands are
5037 always aligned so will never overlap with wide
5038 destinations in problematic ways. */
5039 if (rd == rm && !src2_wide) {
5040 tmp = neon_load_reg(rm, 1);
5041 neon_store_scratch(2, tmp);
5042 } else if (rd == rn && !src1_wide) {
5043 tmp = neon_load_reg(rn, 1);
5044 neon_store_scratch(2, tmp);
5045 }
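    /* Example of the overlap handled above: pass 0 writes a full
       64-bit destination register, so if rd aliases a narrow source
       the second half of that source would be clobbered before pass 1
       reads it; it is therefore saved to a scratch slot first. */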
5046 TCGV_UNUSED(tmp3);
5047 for (pass = 0; pass < 2; pass++) {
5048 if (src1_wide) {
5049 neon_load_reg64(cpu_V0, rn + pass);
5050 TCGV_UNUSED(tmp);
5051 } else {
5052 if (pass == 1 && rd == rn) {
5053 tmp = neon_load_scratch(2);
5054 } else {
5055 tmp = neon_load_reg(rn, pass);
5056 }
5057 if (prewiden) {
5058 gen_neon_widen(cpu_V0, tmp, size, u);
5059 }
5060 }
5061 if (src2_wide) {
5062 neon_load_reg64(cpu_V1, rm + pass);
5063 TCGV_UNUSED(tmp2);
5064 } else {
5065 if (pass == 1 && rd == rm) {
5066 tmp2 = neon_load_scratch(2);
5067 } else {
5068 tmp2 = neon_load_reg(rm, pass);
5069 }
5070 if (prewiden) {
5071 gen_neon_widen(cpu_V1, tmp2, size, u);
5072 }
5073 }
5074 switch (op) {
5075 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5076 gen_neon_addl(size);
5077 break;
5078 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5079 gen_neon_subl(size);
5080 break;
5081 case 5: case 7: /* VABAL, VABDL */
5082 switch ((size << 1) | u) {
5083 case 0:
5084 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5085 break;
5086 case 1:
5087 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5088 break;
5089 case 2:
5090 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5091 break;
5092 case 3:
5093 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5094 break;
5095 case 4:
5096 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5097 break;
5098 case 5:
5099 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5100 break;
5101 default: abort();
5102 }
5103 dead_tmp(tmp2);
5104 dead_tmp(tmp);
5105 break;
5106 case 8: case 9: case 10: case 11: case 12: case 13:
5107 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5108 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5109 break;
5110 case 14: /* Polynomial VMULL */
5111 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5112 dead_tmp(tmp2);
5113 dead_tmp(tmp);
5114 break;
5115 default: /* 15 is RESERVED. */
5116 return 1;
5117 }
5118 if (op == 13) {
5119 /* VQDMULL */
5120 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5121 neon_store_reg64(cpu_V0, rd + pass);
5122 } else if (op == 5 || (op >= 8 && op <= 11)) {
5123 /* Accumulate. */
5124 neon_load_reg64(cpu_V1, rd + pass);
5125 switch (op) {
5126 case 10: /* VMLSL */
5127 gen_neon_negl(cpu_V0, size);
5128 /* Fall through */
5129 case 5: case 8: /* VABAL, VMLAL */
5130 gen_neon_addl(size);
5131 break;
5132 case 9: case 11: /* VQDMLAL, VQDMLSL */
5133 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5134 if (op == 11) {
5135 gen_neon_negl(cpu_V0, size);
5136 }
5137 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5138 break;
5139 default:
5140 abort();
5141 }
5142 neon_store_reg64(cpu_V0, rd + pass);
5143 } else if (op == 4 || op == 6) {
5144 /* Narrowing operation. */
5145 tmp = new_tmp();
5146 if (!u) {
5147 switch (size) {
5148 case 0:
5149 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5150 break;
5151 case 1:
5152 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5153 break;
5154 case 2:
5155 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5156 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5157 break;
5158 default: abort();
5159 }
5160 } else {
5161 switch (size) {
5162 case 0:
5163 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5164 break;
5165 case 1:
5166 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5167 break;
5168 case 2:
5169 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5170 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5171 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5172 break;
5173 default: abort();
5174 }
5175 }
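    /* The pass 0 result is held in tmp3 and both halves are written
       only after pass 1, so a destination that aliases the 64-bit
       source is not clobbered between passes. */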
5176 if (pass == 0) {
5177 tmp3 = tmp;
5178 } else {
5179 neon_store_reg(rd, 0, tmp3);
5180 neon_store_reg(rd, 1, tmp);
5181 }
5182 } else {
5183 /* Write back the result. */
5184 neon_store_reg64(cpu_V0, rd + pass);
5185 }
5186 }
5187 } else {
5188 /* Two registers and a scalar. */
5189 switch (op) {
5190 case 0: /* Integer VMLA scalar */
5191 case 1: /* Float VMLA scalar */
5192 case 4: /* Integer VMLS scalar */
5193 case 5: /* Floating point VMLS scalar */
5194 case 8: /* Integer VMUL scalar */
5195 case 9: /* Floating point VMUL scalar */
5196 case 12: /* VQDMULH scalar */
5197 case 13: /* VQRDMULH scalar */
5198 tmp = neon_get_scalar(size, rm);
5199 neon_store_scratch(0, tmp);
5200 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5201 tmp = neon_load_scratch(0);
5202 tmp2 = neon_load_reg(rn, pass);
5203 if (op == 12) {
5204 if (size == 1) {
5205 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5206 } else {
5207 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5208 }
5209 } else if (op == 13) {
5210 if (size == 1) {
5211 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5212 } else {
5213 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5214 }
5215 } else if (op & 1) {
5216 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5217 } else {
5218 switch (size) {
5219 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5220 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5221 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5222 default: return 1;
5223 }
5224 }
5225 dead_tmp(tmp2);
5226 if (op < 8) {
5227 /* Accumulate. */
5228 tmp2 = neon_load_reg(rd, pass);
5229 switch (op) {
5230 case 0:
5231 gen_neon_add(size, tmp, tmp2);
5232 break;
5233 case 1:
5234 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5235 break;
5236 case 4:
5237 gen_neon_rsb(size, tmp, tmp2);
5238 break;
5239 case 5:
5240 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5241 break;
5242 default:
5243 abort();
5244 }
5245 dead_tmp(tmp2);
5246 }
5247 neon_store_reg(rd, pass, tmp);
5248 }
5249 break;
5250 case 2: /* VMLAL scalar */
5251 case 3: /* VQDMLAL scalar */
5252 case 6: /* VMLSL scalar */
5253 case 7: /* VQDMLSL scalar */
5254 case 10: /* VMULL scalar */
5255 case 11: /* VQDMULL scalar */
5256 if (size == 0 && (op == 3 || op == 7 || op == 11))
5257 return 1;
5258
5259 tmp2 = neon_get_scalar(size, rm);
5260 /* We need a copy of tmp2 because gen_neon_mull
5261 * deletes it during pass 0. */
5262 tmp4 = new_tmp();
5263 tcg_gen_mov_i32(tmp4, tmp2);
5264 tmp3 = neon_load_reg(rn, 1);
5265
5266 for (pass = 0; pass < 2; pass++) {
5267 if (pass == 0) {
5268 tmp = neon_load_reg(rn, 0);
5269 } else {
5270 tmp = tmp3;
5271 tmp2 = tmp4;
5272 }
5273 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5274 if (op != 11) {
5275 neon_load_reg64(cpu_V1, rd + pass);
5276 }
5277 switch (op) {
5278 case 6:
5279 gen_neon_negl(cpu_V0, size);
5280 /* Fall through */
5281 case 2:
5282 gen_neon_addl(size);
5283 break;
5284 case 3: case 7:
5285 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5286 if (op == 7) {
5287 gen_neon_negl(cpu_V0, size);
5288 }
5289 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5290 break;
5291 case 10:
5292 /* no-op */
5293 break;
5294 case 11:
5295 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5296 break;
5297 default:
5298 abort();
5299 }
5300 neon_store_reg64(cpu_V0, rd + pass);
5301 }
5302
5303
5304 break;
5305 default: /* 14 and 15 are RESERVED */
5306 return 1;
5307 }
5308 }
5309 } else { /* size == 3 */
5310 if (!u) {
5311 /* Extract. */
5312 imm = (insn >> 8) & 0xf;
5313
5314 if (imm > 7 && !q)
5315 return 1;
5316
5317 if (imm == 0) {
5318 neon_load_reg64(cpu_V0, rn);
5319 if (q) {
5320 neon_load_reg64(cpu_V1, rn + 1);
5321 }
5322 } else if (imm == 8) {
5323 neon_load_reg64(cpu_V0, rn + 1);
5324 if (q) {
5325 neon_load_reg64(cpu_V1, rm);
5326 }
5327 } else if (q) {
5328 tmp64 = tcg_temp_new_i64();
5329 if (imm < 8) {
5330 neon_load_reg64(cpu_V0, rn);
5331 neon_load_reg64(tmp64, rn + 1);
5332 } else {
5333 neon_load_reg64(cpu_V0, rn + 1);
5334 neon_load_reg64(tmp64, rm);
5335 }
5336 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5337 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5338 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5339 if (imm < 8) {
5340 neon_load_reg64(cpu_V1, rm);
5341 } else {
5342 neon_load_reg64(cpu_V1, rm + 1);
5343 imm -= 8;
5344 }
5345 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5346 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5347 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5348 tcg_temp_free_i64(tmp64);
5349 } else {
5350 /* BUGFIX */
5351 neon_load_reg64(cpu_V0, rn);
5352 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5353 neon_load_reg64(cpu_V1, rm);
5354 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5355 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5356 }
5357 neon_store_reg64(cpu_V0, rd);
5358 if (q) {
5359 neon_store_reg64(cpu_V1, rd + 1);
5360 }
5361 } else if ((insn & (1 << 11)) == 0) {
5362 /* Two register misc. */
5363 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5364 size = (insn >> 18) & 3;
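    /* op merges insn bits [17:16] (shifted into bits [5:4]) with
       bits [10:7], producing the case values 0-63 used below. */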
5365 switch (op) {
5366 case 0: /* VREV64 */
5367 if (size == 3)
5368 return 1;
5369 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5370 tmp = neon_load_reg(rm, pass * 2);
5371 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5372 switch (size) {
5373 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5374 case 1: gen_swap_half(tmp); break;
5375 case 2: /* no-op */ break;
5376 default: abort();
5377 }
5378 neon_store_reg(rd, pass * 2 + 1, tmp);
5379 if (size == 2) {
5380 neon_store_reg(rd, pass * 2, tmp2);
5381 } else {
5382 switch (size) {
5383 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5384 case 1: gen_swap_half(tmp2); break;
5385 default: abort();
5386 }
5387 neon_store_reg(rd, pass * 2, tmp2);
5388 }
5389 }
5390 break;
5391 case 4: case 5: /* VPADDL */
5392 case 12: case 13: /* VPADAL */
5393 if (size == 3)
5394 return 1;
5395 for (pass = 0; pass < q + 1; pass++) {
5396 tmp = neon_load_reg(rm, pass * 2);
5397 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5398 tmp = neon_load_reg(rm, pass * 2 + 1);
5399 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5400 switch (size) {
5401 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5402 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5403 case 2: tcg_gen_add_i64(CPU_V001); break;
5404 default: abort();
5405 }
5406 if (op >= 12) {
5407 /* Accumulate. */
5408 neon_load_reg64(cpu_V1, rd + pass);
5409 gen_neon_addl(size);
5410 }
5411 neon_store_reg64(cpu_V0, rd + pass);
5412 }
5413 break;
5414 case 33: /* VTRN */
5415 if (size == 2) {
5416 for (n = 0; n < (q ? 4 : 2); n += 2) {
5417 tmp = neon_load_reg(rm, n);
5418 tmp2 = neon_load_reg(rd, n + 1);
5419 neon_store_reg(rm, n, tmp2);
5420 neon_store_reg(rd, n + 1, tmp);
5421 }
5422 } else {
5423 goto elementwise;
5424 }
5425 break;
5426 case 34: /* VUZP */
5427 if (gen_neon_unzip(rd, rm, size, q)) {
5428 return 1;
5429 }
5430 break;
5431 case 35: /* VZIP */
5432 /* Reg Before After
5433 Rd A3 A2 A1 A0 B1 A1 B0 A0
5434 Rm B3 B2 B1 B0 B3 A3 B2 A2
5435 */
5436 if (size == 3)
5437 return 1;
5438 count = (q ? 4 : 2);
5439 for (n = 0; n < count; n++) {
5440 tmp = neon_load_reg(rd, n);
5441 tmp2 = neon_load_reg(rm, n);
5442 switch (size) {
5443 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5444 case 1: gen_neon_zip_u16(tmp, tmp2); break;
5445 case 2: /* no-op */ break;
5446 default: abort();
5447 }
5448 neon_store_scratch(n * 2, tmp);
5449 neon_store_scratch(n * 2 + 1, tmp2);
5450 }
5451 for (n = 0; n < count * 2; n++) {
5452 int reg = (n < count) ? rd : rm;
5453 tmp = neon_load_scratch(n);
5454 neon_store_reg(reg, n % count, tmp);
5455 }
5456 break;
5457 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5458 if (size == 3)
5459 return 1;
5460 TCGV_UNUSED(tmp2);
5461 for (pass = 0; pass < 2; pass++) {
5462 neon_load_reg64(cpu_V0, rm + pass);
5463 tmp = new_tmp();
5464 if (op == 36) {
5465 if (q) { /* VQMOVUN */
5466 gen_neon_unarrow_sats(size, tmp, cpu_V0);
5467 } else { /* VMOVN */
5468 gen_neon_narrow(size, tmp, cpu_V0);
5469 }
5470 } else { /* VQMOVN */
5471 if (q) {
5472 gen_neon_narrow_satu(size, tmp, cpu_V0);
5473 } else {
5474 gen_neon_narrow_sats(size, tmp, cpu_V0);
5475 }
5476 }
5477 if (pass == 0) {
5478 tmp2 = tmp;
5479 } else {
5480 neon_store_reg(rd, 0, tmp2);
5481 neon_store_reg(rd, 1, tmp);
5482 }
5483 }
5484 break;
5485 case 38: /* VSHLL */
5486 if (q || size == 3)
5487 return 1;
5488 tmp = neon_load_reg(rm, 0);
5489 tmp2 = neon_load_reg(rm, 1);
5490 for (pass = 0; pass < 2; pass++) {
5491 if (pass == 1)
5492 tmp = tmp2;
5493 gen_neon_widen(cpu_V0, tmp, size, 1);
5494 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5495 neon_store_reg64(cpu_V0, rd + pass);
5496 }
5497 break;
5498 case 44: /* VCVT.F16.F32 */
5499 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5500 return 1;
5501 tmp = new_tmp();
5502 tmp2 = new_tmp();
5503 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5504 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5505 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5506 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5507 tcg_gen_shli_i32(tmp2, tmp2, 16);
5508 tcg_gen_or_i32(tmp2, tmp2, tmp);
5509 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5510 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5511 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5512 neon_store_reg(rd, 0, tmp2);
5513 tmp2 = new_tmp();
5514 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5515 tcg_gen_shli_i32(tmp2, tmp2, 16);
5516 tcg_gen_or_i32(tmp2, tmp2, tmp);
5517 neon_store_reg(rd, 1, tmp2);
5518 dead_tmp(tmp);
5519 break;
5520 case 46: /* VCVT.F32.F16 */
5521 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5522 return 1;
5523 tmp3 = new_tmp();
5524 tmp = neon_load_reg(rm, 0);
5525 tmp2 = neon_load_reg(rm, 1);
5526 tcg_gen_ext16u_i32(tmp3, tmp);
5527 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5528 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5529 tcg_gen_shri_i32(tmp3, tmp, 16);
5530 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5531 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5532 dead_tmp(tmp);
5533 tcg_gen_ext16u_i32(tmp3, tmp2);
5534 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5535 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5536 tcg_gen_shri_i32(tmp3, tmp2, 16);
5537 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5538 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5539 dead_tmp(tmp2);
5540 dead_tmp(tmp3);
5541 break;
5542 default:
5543 elementwise:
5544 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5545 if (op == 30 || op == 31 || op >= 58) {
5546 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5547 neon_reg_offset(rm, pass));
5548 TCGV_UNUSED(tmp);
5549 } else {
5550 tmp = neon_load_reg(rm, pass);
5551 }
5552 switch (op) {
5553 case 1: /* VREV32 */
5554 switch (size) {
5555 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5556 case 1: gen_swap_half(tmp); break;
5557 default: return 1;
5558 }
5559 break;
5560 case 2: /* VREV16 */
5561 if (size != 0)
5562 return 1;
5563 gen_rev16(tmp);
5564 break;
5565 case 8: /* CLS */
5566 switch (size) {
5567 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5568 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5569 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5570 default: return 1;
5571 }
5572 break;
5573 case 9: /* CLZ */
5574 switch (size) {
5575 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5576 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5577 case 2: gen_helper_clz(tmp, tmp); break;
5578 default: return 1;
5579 }
5580 break;
5581 case 10: /* CNT */
5582 if (size != 0)
5583 return 1;
5584 gen_helper_neon_cnt_u8(tmp, tmp);
5585 break;
5586 case 11: /* VNOT */
5587 if (size != 0)
5588 return 1;
5589 tcg_gen_not_i32(tmp, tmp);
5590 break;
5591 case 14: /* VQABS */
5592 switch (size) {
5593 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5594 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5595 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
5596 default: return 1;
5597 }
5598 break;
5599 case 15: /* VQNEG */
5600 switch (size) {
5601 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5602 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5603 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
5604 default: return 1;
5605 }
5606 break;
5607 case 16: case 19: /* VCGT #0, VCLE #0 */
5608 tmp2 = tcg_const_i32(0);
5609 switch(size) {
5610 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5611 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5612 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5613 default: return 1;
5614 }
5615 tcg_temp_free(tmp2);
5616 if (op == 19)
5617 tcg_gen_not_i32(tmp, tmp);
5618 break;
5619 case 17: case 20: /* VCGE #0, VCLT #0 */
5620 tmp2 = tcg_const_i32(0);
5621 switch(size) {
5622 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5623 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5624 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5625 default: return 1;
5626 }
5627 tcg_temp_free(tmp2);
5628 if (op == 20)
5629 tcg_gen_not_i32(tmp, tmp);
5630 break;
5631 case 18: /* VCEQ #0 */
5632 tmp2 = tcg_const_i32(0);
5633 switch(size) {
5634 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5635 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5636 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5637 default: return 1;
5638 }
5639 tcg_temp_free(tmp2);
5640 break;
5641 case 22: /* VABS */
5642 switch(size) {
5643 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5644 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5645 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5646 default: return 1;
5647 }
5648 break;
5649 case 23: /* VNEG */
5650 if (size == 3)
5651 return 1;
5652 tmp2 = tcg_const_i32(0);
5653 gen_neon_rsb(size, tmp, tmp2);
5654 tcg_temp_free(tmp2);
5655 break;
5656 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5657 tmp2 = tcg_const_i32(0);
5658 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5659 tcg_temp_free(tmp2);
5660 if (op == 27)
5661 tcg_gen_not_i32(tmp, tmp);
5662 break;
5663 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5664 tmp2 = tcg_const_i32(0);
5665 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5666 tcg_temp_free(tmp2);
5667 if (op == 28)
5668 tcg_gen_not_i32(tmp, tmp);
5669 break;
5670 case 26: /* Float VCEQ #0 */
5671 tmp2 = tcg_const_i32(0);
5672 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5673 tcg_temp_free(tmp2);
5674 break;
5675 case 30: /* Float VABS */
5676 gen_vfp_abs(0);
5677 break;
5678 case 31: /* Float VNEG */
5679 gen_vfp_neg(0);
5680 break;
5681 case 32: /* VSWP */
5682 tmp2 = neon_load_reg(rd, pass);
5683 neon_store_reg(rm, pass, tmp2);
5684 break;
5685 case 33: /* VTRN */
5686 tmp2 = neon_load_reg(rd, pass);
5687 switch (size) {
5688 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5689 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5690 case 2: abort();
5691 default: return 1;
5692 }
5693 neon_store_reg(rm, pass, tmp2);
5694 break;
5695 case 56: /* Integer VRECPE */
5696 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5697 break;
5698 case 57: /* Integer VRSQRTE */
5699 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5700 break;
5701 case 58: /* Float VRECPE */
5702 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5703 break;
5704 case 59: /* Float VRSQRTE */
5705 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5706 break;
5707 case 60: /* VCVT.F32.S32 */
5708 gen_vfp_sito(0);
5709 break;
5710 case 61: /* VCVT.F32.U32 */
5711 gen_vfp_uito(0);
5712 break;
5713 case 62: /* VCVT.S32.F32 */
5714 gen_vfp_tosiz(0);
5715 break;
5716 case 63: /* VCVT.U32.F32 */
5717 gen_vfp_touiz(0);
5718 break;
5719 default:
5720 /* Reserved: 21, 29, 39-56 */
5721 return 1;
5722 }
5723 if (op == 30 || op == 31 || op >= 58) {
5724 tcg_gen_st_f32(cpu_F0s, cpu_env,
5725 neon_reg_offset(rd, pass));
5726 } else {
5727 neon_store_reg(rd, pass, tmp);
5728 }
5729 }
5730 break;
5731 }
5732 } else if ((insn & (1 << 10)) == 0) {
5733 /* VTBL, VTBX. */
5734 n = ((insn >> 5) & 0x18) + 8;
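    /* n is the table length in bytes: insn bits [9:8] hold len, and
       (len << 3) + 8 == (len + 1) * 8, i.e. one to four D registers. */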
5735 if (insn & (1 << 6)) {
5736 tmp = neon_load_reg(rd, 0);
5737 } else {
5738 tmp = new_tmp();
5739 tcg_gen_movi_i32(tmp, 0);
5740 }
5741 tmp2 = neon_load_reg(rm, 0);
5742 tmp4 = tcg_const_i32(rn);
5743 tmp5 = tcg_const_i32(n);
5744 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5745 dead_tmp(tmp);
5746 if (insn & (1 << 6)) {
5747 tmp = neon_load_reg(rd, 1);
5748 } else {
5749 tmp = new_tmp();
5750 tcg_gen_movi_i32(tmp, 0);
5751 }
5752 tmp3 = neon_load_reg(rm, 1);
5753 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5754 tcg_temp_free_i32(tmp5);
5755 tcg_temp_free_i32(tmp4);
5756 neon_store_reg(rd, 0, tmp2);
5757 neon_store_reg(rd, 1, tmp3);
5758 dead_tmp(tmp);
5759 } else if ((insn & 0x380) == 0) {
5760 /* VDUP */
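    /* The scalar index is in insn bits [19:16]: bit 19 picks the
       32-bit half of the source register; within it, bit 16 set means
       an 8-bit dup (byte chosen by bits [18:17]), otherwise bit 17 set
       means a 16-bit dup (half chosen by bit 18), and neither means a
       32-bit dup. */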
5761 if (insn & (1 << 19)) {
5762 tmp = neon_load_reg(rm, 1);
5763 } else {
5764 tmp = neon_load_reg(rm, 0);
5765 }
5766 if (insn & (1 << 16)) {
5767 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5768 } else if (insn & (1 << 17)) {
5769 if ((insn >> 18) & 1)
5770 gen_neon_dup_high16(tmp);
5771 else
5772 gen_neon_dup_low16(tmp);
5773 }
5774 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5775 tmp2 = new_tmp();
5776 tcg_gen_mov_i32(tmp2, tmp);
5777 neon_store_reg(rd, pass, tmp2);
5778 }
5779 dead_tmp(tmp);
5780 } else {
5781 return 1;
5782 }
5783 }
5784 }
5785 return 0;
5786 }
5787
5788 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5789 {
5790 int crn = (insn >> 16) & 0xf;
5791 int crm = insn & 0xf;
5792 int op1 = (insn >> 21) & 7;
5793 int op2 = (insn >> 5) & 7;
5794 int rt = (insn >> 12) & 0xf;
5795 TCGv tmp;
5796
5797 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5798 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5799 /* TEECR */
5800 if (IS_USER(s))
5801 return 1;
5802 tmp = load_cpu_field(teecr);
5803 store_reg(s, rt, tmp);
5804 return 0;
5805 }
5806 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5807 /* TEEHBR */
5808 if (IS_USER(s) && (env->teecr & 1))
5809 return 1;
5810 tmp = load_cpu_field(teehbr);
5811 store_reg(s, rt, tmp);
5812 return 0;
5813 }
5814 }
5815 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5816 op1, crn, crm, op2);
5817 return 1;
5818 }
5819
5820 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5821 {
5822 int crn = (insn >> 16) & 0xf;
5823 int crm = insn & 0xf;
5824 int op1 = (insn >> 21) & 7;
5825 int op2 = (insn >> 5) & 7;
5826 int rt = (insn >> 12) & 0xf;
5827 TCGv tmp;
5828
5829 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5830 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5831 /* TEECR */
5832 if (IS_USER(s))
5833 return 1;
5834 tmp = load_reg(s, rt);
5835 gen_helper_set_teecr(cpu_env, tmp);
5836 dead_tmp(tmp);
5837 return 0;
5838 }
5839 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5840 /* TEEHBR */
5841 if (IS_USER(s) && (env->teecr & 1))
5842 return 1;
5843 tmp = load_reg(s, rt);
5844 store_cpu_field(tmp, teehbr);
5845 return 0;
5846 }
5847 }
5848 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5849 op1, crn, crm, op2);
5850 return 1;
5851 }
5852
5853 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5854 {
5855 int cpnum;
5856
5857 cpnum = (insn >> 8) & 0xf;
5858 if (arm_feature(env, ARM_FEATURE_XSCALE)
5859 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5860 return 1;
5861
5862 switch (cpnum) {
5863 case 0:
5864 case 1:
5865 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5866 return disas_iwmmxt_insn(env, s, insn);
5867 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5868 return disas_dsp_insn(env, s, insn);
5869 }
5870 return 1;
5871 case 10:
5872 case 11:
5873 return disas_vfp_insn (env, s, insn);
5874 case 14:
5875 /* Coprocessors 7-15 are architecturally reserved by ARM.
5876 Unfortunately Intel decided to ignore this. */
5877 if (arm_feature(env, ARM_FEATURE_XSCALE))
5878 goto board;
5879 if (insn & (1 << 20))
5880 return disas_cp14_read(env, s, insn);
5881 else
5882 return disas_cp14_write(env, s, insn);
5883 case 15:
5884 return disas_cp15_insn (env, s, insn);
5885 default:
5886 board:
5887 /* Unknown coprocessor. See if the board has hooked it. */
5888 return disas_cp_insn (env, s, insn);
5889 }
5890 }
5891
5892
5893 /* Store a 64-bit value to a register pair. Clobbers val. */
5894 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5895 {
5896 TCGv tmp;
5897 tmp = new_tmp();
5898 tcg_gen_trunc_i64_i32(tmp, val);
5899 store_reg(s, rlow, tmp);
5900 tmp = new_tmp();
5901 tcg_gen_shri_i64(val, val, 32);
5902 tcg_gen_trunc_i64_i32(tmp, val);
5903 store_reg(s, rhigh, tmp);
5904 }
5905
5906 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5907 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5908 {
5909 TCGv_i64 tmp;
5910 TCGv tmp2;
5911
5912 /* Load value and extend to 64 bits. */
5913 tmp = tcg_temp_new_i64();
5914 tmp2 = load_reg(s, rlow);
5915 tcg_gen_extu_i32_i64(tmp, tmp2);
5916 dead_tmp(tmp2);
5917 tcg_gen_add_i64(val, val, tmp);
5918 tcg_temp_free_i64(tmp);
5919 }
5920
5921 /* load and add a 64-bit value from a register pair. */
5922 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5923 {
5924 TCGv_i64 tmp;
5925 TCGv tmpl;
5926 TCGv tmph;
5927
5928 /* Load 64-bit value rd:rn. */
5929 tmpl = load_reg(s, rlow);
5930 tmph = load_reg(s, rhigh);
5931 tmp = tcg_temp_new_i64();
5932 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5933 dead_tmp(tmpl);
5934 dead_tmp(tmph);
5935 tcg_gen_add_i64(val, val, tmp);
5936 tcg_temp_free_i64(tmp);
5937 }
5938
5939 /* Set N and Z flags from a 64-bit value. */
5940 static void gen_logicq_cc(TCGv_i64 val)
5941 {
5942 TCGv tmp = new_tmp();
5943 gen_helper_logicq_cc(tmp, val);
5944 gen_logic_CC(tmp);
5945 dead_tmp(tmp);
5946 }
5947
5948 /* Load/Store exclusive instructions are implemented by remembering
5949 the value/address loaded, and seeing if these are the same
5950 when the store is performed. This should be sufficient to implement
5951 the architecturally mandated semantics, and avoids having to monitor
5952 regular stores.
5953
5954 In system emulation mode only one CPU will be running at once, so
5955 this sequence is effectively atomic. In user emulation mode we
5956 throw an exception and handle the atomic operation elsewhere. */
5957 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5958 TCGv addr, int size)
5959 {
5960 TCGv tmp;
5961
5962 switch (size) {
5963 case 0:
5964 tmp = gen_ld8u(addr, IS_USER(s));
5965 break;
5966 case 1:
5967 tmp = gen_ld16u(addr, IS_USER(s));
5968 break;
5969 case 2:
5970 case 3:
5971 tmp = gen_ld32(addr, IS_USER(s));
5972 break;
5973 default:
5974 abort();
5975 }
5976 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5977 store_reg(s, rt, tmp);
5978 if (size == 3) {
5979 TCGv tmp2 = new_tmp();
5980 tcg_gen_addi_i32(tmp2, addr, 4);
5981 tmp = gen_ld32(tmp2, IS_USER(s));
5982 dead_tmp(tmp2);
5983 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5984 store_reg(s, rt2, tmp);
5985 }
5986 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5987 }
5988
5989 static void gen_clrex(DisasContext *s)
5990 {
5991 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5992 }
5993
5994 #ifdef CONFIG_USER_ONLY
5995 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5996 TCGv addr, int size)
5997 {
5998 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5999 tcg_gen_movi_i32(cpu_exclusive_info,
6000 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6001 gen_exception_insn(s, 4, EXCP_STREX);
6002 }
6003 #else
6004 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6005 TCGv addr, int size)
6006 {
6007 TCGv tmp;
6008 int done_label;
6009 int fail_label;
6010
6011 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6012 [addr] = {Rt};
6013 {Rd} = 0;
6014 } else {
6015 {Rd} = 1;
6016 } */
6017 fail_label = gen_new_label();
6018 done_label = gen_new_label();
6019 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6020 switch (size) {
6021 case 0:
6022 tmp = gen_ld8u(addr, IS_USER(s));
6023 break;
6024 case 1:
6025 tmp = gen_ld16u(addr, IS_USER(s));
6026 break;
6027 case 2:
6028 case 3:
6029 tmp = gen_ld32(addr, IS_USER(s));
6030 break;
6031 default:
6032 abort();
6033 }
6034 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6035 dead_tmp(tmp);
6036 if (size == 3) {
6037 TCGv tmp2 = new_tmp();
6038 tcg_gen_addi_i32(tmp2, addr, 4);
6039 tmp = gen_ld32(tmp2, IS_USER(s));
6040 dead_tmp(tmp2);
6041 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6042 dead_tmp(tmp);
6043 }
6044 tmp = load_reg(s, rt);
6045 switch (size) {
6046 case 0:
6047 gen_st8(tmp, addr, IS_USER(s));
6048 break;
6049 case 1:
6050 gen_st16(tmp, addr, IS_USER(s));
6051 break;
6052 case 2:
6053 case 3:
6054 gen_st32(tmp, addr, IS_USER(s));
6055 break;
6056 default:
6057 abort();
6058 }
6059 if (size == 3) {
6060 tcg_gen_addi_i32(addr, addr, 4);
6061 tmp = load_reg(s, rt2);
6062 gen_st32(tmp, addr, IS_USER(s));
6063 }
6064 tcg_gen_movi_i32(cpu_R[rd], 0);
6065 tcg_gen_br(done_label);
6066 gen_set_label(fail_label);
6067 tcg_gen_movi_i32(cpu_R[rd], 1);
6068 gen_set_label(done_label);
6069 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6070 }
6071 #endif
6072
6073 static void disas_arm_insn(CPUState * env, DisasContext *s)
6074 {
6075 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6076 TCGv tmp;
6077 TCGv tmp2;
6078 TCGv tmp3;
6079 TCGv addr;
6080 TCGv_i64 tmp64;
6081
6082 insn = ldl_code(s->pc);
6083 s->pc += 4;
6084
6085 /* M variants do not implement ARM mode. */
6086 if (IS_M(env))
6087 goto illegal_op;
6088 cond = insn >> 28;
6089 if (cond == 0xf){
6090 /* Unconditional instructions. */
6091 if (((insn >> 25) & 7) == 1) {
6092 /* NEON Data processing. */
6093 if (!arm_feature(env, ARM_FEATURE_NEON))
6094 goto illegal_op;
6095
6096 if (disas_neon_data_insn(env, s, insn))
6097 goto illegal_op;
6098 return;
6099 }
6100 if ((insn & 0x0f100000) == 0x04000000) {
6101 /* NEON load/store. */
6102 if (!arm_feature(env, ARM_FEATURE_NEON))
6103 goto illegal_op;
6104
6105 if (disas_neon_ls_insn(env, s, insn))
6106 goto illegal_op;
6107 return;
6108 }
6109 if (((insn & 0x0f30f000) == 0x0510f000) ||
6110 ((insn & 0x0f30f010) == 0x0710f000)) {
6111 if ((insn & (1 << 22)) == 0) {
6112 /* PLDW; v7MP */
6113 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6114 goto illegal_op;
6115 }
6116 }
6117 /* Otherwise PLD; v5TE+ */
6118 return;
6119 }
6120 if (((insn & 0x0f70f000) == 0x0450f000) ||
6121 ((insn & 0x0f70f010) == 0x0650f000)) {
6122 ARCH(7);
6123 return; /* PLI; V7 */
6124 }
6125 if (((insn & 0x0f700000) == 0x04100000) ||
6126 ((insn & 0x0f700010) == 0x06100000)) {
6127 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6128 goto illegal_op;
6129 }
6130 return; /* v7MP: Unallocated memory hint: must NOP */
6131 }
6132
6133 if ((insn & 0x0ffffdff) == 0x01010000) {
6134 ARCH(6);
6135 /* setend */
6136 if (insn & (1 << 9)) {
6137 /* BE8 mode not implemented. */
6138 goto illegal_op;
6139 }
6140 return;
6141 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6142 switch ((insn >> 4) & 0xf) {
6143 case 1: /* clrex */
6144 ARCH(6K);
6145 gen_clrex(s);
6146 return;
6147 case 4: /* dsb */
6148 case 5: /* dmb */
6149 case 6: /* isb */
6150 ARCH(7);
6151 /* We don't emulate caches so these are a no-op. */
6152 return;
6153 default:
6154 goto illegal_op;
6155 }
6156 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6157 /* srs */
6158 int32_t offset;
6159 if (IS_USER(s))
6160 goto illegal_op;
6161 ARCH(6);
6162 op1 = (insn & 0x1f);
6163 addr = new_tmp();
6164 tmp = tcg_const_i32(op1);
6165 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6166 tcg_temp_free_i32(tmp);
6167 i = (insn >> 23) & 3;
6168 switch (i) {
6169 case 0: offset = -4; break; /* DA */
6170 case 1: offset = 0; break; /* IA */
6171 case 2: offset = -8; break; /* DB */
6172 case 3: offset = 4; break; /* IB */
6173 default: abort();
6174 }
6175 if (offset)
6176 tcg_gen_addi_i32(addr, addr, offset);
6177 tmp = load_reg(s, 14);
6178 gen_st32(tmp, addr, 0);
6179 tmp = load_cpu_field(spsr);
6180 tcg_gen_addi_i32(addr, addr, 4);
6181 gen_st32(tmp, addr, 0);
6182 if (insn & (1 << 21)) {
6183 /* Base writeback. */
6184 switch (i) {
6185 case 0: offset = -8; break;
6186 case 1: offset = 4; break;
6187 case 2: offset = -4; break;
6188 case 3: offset = 0; break;
6189 default: abort();
6190 }
6191 if (offset)
6192 tcg_gen_addi_i32(addr, addr, offset);
6193 tmp = tcg_const_i32(op1);
6194 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6195 tcg_temp_free_i32(tmp);
6196 dead_tmp(addr);
6197 } else {
6198 dead_tmp(addr);
6199 }
6200 return;
6201 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6202 /* rfe */
6203 int32_t offset;
6204 if (IS_USER(s))
6205 goto illegal_op;
6206 ARCH(6);
6207 rn = (insn >> 16) & 0xf;
6208 addr = load_reg(s, rn);
6209 i = (insn >> 23) & 3;
6210 switch (i) {
6211 case 0: offset = -4; break; /* DA */
6212 case 1: offset = 0; break; /* IA */
6213 case 2: offset = -8; break; /* DB */
6214 case 3: offset = 4; break; /* IB */
6215 default: abort();
6216 }
6217 if (offset)
6218 tcg_gen_addi_i32(addr, addr, offset);
6219 /* Load PC into tmp and CPSR into tmp2. */
6220 tmp = gen_ld32(addr, 0);
6221 tcg_gen_addi_i32(addr, addr, 4);
6222 tmp2 = gen_ld32(addr, 0);
6223 if (insn & (1 << 21)) {
6224 /* Base writeback. */
6225 switch (i) {
6226 case 0: offset = -8; break;
6227 case 1: offset = 4; break;
6228 case 2: offset = -4; break;
6229 case 3: offset = 0; break;
6230 default: abort();
6231 }
6232 if (offset)
6233 tcg_gen_addi_i32(addr, addr, offset);
6234 store_reg(s, rn, addr);
6235 } else {
6236 dead_tmp(addr);
6237 }
6238 gen_rfe(s, tmp, tmp2);
6239 return;
6240 } else if ((insn & 0x0e000000) == 0x0a000000) {
6241 /* branch link and change to thumb (blx <offset>) */
6242 int32_t offset;
6243
6244 val = (uint32_t)s->pc;
6245 tmp = new_tmp();
6246 tcg_gen_movi_i32(tmp, val);
6247 store_reg(s, 14, tmp);
6248 /* Sign-extend the 24-bit offset */
6249 offset = (((int32_t)insn) << 8) >> 8;
6250 /* offset * 4 + bit24 * 2 + (thumb bit) */
6251 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6252 /* pipeline offset */
6253 val += 4;
6254 gen_bx_im(s, val);
6255 return;
6256 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6257 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6258 /* iWMMXt register transfer. */
6259 if (env->cp15.c15_cpar & (1 << 1))
6260 if (!disas_iwmmxt_insn(env, s, insn))
6261 return;
6262 }
6263 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6264 /* Coprocessor double register transfer. */
6265 } else if ((insn & 0x0f000010) == 0x0e000010) {
6266 /* Additional coprocessor register transfer. */
6267 } else if ((insn & 0x0ff10020) == 0x01000000) {
6268 uint32_t mask;
6269 uint32_t val;
6270 /* cps (privileged) */
6271 if (IS_USER(s))
6272 return;
6273 mask = val = 0;
6274 if (insn & (1 << 19)) {
6275 if (insn & (1 << 8))
6276 mask |= CPSR_A;
6277 if (insn & (1 << 7))
6278 mask |= CPSR_I;
6279 if (insn & (1 << 6))
6280 mask |= CPSR_F;
6281 if (insn & (1 << 18))
6282 val |= mask;
6283 }
6284 if (insn & (1 << 17)) {
6285 mask |= CPSR_M;
6286 val |= (insn & 0x1f);
6287 }
6288 if (mask) {
6289 gen_set_psr_im(s, mask, 0, val);
6290 }
6291 return;
6292 }
6293 goto illegal_op;
6294 }
6295 if (cond != 0xe) {
6296 /* If the condition is not "always", generate a conditional jump to
6297 the next instruction. */
6298 s->condlabel = gen_new_label();
6299 gen_test_cc(cond ^ 1, s->condlabel);
6300 s->condjmp = 1;
6301 }
6302 if ((insn & 0x0f900000) == 0x03000000) {
6303 if ((insn & (1 << 21)) == 0) {
6304 ARCH(6T2);
6305 rd = (insn >> 12) & 0xf;
6306 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6307 if ((insn & (1 << 22)) == 0) {
6308 /* MOVW */
6309 tmp = new_tmp();
6310 tcg_gen_movi_i32(tmp, val);
6311 } else {
6312 /* MOVT */
6313 tmp = load_reg(s, rd);
6314 tcg_gen_ext16u_i32(tmp, tmp);
6315 tcg_gen_ori_i32(tmp, tmp, val << 16);
6316 }
6317 store_reg(s, rd, tmp);
6318 } else {
6319 if (((insn >> 12) & 0xf) != 0xf)
6320 goto illegal_op;
6321 if (((insn >> 16) & 0xf) == 0) {
6322 gen_nop_hint(s, insn & 0xff);
6323 } else {
6324 /* CPSR = immediate */
6325 val = insn & 0xff;
6326 shift = ((insn >> 8) & 0xf) * 2;
6327 if (shift)
6328 val = (val >> shift) | (val << (32 - shift));
6329 i = ((insn & (1 << 22)) != 0);
6330 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6331 goto illegal_op;
6332 }
6333 }
6334 } else if ((insn & 0x0f900000) == 0x01000000
6335 && (insn & 0x00000090) != 0x00000090) {
6336 /* miscellaneous instructions */
6337 op1 = (insn >> 21) & 3;
6338 sh = (insn >> 4) & 0xf;
6339 rm = insn & 0xf;
6340 switch (sh) {
6341 case 0x0: /* move program status register */
6342 if (op1 & 1) {
6343 /* PSR = reg */
6344 tmp = load_reg(s, rm);
6345 i = ((op1 & 2) != 0);
6346 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6347 goto illegal_op;
6348 } else {
6349 /* reg = PSR */
6350 rd = (insn >> 12) & 0xf;
6351 if (op1 & 2) {
6352 if (IS_USER(s))
6353 goto illegal_op;
6354 tmp = load_cpu_field(spsr);
6355 } else {
6356 tmp = new_tmp();
6357 gen_helper_cpsr_read(tmp);
6358 }
6359 store_reg(s, rd, tmp);
6360 }
6361 break;
6362 case 0x1:
6363 if (op1 == 1) {
6364 /* branch/exchange thumb (bx). */
6365 tmp = load_reg(s, rm);
6366 gen_bx(s, tmp);
6367 } else if (op1 == 3) {
6368 /* clz */
6369 rd = (insn >> 12) & 0xf;
6370 tmp = load_reg(s, rm);
6371 gen_helper_clz(tmp, tmp);
6372 store_reg(s, rd, tmp);
6373 } else {
6374 goto illegal_op;
6375 }
6376 break;
6377 case 0x2:
6378 if (op1 == 1) {
6379 ARCH(5J); /* bxj */
6380 /* Trivial implementation equivalent to bx. */
6381 tmp = load_reg(s, rm);
6382 gen_bx(s, tmp);
6383 } else {
6384 goto illegal_op;
6385 }
6386 break;
6387 case 0x3:
6388 if (op1 != 1)
6389 goto illegal_op;
6390
6391 /* branch link/exchange thumb (blx) */
6392 tmp = load_reg(s, rm);
6393 tmp2 = new_tmp();
6394 tcg_gen_movi_i32(tmp2, s->pc);
6395 store_reg(s, 14, tmp2);
6396 gen_bx(s, tmp);
6397 break;
6398 case 0x5: /* saturating add/subtract */
6399 rd = (insn >> 12) & 0xf;
6400 rn = (insn >> 16) & 0xf;
6401 tmp = load_reg(s, rm);
6402 tmp2 = load_reg(s, rn);
6403 if (op1 & 2)
6404 gen_helper_double_saturate(tmp2, tmp2);
6405 if (op1 & 1)
6406 gen_helper_sub_saturate(tmp, tmp, tmp2);
6407 else
6408 gen_helper_add_saturate(tmp, tmp, tmp2);
6409 dead_tmp(tmp2);
6410 store_reg(s, rd, tmp);
6411 break;
6412 case 7:
6413 /* SMC instruction (op1 == 3)
6414 and undefined instructions (op1 == 0 || op1 == 2)
6415 will trap */
6416 if (op1 != 1) {
6417 goto illegal_op;
6418 }
6419 /* bkpt */
6420 gen_exception_insn(s, 4, EXCP_BKPT);
6421 break;
6422 case 0x8: /* signed multiply */
6423 case 0xa:
6424 case 0xc:
6425 case 0xe:
6426 rs = (insn >> 8) & 0xf;
6427 rn = (insn >> 12) & 0xf;
6428 rd = (insn >> 16) & 0xf;
6429 if (op1 == 1) {
6430 /* (32 * 16) >> 16 */
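/* smulw<y>/smlaw<y>: only the top 32 bits of the 48-bit significant
   product are kept, hence the 64-bit multiply followed by a right
   shift of 16.  sh bit 2 picks the top or bottom halfword of rs;
   when sh bit 1 is clear the result is also accumulated into rn
   with Q-flag tracking. */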
6431 tmp = load_reg(s, rm);
6432 tmp2 = load_reg(s, rs);
6433 if (sh & 4)
6434 tcg_gen_sari_i32(tmp2, tmp2, 16);
6435 else
6436 gen_sxth(tmp2);
6437 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6438 tcg_gen_shri_i64(tmp64, tmp64, 16);
6439 tmp = new_tmp();
6440 tcg_gen_trunc_i64_i32(tmp, tmp64);
6441 tcg_temp_free_i64(tmp64);
6442 if ((sh & 2) == 0) {
6443 tmp2 = load_reg(s, rn);
6444 gen_helper_add_setq(tmp, tmp, tmp2);
6445 dead_tmp(tmp2);
6446 }
6447 store_reg(s, rd, tmp);
6448 } else {
6449 /* 16 * 16 */
6450 tmp = load_reg(s, rm);
6451 tmp2 = load_reg(s, rs);
6452 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6453 dead_tmp(tmp2);
6454 if (op1 == 2) {
6455 tmp64 = tcg_temp_new_i64();
6456 tcg_gen_ext_i32_i64(tmp64, tmp);
6457 dead_tmp(tmp);
6458 gen_addq(s, tmp64, rn, rd);
6459 gen_storeq_reg(s, rn, rd, tmp64);
6460 tcg_temp_free_i64(tmp64);
6461 } else {
6462 if (op1 == 0) {
6463 tmp2 = load_reg(s, rn);
6464 gen_helper_add_setq(tmp, tmp, tmp2);
6465 dead_tmp(tmp2);
6466 }
6467 store_reg(s, rd, tmp);
6468 }
6469 }
6470 break;
6471 default:
6472 goto illegal_op;
6473 }
6474 } else if (((insn & 0x0e000000) == 0 &&
6475 (insn & 0x00000090) != 0x90) ||
6476 ((insn & 0x0e000000) == (1 << 25))) {
6477 int set_cc, logic_cc, shiftop;
6478
6479 op1 = (insn >> 21) & 0xf;
6480 set_cc = (insn >> 20) & 1;
6481 logic_cc = table_logic_cc[op1] & set_cc;
6482
6483 /* data processing instruction */
6484 if (insn & (1 << 25)) {
6485 /* immediate operand */
6486 val = insn & 0xff;
6487 shift = ((insn >> 8) & 0xf) * 2;
6488 if (shift) {
6489 val = (val >> shift) | (val << (32 - shift));
6490 }
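/* Example: imm12 = 0x4ff gives shift = 4 * 2 = 8, so
   val = 0xff ror 8 = 0xff000000; for flag-setting logical ops the
   shifter carry-out is then bit 31 of this operand (set below). */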
6491 tmp2 = new_tmp();
6492 tcg_gen_movi_i32(tmp2, val);
6493 if (logic_cc && shift) {
6494 gen_set_CF_bit31(tmp2);
6495 }
6496 } else {
6497 /* register */
6498 rm = (insn) & 0xf;
6499 tmp2 = load_reg(s, rm);
6500 shiftop = (insn >> 5) & 3;
6501 if (!(insn & (1 << 4))) {
6502 shift = (insn >> 7) & 0x1f;
6503 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6504 } else {
6505 rs = (insn >> 8) & 0xf;
6506 tmp = load_reg(s, rs);
6507 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6508 }
6509 }
6510 if (op1 != 0x0f && op1 != 0x0d) {
6511 rn = (insn >> 16) & 0xf;
6512 tmp = load_reg(s, rn);
6513 } else {
6514 TCGV_UNUSED(tmp);
6515 }
6516 rd = (insn >> 12) & 0xf;
6517 switch (op1) {
6518 case 0x00:
6519 tcg_gen_and_i32(tmp, tmp, tmp2);
6520 if (logic_cc) {
6521 gen_logic_CC(tmp);
6522 }
6523 store_reg_bx(env, s, rd, tmp);
6524 break;
6525 case 0x01:
6526 tcg_gen_xor_i32(tmp, tmp, tmp2);
6527 if (logic_cc) {
6528 gen_logic_CC(tmp);
6529 }
6530 store_reg_bx(env, s, rd, tmp);
6531 break;
6532 case 0x02:
6533 if (set_cc && rd == 15) {
6534 /* SUBS r15, ... is used for exception return. */
6535 if (IS_USER(s)) {
6536 goto illegal_op;
6537 }
6538 gen_helper_sub_cc(tmp, tmp, tmp2);
6539 gen_exception_return(s, tmp);
6540 } else {
6541 if (set_cc) {
6542 gen_helper_sub_cc(tmp, tmp, tmp2);
6543 } else {
6544 tcg_gen_sub_i32(tmp, tmp, tmp2);
6545 }
6546 store_reg_bx(env, s, rd, tmp);
6547 }
6548 break;
6549 case 0x03:
6550 if (set_cc) {
6551 gen_helper_sub_cc(tmp, tmp2, tmp);
6552 } else {
6553 tcg_gen_sub_i32(tmp, tmp2, tmp);
6554 }
6555 store_reg_bx(env, s, rd, tmp);
6556 break;
6557 case 0x04:
6558 if (set_cc) {
6559 gen_helper_add_cc(tmp, tmp, tmp2);
6560 } else {
6561 tcg_gen_add_i32(tmp, tmp, tmp2);
6562 }
6563 store_reg_bx(env, s, rd, tmp);
6564 break;
6565 case 0x05:
6566 if (set_cc) {
6567 gen_helper_adc_cc(tmp, tmp, tmp2);
6568 } else {
6569 gen_add_carry(tmp, tmp, tmp2);
6570 }
6571 store_reg_bx(env, s, rd, tmp);
6572 break;
6573 case 0x06:
6574 if (set_cc) {
6575 gen_helper_sbc_cc(tmp, tmp, tmp2);
6576 } else {
6577 gen_sub_carry(tmp, tmp, tmp2);
6578 }
6579 store_reg_bx(env, s, rd, tmp);
6580 break;
6581 case 0x07:
6582 if (set_cc) {
6583 gen_helper_sbc_cc(tmp, tmp2, tmp);
6584 } else {
6585 gen_sub_carry(tmp, tmp2, tmp);
6586 }
6587 store_reg_bx(env, s, rd, tmp);
6588 break;
6589 case 0x08:
6590 if (set_cc) {
6591 tcg_gen_and_i32(tmp, tmp, tmp2);
6592 gen_logic_CC(tmp);
6593 }
6594 dead_tmp(tmp);
6595 break;
6596 case 0x09:
6597 if (set_cc) {
6598 tcg_gen_xor_i32(tmp, tmp, tmp2);
6599 gen_logic_CC(tmp);
6600 }
6601 dead_tmp(tmp);
6602 break;
6603 case 0x0a:
6604 if (set_cc) {
6605 gen_helper_sub_cc(tmp, tmp, tmp2);
6606 }
6607 dead_tmp(tmp);
6608 break;
6609 case 0x0b:
6610 if (set_cc) {
6611 gen_helper_add_cc(tmp, tmp, tmp2);
6612 }
6613 dead_tmp(tmp);
6614 break;
6615 case 0x0c:
6616 tcg_gen_or_i32(tmp, tmp, tmp2);
6617 if (logic_cc) {
6618 gen_logic_CC(tmp);
6619 }
6620 store_reg_bx(env, s, rd, tmp);
6621 break;
6622 case 0x0d:
6623 if (logic_cc && rd == 15) {
6624 /* MOVS r15, ... is used for exception return. */
6625 if (IS_USER(s)) {
6626 goto illegal_op;
6627 }
6628 gen_exception_return(s, tmp2);
6629 } else {
6630 if (logic_cc) {
6631 gen_logic_CC(tmp2);
6632 }
6633 store_reg_bx(env, s, rd, tmp2);
6634 }
6635 break;
6636 case 0x0e:
6637 tcg_gen_andc_i32(tmp, tmp, tmp2);
6638 if (logic_cc) {
6639 gen_logic_CC(tmp);
6640 }
6641 store_reg_bx(env, s, rd, tmp);
6642 break;
6643 default:
6644 case 0x0f:
6645 tcg_gen_not_i32(tmp2, tmp2);
6646 if (logic_cc) {
6647 gen_logic_CC(tmp2);
6648 }
6649 store_reg_bx(env, s, rd, tmp2);
6650 break;
6651 }
6652 if (op1 != 0x0f && op1 != 0x0d) {
6653 dead_tmp(tmp2);
6654 }
6655 } else {
6656 /* other instructions */
6657 op1 = (insn >> 24) & 0xf;
6658 switch (op1) {
6659 case 0x0:
6660 case 0x1:
6661 /* multiplies, extra load/stores */
6662 sh = (insn >> 5) & 3;
6663 if (sh == 0) {
6664 if (op1 == 0x0) {
6665 rd = (insn >> 16) & 0xf;
6666 rn = (insn >> 12) & 0xf;
6667 rs = (insn >> 8) & 0xf;
6668 rm = (insn) & 0xf;
6669 op1 = (insn >> 20) & 0xf;
6670 switch (op1) {
6671 case 0: case 1: case 2: case 3: case 6:
6672 /* 32 bit mul */
6673 tmp = load_reg(s, rs);
6674 tmp2 = load_reg(s, rm);
6675 tcg_gen_mul_i32(tmp, tmp, tmp2);
6676 dead_tmp(tmp2);
6677 if (insn & (1 << 22)) {
6678 /* Subtract (mls) */
6679 ARCH(6T2);
6680 tmp2 = load_reg(s, rn);
6681 tcg_gen_sub_i32(tmp, tmp2, tmp);
6682 dead_tmp(tmp2);
6683 } else if (insn & (1 << 21)) {
6684 /* Add */
6685 tmp2 = load_reg(s, rn);
6686 tcg_gen_add_i32(tmp, tmp, tmp2);
6687 dead_tmp(tmp2);
6688 }
6689 if (insn & (1 << 20))
6690 gen_logic_CC(tmp);
6691 store_reg(s, rd, tmp);
6692 break;
6693 case 4:
6694 /* 64 bit mul double accumulate (UMAAL) */
6695 ARCH(6);
6696 tmp = load_reg(s, rs);
6697 tmp2 = load_reg(s, rm);
6698 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6699 gen_addq_lo(s, tmp64, rn);
6700 gen_addq_lo(s, tmp64, rd);
6701 gen_storeq_reg(s, rn, rd, tmp64);
6702 tcg_temp_free_i64(tmp64);
6703 break;
6704 case 8: case 9: case 10: case 11:
6705 case 12: case 13: case 14: case 15:
6706 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6707 tmp = load_reg(s, rs);
6708 tmp2 = load_reg(s, rm);
6709 if (insn & (1 << 22)) {
6710 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6711 } else {
6712 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6713 }
6714 if (insn & (1 << 21)) { /* mult accumulate */
6715 gen_addq(s, tmp64, rn, rd);
6716 }
6717 if (insn & (1 << 20)) {
6718 gen_logicq_cc(tmp64);
6719 }
6720 gen_storeq_reg(s, rn, rd, tmp64);
6721 tcg_temp_free_i64(tmp64);
6722 break;
6723 default:
6724 goto illegal_op;
6725 }
6726 } else {
6727 rn = (insn >> 16) & 0xf;
6728 rd = (insn >> 12) & 0xf;
6729 if (insn & (1 << 23)) {
6730 /* load/store exclusive */
6731 op1 = (insn >> 21) & 0x3;
6732 if (op1)
6733 ARCH(6K);
6734 else
6735 ARCH(6);
6736 addr = tcg_temp_local_new_i32();
6737 load_reg_var(s, addr, rn);
6738 if (insn & (1 << 20)) {
6739 switch (op1) {
6740 case 0: /* ldrex */
6741 gen_load_exclusive(s, rd, 15, addr, 2);
6742 break;
6743 case 1: /* ldrexd */
6744 gen_load_exclusive(s, rd, rd + 1, addr, 3);
6745 break;
6746 case 2: /* ldrexb */
6747 gen_load_exclusive(s, rd, 15, addr, 0);
6748 break;
6749 case 3: /* ldrexh */
6750 gen_load_exclusive(s, rd, 15, addr, 1);
6751 break;
6752 default:
6753 abort();
6754 }
6755 } else {
6756 rm = insn & 0xf;
6757 switch (op1) {
6758 case 0: /* strex */
6759 gen_store_exclusive(s, rd, rm, 15, addr, 2);
6760 break;
6761 case 1: /* strexd */
6762 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
6763 break;
6764 case 2: /* strexb */
6765 gen_store_exclusive(s, rd, rm, 15, addr, 0);
6766 break;
6767 case 3: /* strexh */
6768 gen_store_exclusive(s, rd, rm, 15, addr, 1);
6769 break;
6770 default:
6771 abort();
6772 }
6773 }
6774 tcg_temp_free(addr);
6775 } else {
6776 /* SWP instruction */
6777 rm = (insn) & 0xf;
6778
6779 /* ??? This is not really atomic. However, we know
6780 we never have multiple CPUs running in parallel,
6781 so it is good enough. */
6782 addr = load_reg(s, rn);
6783 tmp = load_reg(s, rm);
6784 if (insn & (1 << 22)) {
6785 tmp2 = gen_ld8u(addr, IS_USER(s));
6786 gen_st8(tmp, addr, IS_USER(s));
6787 } else {
6788 tmp2 = gen_ld32(addr, IS_USER(s));
6789 gen_st32(tmp, addr, IS_USER(s));
6790 }
6791 dead_tmp(addr);
6792 store_reg(s, rd, tmp2);
6793 }
6794 }
6795 } else {
6796 int address_offset;
6797 int load;
6798 /* Misc load/store */
6799 rn = (insn >> 16) & 0xf;
6800 rd = (insn >> 12) & 0xf;
6801 addr = load_reg(s, rn);
6802 if (insn & (1 << 24))
6803 gen_add_datah_offset(s, insn, 0, addr);
6804 address_offset = 0;
6805 if (insn & (1 << 20)) {
6806 /* load */
6807 switch (sh) {
6808 case 1:
6809 tmp = gen_ld16u(addr, IS_USER(s));
6810 break;
6811 case 2:
6812 tmp = gen_ld8s(addr, IS_USER(s));
6813 break;
6814 default:
6815 case 3:
6816 tmp = gen_ld16s(addr, IS_USER(s));
6817 break;
6818 }
6819 load = 1;
6820 } else if (sh & 2) {
6821 /* doubleword */
6822 if (sh & 1) {
6823 /* store */
6824 tmp = load_reg(s, rd);
6825 gen_st32(tmp, addr, IS_USER(s));
6826 tcg_gen_addi_i32(addr, addr, 4);
6827 tmp = load_reg(s, rd + 1);
6828 gen_st32(tmp, addr, IS_USER(s));
6829 load = 0;
6830 } else {
6831 /* load */
6832 tmp = gen_ld32(addr, IS_USER(s));
6833 store_reg(s, rd, tmp);
6834 tcg_gen_addi_i32(addr, addr, 4);
6835 tmp = gen_ld32(addr, IS_USER(s));
6836 rd++;
6837 load = 1;
6838 }
6839 address_offset = -4;
6840 } else {
6841 /* store */
6842 tmp = load_reg(s, rd);
6843 gen_st16(tmp, addr, IS_USER(s));
6844 load = 0;
6845 }
6846 /* Perform base writeback before storing the loaded value to
6847 ensure correct behavior with overlapping index registers.
6848 ldrd with base writeback is undefined if the
6849 destination and index registers overlap. */
6850 if (!(insn & (1 << 24))) {
6851 gen_add_datah_offset(s, insn, address_offset, addr);
6852 store_reg(s, rn, addr);
6853 } else if (insn & (1 << 21)) {
6854 if (address_offset)
6855 tcg_gen_addi_i32(addr, addr, address_offset);
6856 store_reg(s, rn, addr);
6857 } else {
6858 dead_tmp(addr);
6859 }
6860 if (load) {
6861 /* Complete the load. */
6862 store_reg(s, rd, tmp);
6863 }
6864 }
6865 break;
6866 case 0x4:
6867 case 0x5:
6868 goto do_ldst;
6869 case 0x6:
6870 case 0x7:
6871 if (insn & (1 << 4)) {
6872 ARCH(6);
6873 /* ARMv6 Media instructions. */
6874 rm = insn & 0xf;
6875 rn = (insn >> 16) & 0xf;
6876 rd = (insn >> 12) & 0xf;
6877 rs = (insn >> 8) & 0xf;
6878 switch ((insn >> 23) & 3) {
6879 case 0: /* Parallel add/subtract. */
6880 op1 = (insn >> 20) & 7;
6881 tmp = load_reg(s, rn);
6882 tmp2 = load_reg(s, rm);
6883 sh = (insn >> 5) & 7;
6884 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6885 goto illegal_op;
6886 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6887 dead_tmp(tmp2);
6888 store_reg(s, rd, tmp);
6889 break;
6890 case 1:
6891 if ((insn & 0x00700020) == 0) {
6892 /* Halfword pack. */
6893 tmp = load_reg(s, rn);
6894 tmp2 = load_reg(s, rm);
6895 shift = (insn >> 7) & 0x1f;
6896 if (insn & (1 << 6)) {
6897 /* pkhtb */
6898 if (shift == 0)
6899 shift = 31;
6900 tcg_gen_sari_i32(tmp2, tmp2, shift);
6901 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6902 tcg_gen_ext16u_i32(tmp2, tmp2);
6903 } else {
6904 /* pkhbt */
6905 if (shift)
6906 tcg_gen_shli_i32(tmp2, tmp2, shift);
6907 tcg_gen_ext16u_i32(tmp, tmp);
6908 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6909 }
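/* Worked example: with rn = 0x11112222 and rm = 0x33334444,
   "pkhbt rd, rn, rm" yields 0x33332222 (bottom half from rn, top
   half from rm), and "pkhtb rd, rn, rm, asr #16" yields 0x11113333. */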
6910 tcg_gen_or_i32(tmp, tmp, tmp2);
6911 dead_tmp(tmp2);
6912 store_reg(s, rd, tmp);
6913 } else if ((insn & 0x00200020) == 0x00200000) {
6914 /* [us]sat */
6915 tmp = load_reg(s, rm);
6916 shift = (insn >> 7) & 0x1f;
6917 if (insn & (1 << 6)) {
6918 if (shift == 0)
6919 shift = 31;
6920 tcg_gen_sari_i32(tmp, tmp, shift);
6921 } else {
6922 tcg_gen_shli_i32(tmp, tmp, shift);
6923 }
6924 sh = (insn >> 16) & 0x1f;
6925 tmp2 = tcg_const_i32(sh);
6926 if (insn & (1 << 22))
6927 gen_helper_usat(tmp, tmp, tmp2);
6928 else
6929 gen_helper_ssat(tmp, tmp, tmp2);
6930 tcg_temp_free_i32(tmp2);
6931 store_reg(s, rd, tmp);
6932 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6933 /* [us]sat16 */
6934 tmp = load_reg(s, rm);
6935 sh = (insn >> 16) & 0x1f;
6936 tmp2 = tcg_const_i32(sh);
6937 if (insn & (1 << 22))
6938 gen_helper_usat16(tmp, tmp, tmp2);
6939 else
6940 gen_helper_ssat16(tmp, tmp, tmp2);
6941 tcg_temp_free_i32(tmp2);
6942 store_reg(s, rd, tmp);
6943 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6944 /* Select bytes. */
6945 tmp = load_reg(s, rn);
6946 tmp2 = load_reg(s, rm);
6947 tmp3 = new_tmp();
6948 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6949 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6950 dead_tmp(tmp3);
6951 dead_tmp(tmp2);
6952 store_reg(s, rd, tmp);
6953 } else if ((insn & 0x000003e0) == 0x00000060) {
6954 tmp = load_reg(s, rm);
6955 shift = (insn >> 10) & 3;
6956 /* ??? In many cases it's not necessary to do a
6957 rotate, a shift is sufficient. */
6958 if (shift != 0)
6959 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
6960 op1 = (insn >> 20) & 7;
6961 switch (op1) {
6962 case 0: gen_sxtb16(tmp); break;
6963 case 2: gen_sxtb(tmp); break;
6964 case 3: gen_sxth(tmp); break;
6965 case 4: gen_uxtb16(tmp); break;
6966 case 6: gen_uxtb(tmp); break;
6967 case 7: gen_uxth(tmp); break;
6968 default: goto illegal_op;
6969 }
6970 if (rn != 15) {
6971 tmp2 = load_reg(s, rn);
6972 if ((op1 & 3) == 0) {
6973 gen_add16(tmp, tmp2);
6974 } else {
6975 tcg_gen_add_i32(tmp, tmp, tmp2);
6976 dead_tmp(tmp2);
6977 }
6978 }
6979 store_reg(s, rd, tmp);
6980 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6981 /* rev */
6982 tmp = load_reg(s, rm);
6983 if (insn & (1 << 22)) {
6984 if (insn & (1 << 7)) {
6985 gen_revsh(tmp);
6986 } else {
6987 ARCH(6T2);
6988 gen_helper_rbit(tmp, tmp);
6989 }
6990 } else {
6991 if (insn & (1 << 7))
6992 gen_rev16(tmp);
6993 else
6994 tcg_gen_bswap32_i32(tmp, tmp);
6995 }
6996 store_reg(s, rd, tmp);
6997 } else {
6998 goto illegal_op;
6999 }
7000 break;
7001 case 2: /* Multiplies (Type 3). */
7002 tmp = load_reg(s, rm);
7003 tmp2 = load_reg(s, rs);
7004 if (insn & (1 << 20)) {
7005 /* Signed multiply most significant [accumulate].
7006 (SMMUL, SMMLA, SMMLS) */
7007 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7008
7009 if (rd != 15) {
7010 tmp = load_reg(s, rd);
7011 if (insn & (1 << 6)) {
7012 tmp64 = gen_subq_msw(tmp64, tmp);
7013 } else {
7014 tmp64 = gen_addq_msw(tmp64, tmp);
7015 }
7016 }
7017 if (insn & (1 << 5)) {
7018 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7019 }
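/* Adding 0x80000000 before the truncation below rounds the result
   to nearest instead of truncating: these are the R variants
   (smmulr/smmlar/smmlsr). */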
7020 tcg_gen_shri_i64(tmp64, tmp64, 32);
7021 tmp = new_tmp();
7022 tcg_gen_trunc_i64_i32(tmp, tmp64);
7023 tcg_temp_free_i64(tmp64);
7024 store_reg(s, rn, tmp);
7025 } else {
7026 if (insn & (1 << 5))
7027 gen_swap_half(tmp2);
7028 gen_smul_dual(tmp, tmp2);
7029 /* This addition cannot overflow. */
7030 if (insn & (1 << 6)) {
7031 tcg_gen_sub_i32(tmp, tmp, tmp2);
7032 } else {
7033 tcg_gen_add_i32(tmp, tmp, tmp2);
7034 }
7035 dead_tmp(tmp2);
7036 if (insn & (1 << 22)) {
7037 /* smlald, smlsld */
7038 tmp64 = tcg_temp_new_i64();
7039 tcg_gen_ext_i32_i64(tmp64, tmp);
7040 dead_tmp(tmp);
7041 gen_addq(s, tmp64, rd, rn);
7042 gen_storeq_reg(s, rd, rn, tmp64);
7043 tcg_temp_free_i64(tmp64);
7044 } else {
7045 /* smuad, smusd, smlad, smlsd */
7046 if (rd != 15)
7047 {
7048 tmp2 = load_reg(s, rd);
7049 gen_helper_add_setq(tmp, tmp, tmp2);
7050 dead_tmp(tmp2);
7051 }
7052 store_reg(s, rn, tmp);
7053 }
7054 }
7055 break;
7056 case 3:
7057 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7058 switch (op1) {
7059 case 0: /* Unsigned sum of absolute differences. */
7060 ARCH(6);
7061 tmp = load_reg(s, rm);
7062 tmp2 = load_reg(s, rs);
7063 gen_helper_usad8(tmp, tmp, tmp2);
7064 dead_tmp(tmp2);
7065 if (rd != 15) {
7066 tmp2 = load_reg(s, rd);
7067 tcg_gen_add_i32(tmp, tmp, tmp2);
7068 dead_tmp(tmp2);
7069 }
7070 store_reg(s, rn, tmp);
7071 break;
7072 case 0x20: case 0x24: case 0x28: case 0x2c:
7073 /* Bitfield insert/clear. */
7074 ARCH(6T2);
7075 shift = (insn >> 7) & 0x1f;
7076 i = (insn >> 16) & 0x1f;
7077 i = i + 1 - shift;
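/* i is now the field width: e.g. "bfi rd, rm, #4, #8" has lsb = 4
   and msb = 11, giving i = 8 and an insert mask of
   (1 << 8) - 1 = 0xff applied at bit 4. */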
7078 if (rm == 15) {
7079 tmp = new_tmp();
7080 tcg_gen_movi_i32(tmp, 0);
7081 } else {
7082 tmp = load_reg(s, rm);
7083 }
7084 if (i != 32) {
7085 tmp2 = load_reg(s, rd);
7086 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7087 dead_tmp(tmp2);
7088 }
7089 store_reg(s, rd, tmp);
7090 break;
7091 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7092 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7093 ARCH(6T2);
7094 tmp = load_reg(s, rm);
7095 shift = (insn >> 7) & 0x1f;
7096 i = ((insn >> 16) & 0x1f) + 1;
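/* shift is the lsb and i the field width, so e.g.
   "ubfx rd, rm, #8, #4" extracts (rm >> 8) & 0xf; the
   lsb + width > 32 check rejects encodings that would read past
   bit 31. */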
7097 if (shift + i > 32)
7098 goto illegal_op;
7099 if (i < 32) {
7100 if (op1 & 0x20) {
7101 gen_ubfx(tmp, shift, (1u << i) - 1);
7102 } else {
7103 gen_sbfx(tmp, shift, i);
7104 }
7105 }
7106 store_reg(s, rd, tmp);
7107 break;
7108 default:
7109 goto illegal_op;
7110 }
7111 break;
7112 }
7113 break;
7114 }
7115 do_ldst:
7116 /* Check for undefined extension instructions
7117 * per the ARM Bible, i.e.:
7118 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7119 */
7120 sh = (0xf << 20) | (0xf << 4);
7121 if (op1 == 0x7 && ((insn & sh) == sh))
7122 {
7123 goto illegal_op;
7124 }
7125 /* load/store byte/word */
7126 rn = (insn >> 16) & 0xf;
7127 rd = (insn >> 12) & 0xf;
7128 tmp2 = load_reg(s, rn);
7129 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7130 if (insn & (1 << 24))
7131 gen_add_data_offset(s, insn, tmp2);
7132 if (insn & (1 << 20)) {
7133 /* load */
7134 if (insn & (1 << 22)) {
7135 tmp = gen_ld8u(tmp2, i);
7136 } else {
7137 tmp = gen_ld32(tmp2, i);
7138 }
7139 } else {
7140 /* store */
7141 tmp = load_reg(s, rd);
7142 if (insn & (1 << 22))
7143 gen_st8(tmp, tmp2, i);
7144 else
7145 gen_st32(tmp, tmp2, i);
7146 }
7147 if (!(insn & (1 << 24))) {
7148 gen_add_data_offset(s, insn, tmp2);
7149 store_reg(s, rn, tmp2);
7150 } else if (insn & (1 << 21)) {
7151 store_reg(s, rn, tmp2);
7152 } else {
7153 dead_tmp(tmp2);
7154 }
7155 if (insn & (1 << 20)) {
7156 /* Complete the load. */
7157 if (rd == 15)
7158 gen_bx(s, tmp);
7159 else
7160 store_reg(s, rd, tmp);
7161 }
7162 break;
7163 case 0x08:
7164 case 0x09:
7165 {
7166 int j, n, user, loaded_base;
7167 TCGv loaded_var;
7168 /* load/store multiple words */
7169 /* XXX: store correct base if write back */
7170 user = 0;
7171 if (insn & (1 << 22)) {
7172 if (IS_USER(s))
7173 goto illegal_op; /* only usable in supervisor mode */
7174
7175 if ((insn & (1 << 15)) == 0)
7176 user = 1;
7177 }
7178 rn = (insn >> 16) & 0xf;
7179 addr = load_reg(s, rn);
7180
7181 /* compute total size */
7182 loaded_base = 0;
7183 TCGV_UNUSED(loaded_var);
7184 n = 0;
7185 for (i = 0; i < 16; i++) {
7186 if (insn & (1 << i))
7187 n++;
7188 }
7189 /* XXX: test invalid n == 0 case? */
7190 if (insn & (1 << 23)) {
7191 if (insn & (1 << 24)) {
7192 /* pre increment */
7193 tcg_gen_addi_i32(addr, addr, 4);
7194 } else {
7195 /* post increment */
7196 }
7197 } else {
7198 if (insn & (1 << 24)) {
7199 /* pre decrement */
7200 tcg_gen_addi_i32(addr, addr, -(n * 4));
7201 } else {
7202 /* post decrement */
7203 if (n != 1)
7204 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7205 }
7206 }
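/* After this adjustment the transfers below always walk upwards in
   steps of 4.  E.g. "ldmdb r0, {r1,r2,r3}" (pre-decrement, n = 3)
   starts at r0 - 12, loading r1 from [r0-12], r2 from [r0-8] and
   r3 from [r0-4]; "ldmda" (post-decrement) would start at r0 - 8
   and end at r0. */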
7207 j = 0;
7208 for (i = 0; i < 16; i++) {
7209 if (insn & (1 << i)) {
7210 if (insn & (1 << 20)) {
7211 /* load */
7212 tmp = gen_ld32(addr, IS_USER(s));
7213 if (i == 15) {
7214 gen_bx(s, tmp);
7215 } else if (user) {
7216 tmp2 = tcg_const_i32(i);
7217 gen_helper_set_user_reg(tmp2, tmp);
7218 tcg_temp_free_i32(tmp2);
7219 dead_tmp(tmp);
7220 } else if (i == rn) {
7221 loaded_var = tmp;
7222 loaded_base = 1;
7223 } else {
7224 store_reg(s, i, tmp);
7225 }
7226 } else {
7227 /* store */
7228 if (i == 15) {
7229 /* special case: r15 = PC + 8 */
7230 val = (long)s->pc + 4;
7231 tmp = new_tmp();
7232 tcg_gen_movi_i32(tmp, val);
7233 } else if (user) {
7234 tmp = new_tmp();
7235 tmp2 = tcg_const_i32(i);
7236 gen_helper_get_user_reg(tmp, tmp2);
7237 tcg_temp_free_i32(tmp2);
7238 } else {
7239 tmp = load_reg(s, i);
7240 }
7241 gen_st32(tmp, addr, IS_USER(s));
7242 }
7243 j++;
7244 /* no need to add after the last transfer */
7245 if (j != n)
7246 tcg_gen_addi_i32(addr, addr, 4);
7247 }
7248 }
7249 if (insn & (1 << 21)) {
7250 /* write back */
7251 if (insn & (1 << 23)) {
7252 if (insn & (1 << 24)) {
7253 /* pre increment */
7254 } else {
7255 /* post increment */
7256 tcg_gen_addi_i32(addr, addr, 4);
7257 }
7258 } else {
7259 if (insn & (1 << 24)) {
7260 /* pre decrement */
7261 if (n != 1)
7262 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7263 } else {
7264 /* post decrement */
7265 tcg_gen_addi_i32(addr, addr, -(n * 4));
7266 }
7267 }
7268 store_reg(s, rn, addr);
7269 } else {
7270 dead_tmp(addr);
7271 }
7272 if (loaded_base) {
7273 store_reg(s, rn, loaded_var);
7274 }
7275 if ((insn & (1 << 22)) && !user) {
7276 /* Restore CPSR from SPSR. */
7277 tmp = load_cpu_field(spsr);
7278 gen_set_cpsr(tmp, 0xffffffff);
7279 dead_tmp(tmp);
7280 s->is_jmp = DISAS_UPDATE;
7281 }
7282 }
7283 break;
7284 case 0xa:
7285 case 0xb:
7286 {
7287 int32_t offset;
7288
7289 /* branch (and link) */
7290 val = (int32_t)s->pc;
7291 if (insn & (1 << 24)) {
7292 tmp = new_tmp();
7293 tcg_gen_movi_i32(tmp, val);
7294 store_reg(s, 14, tmp);
7295 }
7296 offset = (((int32_t)insn << 8) >> 8);
7297 val += (offset << 2) + 4;
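/* s->pc already points at the next instruction, so the extra +4
   gives the architectural PC-relative base of "address + 8":
   e.g. "b ." at address A has imm24 = -2 and branches to
   A + 8 + (-2 << 2) = A. */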
7298 gen_jmp(s, val);
7299 }
7300 break;
7301 case 0xc:
7302 case 0xd:
7303 case 0xe:
7304 /* Coprocessor. */
7305 if (disas_coproc_insn(env, s, insn))
7306 goto illegal_op;
7307 break;
7308 case 0xf:
7309 /* swi */
7310 gen_set_pc_im(s->pc);
7311 s->is_jmp = DISAS_SWI;
7312 break;
7313 default:
7314 illegal_op:
7315 gen_exception_insn(s, 4, EXCP_UDEF);
7316 break;
7317 }
7318 }
7319 }
7320
7321 /* Return true if this is a Thumb-2 logical op. */
7322 static int
7323 thumb2_logic_op(int op)
7324 {
7325 return (op < 8);
7326 }
7327
7328 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7329 then set condition code flags based on the result of the operation.
7330 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7331 to the high bit of T1.
7332 Returns zero if the opcode is valid. */
7333
7334 static int
7335 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7336 {
7337 int logic_cc;
7338
7339 logic_cc = 0;
7340 switch (op) {
7341 case 0: /* and */
7342 tcg_gen_and_i32(t0, t0, t1);
7343 logic_cc = conds;
7344 break;
7345 case 1: /* bic */
7346 tcg_gen_andc_i32(t0, t0, t1);
7347 logic_cc = conds;
7348 break;
7349 case 2: /* orr */
7350 tcg_gen_or_i32(t0, t0, t1);
7351 logic_cc = conds;
7352 break;
7353 case 3: /* orn */
7354 tcg_gen_not_i32(t1, t1);
7355 tcg_gen_or_i32(t0, t0, t1);
7356 logic_cc = conds;
7357 break;
7358 case 4: /* eor */
7359 tcg_gen_xor_i32(t0, t0, t1);
7360 logic_cc = conds;
7361 break;
7362 case 8: /* add */
7363 if (conds)
7364 gen_helper_add_cc(t0, t0, t1);
7365 else
7366 tcg_gen_add_i32(t0, t0, t1);
7367 break;
7368 case 10: /* adc */
7369 if (conds)
7370 gen_helper_adc_cc(t0, t0, t1);
7371 else
7372 gen_adc(t0, t1);
7373 break;
7374 case 11: /* sbc */
7375 if (conds)
7376 gen_helper_sbc_cc(t0, t0, t1);
7377 else
7378 gen_sub_carry(t0, t0, t1);
7379 break;
7380 case 13: /* sub */
7381 if (conds)
7382 gen_helper_sub_cc(t0, t0, t1);
7383 else
7384 tcg_gen_sub_i32(t0, t0, t1);
7385 break;
7386 case 14: /* rsb */
7387 if (conds)
7388 gen_helper_sub_cc(t0, t1, t0);
7389 else
7390 tcg_gen_sub_i32(t0, t1, t0);
7391 break;
7392 default: /* 5, 6, 7, 9, 12, 15. */
7393 return 1;
7394 }
7395 if (logic_cc) {
7396 gen_logic_CC(t0);
7397 if (shifter_out)
7398 gen_set_CF_bit31(t1);
7399 }
7400 return 0;
7401 }
7402
7403 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7404 is not legal. */
7405 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7406 {
7407 uint32_t insn, imm, shift, offset;
7408 uint32_t rd, rn, rm, rs;
7409 TCGv tmp;
7410 TCGv tmp2;
7411 TCGv tmp3;
7412 TCGv addr;
7413 TCGv_i64 tmp64;
7414 int op;
7415 int shiftop;
7416 int conds;
7417 int logic_cc;
7418
7419 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7420 || arm_feature (env, ARM_FEATURE_M))) {
7421 /* Thumb-1 cores may need to treat bl and blx as a pair of
7422 16-bit instructions to get correct prefetch abort behavior. */
7423 insn = insn_hw1;
7424 if ((insn & (1 << 12)) == 0) {
7425 /* Second half of blx. */
7426 offset = ((insn & 0x7ff) << 1);
7427 tmp = load_reg(s, 14);
7428 tcg_gen_addi_i32(tmp, tmp, offset);
7429 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7430
7431 tmp2 = new_tmp();
7432 tcg_gen_movi_i32(tmp2, s->pc | 1);
7433 store_reg(s, 14, tmp2);
7434 gen_bx(s, tmp);
7435 return 0;
7436 }
7437 if (insn & (1 << 11)) {
7438 /* Second half of bl. */
7439 offset = ((insn & 0x7ff) << 1) | 1;
7440 tmp = load_reg(s, 14);
7441 tcg_gen_addi_i32(tmp, tmp, offset);
7442
7443 tmp2 = new_tmp();
7444 tcg_gen_movi_i32(tmp2, s->pc | 1);
7445 store_reg(s, 14, tmp2);
7446 gen_bx(s, tmp);
7447 return 0;
7448 }
7449 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7450 /* Instruction spans a page boundary. Implement it as two
7451 16-bit instructions in case the second half causes a
7452 prefetch abort. */
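/* First half of bl/blx: hw1[10:0] holds offset bits [22:12],
   sign-extended, so ((insn << 21) >> 9) is signextend(imm11) << 12.
   LR temporarily holds PC + 4 plus this upper part; the second
   half (handled above) adds offset[11:1] and branches. */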
7453 offset = ((int32_t)insn << 21) >> 9;
7454 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7455 return 0;
7456 }
7457 /* Fall through to 32-bit decode. */
7458 }
7459
7460 insn = lduw_code(s->pc);
7461 s->pc += 2;
7462 insn |= (uint32_t)insn_hw1 << 16;
7463
7464 if ((insn & 0xf800e800) != 0xf000e800) {
7465 ARCH(6T2);
7466 }
7467
7468 rn = (insn >> 16) & 0xf;
7469 rs = (insn >> 12) & 0xf;
7470 rd = (insn >> 8) & 0xf;
7471 rm = insn & 0xf;
7472 switch ((insn >> 25) & 0xf) {
7473 case 0: case 1: case 2: case 3:
7474 /* 16-bit instructions. Should never happen. */
7475 abort();
7476 case 4:
7477 if (insn & (1 << 22)) {
7478 /* Other load/store, table branch. */
7479 if (insn & 0x01200000) {
7480 /* Load/store doubleword. */
7481 if (rn == 15) {
7482 addr = new_tmp();
7483 tcg_gen_movi_i32(addr, s->pc & ~3);
7484 } else {
7485 addr = load_reg(s, rn);
7486 }
7487 offset = (insn & 0xff) * 4;
7488 if ((insn & (1 << 23)) == 0)
7489 offset = -offset;
7490 if (insn & (1 << 24)) {
7491 tcg_gen_addi_i32(addr, addr, offset);
7492 offset = 0;
7493 }
7494 if (insn & (1 << 20)) {
7495 /* ldrd */
7496 tmp = gen_ld32(addr, IS_USER(s));
7497 store_reg(s, rs, tmp);
7498 tcg_gen_addi_i32(addr, addr, 4);
7499 tmp = gen_ld32(addr, IS_USER(s));
7500 store_reg(s, rd, tmp);
7501 } else {
7502 /* strd */
7503 tmp = load_reg(s, rs);
7504 gen_st32(tmp, addr, IS_USER(s));
7505 tcg_gen_addi_i32(addr, addr, 4);
7506 tmp = load_reg(s, rd);
7507 gen_st32(tmp, addr, IS_USER(s));
7508 }
7509 if (insn & (1 << 21)) {
7510 /* Base writeback. */
7511 if (rn == 15)
7512 goto illegal_op;
7513 tcg_gen_addi_i32(addr, addr, offset - 4);
7514 store_reg(s, rn, addr);
7515 } else {
7516 dead_tmp(addr);
7517 }
7518 } else if ((insn & (1 << 23)) == 0) {
7519 /* Load/store exclusive word. */
7520 addr = tcg_temp_local_new();
7521 load_reg_var(s, addr, rn);
7522 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7523 if (insn & (1 << 20)) {
7524 gen_load_exclusive(s, rs, 15, addr, 2);
7525 } else {
7526 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7527 }
7528 tcg_temp_free(addr);
7529 } else if ((insn & (1 << 6)) == 0) {
7530 /* Table Branch. */
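/* E.g. "tbb [rn, rm]" branches to PC + 2 * byte_at(rn + rm);
   "tbh [rn, rm, lsl #1]" uses a halfword table at rn + 2 * rm.
   When rn is r15 the table immediately follows this instruction. */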
7531 if (rn == 15) {
7532 addr = new_tmp();
7533 tcg_gen_movi_i32(addr, s->pc);
7534 } else {
7535 addr = load_reg(s, rn);
7536 }
7537 tmp = load_reg(s, rm);
7538 tcg_gen_add_i32(addr, addr, tmp);
7539 if (insn & (1 << 4)) {
7540 /* tbh */
7541 tcg_gen_add_i32(addr, addr, tmp);
7542 dead_tmp(tmp);
7543 tmp = gen_ld16u(addr, IS_USER(s));
7544 } else { /* tbb */
7545 dead_tmp(tmp);
7546 tmp = gen_ld8u(addr, IS_USER(s));
7547 }
7548 dead_tmp(addr);
7549 tcg_gen_shli_i32(tmp, tmp, 1);
7550 tcg_gen_addi_i32(tmp, tmp, s->pc);
7551 store_reg(s, 15, tmp);
7552 } else {
7553 /* Load/store exclusive byte/halfword/doubleword. */
7554 ARCH(7);
7555 op = (insn >> 4) & 0x3;
7556 if (op == 2) {
7557 goto illegal_op;
7558 }
7559 addr = tcg_temp_local_new();
7560 load_reg_var(s, addr, rn);
7561 if (insn & (1 << 20)) {
7562 gen_load_exclusive(s, rs, rd, addr, op);
7563 } else {
7564 gen_store_exclusive(s, rm, rs, rd, addr, op);
7565 }
7566 tcg_temp_free(addr);
7567 }
7568 } else {
7569 /* Load/store multiple, RFE, SRS. */
7570 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7571 /* Not available in user mode. */
7572 if (IS_USER(s))
7573 goto illegal_op;
7574 if (insn & (1 << 20)) {
7575 /* rfe */
7576 addr = load_reg(s, rn);
7577 if ((insn & (1 << 24)) == 0)
7578 tcg_gen_addi_i32(addr, addr, -8);
7579 /* Load PC into tmp and CPSR into tmp2. */
7580 tmp = gen_ld32(addr, 0);
7581 tcg_gen_addi_i32(addr, addr, 4);
7582 tmp2 = gen_ld32(addr, 0);
7583 if (insn & (1 << 21)) {
7584 /* Base writeback. */
7585 if (insn & (1 << 24)) {
7586 tcg_gen_addi_i32(addr, addr, 4);
7587 } else {
7588 tcg_gen_addi_i32(addr, addr, -4);
7589 }
7590 store_reg(s, rn, addr);
7591 } else {
7592 dead_tmp(addr);
7593 }
7594 gen_rfe(s, tmp, tmp2);
7595 } else {
7596 /* srs */
7597 op = (insn & 0x1f);
7598 addr = new_tmp();
7599 tmp = tcg_const_i32(op);
7600 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7601 tcg_temp_free_i32(tmp);
7602 if ((insn & (1 << 24)) == 0) {
7603 tcg_gen_addi_i32(addr, addr, -8);
7604 }
7605 tmp = load_reg(s, 14);
7606 gen_st32(tmp, addr, 0);
7607 tcg_gen_addi_i32(addr, addr, 4);
7608 tmp = new_tmp();
7609 gen_helper_cpsr_read(tmp);
7610 gen_st32(tmp, addr, 0);
7611 if (insn & (1 << 21)) {
7612 if ((insn & (1 << 24)) == 0) {
7613 tcg_gen_addi_i32(addr, addr, -4);
7614 } else {
7615 tcg_gen_addi_i32(addr, addr, 4);
7616 }
7617 tmp = tcg_const_i32(op);
7618 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7619 tcg_temp_free_i32(tmp);
7620 } else {
7621 dead_tmp(addr);
7622 }
7623 }
7624 } else {
7625 int i;
7626 /* Load/store multiple. */
7627 addr = load_reg(s, rn);
7628 offset = 0;
7629 for (i = 0; i < 16; i++) {
7630 if (insn & (1 << i))
7631 offset += 4;
7632 }
7633 if (insn & (1 << 24)) {
7634 tcg_gen_addi_i32(addr, addr, -offset);
7635 }
7636
7637 for (i = 0; i < 16; i++) {
7638 if ((insn & (1 << i)) == 0)
7639 continue;
7640 if (insn & (1 << 20)) {
7641 /* Load. */
7642 tmp = gen_ld32(addr, IS_USER(s));
7643 if (i == 15) {
7644 gen_bx(s, tmp);
7645 } else {
7646 store_reg(s, i, tmp);
7647 }
7648 } else {
7649 /* Store. */
7650 tmp = load_reg(s, i);
7651 gen_st32(tmp, addr, IS_USER(s));
7652 }
7653 tcg_gen_addi_i32(addr, addr, 4);
7654 }
7655 if (insn & (1 << 21)) {
7656 /* Base register writeback. */
7657 if (insn & (1 << 24)) {
7658 tcg_gen_addi_i32(addr, addr, -offset);
7659 }
7660 /* Fault if writeback register is in register list. */
7661 if (insn & (1 << rn))
7662 goto illegal_op;
7663 store_reg(s, rn, addr);
7664 } else {
7665 dead_tmp(addr);
7666 }
7667 }
7668 }
7669 break;
7670 case 5:
7671
7672 op = (insn >> 21) & 0xf;
7673 if (op == 6) {
7674 /* Halfword pack. */
7675 tmp = load_reg(s, rn);
7676 tmp2 = load_reg(s, rm);
7677 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7678 if (insn & (1 << 5)) {
7679 /* pkhtb */
7680 if (shift == 0)
7681 shift = 31;
7682 tcg_gen_sari_i32(tmp2, tmp2, shift);
7683 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7684 tcg_gen_ext16u_i32(tmp2, tmp2);
7685 } else {
7686 /* pkhbt */
7687 if (shift)
7688 tcg_gen_shli_i32(tmp2, tmp2, shift);
7689 tcg_gen_ext16u_i32(tmp, tmp);
7690 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7691 }
7692 tcg_gen_or_i32(tmp, tmp, tmp2);
7693 dead_tmp(tmp2);
7694 store_reg(s, rd, tmp);
7695 } else {
7696 /* Data processing register constant shift. */
7697 if (rn == 15) {
7698 tmp = new_tmp();
7699 tcg_gen_movi_i32(tmp, 0);
7700 } else {
7701 tmp = load_reg(s, rn);
7702 }
7703 tmp2 = load_reg(s, rm);
7704
7705 shiftop = (insn >> 4) & 3;
7706 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7707 conds = (insn & (1 << 20)) != 0;
7708 logic_cc = (conds && thumb2_logic_op(op));
7709 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7710 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7711 goto illegal_op;
7712 dead_tmp(tmp2);
7713 if (rd != 15) {
7714 store_reg(s, rd, tmp);
7715 } else {
7716 dead_tmp(tmp);
7717 }
7718 }
7719 break;
7720 case 13: /* Misc data processing. */
7721 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7722 if (op < 4 && (insn & 0xf000) != 0xf000)
7723 goto illegal_op;
7724 switch (op) {
7725 case 0: /* Register controlled shift. */
7726 tmp = load_reg(s, rn);
7727 tmp2 = load_reg(s, rm);
7728 if ((insn & 0x70) != 0)
7729 goto illegal_op;
7730 op = (insn >> 21) & 3;
7731 logic_cc = (insn & (1 << 20)) != 0;
7732 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7733 if (logic_cc)
7734 gen_logic_CC(tmp);
7735 store_reg_bx(env, s, rd, tmp);
7736 break;
7737 case 1: /* Sign/zero extend. */
7738 tmp = load_reg(s, rm);
7739 shift = (insn >> 4) & 3;
7740 /* ??? In many cases it's not necessary to do a
7741 rotate, a shift is sufficient. */
7742 if (shift != 0)
7743 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7744 op = (insn >> 20) & 7;
7745 switch (op) {
7746 case 0: gen_sxth(tmp); break;
7747 case 1: gen_uxth(tmp); break;
7748 case 2: gen_sxtb16(tmp); break;
7749 case 3: gen_uxtb16(tmp); break;
7750 case 4: gen_sxtb(tmp); break;
7751 case 5: gen_uxtb(tmp); break;
7752 default: goto illegal_op;
7753 }
7754 if (rn != 15) {
7755 tmp2 = load_reg(s, rn);
7756 if ((op >> 1) == 1) {
7757 gen_add16(tmp, tmp2);
7758 } else {
7759 tcg_gen_add_i32(tmp, tmp, tmp2);
7760 dead_tmp(tmp2);
7761 }
7762 }
7763 store_reg(s, rd, tmp);
7764 break;
7765 case 2: /* SIMD add/subtract. */
7766 op = (insn >> 20) & 7;
7767 shift = (insn >> 4) & 7;
7768 if ((op & 3) == 3 || (shift & 3) == 3)
7769 goto illegal_op;
7770 tmp = load_reg(s, rn);
7771 tmp2 = load_reg(s, rm);
7772 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7773 dead_tmp(tmp2);
7774 store_reg(s, rd, tmp);
7775 break;
7776 case 3: /* Other data processing. */
7777 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7778 if (op < 4) {
7779 /* Saturating add/subtract. */
7780 tmp = load_reg(s, rn);
7781 tmp2 = load_reg(s, rm);
7782 if (op & 1)
7783 gen_helper_double_saturate(tmp, tmp);
7784 if (op & 2)
7785 gen_helper_sub_saturate(tmp, tmp2, tmp);
7786 else
7787 gen_helper_add_saturate(tmp, tmp, tmp2);
7788 dead_tmp(tmp2);
7789 } else {
7790 tmp = load_reg(s, rn);
7791 switch (op) {
7792 case 0x0a: /* rbit */
7793 gen_helper_rbit(tmp, tmp);
7794 break;
7795 case 0x08: /* rev */
7796 tcg_gen_bswap32_i32(tmp, tmp);
7797 break;
7798 case 0x09: /* rev16 */
7799 gen_rev16(tmp);
7800 break;
7801 case 0x0b: /* revsh */
7802 gen_revsh(tmp);
7803 break;
7804 case 0x10: /* sel */
7805 tmp2 = load_reg(s, rm);
7806 tmp3 = new_tmp();
7807 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7808 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7809 dead_tmp(tmp3);
7810 dead_tmp(tmp2);
7811 break;
7812 case 0x18: /* clz */
7813 gen_helper_clz(tmp, tmp);
7814 break;
7815 default:
7816 goto illegal_op;
7817 }
7818 }
7819 store_reg(s, rd, tmp);
7820 break;
7821 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7822 op = (insn >> 4) & 0xf;
7823 tmp = load_reg(s, rn);
7824 tmp2 = load_reg(s, rm);
7825 switch ((insn >> 20) & 7) {
7826 case 0: /* 32 x 32 -> 32 */
7827 tcg_gen_mul_i32(tmp, tmp, tmp2);
7828 dead_tmp(tmp2);
7829 if (rs != 15) {
7830 tmp2 = load_reg(s, rs);
7831 if (op)
7832 tcg_gen_sub_i32(tmp, tmp2, tmp);
7833 else
7834 tcg_gen_add_i32(tmp, tmp, tmp2);
7835 dead_tmp(tmp2);
7836 }
7837 break;
7838 case 1: /* 16 x 16 -> 32 */
7839 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7840 dead_tmp(tmp2);
7841 if (rs != 15) {
7842 tmp2 = load_reg(s, rs);
7843 gen_helper_add_setq(tmp, tmp, tmp2);
7844 dead_tmp(tmp2);
7845 }
7846 break;
7847 case 2: /* Dual multiply add. */
7848 case 4: /* Dual multiply subtract. */
7849 if (op)
7850 gen_swap_half(tmp2);
7851 gen_smul_dual(tmp, tmp2);
7852 /* This addition cannot overflow. */
7853 if (insn & (1 << 22)) {
7854 tcg_gen_sub_i32(tmp, tmp, tmp2);
7855 } else {
7856 tcg_gen_add_i32(tmp, tmp, tmp2);
7857 }
7858 dead_tmp(tmp2);
7859 if (rs != 15)
7860 {
7861 tmp2 = load_reg(s, rs);
7862 gen_helper_add_setq(tmp, tmp, tmp2);
7863 dead_tmp(tmp2);
7864 }
7865 break;
7866 case 3: /* 32 * 16 -> 32msb */
7867 if (op)
7868 tcg_gen_sari_i32(tmp2, tmp2, 16);
7869 else
7870 gen_sxth(tmp2);
7871 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7872 tcg_gen_shri_i64(tmp64, tmp64, 16);
7873 tmp = new_tmp();
7874 tcg_gen_trunc_i64_i32(tmp, tmp64);
7875 tcg_temp_free_i64(tmp64);
7876 if (rs != 15)
7877 {
7878 tmp2 = load_reg(s, rs);
7879 gen_helper_add_setq(tmp, tmp, tmp2);
7880 dead_tmp(tmp2);
7881 }
7882 break;
7883 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7884 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7885 if (rs != 15) {
7886 tmp = load_reg(s, rs);
7887 if (insn & (1 << 20)) {
7888 tmp64 = gen_addq_msw(tmp64, tmp);
7889 } else {
7890 tmp64 = gen_subq_msw(tmp64, tmp);
7891 }
7892 }
7893 if (insn & (1 << 4)) {
7894 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7895 }
7896 tcg_gen_shri_i64(tmp64, tmp64, 32);
7897 tmp = new_tmp();
7898 tcg_gen_trunc_i64_i32(tmp, tmp64);
7899 tcg_temp_free_i64(tmp64);
7900 break;
7901 case 7: /* Unsigned sum of absolute differences. */
7902 gen_helper_usad8(tmp, tmp, tmp2);
7903 dead_tmp(tmp2);
7904 if (rs != 15) {
7905 tmp2 = load_reg(s, rs);
7906 tcg_gen_add_i32(tmp, tmp, tmp2);
7907 dead_tmp(tmp2);
7908 }
7909 break;
7910 }
7911 store_reg(s, rd, tmp);
7912 break;
7913 case 6: case 7: /* 64-bit multiply, Divide. */
7914 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7915 tmp = load_reg(s, rn);
7916 tmp2 = load_reg(s, rm);
7917 if ((op & 0x50) == 0x10) {
7918 /* sdiv, udiv */
7919 if (!arm_feature(env, ARM_FEATURE_DIV))
7920 goto illegal_op;
7921 if (op & 0x20)
7922 gen_helper_udiv(tmp, tmp, tmp2);
7923 else
7924 gen_helper_sdiv(tmp, tmp, tmp2);
7925 dead_tmp(tmp2);
7926 store_reg(s, rd, tmp);
7927 } else if ((op & 0xe) == 0xc) {
7928 /* Dual multiply accumulate long. */
7929 if (op & 1)
7930 gen_swap_half(tmp2);
7931 gen_smul_dual(tmp, tmp2);
7932 if (op & 0x10) {
7933 tcg_gen_sub_i32(tmp, tmp, tmp2);
7934 } else {
7935 tcg_gen_add_i32(tmp, tmp, tmp2);
7936 }
7937 dead_tmp(tmp2);
7938 /* BUGFIX */
7939 tmp64 = tcg_temp_new_i64();
7940 tcg_gen_ext_i32_i64(tmp64, tmp);
7941 dead_tmp(tmp);
7942 gen_addq(s, tmp64, rs, rd);
7943 gen_storeq_reg(s, rs, rd, tmp64);
7944 tcg_temp_free_i64(tmp64);
7945 } else {
7946 if (op & 0x20) {
7947 /* Unsigned 64-bit multiply */
7948 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7949 } else {
7950 if (op & 8) {
7951 /* smlalxy */
7952 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7953 dead_tmp(tmp2);
7954 tmp64 = tcg_temp_new_i64();
7955 tcg_gen_ext_i32_i64(tmp64, tmp);
7956 dead_tmp(tmp);
7957 } else {
7958 /* Signed 64-bit multiply */
7959 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7960 }
7961 }
7962 if (op & 4) {
7963 /* umaal */
7964 gen_addq_lo(s, tmp64, rs);
7965 gen_addq_lo(s, tmp64, rd);
7966 } else if (op & 0x40) {
7967 /* 64-bit accumulate. */
7968 gen_addq(s, tmp64, rs, rd);
7969 }
7970 gen_storeq_reg(s, rs, rd, tmp64);
7971 tcg_temp_free_i64(tmp64);
7972 }
7973 break;
7974 }
7975 break;
7976 case 6: case 7: case 14: case 15:
7977 /* Coprocessor. */
7978 if (((insn >> 24) & 3) == 3) {
7979 /* Translate into the equivalent ARM encoding. */
7980 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7981 if (disas_neon_data_insn(env, s, insn))
7982 goto illegal_op;
7983 } else {
7984 if (insn & (1 << 28))
7985 goto illegal_op;
7986 if (disas_coproc_insn (env, s, insn))
7987 goto illegal_op;
7988 }
7989 break;
7990 case 8: case 9: case 10: case 11:
7991 if (insn & (1 << 15)) {
7992 /* Branches, misc control. */
7993 if (insn & 0x5000) {
7994 /* Unconditional branch. */
7995 /* signextend(hw1[10:0]) -> offset[31:12]. */
7996 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7997 /* hw2[10:0] -> offset[11:1]. */
7998 offset |= (insn & 0x7ff) << 1;
7999 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8000 offset[24:22] already have the same value because of the
8001 sign extension above. */
8002 offset ^= ((~insn) & (1 << 13)) << 10;
8003 offset ^= ((~insn) & (1 << 11)) << 11;
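/* Per the T4 encoding, I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
   Bits 23 and 22 of offset currently hold S (from the sign
   extension), so xoring them with ~hw2[13] and ~hw2[11] leaves
   S ^ ~J1 = I1 and S ^ ~J2 = I2, completing the
   S:I1:I2:imm10:imm11:0 offset. */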
8004
8005 if (insn & (1 << 14)) {
8006 /* Branch and link. */
8007 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8008 }
8009
8010 offset += s->pc;
8011 if (insn & (1 << 12)) {
8012 /* b/bl */
8013 gen_jmp(s, offset);
8014 } else {
8015 /* blx */
8016 offset &= ~(uint32_t)2;
8017 gen_bx_im(s, offset);
8018 }
8019 } else if (((insn >> 23) & 7) == 7) {
8020 /* Misc control */
8021 if (insn & (1 << 13))
8022 goto illegal_op;
8023
8024 if (insn & (1 << 26)) {
8025 /* Secure monitor call (v6Z) */
8026 goto illegal_op; /* not implemented. */
8027 } else {
8028 op = (insn >> 20) & 7;
8029 switch (op) {
8030 case 0: /* msr cpsr. */
8031 if (IS_M(env)) {
8032 tmp = load_reg(s, rn);
8033 addr = tcg_const_i32(insn & 0xff);
8034 gen_helper_v7m_msr(cpu_env, addr, tmp);
8035 tcg_temp_free_i32(addr);
8036 dead_tmp(tmp);
8037 gen_lookup_tb(s);
8038 break;
8039 }
8040 /* fall through */
8041 case 1: /* msr spsr. */
8042 if (IS_M(env))
8043 goto illegal_op;
8044 tmp = load_reg(s, rn);
8045 if (gen_set_psr(s,
8046 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8047 op == 1, tmp))
8048 goto illegal_op;
8049 break;
8050 case 2: /* cps, nop-hint. */
8051 if (((insn >> 8) & 7) == 0) {
8052 gen_nop_hint(s, insn & 0xff);
8053 }
8054 /* Implemented as NOP in user mode. */
8055 if (IS_USER(s))
8056 break;
8057 offset = 0;
8058 imm = 0;
8059 if (insn & (1 << 10)) {
8060 if (insn & (1 << 7))
8061 offset |= CPSR_A;
8062 if (insn & (1 << 6))
8063 offset |= CPSR_I;
8064 if (insn & (1 << 5))
8065 offset |= CPSR_F;
8066 if (insn & (1 << 9))
8067 imm = CPSR_A | CPSR_I | CPSR_F;
8068 }
8069 if (insn & (1 << 8)) {
8070 offset |= 0x1f;
8071 imm |= (insn & 0x1f);
8072 }
8073 if (offset) {
8074 gen_set_psr_im(s, offset, 0, imm);
8075 }
8076 break;
8077 case 3: /* Special control operations. */
8078 ARCH(7);
8079 op = (insn >> 4) & 0xf;
8080 switch (op) {
8081 case 2: /* clrex */
8082 gen_clrex(s);
8083 break;
8084 case 4: /* dsb */
8085 case 5: /* dmb */
8086 case 6: /* isb */
8087 /* These execute as NOPs. */
8088 break;
8089 default:
8090 goto illegal_op;
8091 }
8092 break;
8093 case 4: /* bxj */
8094 /* Trivial implementation equivalent to bx. */
8095 tmp = load_reg(s, rn);
8096 gen_bx(s, tmp);
8097 break;
8098 case 5: /* Exception return. */
8099 if (IS_USER(s)) {
8100 goto illegal_op;
8101 }
8102 if (rn != 14 || rd != 15) {
8103 goto illegal_op;
8104 }
8105 tmp = load_reg(s, rn);
8106 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8107 gen_exception_return(s, tmp);
8108 break;
8109 case 6: /* mrs cpsr. */
8110 tmp = new_tmp();
8111 if (IS_M(env)) {
8112 addr = tcg_const_i32(insn & 0xff);
8113 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8114 tcg_temp_free_i32(addr);
8115 } else {
8116 gen_helper_cpsr_read(tmp);
8117 }
8118 store_reg(s, rd, tmp);
8119 break;
8120 case 7: /* mrs spsr. */
8121 /* Not accessible in user mode. */
8122 if (IS_USER(s) || IS_M(env))
8123 goto illegal_op;
8124 tmp = load_cpu_field(spsr);
8125 store_reg(s, rd, tmp);
8126 break;
8127 }
8128 }
8129 } else {
8130 /* Conditional branch. */
8131 op = (insn >> 22) & 0xf;
8132 /* Generate a conditional jump to next instruction. */
8133 s->condlabel = gen_new_label();
8134 gen_test_cc(op ^ 1, s->condlabel);
8135 s->condjmp = 1;
8136
8137 /* offset[11:1] = insn[10:0] */
8138 offset = (insn & 0x7ff) << 1;
8139 /* offset[17:12] = insn[21:16]. */
8140 offset |= (insn & 0x003f0000) >> 4;
8141 /* offset[31:20] = insn[26]. */
8142 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8143 /* offset[18] = insn[13]. */
8144 offset |= (insn & (1 << 13)) << 5;
8145 /* offset[19] = insn[11]. */
8146 offset |= (insn & (1 << 11)) << 8;
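/* The reassembled offset is a signed 21-bit value (sign bit
   replicated upwards from insn[26]), so conditional branches reach
   roughly +/- 1MB from PC. */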
8147
8148 /* jump to the offset */
8149 gen_jmp(s, s->pc + offset);
8150 }
8151 } else {
8152 /* Data processing immediate. */
8153 if (insn & (1 << 25)) {
8154 if (insn & (1 << 24)) {
8155 if (insn & (1 << 20))
8156 goto illegal_op;
8157 /* Bitfield/Saturate. */
8158 op = (insn >> 21) & 7;
8159 imm = insn & 0x1f;
8160 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8161 if (rn == 15) {
8162 tmp = new_tmp();
8163 tcg_gen_movi_i32(tmp, 0);
8164 } else {
8165 tmp = load_reg(s, rn);
8166 }
8167 switch (op) {
8168 case 2: /* Signed bitfield extract. */
8169 imm++;
8170 if (shift + imm > 32)
8171 goto illegal_op;
8172 if (imm < 32)
8173 gen_sbfx(tmp, shift, imm);
8174 break;
8175 case 6: /* Unsigned bitfield extract. */
8176 imm++;
8177 if (shift + imm > 32)
8178 goto illegal_op;
8179 if (imm < 32)
8180 gen_ubfx(tmp, shift, (1u << imm) - 1);
8181 break;
8182 case 3: /* Bitfield insert/clear. */
8183 if (imm < shift)
8184 goto illegal_op;
8185 imm = imm + 1 - shift;
8186 if (imm != 32) {
8187 tmp2 = load_reg(s, rd);
8188 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8189 dead_tmp(tmp2);
8190 }
8191 break;
8192 case 7:
8193 goto illegal_op;
8194 default: /* Saturate. */
8195 if (shift) {
8196 if (op & 1)
8197 tcg_gen_sari_i32(tmp, tmp, shift);
8198 else
8199 tcg_gen_shli_i32(tmp, tmp, shift);
8200 }
8201 tmp2 = tcg_const_i32(imm);
8202 if (op & 4) {
8203 /* Unsigned. */
8204 if ((op & 1) && shift == 0)
8205 gen_helper_usat16(tmp, tmp, tmp2);
8206 else
8207 gen_helper_usat(tmp, tmp, tmp2);
8208 } else {
8209 /* Signed. */
8210 if ((op & 1) && shift == 0)
8211 gen_helper_ssat16(tmp, tmp, tmp2);
8212 else
8213 gen_helper_ssat(tmp, tmp, tmp2);
8214 }
8215 tcg_temp_free_i32(tmp2);
8216 break;
8217 }
8218 store_reg(s, rd, tmp);
8219 } else {
8220 imm = ((insn & 0x04000000) >> 15)
8221 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8222 if (insn & (1 << 22)) {
8223 /* 16-bit immediate. */
8224 imm |= (insn >> 4) & 0xf000;
8225 if (insn & (1 << 23)) {
8226 /* movt */
8227 tmp = load_reg(s, rd);
8228 tcg_gen_ext16u_i32(tmp, tmp);
8229 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8230 } else {
8231 /* movw */
8232 tmp = new_tmp();
8233 tcg_gen_movi_i32(tmp, imm);
8234 }
8235 } else {
8236 /* Add/sub 12-bit immediate. */
8237 if (rn == 15) {
8238 offset = s->pc & ~(uint32_t)3;
8239 if (insn & (1 << 23))
8240 offset -= imm;
8241 else
8242 offset += imm;
8243 tmp = new_tmp();
8244 tcg_gen_movi_i32(tmp, offset);
8245 } else {
8246 tmp = load_reg(s, rn);
8247 if (insn & (1 << 23))
8248 tcg_gen_subi_i32(tmp, tmp, imm);
8249 else
8250 tcg_gen_addi_i32(tmp, tmp, imm);
8251 }
8252 }
8253 store_reg(s, rd, tmp);
8254 }
8255 } else {
8256 int shifter_out = 0;
8257 /* modified 12-bit immediate. */
8258 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8259 imm = (insn & 0xff);
8260 switch (shift) {
8261 case 0: /* XY */
8262 /* Nothing to do. */
8263 break;
8264 case 1: /* 00XY00XY */
8265 imm |= imm << 16;
8266 break;
8267 case 2: /* XY00XY00 */
8268 imm |= imm << 16;
8269 imm <<= 8;
8270 break;
8271 case 3: /* XYXYXYXY */
8272 imm |= imm << 16;
8273 imm |= imm << 8;
8274 break;
8275 default: /* Rotated constant. */
8276 shift = (shift << 1) | (imm >> 7);
8277 imm |= 0x80;
8278 imm = imm << (32 - shift);
8279 shifter_out = 1;
8280 break;
8281 }
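/* Examples: imm8 = 0x55 with pattern 3 gives 0x55555555; for the
   rotated form the leading 1 is restored (imm | 0x80) and the byte
   rotated to the top, e.g. rotation count 8 turns imm8 = 0x2a into
   0xaa000000. */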
8282 tmp2 = new_tmp();
8283 tcg_gen_movi_i32(tmp2, imm);
8284 rn = (insn >> 16) & 0xf;
8285 if (rn == 15) {
8286 tmp = new_tmp();
8287 tcg_gen_movi_i32(tmp, 0);
8288 } else {
8289 tmp = load_reg(s, rn);
8290 }
8291 op = (insn >> 21) & 0xf;
8292 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8293 shifter_out, tmp, tmp2))
8294 goto illegal_op;
8295 dead_tmp(tmp2);
8296 rd = (insn >> 8) & 0xf;
8297 if (rd != 15) {
8298 store_reg(s, rd, tmp);
8299 } else {
8300 dead_tmp(tmp);
8301 }
8302 }
8303 }
8304 break;
8305 case 12: /* Load/store single data item. */
8306 {
8307 int postinc = 0;
8308 int writeback = 0;
8309 int user;
8310 if ((insn & 0x01100000) == 0x01000000) {
8311 if (disas_neon_ls_insn(env, s, insn))
8312 goto illegal_op;
8313 break;
8314 }
8315 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8316 if (rs == 15) {
8317 if (!(insn & (1 << 20))) {
8318 goto illegal_op;
8319 }
8320 if (op != 2) {
8321 /* Byte or halfword load space with dest == r15: memory hints.
8322 * Catch them early so we don't emit pointless addressing code.
8323 * This space is a mix of:
8324 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8325 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8326 * cores)
8327 * unallocated hints, which must be treated as NOPs
8328 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8329 * which is easiest for the decoding logic
8330 * Some space which must UNDEF
8331 */
8332 int op1 = (insn >> 23) & 3;
8333 int op2 = (insn >> 6) & 0x3f;
8334 if (op & 2) {
8335 goto illegal_op;
8336 }
8337 if (rn == 15) {
8338 /* UNPREDICTABLE or unallocated hint */
8339 return 0;
8340 }
8341 if (op1 & 1) {
8342 return 0; /* PLD* or unallocated hint */
8343 }
8344 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8345 return 0; /* PLD* or unallocated hint */
8346 }
8347 /* UNDEF space, or an UNPREDICTABLE */
8348 return 1;
8349 }
8350 }
8351 user = IS_USER(s);
8352 if (rn == 15) {
8353 addr = new_tmp();
8354 /* PC relative. */
8355 /* s->pc has already been incremented by 4. */
8356 imm = s->pc & 0xfffffffc;
8357 if (insn & (1 << 23))
8358 imm += insn & 0xfff;
8359 else
8360 imm -= insn & 0xfff;
8361 tcg_gen_movi_i32(addr, imm);
8362 } else {
8363 addr = load_reg(s, rn);
8364 if (insn & (1 << 23)) {
8365 /* Positive offset. */
8366 imm = insn & 0xfff;
8367 tcg_gen_addi_i32(addr, addr, imm);
8368 } else {
8369 imm = insn & 0xff;
8370 switch ((insn >> 8) & 7) {
8371 case 0: case 8: /* Shifted Register. */
8372 shift = (insn >> 4) & 0xf;
8373 if (shift > 3)
8374 goto illegal_op;
8375 tmp = load_reg(s, rm);
8376 if (shift)
8377 tcg_gen_shli_i32(tmp, tmp, shift);
8378 tcg_gen_add_i32(addr, addr, tmp);
8379 dead_tmp(tmp);
8380 break;
8381 case 4: /* Negative offset. */
8382 tcg_gen_addi_i32(addr, addr, -imm);
8383 break;
8384 case 6: /* User privilege. */
8385 tcg_gen_addi_i32(addr, addr, imm);
8386 user = 1;
8387 break;
8388 case 1: /* Post-decrement. */
8389 imm = -imm;
8390 /* Fall through. */
8391 case 3: /* Post-increment. */
8392 postinc = 1;
8393 writeback = 1;
8394 break;
8395 case 5: /* Pre-decrement. */
8396 imm = -imm;
8397 /* Fall through. */
8398 case 7: /* Pre-increment. */
8399 tcg_gen_addi_i32(addr, addr, imm);
8400 writeback = 1;
8401 break;
8402 default:
8403 goto illegal_op;
8404 }
8405 }
8406 }
8407 if (insn & (1 << 20)) {
8408 /* Load. */
8409 switch (op) {
8410 case 0: tmp = gen_ld8u(addr, user); break;
8411 case 4: tmp = gen_ld8s(addr, user); break;
8412 case 1: tmp = gen_ld16u(addr, user); break;
8413 case 5: tmp = gen_ld16s(addr, user); break;
8414 case 2: tmp = gen_ld32(addr, user); break;
8415 default: goto illegal_op;
8416 }
8417 if (rs == 15) {
8418 gen_bx(s, tmp);
8419 } else {
8420 store_reg(s, rs, tmp);
8421 }
8422 } else {
8423 /* Store. */
8424 tmp = load_reg(s, rs);
8425 switch (op) {
8426 case 0: gen_st8(tmp, addr, user); break;
8427 case 1: gen_st16(tmp, addr, user); break;
8428 case 2: gen_st32(tmp, addr, user); break;
8429 default: goto illegal_op;
8430 }
8431 }
8432 if (postinc)
8433 tcg_gen_addi_i32(addr, addr, imm);
8434 if (writeback) {
8435 store_reg(s, rn, addr);
8436 } else {
8437 dead_tmp(addr);
8438 }
8439 }
8440 break;
8441 default:
8442 goto illegal_op;
8443 }
8444 return 0;
8445 illegal_op:
8446 return 1;
8447 }
8448
8449 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8450 {
8451 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8452 int32_t offset;
8453 int i;
8454 TCGv tmp;
8455 TCGv tmp2;
8456 TCGv addr;
8457
8458 if (s->condexec_mask) {
8459 cond = s->condexec_cond;
8460 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8461 s->condlabel = gen_new_label();
8462 gen_test_cc(cond ^ 1, s->condlabel);
8463 s->condjmp = 1;
8464 }
8465 }
8466
8467 insn = lduw_code(s->pc);
8468 s->pc += 2;
8469
8470 switch (insn >> 12) {
8471 case 0: case 1:
8472
8473 rd = insn & 7;
8474 op = (insn >> 11) & 3;
8475 if (op == 3) {
8476 /* add/subtract */
8477 rn = (insn >> 3) & 7;
8478 tmp = load_reg(s, rn);
8479 if (insn & (1 << 10)) {
8480 /* immediate */
8481 tmp2 = new_tmp();
8482 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8483 } else {
8484 /* reg */
8485 rm = (insn >> 6) & 7;
8486 tmp2 = load_reg(s, rm);
8487 }
8488 if (insn & (1 << 9)) {
8489 if (s->condexec_mask)
8490 tcg_gen_sub_i32(tmp, tmp, tmp2);
8491 else
8492 gen_helper_sub_cc(tmp, tmp, tmp2);
8493 } else {
8494 if (s->condexec_mask)
8495 tcg_gen_add_i32(tmp, tmp, tmp2);
8496 else
8497 gen_helper_add_cc(tmp, tmp, tmp2);
8498 }
8499 dead_tmp(tmp2);
8500 store_reg(s, rd, tmp);
8501 } else {
8502 /* shift immediate */
8503 rm = (insn >> 3) & 7;
8504 shift = (insn >> 6) & 0x1f;
8505 tmp = load_reg(s, rm);
8506 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8507 if (!s->condexec_mask)
8508 gen_logic_CC(tmp);
8509 store_reg(s, rd, tmp);
8510 }
8511 break;
8512 case 2: case 3:
8513 /* arithmetic large immediate */
8514 op = (insn >> 11) & 3;
8515 rd = (insn >> 8) & 0x7;
8516 if (op == 0) { /* mov */
8517 tmp = new_tmp();
8518 tcg_gen_movi_i32(tmp, insn & 0xff);
8519 if (!s->condexec_mask)
8520 gen_logic_CC(tmp);
8521 store_reg(s, rd, tmp);
8522 } else {
8523 tmp = load_reg(s, rd);
8524 tmp2 = new_tmp();
8525 tcg_gen_movi_i32(tmp2, insn & 0xff);
8526 switch (op) {
8527 case 1: /* cmp */
8528 gen_helper_sub_cc(tmp, tmp, tmp2);
8529 dead_tmp(tmp);
8530 dead_tmp(tmp2);
8531 break;
8532 case 2: /* add */
8533 if (s->condexec_mask)
8534 tcg_gen_add_i32(tmp, tmp, tmp2);
8535 else
8536 gen_helper_add_cc(tmp, tmp, tmp2);
8537 dead_tmp(tmp2);
8538 store_reg(s, rd, tmp);
8539 break;
8540 case 3: /* sub */
8541 if (s->condexec_mask)
8542 tcg_gen_sub_i32(tmp, tmp, tmp2);
8543 else
8544 gen_helper_sub_cc(tmp, tmp, tmp2);
8545 dead_tmp(tmp2);
8546 store_reg(s, rd, tmp);
8547 break;
8548 }
8549 }
8550 break;
8551 case 4:
8552 if (insn & (1 << 11)) {
8553 rd = (insn >> 8) & 7;
8554 /* load pc-relative. Bit 1 of PC is ignored. */
8555 val = s->pc + 2 + ((insn & 0xff) * 4);
8556 val &= ~(uint32_t)2;
8557 addr = new_tmp();
8558 tcg_gen_movi_i32(addr, val);
8559 tmp = gen_ld32(addr, IS_USER(s));
8560 dead_tmp(addr);
8561 store_reg(s, rd, tmp);
8562 break;
8563 }
8564 if (insn & (1 << 10)) {
8565 /* data processing extended or blx */
8566 rd = (insn & 7) | ((insn >> 4) & 8);
8567 rm = (insn >> 3) & 0xf;
8568 op = (insn >> 8) & 3;
8569 switch (op) {
8570 case 0: /* add */
8571 tmp = load_reg(s, rd);
8572 tmp2 = load_reg(s, rm);
8573 tcg_gen_add_i32(tmp, tmp, tmp2);
8574 dead_tmp(tmp2);
8575 store_reg(s, rd, tmp);
8576 break;
8577 case 1: /* cmp */
8578 tmp = load_reg(s, rd);
8579 tmp2 = load_reg(s, rm);
8580 gen_helper_sub_cc(tmp, tmp, tmp2);
8581 dead_tmp(tmp2);
8582 dead_tmp(tmp);
8583 break;
8584 case 2: /* mov/cpy */
8585 tmp = load_reg(s, rm);
8586 store_reg(s, rd, tmp);
8587 break;
8588 case 3: /* branch [and link] exchange thumb register */
8589 tmp = load_reg(s, rm);
8590 if (insn & (1 << 7)) {
8591 val = (uint32_t)s->pc | 1;
8592 tmp2 = new_tmp();
8593 tcg_gen_movi_i32(tmp2, val);
8594 store_reg(s, 14, tmp2);
8595 }
8596 gen_bx(s, tmp);
8597 break;
8598 }
8599 break;
8600 }
8601
        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc(tmp, tmp2);
            else
                gen_helper_adc_cc(tmp, tmp, tmp2);
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
            else
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_helper_sub_cc(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_helper_sub_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_helper_add_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    dead_tmp(tmp);
            } else {
                store_reg(s, rd, tmp);
                dead_tmp(tmp2);
            }
        } else {
            dead_tmp(tmp);
            dead_tmp(tmp2);
        }
        break;

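    /* Format 7/8: load/store with register offset.  The op field in
       bits [11:9] selects the access: for example insn 0x5c22 is
       op = 6, rm = r0, rn = r4, rd = r2, i.e. "ldrb r2, [r4, r0]".  */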
    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        dead_tmp(tmp);

        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        dead_tmp(addr);
        break;

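    /* In the immediate-offset forms below, the 5-bit offset field is
       extracted pre-scaled by the access size: e.g. (insn >> 4) & 0x7c
       equals the immediate multiplied by 4 in a single step.  */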
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

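    /* Format 12: load address, i.e. add an immediate to SP or to the
       word-aligned PC.  */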
    case 10:
        /* add immediate to SP or PC */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

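    /* Miscellaneous 16-bit instructions, dispatched on bits [11:8]:
       SP adjustment, sign/zero extension, push/pop, cbz/cbnz,
       byte-reverse, cps, bkpt and IT/hints.  */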
    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
                gen_bx(s, tmp);
            break;

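        /* The compare-and-branch offset is i:imm5:'0', reassembled
           below from bit 9 and bits [7:3]; bit 11 set means branch on
           nonzero (cbnz), clear means branch on zero (cbz).  */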
        case 1: case 3: case 9: case 11: /* cbz/cbnz */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            dead_tmp(tmp);
            offset = ((insn & 0xf8) >> 2) | ((insn & 0x200) >> 3);
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

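        /* The IT state is tracked in the DisasContext: condexec_cond
           holds the top bits of the first condition, condexec_mask the
           remaining condition bit plus the 4-bit mask.  It is advanced
           per instruction in gen_intermediate_code_internal().  */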
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;

        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                /* PRIMASK */
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                /* FAULTMASK */
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                tcg_temp_free_i32(tmp);
                gen_lookup_tb(s);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;
                gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
            }
            break;

        default:
            goto undef;
        }
        break;

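    /* Format 15: multiple load/store.  The base register is written
       back only when it does not also appear in the register list.  */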
    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback.  */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        } else {
            dead_tmp(addr);
        }
        break;

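    /* Format 16/17: conditional branch or swi.  Condition 0xe is
       reserved (treated as UNDEFINED here) and 0xf encodes swi;
       otherwise the 8-bit offset is sign-extended and doubled.  */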
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

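    /* Case 14 with bit 11 set and all of case 15 are halves of 32-bit
       Thumb-2 encodings (including the BL/BLX pairs) and are passed on
       to disas_thumb2_insn(); the rest of case 14 is the 16-bit
       unconditional branch with a sign-extended 11-bit offset.  */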
    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF);
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF);
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (e.g. a data abort on a load)
     * then the CPUState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. gen_pc_load() then uses this to restore
     * the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so we know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so we know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4)
                                        | (dc->condexec_mask >> 1);
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}

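/* The two entry points below differ only in whether a PC-search
   mapping is recorded, for later state restoration via gen_pc_load().  */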
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}

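/* Called when state must be restored after a fault mid-TB: the TB is
   retranslated with search_pc set, and the PC and IT bits recorded for
   the faulting op are copied back into the CPU state here.  */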
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}