/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}

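/* Count of currently live 32-bit temporaries: new_tmp() increments it and
   dead_tmp() decrements it, presumably so that leaked temporaries can be
   detected during translation.  */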
static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* Normally, since we have already advanced s->pc past this insn,
           we need only add one more insn length to get the architectural
           PC value.  */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
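/* For example, 0xAABBCCDD becomes 0xBBAADDCC: the high byte of each
   halfword is shifted down and masked, the low byte shifted up and
   masked, and the two halves are then ORed back together.  */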
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
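/* The xor/sub pair below sign-extends the extracted field without a
   variable-width sign-extension op: after masking to 'width' bits,
   XORing with the sign bit and then subtracting it leaves non-negative
   values unchanged and turns values with the sign bit set into their
   negative two's-complement equivalents.  E.g. for width == 8:
   0x80 ^ 0x80 = 0, and 0 - 0x80 = -128.  */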
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
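/* Note that 'mask' is the unshifted mask for the field width, so
   inserting an 8-bit field at bit 16 would, for example, be
   gen_bfi(dest, base, val, 16, 0xff).  */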
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Return (b << 32) + a.  Mark inputs as dead.  */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    dead_tmp(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a.  Mark inputs as dead.  */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    dead_tmp(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  The result is placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
 */
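/* Clearing bit 15 of both operands before the add stops a carry out of
   the low halfword from corrupting the high halfword; the final XOR
   with the saved bit-15 difference restores the correct bit-15 sum
   (without its carry-out, as a dual 16-bit add requires).  */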

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

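/* Condition flags are kept in a "lazy" form throughout this file:
   CF holds the carry flag as 0 or 1; NF and VF hold values whose bit 31
   is the N and V flag respectively; and ZF holds a value that is zero
   exactly when the Z flag is set.  gen_logic_CC() below relies on this
   by storing the result to both NF and ZF.  */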
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
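/* This matches the ARM SBC/RSC semantics: the borrow is the inverse of
   the carry flag, so T0 - T1 - NOT(CF) is computed as T0 - T1 + CF - 1.  */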
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
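/* In the ARM immediate-shift encoding a shift amount of 0 is special:
   LSL #0 is the identity, LSR #0 and ASR #0 encode shifts by 32, and
   ROR #0 encodes RRX (rotate right by one through the carry flag).
   The cases below implement those meanings.  */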
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

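/* Indexed by the 4-bit opcode field of an ARM data-processing insn;
   a nonzero entry means the op sets N and Z from its logical result
   (via gen_logic_CC) rather than from an arithmetic computation.  */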
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a
   temporary and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

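/* Load/store helpers.  The 'index' argument is the MMU index for the
   access (always IS_USER(s) in this file).  The store helpers consume
   (mark as dead) the value temporary.  */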
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

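/* Apply an ARM addressing-mode-2 offset to var: a 12-bit immediate when
   bit 25 of insn is clear, otherwise a register shifted by an immediate
   amount.  Bit 23 (the U bit) selects add versus subtract.  */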
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

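/* Apply an ARM addressing-mode-3 offset to var: an 8-bit immediate
   split across insn bits [3:0] and [11:8] when bit 22 is set, otherwise
   a plain register offset.  'extra' is an additional constant added to
   the offset, presumably for accesses that need a fixed adjustment such
   as the second word of a doubleword transfer.  */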
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

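/* Fixed-point conversions.  Assuming the usual VFP semantics, the
   'shift' argument below is the number of fractional bits in the
   fixed-point operand (the fbits field of the convert instructions).  */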
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env); \
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

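/* Each pair of single-precision registers aliases one double-precision
   register: s(2n) is the low word of d(n) and s(2n+1) the high word,
   with host endianness handled by the CPU_DoubleU layout.  For example,
   s5 lives in the upper half of vfp.regs[2].  */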
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = new_tmp();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    dead_tmp(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

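/* Decode the addressing mode of an iwMMXt load/store into dest:
   pre-indexed (bit 24 set) with optional base writeback (bit 21), or
   post-indexed (bit 24 clear, bit 21 set).  Returns 1 for an invalid
   encoding.  */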
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = new_tmp();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    dead_tmp(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                     /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                        /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = new_tmp();
        if (gen_iwmmxt_address(s, insn, addr)) {
            dead_tmp(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                      /* WLDRW wCx */
                tmp = new_tmp();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {                 /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                                /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {                 /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {                                /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                      /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {                 /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {                                /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {                 /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {                                /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        dead_tmp(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

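    /* The switch key below packs insn bits [23:20] into key bits [11:8]
       and insn bits [11:4] into key bits [7:0], so each case value
       encodes those two opcode fields of the instruction.  */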
    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                             /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                             /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                             /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                             /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                             /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                             /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                                 /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:         /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:         /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:         /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:         /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:         /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:         /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:         /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:         /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:         /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:         /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:         /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        dead_tmp(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:         /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:         /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:         /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:         /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:         /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:         /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:         /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:         /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:         /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:         /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:         /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:         /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:         /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:         /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:         /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:         /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:         /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
2220 gen_op_iwmmxt_set_mup();
2221 gen_op_iwmmxt_set_cup();
2222 break;
2223 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2224 case 0x418: case 0x518: case 0x618: case 0x718:
2225 case 0x818: case 0x918: case 0xa18: case 0xb18:
2226 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2227 wrd = (insn >> 12) & 0xf;
2228 rd0 = (insn >> 16) & 0xf;
2229 rd1 = (insn >> 0) & 0xf;
2230 gen_op_iwmmxt_movq_M0_wRn(rd0);
2231 switch ((insn >> 20) & 0xf) {
2232 case 0x0:
2233 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2234 break;
2235 case 0x1:
2236 gen_op_iwmmxt_addub_M0_wRn(rd1);
2237 break;
2238 case 0x3:
2239 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2240 break;
2241 case 0x4:
2242 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2243 break;
2244 case 0x5:
2245 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2246 break;
2247 case 0x7:
2248 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2249 break;
2250 case 0x8:
2251 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2252 break;
2253 case 0x9:
2254 gen_op_iwmmxt_addul_M0_wRn(rd1);
2255 break;
2256 case 0xb:
2257 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2258 break;
2259 default:
2260 return 1;
2261 }
2262 gen_op_iwmmxt_movq_wRn_M0(wrd);
2263 gen_op_iwmmxt_set_mup();
2264 gen_op_iwmmxt_set_cup();
2265 break;
2266 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2267 case 0x408: case 0x508: case 0x608: case 0x708:
2268 case 0x808: case 0x908: case 0xa08: case 0xb08:
2269 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2270 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2271 return 1;
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
2276 switch ((insn >> 22) & 3) {
2277 case 1:
2278 if (insn & (1 << 21))
2279 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2280 else
2281 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2282 break;
2283 case 2:
2284 if (insn & (1 << 21))
2285 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2286 else
2287 gen_op_iwmmxt_packul_M0_wRn(rd1);
2288 break;
2289 case 3:
2290 if (insn & (1 << 21))
2291 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2292 else
2293 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2294 break;
2295 }
2296 gen_op_iwmmxt_movq_wRn_M0(wrd);
2297 gen_op_iwmmxt_set_mup();
2298 gen_op_iwmmxt_set_cup();
2299 break;
2300 case 0x201: case 0x203: case 0x205: case 0x207:
2301 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2302 case 0x211: case 0x213: case 0x215: case 0x217:
2303 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2304 wrd = (insn >> 5) & 0xf;
2305 rd0 = (insn >> 12) & 0xf;
2306 rd1 = (insn >> 0) & 0xf;
2307 if (rd0 == 0xf || rd1 == 0xf)
2308 return 1;
2309 gen_op_iwmmxt_movq_M0_wRn(wrd);
2310 tmp = load_reg(s, rd0);
2311 tmp2 = load_reg(s, rd1);
2312 switch ((insn >> 16) & 0xf) {
2313 case 0x0: /* TMIA */
2314 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2315 break;
2316 case 0x8: /* TMIAPH */
2317 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2318 break;
2319 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2320 if (insn & (1 << 16))
2321 tcg_gen_shri_i32(tmp, tmp, 16);
2322 if (insn & (1 << 17))
2323 tcg_gen_shri_i32(tmp2, tmp2, 16);
2324 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2325 break;
2326 default:
2327 dead_tmp(tmp2);
2328 dead_tmp(tmp);
2329 return 1;
2330 }
2331 dead_tmp(tmp2);
2332 dead_tmp(tmp);
2333 gen_op_iwmmxt_movq_wRn_M0(wrd);
2334 gen_op_iwmmxt_set_mup();
2335 break;
2336 default:
2337 return 1;
2338 }
2339
2340 return 0;
2341 }
2342
2343 /* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
2344 (i.e. an undefined instruction).  */
2345 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2346 {
2347 int acc, rd0, rd1, rdhi, rdlo;
2348 TCGv tmp, tmp2;
2349
2350 if ((insn & 0x0ff00f10) == 0x0e200010) {
2351 /* Multiply with Internal Accumulate Format */
2352 rd0 = (insn >> 12) & 0xf;
2353 rd1 = insn & 0xf;
2354 acc = (insn >> 5) & 7;
2355
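/* XScale implements only acc0, a single 40-bit accumulator. */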
2356 if (acc != 0)
2357 return 1;
2358
2359 tmp = load_reg(s, rd0);
2360 tmp2 = load_reg(s, rd1);
2361 switch ((insn >> 16) & 0xf) {
2362 case 0x0: /* MIA */
2363 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2364 break;
2365 case 0x8: /* MIAPH */
2366 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2367 break;
2368 case 0xc: /* MIABB */
2369 case 0xd: /* MIABT */
2370 case 0xe: /* MIATB */
2371 case 0xf: /* MIATT */
2372 if (insn & (1 << 16))
2373 tcg_gen_shri_i32(tmp, tmp, 16);
2374 if (insn & (1 << 17))
2375 tcg_gen_shri_i32(tmp2, tmp2, 16);
2376 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2377 break;
2378 default:
2379 return 1;
2380 }
2381 dead_tmp(tmp2);
2382 dead_tmp(tmp);
2383
2384 gen_op_iwmmxt_movq_wRn_M0(acc);
2385 return 0;
2386 }
2387
2388 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2389 /* Internal Accumulator Access Format */
2390 rdhi = (insn >> 16) & 0xf;
2391 rdlo = (insn >> 12) & 0xf;
2392 acc = insn & 7;
2393
2394 if (acc != 0)
2395 return 1;
2396
2397 if (insn & ARM_CP_RW_BIT) { /* MRA */
2398 iwmmxt_load_reg(cpu_V0, acc);
2399 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2400 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2401 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
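/* The accumulator is 40 bits wide; only bits [39:32] are valid in rdhi. */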
2402 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2403 } else { /* MAR */
2404 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2405 iwmmxt_store_reg(cpu_V0, acc);
2406 }
2407 return 0;
2408 }
2409
2410 return 1;
2411 }
2412
2413 /* Disassemble a system coprocessor instruction.  Return nonzero if the
2414 instruction is not defined.  */
2415 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2416 {
2417 TCGv tmp, tmp2;
2418 uint32_t rd = (insn >> 12) & 0xf;
2419 uint32_t cp = (insn >> 8) & 0xf;
2420 if (IS_USER(s)) {
2421 return 1;
2422 }
2423
2424 if (insn & ARM_CP_RW_BIT) {
2425 if (!env->cp[cp].cp_read)
2426 return 1;
2427 gen_set_pc_im(s->pc);
2428 tmp = new_tmp();
2429 tmp2 = tcg_const_i32(insn);
2430 gen_helper_get_cp(tmp, cpu_env, tmp2);
2431 tcg_temp_free(tmp2);
2432 store_reg(s, rd, tmp);
2433 } else {
2434 if (!env->cp[cp].cp_write)
2435 return 1;
2436 gen_set_pc_im(s->pc);
2437 tmp = load_reg(s, rd);
2438 tmp2 = tcg_const_i32(insn);
2439 gen_helper_set_cp(cpu_env, tmp2, tmp);
2440 tcg_temp_free(tmp2);
2441 dead_tmp(tmp);
2442 }
2443 return 0;
2444 }
2445
2446 static int cp15_user_ok(uint32_t insn)
2447 {
2448 int cpn = (insn >> 16) & 0xf;
2449 int cpm = insn & 0xf;
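/* op = (opc1 << 3) | opc2, taken from insn[23:21] and insn[7:5]. */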
2450 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2451
2452 if (cpn == 13 && cpm == 0) {
2453 /* TLS register. */
2454 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2455 return 1;
2456 }
2457 if (cpn == 7) {
2458 /* ISB, DSB, DMB. */
2459 if ((cpm == 5 && op == 4)
2460 || (cpm == 10 && (op == 4 || op == 5)))
2461 return 1;
2462 }
2463 return 0;
2464 }
2465
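/* Handle cp15 c13 TLS register accesses.  Returns 1 if the access was
   handled here, 0 to fall through to the generic cp15 handling. */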
2466 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2467 {
2468 TCGv tmp;
2469 int cpn = (insn >> 16) & 0xf;
2470 int cpm = insn & 0xf;
2471 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2472
2473 if (!arm_feature(env, ARM_FEATURE_V6K))
2474 return 0;
2475
2476 if (!(cpn == 13 && cpm == 0))
2477 return 0;
2478
2479 if (insn & ARM_CP_RW_BIT) {
2480 switch (op) {
2481 case 2:
2482 tmp = load_cpu_field(cp15.c13_tls1);
2483 break;
2484 case 3:
2485 tmp = load_cpu_field(cp15.c13_tls2);
2486 break;
2487 case 4:
2488 tmp = load_cpu_field(cp15.c13_tls3);
2489 break;
2490 default:
2491 return 0;
2492 }
2493 store_reg(s, rd, tmp);
2494
2495 } else {
2496 tmp = load_reg(s, rd);
2497 switch (op) {
2498 case 2:
2499 store_cpu_field(tmp, cp15.c13_tls1);
2500 break;
2501 case 3:
2502 store_cpu_field(tmp, cp15.c13_tls2);
2503 break;
2504 case 4:
2505 store_cpu_field(tmp, cp15.c13_tls3);
2506 break;
2507 default:
2508 dead_tmp(tmp);
2509 return 0;
2510 }
2511 }
2512 return 1;
2513 }
2514
2515 /* Disassemble a system coprocessor (cp15) instruction.  Return nonzero if
2516 the instruction is not defined.  */
2517 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2518 {
2519 uint32_t rd;
2520 TCGv tmp, tmp2;
2521
2522 /* M profile cores use memory mapped registers instead of cp15. */
2523 if (arm_feature(env, ARM_FEATURE_M))
2524 return 1;
2525
2526 if ((insn & (1 << 25)) == 0) {
2527 if (insn & (1 << 20)) {
2528 /* mrrc */
2529 return 1;
2530 }
2531 /* mcrr. Used for block cache operations, so implement as no-op. */
2532 return 0;
2533 }
2534 if ((insn & (1 << 4)) == 0) {
2535 /* cdp */
2536 return 1;
2537 }
2538 if (IS_USER(s) && !cp15_user_ok(insn)) {
2539 return 1;
2540 }
2541 if ((insn & 0x0fff0fff) == 0x0e070f90
2542 || (insn & 0x0fff0fff) == 0x0e070f58) {
2543 /* Wait for interrupt. */
2544 gen_set_pc_im(s->pc);
2545 s->is_jmp = DISAS_WFI;
2546 return 0;
2547 }
2548 rd = (insn >> 12) & 0xf;
2549
2550 if (cp15_tls_load_store(env, s, insn, rd))
2551 return 0;
2552
2553 tmp2 = tcg_const_i32(insn);
2554 if (insn & ARM_CP_RW_BIT) {
2555 tmp = new_tmp();
2556 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2557 /* If the destination register is r15 then set the condition codes.  */
2558 if (rd != 15)
2559 store_reg(s, rd, tmp);
2560 else
2561 dead_tmp(tmp);
2562 } else {
2563 tmp = load_reg(s, rd);
2564 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2565 dead_tmp(tmp);
2566 /* Normally we would always end the TB here, but Linux
2567 * arch/arm/mach-pxa/sleep.S expects two instructions following
2568 * an MMU enable to execute from cache. Imitate this behaviour. */
2569 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2570 (insn & 0x0fff0fff) != 0x0e010f10)
2571 gen_lookup_tb(s);
2572 }
2573 tcg_temp_free_i32(tmp2);
2574 return 0;
2575 }
2576
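/* Shift right by n, or left when n is negative (as happens for VFP_SREG_M,
   where bigbit is 0). */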
2577 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2578 #define VFP_SREG(insn, bigbit, smallbit) \
2579 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2580 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2581 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2582 reg = (((insn) >> (bigbit)) & 0x0f) \
2583 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2584 } else { \
2585 if (insn & (1 << (smallbit))) \
2586 return 1; \
2587 reg = ((insn) >> (bigbit)) & 0x0f; \
2588 }} while (0)
2589
2590 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2591 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2592 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2593 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2594 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2595 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
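/* Single-precision register numbers carry the extra encoding bit at the
   bottom (Sreg = field * 2 + bit); double-precision numbers (VFP3/Neon)
   carry it at the top (Dreg = bit * 16 + field). */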
2596
2597 /* Move between integer and VFP cores. */
2598 static TCGv gen_vfp_mrs(void)
2599 {
2600 TCGv tmp = new_tmp();
2601 tcg_gen_mov_i32(tmp, cpu_F0s);
2602 return tmp;
2603 }
2604
2605 static void gen_vfp_msr(TCGv tmp)
2606 {
2607 tcg_gen_mov_i32(cpu_F0s, tmp);
2608 dead_tmp(tmp);
2609 }
2610
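/* The dup helpers broadcast a byte or halfword across a 32-bit word, for
   VDUP and scalar operands. */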
2611 static void gen_neon_dup_u8(TCGv var, int shift)
2612 {
2613 TCGv tmp = new_tmp();
2614 if (shift)
2615 tcg_gen_shri_i32(var, var, shift);
2616 tcg_gen_ext8u_i32(var, var);
2617 tcg_gen_shli_i32(tmp, var, 8);
2618 tcg_gen_or_i32(var, var, tmp);
2619 tcg_gen_shli_i32(tmp, var, 16);
2620 tcg_gen_or_i32(var, var, tmp);
2621 dead_tmp(tmp);
2622 }
2623
2624 static void gen_neon_dup_low16(TCGv var)
2625 {
2626 TCGv tmp = new_tmp();
2627 tcg_gen_ext16u_i32(var, var);
2628 tcg_gen_shli_i32(tmp, var, 16);
2629 tcg_gen_or_i32(var, var, tmp);
2630 dead_tmp(tmp);
2631 }
2632
2633 static void gen_neon_dup_high16(TCGv var)
2634 {
2635 TCGv tmp = new_tmp();
2636 tcg_gen_andi_i32(var, var, 0xffff0000);
2637 tcg_gen_shri_i32(tmp, var, 16);
2638 tcg_gen_or_i32(var, var, tmp);
2639 dead_tmp(tmp);
2640 }
2641
2642 /* Disassemble a VFP instruction.  Returns nonzero if an error occurred
2643 (i.e. an undefined instruction).  */
2644 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2645 {
2646 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2647 int dp, veclen;
2648 TCGv addr;
2649 TCGv tmp;
2650 TCGv tmp2;
2651
2652 if (!arm_feature(env, ARM_FEATURE_VFP))
2653 return 1;
2654
2655 if (!s->vfp_enabled) {
2656 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2657 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2658 return 1;
2659 rn = (insn >> 16) & 0xf;
2660 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2661 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2662 return 1;
2663 }
2664 dp = ((insn & 0xf00) == 0xb00);
2665 switch ((insn >> 24) & 0xf) {
2666 case 0xe:
2667 if (insn & (1 << 4)) {
2668 /* single register transfer */
2669 rd = (insn >> 12) & 0xf;
2670 if (dp) {
2671 int size;
2672 int pass;
2673
2674 VFP_DREG_N(rn, insn);
2675 if (insn & 0xf)
2676 return 1;
2677 if (insn & 0x00c00060
2678 && !arm_feature(env, ARM_FEATURE_NEON))
2679 return 1;
2680
2681 pass = (insn >> 21) & 1;
2682 if (insn & (1 << 22)) {
2683 size = 0;
2684 offset = ((insn >> 5) & 3) * 8;
2685 } else if (insn & (1 << 5)) {
2686 size = 1;
2687 offset = (insn & (1 << 6)) ? 16 : 0;
2688 } else {
2689 size = 2;
2690 offset = 0;
2691 }
2692 if (insn & ARM_CP_RW_BIT) {
2693 /* vfp->arm */
2694 tmp = neon_load_reg(rn, pass);
2695 switch (size) {
2696 case 0:
2697 if (offset)
2698 tcg_gen_shri_i32(tmp, tmp, offset);
2699 if (insn & (1 << 23))
2700 gen_uxtb(tmp);
2701 else
2702 gen_sxtb(tmp);
2703 break;
2704 case 1:
2705 if (insn & (1 << 23)) {
2706 if (offset) {
2707 tcg_gen_shri_i32(tmp, tmp, 16);
2708 } else {
2709 gen_uxth(tmp);
2710 }
2711 } else {
2712 if (offset) {
2713 tcg_gen_sari_i32(tmp, tmp, 16);
2714 } else {
2715 gen_sxth(tmp);
2716 }
2717 }
2718 break;
2719 case 2:
2720 break;
2721 }
2722 store_reg(s, rd, tmp);
2723 } else {
2724 /* arm->vfp */
2725 tmp = load_reg(s, rd);
2726 if (insn & (1 << 23)) {
2727 /* VDUP */
2728 if (size == 0) {
2729 gen_neon_dup_u8(tmp, 0);
2730 } else if (size == 1) {
2731 gen_neon_dup_low16(tmp);
2732 }
2733 for (n = 0; n <= pass * 2; n++) {
2734 tmp2 = new_tmp();
2735 tcg_gen_mov_i32(tmp2, tmp);
2736 neon_store_reg(rn, n, tmp2);
2737 }
2738 neon_store_reg(rn, n, tmp);
2739 } else {
2740 /* VMOV */
2741 switch (size) {
2742 case 0:
2743 tmp2 = neon_load_reg(rn, pass);
2744 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2745 dead_tmp(tmp2);
2746 break;
2747 case 1:
2748 tmp2 = neon_load_reg(rn, pass);
2749 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2750 dead_tmp(tmp2);
2751 break;
2752 case 2:
2753 break;
2754 }
2755 neon_store_reg(rn, pass, tmp);
2756 }
2757 }
2758 } else { /* !dp */
2759 if ((insn & 0x6f) != 0x00)
2760 return 1;
2761 rn = VFP_SREG_N(insn);
2762 if (insn & ARM_CP_RW_BIT) {
2763 /* vfp->arm */
2764 if (insn & (1 << 21)) {
2765 /* system register */
2766 rn >>= 1;
2767
2768 switch (rn) {
2769 case ARM_VFP_FPSID:
2770 /* VFP2 allows access to FPSID from userspace.
2771 VFP3 restricts all id registers to privileged
2772 accesses. */
2773 if (IS_USER(s)
2774 && arm_feature(env, ARM_FEATURE_VFP3))
2775 return 1;
2776 tmp = load_cpu_field(vfp.xregs[rn]);
2777 break;
2778 case ARM_VFP_FPEXC:
2779 if (IS_USER(s))
2780 return 1;
2781 tmp = load_cpu_field(vfp.xregs[rn]);
2782 break;
2783 case ARM_VFP_FPINST:
2784 case ARM_VFP_FPINST2:
2785 /* Not present in VFP3. */
2786 if (IS_USER(s)
2787 || arm_feature(env, ARM_FEATURE_VFP3))
2788 return 1;
2789 tmp = load_cpu_field(vfp.xregs[rn]);
2790 break;
2791 case ARM_VFP_FPSCR:
2792 if (rd == 15) {
2793 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2794 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2795 } else {
2796 tmp = new_tmp();
2797 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2798 }
2799 break;
2800 case ARM_VFP_MVFR0:
2801 case ARM_VFP_MVFR1:
2802 if (IS_USER(s)
2803 || !arm_feature(env, ARM_FEATURE_VFP3))
2804 return 1;
2805 tmp = load_cpu_field(vfp.xregs[rn]);
2806 break;
2807 default:
2808 return 1;
2809 }
2810 } else {
2811 gen_mov_F0_vreg(0, rn);
2812 tmp = gen_vfp_mrs();
2813 }
2814 if (rd == 15) {
2815 /* Set the 4 flag bits in the CPSR. */
2816 gen_set_nzcv(tmp);
2817 dead_tmp(tmp);
2818 } else {
2819 store_reg(s, rd, tmp);
2820 }
2821 } else {
2822 /* arm->vfp */
2823 tmp = load_reg(s, rd);
2824 if (insn & (1 << 21)) {
2825 rn >>= 1;
2826 /* system register */
2827 switch (rn) {
2828 case ARM_VFP_FPSID:
2829 case ARM_VFP_MVFR0:
2830 case ARM_VFP_MVFR1:
2831 /* Writes are ignored. */
2832 break;
2833 case ARM_VFP_FPSCR:
2834 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2835 dead_tmp(tmp);
2836 gen_lookup_tb(s);
2837 break;
2838 case ARM_VFP_FPEXC:
2839 if (IS_USER(s))
2840 return 1;
2841 /* TODO: VFP subarchitecture support.
2842 * For now, keep the EN bit only */
2843 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2844 store_cpu_field(tmp, vfp.xregs[rn]);
2845 gen_lookup_tb(s);
2846 break;
2847 case ARM_VFP_FPINST:
2848 case ARM_VFP_FPINST2:
2849 store_cpu_field(tmp, vfp.xregs[rn]);
2850 break;
2851 default:
2852 return 1;
2853 }
2854 } else {
2855 gen_vfp_msr(tmp);
2856 gen_mov_vreg_F0(0, rn);
2857 }
2858 }
2859 }
2860 } else {
2861 /* data processing */
2862 /* The opcode is in bits 23, 21, 20 and 6. */
2863 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2864 if (dp) {
2865 if (op == 15) {
2866 /* rn is opcode */
2867 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2868 } else {
2869 /* rn is register number */
2870 VFP_DREG_N(rn, insn);
2871 }
2872
2873 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2874 /* Integer or single precision destination. */
2875 rd = VFP_SREG_D(insn);
2876 } else {
2877 VFP_DREG_D(rd, insn);
2878 }
2879 if (op == 15 &&
2880 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2881 /* VCVT from int is always from S reg regardless of dp bit.
2882 * VCVT with immediate frac_bits has same format as SREG_M
2883 */
2884 rm = VFP_SREG_M(insn);
2885 } else {
2886 VFP_DREG_M(rm, insn);
2887 }
2888 } else {
2889 rn = VFP_SREG_N(insn);
2890 if (op == 15 && rn == 15) {
2891 /* Double precision destination. */
2892 VFP_DREG_D(rd, insn);
2893 } else {
2894 rd = VFP_SREG_D(insn);
2895 }
2896 /* NB that we implicitly rely on the encoding for the frac_bits
2897 * in VCVT of fixed to float being the same as that of an SREG_M
2898 */
2899 rm = VFP_SREG_M(insn);
2900 }
2901
2902 veclen = s->vec_len;
2903 if (op == 15 && rn > 3)
2904 veclen = 0;
2905
2906 /* Shut up compiler warnings. */
2907 delta_m = 0;
2908 delta_d = 0;
2909 bank_mask = 0;
2910
2911 if (veclen > 0) {
2912 if (dp)
2913 bank_mask = 0xc;
2914 else
2915 bank_mask = 0x18;
2916
2917 /* Figure out what type of vector operation this is. */
2918 if ((rd & bank_mask) == 0) {
2919 /* scalar */
2920 veclen = 0;
2921 } else {
2922 if (dp)
2923 delta_d = (s->vec_stride >> 1) + 1;
2924 else
2925 delta_d = s->vec_stride + 1;
2926
2927 if ((rm & bank_mask) == 0) {
2928 /* mixed scalar/vector */
2929 delta_m = 0;
2930 } else {
2931 /* vector */
2932 delta_m = delta_d;
2933 }
2934 }
2935 }
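/* Short vectors step the operands by vec_stride within a bank of 4 double
   (8 single) registers, wrapping at the bank boundary; a destination in
   bank 0 always makes the operation scalar. */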
2936
2937 /* Load the initial operands. */
2938 if (op == 15) {
2939 switch (rn) {
2940 case 16:
2941 case 17:
2942 /* Integer source */
2943 gen_mov_F0_vreg(0, rm);
2944 break;
2945 case 8:
2946 case 9:
2947 /* Compare */
2948 gen_mov_F0_vreg(dp, rd);
2949 gen_mov_F1_vreg(dp, rm);
2950 break;
2951 case 10:
2952 case 11:
2953 /* Compare with zero */
2954 gen_mov_F0_vreg(dp, rd);
2955 gen_vfp_F1_ld0(dp);
2956 break;
2957 case 20:
2958 case 21:
2959 case 22:
2960 case 23:
2961 case 28:
2962 case 29:
2963 case 30:
2964 case 31:
2965 /* Source and destination the same. */
2966 gen_mov_F0_vreg(dp, rd);
2967 break;
2968 default:
2969 /* One source operand. */
2970 gen_mov_F0_vreg(dp, rm);
2971 break;
2972 }
2973 } else {
2974 /* Two source operands. */
2975 gen_mov_F0_vreg(dp, rn);
2976 gen_mov_F1_vreg(dp, rm);
2977 }
2978
2979 for (;;) {
2980 /* Perform the calculation. */
2981 switch (op) {
2982 case 0: /* mac: fd + (fn * fm) */
2983 gen_vfp_mul(dp);
2984 gen_mov_F1_vreg(dp, rd);
2985 gen_vfp_add(dp);
2986 break;
2987 case 1: /* nmac: fd - (fn * fm) */
2988 gen_vfp_mul(dp);
2989 gen_vfp_neg(dp);
2990 gen_mov_F1_vreg(dp, rd);
2991 gen_vfp_add(dp);
2992 break;
2993 case 2: /* msc: -fd + (fn * fm) */
2994 gen_vfp_mul(dp);
2995 gen_mov_F1_vreg(dp, rd);
2996 gen_vfp_sub(dp);
2997 break;
2998 case 3: /* nmsc: -fd - (fn * fm) */
2999 gen_vfp_mul(dp);
3000 gen_vfp_neg(dp);
3001 gen_mov_F1_vreg(dp, rd);
3002 gen_vfp_sub(dp);
3003 break;
3004 case 4: /* mul: fn * fm */
3005 gen_vfp_mul(dp);
3006 break;
3007 case 5: /* nmul: -(fn * fm) */
3008 gen_vfp_mul(dp);
3009 gen_vfp_neg(dp);
3010 break;
3011 case 6: /* add: fn + fm */
3012 gen_vfp_add(dp);
3013 break;
3014 case 7: /* sub: fn - fm */
3015 gen_vfp_sub(dp);
3016 break;
3017 case 8: /* div: fn / fm */
3018 gen_vfp_div(dp);
3019 break;
3020 case 14: /* fconst */
3021 if (!arm_feature(env, ARM_FEATURE_VFP3))
3022 return 1;
3023
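/* Expand the VFP3 8-bit immediate abcdefgh: a is the sign, the exponent is
   NOT(b) then copies of b then cd, and efgh tops the fraction; all lower
   bits, including the whole low word of a double, are zero. */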
3024 n = (insn << 12) & 0x80000000;
3025 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3026 if (dp) {
3027 if (i & 0x40)
3028 i |= 0x3f80;
3029 else
3030 i |= 0x4000;
3031 n |= i << 16;
3032 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3033 } else {
3034 if (i & 0x40)
3035 i |= 0x780;
3036 else
3037 i |= 0x800;
3038 n |= i << 19;
3039 tcg_gen_movi_i32(cpu_F0s, n);
3040 }
3041 break;
3042 case 15: /* extension space */
3043 switch (rn) {
3044 case 0: /* cpy */
3045 /* no-op */
3046 break;
3047 case 1: /* abs */
3048 gen_vfp_abs(dp);
3049 break;
3050 case 2: /* neg */
3051 gen_vfp_neg(dp);
3052 break;
3053 case 3: /* sqrt */
3054 gen_vfp_sqrt(dp);
3055 break;
3056 case 4: /* vcvtb.f32.f16 */
3057 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3058 return 1;
3059 tmp = gen_vfp_mrs();
3060 tcg_gen_ext16u_i32(tmp, tmp);
3061 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3062 dead_tmp(tmp);
3063 break;
3064 case 5: /* vcvtt.f32.f16 */
3065 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3066 return 1;
3067 tmp = gen_vfp_mrs();
3068 tcg_gen_shri_i32(tmp, tmp, 16);
3069 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3070 dead_tmp(tmp);
3071 break;
3072 case 6: /* vcvtb.f16.f32 */
3073 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3074 return 1;
3075 tmp = new_tmp();
3076 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3077 gen_mov_F0_vreg(0, rd);
3078 tmp2 = gen_vfp_mrs();
3079 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3080 tcg_gen_or_i32(tmp, tmp, tmp2);
3081 dead_tmp(tmp2);
3082 gen_vfp_msr(tmp);
3083 break;
3084 case 7: /* vcvtt.f16.f32 */
3085 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3086 return 1;
3087 tmp = new_tmp();
3088 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3089 tcg_gen_shli_i32(tmp, tmp, 16);
3090 gen_mov_F0_vreg(0, rd);
3091 tmp2 = gen_vfp_mrs();
3092 tcg_gen_ext16u_i32(tmp2, tmp2);
3093 tcg_gen_or_i32(tmp, tmp, tmp2);
3094 dead_tmp(tmp2);
3095 gen_vfp_msr(tmp);
3096 break;
3097 case 8: /* cmp */
3098 gen_vfp_cmp(dp);
3099 break;
3100 case 9: /* cmpe */
3101 gen_vfp_cmpe(dp);
3102 break;
3103 case 10: /* cmpz */
3104 gen_vfp_cmp(dp);
3105 break;
3106 case 11: /* cmpez */
3107 gen_vfp_F1_ld0(dp);
3108 gen_vfp_cmpe(dp);
3109 break;
3110 case 15: /* single<->double conversion */
3111 if (dp)
3112 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3113 else
3114 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3115 break;
3116 case 16: /* fuito */
3117 gen_vfp_uito(dp);
3118 break;
3119 case 17: /* fsito */
3120 gen_vfp_sito(dp);
3121 break;
3122 case 20: /* fshto */
3123 if (!arm_feature(env, ARM_FEATURE_VFP3))
3124 return 1;
3125 gen_vfp_shto(dp, 16 - rm);
3126 break;
3127 case 21: /* fslto */
3128 if (!arm_feature(env, ARM_FEATURE_VFP3))
3129 return 1;
3130 gen_vfp_slto(dp, 32 - rm);
3131 break;
3132 case 22: /* fuhto */
3133 if (!arm_feature(env, ARM_FEATURE_VFP3))
3134 return 1;
3135 gen_vfp_uhto(dp, 16 - rm);
3136 break;
3137 case 23: /* fulto */
3138 if (!arm_feature(env, ARM_FEATURE_VFP3))
3139 return 1;
3140 gen_vfp_ulto(dp, 32 - rm);
3141 break;
3142 case 24: /* ftoui */
3143 gen_vfp_toui(dp);
3144 break;
3145 case 25: /* ftouiz */
3146 gen_vfp_touiz(dp);
3147 break;
3148 case 26: /* ftosi */
3149 gen_vfp_tosi(dp);
3150 break;
3151 case 27: /* ftosiz */
3152 gen_vfp_tosiz(dp);
3153 break;
3154 case 28: /* ftosh */
3155 if (!arm_feature(env, ARM_FEATURE_VFP3))
3156 return 1;
3157 gen_vfp_tosh(dp, 16 - rm);
3158 break;
3159 case 29: /* ftosl */
3160 if (!arm_feature(env, ARM_FEATURE_VFP3))
3161 return 1;
3162 gen_vfp_tosl(dp, 32 - rm);
3163 break;
3164 case 30: /* ftouh */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
3167 gen_vfp_touh(dp, 16 - rm);
3168 break;
3169 case 31: /* ftoul */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
3172 gen_vfp_toul(dp, 32 - rm);
3173 break;
3174 default: /* undefined */
3175 printf ("rn:%d\n", rn);
3176 return 1;
3177 }
3178 break;
3179 default: /* undefined */
3180 printf ("op:%d\n", op);
3181 return 1;
3182 }
3183
3184 /* Write back the result. */
3185 if (op == 15 && (rn >= 8 && rn <= 11))
3186 ; /* Comparison, do nothing. */
3187 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3188 /* VCVT double to int: always integer result. */
3189 gen_mov_vreg_F0(0, rd);
3190 else if (op == 15 && rn == 15)
3191 /* conversion */
3192 gen_mov_vreg_F0(!dp, rd);
3193 else
3194 gen_mov_vreg_F0(dp, rd);
3195
3196 /* break out of the loop if we have finished */
3197 if (veclen == 0)
3198 break;
3199
3200 if (op == 15 && delta_m == 0) {
3201 /* single source one-many */
3202 while (veclen--) {
3203 rd = ((rd + delta_d) & (bank_mask - 1))
3204 | (rd & bank_mask);
3205 gen_mov_vreg_F0(dp, rd);
3206 }
3207 break;
3208 }
3209 /* Set up the next operands.  */
3210 veclen--;
3211 rd = ((rd + delta_d) & (bank_mask - 1))
3212 | (rd & bank_mask);
3213
3214 if (op == 15) {
3215 /* One source operand. */
3216 rm = ((rm + delta_m) & (bank_mask - 1))
3217 | (rm & bank_mask);
3218 gen_mov_F0_vreg(dp, rm);
3219 } else {
3220 /* Two source operands. */
3221 rn = ((rn + delta_d) & (bank_mask - 1))
3222 | (rn & bank_mask);
3223 gen_mov_F0_vreg(dp, rn);
3224 if (delta_m) {
3225 rm = ((rm + delta_m) & (bank_mask - 1))
3226 | (rm & bank_mask);
3227 gen_mov_F1_vreg(dp, rm);
3228 }
3229 }
3230 }
3231 }
3232 break;
3233 case 0xc:
3234 case 0xd:
3235 if ((insn & 0x03e00000) == 0x00400000) {
3236 /* two-register transfer */
3237 rn = (insn >> 16) & 0xf;
3238 rd = (insn >> 12) & 0xf;
3239 if (dp) {
3240 VFP_DREG_M(rm, insn);
3241 } else {
3242 rm = VFP_SREG_M(insn);
3243 }
3244
3245 if (insn & ARM_CP_RW_BIT) {
3246 /* vfp->arm */
3247 if (dp) {
3248 gen_mov_F0_vreg(0, rm * 2);
3249 tmp = gen_vfp_mrs();
3250 store_reg(s, rd, tmp);
3251 gen_mov_F0_vreg(0, rm * 2 + 1);
3252 tmp = gen_vfp_mrs();
3253 store_reg(s, rn, tmp);
3254 } else {
3255 gen_mov_F0_vreg(0, rm);
3256 tmp = gen_vfp_mrs();
3257 store_reg(s, rn, tmp);
3258 gen_mov_F0_vreg(0, rm + 1);
3259 tmp = gen_vfp_mrs();
3260 store_reg(s, rd, tmp);
3261 }
3262 } else {
3263 /* arm->vfp */
3264 if (dp) {
3265 tmp = load_reg(s, rd);
3266 gen_vfp_msr(tmp);
3267 gen_mov_vreg_F0(0, rm * 2);
3268 tmp = load_reg(s, rn);
3269 gen_vfp_msr(tmp);
3270 gen_mov_vreg_F0(0, rm * 2 + 1);
3271 } else {
3272 tmp = load_reg(s, rn);
3273 gen_vfp_msr(tmp);
3274 gen_mov_vreg_F0(0, rm);
3275 tmp = load_reg(s, rd);
3276 gen_vfp_msr(tmp);
3277 gen_mov_vreg_F0(0, rm + 1);
3278 }
3279 }
3280 } else {
3281 /* Load/store */
3282 rn = (insn >> 16) & 0xf;
3283 if (dp)
3284 VFP_DREG_D(rd, insn);
3285 else
3286 rd = VFP_SREG_D(insn);
3287 if (s->thumb && rn == 15) {
3288 addr = new_tmp();
3289 tcg_gen_movi_i32(addr, s->pc & ~2);
3290 } else {
3291 addr = load_reg(s, rn);
3292 }
3293 if ((insn & 0x01200000) == 0x01000000) {
3294 /* Single load/store */
3295 offset = (insn & 0xff) << 2;
3296 if ((insn & (1 << 23)) == 0)
3297 offset = -offset;
3298 tcg_gen_addi_i32(addr, addr, offset);
3299 if (insn & (1 << 20)) {
3300 gen_vfp_ld(s, dp, addr);
3301 gen_mov_vreg_F0(dp, rd);
3302 } else {
3303 gen_mov_F0_vreg(dp, rd);
3304 gen_vfp_st(s, dp, addr);
3305 }
3306 dead_tmp(addr);
3307 } else {
3308 /* load/store multiple */
3309 if (dp)
3310 n = (insn >> 1) & 0x7f;
3311 else
3312 n = insn & 0xff;
3313
3314 if (insn & (1 << 24)) /* pre-decrement */
3315 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3316
3317 if (dp)
3318 offset = 8;
3319 else
3320 offset = 4;
3321 for (i = 0; i < n; i++) {
3322 if (insn & ARM_CP_RW_BIT) {
3323 /* load */
3324 gen_vfp_ld(s, dp, addr);
3325 gen_mov_vreg_F0(dp, rd + i);
3326 } else {
3327 /* store */
3328 gen_mov_F0_vreg(dp, rd + i);
3329 gen_vfp_st(s, dp, addr);
3330 }
3331 tcg_gen_addi_i32(addr, addr, offset);
3332 }
3333 if (insn & (1 << 21)) {
3334 /* writeback */
3335 if (insn & (1 << 24))
3336 offset = -offset * n;
3337 else if (dp && (insn & 1))
3338 offset = 4;
3339 else
3340 offset = 0;
3341
3342 if (offset != 0)
3343 tcg_gen_addi_i32(addr, addr, offset);
3344 store_reg(s, rn, addr);
3345 } else {
3346 dead_tmp(addr);
3347 }
3348 }
3349 }
3350 break;
3351 default:
3352 /* Should never happen. */
3353 return 1;
3354 }
3355 return 0;
3356 }
3357
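/* Jump to dest, chaining this TB directly to the next one when dest lies in
   the same guest page, so page-granular invalidation keeps the cached link
   safe; otherwise exit with an indirect jump. */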
3358 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3359 {
3360 TranslationBlock *tb;
3361
3362 tb = s->tb;
3363 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3364 tcg_gen_goto_tb(n);
3365 gen_set_pc_im(dest);
3366 tcg_gen_exit_tb((long)tb + n);
3367 } else {
3368 gen_set_pc_im(dest);
3369 tcg_gen_exit_tb(0);
3370 }
3371 }
3372
3373 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3374 {
3375 if (unlikely(s->singlestep_enabled)) {
3376 /* An indirect jump so that we still trigger the debug exception. */
3377 if (s->thumb)
3378 dest |= 1;
3379 gen_bx_im(s, dest);
3380 } else {
3381 gen_goto_tb(s, 0, dest);
3382 s->is_jmp = DISAS_TB_JUMP;
3383 }
3384 }
3385
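/* Signed 16x16->32 multiply of the halfwords of t0 and t1 selected by x and
   y (0 = bottom, 1 = top), as used by the SMULxy/SMLAxy family. */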
3386 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3387 {
3388 if (x)
3389 tcg_gen_sari_i32(t0, t0, 16);
3390 else
3391 gen_sxth(t0);
3392 if (y)
3393 tcg_gen_sari_i32(t1, t1, 16);
3394 else
3395 gen_sxth(t1);
3396 tcg_gen_mul_i32(t0, t0, t1);
3397 }
3398
3399 /* Return the mask of PSR bits set by a MSR instruction. */
3400 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3401 uint32_t mask;
3402
3403 mask = 0;
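/* The four flag bits are the MSR field mask {c,x,s,f}; each one enables
   writes to one byte of the PSR. */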
3404 if (flags & (1 << 0))
3405 mask |= 0xff;
3406 if (flags & (1 << 1))
3407 mask |= 0xff00;
3408 if (flags & (1 << 2))
3409 mask |= 0xff0000;
3410 if (flags & (1 << 3))
3411 mask |= 0xff000000;
3412
3413 /* Mask out undefined bits. */
3414 mask &= ~CPSR_RESERVED;
3415 if (!arm_feature(env, ARM_FEATURE_V6))
3416 mask &= ~(CPSR_E | CPSR_GE);
3417 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3418 mask &= ~CPSR_IT;
3419 /* Mask out execution state bits. */
3420 if (!spsr)
3421 mask &= ~CPSR_EXEC;
3422 /* Mask out privileged bits. */
3423 if (IS_USER(s))
3424 mask &= CPSR_USER;
3425 return mask;
3426 }
3427
3428 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3429 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3430 {
3431 TCGv tmp;
3432 if (spsr) {
3433 /* ??? This is also undefined in system mode. */
3434 if (IS_USER(s))
3435 return 1;
3436
3437 tmp = load_cpu_field(spsr);
3438 tcg_gen_andi_i32(tmp, tmp, ~mask);
3439 tcg_gen_andi_i32(t0, t0, mask);
3440 tcg_gen_or_i32(tmp, tmp, t0);
3441 store_cpu_field(tmp, spsr);
3442 } else {
3443 gen_set_cpsr(t0, mask);
3444 }
3445 dead_tmp(t0);
3446 gen_lookup_tb(s);
3447 return 0;
3448 }
3449
3450 /* Returns nonzero if access to the PSR is not permitted. */
3451 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3452 {
3453 TCGv tmp;
3454 tmp = new_tmp();
3455 tcg_gen_movi_i32(tmp, val);
3456 return gen_set_psr(s, mask, spsr, tmp);
3457 }
3458
3459 /* Generate an old-style exception return. Marks pc as dead. */
3460 static void gen_exception_return(DisasContext *s, TCGv pc)
3461 {
3462 TCGv tmp;
3463 store_reg(s, 15, pc);
3464 tmp = load_cpu_field(spsr);
3465 gen_set_cpsr(tmp, 0xffffffff);
3466 dead_tmp(tmp);
3467 s->is_jmp = DISAS_UPDATE;
3468 }
3469
3470 /* Generate a v6 exception return. Marks both values as dead. */
3471 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3472 {
3473 gen_set_cpsr(cpsr, 0xffffffff);
3474 dead_tmp(cpsr);
3475 store_reg(s, 15, pc);
3476 s->is_jmp = DISAS_UPDATE;
3477 }
3478
3479 static inline void
3480 gen_set_condexec (DisasContext *s)
3481 {
3482 if (s->condexec_mask) {
3483 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3484 TCGv tmp = new_tmp();
3485 tcg_gen_movi_i32(tmp, val);
3486 store_cpu_field(tmp, condexec_bits);
3487 }
3488 }
3489
3490 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3491 {
3492 gen_set_condexec(s);
3493 gen_set_pc_im(s->pc - offset);
3494 gen_exception(excp);
3495 s->is_jmp = DISAS_JUMP;
3496 }
3497
3498 static void gen_nop_hint(DisasContext *s, int val)
3499 {
3500 switch (val) {
3501 case 3: /* wfi */
3502 gen_set_pc_im(s->pc);
3503 s->is_jmp = DISAS_WFI;
3504 break;
3505 case 2: /* wfe */
3506 case 4: /* sev */
3507 /* TODO: Implement SEV and WFE. May help SMP performance. */
3508 default: /* nop */
3509 break;
3510 }
3511 }
3512
3513 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3514
3515 static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
3516 {
3517 switch (size) {
3518 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3519 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3520 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3521 default: return 1;
3522 }
3523 return 0;
3524 }
3525
3526 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3527 {
3528 switch (size) {
3529 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3530 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3531 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3532 default: return;
3533 }
3534 }
3535
3536 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3537 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3538 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3539 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3540 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3541
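/* Dispatch an integer helper on ((size << 1) | u): size picks the element
   width (8/16/32 bits), u picks the unsigned variant; 64-bit elements are
   handled separately. */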
3542 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3543 switch ((size << 1) | u) { \
3544 case 0: \
3545 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3546 break; \
3547 case 1: \
3548 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3549 break; \
3550 case 2: \
3551 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3552 break; \
3553 case 3: \
3554 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3555 break; \
3556 case 4: \
3557 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3558 break; \
3559 case 5: \
3560 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3561 break; \
3562 default: return 1; \
3563 }} while (0)
3564
3565 #define GEN_NEON_INTEGER_OP(name) do { \
3566 switch ((size << 1) | u) { \
3567 case 0: \
3568 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3569 break; \
3570 case 1: \
3571 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3572 break; \
3573 case 2: \
3574 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3575 break; \
3576 case 3: \
3577 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3578 break; \
3579 case 4: \
3580 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3581 break; \
3582 case 5: \
3583 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3584 break; \
3585 default: return 1; \
3586 }} while (0)
3587
3588 static TCGv neon_load_scratch(int scratch)
3589 {
3590 TCGv tmp = new_tmp();
3591 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3592 return tmp;
3593 }
3594
3595 static void neon_store_scratch(int scratch, TCGv var)
3596 {
3597 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3598 dead_tmp(var);
3599 }
3600
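/* Load a Neon scalar into a 32-bit temp: reg packs the D register in its
   low bits with the element index above; 16-bit scalars are duplicated
   into both halfwords of the result. */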
3601 static inline TCGv neon_get_scalar(int size, int reg)
3602 {
3603 TCGv tmp;
3604 if (size == 1) {
3605 tmp = neon_load_reg(reg & 7, reg >> 4);
3606 if (reg & 8) {
3607 gen_neon_dup_high16(tmp);
3608 } else {
3609 gen_neon_dup_low16(tmp);
3610 }
3611 } else {
3612 tmp = neon_load_reg(reg & 15, reg >> 4);
3613 }
3614 return tmp;
3615 }
3616
3617 static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3618 {
3619 TCGv rd, rm, tmp;
3620
3621 rd = new_tmp();
3622 rm = new_tmp();
3623 tmp = new_tmp();
3624
3625 tcg_gen_andi_i32(rd, t0, 0xff);
3626 tcg_gen_shri_i32(tmp, t0, 8);
3627 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3628 tcg_gen_or_i32(rd, rd, tmp);
3629 tcg_gen_shli_i32(tmp, t1, 16);
3630 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3631 tcg_gen_or_i32(rd, rd, tmp);
3632 tcg_gen_shli_i32(tmp, t1, 8);
3633 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3634 tcg_gen_or_i32(rd, rd, tmp);
3635
3636 tcg_gen_shri_i32(rm, t0, 8);
3637 tcg_gen_andi_i32(rm, rm, 0xff);
3638 tcg_gen_shri_i32(tmp, t0, 16);
3639 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3640 tcg_gen_or_i32(rm, rm, tmp);
3641 tcg_gen_shli_i32(tmp, t1, 8);
3642 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3643 tcg_gen_or_i32(rm, rm, tmp);
3644 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3645 tcg_gen_or_i32(t1, rm, tmp);
3646 tcg_gen_mov_i32(t0, rd);
3647
3648 dead_tmp(tmp);
3649 dead_tmp(rm);
3650 dead_tmp(rd);
3651 }
3652
3653 static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3654 {
3655 TCGv rd, rm, tmp;
3656
3657 rd = new_tmp();
3658 rm = new_tmp();
3659 tmp = new_tmp();
3660
3661 tcg_gen_andi_i32(rd, t0, 0xff);
3662 tcg_gen_shli_i32(tmp, t1, 8);
3663 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3664 tcg_gen_or_i32(rd, rd, tmp);
3665 tcg_gen_shli_i32(tmp, t0, 16);
3666 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3667 tcg_gen_or_i32(rd, rd, tmp);
3668 tcg_gen_shli_i32(tmp, t1, 24);
3669 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3670 tcg_gen_or_i32(rd, rd, tmp);
3671
3672 tcg_gen_andi_i32(rm, t1, 0xff000000);
3673 tcg_gen_shri_i32(tmp, t0, 8);
3674 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3675 tcg_gen_or_i32(rm, rm, tmp);
3676 tcg_gen_shri_i32(tmp, t1, 8);
3677 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3678 tcg_gen_or_i32(rm, rm, tmp);
3679 tcg_gen_shri_i32(tmp, t0, 16);
3680 tcg_gen_andi_i32(tmp, tmp, 0xff);
3681 tcg_gen_or_i32(t1, rm, tmp);
3682 tcg_gen_mov_i32(t0, rd);
3683
3684 dead_tmp(tmp);
3685 dead_tmp(rm);
3686 dead_tmp(rd);
3687 }
3688
3689 static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3690 {
3691 TCGv tmp, tmp2;
3692
3693 tmp = new_tmp();
3694 tmp2 = new_tmp();
3695
3696 tcg_gen_andi_i32(tmp, t0, 0xffff);
3697 tcg_gen_shli_i32(tmp2, t1, 16);
3698 tcg_gen_or_i32(tmp, tmp, tmp2);
3699 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3700 tcg_gen_shri_i32(tmp2, t0, 16);
3701 tcg_gen_or_i32(t1, t1, tmp2);
3702 tcg_gen_mov_i32(t0, tmp);
3703
3704 dead_tmp(tmp2);
3705 dead_tmp(tmp);
3706 }
3707
3708 static void gen_neon_unzip(int reg, int q, int tmp, int size)
3709 {
3710 int n;
3711 TCGv t0, t1;
3712
3713 for (n = 0; n < q + 1; n += 2) {
3714 t0 = neon_load_reg(reg, n);
3715 t1 = neon_load_reg(reg, n + 1);
3716 switch (size) {
3717 case 0: gen_neon_unzip_u8(t0, t1); break;
3718 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
3719 case 2: /* no-op */; break;
3720 default: abort();
3721 }
3722 neon_store_scratch(tmp + n, t0);
3723 neon_store_scratch(tmp + n + 1, t1);
3724 }
3725 }
3726
3727 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3728 {
3729 TCGv rd, tmp;
3730
3731 rd = new_tmp();
3732 tmp = new_tmp();
3733
3734 tcg_gen_shli_i32(rd, t0, 8);
3735 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3736 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3737 tcg_gen_or_i32(rd, rd, tmp);
3738
3739 tcg_gen_shri_i32(t1, t1, 8);
3740 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3741 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3742 tcg_gen_or_i32(t1, t1, tmp);
3743 tcg_gen_mov_i32(t0, rd);
3744
3745 dead_tmp(tmp);
3746 dead_tmp(rd);
3747 }
3748
3749 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3750 {
3751 TCGv rd, tmp;
3752
3753 rd = new_tmp();
3754 tmp = new_tmp();
3755
3756 tcg_gen_shli_i32(rd, t0, 16);
3757 tcg_gen_andi_i32(tmp, t1, 0xffff);
3758 tcg_gen_or_i32(rd, rd, tmp);
3759 tcg_gen_shri_i32(t1, t1, 16);
3760 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3761 tcg_gen_or_i32(t1, t1, tmp);
3762 tcg_gen_mov_i32(t0, rd);
3763
3764 dead_tmp(tmp);
3765 dead_tmp(rd);
3766 }
3767
3768
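/* Layout of the VLDn/VSTn "multiple elements" forms, indexed by insn[11:8]:
   registers per structure, interleave factor and register spacing. */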
3769 static struct {
3770 int nregs;
3771 int interleave;
3772 int spacing;
3773 } neon_ls_element_type[11] = {
3774 {4, 4, 1},
3775 {4, 4, 2},
3776 {4, 1, 1},
3777 {4, 2, 1},
3778 {3, 3, 1},
3779 {3, 3, 2},
3780 {3, 1, 1},
3781 {1, 1, 1},
3782 {2, 2, 1},
3783 {2, 2, 2},
3784 {2, 1, 1}
3785 };
3786
3787 /* Translate a NEON load/store element instruction. Return nonzero if the
3788 instruction is invalid. */
3789 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3790 {
3791 int rd, rn, rm;
3792 int op;
3793 int nregs;
3794 int interleave;
3795 int spacing;
3796 int stride;
3797 int size;
3798 int reg;
3799 int pass;
3800 int load;
3801 int shift;
3802 int n;
3803 TCGv addr;
3804 TCGv tmp;
3805 TCGv tmp2;
3806 TCGv_i64 tmp64;
3807
3808 if (!s->vfp_enabled)
3809 return 1;
3810 VFP_DREG_D(rd, insn);
3811 rn = (insn >> 16) & 0xf;
3812 rm = insn & 0xf;
3813 load = (insn & (1 << 21)) != 0;
3814 addr = new_tmp();
3815 if ((insn & (1 << 23)) == 0) {
3816 /* Load store all elements. */
3817 op = (insn >> 8) & 0xf;
3818 size = (insn >> 6) & 3;
3819 if (op > 10)
3820 return 1;
3821 nregs = neon_ls_element_type[op].nregs;
3822 interleave = neon_ls_element_type[op].interleave;
3823 spacing = neon_ls_element_type[op].spacing;
3824 if (size == 3 && (interleave | spacing) != 1)
3825 return 1;
3826 load_reg_var(s, addr, rn);
3827 stride = (1 << size) * interleave;
3828 for (reg = 0; reg < nregs; reg++) {
3829 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3830 load_reg_var(s, addr, rn);
3831 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3832 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3833 load_reg_var(s, addr, rn);
3834 tcg_gen_addi_i32(addr, addr, 1 << size);
3835 }
3836 if (size == 3) {
3837 if (load) {
3838 tmp64 = gen_ld64(addr, IS_USER(s));
3839 neon_store_reg64(tmp64, rd);
3840 tcg_temp_free_i64(tmp64);
3841 } else {
3842 tmp64 = tcg_temp_new_i64();
3843 neon_load_reg64(tmp64, rd);
3844 gen_st64(tmp64, addr, IS_USER(s));
3845 }
3846 tcg_gen_addi_i32(addr, addr, stride);
3847 } else {
3848 for (pass = 0; pass < 2; pass++) {
3849 if (size == 2) {
3850 if (load) {
3851 tmp = gen_ld32(addr, IS_USER(s));
3852 neon_store_reg(rd, pass, tmp);
3853 } else {
3854 tmp = neon_load_reg(rd, pass);
3855 gen_st32(tmp, addr, IS_USER(s));
3856 }
3857 tcg_gen_addi_i32(addr, addr, stride);
3858 } else if (size == 1) {
3859 if (load) {
3860 tmp = gen_ld16u(addr, IS_USER(s));
3861 tcg_gen_addi_i32(addr, addr, stride);
3862 tmp2 = gen_ld16u(addr, IS_USER(s));
3863 tcg_gen_addi_i32(addr, addr, stride);
3864 tcg_gen_shli_i32(tmp2, tmp2, 16);
3865 tcg_gen_or_i32(tmp, tmp, tmp2);
3866 dead_tmp(tmp2);
3867 neon_store_reg(rd, pass, tmp);
3868 } else {
3869 tmp = neon_load_reg(rd, pass);
3870 tmp2 = new_tmp();
3871 tcg_gen_shri_i32(tmp2, tmp, 16);
3872 gen_st16(tmp, addr, IS_USER(s));
3873 tcg_gen_addi_i32(addr, addr, stride);
3874 gen_st16(tmp2, addr, IS_USER(s));
3875 tcg_gen_addi_i32(addr, addr, stride);
3876 }
3877 } else /* size == 0 */ {
3878 if (load) {
3879 TCGV_UNUSED(tmp2);
3880 for (n = 0; n < 4; n++) {
3881 tmp = gen_ld8u(addr, IS_USER(s));
3882 tcg_gen_addi_i32(addr, addr, stride);
3883 if (n == 0) {
3884 tmp2 = tmp;
3885 } else {
3886 tcg_gen_shli_i32(tmp, tmp, n * 8);
3887 tcg_gen_or_i32(tmp2, tmp2, tmp);
3888 dead_tmp(tmp);
3889 }
3890 }
3891 neon_store_reg(rd, pass, tmp2);
3892 } else {
3893 tmp2 = neon_load_reg(rd, pass);
3894 for (n = 0; n < 4; n++) {
3895 tmp = new_tmp();
3896 if (n == 0) {
3897 tcg_gen_mov_i32(tmp, tmp2);
3898 } else {
3899 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3900 }
3901 gen_st8(tmp, addr, IS_USER(s));
3902 tcg_gen_addi_i32(addr, addr, stride);
3903 }
3904 dead_tmp(tmp2);
3905 }
3906 }
3907 }
3908 }
3909 rd += spacing;
3910 }
3911 stride = nregs * 8;
3912 } else {
3913 size = (insn >> 10) & 3;
3914 if (size == 3) {
3915 /* Load single element to all lanes. */
3916 if (!load)
3917 return 1;
3918 size = (insn >> 6) & 3;
3919 nregs = ((insn >> 8) & 3) + 1;
3920 stride = (insn & (1 << 5)) ? 2 : 1;
3921 load_reg_var(s, addr, rn);
3922 for (reg = 0; reg < nregs; reg++) {
3923 switch (size) {
3924 case 0:
3925 tmp = gen_ld8u(addr, IS_USER(s));
3926 gen_neon_dup_u8(tmp, 0);
3927 break;
3928 case 1:
3929 tmp = gen_ld16u(addr, IS_USER(s));
3930 gen_neon_dup_low16(tmp);
3931 break;
3932 case 2:
3933 tmp = gen_ld32(addr, IS_USER(s));
3934 break;
3935 case 3:
3936 return 1;
3937 default: /* Avoid compiler warnings. */
3938 abort();
3939 }
3940 tcg_gen_addi_i32(addr, addr, 1 << size);
3941 tmp2 = new_tmp();
3942 tcg_gen_mov_i32(tmp2, tmp);
3943 neon_store_reg(rd, 0, tmp2);
3944 neon_store_reg(rd, 1, tmp);
3945 rd += stride;
3946 }
3947 stride = (1 << size) * nregs;
3948 } else {
3949 /* Single element. */
3950 pass = (insn >> 7) & 1;
3951 switch (size) {
3952 case 0:
3953 shift = ((insn >> 5) & 3) * 8;
3954 stride = 1;
3955 break;
3956 case 1:
3957 shift = ((insn >> 6) & 1) * 16;
3958 stride = (insn & (1 << 5)) ? 2 : 1;
3959 break;
3960 case 2:
3961 shift = 0;
3962 stride = (insn & (1 << 6)) ? 2 : 1;
3963 break;
3964 default:
3965 abort();
3966 }
3967 nregs = ((insn >> 8) & 3) + 1;
3968 load_reg_var(s, addr, rn);
3969 for (reg = 0; reg < nregs; reg++) {
3970 if (load) {
3971 switch (size) {
3972 case 0:
3973 tmp = gen_ld8u(addr, IS_USER(s));
3974 break;
3975 case 1:
3976 tmp = gen_ld16u(addr, IS_USER(s));
3977 break;
3978 case 2:
3979 tmp = gen_ld32(addr, IS_USER(s));
3980 break;
3981 default: /* Avoid compiler warnings. */
3982 abort();
3983 }
3984 if (size != 2) {
3985 tmp2 = neon_load_reg(rd, pass);
3986 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3987 dead_tmp(tmp2);
3988 }
3989 neon_store_reg(rd, pass, tmp);
3990 } else { /* Store */
3991 tmp = neon_load_reg(rd, pass);
3992 if (shift)
3993 tcg_gen_shri_i32(tmp, tmp, shift);
3994 switch (size) {
3995 case 0:
3996 gen_st8(tmp, addr, IS_USER(s));
3997 break;
3998 case 1:
3999 gen_st16(tmp, addr, IS_USER(s));
4000 break;
4001 case 2:
4002 gen_st32(tmp, addr, IS_USER(s));
4003 break;
4004 }
4005 }
4006 rd += stride;
4007 tcg_gen_addi_i32(addr, addr, 1 << size);
4008 }
4009 stride = nregs * (1 << size);
4010 }
4011 }
4012 dead_tmp(addr);
4013 if (rm != 15) {
4014 TCGv base;
4015
4016 base = load_reg(s, rn);
4017 if (rm == 13) {
4018 tcg_gen_addi_i32(base, base, stride);
4019 } else {
4020 TCGv index;
4021 index = load_reg(s, rm);
4022 tcg_gen_add_i32(base, base, index);
4023 dead_tmp(index);
4024 }
4025 store_reg(s, rn, base);
4026 }
4027 return 0;
4028 }
4029
4030 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4031 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4032 {
4033 tcg_gen_and_i32(t, t, c);
4034 tcg_gen_andc_i32(f, f, c);
4035 tcg_gen_or_i32(dest, t, f);
4036 }
4037
4038 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4039 {
4040 switch (size) {
4041 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4042 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4043 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4044 default: abort();
4045 }
4046 }
4047
4048 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4049 {
4050 switch (size) {
4051 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4052 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4053 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4054 default: abort();
4055 }
4056 }
4057
4058 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4059 {
4060 switch (size) {
4061 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4062 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4063 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4064 default: abort();
4065 }
4066 }
4067
4068 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4069 int q, int u)
4070 {
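/* q selects the rounding forms (VRSHRN/VQRSHRN); u selects unsigned. */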
4071 if (q) {
4072 if (u) {
4073 switch (size) {
4074 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4075 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4076 default: abort();
4077 }
4078 } else {
4079 switch (size) {
4080 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4081 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4082 default: abort();
4083 }
4084 }
4085 } else {
4086 if (u) {
4087 switch (size) {
4088 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4089 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4090 default: abort();
4091 }
4092 } else {
4093 switch (size) {
4094 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4095 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4096 default: abort();
4097 }
4098 }
4099 }
4100 }
4101
4102 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4103 {
4104 if (u) {
4105 switch (size) {
4106 case 0: gen_helper_neon_widen_u8(dest, src); break;
4107 case 1: gen_helper_neon_widen_u16(dest, src); break;
4108 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4109 default: abort();
4110 }
4111 } else {
4112 switch (size) {
4113 case 0: gen_helper_neon_widen_s8(dest, src); break;
4114 case 1: gen_helper_neon_widen_s16(dest, src); break;
4115 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4116 default: abort();
4117 }
4118 }
4119 dead_tmp(src);
4120 }
4121
4122 static inline void gen_neon_addl(int size)
4123 {
4124 switch (size) {
4125 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4126 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4127 case 2: tcg_gen_add_i64(CPU_V001); break;
4128 default: abort();
4129 }
4130 }
4131
4132 static inline void gen_neon_subl(int size)
4133 {
4134 switch (size) {
4135 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4136 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4137 case 2: tcg_gen_sub_i64(CPU_V001); break;
4138 default: abort();
4139 }
4140 }
4141
4142 static inline void gen_neon_negl(TCGv_i64 var, int size)
4143 {
4144 switch (size) {
4145 case 0: gen_helper_neon_negl_u16(var, var); break;
4146 case 1: gen_helper_neon_negl_u32(var, var); break;
4147 case 2: gen_helper_neon_negl_u64(var, var); break;
4148 default: abort();
4149 }
4150 }
4151
4152 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4153 {
4154 switch (size) {
4155 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4156 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4157 default: abort();
4158 }
4159 }
4160
4161 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4162 {
4163 TCGv_i64 tmp;
4164
4165 switch ((size << 1) | u) {
4166 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4167 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4168 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4169 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4170 case 4:
4171 tmp = gen_muls_i64_i32(a, b);
4172 tcg_gen_mov_i64(dest, tmp);
4173 break;
4174 case 5:
4175 tmp = gen_mulu_i64_i32(a, b);
4176 tcg_gen_mov_i64(dest, tmp);
4177 break;
4178 default: abort();
4179 }
4180
4181 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters,
4182 so free them here. */
4183 if (size < 2) {
4184 dead_tmp(a);
4185 dead_tmp(b);
4186 }
4187 }
4188
4189 /* Translate a NEON data processing instruction. Return nonzero if the
4190 instruction is invalid.
4191 We process data in a mixture of 32-bit and 64-bit chunks.
4192 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4193
4194 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4195 {
4196 int op;
4197 int q;
4198 int rd, rn, rm;
4199 int size;
4200 int shift;
4201 int pass;
4202 int count;
4203 int pairwise;
4204 int u;
4205 int n;
4206 uint32_t imm, mask;
4207 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4208 TCGv_i64 tmp64;
4209
4210 if (!s->vfp_enabled)
4211 return 1;
4212 q = (insn & (1 << 6)) != 0;
4213 u = (insn >> 24) & 1;
4214 VFP_DREG_D(rd, insn);
4215 VFP_DREG_N(rn, insn);
4216 VFP_DREG_M(rm, insn);
4217 size = (insn >> 20) & 3;
4218 if ((insn & (1 << 23)) == 0) {
4219 /* Three register same length. */
4220 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4221 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4222 || op == 10 || op == 11 || op == 16)) {
4223 /* 64-bit element instructions. */
4224 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4225 neon_load_reg64(cpu_V0, rn + pass);
4226 neon_load_reg64(cpu_V1, rm + pass);
4227 switch (op) {
4228 case 1: /* VQADD */
4229 if (u) {
4230 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4231 cpu_V0, cpu_V1);
4232 } else {
4233 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4234 cpu_V0, cpu_V1);
4235 }
4236 break;
4237 case 5: /* VQSUB */
4238 if (u) {
4239 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4240 cpu_V0, cpu_V1);
4241 } else {
4242 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4243 cpu_V0, cpu_V1);
4244 }
4245 break;
4246 case 8: /* VSHL */
4247 if (u) {
4248 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4249 } else {
4250 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4251 }
4252 break;
4253 case 9: /* VQSHL */
4254 if (u) {
4255 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4256 cpu_V1, cpu_V0);
4257 } else {
4258 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4259 cpu_V1, cpu_V0);
4260 }
4261 break;
4262 case 10: /* VRSHL */
4263 if (u) {
4264 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4265 } else {
4266 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4267 }
4268 break;
4269 case 11: /* VQRSHL */
4270 if (u) {
4271 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4272 cpu_V1, cpu_V0);
4273 } else {
4274 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4275 cpu_V1, cpu_V0);
4276 }
4277 break;
4278 case 16:
4279 if (u) {
4280 tcg_gen_sub_i64(CPU_V001);
4281 } else {
4282 tcg_gen_add_i64(CPU_V001);
4283 }
4284 break;
4285 default:
4286 abort();
4287 }
4288 neon_store_reg64(cpu_V0, rd + pass);
4289 }
4290 return 0;
4291 }
4292 switch (op) {
4293 case 8: /* VSHL */
4294 case 9: /* VQSHL */
4295 case 10: /* VRSHL */
4296 case 11: /* VQRSHL */
4297 {
4298 int rtmp;
4299 /* Shift instruction operands are reversed: the value to shift comes from the Vm field and the shift count from Vn, so swap rn and rm for the common code below. */
4300 rtmp = rn;
4301 rn = rm;
4302 rm = rtmp;
4303 pairwise = 0;
4304 }
4305 break;
4306 case 20: /* VPMAX */
4307 case 21: /* VPMIN */
4308 case 23: /* VPADD */
4309 pairwise = 1;
4310 break;
4311 case 26: /* VPADD (float) */
4312 pairwise = (u && size < 2);
4313 break;
4314 case 30: /* VPMIN/VPMAX (float) */
4315 pairwise = u;
4316 break;
4317 default:
4318 pairwise = 0;
4319 break;
4320 }
4321
4322 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4323
4324 if (pairwise) {
4325 /* Pairwise. */
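                /* With Q=0, pass 0 reads the rn pair and pass 1 the rm pair;
                   with Q=1, passes 0-1 read rn (elements 0-3) and passes 2-3
                   read rm. */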
4326 if (q)
4327 n = (pass & 1) * 2;
4328 else
4329 n = 0;
4330 if (pass < q + 1) {
4331 tmp = neon_load_reg(rn, n);
4332 tmp2 = neon_load_reg(rn, n + 1);
4333 } else {
4334 tmp = neon_load_reg(rm, n);
4335 tmp2 = neon_load_reg(rm, n + 1);
4336 }
4337 } else {
4338 /* Elementwise. */
4339 tmp = neon_load_reg(rn, pass);
4340 tmp2 = neon_load_reg(rm, pass);
4341 }
4342 switch (op) {
4343 case 0: /* VHADD */
4344 GEN_NEON_INTEGER_OP(hadd);
4345 break;
4346 case 1: /* VQADD */
4347 GEN_NEON_INTEGER_OP_ENV(qadd);
4348 break;
4349 case 2: /* VRHADD */
4350 GEN_NEON_INTEGER_OP(rhadd);
4351 break;
4352 case 3: /* Logic ops. */
4353 switch ((u << 2) | size) {
4354 case 0: /* VAND */
4355 tcg_gen_and_i32(tmp, tmp, tmp2);
4356 break;
4357 case 1: /* BIC */
4358 tcg_gen_andc_i32(tmp, tmp, tmp2);
4359 break;
4360 case 2: /* VORR */
4361 tcg_gen_or_i32(tmp, tmp, tmp2);
4362 break;
4363 case 3: /* VORN */
4364 tcg_gen_orc_i32(tmp, tmp, tmp2);
4365 break;
4366 case 4: /* VEOR */
4367 tcg_gen_xor_i32(tmp, tmp, tmp2);
4368 break;
4369 case 5: /* VBSL */
4370 tmp3 = neon_load_reg(rd, pass);
4371 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4372 dead_tmp(tmp3);
4373 break;
4374 case 6: /* VBIT */
4375 tmp3 = neon_load_reg(rd, pass);
4376 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4377 dead_tmp(tmp3);
4378 break;
4379 case 7: /* VBIF */
4380 tmp3 = neon_load_reg(rd, pass);
4381 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4382 dead_tmp(tmp3);
4383 break;
4384 }
4385 break;
4386 case 4: /* VHSUB */
4387 GEN_NEON_INTEGER_OP(hsub);
4388 break;
4389 case 5: /* VQSUB */
4390 GEN_NEON_INTEGER_OP_ENV(qsub);
4391 break;
4392 case 6: /* VCGT */
4393 GEN_NEON_INTEGER_OP(cgt);
4394 break;
4395 case 7: /* VCGE */
4396 GEN_NEON_INTEGER_OP(cge);
4397 break;
4398 case 8: /* VSHL */
4399 GEN_NEON_INTEGER_OP(shl);
4400 break;
4401 case 9: /* VQSHL */
4402 GEN_NEON_INTEGER_OP_ENV(qshl);
4403 break;
4404 case 10: /* VRSHL */
4405 GEN_NEON_INTEGER_OP(rshl);
4406 break;
4407 case 11: /* VQRSHL */
4408 GEN_NEON_INTEGER_OP_ENV(qrshl);
4409 break;
4410 case 12: /* VMAX */
4411 GEN_NEON_INTEGER_OP(max);
4412 break;
4413 case 13: /* VMIN */
4414 GEN_NEON_INTEGER_OP(min);
4415 break;
4416 case 14: /* VABD */
4417 GEN_NEON_INTEGER_OP(abd);
4418 break;
4419 case 15: /* VABA */
4420 GEN_NEON_INTEGER_OP(abd);
4421 dead_tmp(tmp2);
4422 tmp2 = neon_load_reg(rd, pass);
4423 gen_neon_add(size, tmp, tmp2);
4424 break;
4425 case 16:
4426 if (!u) { /* VADD */
4427 if (gen_neon_add(size, tmp, tmp2))
4428 return 1;
4429 } else { /* VSUB */
4430 switch (size) {
4431 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4432 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4433 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4434 default: return 1;
4435 }
4436 }
4437 break;
4438 case 17:
4439 if (!u) { /* VTST */
4440 switch (size) {
4441 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4442 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4443 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4444 default: return 1;
4445 }
4446 } else { /* VCEQ */
4447 switch (size) {
4448 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4449 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4450 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4451 default: return 1;
4452 }
4453 }
4454 break;
4455 case 18: /* Multiply. */
4456 switch (size) {
4457 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4458 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4459 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4460 default: return 1;
4461 }
4462 dead_tmp(tmp2);
4463 tmp2 = neon_load_reg(rd, pass);
4464 if (u) { /* VMLS */
4465 gen_neon_rsb(size, tmp, tmp2);
4466 } else { /* VMLA */
4467 gen_neon_add(size, tmp, tmp2);
4468 }
4469 break;
4470 case 19: /* VMUL */
4471 if (u) { /* polynomial */
4472 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4473 } else { /* Integer */
4474 switch (size) {
4475 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4476 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4477 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4478 default: return 1;
4479 }
4480 }
4481 break;
4482 case 20: /* VPMAX */
4483 GEN_NEON_INTEGER_OP(pmax);
4484 break;
4485 case 21: /* VPMIN */
4486 GEN_NEON_INTEGER_OP(pmin);
4487 break;
4488 case 22: /* Multiply high. */
4489 if (!u) { /* VQDMULH */
4490 switch (size) {
4491 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4492 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4493 default: return 1;
4494 }
4495 } else { /* VQRDMULH */
4496 switch (size) {
4497 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4498 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4499 default: return 1;
4500 }
4501 }
4502 break;
4503 case 23: /* VPADD */
4504 if (u)
4505 return 1;
4506 switch (size) {
4507 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4508 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4509 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4510 default: return 1;
4511 }
4512 break;
4513 case 26: /* Floating point arithmetic. */
4514 switch ((u << 2) | size) {
4515 case 0: /* VADD */
4516 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4517 break;
4518 case 2: /* VSUB */
4519 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4520 break;
4521 case 4: /* VPADD */
4522 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4523 break;
4524 case 6: /* VABD */
4525 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4526 break;
4527 default:
4528 return 1;
4529 }
4530 break;
4531 case 27: /* Float multiply. */
4532 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4533 if (!u) {
4534 dead_tmp(tmp2);
4535 tmp2 = neon_load_reg(rd, pass);
4536 if (size == 0) {
4537 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4538 } else {
4539 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4540 }
4541 }
4542 break;
4543 case 28: /* Float compare. */
4544 if (!u) {
4545 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4546 } else {
4547 if (size == 0)
4548 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4549 else
4550 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4551 }
4552 break;
4553 case 29: /* Float compare absolute. */
4554 if (!u)
4555 return 1;
4556 if (size == 0)
4557 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4558 else
4559 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4560 break;
4561 case 30: /* Float min/max. */
4562 if (size == 0)
4563 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4564 else
4565 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4566 break;
4567 case 31:
4568 if (size == 0)
4569 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4570 else
4571 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4572 break;
4573 default:
4574 abort();
4575 }
4576 dead_tmp(tmp2);
4577
4578 /* Save the result. For elementwise operations we can put it
4579 straight into the destination register. For pairwise operations
4580 we have to be careful to avoid clobbering the source operands. */
4581 if (pairwise && rd == rm) {
4582 neon_store_scratch(pass, tmp);
4583 } else {
4584 neon_store_reg(rd, pass, tmp);
4585 }
4586
4587 } /* for pass */
4588 if (pairwise && rd == rm) {
4589 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4590 tmp = neon_load_scratch(pass);
4591 neon_store_reg(rd, pass, tmp);
4592 }
4593 }
4594 /* End of 3 register same size operations. */
4595 } else if (insn & (1 << 4)) {
4596 if ((insn & 0x00380080) != 0) {
4597 /* Two registers and shift. */
4598 op = (insn >> 8) & 0xf;
4599 if (insn & (1 << 7)) {
4600 /* 64-bit shift. */
4601 size = 3;
4602 } else {
4603 size = 2;
4604 while ((insn & (1 << (size + 19))) == 0)
4605 size--;
4606 }
4607 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4608 /* To avoid excessive duplication of ops we implement shift
4609 by immediate using the variable shift operations. */
4610 if (op < 8) {
4611 /* Shift by immediate:
4612 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4613 /* Right shifts are encoded as N - shift, where N is the
4614 element size in bits. */
4615 if (op <= 4)
4616 shift = shift - (1 << (size + 3));
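                    /* e.g. an 8-bit VSHR #3 is encoded with shift = 8 - 3 = 5,
                       which becomes 5 - 8 = -3 here; the variable-shift helpers
                       treat a negative count as a right shift. */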
4617 if (size == 3) {
4618 count = q + 1;
4619 } else {
4620 count = q ? 4 : 2;
4621 }
4622 switch (size) {
4623 case 0:
4624 imm = (uint8_t) shift;
4625 imm |= imm << 8;
4626 imm |= imm << 16;
4627 break;
4628 case 1:
4629 imm = (uint16_t) shift;
4630 imm |= imm << 16;
4631 break;
4632 case 2:
4633 case 3:
4634 imm = shift;
4635 break;
4636 default:
4637 abort();
4638 }
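                    /* The (possibly negative) count is now replicated into every
                       lane of a 32-bit word, e.g. size 0 with shift = -3 gives
                       imm = 0xfdfdfdfd. */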
4639
4640 for (pass = 0; pass < count; pass++) {
4641 if (size == 3) {
4642 neon_load_reg64(cpu_V0, rm + pass);
4643 tcg_gen_movi_i64(cpu_V1, imm);
4644 switch (op) {
4645 case 0: /* VSHR */
4646 case 1: /* VSRA */
4647 if (u)
4648 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4649 else
4650 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4651 break;
4652 case 2: /* VRSHR */
4653 case 3: /* VRSRA */
4654 if (u)
4655 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4656 else
4657 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4658 break;
4659 case 4: /* VSRI */
4660 if (!u)
4661 return 1;
4662 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4663 break;
4664 case 5: /* VSHL, VSLI */
4665 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4666 break;
4667 case 6: /* VQSHLU */
4668 if (u) {
4669 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4670 cpu_V0, cpu_V1);
4671 } else {
4672 return 1;
4673 }
4674 break;
4675 case 7: /* VQSHL */
4676 if (u) {
4677 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4678 cpu_V0, cpu_V1);
4679 } else {
4680 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4681 cpu_V0, cpu_V1);
4682 }
4683 break;
4684 }
4685 if (op == 1 || op == 3) {
4686 /* Accumulate. */
4687 neon_load_reg64(cpu_V1, rd + pass);
4688 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4689 } else if (op == 4 || (op == 5 && u)) {
4690 /* Insert */
4691 cpu_abort(env, "VS[LR]I.64 not implemented");
4692 }
4693 neon_store_reg64(cpu_V0, rd + pass);
4694 } else { /* size < 3 */
4695 /* Operands in tmp (value) and tmp2 (shift count). */
4696 tmp = neon_load_reg(rm, pass);
4697 tmp2 = new_tmp();
4698 tcg_gen_movi_i32(tmp2, imm);
4699 switch (op) {
4700 case 0: /* VSHR */
4701 case 1: /* VSRA */
4702 GEN_NEON_INTEGER_OP(shl);
4703 break;
4704 case 2: /* VRSHR */
4705 case 3: /* VRSRA */
4706 GEN_NEON_INTEGER_OP(rshl);
4707 break;
4708 case 4: /* VSRI */
4709 if (!u)
4710 return 1;
4711 GEN_NEON_INTEGER_OP(shl);
4712 break;
4713 case 5: /* VSHL, VSLI */
4714 switch (size) {
4715 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4716 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4717 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4718 default: return 1;
4719 }
4720 break;
4721 case 6: /* VQSHLU */
4722 if (!u) {
4723 return 1;
4724 }
4725 switch (size) {
4726 case 0:
4727 gen_helper_neon_qshlu_s8(tmp, cpu_env,
4728 tmp, tmp2);
4729 break;
4730 case 1:
4731 gen_helper_neon_qshlu_s16(tmp, cpu_env,
4732 tmp, tmp2);
4733 break;
4734 case 2:
4735 gen_helper_neon_qshlu_s32(tmp, cpu_env,
4736 tmp, tmp2);
4737 break;
4738 default:
4739 return 1;
4740 }
4741 break;
4742 case 7: /* VQSHL */
4743 GEN_NEON_INTEGER_OP_ENV(qshl);
4744 break;
4745 }
4746 dead_tmp(tmp2);
4747
4748 if (op == 1 || op == 3) {
4749 /* Accumulate. */
4750 tmp2 = neon_load_reg(rd, pass);
4751 gen_neon_add(size, tmp, tmp2);
4752 dead_tmp(tmp2);
4753 } else if (op == 4 || (op == 5 && u)) {
4754 /* Insert */
4755 switch (size) {
4756 case 0:
4757 if (op == 4)
4758 mask = 0xff >> -shift;
4759 else
4760 mask = (uint8_t)(0xff << shift);
4761 mask |= mask << 8;
4762 mask |= mask << 16;
4763 break;
4764 case 1:
4765 if (op == 4)
4766 mask = 0xffff >> -shift;
4767 else
4768 mask = (uint16_t)(0xffff << shift);
4769 mask |= mask << 16;
4770 break;
4771 case 2:
4772 if (shift < -31 || shift > 31) {
4773 mask = 0;
4774 } else {
4775 if (op == 4)
4776 mask = 0xffffffffu >> -shift;
4777 else
4778 mask = 0xffffffffu << shift;
4779 }
4780 break;
4781 default:
4782 abort();
4783 }
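                            /* e.g. 8-bit VSRI #3 arrives with shift = -3, giving
                               mask = 0xff >> 3 = 0x1f in every byte lane: the low
                               5 bits of each lane come from the shifted source,
                               the top 3 are kept from the destination. */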
4784 tmp2 = neon_load_reg(rd, pass);
4785 tcg_gen_andi_i32(tmp, tmp, mask);
4786 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
4787 tcg_gen_or_i32(tmp, tmp, tmp2);
4788 dead_tmp(tmp2);
4789 }
4790 neon_store_reg(rd, pass, tmp);
4791 }
4792 } /* for pass */
4793 } else if (op < 10) {
4794 /* Shift by immediate and narrow:
4795 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4796 shift = shift - (1 << (size + 3));
4797 size++;
4798 switch (size) {
4799 case 1:
4800 imm = (uint16_t)shift;
4801 imm |= imm << 16;
4802 tmp2 = tcg_const_i32(imm);
4803 TCGV_UNUSED_I64(tmp64);
4804 break;
4805 case 2:
4806 imm = (uint32_t)shift;
4807 tmp2 = tcg_const_i32(imm);
4808 TCGV_UNUSED_I64(tmp64);
4809 break;
4810 case 3:
4811 tmp64 = tcg_const_i64(shift);
4812 TCGV_UNUSED(tmp2);
4813 break;
4814 default:
4815 abort();
4816 }
4817
4818 for (pass = 0; pass < 2; pass++) {
4819 if (size == 3) {
4820 neon_load_reg64(cpu_V0, rm + pass);
4821 if (q) {
4822 if (u)
4823 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
4824 else
4825 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
4826 } else {
4827 if (u)
4828 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
4829 else
4830 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
4831 }
4832 } else {
4833 tmp = neon_load_reg(rm + pass, 0);
4834 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4835 tmp3 = neon_load_reg(rm + pass, 1);
4836 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4837 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4838 dead_tmp(tmp);
4839 dead_tmp(tmp3);
4840 }
4841 tmp = new_tmp();
4842 if (op == 8 && !u) {
4843 gen_neon_narrow(size - 1, tmp, cpu_V0);
4844 } else {
4845 if (op == 8)
4846 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4847 else
4848 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4849 }
4850 neon_store_reg(rd, pass, tmp);
4851 } /* for pass */
4852 if (size == 3) {
4853 tcg_temp_free_i64(tmp64);
4854 } else {
4855 tcg_temp_free_i32(tmp2);
4856 }
4857 } else if (op == 10) {
4858 /* VSHLL */
4859 if (q || size == 3)
4860 return 1;
4861 tmp = neon_load_reg(rm, 0);
4862 tmp2 = neon_load_reg(rm, 1);
4863 for (pass = 0; pass < 2; pass++) {
4864 if (pass == 1)
4865 tmp = tmp2;
4866
4867 gen_neon_widen(cpu_V0, tmp, size, u);
4868
4869 if (shift != 0) {
4870 /* The shift is less than the width of the source
4871 type, so we can just shift the whole register. */
4872 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
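                    /* The whole-register shift lets the extension bits of one
                       widened lane spill into the low bits of the lane above,
                       so those bits are cleared with the mask built below. */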
4873 if (size < 2 || !u) {
4874 uint64_t imm64;
4875 if (size == 0) {
4876 imm = (0xffu >> (8 - shift));
4877 imm |= imm << 16;
4878 } else {
4879 imm = 0xffff >> (16 - shift);
4880 }
4881 imm64 = imm | (((uint64_t)imm) << 32);
4882 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4883 }
4884 }
4885 neon_store_reg64(cpu_V0, rd + pass);
4886 }
4887 } else if (op >= 14) {
4888 /* VCVT fixed-point. */
4889 /* We have already masked out the must-be-1 top bit of imm6,
4890 * hence this 32-shift where the ARM ARM has 64-imm6.
4891 */
4892 shift = 32 - shift;
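                /* e.g. imm6 = 60 (i.e. 64 - 60 = 4 fractional bits) arrives
                   here as shift = 28 after masking, and 32 - 28 recovers 4. */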
4893 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4894 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4895 if (!(op & 1)) {
4896 if (u)
4897 gen_vfp_ulto(0, shift);
4898 else
4899 gen_vfp_slto(0, shift);
4900 } else {
4901 if (u)
4902 gen_vfp_toul(0, shift);
4903 else
4904 gen_vfp_tosl(0, shift);
4905 }
4906 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4907 }
4908 } else {
4909 return 1;
4910 }
4911 } else { /* (insn & 0x00380080) == 0 */
4912 int invert;
4913
4914 op = (insn >> 8) & 0xf;
4915 /* One register and immediate. */
4916 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4917 invert = (insn & (1 << 5)) != 0;
4918 switch (op) {
4919 case 0: case 1:
4920 /* no-op */
4921 break;
4922 case 2: case 3:
4923 imm <<= 8;
4924 break;
4925 case 4: case 5:
4926 imm <<= 16;
4927 break;
4928 case 6: case 7:
4929 imm <<= 24;
4930 break;
4931 case 8: case 9:
4932 imm |= imm << 16;
4933 break;
4934 case 10: case 11:
4935 imm = (imm << 8) | (imm << 24);
4936 break;
4937 case 12:
4938 imm = (imm << 8) | 0xff;
4939 break;
4940 case 13:
4941 imm = (imm << 16) | 0xffff;
4942 break;
4943 case 14:
4944 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4945 if (invert)
4946 imm = ~imm;
4947 break;
4948 case 15:
4949 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4950 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
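                /* This is the VFPExpandImm() pattern from the ARM ARM: abcdefgh
                   expands to the float32 bits a NOT(b) bbbbb cdefgh 0...0. */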
4951 break;
4952 }
4953 if (invert)
4954 imm = ~imm;
4955
4956 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4957 if (op & 1 && op < 12) {
4958 tmp = neon_load_reg(rd, pass);
4959 if (invert) {
4960 /* The immediate value has already been inverted, so
4961 BIC becomes AND. */
4962 tcg_gen_andi_i32(tmp, tmp, imm);
4963 } else {
4964 tcg_gen_ori_i32(tmp, tmp, imm);
4965 }
4966 } else {
4967 /* VMOV, VMVN. */
4968 tmp = new_tmp();
4969 if (op == 14 && invert) {
4970 uint32_t val;
4971 val = 0;
4972 for (n = 0; n < 4; n++) {
4973 if (imm & (1 << (n + (pass & 1) * 4)))
4974 val |= 0xff << (n * 8);
4975 }
4976 tcg_gen_movi_i32(tmp, val);
4977 } else {
4978 tcg_gen_movi_i32(tmp, imm);
4979 }
4980 }
4981 neon_store_reg(rd, pass, tmp);
4982 }
4983 }
4984 } else { /* (insn & 0x00800010) == 0x00800000 */
4985 if (size != 3) {
4986 op = (insn >> 8) & 0xf;
4987 if ((insn & (1 << 6)) == 0) {
4988 /* Three registers of different lengths. */
4989 int src1_wide;
4990 int src2_wide;
4991 int prewiden;
4992 /* prewiden, src1_wide, src2_wide */
4993 static const int neon_3reg_wide[16][3] = {
4994 {1, 0, 0}, /* VADDL */
4995 {1, 1, 0}, /* VADDW */
4996 {1, 0, 0}, /* VSUBL */
4997 {1, 1, 0}, /* VSUBW */
4998 {0, 1, 1}, /* VADDHN */
4999 {0, 0, 0}, /* VABAL */
5000 {0, 1, 1}, /* VSUBHN */
5001 {0, 0, 0}, /* VABDL */
5002 {0, 0, 0}, /* VMLAL */
5003 {0, 0, 0}, /* VQDMLAL */
5004 {0, 0, 0}, /* VMLSL */
5005 {0, 0, 0}, /* VQDMLSL */
5006 {0, 0, 0}, /* Integer VMULL */
5007 {0, 0, 0}, /* VQDMULL */
5008 {0, 0, 0} /* Polynomial VMULL */
5009 };
5010
5011 prewiden = neon_3reg_wide[op][0];
5012 src1_wide = neon_3reg_wide[op][1];
5013 src2_wide = neon_3reg_wide[op][2];
5014
5015 if (size == 0 && (op == 9 || op == 11 || op == 13))
5016 return 1;
5017
5018 /* Avoid overlapping operands. Wide source operands are
5019 always aligned so will never overlap with wide
5020 destinations in problematic ways. */
5021 if (rd == rm && !src2_wide) {
5022 tmp = neon_load_reg(rm, 1);
5023 neon_store_scratch(2, tmp);
5024 } else if (rd == rn && !src1_wide) {
5025 tmp = neon_load_reg(rn, 1);
5026 neon_store_scratch(2, tmp);
5027 }
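                /* e.g. VMULL with rd == rm: pass 0's 64-bit store to d[rd]
                   would clobber rm before pass 1 reads its high half, so that
                   half is parked in scratch slot 2 first (likewise rd == rn). */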
5028 TCGV_UNUSED(tmp3);
5029 for (pass = 0; pass < 2; pass++) {
5030 if (src1_wide) {
5031 neon_load_reg64(cpu_V0, rn + pass);
5032 TCGV_UNUSED(tmp);
5033 } else {
5034 if (pass == 1 && rd == rn) {
5035 tmp = neon_load_scratch(2);
5036 } else {
5037 tmp = neon_load_reg(rn, pass);
5038 }
5039 if (prewiden) {
5040 gen_neon_widen(cpu_V0, tmp, size, u);
5041 }
5042 }
5043 if (src2_wide) {
5044 neon_load_reg64(cpu_V1, rm + pass);
5045 TCGV_UNUSED(tmp2);
5046 } else {
5047 if (pass == 1 && rd == rm) {
5048 tmp2 = neon_load_scratch(2);
5049 } else {
5050 tmp2 = neon_load_reg(rm, pass);
5051 }
5052 if (prewiden) {
5053 gen_neon_widen(cpu_V1, tmp2, size, u);
5054 }
5055 }
5056 switch (op) {
5057 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5058 gen_neon_addl(size);
5059 break;
5060 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5061 gen_neon_subl(size);
5062 break;
5063 case 5: case 7: /* VABAL, VABDL */
5064 switch ((size << 1) | u) {
5065 case 0:
5066 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5067 break;
5068 case 1:
5069 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5070 break;
5071 case 2:
5072 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5073 break;
5074 case 3:
5075 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5076 break;
5077 case 4:
5078 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5079 break;
5080 case 5:
5081 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5082 break;
5083 default: abort();
5084 }
5085 dead_tmp(tmp2);
5086 dead_tmp(tmp);
5087 break;
5088 case 8: case 9: case 10: case 11: case 12: case 13:
5089 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5090 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5091 break;
5092 case 14: /* Polynomial VMULL */
5093 cpu_abort(env, "Polynomial VMULL not implemented");
5094
5095 default: /* 15 is RESERVED. */
5096 return 1;
5097 }
5098 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5099 /* Accumulate. */
5100 if (op == 10 || op == 11) {
5101 gen_neon_negl(cpu_V0, size);
5102 }
5103
5104 if (op != 13) {
5105 neon_load_reg64(cpu_V1, rd + pass);
5106 }
5107
5108 switch (op) {
5109 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
5110 gen_neon_addl(size);
5111 break;
5112 case 9: case 11: /* VQDMLAL, VQDMLSL */
5113 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5114 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5115 break;
5116
5117 case 13: /* VQDMULL */
5118 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5119 break;
5120 default:
5121 abort();
5122 }
5123 neon_store_reg64(cpu_V0, rd + pass);
5124 } else if (op == 4 || op == 6) {
5125 /* Narrowing operation. */
5126 tmp = new_tmp();
5127 if (!u) {
5128 switch (size) {
5129 case 0:
5130 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5131 break;
5132 case 1:
5133 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5134 break;
5135 case 2:
5136 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5137 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5138 break;
5139 default: abort();
5140 }
5141 } else {
5142 switch (size) {
5143 case 0:
5144 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5145 break;
5146 case 1:
5147 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5148 break;
5149 case 2:
5150 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5151 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5152 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5153 break;
5154 default: abort();
5155 }
5156 }
5157 if (pass == 0) {
5158 tmp3 = tmp;
5159 } else {
5160 neon_store_reg(rd, 0, tmp3);
5161 neon_store_reg(rd, 1, tmp);
5162 }
5163 } else {
5164 /* Write back the result. */
5165 neon_store_reg64(cpu_V0, rd + pass);
5166 }
5167 }
5168 } else {
5169 /* Two registers and a scalar. */
5170 switch (op) {
5171 case 0: /* Integer VMLA scalar */
5172 case 1: /* Float VMLA scalar */
5173 case 4: /* Integer VMLS scalar */
5174 case 5: /* Floating point VMLS scalar */
5175 case 8: /* Integer VMUL scalar */
5176 case 9: /* Floating point VMUL scalar */
5177 case 12: /* VQDMULH scalar */
5178 case 13: /* VQRDMULH scalar */
5179 tmp = neon_get_scalar(size, rm);
5180 neon_store_scratch(0, tmp);
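                /* Park the scalar in a scratch slot so a fresh copy can be
                   reloaded for every pass (the ops below overwrite tmp). */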
5181 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5182 tmp = neon_load_scratch(0);
5183 tmp2 = neon_load_reg(rn, pass);
5184 if (op == 12) {
5185 if (size == 1) {
5186 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5187 } else {
5188 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5189 }
5190 } else if (op == 13) {
5191 if (size == 1) {
5192 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5193 } else {
5194 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5195 }
5196 } else if (op & 1) {
5197 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5198 } else {
5199 switch (size) {
5200 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5201 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5202 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5203 default: return 1;
5204 }
5205 }
5206 dead_tmp(tmp2);
5207 if (op < 8) {
5208 /* Accumulate. */
5209 tmp2 = neon_load_reg(rd, pass);
5210 switch (op) {
5211 case 0:
5212 gen_neon_add(size, tmp, tmp2);
5213 break;
5214 case 1:
5215 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5216 break;
5217 case 4:
5218 gen_neon_rsb(size, tmp, tmp2);
5219 break;
5220 case 5:
5221 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5222 break;
5223 default:
5224 abort();
5225 }
5226 dead_tmp(tmp2);
5227 }
5228 neon_store_reg(rd, pass, tmp);
5229 }
5230 break;
5231 case 2: /* VMLAL scalar */
5232 case 3: /* VQDMLAL scalar */
5233 case 6: /* VMLSL scalar */
5234 case 7: /* VQDMLSL scalar */
5235 case 10: /* VMULL scalar */
5236 case 11: /* VQDMULL scalar */
5237 if (size == 0 && (op == 3 || op == 7 || op == 11))
5238 return 1;
5239
5240 tmp2 = neon_get_scalar(size, rm);
5241 /* We need a copy of tmp2 because gen_neon_mull
5242 * deletes it during pass 0. */
5243 tmp4 = new_tmp();
5244 tcg_gen_mov_i32(tmp4, tmp2);
5245 tmp3 = neon_load_reg(rn, 1);
5246
5247 for (pass = 0; pass < 2; pass++) {
5248 if (pass == 0) {
5249 tmp = neon_load_reg(rn, 0);
5250 } else {
5251 tmp = tmp3;
5252 tmp2 = tmp4;
5253 }
5254 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5255 if (op == 6 || op == 7) {
5256 gen_neon_negl(cpu_V0, size);
5257 }
5258 if (op != 11) {
5259 neon_load_reg64(cpu_V1, rd + pass);
5260 }
5261 switch (op) {
5262 case 2: case 6:
5263 gen_neon_addl(size);
5264 break;
5265 case 3: case 7:
5266 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5267 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5268 break;
5269 case 10:
5270 /* no-op */
5271 break;
5272 case 11:
5273 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5274 break;
5275 default:
5276 abort();
5277 }
5278 neon_store_reg64(cpu_V0, rd + pass);
5279 }
5280
5281
5282 break;
5283 default: /* 14 and 15 are RESERVED */
5284 return 1;
5285 }
5286 }
5287 } else { /* size == 3 */
5288 if (!u) {
5289 /* Extract. */
5290 imm = (insn >> 8) & 0xf;
5291
5292 if (imm > 7 && !q)
5293 return 1;
5294
5295 if (imm == 0) {
5296 neon_load_reg64(cpu_V0, rn);
5297 if (q) {
5298 neon_load_reg64(cpu_V1, rn + 1);
5299 }
5300 } else if (imm == 8) {
5301 neon_load_reg64(cpu_V0, rn + 1);
5302 if (q) {
5303 neon_load_reg64(cpu_V1, rm);
5304 }
5305 } else if (q) {
5306 tmp64 = tcg_temp_new_i64();
5307 if (imm < 8) {
5308 neon_load_reg64(cpu_V0, rn);
5309 neon_load_reg64(tmp64, rn + 1);
5310 } else {
5311 neon_load_reg64(cpu_V0, rn + 1);
5312 neon_load_reg64(tmp64, rm);
5313 }
5314 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5315 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5316 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5317 if (imm < 8) {
5318 neon_load_reg64(cpu_V1, rm);
5319 } else {
5320 neon_load_reg64(cpu_V1, rm + 1);
5321 imm -= 8;
5322 }
5323 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5324 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5325 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5326 tcg_temp_free_i64(tmp64);
5327 } else {
5328 /* Doubleword (non-quad) VEXT: combine the two 64-bit inputs directly. */
5329 neon_load_reg64(cpu_V0, rn);
5330 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5331 neon_load_reg64(cpu_V1, rm);
5332 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5333 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5334 }
5335 neon_store_reg64(cpu_V0, rd);
5336 if (q) {
5337 neon_store_reg64(cpu_V1, rd + 1);
5338 }
5339 } else if ((insn & (1 << 11)) == 0) {
5340 /* Two register misc. */
5341 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
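                /* op is a 6-bit index: insn[17:16] supply bits 5:4 and
                   insn[10:7] bits 3:0. */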
5342 size = (insn >> 18) & 3;
5343 switch (op) {
5344 case 0: /* VREV64 */
5345 if (size == 3)
5346 return 1;
5347 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5348 tmp = neon_load_reg(rm, pass * 2);
5349 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5350 switch (size) {
5351 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5352 case 1: gen_swap_half(tmp); break;
5353 case 2: /* no-op */ break;
5354 default: abort();
5355 }
5356 neon_store_reg(rd, pass * 2 + 1, tmp);
5357 if (size == 2) {
5358 neon_store_reg(rd, pass * 2, tmp2);
5359 } else {
5360 switch (size) {
5361 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5362 case 1: gen_swap_half(tmp2); break;
5363 default: abort();
5364 }
5365 neon_store_reg(rd, pass * 2, tmp2);
5366 }
5367 }
5368 break;
5369 case 4: case 5: /* VPADDL */
5370 case 12: case 13: /* VPADAL */
5371 if (size == 3)
5372 return 1;
5373 for (pass = 0; pass < q + 1; pass++) {
5374 tmp = neon_load_reg(rm, pass * 2);
5375 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5376 tmp = neon_load_reg(rm, pass * 2 + 1);
5377 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5378 switch (size) {
5379 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5380 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5381 case 2: tcg_gen_add_i64(CPU_V001); break;
5382 default: abort();
5383 }
5384 if (op >= 12) {
5385 /* Accumulate. */
5386 neon_load_reg64(cpu_V1, rd + pass);
5387 gen_neon_addl(size);
5388 }
5389 neon_store_reg64(cpu_V0, rd + pass);
5390 }
5391 break;
5392 case 33: /* VTRN */
5393 if (size == 2) {
5394 for (n = 0; n < (q ? 4 : 2); n += 2) {
5395 tmp = neon_load_reg(rm, n);
5396 tmp2 = neon_load_reg(rd, n + 1);
5397 neon_store_reg(rm, n, tmp2);
5398 neon_store_reg(rd, n + 1, tmp);
5399 }
5400 } else {
5401 goto elementwise;
5402 }
5403 break;
5404 case 34: /* VUZP */
5405 /* Reg Before After
5406 Rd A3 A2 A1 A0 B2 B0 A2 A0
5407 Rm B3 B2 B1 B0 B3 B1 A3 A1
5408 */
5409 if (size == 3)
5410 return 1;
5411 gen_neon_unzip(rd, q, 0, size);
5412 gen_neon_unzip(rm, q, 4, size);
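                /* gen_neon_unzip leaves its results in the scratch slots
                   (base 0 for rd, base 4 for rm); the order tables below
                   permute them back into the destination registers. */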
5413 if (q) {
5414 static int unzip_order_q[8] =
5415 {0, 2, 4, 6, 1, 3, 5, 7};
5416 for (n = 0; n < 8; n++) {
5417 int reg = (n < 4) ? rd : rm;
5418 tmp = neon_load_scratch(unzip_order_q[n]);
5419 neon_store_reg(reg, n % 4, tmp);
5420 }
5421 } else {
5422 static int unzip_order[4] =
5423 {0, 4, 1, 5};
5424 for (n = 0; n < 4; n++) {
5425 int reg = (n < 2) ? rd : rm;
5426 tmp = neon_load_scratch(unzip_order[n]);
5427 neon_store_reg(reg, n % 2, tmp);
5428 }
5429 }
5430 break;
5431 case 35: /* VZIP */
5432 /* Reg Before After
5433 Rd A3 A2 A1 A0 B1 A1 B0 A0
5434 Rm B3 B2 B1 B0 B3 A3 B2 A2
5435 */
5436 if (size == 3)
5437 return 1;
5438 count = (q ? 4 : 2);
5439 for (n = 0; n < count; n++) {
5440 tmp = neon_load_reg(rd, n);
5441 tmp2 = neon_load_reg(rm, n);
5442 switch (size) {
5443 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5444 case 1: gen_neon_zip_u16(tmp, tmp2); break;
5445 case 2: /* no-op */; break;
5446 default: abort();
5447 }
5448 neon_store_scratch(n * 2, tmp);
5449 neon_store_scratch(n * 2 + 1, tmp2);
5450 }
5451 for (n = 0; n < count * 2; n++) {
5452 int reg = (n < count) ? rd : rm;
5453 tmp = neon_load_scratch(n);
5454 neon_store_reg(reg, n % count, tmp);
5455 }
5456 break;
5457 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5458 if (size == 3)
5459 return 1;
5460 TCGV_UNUSED(tmp2);
5461 for (pass = 0; pass < 2; pass++) {
5462 neon_load_reg64(cpu_V0, rm + pass);
5463 tmp = new_tmp();
5464 if (op == 36 && q == 0) {
5465 gen_neon_narrow(size, tmp, cpu_V0);
5466 } else if (q) {
5467 gen_neon_narrow_satu(size, tmp, cpu_V0);
5468 } else {
5469 gen_neon_narrow_sats(size, tmp, cpu_V0);
5470 }
5471 if (pass == 0) {
5472 tmp2 = tmp;
5473 } else {
5474 neon_store_reg(rd, 0, tmp2);
5475 neon_store_reg(rd, 1, tmp);
5476 }
5477 }
5478 break;
5479 case 38: /* VSHLL */
5480 if (q || size == 3)
5481 return 1;
5482 tmp = neon_load_reg(rm, 0);
5483 tmp2 = neon_load_reg(rm, 1);
5484 for (pass = 0; pass < 2; pass++) {
5485 if (pass == 1)
5486 tmp = tmp2;
5487 gen_neon_widen(cpu_V0, tmp, size, 1);
5488 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5489 neon_store_reg64(cpu_V0, rd + pass);
5490 }
5491 break;
5492 case 44: /* VCVT.F16.F32 */
5493 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5494 return 1;
5495 tmp = new_tmp();
5496 tmp2 = new_tmp();
5497 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5498 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5499 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5500 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5501 tcg_gen_shli_i32(tmp2, tmp2, 16);
5502 tcg_gen_or_i32(tmp2, tmp2, tmp);
5503 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5504 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5505 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5506 neon_store_reg(rd, 0, tmp2);
5507 tmp2 = new_tmp();
5508 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5509 tcg_gen_shli_i32(tmp2, tmp2, 16);
5510 tcg_gen_or_i32(tmp2, tmp2, tmp);
5511 neon_store_reg(rd, 1, tmp2);
5512 dead_tmp(tmp);
5513 break;
5514 case 46: /* VCVT.F32.F16 */
5515 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5516 return 1;
5517 tmp3 = new_tmp();
5518 tmp = neon_load_reg(rm, 0);
5519 tmp2 = neon_load_reg(rm, 1);
5520 tcg_gen_ext16u_i32(tmp3, tmp);
5521 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5522 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5523 tcg_gen_shri_i32(tmp3, tmp, 16);
5524 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5525 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5526 dead_tmp(tmp);
5527 tcg_gen_ext16u_i32(tmp3, tmp2);
5528 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5529 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5530 tcg_gen_shri_i32(tmp3, tmp2, 16);
5531 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5532 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5533 dead_tmp(tmp2);
5534 dead_tmp(tmp3);
5535 break;
5536 default:
5537 elementwise:
5538 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5539 if (op == 30 || op == 31 || op >= 58) {
5540 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5541 neon_reg_offset(rm, pass));
5542 TCGV_UNUSED(tmp);
5543 } else {
5544 tmp = neon_load_reg(rm, pass);
5545 }
5546 switch (op) {
5547 case 1: /* VREV32 */
5548 switch (size) {
5549 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5550 case 1: gen_swap_half(tmp); break;
5551 default: return 1;
5552 }
5553 break;
5554 case 2: /* VREV16 */
5555 if (size != 0)
5556 return 1;
5557 gen_rev16(tmp);
5558 break;
5559 case 8: /* CLS */
5560 switch (size) {
5561 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5562 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5563 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5564 default: return 1;
5565 }
5566 break;
5567 case 9: /* CLZ */
5568 switch (size) {
5569 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5570 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5571 case 2: gen_helper_clz(tmp, tmp); break;
5572 default: return 1;
5573 }
5574 break;
5575 case 10: /* CNT */
5576 if (size != 0)
5577 return 1;
5578 gen_helper_neon_cnt_u8(tmp, tmp);
5579 break;
5580 case 11: /* VNOT */
5581 if (size != 0)
5582 return 1;
5583 tcg_gen_not_i32(tmp, tmp);
5584 break;
5585 case 14: /* VQABS */
5586 switch (size) {
5587 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5588 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5589 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
5590 default: return 1;
5591 }
5592 break;
5593 case 15: /* VQNEG */
5594 switch (size) {
5595 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5596 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5597 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
5598 default: return 1;
5599 }
5600 break;
5601 case 16: case 19: /* VCGT #0, VCLE #0 */
5602 tmp2 = tcg_const_i32(0);
5603 switch(size) {
5604 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5605 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5606 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5607 default: return 1;
5608 }
5609 tcg_temp_free(tmp2);
5610 if (op == 19)
5611 tcg_gen_not_i32(tmp, tmp);
5612 break;
5613 case 17: case 20: /* VCGE #0, VCLT #0 */
5614 tmp2 = tcg_const_i32(0);
5615 switch(size) {
5616 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5617 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5618 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5619 default: return 1;
5620 }
5621 tcg_temp_free(tmp2);
5622 if (op == 20)
5623 tcg_gen_not_i32(tmp, tmp);
5624 break;
5625 case 18: /* VCEQ #0 */
5626 tmp2 = tcg_const_i32(0);
5627 switch(size) {
5628 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5629 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5630 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5631 default: return 1;
5632 }
5633 tcg_temp_free(tmp2);
5634 break;
5635 case 22: /* VABS */
5636 switch(size) {
5637 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5638 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5639 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5640 default: return 1;
5641 }
5642 break;
5643 case 23: /* VNEG */
5644 if (size == 3)
5645 return 1;
5646 tmp2 = tcg_const_i32(0);
5647 gen_neon_rsb(size, tmp, tmp2);
5648 tcg_temp_free(tmp2);
5649 break;
5650 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5651 tmp2 = tcg_const_i32(0);
5652 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5653 tcg_temp_free(tmp2);
5654 if (op == 27)
5655 tcg_gen_not_i32(tmp, tmp);
5656 break;
5657 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5658 tmp2 = tcg_const_i32(0);
5659 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5660 tcg_temp_free(tmp2);
5661 if (op == 28)
5662 tcg_gen_not_i32(tmp, tmp);
5663 break;
5664 case 26: /* Float VCEQ #0 */
5665 tmp2 = tcg_const_i32(0);
5666 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5667 tcg_temp_free(tmp2);
5668 break;
5669 case 30: /* Float VABS */
5670 gen_vfp_abs(0);
5671 break;
5672 case 31: /* Float VNEG */
5673 gen_vfp_neg(0);
5674 break;
5675 case 32: /* VSWP */
5676 tmp2 = neon_load_reg(rd, pass);
5677 neon_store_reg(rm, pass, tmp2);
5678 break;
5679 case 33: /* VTRN */
5680 tmp2 = neon_load_reg(rd, pass);
5681 switch (size) {
5682 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5683 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5684 case 2: abort();
5685 default: return 1;
5686 }
5687 neon_store_reg(rm, pass, tmp2);
5688 break;
5689 case 56: /* Integer VRECPE */
5690 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5691 break;
5692 case 57: /* Integer VRSQRTE */
5693 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5694 break;
5695 case 58: /* Float VRECPE */
5696 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5697 break;
5698 case 59: /* Float VRSQRTE */
5699 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5700 break;
5701 case 60: /* VCVT.F32.S32 */
5702 gen_vfp_sito(0);
5703 break;
5704 case 61: /* VCVT.F32.U32 */
5705 gen_vfp_uito(0);
5706 break;
5707 case 62: /* VCVT.S32.F32 */
5708 gen_vfp_tosiz(0);
5709 break;
5710 case 63: /* VCVT.U32.F32 */
5711 gen_vfp_touiz(0);
5712 break;
5713 default:
5714 /* Reserved: 21, 29, 39-56 */
5715 return 1;
5716 }
5717 if (op == 30 || op == 31 || op >= 58) {
5718 tcg_gen_st_f32(cpu_F0s, cpu_env,
5719 neon_reg_offset(rd, pass));
5720 } else {
5721 neon_store_reg(rd, pass, tmp);
5722 }
5723 }
5724 break;
5725 }
5726 } else if ((insn & (1 << 10)) == 0) {
5727 /* VTBL, VTBX. */
5728 n = ((insn >> 5) & 0x18) + 8;
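            /* n is the table size in bytes: the two-bit len field selects
               one to four doubleword registers, i.e. (len + 1) * 8 bytes. */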
5729 if (insn & (1 << 6)) {
5730 tmp = neon_load_reg(rd, 0);
5731 } else {
5732 tmp = new_tmp();
5733 tcg_gen_movi_i32(tmp, 0);
5734 }
5735 tmp2 = neon_load_reg(rm, 0);
5736 tmp4 = tcg_const_i32(rn);
5737 tmp5 = tcg_const_i32(n);
5738 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5739 dead_tmp(tmp);
5740 if (insn & (1 << 6)) {
5741 tmp = neon_load_reg(rd, 1);
5742 } else {
5743 tmp = new_tmp();
5744 tcg_gen_movi_i32(tmp, 0);
5745 }
5746 tmp3 = neon_load_reg(rm, 1);
5747 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5748 tcg_temp_free_i32(tmp5);
5749 tcg_temp_free_i32(tmp4);
5750 neon_store_reg(rd, 0, tmp2);
5751 neon_store_reg(rd, 1, tmp3);
5752 dead_tmp(tmp);
5753 } else if ((insn & 0x380) == 0) {
5754 /* VDUP */
5755 if (insn & (1 << 19)) {
5756 tmp = neon_load_reg(rm, 1);
5757 } else {
5758 tmp = neon_load_reg(rm, 0);
5759 }
5760 if (insn & (1 << 16)) {
5761 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5762 } else if (insn & (1 << 17)) {
5763 if ((insn >> 18) & 1)
5764 gen_neon_dup_high16(tmp);
5765 else
5766 gen_neon_dup_low16(tmp);
5767 }
5768 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5769 tmp2 = new_tmp();
5770 tcg_gen_mov_i32(tmp2, tmp);
5771 neon_store_reg(rd, pass, tmp2);
5772 }
5773 dead_tmp(tmp);
5774 } else {
5775 return 1;
5776 }
5777 }
5778 }
5779 return 0;
5780 }
5781
5782 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5783 {
5784 int crn = (insn >> 16) & 0xf;
5785 int crm = insn & 0xf;
5786 int op1 = (insn >> 21) & 7;
5787 int op2 = (insn >> 5) & 7;
5788 int rt = (insn >> 12) & 0xf;
5789 TCGv tmp;
5790
5791 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5792 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5793 /* TEECR */
5794 if (IS_USER(s))
5795 return 1;
5796 tmp = load_cpu_field(teecr);
5797 store_reg(s, rt, tmp);
5798 return 0;
5799 }
5800 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5801 /* TEEHBR */
5802 if (IS_USER(s) && (env->teecr & 1))
5803 return 1;
5804 tmp = load_cpu_field(teehbr);
5805 store_reg(s, rt, tmp);
5806 return 0;
5807 }
5808 }
5809 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5810 op1, crn, crm, op2);
5811 return 1;
5812 }
5813
5814 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5815 {
5816 int crn = (insn >> 16) & 0xf;
5817 int crm = insn & 0xf;
5818 int op1 = (insn >> 21) & 7;
5819 int op2 = (insn >> 5) & 7;
5820 int rt = (insn >> 12) & 0xf;
5821 TCGv tmp;
5822
5823 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5824 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5825 /* TEECR */
5826 if (IS_USER(s))
5827 return 1;
5828 tmp = load_reg(s, rt);
5829 gen_helper_set_teecr(cpu_env, tmp);
5830 dead_tmp(tmp);
5831 return 0;
5832 }
5833 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5834 /* TEEHBR */
5835 if (IS_USER(s) && (env->teecr & 1))
5836 return 1;
5837 tmp = load_reg(s, rt);
5838 store_cpu_field(tmp, teehbr);
5839 return 0;
5840 }
5841 }
5842 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5843 op1, crn, crm, op2);
5844 return 1;
5845 }
5846
5847 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5848 {
5849 int cpnum;
5850
5851 cpnum = (insn >> 8) & 0xf;
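        /* On XScale, c15_cpar gates coprocessor access: XOR with 0x3fff turns
           cleared (access-denied) bits into set ones, so a nonzero result below
           means the coprocessor is disabled and the insn is undefined. */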
5852 if (arm_feature(env, ARM_FEATURE_XSCALE)
5853 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5854 return 1;
5855
5856 switch (cpnum) {
5857 case 0:
5858 case 1:
5859 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5860 return disas_iwmmxt_insn(env, s, insn);
5861 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5862 return disas_dsp_insn(env, s, insn);
5863 }
5864 return 1;
5865 case 10:
5866 case 11:
5867 return disas_vfp_insn (env, s, insn);
5868 case 14:
5869 /* Coprocessors 7-15 are architecturally reserved by ARM.
5870 Unfortunately Intel decided to ignore this. */
5871 if (arm_feature(env, ARM_FEATURE_XSCALE))
5872 goto board;
5873 if (insn & (1 << 20))
5874 return disas_cp14_read(env, s, insn);
5875 else
5876 return disas_cp14_write(env, s, insn);
5877 case 15:
5878 return disas_cp15_insn (env, s, insn);
5879 default:
5880 board:
5881 /* Unknown coprocessor. See if the board has hooked it. */
5882 return disas_cp_insn (env, s, insn);
5883 }
5884 }
5885
5886
5887 /* Store a 64-bit value to a register pair. Clobbers val. */
5888 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5889 {
5890 TCGv tmp;
5891 tmp = new_tmp();
5892 tcg_gen_trunc_i64_i32(tmp, val);
5893 store_reg(s, rlow, tmp);
5894 tmp = new_tmp();
5895 tcg_gen_shri_i64(val, val, 32);
5896 tcg_gen_trunc_i64_i32(tmp, val);
5897 store_reg(s, rhigh, tmp);
5898 }
5899
5900 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5901 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5902 {
5903 TCGv_i64 tmp;
5904 TCGv tmp2;
5905
5906 /* Load value and extend to 64 bits. */
5907 tmp = tcg_temp_new_i64();
5908 tmp2 = load_reg(s, rlow);
5909 tcg_gen_extu_i32_i64(tmp, tmp2);
5910 dead_tmp(tmp2);
5911 tcg_gen_add_i64(val, val, tmp);
5912 tcg_temp_free_i64(tmp);
5913 }
5914
5915 /* load and add a 64-bit value from a register pair. */
5916 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5917 {
5918 TCGv_i64 tmp;
5919 TCGv tmpl;
5920 TCGv tmph;
5921
5922 /* Load 64-bit value rd:rn. */
5923 tmpl = load_reg(s, rlow);
5924 tmph = load_reg(s, rhigh);
5925 tmp = tcg_temp_new_i64();
5926 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5927 dead_tmp(tmpl);
5928 dead_tmp(tmph);
5929 tcg_gen_add_i64(val, val, tmp);
5930 tcg_temp_free_i64(tmp);
5931 }
5932
5933 /* Set N and Z flags from a 64-bit value. */
5934 static void gen_logicq_cc(TCGv_i64 val)
5935 {
5936 TCGv tmp = new_tmp();
5937 gen_helper_logicq_cc(tmp, val);
5938 gen_logic_CC(tmp);
5939 dead_tmp(tmp);
5940 }
5941
5942 /* Load/Store exclusive instructions are implemented by remembering
5943 the value/address loaded, and seeing if these are the same
5944 when the store is performed. This should be sufficient to implement
5945 the architecturally mandated semantics, and avoids having to monitor
5946 regular stores.
5947
5948 In system emulation mode only one CPU will be running at once, so
5949 this sequence is effectively atomic. In user emulation mode we
5950 throw an exception and handle the atomic operation elsewhere. */
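/* For example, a guest atomic increment
       retry: ldrex r1, [r0]
              add r1, r1, #1
              strex r2, r1, [r0]
              cmp r2, #0
              bne retry
   records [r0] and its value in gen_load_exclusive; gen_store_exclusive
   then writes r2 = 0 only if both still match, else r2 = 1. */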
5951 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5952 TCGv addr, int size)
5953 {
5954 TCGv tmp;
5955
5956 switch (size) {
5957 case 0:
5958 tmp = gen_ld8u(addr, IS_USER(s));
5959 break;
5960 case 1:
5961 tmp = gen_ld16u(addr, IS_USER(s));
5962 break;
5963 case 2:
5964 case 3:
5965 tmp = gen_ld32(addr, IS_USER(s));
5966 break;
5967 default:
5968 abort();
5969 }
5970 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5971 store_reg(s, rt, tmp);
5972 if (size == 3) {
5973 TCGv tmp2 = new_tmp();
5974 tcg_gen_addi_i32(tmp2, addr, 4);
5975 tmp = gen_ld32(tmp2, IS_USER(s));
5976 dead_tmp(tmp2);
5977 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5978 store_reg(s, rt2, tmp);
5979 }
5980 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5981 }
5982
5983 static void gen_clrex(DisasContext *s)
5984 {
5985 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5986 }
5987
5988 #ifdef CONFIG_USER_ONLY
5989 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5990 TCGv addr, int size)
5991 {
5992 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5993 tcg_gen_movi_i32(cpu_exclusive_info,
5994 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5995 gen_exception_insn(s, 4, EXCP_STREX);
5996 }
5997 #else
5998 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5999 TCGv addr, int size)
6000 {
6001 TCGv tmp;
6002 int done_label;
6003 int fail_label;
6004
6005 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6006 [addr] = {Rt};
6007 {Rd} = 0;
6008 } else {
6009 {Rd} = 1;
6010 } */
6011 fail_label = gen_new_label();
6012 done_label = gen_new_label();
6013 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6014 switch (size) {
6015 case 0:
6016 tmp = gen_ld8u(addr, IS_USER(s));
6017 break;
6018 case 1:
6019 tmp = gen_ld16u(addr, IS_USER(s));
6020 break;
6021 case 2:
6022 case 3:
6023 tmp = gen_ld32(addr, IS_USER(s));
6024 break;
6025 default:
6026 abort();
6027 }
6028 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6029 dead_tmp(tmp);
6030 if (size == 3) {
6031 TCGv tmp2 = new_tmp();
6032 tcg_gen_addi_i32(tmp2, addr, 4);
6033 tmp = gen_ld32(tmp2, IS_USER(s));
6034 dead_tmp(tmp2);
6035 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6036 dead_tmp(tmp);
6037 }
6038 tmp = load_reg(s, rt);
6039 switch (size) {
6040 case 0:
6041 gen_st8(tmp, addr, IS_USER(s));
6042 break;
6043 case 1:
6044 gen_st16(tmp, addr, IS_USER(s));
6045 break;
6046 case 2:
6047 case 3:
6048 gen_st32(tmp, addr, IS_USER(s));
6049 break;
6050 default:
6051 abort();
6052 }
6053 if (size == 3) {
6054 tcg_gen_addi_i32(addr, addr, 4);
6055 tmp = load_reg(s, rt2);
6056 gen_st32(tmp, addr, IS_USER(s));
6057 }
6058 tcg_gen_movi_i32(cpu_R[rd], 0);
6059 tcg_gen_br(done_label);
6060 gen_set_label(fail_label);
6061 tcg_gen_movi_i32(cpu_R[rd], 1);
6062 gen_set_label(done_label);
6063 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6064 }
6065 #endif
6066
6067 static void disas_arm_insn(CPUState * env, DisasContext *s)
6068 {
6069 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6070 TCGv tmp;
6071 TCGv tmp2;
6072 TCGv tmp3;
6073 TCGv addr;
6074 TCGv_i64 tmp64;
6075
6076 insn = ldl_code(s->pc);
6077 s->pc += 4;
6078
6079 /* M variants do not implement ARM mode. */
6080 if (IS_M(env))
6081 goto illegal_op;
6082 cond = insn >> 28;
6083 if (cond == 0xf){
6084 /* Unconditional instructions. */
6085 if (((insn >> 25) & 7) == 1) {
6086 /* NEON Data processing. */
6087 if (!arm_feature(env, ARM_FEATURE_NEON))
6088 goto illegal_op;
6089
6090 if (disas_neon_data_insn(env, s, insn))
6091 goto illegal_op;
6092 return;
6093 }
6094 if ((insn & 0x0f100000) == 0x04000000) {
6095 /* NEON load/store. */
6096 if (!arm_feature(env, ARM_FEATURE_NEON))
6097 goto illegal_op;
6098
6099 if (disas_neon_ls_insn(env, s, insn))
6100 goto illegal_op;
6101 return;
6102 }
6103 if ((insn & 0x0d70f000) == 0x0550f000)
6104 return; /* PLD */
6105 else if ((insn & 0x0ffffdff) == 0x01010000) {
6106 ARCH(6);
6107 /* setend */
6108 if (insn & (1 << 9)) {
6109 /* BE8 mode not implemented. */
6110 goto illegal_op;
6111 }
6112 return;
6113 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6114 switch ((insn >> 4) & 0xf) {
6115 case 1: /* clrex */
6116 ARCH(6K);
6117 gen_clrex(s);
6118 return;
6119 case 4: /* dsb */
6120 case 5: /* dmb */
6121 case 6: /* isb */
6122 ARCH(7);
6123 /* We don't emulate caches so these are a no-op. */
6124 return;
6125 default:
6126 goto illegal_op;
6127 }
6128 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6129 /* srs */
6130 int32_t offset;
6131 if (IS_USER(s))
6132 goto illegal_op;
6133 ARCH(6);
6134 op1 = (insn & 0x1f);
6135 addr = new_tmp();
6136 tmp = tcg_const_i32(op1);
6137 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6138 tcg_temp_free_i32(tmp);
6139 i = (insn >> 23) & 3;
6140 switch (i) {
6141 case 0: offset = -4; break; /* DA */
6142 case 1: offset = 0; break; /* IA */
6143 case 2: offset = -8; break; /* DB */
6144 case 3: offset = 4; break; /* IB */
6145 default: abort();
6146 }
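            /* SRS stores two words, LR then SPSR, at addr and addr + 4; the
               offset chosen above places that pair correctly for each of the
               DA/IA/DB/IB addressing modes. */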
6147 if (offset)
6148 tcg_gen_addi_i32(addr, addr, offset);
6149 tmp = load_reg(s, 14);
6150 gen_st32(tmp, addr, 0);
6151 tmp = load_cpu_field(spsr);
6152 tcg_gen_addi_i32(addr, addr, 4);
6153 gen_st32(tmp, addr, 0);
6154 if (insn & (1 << 21)) {
6155 /* Base writeback. */
6156 switch (i) {
6157 case 0: offset = -8; break;
6158 case 1: offset = 4; break;
6159 case 2: offset = -4; break;
6160 case 3: offset = 0; break;
6161 default: abort();
6162 }
6163 if (offset)
6164 tcg_gen_addi_i32(addr, addr, offset);
6165 tmp = tcg_const_i32(op1);
6166 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6167 tcg_temp_free_i32(tmp);
6168 dead_tmp(addr);
6169 } else {
6170 dead_tmp(addr);
6171 }
6172 return;
6173 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6174 /* rfe */
6175 int32_t offset;
6176 if (IS_USER(s))
6177 goto illegal_op;
6178 ARCH(6);
6179 rn = (insn >> 16) & 0xf;
6180 addr = load_reg(s, rn);
6181 i = (insn >> 23) & 3;
6182 switch (i) {
6183 case 0: offset = -4; break; /* DA */
6184 case 1: offset = 0; break; /* IA */
6185 case 2: offset = -8; break; /* DB */
6186 case 3: offset = 4; break; /* IB */
6187 default: abort();
6188 }
6189 if (offset)
6190 tcg_gen_addi_i32(addr, addr, offset);
6191 /* Load PC into tmp and CPSR into tmp2. */
6192 tmp = gen_ld32(addr, 0);
6193 tcg_gen_addi_i32(addr, addr, 4);
6194 tmp2 = gen_ld32(addr, 0);
6195 if (insn & (1 << 21)) {
6196 /* Base writeback. */
6197 switch (i) {
6198 case 0: offset = -8; break;
6199 case 1: offset = 4; break;
6200 case 2: offset = -4; break;
6201 case 3: offset = 0; break;
6202 default: abort();
6203 }
6204 if (offset)
6205 tcg_gen_addi_i32(addr, addr, offset);
6206 store_reg(s, rn, addr);
6207 } else {
6208 dead_tmp(addr);
6209 }
6210 gen_rfe(s, tmp, tmp2);
6211 return;
6212 } else if ((insn & 0x0e000000) == 0x0a000000) {
6213 /* branch link and change to thumb (blx <offset>) */
6214 int32_t offset;
6215
6216 val = (uint32_t)s->pc;
6217 tmp = new_tmp();
6218 tcg_gen_movi_i32(tmp, val);
6219 store_reg(s, 14, tmp);
6220 /* Sign-extend the 24-bit offset */
6221 offset = (((int32_t)insn) << 8) >> 8;
6222 /* offset * 4 + bit24 * 2 + (thumb bit) */
6223 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6224 /* pipeline offset */
6225 val += 4;
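            /* Bit 24 is the H bit: it contributes the extra halfword so BLX
               can reach Thumb code on 2-byte boundaries, and the low bit of
               val requests the Thumb switch in gen_bx_im. */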
6226 gen_bx_im(s, val);
6227 return;
6228 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6229 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6230 /* iWMMXt register transfer. */
6231 if (env->cp15.c15_cpar & (1 << 1))
6232 if (!disas_iwmmxt_insn(env, s, insn))
6233 return;
6234 }
6235 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6236 /* Coprocessor double register transfer. */
6237 } else if ((insn & 0x0f000010) == 0x0e000010) {
6238 /* Additional coprocessor register transfer. */
6239 } else if ((insn & 0x0ff10020) == 0x01000000) {
6240 uint32_t mask;
6241 uint32_t val;
6242 /* cps (privileged) */
6243 if (IS_USER(s))
6244 return;
6245 mask = val = 0;
6246 if (insn & (1 << 19)) {
6247 if (insn & (1 << 8))
6248 mask |= CPSR_A;
6249 if (insn & (1 << 7))
6250 mask |= CPSR_I;
6251 if (insn & (1 << 6))
6252 mask |= CPSR_F;
6253 if (insn & (1 << 18))
6254 val |= mask;
6255 }
6256 if (insn & (1 << 17)) {
6257 mask |= CPSR_M;
6258 val |= (insn & 0x1f);
6259 }
6260 if (mask) {
6261 gen_set_psr_im(s, mask, 0, val);
6262 }
6263 return;
6264 }
6265 goto illegal_op;
6266 }
6267 if (cond != 0xe) {
6268 /* if not always execute, we generate a conditional jump to
6269 next instruction */
6270 s->condlabel = gen_new_label();
6271 gen_test_cc(cond ^ 1, s->condlabel);
6272 s->condjmp = 1;
6273 }
6274 if ((insn & 0x0f900000) == 0x03000000) {
6275 if ((insn & (1 << 21)) == 0) {
6276 ARCH(6T2);
6277 rd = (insn >> 12) & 0xf;
6278 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6279 if ((insn & (1 << 22)) == 0) {
6280 /* MOVW */
6281 tmp = new_tmp();
6282 tcg_gen_movi_i32(tmp, val);
6283 } else {
6284 /* MOVT */
6285 tmp = load_reg(s, rd);
6286 tcg_gen_ext16u_i32(tmp, tmp);
6287 tcg_gen_ori_i32(tmp, tmp, val << 16);
6288 }
6289 store_reg(s, rd, tmp);
6290 } else {
6291 if (((insn >> 12) & 0xf) != 0xf)
6292 goto illegal_op;
6293 if (((insn >> 16) & 0xf) == 0) {
6294 gen_nop_hint(s, insn & 0xff);
6295 } else {
6296 /* CPSR = immediate */
6297 val = insn & 0xff;
6298 shift = ((insn >> 8) & 0xf) * 2;
6299 if (shift)
6300 val = (val >> shift) | (val << (32 - shift));
6301 i = ((insn & (1 << 22)) != 0);
6302 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6303 goto illegal_op;
6304 }
6305 }
6306 } else if ((insn & 0x0f900000) == 0x01000000
6307 && (insn & 0x00000090) != 0x00000090) {
6308 /* miscellaneous instructions */
6309 op1 = (insn >> 21) & 3;
6310 sh = (insn >> 4) & 0xf;
6311 rm = insn & 0xf;
6312 switch (sh) {
6313 case 0x0: /* move program status register */
6314 if (op1 & 1) {
6315 /* PSR = reg */
6316 tmp = load_reg(s, rm);
6317 i = ((op1 & 2) != 0);
6318 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6319 goto illegal_op;
6320 } else {
6321 /* reg = PSR */
6322 rd = (insn >> 12) & 0xf;
6323 if (op1 & 2) {
6324 if (IS_USER(s))
6325 goto illegal_op;
6326 tmp = load_cpu_field(spsr);
6327 } else {
6328 tmp = new_tmp();
6329 gen_helper_cpsr_read(tmp);
6330 }
6331 store_reg(s, rd, tmp);
6332 }
6333 break;
6334 case 0x1:
6335 if (op1 == 1) {
6336 /* branch/exchange thumb (bx). */
6337 tmp = load_reg(s, rm);
6338 gen_bx(s, tmp);
6339 } else if (op1 == 3) {
6340 /* clz */
6341 rd = (insn >> 12) & 0xf;
6342 tmp = load_reg(s, rm);
6343 gen_helper_clz(tmp, tmp);
6344 store_reg(s, rd, tmp);
6345 } else {
6346 goto illegal_op;
6347 }
6348 break;
6349 case 0x2:
6350 if (op1 == 1) {
6351 ARCH(5J); /* bxj */
6352 /* Trivial implementation equivalent to bx. */
6353 tmp = load_reg(s, rm);
6354 gen_bx(s, tmp);
6355 } else {
6356 goto illegal_op;
6357 }
6358 break;
6359 case 0x3:
6360 if (op1 != 1)
6361 goto illegal_op;
6362
6363 /* branch link/exchange thumb (blx) */
6364 tmp = load_reg(s, rm);
6365 tmp2 = new_tmp();
6366 tcg_gen_movi_i32(tmp2, s->pc);
6367 store_reg(s, 14, tmp2);
6368 gen_bx(s, tmp);
6369 break;
6370 case 0x5: /* saturating add/subtract */
6371 rd = (insn >> 12) & 0xf;
6372 rn = (insn >> 16) & 0xf;
6373 tmp = load_reg(s, rm);
6374 tmp2 = load_reg(s, rn);
6375 if (op1 & 2)
6376 gen_helper_double_saturate(tmp2, tmp2);
6377 if (op1 & 1)
6378 gen_helper_sub_saturate(tmp, tmp, tmp2);
6379 else
6380 gen_helper_add_saturate(tmp, tmp, tmp2);
6381 dead_tmp(tmp2);
6382 store_reg(s, rd, tmp);
6383 break;
6384 case 7:
6385 /* SMC instruction (op1 == 3)
6386 and undefined instructions (op1 == 0 || op1 == 2)
6387 will trap */
6388 if (op1 != 1) {
6389 goto illegal_op;
6390 }
6391 /* bkpt */
6392 gen_exception_insn(s, 4, EXCP_BKPT);
6393 break;
6394 case 0x8: /* signed multiply */
6395 case 0xa:
6396 case 0xc:
6397 case 0xe:
6398 rs = (insn >> 8) & 0xf;
6399 rn = (insn >> 12) & 0xf;
6400 rd = (insn >> 16) & 0xf;
6401 if (op1 == 1) {
6402 /* (32 * 16) >> 16 */
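/* SMULW<y>/SMLAW<y>: multiply rm by the chosen half of rs and
   keep bits [47:16] of the 48-bit product; the accumulating form
   below also sets the Q flag on overflow. */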
6403 tmp = load_reg(s, rm);
6404 tmp2 = load_reg(s, rs);
6405 if (sh & 4)
6406 tcg_gen_sari_i32(tmp2, tmp2, 16);
6407 else
6408 gen_sxth(tmp2);
6409 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6410 tcg_gen_shri_i64(tmp64, tmp64, 16);
6411 tmp = new_tmp();
6412 tcg_gen_trunc_i64_i32(tmp, tmp64);
6413 tcg_temp_free_i64(tmp64);
6414 if ((sh & 2) == 0) {
6415 tmp2 = load_reg(s, rn);
6416 gen_helper_add_setq(tmp, tmp, tmp2);
6417 dead_tmp(tmp2);
6418 }
6419 store_reg(s, rd, tmp);
6420 } else {
6421 /* 16 * 16 */
6422 tmp = load_reg(s, rm);
6423 tmp2 = load_reg(s, rs);
6424 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6425 dead_tmp(tmp2);
6426 if (op1 == 2) {
6427 tmp64 = tcg_temp_new_i64();
6428 tcg_gen_ext_i32_i64(tmp64, tmp);
6429 dead_tmp(tmp);
6430 gen_addq(s, tmp64, rn, rd);
6431 gen_storeq_reg(s, rn, rd, tmp64);
6432 tcg_temp_free_i64(tmp64);
6433 } else {
6434 if (op1 == 0) {
6435 tmp2 = load_reg(s, rn);
6436 gen_helper_add_setq(tmp, tmp, tmp2);
6437 dead_tmp(tmp2);
6438 }
6439 store_reg(s, rd, tmp);
6440 }
6441 }
6442 break;
6443 default:
6444 goto illegal_op;
6445 }
6446 } else if (((insn & 0x0e000000) == 0 &&
6447 (insn & 0x00000090) != 0x90) ||
6448 ((insn & 0x0e000000) == (1 << 25))) {
6449 int set_cc, logic_cc, shiftop;
6450
6451 op1 = (insn >> 21) & 0xf;
6452 set_cc = (insn >> 20) & 1;
6453 logic_cc = table_logic_cc[op1] & set_cc;
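/* logic_cc is set only when the S bit is set and the opcode is a
   logical one; arithmetic opcodes compute their flags in the
   *_cc helpers instead. */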
6454
6455 /* data processing instruction */
6456 if (insn & (1 << 25)) {
6457 /* immediate operand */
6458 val = insn & 0xff;
6459 shift = ((insn >> 8) & 0xf) * 2;
6460 if (shift) {
6461 val = (val >> shift) | (val << (32 - shift));
6462 }
6463 tmp2 = new_tmp();
6464 tcg_gen_movi_i32(tmp2, val);
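/* For flag-setting logical ops, a nonzero rotate supplies the
   shifter carry-out: bit 31 of the rotated constant. */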
6465 if (logic_cc && shift) {
6466 gen_set_CF_bit31(tmp2);
6467 }
6468 } else {
6469 /* register */
6470 rm = (insn) & 0xf;
6471 tmp2 = load_reg(s, rm);
6472 shiftop = (insn >> 5) & 3;
6473 if (!(insn & (1 << 4))) {
6474 shift = (insn >> 7) & 0x1f;
6475 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6476 } else {
6477 rs = (insn >> 8) & 0xf;
6478 tmp = load_reg(s, rs);
6479 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6480 }
6481 }
6482 if (op1 != 0x0f && op1 != 0x0d) {
6483 rn = (insn >> 16) & 0xf;
6484 tmp = load_reg(s, rn);
6485 } else {
6486 TCGV_UNUSED(tmp);
6487 }
6488 rd = (insn >> 12) & 0xf;
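/* op1 is the standard data-processing opcode:
   0=AND 1=EOR 2=SUB 3=RSB 4=ADD 5=ADC 6=SBC 7=RSC
   8=TST 9=TEQ a=CMP b=CMN c=ORR d=MOV e=BIC f=MVN */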
6489 switch(op1) {
6490 case 0x00:
6491 tcg_gen_and_i32(tmp, tmp, tmp2);
6492 if (logic_cc) {
6493 gen_logic_CC(tmp);
6494 }
6495 store_reg_bx(env, s, rd, tmp);
6496 break;
6497 case 0x01:
6498 tcg_gen_xor_i32(tmp, tmp, tmp2);
6499 if (logic_cc) {
6500 gen_logic_CC(tmp);
6501 }
6502 store_reg_bx(env, s, rd, tmp);
6503 break;
6504 case 0x02:
6505 if (set_cc && rd == 15) {
6506 /* SUBS r15, ... is used for exception return. */
6507 if (IS_USER(s)) {
6508 goto illegal_op;
6509 }
6510 gen_helper_sub_cc(tmp, tmp, tmp2);
6511 gen_exception_return(s, tmp);
6512 } else {
6513 if (set_cc) {
6514 gen_helper_sub_cc(tmp, tmp, tmp2);
6515 } else {
6516 tcg_gen_sub_i32(tmp, tmp, tmp2);
6517 }
6518 store_reg_bx(env, s, rd, tmp);
6519 }
6520 break;
6521 case 0x03:
6522 if (set_cc) {
6523 gen_helper_sub_cc(tmp, tmp2, tmp);
6524 } else {
6525 tcg_gen_sub_i32(tmp, tmp2, tmp);
6526 }
6527 store_reg_bx(env, s, rd, tmp);
6528 break;
6529 case 0x04:
6530 if (set_cc) {
6531 gen_helper_add_cc(tmp, tmp, tmp2);
6532 } else {
6533 tcg_gen_add_i32(tmp, tmp, tmp2);
6534 }
6535 store_reg_bx(env, s, rd, tmp);
6536 break;
6537 case 0x05:
6538 if (set_cc) {
6539 gen_helper_adc_cc(tmp, tmp, tmp2);
6540 } else {
6541 gen_add_carry(tmp, tmp, tmp2);
6542 }
6543 store_reg_bx(env, s, rd, tmp);
6544 break;
6545 case 0x06:
6546 if (set_cc) {
6547 gen_helper_sbc_cc(tmp, tmp, tmp2);
6548 } else {
6549 gen_sub_carry(tmp, tmp, tmp2);
6550 }
6551 store_reg_bx(env, s, rd, tmp);
6552 break;
6553 case 0x07:
6554 if (set_cc) {
6555 gen_helper_sbc_cc(tmp, tmp2, tmp);
6556 } else {
6557 gen_sub_carry(tmp, tmp2, tmp);
6558 }
6559 store_reg_bx(env, s, rd, tmp);
6560 break;
6561 case 0x08:
6562 if (set_cc) {
6563 tcg_gen_and_i32(tmp, tmp, tmp2);
6564 gen_logic_CC(tmp);
6565 }
6566 dead_tmp(tmp);
6567 break;
6568 case 0x09:
6569 if (set_cc) {
6570 tcg_gen_xor_i32(tmp, tmp, tmp2);
6571 gen_logic_CC(tmp);
6572 }
6573 dead_tmp(tmp);
6574 break;
6575 case 0x0a:
6576 if (set_cc) {
6577 gen_helper_sub_cc(tmp, tmp, tmp2);
6578 }
6579 dead_tmp(tmp);
6580 break;
6581 case 0x0b:
6582 if (set_cc) {
6583 gen_helper_add_cc(tmp, tmp, tmp2);
6584 }
6585 dead_tmp(tmp);
6586 break;
6587 case 0x0c:
6588 tcg_gen_or_i32(tmp, tmp, tmp2);
6589 if (logic_cc) {
6590 gen_logic_CC(tmp);
6591 }
6592 store_reg_bx(env, s, rd, tmp);
6593 break;
6594 case 0x0d:
6595 if (logic_cc && rd == 15) {
6596 /* MOVS r15, ... is used for exception return. */
6597 if (IS_USER(s)) {
6598 goto illegal_op;
6599 }
6600 gen_exception_return(s, tmp2);
6601 } else {
6602 if (logic_cc) {
6603 gen_logic_CC(tmp2);
6604 }
6605 store_reg_bx(env, s, rd, tmp2);
6606 }
6607 break;
6608 case 0x0e:
6609 tcg_gen_andc_i32(tmp, tmp, tmp2);
6610 if (logic_cc) {
6611 gen_logic_CC(tmp);
6612 }
6613 store_reg_bx(env, s, rd, tmp);
6614 break;
6615 default:
6616 case 0x0f:
6617 tcg_gen_not_i32(tmp2, tmp2);
6618 if (logic_cc) {
6619 gen_logic_CC(tmp2);
6620 }
6621 store_reg_bx(env, s, rd, tmp2);
6622 break;
6623 }
6624 if (op1 != 0x0f && op1 != 0x0d) {
6625 dead_tmp(tmp2);
6626 }
6627 } else {
6628 /* other instructions */
6629 op1 = (insn >> 24) & 0xf;
6630 switch(op1) {
6631 case 0x0:
6632 case 0x1:
6633 /* multiplies, extra load/stores */
6634 sh = (insn >> 5) & 3;
6635 if (sh == 0) {
6636 if (op1 == 0x0) {
6637 rd = (insn >> 16) & 0xf;
6638 rn = (insn >> 12) & 0xf;
6639 rs = (insn >> 8) & 0xf;
6640 rm = (insn) & 0xf;
6641 op1 = (insn >> 20) & 0xf;
6642 switch (op1) {
6643 case 0: case 1: case 2: case 3: case 6:
6644 /* 32 bit mul */
6645 tmp = load_reg(s, rs);
6646 tmp2 = load_reg(s, rm);
6647 tcg_gen_mul_i32(tmp, tmp, tmp2);
6648 dead_tmp(tmp2);
6649 if (insn & (1 << 22)) {
6650 /* Subtract (mls) */
6651 ARCH(6T2);
6652 tmp2 = load_reg(s, rn);
6653 tcg_gen_sub_i32(tmp, tmp2, tmp);
6654 dead_tmp(tmp2);
6655 } else if (insn & (1 << 21)) {
6656 /* Add */
6657 tmp2 = load_reg(s, rn);
6658 tcg_gen_add_i32(tmp, tmp, tmp2);
6659 dead_tmp(tmp2);
6660 }
6661 if (insn & (1 << 20))
6662 gen_logic_CC(tmp);
6663 store_reg(s, rd, tmp);
6664 break;
6665 case 4:
6666 /* 64 bit mul double accumulate (UMAAL) */
6667 ARCH(6);
6668 tmp = load_reg(s, rs);
6669 tmp2 = load_reg(s, rm);
6670 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6671 gen_addq_lo(s, tmp64, rn);
6672 gen_addq_lo(s, tmp64, rd);
6673 gen_storeq_reg(s, rn, rd, tmp64);
6674 tcg_temp_free_i64(tmp64);
6675 break;
6676 case 8: case 9: case 10: case 11:
6677 case 12: case 13: case 14: case 15:
6678 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6679 tmp = load_reg(s, rs);
6680 tmp2 = load_reg(s, rm);
6681 if (insn & (1 << 22)) {
6682 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6683 } else {
6684 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6685 }
6686 if (insn & (1 << 21)) { /* mult accumulate */
6687 gen_addq(s, tmp64, rn, rd);
6688 }
6689 if (insn & (1 << 20)) {
6690 gen_logicq_cc(tmp64);
6691 }
6692 gen_storeq_reg(s, rn, rd, tmp64);
6693 tcg_temp_free_i64(tmp64);
6694 break;
6695 default:
6696 goto illegal_op;
6697 }
6698 } else {
6699 rn = (insn >> 16) & 0xf;
6700 rd = (insn >> 12) & 0xf;
6701 if (insn & (1 << 23)) {
6702 /* load/store exclusive */
6703 op1 = (insn >> 21) & 0x3;
6704 if (op1)
6705 ARCH(6K);
6706 else
6707 ARCH(6);
6708 addr = tcg_temp_local_new_i32();
6709 load_reg_var(s, addr, rn);
6710 if (insn & (1 << 20)) {
6711 switch (op1) {
6712 case 0: /* ldrex */
6713 gen_load_exclusive(s, rd, 15, addr, 2);
6714 break;
6715 case 1: /* ldrexd */
6716 gen_load_exclusive(s, rd, rd + 1, addr, 3);
6717 break;
6718 case 2: /* ldrexb */
6719 gen_load_exclusive(s, rd, 15, addr, 0);
6720 break;
6721 case 3: /* ldrexh */
6722 gen_load_exclusive(s, rd, 15, addr, 1);
6723 break;
6724 default:
6725 abort();
6726 }
6727 } else {
6728 rm = insn & 0xf;
6729 switch (op1) {
6730 case 0: /* strex */
6731 gen_store_exclusive(s, rd, rm, 15, addr, 2);
6732 break;
6733 case 1: /* strexd */
6734 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
6735 break;
6736 case 2: /* strexb */
6737 gen_store_exclusive(s, rd, rm, 15, addr, 0);
6738 break;
6739 case 3: /* strexh */
6740 gen_store_exclusive(s, rd, rm, 15, addr, 1);
6741 break;
6742 default:
6743 abort();
6744 }
6745 }
6746 tcg_temp_free(addr);
6747 } else {
6748 /* SWP instruction */
6749 rm = (insn) & 0xf;
6750
6751 /* ??? This is not really atomic. However, we know
6752 we never have multiple CPUs running in parallel,
6753 so it is good enough. */
6754 addr = load_reg(s, rn);
6755 tmp = load_reg(s, rm);
6756 if (insn & (1 << 22)) {
6757 tmp2 = gen_ld8u(addr, IS_USER(s));
6758 gen_st8(tmp, addr, IS_USER(s));
6759 } else {
6760 tmp2 = gen_ld32(addr, IS_USER(s));
6761 gen_st32(tmp, addr, IS_USER(s));
6762 }
6763 dead_tmp(addr);
6764 store_reg(s, rd, tmp2);
6765 }
6766 }
6767 } else {
6768 int address_offset;
6769 int load;
6770 /* Misc load/store */
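/* sh selects the transfer: 1 = halfword, 2/3 = signed byte/
   halfword loads, or LDRD/STRD when the load bit is clear. */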
6771 rn = (insn >> 16) & 0xf;
6772 rd = (insn >> 12) & 0xf;
6773 addr = load_reg(s, rn);
6774 if (insn & (1 << 24))
6775 gen_add_datah_offset(s, insn, 0, addr);
6776 address_offset = 0;
6777 if (insn & (1 << 20)) {
6778 /* load */
6779 switch(sh) {
6780 case 1:
6781 tmp = gen_ld16u(addr, IS_USER(s));
6782 break;
6783 case 2:
6784 tmp = gen_ld8s(addr, IS_USER(s));
6785 break;
6786 default:
6787 case 3:
6788 tmp = gen_ld16s(addr, IS_USER(s));
6789 break;
6790 }
6791 load = 1;
6792 } else if (sh & 2) {
6793 /* doubleword */
6794 if (sh & 1) {
6795 /* store */
6796 tmp = load_reg(s, rd);
6797 gen_st32(tmp, addr, IS_USER(s));
6798 tcg_gen_addi_i32(addr, addr, 4);
6799 tmp = load_reg(s, rd + 1);
6800 gen_st32(tmp, addr, IS_USER(s));
6801 load = 0;
6802 } else {
6803 /* load */
6804 tmp = gen_ld32(addr, IS_USER(s));
6805 store_reg(s, rd, tmp);
6806 tcg_gen_addi_i32(addr, addr, 4);
6807 tmp = gen_ld32(addr, IS_USER(s));
6808 rd++;
6809 load = 1;
6810 }
6811 address_offset = -4;
6812 } else {
6813 /* store */
6814 tmp = load_reg(s, rd);
6815 gen_st16(tmp, addr, IS_USER(s));
6816 load = 0;
6817 }
6818 /* Perform base writeback before the loaded value to
6819 ensure correct behavior with overlapping index registers.
6820 ldrd with base writeback is undefined if the
6821 destination and index registers overlap. */
6822 if (!(insn & (1 << 24))) {
6823 gen_add_datah_offset(s, insn, address_offset, addr);
6824 store_reg(s, rn, addr);
6825 } else if (insn & (1 << 21)) {
6826 if (address_offset)
6827 tcg_gen_addi_i32(addr, addr, address_offset);
6828 store_reg(s, rn, addr);
6829 } else {
6830 dead_tmp(addr);
6831 }
6832 if (load) {
6833 /* Complete the load. */
6834 store_reg(s, rd, tmp);
6835 }
6836 }
6837 break;
6838 case 0x4:
6839 case 0x5:
6840 goto do_ldst;
6841 case 0x6:
6842 case 0x7:
6843 if (insn & (1 << 4)) {
6844 ARCH(6);
6845 /* ARMv6 media instructions. */
6846 rm = insn & 0xf;
6847 rn = (insn >> 16) & 0xf;
6848 rd = (insn >> 12) & 0xf;
6849 rs = (insn >> 8) & 0xf;
6850 switch ((insn >> 23) & 3) {
6851 case 0: /* Parallel add/subtract. */
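/* op1 picks the signed/unsigned and saturating/halving variant;
   sh picks the operation (ADD16, ASX, SAX, SUB16, ADD8, SUB8). */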
6852 op1 = (insn >> 20) & 7;
6853 tmp = load_reg(s, rn);
6854 tmp2 = load_reg(s, rm);
6855 sh = (insn >> 5) & 7;
6856 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6857 goto illegal_op;
6858 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6859 dead_tmp(tmp2);
6860 store_reg(s, rd, tmp);
6861 break;
6862 case 1:
6863 if ((insn & 0x00700020) == 0) {
6864 /* Halfword pack. */
6865 tmp = load_reg(s, rn);
6866 tmp2 = load_reg(s, rm);
6867 shift = (insn >> 7) & 0x1f;
6868 if (insn & (1 << 6)) {
6869 /* pkhtb */
6870 if (shift == 0)
6871 shift = 31;
6872 tcg_gen_sari_i32(tmp2, tmp2, shift);
6873 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6874 tcg_gen_ext16u_i32(tmp2, tmp2);
6875 } else {
6876 /* pkhbt */
6877 if (shift)
6878 tcg_gen_shli_i32(tmp2, tmp2, shift);
6879 tcg_gen_ext16u_i32(tmp, tmp);
6880 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6881 }
6882 tcg_gen_or_i32(tmp, tmp, tmp2);
6883 dead_tmp(tmp2);
6884 store_reg(s, rd, tmp);
6885 } else if ((insn & 0x00200020) == 0x00200000) {
6886 /* [us]sat */
6887 tmp = load_reg(s, rm);
6888 shift = (insn >> 7) & 0x1f;
6889 if (insn & (1 << 6)) {
6890 if (shift == 0)
6891 shift = 31;
6892 tcg_gen_sari_i32(tmp, tmp, shift);
6893 } else {
6894 tcg_gen_shli_i32(tmp, tmp, shift);
6895 }
6896 sh = (insn >> 16) & 0x1f;
6897 tmp2 = tcg_const_i32(sh);
6898 if (insn & (1 << 22))
6899 gen_helper_usat(tmp, tmp, tmp2);
6900 else
6901 gen_helper_ssat(tmp, tmp, tmp2);
6902 tcg_temp_free_i32(tmp2);
6903 store_reg(s, rd, tmp);
6904 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6905 /* [us]sat16 */
6906 tmp = load_reg(s, rm);
6907 sh = (insn >> 16) & 0x1f;
6908 tmp2 = tcg_const_i32(sh);
6909 if (insn & (1 << 22))
6910 gen_helper_usat16(tmp, tmp, tmp2);
6911 else
6912 gen_helper_ssat16(tmp, tmp, tmp2);
6913 tcg_temp_free_i32(tmp2);
6914 store_reg(s, rd, tmp);
6915 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6916 /* Select bytes. */
6917 tmp = load_reg(s, rn);
6918 tmp2 = load_reg(s, rm);
6919 tmp3 = new_tmp();
6920 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6921 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6922 dead_tmp(tmp3);
6923 dead_tmp(tmp2);
6924 store_reg(s, rd, tmp);
6925 } else if ((insn & 0x000003e0) == 0x00000060) {
6926 tmp = load_reg(s, rm);
6927 shift = (insn >> 10) & 3;
6928 /* ??? In many cases it's not necessary to do a
6929 rotate, a shift is sufficient. */
6930 if (shift != 0)
6931 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
6932 op1 = (insn >> 20) & 7;
6933 switch (op1) {
6934 case 0: gen_sxtb16(tmp); break;
6935 case 2: gen_sxtb(tmp); break;
6936 case 3: gen_sxth(tmp); break;
6937 case 4: gen_uxtb16(tmp); break;
6938 case 6: gen_uxtb(tmp); break;
6939 case 7: gen_uxth(tmp); break;
6940 default: goto illegal_op;
6941 }
6942 if (rn != 15) {
6943 tmp2 = load_reg(s, rn);
6944 if ((op1 & 3) == 0) {
6945 gen_add16(tmp, tmp2);
6946 } else {
6947 tcg_gen_add_i32(tmp, tmp, tmp2);
6948 dead_tmp(tmp2);
6949 }
6950 }
6951 store_reg(s, rd, tmp);
6952 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6953 /* rev */
6954 tmp = load_reg(s, rm);
6955 if (insn & (1 << 22)) {
6956 if (insn & (1 << 7)) {
6957 gen_revsh(tmp);
6958 } else {
6959 ARCH(6T2);
6960 gen_helper_rbit(tmp, tmp);
6961 }
6962 } else {
6963 if (insn & (1 << 7))
6964 gen_rev16(tmp);
6965 else
6966 tcg_gen_bswap32_i32(tmp, tmp);
6967 }
6968 store_reg(s, rd, tmp);
6969 } else {
6970 goto illegal_op;
6971 }
6972 break;
6973 case 2: /* Multiplies (Type 3). */
6974 tmp = load_reg(s, rm);
6975 tmp2 = load_reg(s, rs);
6976 if (insn & (1 << 20)) {
6977 /* Signed multiply most significant [accumulate].
6978 (SMMUL, SMMLA, SMMLS) */
6979 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6980
6981 if (rd != 15) {
6982 tmp = load_reg(s, rd);
6983 if (insn & (1 << 6)) {
6984 tmp64 = gen_subq_msw(tmp64, tmp);
6985 } else {
6986 tmp64 = gen_addq_msw(tmp64, tmp);
6987 }
6988 }
6989 if (insn & (1 << 5)) {
6990 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6991 }
6992 tcg_gen_shri_i64(tmp64, tmp64, 32);
6993 tmp = new_tmp();
6994 tcg_gen_trunc_i64_i32(tmp, tmp64);
6995 tcg_temp_free_i64(tmp64);
6996 store_reg(s, rn, tmp);
6997 } else {
6998 if (insn & (1 << 5))
6999 gen_swap_half(tmp2);
7000 gen_smul_dual(tmp, tmp2);
7001 /* This addition cannot overflow. */
7002 if (insn & (1 << 6)) {
7003 tcg_gen_sub_i32(tmp, tmp, tmp2);
7004 } else {
7005 tcg_gen_add_i32(tmp, tmp, tmp2);
7006 }
7007 dead_tmp(tmp2);
7008 if (insn & (1 << 22)) {
7009 /* smlald, smlsld */
7010 tmp64 = tcg_temp_new_i64();
7011 tcg_gen_ext_i32_i64(tmp64, tmp);
7012 dead_tmp(tmp);
7013 gen_addq(s, tmp64, rd, rn);
7014 gen_storeq_reg(s, rd, rn, tmp64);
7015 tcg_temp_free_i64(tmp64);
7016 } else {
7017 /* smuad, smusd, smlad, smlsd */
7018 if (rd != 15)
7019 {
7020 tmp2 = load_reg(s, rd);
7021 gen_helper_add_setq(tmp, tmp, tmp2);
7022 dead_tmp(tmp2);
7023 }
7024 store_reg(s, rn, tmp);
7025 }
7026 }
7027 break;
7028 case 3:
7029 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7030 switch (op1) {
7031 case 0: /* Unsigned sum of absolute differences. */
7032 ARCH(6);
7033 tmp = load_reg(s, rm);
7034 tmp2 = load_reg(s, rs);
7035 gen_helper_usad8(tmp, tmp, tmp2);
7036 dead_tmp(tmp2);
7037 if (rd != 15) {
7038 tmp2 = load_reg(s, rd);
7039 tcg_gen_add_i32(tmp, tmp, tmp2);
7040 dead_tmp(tmp2);
7041 }
7042 store_reg(s, rn, tmp);
7043 break;
7044 case 0x20: case 0x24: case 0x28: case 0x2c:
7045 /* Bitfield insert/clear. */
7046 ARCH(6T2);
7047 shift = (insn >> 7) & 0x1f;
7048 i = (insn >> 16) & 0x1f;
7049 i = i + 1 - shift;
7050 if (rm == 15) {
7051 tmp = new_tmp();
7052 tcg_gen_movi_i32(tmp, 0);
7053 } else {
7054 tmp = load_reg(s, rm);
7055 }
7056 if (i != 32) {
7057 tmp2 = load_reg(s, rd);
7058 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7059 dead_tmp(tmp2);
7060 }
7061 store_reg(s, rd, tmp);
7062 break;
7063 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7064 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7065 ARCH(6T2);
7066 tmp = load_reg(s, rm);
7067 shift = (insn >> 7) & 0x1f;
7068 i = ((insn >> 16) & 0x1f) + 1;
7069 if (shift + i > 32)
7070 goto illegal_op;
7071 if (i < 32) {
7072 if (op1 & 0x20) {
7073 gen_ubfx(tmp, shift, (1u << i) - 1);
7074 } else {
7075 gen_sbfx(tmp, shift, i);
7076 }
7077 }
7078 store_reg(s, rd, tmp);
7079 break;
7080 default:
7081 goto illegal_op;
7082 }
7083 break;
7084 }
7085 break;
7086 }
7087 do_ldst:
7088 /* Check for undefined extension instructions
7089 * per the ARM Bible, i.e.:
7090 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7091 */
7092 sh = (0xf << 20) | (0xf << 4);
7093 if (op1 == 0x7 && ((insn & sh) == sh))
7094 {
7095 goto illegal_op;
7096 }
7097 /* load/store byte/word */
7098 rn = (insn >> 16) & 0xf;
7099 rd = (insn >> 12) & 0xf;
7100 tmp2 = load_reg(s, rn);
7101 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
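/* Post-indexed with the W bit set means LDRT/STRT: force a
   user-mode (translated) access. */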
7102 if (insn & (1 << 24))
7103 gen_add_data_offset(s, insn, tmp2);
7104 if (insn & (1 << 20)) {
7105 /* load */
7106 if (insn & (1 << 22)) {
7107 tmp = gen_ld8u(tmp2, i);
7108 } else {
7109 tmp = gen_ld32(tmp2, i);
7110 }
7111 } else {
7112 /* store */
7113 tmp = load_reg(s, rd);
7114 if (insn & (1 << 22))
7115 gen_st8(tmp, tmp2, i);
7116 else
7117 gen_st32(tmp, tmp2, i);
7118 }
7119 if (!(insn & (1 << 24))) {
7120 gen_add_data_offset(s, insn, tmp2);
7121 store_reg(s, rn, tmp2);
7122 } else if (insn & (1 << 21)) {
7123 store_reg(s, rn, tmp2);
7124 } else {
7125 dead_tmp(tmp2);
7126 }
7127 if (insn & (1 << 20)) {
7128 /* Complete the load. */
7129 if (rd == 15)
7130 gen_bx(s, tmp);
7131 else
7132 store_reg(s, rd, tmp);
7133 }
7134 break;
7135 case 0x08:
7136 case 0x09:
7137 {
7138 int j, n, user, loaded_base;
7139 TCGv loaded_var;
7140 /* load/store multiple words */
7141 /* XXX: store the correct base if writeback occurs */
7142 user = 0;
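/* The S bit (bit 22): an LDM with the PC in the list restores
   CPSR from SPSR; otherwise the user-mode register bank is
   transferred. */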
7143 if (insn & (1 << 22)) {
7144 if (IS_USER(s))
7145 goto illegal_op; /* only usable in supervisor mode */
7146
7147 if ((insn & (1 << 15)) == 0)
7148 user = 1;
7149 }
7150 rn = (insn >> 16) & 0xf;
7151 addr = load_reg(s, rn);
7152
7153 /* compute total size */
7154 loaded_base = 0;
7155 TCGV_UNUSED(loaded_var);
7156 n = 0;
7157 for (i = 0; i < 16; i++) {
7158 if (insn & (1 << i))
7159 n++;
7160 }
7161 /* XXX: test invalid n == 0 case ? */
7162 if (insn & (1 << 23)) {
7163 if (insn & (1 << 24)) {
7164 /* pre increment */
7165 tcg_gen_addi_i32(addr, addr, 4);
7166 } else {
7167 /* post increment */
7168 }
7169 } else {
7170 if (insn & (1 << 24)) {
7171 /* pre decrement */
7172 tcg_gen_addi_i32(addr, addr, -(n * 4));
7173 } else {
7174 /* post decrement */
7175 if (n != 1)
7176 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7177 }
7178 }
7179 j = 0;
7180 for (i = 0; i < 16; i++) {
7181 if (insn & (1 << i)) {
7182 if (insn & (1 << 20)) {
7183 /* load */
7184 tmp = gen_ld32(addr, IS_USER(s));
7185 if (i == 15) {
7186 gen_bx(s, tmp);
7187 } else if (user) {
7188 tmp2 = tcg_const_i32(i);
7189 gen_helper_set_user_reg(tmp2, tmp);
7190 tcg_temp_free_i32(tmp2);
7191 dead_tmp(tmp);
7192 } else if (i == rn) {
7193 loaded_var = tmp;
7194 loaded_base = 1;
7195 } else {
7196 store_reg(s, i, tmp);
7197 }
7198 } else {
7199 /* store */
7200 if (i == 15) {
7201 /* special case: r15 = PC + 8 */
7202 val = (long)s->pc + 4;
7203 tmp = new_tmp();
7204 tcg_gen_movi_i32(tmp, val);
7205 } else if (user) {
7206 tmp = new_tmp();
7207 tmp2 = tcg_const_i32(i);
7208 gen_helper_get_user_reg(tmp, tmp2);
7209 tcg_temp_free_i32(tmp2);
7210 } else {
7211 tmp = load_reg(s, i);
7212 }
7213 gen_st32(tmp, addr, IS_USER(s));
7214 }
7215 j++;
7216 /* no need to add after the last transfer */
7217 if (j != n)
7218 tcg_gen_addi_i32(addr, addr, 4);
7219 }
7220 }
7221 if (insn & (1 << 21)) {
7222 /* write back */
7223 if (insn & (1 << 23)) {
7224 if (insn & (1 << 24)) {
7225 /* pre increment */
7226 } else {
7227 /* post increment */
7228 tcg_gen_addi_i32(addr, addr, 4);
7229 }
7230 } else {
7231 if (insn & (1 << 24)) {
7232 /* pre decrement */
7233 if (n != 1)
7234 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7235 } else {
7236 /* post decrement */
7237 tcg_gen_addi_i32(addr, addr, -(n * 4));
7238 }
7239 }
7240 store_reg(s, rn, addr);
7241 } else {
7242 dead_tmp(addr);
7243 }
7244 if (loaded_base) {
7245 store_reg(s, rn, loaded_var);
7246 }
7247 if ((insn & (1 << 22)) && !user) {
7248 /* Restore CPSR from SPSR. */
7249 tmp = load_cpu_field(spsr);
7250 gen_set_cpsr(tmp, 0xffffffff);
7251 dead_tmp(tmp);
7252 s->is_jmp = DISAS_UPDATE;
7253 }
7254 }
7255 break;
7256 case 0xa:
7257 case 0xb:
7258 {
7259 int32_t offset;
7260
7261 /* branch (and link) */
7262 val = (int32_t)s->pc;
7263 if (insn & (1 << 24)) {
7264 tmp = new_tmp();
7265 tcg_gen_movi_i32(tmp, val);
7266 store_reg(s, 14, tmp);
7267 }
7268 offset = (((int32_t)insn << 8) >> 8);
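/* Sign-extend the 24-bit offset; the target is the instruction
   address + 8 plus four times the offset. */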
7269 val += (offset << 2) + 4;
7270 gen_jmp(s, val);
7271 }
7272 break;
7273 case 0xc:
7274 case 0xd:
7275 case 0xe:
7276 /* Coprocessor. */
7277 if (disas_coproc_insn(env, s, insn))
7278 goto illegal_op;
7279 break;
7280 case 0xf:
7281 /* swi */
7282 gen_set_pc_im(s->pc);
7283 s->is_jmp = DISAS_SWI;
7284 break;
7285 default:
7286 illegal_op:
7287 gen_exception_insn(s, 4, EXCP_UDEF);
7288 break;
7289 }
7290 }
7291 }
7292
7293 /* Return true if this is a Thumb-2 logical op. */
7294 static int
7295 thumb2_logic_op(int op)
7296 {
7297 return (op < 8);
7298 }
7299
7300 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7301 then set condition code flags based on the result of the operation.
7302 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7303 to the high bit of T1.
7304 Returns zero if the opcode is valid. */
7305
7306 static int
7307 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7308 {
7309 int logic_cc;
7310
7311 logic_cc = 0;
7312 switch (op) {
7313 case 0: /* and */
7314 tcg_gen_and_i32(t0, t0, t1);
7315 logic_cc = conds;
7316 break;
7317 case 1: /* bic */
7318 tcg_gen_andc_i32(t0, t0, t1);
7319 logic_cc = conds;
7320 break;
7321 case 2: /* orr */
7322 tcg_gen_or_i32(t0, t0, t1);
7323 logic_cc = conds;
7324 break;
7325 case 3: /* orn */
7326 tcg_gen_not_i32(t1, t1);
7327 tcg_gen_or_i32(t0, t0, t1);
7328 logic_cc = conds;
7329 break;
7330 case 4: /* eor */
7331 tcg_gen_xor_i32(t0, t0, t1);
7332 logic_cc = conds;
7333 break;
7334 case 8: /* add */
7335 if (conds)
7336 gen_helper_add_cc(t0, t0, t1);
7337 else
7338 tcg_gen_add_i32(t0, t0, t1);
7339 break;
7340 case 10: /* adc */
7341 if (conds)
7342 gen_helper_adc_cc(t0, t0, t1);
7343 else
7344 gen_adc(t0, t1);
7345 break;
7346 case 11: /* sbc */
7347 if (conds)
7348 gen_helper_sbc_cc(t0, t0, t1);
7349 else
7350 gen_sub_carry(t0, t0, t1);
7351 break;
7352 case 13: /* sub */
7353 if (conds)
7354 gen_helper_sub_cc(t0, t0, t1);
7355 else
7356 tcg_gen_sub_i32(t0, t0, t1);
7357 break;
7358 case 14: /* rsb */
7359 if (conds)
7360 gen_helper_sub_cc(t0, t1, t0);
7361 else
7362 tcg_gen_sub_i32(t0, t1, t0);
7363 break;
7364 default: /* 5, 6, 7, 9, 12, 15. */
7365 return 1;
7366 }
7367 if (logic_cc) {
7368 gen_logic_CC(t0);
7369 if (shifter_out)
7370 gen_set_CF_bit31(t1);
7371 }
7372 return 0;
7373 }
7374
7375 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7376 is not legal. */
7377 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7378 {
7379 uint32_t insn, imm, shift, offset;
7380 uint32_t rd, rn, rm, rs;
7381 TCGv tmp;
7382 TCGv tmp2;
7383 TCGv tmp3;
7384 TCGv addr;
7385 TCGv_i64 tmp64;
7386 int op;
7387 int shiftop;
7388 int conds;
7389 int logic_cc;
7390
7391 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7392 || arm_feature (env, ARM_FEATURE_M))) {
7393 /* Thumb-1 cores may need to treat bl and blx as a pair of
7394 16-bit instructions to get correct prefetch abort behavior. */
7395 insn = insn_hw1;
7396 if ((insn & (1 << 12)) == 0) {
7397 /* Second half of blx. */
7398 offset = ((insn & 0x7ff) << 1);
7399 tmp = load_reg(s, 14);
7400 tcg_gen_addi_i32(tmp, tmp, offset);
7401 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7402
7403 tmp2 = new_tmp();
7404 tcg_gen_movi_i32(tmp2, s->pc | 1);
7405 store_reg(s, 14, tmp2);
7406 gen_bx(s, tmp);
7407 return 0;
7408 }
7409 if (insn & (1 << 11)) {
7410 /* Second half of bl. */
7411 offset = ((insn & 0x7ff) << 1) | 1;
7412 tmp = load_reg(s, 14);
7413 tcg_gen_addi_i32(tmp, tmp, offset);
7414
7415 tmp2 = new_tmp();
7416 tcg_gen_movi_i32(tmp2, s->pc | 1);
7417 store_reg(s, 14, tmp2);
7418 gen_bx(s, tmp);
7419 return 0;
7420 }
7421 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7422 /* Instruction spans a page boundary. Implement it as two
7423 16-bit instructions in case the second half causes a
7424 prefetch abort. */
7425 offset = ((int32_t)insn << 21) >> 9;
7426 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7427 return 0;
7428 }
7429 /* Fall through to 32-bit decode. */
7430 }
7431
7432 insn = lduw_code(s->pc);
7433 s->pc += 2;
7434 insn |= (uint32_t)insn_hw1 << 16;
7435
7436 if ((insn & 0xf800e800) != 0xf000e800) {
7437 ARCH(6T2);
7438 }
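/* Only the 32-bit BL/BLX encodings exist before Thumb-2. */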
7439
7440 rn = (insn >> 16) & 0xf;
7441 rs = (insn >> 12) & 0xf;
7442 rd = (insn >> 8) & 0xf;
7443 rm = insn & 0xf;
7444 switch ((insn >> 25) & 0xf) {
7445 case 0: case 1: case 2: case 3:
7446 /* 16-bit instructions. Should never happen. */
7447 abort();
7448 case 4:
7449 if (insn & (1 << 22)) {
7450 /* Other load/store, table branch. */
7451 if (insn & 0x01200000) {
7452 /* Load/store doubleword. */
7453 if (rn == 15) {
7454 addr = new_tmp();
7455 tcg_gen_movi_i32(addr, s->pc & ~3);
7456 } else {
7457 addr = load_reg(s, rn);
7458 }
7459 offset = (insn & 0xff) * 4;
7460 if ((insn & (1 << 23)) == 0)
7461 offset = -offset;
7462 if (insn & (1 << 24)) {
7463 tcg_gen_addi_i32(addr, addr, offset);
7464 offset = 0;
7465 }
7466 if (insn & (1 << 20)) {
7467 /* ldrd */
7468 tmp = gen_ld32(addr, IS_USER(s));
7469 store_reg(s, rs, tmp);
7470 tcg_gen_addi_i32(addr, addr, 4);
7471 tmp = gen_ld32(addr, IS_USER(s));
7472 store_reg(s, rd, tmp);
7473 } else {
7474 /* strd */
7475 tmp = load_reg(s, rs);
7476 gen_st32(tmp, addr, IS_USER(s));
7477 tcg_gen_addi_i32(addr, addr, 4);
7478 tmp = load_reg(s, rd);
7479 gen_st32(tmp, addr, IS_USER(s));
7480 }
7481 if (insn & (1 << 21)) {
7482 /* Base writeback. */
7483 if (rn == 15)
7484 goto illegal_op;
7485 tcg_gen_addi_i32(addr, addr, offset - 4);
7486 store_reg(s, rn, addr);
7487 } else {
7488 dead_tmp(addr);
7489 }
7490 } else if ((insn & (1 << 23)) == 0) {
7491 /* Load/store exclusive word. */
7492 addr = tcg_temp_local_new();
7493 load_reg_var(s, addr, rn);
7494 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7495 if (insn & (1 << 20)) {
7496 gen_load_exclusive(s, rs, 15, addr, 2);
7497 } else {
7498 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7499 }
7500 tcg_temp_free(addr);
7501 } else if ((insn & (1 << 6)) == 0) {
7502 /* Table Branch. */
7503 if (rn == 15) {
7504 addr = new_tmp();
7505 tcg_gen_movi_i32(addr, s->pc);
7506 } else {
7507 addr = load_reg(s, rn);
7508 }
7509 tmp = load_reg(s, rm);
7510 tcg_gen_add_i32(addr, addr, tmp);
7511 if (insn & (1 << 4)) {
7512 /* tbh */
7513 tcg_gen_add_i32(addr, addr, tmp);
7514 dead_tmp(tmp);
7515 tmp = gen_ld16u(addr, IS_USER(s));
7516 } else { /* tbb */
7517 dead_tmp(tmp);
7518 tmp = gen_ld8u(addr, IS_USER(s));
7519 }
7520 dead_tmp(addr);
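/* Table entries are halfword offsets from the PC: double the
   loaded value before adding it to the PC. */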
7521 tcg_gen_shli_i32(tmp, tmp, 1);
7522 tcg_gen_addi_i32(tmp, tmp, s->pc);
7523 store_reg(s, 15, tmp);
7524 } else {
7525 /* Load/store exclusive byte/halfword/doubleword. */
7526 ARCH(7);
7527 op = (insn >> 4) & 0x3;
7528 if (op == 2) {
7529 goto illegal_op;
7530 }
7531 addr = tcg_temp_local_new();
7532 load_reg_var(s, addr, rn);
7533 if (insn & (1 << 20)) {
7534 gen_load_exclusive(s, rs, rd, addr, op);
7535 } else {
7536 gen_store_exclusive(s, rm, rs, rd, addr, op);
7537 }
7538 tcg_temp_free(addr);
7539 }
7540 } else {
7541 /* Load/store multiple, RFE, SRS. */
7542 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
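/* The encodings with bits [24:23] equal are RFE/SRS; the other
   two combinations are LDM/STM. */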
7543 /* Not available in user mode. */
7544 if (IS_USER(s))
7545 goto illegal_op;
7546 if (insn & (1 << 20)) {
7547 /* rfe */
7548 addr = load_reg(s, rn);
7549 if ((insn & (1 << 24)) == 0)
7550 tcg_gen_addi_i32(addr, addr, -8);
7551 /* Load PC into tmp and CPSR into tmp2. */
7552 tmp = gen_ld32(addr, 0);
7553 tcg_gen_addi_i32(addr, addr, 4);
7554 tmp2 = gen_ld32(addr, 0);
7555 if (insn & (1 << 21)) {
7556 /* Base writeback. */
7557 if (insn & (1 << 24)) {
7558 tcg_gen_addi_i32(addr, addr, 4);
7559 } else {
7560 tcg_gen_addi_i32(addr, addr, -4);
7561 }
7562 store_reg(s, rn, addr);
7563 } else {
7564 dead_tmp(addr);
7565 }
7566 gen_rfe(s, tmp, tmp2);
7567 } else {
7568 /* srs */
7569 op = (insn & 0x1f);
7570 addr = new_tmp();
7571 tmp = tcg_const_i32(op);
7572 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7573 tcg_temp_free_i32(tmp);
7574 if ((insn & (1 << 24)) == 0) {
7575 tcg_gen_addi_i32(addr, addr, -8);
7576 }
7577 tmp = load_reg(s, 14);
7578 gen_st32(tmp, addr, 0);
7579 tcg_gen_addi_i32(addr, addr, 4);
7580 tmp = new_tmp();
7581 gen_helper_cpsr_read(tmp);
7582 gen_st32(tmp, addr, 0);
7583 if (insn & (1 << 21)) {
7584 if ((insn & (1 << 24)) == 0) {
7585 tcg_gen_addi_i32(addr, addr, -4);
7586 } else {
7587 tcg_gen_addi_i32(addr, addr, 4);
7588 }
7589 tmp = tcg_const_i32(op);
7590 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7591 tcg_temp_free_i32(tmp);
7592 } else {
7593 dead_tmp(addr);
7594 }
7595 }
7596 } else {
7597 int i;
7598 /* Load/store multiple. */
7599 addr = load_reg(s, rn);
7600 offset = 0;
7601 for (i = 0; i < 16; i++) {
7602 if (insn & (1 << i))
7603 offset += 4;
7604 }
7605 if (insn & (1 << 24)) {
7606 tcg_gen_addi_i32(addr, addr, -offset);
7607 }
7608
7609 for (i = 0; i < 16; i++) {
7610 if ((insn & (1 << i)) == 0)
7611 continue;
7612 if (insn & (1 << 20)) {
7613 /* Load. */
7614 tmp = gen_ld32(addr, IS_USER(s));
7615 if (i == 15) {
7616 gen_bx(s, tmp);
7617 } else {
7618 store_reg(s, i, tmp);
7619 }
7620 } else {
7621 /* Store. */
7622 tmp = load_reg(s, i);
7623 gen_st32(tmp, addr, IS_USER(s));
7624 }
7625 tcg_gen_addi_i32(addr, addr, 4);
7626 }
7627 if (insn & (1 << 21)) {
7628 /* Base register writeback. */
7629 if (insn & (1 << 24)) {
7630 tcg_gen_addi_i32(addr, addr, -offset);
7631 }
7632 /* Fault if writeback register is in register list. */
7633 if (insn & (1 << rn))
7634 goto illegal_op;
7635 store_reg(s, rn, addr);
7636 } else {
7637 dead_tmp(addr);
7638 }
7639 }
7640 }
7641 break;
7642 case 5:
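/* Data-processing (register) with constant shift; op == 6 is
   halfword pack. */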
7643
7644 op = (insn >> 21) & 0xf;
7645 if (op == 6) {
7646 /* Halfword pack. */
7647 tmp = load_reg(s, rn);
7648 tmp2 = load_reg(s, rm);
7649 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7650 if (insn & (1 << 5)) {
7651 /* pkhtb */
7652 if (shift == 0)
7653 shift = 31;
7654 tcg_gen_sari_i32(tmp2, tmp2, shift);
7655 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7656 tcg_gen_ext16u_i32(tmp2, tmp2);
7657 } else {
7658 /* pkhbt */
7659 if (shift)
7660 tcg_gen_shli_i32(tmp2, tmp2, shift);
7661 tcg_gen_ext16u_i32(tmp, tmp);
7662 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7663 }
7664 tcg_gen_or_i32(tmp, tmp, tmp2);
7665 dead_tmp(tmp2);
7666 store_reg(s, rd, tmp);
7667 } else {
7668 /* Data processing register constant shift. */
7669 if (rn == 15) {
7670 tmp = new_tmp();
7671 tcg_gen_movi_i32(tmp, 0);
7672 } else {
7673 tmp = load_reg(s, rn);
7674 }
7675 tmp2 = load_reg(s, rm);
7676
7677 shiftop = (insn >> 4) & 3;
7678 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7679 conds = (insn & (1 << 20)) != 0;
7680 logic_cc = (conds && thumb2_logic_op(op));
7681 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7682 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7683 goto illegal_op;
7684 dead_tmp(tmp2);
7685 if (rd != 15) {
7686 store_reg(s, rd, tmp);
7687 } else {
7688 dead_tmp(tmp);
7689 }
7690 }
7691 break;
7692 case 13: /* Misc data processing. */
7693 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7694 if (op < 4 && (insn & 0xf000) != 0xf000)
7695 goto illegal_op;
7696 switch (op) {
7697 case 0: /* Register controlled shift. */
7698 tmp = load_reg(s, rn);
7699 tmp2 = load_reg(s, rm);
7700 if ((insn & 0x70) != 0)
7701 goto illegal_op;
7702 op = (insn >> 21) & 3;
7703 logic_cc = (insn & (1 << 20)) != 0;
7704 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7705 if (logic_cc)
7706 gen_logic_CC(tmp);
7707 store_reg_bx(env, s, rd, tmp);
7708 break;
7709 case 1: /* Sign/zero extend. */
7710 tmp = load_reg(s, rm);
7711 shift = (insn >> 4) & 3;
7712 /* ??? In many cases it's not necessary to do a
7713 rotate, a shift is sufficient. */
7714 if (shift != 0)
7715 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7716 op = (insn >> 20) & 7;
7717 switch (op) {
7718 case 0: gen_sxth(tmp); break;
7719 case 1: gen_uxth(tmp); break;
7720 case 2: gen_sxtb16(tmp); break;
7721 case 3: gen_uxtb16(tmp); break;
7722 case 4: gen_sxtb(tmp); break;
7723 case 5: gen_uxtb(tmp); break;
7724 default: goto illegal_op;
7725 }
7726 if (rn != 15) {
7727 tmp2 = load_reg(s, rn);
7728 if ((op >> 1) == 1) {
7729 gen_add16(tmp, tmp2);
7730 } else {
7731 tcg_gen_add_i32(tmp, tmp, tmp2);
7732 dead_tmp(tmp2);
7733 }
7734 }
7735 store_reg(s, rd, tmp);
7736 break;
7737 case 2: /* SIMD add/subtract. */
7738 op = (insn >> 20) & 7;
7739 shift = (insn >> 4) & 7;
7740 if ((op & 3) == 3 || (shift & 3) == 3)
7741 goto illegal_op;
7742 tmp = load_reg(s, rn);
7743 tmp2 = load_reg(s, rm);
7744 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7745 dead_tmp(tmp2);
7746 store_reg(s, rd, tmp);
7747 break;
7748 case 3: /* Other data processing. */
7749 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7750 if (op < 4) {
7751 /* Saturating add/subtract. */
7752 tmp = load_reg(s, rn);
7753 tmp2 = load_reg(s, rm);
7754 if (op & 1)
7755 gen_helper_double_saturate(tmp, tmp);
7756 if (op & 2)
7757 gen_helper_sub_saturate(tmp, tmp2, tmp);
7758 else
7759 gen_helper_add_saturate(tmp, tmp, tmp2);
7760 dead_tmp(tmp2);
7761 } else {
7762 tmp = load_reg(s, rn);
7763 switch (op) {
7764 case 0x0a: /* rbit */
7765 gen_helper_rbit(tmp, tmp);
7766 break;
7767 case 0x08: /* rev */
7768 tcg_gen_bswap32_i32(tmp, tmp);
7769 break;
7770 case 0x09: /* rev16 */
7771 gen_rev16(tmp);
7772 break;
7773 case 0x0b: /* revsh */
7774 gen_revsh(tmp);
7775 break;
7776 case 0x10: /* sel */
7777 tmp2 = load_reg(s, rm);
7778 tmp3 = new_tmp();
7779 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7780 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7781 dead_tmp(tmp3);
7782 dead_tmp(tmp2);
7783 break;
7784 case 0x18: /* clz */
7785 gen_helper_clz(tmp, tmp);
7786 break;
7787 default:
7788 goto illegal_op;
7789 }
7790 }
7791 store_reg(s, rd, tmp);
7792 break;
7793 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7794 op = (insn >> 4) & 0xf;
7795 tmp = load_reg(s, rn);
7796 tmp2 = load_reg(s, rm);
7797 switch ((insn >> 20) & 7) {
7798 case 0: /* 32 x 32 -> 32 */
7799 tcg_gen_mul_i32(tmp, tmp, tmp2);
7800 dead_tmp(tmp2);
7801 if (rs != 15) {
7802 tmp2 = load_reg(s, rs);
7803 if (op)
7804 tcg_gen_sub_i32(tmp, tmp2, tmp);
7805 else
7806 tcg_gen_add_i32(tmp, tmp, tmp2);
7807 dead_tmp(tmp2);
7808 }
7809 break;
7810 case 1: /* 16 x 16 -> 32 */
7811 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7812 dead_tmp(tmp2);
7813 if (rs != 15) {
7814 tmp2 = load_reg(s, rs);
7815 gen_helper_add_setq(tmp, tmp, tmp2);
7816 dead_tmp(tmp2);
7817 }
7818 break;
7819 case 2: /* Dual multiply add. */
7820 case 4: /* Dual multiply subtract. */
7821 if (op)
7822 gen_swap_half(tmp2);
7823 gen_smul_dual(tmp, tmp2);
7824 /* This addition cannot overflow. */
7825 if (insn & (1 << 22)) {
7826 tcg_gen_sub_i32(tmp, tmp, tmp2);
7827 } else {
7828 tcg_gen_add_i32(tmp, tmp, tmp2);
7829 }
7830 dead_tmp(tmp2);
7831 if (rs != 15)
7832 {
7833 tmp2 = load_reg(s, rs);
7834 gen_helper_add_setq(tmp, tmp, tmp2);
7835 dead_tmp(tmp2);
7836 }
7837 break;
7838 case 3: /* 32 * 16 -> 32msb */
7839 if (op)
7840 tcg_gen_sari_i32(tmp2, tmp2, 16);
7841 else
7842 gen_sxth(tmp2);
7843 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7844 tcg_gen_shri_i64(tmp64, tmp64, 16);
7845 tmp = new_tmp();
7846 tcg_gen_trunc_i64_i32(tmp, tmp64);
7847 tcg_temp_free_i64(tmp64);
7848 if (rs != 15)
7849 {
7850 tmp2 = load_reg(s, rs);
7851 gen_helper_add_setq(tmp, tmp, tmp2);
7852 dead_tmp(tmp2);
7853 }
7854 break;
7855 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7856 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7857 if (rs != 15) {
7858 tmp = load_reg(s, rs);
7859 if (insn & (1 << 20)) {
7860 tmp64 = gen_addq_msw(tmp64, tmp);
7861 } else {
7862 tmp64 = gen_subq_msw(tmp64, tmp);
7863 }
7864 }
7865 if (insn & (1 << 4)) {
7866 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7867 }
7868 tcg_gen_shri_i64(tmp64, tmp64, 32);
7869 tmp = new_tmp();
7870 tcg_gen_trunc_i64_i32(tmp, tmp64);
7871 tcg_temp_free_i64(tmp64);
7872 break;
7873 case 7: /* Unsigned sum of absolute differences. */
7874 gen_helper_usad8(tmp, tmp, tmp2);
7875 dead_tmp(tmp2);
7876 if (rs != 15) {
7877 tmp2 = load_reg(s, rs);
7878 tcg_gen_add_i32(tmp, tmp, tmp2);
7879 dead_tmp(tmp2);
7880 }
7881 break;
7882 }
7883 store_reg(s, rd, tmp);
7884 break;
7885 case 6: case 7: /* 64-bit multiply, Divide. */
7886 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7887 tmp = load_reg(s, rn);
7888 tmp2 = load_reg(s, rm);
7889 if ((op & 0x50) == 0x10) {
7890 /* sdiv, udiv */
7891 if (!arm_feature(env, ARM_FEATURE_DIV))
7892 goto illegal_op;
7893 if (op & 0x20)
7894 gen_helper_udiv(tmp, tmp, tmp2);
7895 else
7896 gen_helper_sdiv(tmp, tmp, tmp2);
7897 dead_tmp(tmp2);
7898 store_reg(s, rd, tmp);
7899 } else if ((op & 0xe) == 0xc) {
7900 /* Dual multiply accumulate long. */
7901 if (op & 1)
7902 gen_swap_half(tmp2);
7903 gen_smul_dual(tmp, tmp2);
7904 if (op & 0x10) {
7905 tcg_gen_sub_i32(tmp, tmp, tmp2);
7906 } else {
7907 tcg_gen_add_i32(tmp, tmp, tmp2);
7908 }
7909 dead_tmp(tmp2);
7910 /* BUGFIX: sign-extend to 64 bits before the long accumulate. */
7911 tmp64 = tcg_temp_new_i64();
7912 tcg_gen_ext_i32_i64(tmp64, tmp);
7913 dead_tmp(tmp);
7914 gen_addq(s, tmp64, rs, rd);
7915 gen_storeq_reg(s, rs, rd, tmp64);
7916 tcg_temp_free_i64(tmp64);
7917 } else {
7918 if (op & 0x20) {
7919 /* Unsigned 64-bit multiply */
7920 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7921 } else {
7922 if (op & 8) {
7923 /* smlalxy */
7924 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7925 dead_tmp(tmp2);
7926 tmp64 = tcg_temp_new_i64();
7927 tcg_gen_ext_i32_i64(tmp64, tmp);
7928 dead_tmp(tmp);
7929 } else {
7930 /* Signed 64-bit multiply */
7931 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7932 }
7933 }
7934 if (op & 4) {
7935 /* umaal */
7936 gen_addq_lo(s, tmp64, rs);
7937 gen_addq_lo(s, tmp64, rd);
7938 } else if (op & 0x40) {
7939 /* 64-bit accumulate. */
7940 gen_addq(s, tmp64, rs, rd);
7941 }
7942 gen_storeq_reg(s, rs, rd, tmp64);
7943 tcg_temp_free_i64(tmp64);
7944 }
7945 break;
7946 }
7947 break;
7948 case 6: case 7: case 14: case 15:
7949 /* Coprocessor. */
7950 if (((insn >> 24) & 3) == 3) {
7951 /* Translate into the equivalent ARM encoding. */
7952 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7953 if (disas_neon_data_insn(env, s, insn))
7954 goto illegal_op;
7955 } else {
7956 if (insn & (1 << 28))
7957 goto illegal_op;
7958 if (disas_coproc_insn (env, s, insn))
7959 goto illegal_op;
7960 }
7961 break;
7962 case 8: case 9: case 10: case 11:
7963 if (insn & (1 << 15)) {
7964 /* Branches, misc control. */
7965 if (insn & 0x5000) {
7966 /* Unconditional branch. */
7967 /* signextend(hw1[10:0]) -> offset[31:12]. */
7968 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7969 /* hw2[10:0] -> offset[11:1]. */
7970 offset |= (insn & 0x7ff) << 1;
7971 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7972 offset[24:22] already have the same value because of the
7973 sign extension above. */
7974 offset ^= ((~insn) & (1 << 13)) << 10;
7975 offset ^= ((~insn) & (1 << 11)) << 11;
7976
7977 if (insn & (1 << 14)) {
7978 /* Branch and link. */
7979 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
7980 }
7981
7982 offset += s->pc;
7983 if (insn & (1 << 12)) {
7984 /* b/bl */
7985 gen_jmp(s, offset);
7986 } else {
7987 /* blx */
7988 offset &= ~(uint32_t)2;
7989 gen_bx_im(s, offset);
7990 }
7991 } else if (((insn >> 23) & 7) == 7) {
7992 /* Misc control */
7993 if (insn & (1 << 13))
7994 goto illegal_op;
7995
7996 if (insn & (1 << 26)) {
7997 /* Secure monitor call (v6Z) */
7998 goto illegal_op; /* not implemented. */
7999 } else {
8000 op = (insn >> 20) & 7;
8001 switch (op) {
8002 case 0: /* msr cpsr. */
8003 if (IS_M(env)) {
8004 tmp = load_reg(s, rn);
8005 addr = tcg_const_i32(insn & 0xff);
8006 gen_helper_v7m_msr(cpu_env, addr, tmp);
8007 tcg_temp_free_i32(addr);
8008 dead_tmp(tmp);
8009 gen_lookup_tb(s);
8010 break;
8011 }
8012 /* fall through */
8013 case 1: /* msr spsr. */
8014 if (IS_M(env))
8015 goto illegal_op;
8016 tmp = load_reg(s, rn);
8017 if (gen_set_psr(s,
8018 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8019 op == 1, tmp))
8020 goto illegal_op;
8021 break;
8022 case 2: /* cps, nop-hint. */
8023 if (((insn >> 8) & 7) == 0) {
8024 gen_nop_hint(s, insn & 0xff);
8025 }
8026 /* Implemented as NOP in user mode. */
8027 if (IS_USER(s))
8028 break;
8029 offset = 0;
8030 imm = 0;
8031 if (insn & (1 << 10)) {
8032 if (insn & (1 << 7))
8033 offset |= CPSR_A;
8034 if (insn & (1 << 6))
8035 offset |= CPSR_I;
8036 if (insn & (1 << 5))
8037 offset |= CPSR_F;
8038 if (insn & (1 << 9))
8039 imm = CPSR_A | CPSR_I | CPSR_F;
8040 }
8041 if (insn & (1 << 8)) {
8042 offset |= 0x1f;
8043 imm |= (insn & 0x1f);
8044 }
8045 if (offset) {
8046 gen_set_psr_im(s, offset, 0, imm);
8047 }
8048 break;
8049 case 3: /* Special control operations. */
8050 ARCH(7);
8051 op = (insn >> 4) & 0xf;
8052 switch (op) {
8053 case 2: /* clrex */
8054 gen_clrex(s);
8055 break;
8056 case 4: /* dsb */
8057 case 5: /* dmb */
8058 case 6: /* isb */
8059 /* These execute as NOPs. */
8060 break;
8061 default:
8062 goto illegal_op;
8063 }
8064 break;
8065 case 4: /* bxj */
8066 /* Trivial implementation equivalent to bx. */
8067 tmp = load_reg(s, rn);
8068 gen_bx(s, tmp);
8069 break;
8070 case 5: /* Exception return. */
8071 if (IS_USER(s)) {
8072 goto illegal_op;
8073 }
8074 if (rn != 14 || rd != 15) {
8075 goto illegal_op;
8076 }
8077 tmp = load_reg(s, rn);
8078 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8079 gen_exception_return(s, tmp);
8080 break;
8081 case 6: /* mrs cpsr. */
8082 tmp = new_tmp();
8083 if (IS_M(env)) {
8084 addr = tcg_const_i32(insn & 0xff);
8085 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8086 tcg_temp_free_i32(addr);
8087 } else {
8088 gen_helper_cpsr_read(tmp);
8089 }
8090 store_reg(s, rd, tmp);
8091 break;
8092 case 7: /* mrs spsr. */
8093 /* Not accessible in user mode. */
8094 if (IS_USER(s) || IS_M(env))
8095 goto illegal_op;
8096 tmp = load_cpu_field(spsr);
8097 store_reg(s, rd, tmp);
8098 break;
8099 }
8100 }
8101 } else {
8102 /* Conditional branch. */
8103 op = (insn >> 22) & 0xf;
8104 /* Generate a conditional jump to next instruction. */
8105 s->condlabel = gen_new_label();
8106 gen_test_cc(op ^ 1, s->condlabel);
8107 s->condjmp = 1;
8108
8109 /* offset[11:1] = insn[10:0] */
8110 offset = (insn & 0x7ff) << 1;
8111 /* offset[17:12] = insn[21:16]. */
8112 offset |= (insn & 0x003f0000) >> 4;
8113 /* offset[31:20] = insn[26]. */
8114 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8115 /* offset[18] = insn[13]. */
8116 offset |= (insn & (1 << 13)) << 5;
8117 /* offset[19] = insn[11]. */
8118 offset |= (insn & (1 << 11)) << 8;
8119
8120 /* jump to the offset */
8121 gen_jmp(s, s->pc + offset);
8122 }
8123 } else {
8124 /* Data processing immediate. */
8125 if (insn & (1 << 25)) {
8126 if (insn & (1 << 24)) {
8127 if (insn & (1 << 20))
8128 goto illegal_op;
8129 /* Bitfield/Saturate. */
8130 op = (insn >> 21) & 7;
8131 imm = insn & 0x1f;
8132 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8133 if (rn == 15) {
8134 tmp = new_tmp();
8135 tcg_gen_movi_i32(tmp, 0);
8136 } else {
8137 tmp = load_reg(s, rn);
8138 }
8139 switch (op) {
8140 case 2: /* Signed bitfield extract. */
8141 imm++;
8142 if (shift + imm > 32)
8143 goto illegal_op;
8144 if (imm < 32)
8145 gen_sbfx(tmp, shift, imm);
8146 break;
8147 case 6: /* Unsigned bitfield extract. */
8148 imm++;
8149 if (shift + imm > 32)
8150 goto illegal_op;
8151 if (imm < 32)
8152 gen_ubfx(tmp, shift, (1u << imm) - 1);
8153 break;
8154 case 3: /* Bitfield insert/clear. */
8155 if (imm < shift)
8156 goto illegal_op;
8157 imm = imm + 1 - shift;
8158 if (imm != 32) {
8159 tmp2 = load_reg(s, rd);
8160 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8161 dead_tmp(tmp2);
8162 }
8163 break;
8164 case 7:
8165 goto illegal_op;
8166 default: /* Saturate. */
8167 if (shift) {
8168 if (op & 1)
8169 tcg_gen_sari_i32(tmp, tmp, shift);
8170 else
8171 tcg_gen_shli_i32(tmp, tmp, shift);
8172 }
8173 tmp2 = tcg_const_i32(imm);
8174 if (op & 4) {
8175 /* Unsigned. */
8176 if ((op & 1) && shift == 0)
8177 gen_helper_usat16(tmp, tmp, tmp2);
8178 else
8179 gen_helper_usat(tmp, tmp, tmp2);
8180 } else {
8181 /* Signed. */
8182 if ((op & 1) && shift == 0)
8183 gen_helper_ssat16(tmp, tmp, tmp2);
8184 else
8185 gen_helper_ssat(tmp, tmp, tmp2);
8186 }
8187 tcg_temp_free_i32(tmp2);
8188 break;
8189 }
8190 store_reg(s, rd, tmp);
8191 } else {
8192 imm = ((insn & 0x04000000) >> 15)
8193 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8194 if (insn & (1 << 22)) {
8195 /* 16-bit immediate. */
8196 imm |= (insn >> 4) & 0xf000;
8197 if (insn & (1 << 23)) {
8198 /* movt */
8199 tmp = load_reg(s, rd);
8200 tcg_gen_ext16u_i32(tmp, tmp);
8201 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8202 } else {
8203 /* movw */
8204 tmp = new_tmp();
8205 tcg_gen_movi_i32(tmp, imm);
8206 }
8207 } else {
8208 /* Add/sub 12-bit immediate. */
8209 if (rn == 15) {
8210 offset = s->pc & ~(uint32_t)3;
8211 if (insn & (1 << 23))
8212 offset -= imm;
8213 else
8214 offset += imm;
8215 tmp = new_tmp();
8216 tcg_gen_movi_i32(tmp, offset);
8217 } else {
8218 tmp = load_reg(s, rn);
8219 if (insn & (1 << 23))
8220 tcg_gen_subi_i32(tmp, tmp, imm);
8221 else
8222 tcg_gen_addi_i32(tmp, tmp, imm);
8223 }
8224 }
8225 store_reg(s, rd, tmp);
8226 }
8227 } else {
8228 int shifter_out = 0;
8229 /* modified 12-bit immediate. */
8230 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8231 imm = (insn & 0xff);
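/* Thumb-2 modified immediate: control values 0-3 replicate the
   byte pattern; anything larger is an 8-bit constant (bit 7
   forced set) rotated into place. */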
8232 switch (shift) {
8233 case 0: /* XY */
8234 /* Nothing to do. */
8235 break;
8236 case 1: /* 00XY00XY */
8237 imm |= imm << 16;
8238 break;
8239 case 2: /* XY00XY00 */
8240 imm |= imm << 16;
8241 imm <<= 8;
8242 break;
8243 case 3: /* XYXYXYXY */
8244 imm |= imm << 16;
8245 imm |= imm << 8;
8246 break;
8247 default: /* Rotated constant. */
8248 shift = (shift << 1) | (imm >> 7);
8249 imm |= 0x80;
8250 imm = imm << (32 - shift);
8251 shifter_out = 1;
8252 break;
8253 }
8254 tmp2 = new_tmp();
8255 tcg_gen_movi_i32(tmp2, imm);
8256 rn = (insn >> 16) & 0xf;
8257 if (rn == 15) {
8258 tmp = new_tmp();
8259 tcg_gen_movi_i32(tmp, 0);
8260 } else {
8261 tmp = load_reg(s, rn);
8262 }
8263 op = (insn >> 21) & 0xf;
8264 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8265 shifter_out, tmp, tmp2))
8266 goto illegal_op;
8267 dead_tmp(tmp2);
8268 rd = (insn >> 8) & 0xf;
8269 if (rd != 15) {
8270 store_reg(s, rd, tmp);
8271 } else {
8272 dead_tmp(tmp);
8273 }
8274 }
8275 }
8276 break;
8277 case 12: /* Load/store single data item. */
8278 {
8279 int postinc = 0;
8280 int writeback = 0;
8281 int user;
8282 if ((insn & 0x01100000) == 0x01000000) {
8283 if (disas_neon_ls_insn(env, s, insn))
8284 goto illegal_op;
8285 break;
8286 }
8287 user = IS_USER(s);
8288 if (rn == 15) {
8289 addr = new_tmp();
8290 /* PC relative. */
8291 /* s->pc has already been incremented by 4. */
8292 imm = s->pc & 0xfffffffc;
8293 if (insn & (1 << 23))
8294 imm += insn & 0xfff;
8295 else
8296 imm -= insn & 0xfff;
8297 tcg_gen_movi_i32(addr, imm);
8298 } else {
8299 addr = load_reg(s, rn);
8300 if (insn & (1 << 23)) {
8301 /* Positive offset. */
8302 imm = insn & 0xfff;
8303 tcg_gen_addi_i32(addr, addr, imm);
8304 } else {
8305 op = (insn >> 8) & 7;
8306 imm = insn & 0xff;
8307 switch (op) {
8308 case 0: case 8: /* Shifted Register. */
8309 shift = (insn >> 4) & 0xf;
8310 if (shift > 3)
8311 goto illegal_op;
8312 tmp = load_reg(s, rm);
8313 if (shift)
8314 tcg_gen_shli_i32(tmp, tmp, shift);
8315 tcg_gen_add_i32(addr, addr, tmp);
8316 dead_tmp(tmp);
8317 break;
8318 case 4: /* Negative offset. */
8319 tcg_gen_addi_i32(addr, addr, -imm);
8320 break;
8321 case 6: /* User privilege. */
8322 tcg_gen_addi_i32(addr, addr, imm);
8323 user = 1;
8324 break;
8325 case 1: /* Post-decrement. */
8326 imm = -imm;
8327 /* Fall through. */
8328 case 3: /* Post-increment. */
8329 postinc = 1;
8330 writeback = 1;
8331 break;
8332 case 5: /* Pre-decrement. */
8333 imm = -imm;
8334 /* Fall through. */
8335 case 7: /* Pre-increment. */
8336 tcg_gen_addi_i32(addr, addr, imm);
8337 writeback = 1;
8338 break;
8339 default:
8340 goto illegal_op;
8341 }
8342 }
8343 }
8344 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
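/* op[1:0] is the size (0 = byte, 1 = halfword, 2 = word);
   op[2] selects a sign-extending load. */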
8345 if (insn & (1 << 20)) {
8346 /* Load. */
8347 if (rs == 15 && op != 2) {
8348 if (op & 2)
8349 goto illegal_op;
8350 /* Memory hint. Implemented as NOP. */
8351 } else {
8352 switch (op) {
8353 case 0: tmp = gen_ld8u(addr, user); break;
8354 case 4: tmp = gen_ld8s(addr, user); break;
8355 case 1: tmp = gen_ld16u(addr, user); break;
8356 case 5: tmp = gen_ld16s(addr, user); break;
8357 case 2: tmp = gen_ld32(addr, user); break;
8358 default: goto illegal_op;
8359 }
8360 if (rs == 15) {
8361 gen_bx(s, tmp);
8362 } else {
8363 store_reg(s, rs, tmp);
8364 }
8365 }
8366 } else {
8367 /* Store. */
8368 if (rs == 15)
8369 goto illegal_op;
8370 tmp = load_reg(s, rs);
8371 switch (op) {
8372 case 0: gen_st8(tmp, addr, user); break;
8373 case 1: gen_st16(tmp, addr, user); break;
8374 case 2: gen_st32(tmp, addr, user); break;
8375 default: goto illegal_op;
8376 }
8377 }
8378 if (postinc)
8379 tcg_gen_addi_i32(addr, addr, imm);
8380 if (writeback) {
8381 store_reg(s, rn, addr);
8382 } else {
8383 dead_tmp(addr);
8384 }
8385 }
8386 break;
8387 default:
8388 goto illegal_op;
8389 }
8390 return 0;
8391 illegal_op:
8392 return 1;
8393 }
8394
8395 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8396 {
8397 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8398 int32_t offset;
8399 int i;
8400 TCGv tmp;
8401 TCGv tmp2;
8402 TCGv addr;
8403
8404 if (s->condexec_mask) {
8405 cond = s->condexec_cond;
8406 if (cond != 0x0e) { /* The AL condition needs no conditional skip. */
8407 s->condlabel = gen_new_label();
8408 gen_test_cc(cond ^ 1, s->condlabel);
8409 s->condjmp = 1;
8410 }
8411 }
8412
8413 insn = lduw_code(s->pc);
8414 s->pc += 2;
8415
8416 switch (insn >> 12) {
8417 case 0: case 1:
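/* Shift by immediate (op < 3) or add/subtract (op == 3). */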
8418
8419 rd = insn & 7;
8420 op = (insn >> 11) & 3;
8421 if (op == 3) {
8422 /* add/subtract */
8423 rn = (insn >> 3) & 7;
8424 tmp = load_reg(s, rn);
8425 if (insn & (1 << 10)) {
8426 /* immediate */
8427 tmp2 = new_tmp();
8428 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8429 } else {
8430 /* reg */
8431 rm = (insn >> 6) & 7;
8432 tmp2 = load_reg(s, rm);
8433 }
8434 if (insn & (1 << 9)) {
8435 if (s->condexec_mask)
8436 tcg_gen_sub_i32(tmp, tmp, tmp2);
8437 else
8438 gen_helper_sub_cc(tmp, tmp, tmp2);
8439 } else {
8440 if (s->condexec_mask)
8441 tcg_gen_add_i32(tmp, tmp, tmp2);
8442 else
8443 gen_helper_add_cc(tmp, tmp, tmp2);
8444 }
8445 dead_tmp(tmp2);
8446 store_reg(s, rd, tmp);
8447 } else {
8448 /* shift immediate */
8449 rm = (insn >> 3) & 7;
8450 shift = (insn >> 6) & 0x1f;
8451 tmp = load_reg(s, rm);
8452 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8453 if (!s->condexec_mask)
8454 gen_logic_CC(tmp);
8455 store_reg(s, rd, tmp);
8456 }
8457 break;
8458 case 2: case 3:
8459 /* arithmetic large immediate */
8460 op = (insn >> 11) & 3;
8461 rd = (insn >> 8) & 0x7;
8462 if (op == 0) { /* mov */
8463 tmp = new_tmp();
8464 tcg_gen_movi_i32(tmp, insn & 0xff);
8465 if (!s->condexec_mask)
8466 gen_logic_CC(tmp);
8467 store_reg(s, rd, tmp);
8468 } else {
8469 tmp = load_reg(s, rd);
8470 tmp2 = new_tmp();
8471 tcg_gen_movi_i32(tmp2, insn & 0xff);
8472 switch (op) {
8473 case 1: /* cmp */
8474 gen_helper_sub_cc(tmp, tmp, tmp2);
8475 dead_tmp(tmp);
8476 dead_tmp(tmp2);
8477 break;
8478 case 2: /* add */
8479 if (s->condexec_mask)
8480 tcg_gen_add_i32(tmp, tmp, tmp2);
8481 else
8482 gen_helper_add_cc(tmp, tmp, tmp2);
8483 dead_tmp(tmp2);
8484 store_reg(s, rd, tmp);
8485 break;
8486 case 3: /* sub */
8487 if (s->condexec_mask)
8488 tcg_gen_sub_i32(tmp, tmp, tmp2);
8489 else
8490 gen_helper_sub_cc(tmp, tmp, tmp2);
8491 dead_tmp(tmp2);
8492 store_reg(s, rd, tmp);
8493 break;
8494 }
8495 }
8496 break;
8497 case 4:
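/* PC-relative load, hi-register operations/branch exchange, or
   data-processing register formats. */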
8498 if (insn & (1 << 11)) {
8499 rd = (insn >> 8) & 7;
8500 /* load pc-relative. Bit 1 of PC is ignored. */
8501 val = s->pc + 2 + ((insn & 0xff) * 4);
8502 val &= ~(uint32_t)2;
8503 addr = new_tmp();
8504 tcg_gen_movi_i32(addr, val);
8505 tmp = gen_ld32(addr, IS_USER(s));
8506 dead_tmp(addr);
8507 store_reg(s, rd, tmp);
8508 break;
8509 }
8510 if (insn & (1 << 10)) {
8511 /* data processing extended or blx */
8512 rd = (insn & 7) | ((insn >> 4) & 8);
8513 rm = (insn >> 3) & 0xf;
8514 op = (insn >> 8) & 3;
8515 switch (op) {
8516 case 0: /* add */
8517 tmp = load_reg(s, rd);
8518 tmp2 = load_reg(s, rm);
8519 tcg_gen_add_i32(tmp, tmp, tmp2);
8520 dead_tmp(tmp2);
8521 store_reg(s, rd, tmp);
8522 break;
8523 case 1: /* cmp */
8524 tmp = load_reg(s, rd);
8525 tmp2 = load_reg(s, rm);
8526 gen_helper_sub_cc(tmp, tmp, tmp2);
8527 dead_tmp(tmp2);
8528 dead_tmp(tmp);
8529 break;
8530 case 2: /* mov/cpy */
8531 tmp = load_reg(s, rm);
8532 store_reg(s, rd, tmp);
8533 break;
8534 case 3:/* branch [and link] exchange thumb register */
8535 tmp = load_reg(s, rm);
8536 if (insn & (1 << 7)) {
8537 val = (uint32_t)s->pc | 1;
8538 tmp2 = new_tmp();
8539 tcg_gen_movi_i32(tmp2, val);
8540 store_reg(s, 14, tmp2);
8541 }
8542 gen_bx(s, tmp);
8543 break;
8544 }
8545 break;
8546 }
8547
8548 /* data processing register */
8549 rd = insn & 7;
8550 rm = (insn >> 3) & 7;
8551 op = (insn >> 6) & 0xf;
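/* Thumb ALU ops: 0=AND 1=EOR 2=LSL 3=LSR 4=ASR 5=ADC 6=SBC 7=ROR
   8=TST 9=NEG a=CMP b=CMN c=ORR d=MUL e=BIC f=MVN */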
8552 if (op == 2 || op == 3 || op == 4 || op == 7) {
8553 /* the shift/rotate ops want the operands backwards */
8554 val = rm;
8555 rm = rd;
8556 rd = val;
8557 val = 1;
8558 } else {
8559 val = 0;
8560 }
8561
8562 if (op == 9) { /* neg */
8563 tmp = new_tmp();
8564 tcg_gen_movi_i32(tmp, 0);
8565 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8566 tmp = load_reg(s, rd);
8567 } else {
8568 TCGV_UNUSED(tmp);
8569 }
8570
8571 tmp2 = load_reg(s, rm);
8572 switch (op) {
8573 case 0x0: /* and */
8574 tcg_gen_and_i32(tmp, tmp, tmp2);
8575 if (!s->condexec_mask)
8576 gen_logic_CC(tmp);
8577 break;
8578 case 0x1: /* eor */
8579 tcg_gen_xor_i32(tmp, tmp, tmp2);
8580 if (!s->condexec_mask)
8581 gen_logic_CC(tmp);
8582 break;
8583 case 0x2: /* lsl */
8584 if (s->condexec_mask) {
8585 gen_helper_shl(tmp2, tmp2, tmp);
8586 } else {
8587 gen_helper_shl_cc(tmp2, tmp2, tmp);
8588 gen_logic_CC(tmp2);
8589 }
8590 break;
8591 case 0x3: /* lsr */
8592 if (s->condexec_mask) {
8593 gen_helper_shr(tmp2, tmp2, tmp);
8594 } else {
8595 gen_helper_shr_cc(tmp2, tmp2, tmp);
8596 gen_logic_CC(tmp2);
8597 }
8598 break;
8599 case 0x4: /* asr */
8600 if (s->condexec_mask) {
8601 gen_helper_sar(tmp2, tmp2, tmp);
8602 } else {
8603 gen_helper_sar_cc(tmp2, tmp2, tmp);
8604 gen_logic_CC(tmp2);
8605 }
8606 break;
8607 case 0x5: /* adc */
8608 if (s->condexec_mask)
8609 gen_adc(tmp, tmp2);
8610 else
8611 gen_helper_adc_cc(tmp, tmp, tmp2);
8612 break;
8613 case 0x6: /* sbc */
8614 if (s->condexec_mask)
8615 gen_sub_carry(tmp, tmp, tmp2);
8616 else
8617 gen_helper_sbc_cc(tmp, tmp, tmp2);
8618 break;
8619 case 0x7: /* ror */
8620 if (s->condexec_mask) {
8621 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8622 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
8623 } else {
8624 gen_helper_ror_cc(tmp2, tmp2, tmp);
8625 gen_logic_CC(tmp2);
8626 }
8627 break;
8628 case 0x8: /* tst */
8629 tcg_gen_and_i32(tmp, tmp, tmp2);
8630 gen_logic_CC(tmp);
8631 rd = 16;
8632 break;
8633 case 0x9: /* neg */
8634 if (s->condexec_mask)
8635 tcg_gen_neg_i32(tmp, tmp2);
8636 else
8637 gen_helper_sub_cc(tmp, tmp, tmp2);
8638 break;
8639 case 0xa: /* cmp */
8640 gen_helper_sub_cc(tmp, tmp, tmp2);
8641 rd = 16;
8642 break;
8643 case 0xb: /* cmn */
8644 gen_helper_add_cc(tmp, tmp, tmp2);
8645 rd = 16;
8646 break;
8647 case 0xc: /* orr */
8648 tcg_gen_or_i32(tmp, tmp, tmp2);
8649 if (!s->condexec_mask)
8650 gen_logic_CC(tmp);
8651 break;
8652 case 0xd: /* mul */
8653 tcg_gen_mul_i32(tmp, tmp, tmp2);
8654 if (!s->condexec_mask)
8655 gen_logic_CC(tmp);
8656 break;
8657 case 0xe: /* bic */
8658 tcg_gen_andc_i32(tmp, tmp, tmp2);
8659 if (!s->condexec_mask)
8660 gen_logic_CC(tmp);
8661 break;
8662 case 0xf: /* mvn */
8663 tcg_gen_not_i32(tmp2, tmp2);
8664 if (!s->condexec_mask)
8665 gen_logic_CC(tmp2);
8666 val = 1;
8667 rm = rd;
8668 break;
8669 }
8670 if (rd != 16) {
8671 if (val) {
8672 store_reg(s, rm, tmp2);
8673 if (op != 0xf)
8674 dead_tmp(tmp);
8675 } else {
8676 store_reg(s, rd, tmp);
8677 dead_tmp(tmp2);
8678 }
8679 } else {
8680 dead_tmp(tmp);
8681 dead_tmp(tmp2);
8682 }
8683 break;
8684
8685 case 5:
8686 /* load/store register offset. */
8687 rd = insn & 7;
8688 rn = (insn >> 3) & 7;
8689 rm = (insn >> 6) & 7;
8690 op = (insn >> 9) & 7;
8691 addr = load_reg(s, rn);
8692 tmp = load_reg(s, rm);
8693 tcg_gen_add_i32(addr, addr, tmp);
8694 dead_tmp(tmp);
8695
8696 if (op < 3) /* store */
8697 tmp = load_reg(s, rd);
8698
8699 switch (op) {
8700 case 0: /* str */
8701 gen_st32(tmp, addr, IS_USER(s));
8702 break;
8703 case 1: /* strh */
8704 gen_st16(tmp, addr, IS_USER(s));
8705 break;
8706 case 2: /* strb */
8707 gen_st8(tmp, addr, IS_USER(s));
8708 break;
8709 case 3: /* ldrsb */
8710 tmp = gen_ld8s(addr, IS_USER(s));
8711 break;
8712 case 4: /* ldr */
8713 tmp = gen_ld32(addr, IS_USER(s));
8714 break;
8715 case 5: /* ldrh */
8716 tmp = gen_ld16u(addr, IS_USER(s));
8717 break;
8718 case 6: /* ldrb */
8719 tmp = gen_ld8u(addr, IS_USER(s));
8720 break;
8721 case 7: /* ldrsh */
8722 tmp = gen_ld16s(addr, IS_USER(s));
8723 break;
8724 }
8725 if (op >= 3) /* load */
8726 store_reg(s, rd, tmp);
8727 dead_tmp(addr);
8728 break;
8729
8730 case 6:
8731 /* load/store word immediate offset */
8732 rd = insn & 7;
8733 rn = (insn >> 3) & 7;
8734 addr = load_reg(s, rn);
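/* The 5-bit immediate in bits 10:6 is scaled by 4 for word access;
   (insn >> 4) & 0x7c folds the field extract and the << 2 into one
   shift-and-mask. */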
8735 val = (insn >> 4) & 0x7c;
8736 tcg_gen_addi_i32(addr, addr, val);
8737
8738 if (insn & (1 << 11)) {
8739 /* load */
8740 tmp = gen_ld32(addr, IS_USER(s));
8741 store_reg(s, rd, tmp);
8742 } else {
8743 /* store */
8744 tmp = load_reg(s, rd);
8745 gen_st32(tmp, addr, IS_USER(s));
8746 }
8747 dead_tmp(addr);
8748 break;
8749
8750 case 7:
8751 /* load/store byte immediate offset */
8752 rd = insn & 7;
8753 rn = (insn >> 3) & 7;
8754 addr = load_reg(s, rn);
8755 val = (insn >> 6) & 0x1f;
8756 tcg_gen_addi_i32(addr, addr, val);
8757
8758 if (insn & (1 << 11)) {
8759 /* load */
8760 tmp = gen_ld8u(addr, IS_USER(s));
8761 store_reg(s, rd, tmp);
8762 } else {
8763 /* store */
8764 tmp = load_reg(s, rd);
8765 gen_st8(tmp, addr, IS_USER(s));
8766 }
8767 dead_tmp(addr);
8768 break;
8769
8770 case 8:
8771 /* load/store halfword immediate offset */
8772 rd = insn & 7;
8773 rn = (insn >> 3) & 7;
8774 addr = load_reg(s, rn);
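/* Likewise for halfwords: (insn >> 5) & 0x3e is the bits 10:6
   immediate scaled by 2. */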
8775 val = (insn >> 5) & 0x3e;
8776 tcg_gen_addi_i32(addr, addr, val);
8777
8778 if (insn & (1 << 11)) {
8779 /* load */
8780 tmp = gen_ld16u(addr, IS_USER(s));
8781 store_reg(s, rd, tmp);
8782 } else {
8783 /* store */
8784 tmp = load_reg(s, rd);
8785 gen_st16(tmp, addr, IS_USER(s));
8786 }
8787 dead_tmp(addr);
8788 break;
8789
8790 case 9:
8791 /* load/store from stack */
8792 rd = (insn >> 8) & 7;
8793 addr = load_reg(s, 13);
8794 val = (insn & 0xff) * 4;
8795 tcg_gen_addi_i32(addr, addr, val);
8796
8797 if (insn & (1 << 11)) {
8798 /* load */
8799 tmp = gen_ld32(addr, IS_USER(s));
8800 store_reg(s, rd, tmp);
8801 } else {
8802 /* store */
8803 tmp = load_reg(s, rd);
8804 gen_st32(tmp, addr, IS_USER(s));
8805 }
8806 dead_tmp(addr);
8807 break;
8808
8809 case 10:
8810 /* add to high reg */
8811 rd = (insn >> 8) & 7;
8812 if (insn & (1 << 11)) {
8813 /* SP */
8814 tmp = load_reg(s, 13);
8815 } else {
8816             /* PC. Bit 1 is ignored. */
8817 tmp = new_tmp();
8818 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8819 }
8820 val = (insn & 0xff) * 4;
8821 tcg_gen_addi_i32(tmp, tmp, val);
8822 store_reg(s, rd, tmp);
8823 break;
8824
8825 case 11:
8826 /* misc */
8827 op = (insn >> 8) & 0xf;
8828 switch (op) {
8829 case 0:
8830 /* adjust stack pointer */
8831 tmp = load_reg(s, 13);
8832 val = (insn & 0x7f) * 4;
8833 if (insn & (1 << 7))
8834 val = -(int32_t)val;
8835 tcg_gen_addi_i32(tmp, tmp, val);
8836 store_reg(s, 13, tmp);
8837 break;
8838
8839 case 2: /* sign/zero extend. */
8840 ARCH(6);
8841 rd = insn & 7;
8842 rm = (insn >> 3) & 7;
8843 tmp = load_reg(s, rm);
8844 switch ((insn >> 6) & 3) {
8845 case 0: gen_sxth(tmp); break;
8846 case 1: gen_sxtb(tmp); break;
8847 case 2: gen_uxth(tmp); break;
8848 case 3: gen_uxtb(tmp); break;
8849 }
8850 store_reg(s, rd, tmp);
8851 break;
8852 case 4: case 5: case 0xc: case 0xd:
8853 /* push/pop */
8854 addr = load_reg(s, 13);
8855 if (insn & (1 << 8))
8856 offset = 4;
8857 else
8858 offset = 0;
8859 for (i = 0; i < 8; i++) {
8860 if (insn & (1 << i))
8861 offset += 4;
8862 }
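/* offset is now the total transfer size: 4 bytes per register in
   the list, plus 4 for lr/pc when bit 8 is set. E.g. PUSH
   {r0, r4-r7, lr} (insn 0xb5f1) gives offset = 6 * 4 = 24. */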
8863 if ((insn & (1 << 11)) == 0) {
8864 tcg_gen_addi_i32(addr, addr, -offset);
8865 }
8866 for (i = 0; i < 8; i++) {
8867 if (insn & (1 << i)) {
8868 if (insn & (1 << 11)) {
8869 /* pop */
8870 tmp = gen_ld32(addr, IS_USER(s));
8871 store_reg(s, i, tmp);
8872 } else {
8873 /* push */
8874 tmp = load_reg(s, i);
8875 gen_st32(tmp, addr, IS_USER(s));
8876 }
8877 /* advance to the next address. */
8878 tcg_gen_addi_i32(addr, addr, 4);
8879 }
8880 }
8881 TCGV_UNUSED(tmp);
8882 if (insn & (1 << 8)) {
8883 if (insn & (1 << 11)) {
8884 /* pop pc */
8885 tmp = gen_ld32(addr, IS_USER(s));
8886 /* don't set the pc until the rest of the instruction
8887 has completed */
8888 } else {
8889 /* push lr */
8890 tmp = load_reg(s, 14);
8891 gen_st32(tmp, addr, IS_USER(s));
8892 }
8893 tcg_gen_addi_i32(addr, addr, 4);
8894 }
8895 if ((insn & (1 << 11)) == 0) {
8896 tcg_gen_addi_i32(addr, addr, -offset);
8897 }
8898 /* write back the new stack pointer */
8899 store_reg(s, 13, addr);
8900 /* set the new PC value */
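/* Bits 11 and 8 both set means this was a pop whose register list
   included pc. */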
8901 if ((insn & 0x0900) == 0x0900)
8902 gen_bx(s, tmp);
8903 break;
8904
8905         case 1: case 3: case 9: case 11: /* cbz/cbnz */
8906 rm = insn & 7;
8907 tmp = load_reg(s, rm);
8908 s->condlabel = gen_new_label();
8909 s->condjmp = 1;
8910 if (insn & (1 << 11))
8911 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8912 else
8913 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8914 dead_tmp(tmp);
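/* Reassemble the branch offset i:imm5:'0': (insn & 0xf8) >> 2 is
   imm5 (bits 7:3) already doubled, and (insn & 0x200) >> 3 moves
   the i bit (bit 9) down to bit 6. */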
8915             offset = ((insn & 0xf8) >> 2) | ((insn & 0x200) >> 3);
8916 val = (uint32_t)s->pc + 2;
8917 val += offset;
8918 gen_jmp(s, val);
8919 break;
8920
8921 case 15: /* IT, nop-hint. */
8922 if ((insn & 0xf) == 0) {
8923 gen_nop_hint(s, (insn >> 4) & 0xf);
8924 break;
8925 }
8926 /* If Then. */
8927 s->condexec_cond = (insn >> 4) & 0xe;
8928 s->condexec_mask = insn & 0x1f;
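/* Only the top three bits of the condition are kept in
   condexec_cond; the low bit of firstcond travels with the mask,
   so advancing the IT state in gen_intermediate_code_internal()
   is a simple left shift of condexec_mask. */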
8929             /* No actual code generated for this insn, just set up state. */
8930 break;
8931
8932 case 0xe: /* bkpt */
8933 gen_exception_insn(s, 2, EXCP_BKPT);
8934 break;
8935
8936 case 0xa: /* rev */
8937 ARCH(6);
8938 rn = (insn >> 3) & 0x7;
8939 rd = insn & 0x7;
8940 tmp = load_reg(s, rn);
8941 switch ((insn >> 6) & 3) {
8942 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8943 case 1: gen_rev16(tmp); break;
8944 case 3: gen_revsh(tmp); break;
8945 default: goto illegal_op;
8946 }
8947 store_reg(s, rd, tmp);
8948 break;
8949
8950 case 6: /* cps */
8951 ARCH(6);
8952 if (IS_USER(s))
8953 break;
8954 if (IS_M(env)) {
8955 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
8956 /* PRIMASK */
8957 if (insn & 1) {
8958 addr = tcg_const_i32(16);
8959 gen_helper_v7m_msr(cpu_env, addr, tmp);
8960 tcg_temp_free_i32(addr);
8961 }
8962 /* FAULTMASK */
8963 if (insn & 2) {
8964 addr = tcg_const_i32(17);
8965 gen_helper_v7m_msr(cpu_env, addr, tmp);
8966 tcg_temp_free_i32(addr);
8967 }
8968 tcg_temp_free_i32(tmp);
8969 gen_lookup_tb(s);
8970 } else {
8971 if (insn & (1 << 4))
8972 shift = CPSR_A | CPSR_I | CPSR_F;
8973 else
8974 shift = 0;
8975 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8976 }
8977 break;
8978
8979 default:
8980 goto undef;
8981 }
8982 break;
8983
8984 case 12:
8985 /* load/store multiple */
8986 rn = (insn >> 8) & 0x7;
8987 addr = load_reg(s, rn);
8988 for (i = 0; i < 8; i++) {
8989 if (insn & (1 << i)) {
8990 if (insn & (1 << 11)) {
8991 /* load */
8992 tmp = gen_ld32(addr, IS_USER(s));
8993 store_reg(s, i, tmp);
8994 } else {
8995 /* store */
8996 tmp = load_reg(s, i);
8997 gen_st32(tmp, addr, IS_USER(s));
8998 }
8999 /* advance to the next address */
9000 tcg_gen_addi_i32(addr, addr, 4);
9001 }
9002 }
9003 /* Base register writeback. */
9004 if ((insn & (1 << rn)) == 0) {
9005 store_reg(s, rn, addr);
9006 } else {
9007 dead_tmp(addr);
9008 }
9009 break;
9010
9011 case 13:
9012 /* conditional branch or swi */
9013 cond = (insn >> 8) & 0xf;
9014 if (cond == 0xe)
9015 goto undef;
9016
9017 if (cond == 0xf) {
9018 /* swi */
9019 gen_set_pc_im(s->pc);
9020 s->is_jmp = DISAS_SWI;
9021 break;
9022 }
9023 /* generate a conditional jump to next instruction */
9024 s->condlabel = gen_new_label();
9025 gen_test_cc(cond ^ 1, s->condlabel);
9026 s->condjmp = 1;
9027
9028 /* jump to the offset */
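/* ((int32_t)insn << 24) >> 24 sign-extends the 8-bit immediate,
   which is then doubled: e.g. imm8 0xfe gives offset -2, so
   val = pc + 4 - 4, a branch to the instruction itself. */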
9029 val = (uint32_t)s->pc + 2;
9030 offset = ((int32_t)insn << 24) >> 24;
9031 val += offset << 1;
9032 gen_jmp(s, val);
9033 break;
9034
9035 case 14:
9036 if (insn & (1 << 11)) {
9037 if (disas_thumb2_insn(env, s, insn))
9038 goto undef32;
9039 break;
9040 }
9041 /* unconditional branch */
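/* Same idea with an 11-bit immediate: ((int32_t)insn << 21) >> 21
   sign-extends imm11, giving a doubled range of roughly +-2KB
   around pc + 4. */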
9042 val = (uint32_t)s->pc;
9043 offset = ((int32_t)insn << 21) >> 21;
9044 val += (offset << 1) + 2;
9045 gen_jmp(s, val);
9046 break;
9047
9048 case 15:
9049 if (disas_thumb2_insn(env, s, insn))
9050 goto undef32;
9051 break;
9052 }
9053 return;
9054 undef32:
9055 gen_exception_insn(s, 4, EXCP_UDEF);
9056 return;
9057 illegal_op:
9058 undef:
9059 gen_exception_insn(s, 2, EXCP_UDEF);
9060 }
9061
9062 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9063 basic block 'tb'. If search_pc is TRUE, also generate PC
9064 information for each intermediate instruction. */
9065 static inline void gen_intermediate_code_internal(CPUState *env,
9066 TranslationBlock *tb,
9067 int search_pc)
9068 {
9069 DisasContext dc1, *dc = &dc1;
9070 CPUBreakpoint *bp;
9071 uint16_t *gen_opc_end;
9072 int j, lj;
9073 target_ulong pc_start;
9074 uint32_t next_page_start;
9075 int num_insns;
9076 int max_insns;
9077
9078 /* generate intermediate code */
9079 num_temps = 0;
9080
9081 pc_start = tb->pc;
9082
9083 dc->tb = tb;
9084
9085 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9086
9087 dc->is_jmp = DISAS_NEXT;
9088 dc->pc = pc_start;
9089 dc->singlestep_enabled = env->singlestep_enabled;
9090 dc->condjmp = 0;
9091 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9092 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9093 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9094 #if !defined(CONFIG_USER_ONLY)
9095 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9096 #endif
9097 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9098 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9099 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9100 cpu_F0s = tcg_temp_new_i32();
9101 cpu_F1s = tcg_temp_new_i32();
9102 cpu_F0d = tcg_temp_new_i64();
9103 cpu_F1d = tcg_temp_new_i64();
9104 cpu_V0 = cpu_F0d;
9105 cpu_V1 = cpu_F1d;
9106 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9107 cpu_M0 = tcg_temp_new_i64();
9108 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9109 lj = -1;
9110 num_insns = 0;
9111 max_insns = tb->cflags & CF_COUNT_MASK;
9112 if (max_insns == 0)
9113 max_insns = CF_COUNT_MASK;
9114
9115 gen_icount_start();
9116
9117 /* A note on handling of the condexec (IT) bits:
9118 *
9119 * We want to avoid the overhead of having to write the updated condexec
9120 * bits back to the CPUState for every instruction in an IT block. So:
9121 * (1) if the condexec bits are not already zero then we write
9122 * zero back into the CPUState now. This avoids complications trying
9123 * to do it at the end of the block. (For example if we don't do this
9124 * it's hard to identify whether we can safely skip writing condexec
9125 * at the end of the TB, which we definitely want to do for the case
9126 * where a TB doesn't do anything with the IT state at all.)
9127 * (2) if we are going to leave the TB then we call gen_set_condexec()
9128 * which will write the correct value into CPUState if zero is wrong.
9129 * This is done both for leaving the TB at the end, and for leaving
9130 * it because of an exception we know will happen, which is done in
9131 * gen_exception_insn(). The latter is necessary because we need to
9132 * leave the TB with the PC/IT state just prior to execution of the
9133 * instruction which caused the exception.
9134      * (3) if we leave the TB unexpectedly (e.g. a data abort on a load)
9135 * then the CPUState will be wrong and we need to reset it.
9136 * This is handled in the same way as restoration of the
9137 * PC in these situations: we will be called again with search_pc=1
9138 * and generate a mapping of the condexec bits for each PC in
9139      * gen_opc_condexec_bits[]. gen_pc_load() then uses this to restore
9140 * the condexec bits.
9141 *
9142 * Note that there are no instructions which can read the condexec
9143 * bits, and none which can write non-static values to them, so
9144 * we don't need to care about whether CPUState is correct in the
9145 * middle of a TB.
9146 */
9147
9148 /* Reset the conditional execution bits immediately. This avoids
9149 complications trying to do it at the end of the block. */
9150 if (dc->condexec_mask || dc->condexec_cond)
9151 {
9152 TCGv tmp = new_tmp();
9153 tcg_gen_movi_i32(tmp, 0);
9154 store_cpu_field(tmp, condexec_bits);
9155 }
9156 do {
9157 #ifdef CONFIG_USER_ONLY
9160         /* Intercept jump to the magic kernel page. */
9161         if (dc->pc >= 0xffff0000) {
9162             /* We always get here via a jump, so we know we are not in a
9161 conditional execution block. */
9162 gen_exception(EXCP_KERNEL_TRAP);
9163 dc->is_jmp = DISAS_UPDATE;
9164 break;
9165 }
9166 #else
9167 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9168             /* We always get here via a jump, so we know we are not in a
9169 conditional execution block. */
9170 gen_exception(EXCP_EXCEPTION_EXIT);
9171 dc->is_jmp = DISAS_UPDATE;
9172 break;
9173 }
9174 #endif
9175
9176 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9177 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9178 if (bp->pc == dc->pc) {
9179 gen_exception_insn(dc, 0, EXCP_DEBUG);
9180 /* Advance PC so that clearing the breakpoint will
9181 invalidate this TB. */
9182 dc->pc += 2;
9183 goto done_generating;
9184 break;
9185 }
9186 }
9187 }
9188 if (search_pc) {
9189 j = gen_opc_ptr - gen_opc_buf;
9190 if (lj < j) {
9191 lj++;
9192 while (lj < j)
9193 gen_opc_instr_start[lj++] = 0;
9194 }
9195 gen_opc_pc[lj] = dc->pc;
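/* Pack the IT state back into the CONDEXEC format used in
   tb->flags / env->condexec_bits, so gen_pc_load() can restore it
   for this PC. */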
9196 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9197 gen_opc_instr_start[lj] = 1;
9198 gen_opc_icount[lj] = num_insns;
9199 }
9200
9201 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9202 gen_io_start();
9203
9204 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9205 tcg_gen_debug_insn_start(dc->pc);
9206 }
9207
9208 if (dc->thumb) {
9209 disas_thumb_insn(env, dc);
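/* Advance the IT state: shift the next condition bit out of
   condexec_mask into the low bit of condexec_cond, and clear the
   state entirely once the mask is exhausted. */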
9210 if (dc->condexec_mask) {
9211 dc->condexec_cond = (dc->condexec_cond & 0xe)
9212 | ((dc->condexec_mask >> 4) & 1);
9213 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9214 if (dc->condexec_mask == 0) {
9215 dc->condexec_cond = 0;
9216 }
9217 }
9218 } else {
9219 disas_arm_insn(env, dc);
9220 }
9221 if (num_temps) {
9222 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
9223 num_temps = 0;
9224 }
9225
9226 if (dc->condjmp && !dc->is_jmp) {
9227 gen_set_label(dc->condlabel);
9228 dc->condjmp = 0;
9229 }
9230 /* Translation stops when a conditional branch is encountered.
9231 * Otherwise the subsequent code could get translated several times.
9232 * Also stop translation when a page boundary is reached. This
9233 * ensures prefetch aborts occur at the right place. */
9234         num_insns++;
9235 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9236 !env->singlestep_enabled &&
9237 !singlestep &&
9238 dc->pc < next_page_start &&
9239 num_insns < max_insns);
9240
9241 if (tb->cflags & CF_LAST_IO) {
9242 if (dc->condjmp) {
9243 /* FIXME: This can theoretically happen with self-modifying
9244 code. */
9245 cpu_abort(env, "IO on conditional branch instruction");
9246 }
9247 gen_io_end();
9248 }
9249
9250 /* At this stage dc->condjmp will only be set when the skipped
9251 instruction was a conditional branch or trap, and the PC has
9252 already been written. */
9253 if (unlikely(env->singlestep_enabled)) {
9254 /* Make sure the pc is updated, and raise a debug exception. */
9255 if (dc->condjmp) {
9256 gen_set_condexec(dc);
9257 if (dc->is_jmp == DISAS_SWI) {
9258 gen_exception(EXCP_SWI);
9259 } else {
9260 gen_exception(EXCP_DEBUG);
9261 }
9262 gen_set_label(dc->condlabel);
9263 }
9264 if (dc->condjmp || !dc->is_jmp) {
9265 gen_set_pc_im(dc->pc);
9266 dc->condjmp = 0;
9267 }
9268 gen_set_condexec(dc);
9269 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9270 gen_exception(EXCP_SWI);
9271 } else {
9272 /* FIXME: Single stepping a WFI insn will not halt
9273 the CPU. */
9274 gen_exception(EXCP_DEBUG);
9275 }
9276 } else {
9277 /* While branches must always occur at the end of an IT block,
9278 there are a few other things that can cause us to terminate
9279            the TB in the middle of an IT block:
9280 - Exception generating instructions (bkpt, swi, undefined).
9281 - Page boundaries.
9282 - Hardware watchpoints.
9283 Hardware breakpoints have already been handled and skip this code.
9284 */
9285 gen_set_condexec(dc);
9286         switch (dc->is_jmp) {
9287 case DISAS_NEXT:
9288 gen_goto_tb(dc, 1, dc->pc);
9289 break;
9290 default:
9291 case DISAS_JUMP:
9292 case DISAS_UPDATE:
9293 /* indicate that the hash table must be used to find the next TB */
9294 tcg_gen_exit_tb(0);
9295 break;
9296 case DISAS_TB_JUMP:
9297 /* nothing more to generate */
9298 break;
9299 case DISAS_WFI:
9300 gen_helper_wfi();
9301 break;
9302 case DISAS_SWI:
9303 gen_exception(EXCP_SWI);
9304 break;
9305 }
9306 if (dc->condjmp) {
9307 gen_set_label(dc->condlabel);
9308 gen_set_condexec(dc);
9309 gen_goto_tb(dc, 1, dc->pc);
9310 dc->condjmp = 0;
9311 }
9312 }
9313
9314 done_generating:
9315 gen_icount_end(tb, num_insns);
9316 *gen_opc_ptr = INDEX_op_end;
9317
9318 #ifdef DEBUG_DISAS
9319 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9320 qemu_log("----------------\n");
9321 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9322 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
9323 qemu_log("\n");
9324 }
9325 #endif
9326 if (search_pc) {
9327 j = gen_opc_ptr - gen_opc_buf;
9328 lj++;
9329 while (lj <= j)
9330 gen_opc_instr_start[lj++] = 0;
9331 } else {
9332 tb->size = dc->pc - pc_start;
9333 tb->icount = num_insns;
9334 }
9335 }
9336
9337 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
9338 {
9339 gen_intermediate_code_internal(env, tb, 0);
9340 }
9341
9342 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
9343 {
9344 gen_intermediate_code_internal(env, tb, 1);
9345 }
9346
9347 static const char *cpu_mode_names[16] = {
9348 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9349 "???", "???", "???", "und", "???", "???", "???", "sys"
9350 };
9351
9352 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
9353 int flags)
9354 {
9355 int i;
9356 #if 0
9357 union {
9358 uint32_t i;
9359 float s;
9360 } s0, s1;
9361 CPU_DoubleU d;
9362 /* ??? This assumes float64 and double have the same layout.
9363 Oh well, it's only debug dumps. */
9364 union {
9365 float64 f64;
9366 double d;
9367 } d0;
9368 #endif
9369 uint32_t psr;
9370
9371     for (i = 0; i < 16; i++) {
9372 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9373 if ((i % 4) == 3)
9374 cpu_fprintf(f, "\n");
9375 else
9376 cpu_fprintf(f, " ");
9377 }
9378 psr = cpsr_read(env);
9379 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9380 psr,
9381 psr & (1 << 31) ? 'N' : '-',
9382 psr & (1 << 30) ? 'Z' : '-',
9383 psr & (1 << 29) ? 'C' : '-',
9384 psr & (1 << 28) ? 'V' : '-',
9385 psr & CPSR_T ? 'T' : 'A',
9386 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
9387
9388 #if 0
9389 for (i = 0; i < 16; i++) {
9390 d.d = env->vfp.regs[i];
9391 s0.i = d.l.lower;
9392 s1.i = d.l.upper;
9393 d0.f64 = d.d;
9394 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9395 i * 2, (int)s0.i, s0.s,
9396 i * 2 + 1, (int)s1.i, s1.s,
9397 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
9398 d0.d);
9399 }
9400 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
9401 #endif
9402 }
9403
9404 void gen_pc_load(CPUState *env, TranslationBlock *tb,
9405 unsigned long searched_pc, int pc_pos, void *puc)
9406 {
9407 env->regs[15] = gen_opc_pc[pc_pos];
9408 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
9409 }