1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
30 #include "tcg-op.h"
31 #include "qemu-log.h"
32
33 #include "helpers.h"
34 #define GEN_HELPER 1
35 #include "helpers.h"
36
37 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
38 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
39 /* currently all emulated v5 cores are also v5TE, so don't bother */
40 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
41 #define ENABLE_ARCH_5J 0
42 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
43 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
44 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
45 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
46
47 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
48
49 /* internal defines */
50 typedef struct DisasContext {
51 target_ulong pc;
52 int is_jmp;
53 /* Nonzero if this instruction has been conditionally skipped. */
54 int condjmp;
55 /* The label that will be jumped to when the instruction is skipped. */
56 int condlabel;
57     /* Thumb-2 conditional execution bits.  */
58 int condexec_mask;
59 int condexec_cond;
60 struct TranslationBlock *tb;
61 int singlestep_enabled;
62 int thumb;
63 #if !defined(CONFIG_USER_ONLY)
64 int user;
65 #endif
66 int vfp_enabled;
67 int vec_len;
68 int vec_stride;
69 } DisasContext;
70
71 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
72
73 #if defined(CONFIG_USER_ONLY)
74 #define IS_USER(s) 1
75 #else
76 #define IS_USER(s) (s->user)
77 #endif
78
79 /* These instructions trap after executing, so defer them until after the
80    conditional execution state has been updated.  */
81 #define DISAS_WFI 4
82 #define DISAS_SWI 5
83
84 static TCGv_ptr cpu_env;
85 /* We reuse the same 64-bit temporaries for efficiency. */
86 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
87 static TCGv_i32 cpu_R[16];
88 static TCGv_i32 cpu_exclusive_addr;
89 static TCGv_i32 cpu_exclusive_val;
90 static TCGv_i32 cpu_exclusive_high;
91 #ifdef CONFIG_USER_ONLY
92 static TCGv_i32 cpu_exclusive_test;
93 static TCGv_i32 cpu_exclusive_info;
94 #endif
95
96 /* FIXME: These should be removed. */
97 static TCGv cpu_F0s, cpu_F1s;
98 static TCGv_i64 cpu_F0d, cpu_F1d;
99
100 #include "gen-icount.h"
101
102 static const char *regnames[] =
103 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
105
106 /* initialize TCG globals. */
107 void arm_translate_init(void)
108 {
109 int i;
110
111 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
112
113 for (i = 0; i < 16; i++) {
114 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUState, regs[i]),
116 regnames[i]);
117 }
118 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_addr), "exclusive_addr");
120 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUState, exclusive_val), "exclusive_val");
122 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUState, exclusive_high), "exclusive_high");
124 #ifdef CONFIG_USER_ONLY
125 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUState, exclusive_test), "exclusive_test");
127 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUState, exclusive_info), "exclusive_info");
129 #endif
130
131 #define GEN_HELPER 2
132 #include "helpers.h"
133 }
134
135 static inline TCGv load_cpu_offset(int offset)
136 {
137 TCGv tmp = tcg_temp_new_i32();
138 tcg_gen_ld_i32(tmp, cpu_env, offset);
139 return tmp;
140 }
141
142 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
143
144 static inline void store_cpu_offset(TCGv var, int offset)
145 {
146 tcg_gen_st_i32(var, cpu_env, offset);
147 tcg_temp_free_i32(var);
148 }
149
150 #define store_cpu_field(var, name) \
151 store_cpu_offset(var, offsetof(CPUState, name))
152
153 /* Set a variable to the value of a CPU register. */
154 static void load_reg_var(DisasContext *s, TCGv var, int reg)
155 {
156 if (reg == 15) {
157 uint32_t addr;
158         /* normally, since we have already updated PC, we only need to add one insn */
159 if (s->thumb)
160 addr = (long)s->pc + 2;
161 else
162 addr = (long)s->pc + 4;
163 tcg_gen_movi_i32(var, addr);
164 } else {
165 tcg_gen_mov_i32(var, cpu_R[reg]);
166 }
167 }
168
169 /* Create a new temporary and set it to the value of a CPU register. */
170 static inline TCGv load_reg(DisasContext *s, int reg)
171 {
172 TCGv tmp = tcg_temp_new_i32();
173 load_reg_var(s, tmp, reg);
174 return tmp;
175 }
176
177 /* Set a CPU register. The source must be a temporary and will be
178 marked as dead. */
179 static void store_reg(DisasContext *s, int reg, TCGv var)
180 {
181 if (reg == 15) {
182 tcg_gen_andi_i32(var, var, ~1);
183 s->is_jmp = DISAS_JUMP;
184 }
185 tcg_gen_mov_i32(cpu_R[reg], var);
186 tcg_temp_free_i32(var);
187 }
188
189 /* Value extensions. */
190 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
191 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
192 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
193 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
194
195 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
196 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
197
198
199 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
200 {
201 TCGv tmp_mask = tcg_const_i32(mask);
202 gen_helper_cpsr_write(var, tmp_mask);
203 tcg_temp_free_i32(tmp_mask);
204 }
205 /* Set NZCV flags from the high 4 bits of var. */
206 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
207
208 static void gen_exception(int excp)
209 {
210 TCGv tmp = tcg_temp_new_i32();
211 tcg_gen_movi_i32(tmp, excp);
212 gen_helper_exception(tmp);
213 tcg_temp_free_i32(tmp);
214 }
215
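/* Dual signed 16x16->32 multiply: on return a holds the product of the low
   halfwords and b holds the product of the high halfwords of the inputs.  */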
216 static void gen_smul_dual(TCGv a, TCGv b)
217 {
218 TCGv tmp1 = tcg_temp_new_i32();
219 TCGv tmp2 = tcg_temp_new_i32();
220 tcg_gen_ext16s_i32(tmp1, a);
221 tcg_gen_ext16s_i32(tmp2, b);
222 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
223 tcg_temp_free_i32(tmp2);
224 tcg_gen_sari_i32(a, a, 16);
225 tcg_gen_sari_i32(b, b, 16);
226 tcg_gen_mul_i32(b, b, a);
227 tcg_gen_mov_i32(a, tmp1);
228 tcg_temp_free_i32(tmp1);
229 }
230
231 /* Byteswap each halfword. */
232 static void gen_rev16(TCGv var)
233 {
234 TCGv tmp = tcg_temp_new_i32();
235 tcg_gen_shri_i32(tmp, var, 8);
236 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
237 tcg_gen_shli_i32(var, var, 8);
238 tcg_gen_andi_i32(var, var, 0xff00ff00);
239 tcg_gen_or_i32(var, var, tmp);
240 tcg_temp_free_i32(tmp);
241 }
242
243 /* Byteswap low halfword and sign extend. */
244 static void gen_revsh(TCGv var)
245 {
246 tcg_gen_ext16u_i32(var, var);
247 tcg_gen_bswap16_i32(var, var);
248 tcg_gen_ext16s_i32(var, var);
249 }
250
251 /* Unsigned bitfield extract. */
252 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
253 {
254 if (shift)
255 tcg_gen_shri_i32(var, var, shift);
256 tcg_gen_andi_i32(var, var, mask);
257 }
258
259 /* Signed bitfield extract. */
260 static void gen_sbfx(TCGv var, int shift, int width)
261 {
262 uint32_t signbit;
263
264 if (shift)
265 tcg_gen_sari_i32(var, var, shift);
266 if (shift + width < 32) {
267 signbit = 1u << (width - 1);
268 tcg_gen_andi_i32(var, var, (1u << width) - 1);
269 tcg_gen_xori_i32(var, var, signbit);
270 tcg_gen_subi_i32(var, var, signbit);
271 }
272 }
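/* Note: the xor/sub pair above sign-extends the width-bit field without a
   branch, e.g. for width == 8: 0xf0 -> (0xf0 ^ 0x80) - 0x80 = 0xfffffff0 (-16).  */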
273
274 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
275 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
276 {
277 tcg_gen_andi_i32(val, val, mask);
278 tcg_gen_shli_i32(val, val, shift);
279 tcg_gen_andi_i32(base, base, ~(mask << shift));
280 tcg_gen_or_i32(dest, base, val);
281 }
282
283 /* Return (b << 32) + a.  Mark inputs as dead.  */
284 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
285 {
286 TCGv_i64 tmp64 = tcg_temp_new_i64();
287
288 tcg_gen_extu_i32_i64(tmp64, b);
289 tcg_temp_free_i32(b);
290 tcg_gen_shli_i64(tmp64, tmp64, 32);
291 tcg_gen_add_i64(a, tmp64, a);
292
293 tcg_temp_free_i64(tmp64);
294 return a;
295 }
296
297 /* Return (b << 32) - a. Mark inputs as dead. */
298 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
299 {
300 TCGv_i64 tmp64 = tcg_temp_new_i64();
301
302 tcg_gen_extu_i32_i64(tmp64, b);
303 tcg_temp_free_i32(b);
304 tcg_gen_shli_i64(tmp64, tmp64, 32);
305 tcg_gen_sub_i64(a, tmp64, a);
306
307 tcg_temp_free_i64(tmp64);
308 return a;
309 }
310
311 /* FIXME: Most targets have native widening multiplication.
312 It would be good to use that instead of a full wide multiply. */
313 /* 32x32->64 multiply. Marks inputs as dead. */
314 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
315 {
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
318
319 tcg_gen_extu_i32_i64(tmp1, a);
320 tcg_temp_free_i32(a);
321 tcg_gen_extu_i32_i64(tmp2, b);
322 tcg_temp_free_i32(b);
323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
324 tcg_temp_free_i64(tmp2);
325 return tmp1;
326 }
327
328 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
329 {
330 TCGv_i64 tmp1 = tcg_temp_new_i64();
331 TCGv_i64 tmp2 = tcg_temp_new_i64();
332
333 tcg_gen_ext_i32_i64(tmp1, a);
334 tcg_temp_free_i32(a);
335 tcg_gen_ext_i32_i64(tmp2, b);
336 tcg_temp_free_i32(b);
337 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
338 tcg_temp_free_i64(tmp2);
339 return tmp1;
340 }
341
342 /* Swap low and high halfwords. */
343 static void gen_swap_half(TCGv var)
344 {
345 TCGv tmp = tcg_temp_new_i32();
346 tcg_gen_shri_i32(tmp, var, 16);
347 tcg_gen_shli_i32(var, var, 16);
348 tcg_gen_or_i32(var, var, tmp);
349 tcg_temp_free_i32(tmp);
350 }
351
352 /* Dual 16-bit add.  The result is placed in t0; t1 is marked as dead.
353 tmp = (t0 ^ t1) & 0x8000;
354 t0 &= ~0x8000;
355 t1 &= ~0x8000;
356 t0 = (t0 + t1) ^ tmp;
357 */
358
359 static void gen_add16(TCGv t0, TCGv t1)
360 {
361 TCGv tmp = tcg_temp_new_i32();
362 tcg_gen_xor_i32(tmp, t0, t1);
363 tcg_gen_andi_i32(tmp, tmp, 0x8000);
364 tcg_gen_andi_i32(t0, t0, ~0x8000);
365 tcg_gen_andi_i32(t1, t1, ~0x8000);
366 tcg_gen_add_i32(t0, t0, t1);
367 tcg_gen_xor_i32(t0, t0, tmp);
368 tcg_temp_free_i32(tmp);
369 tcg_temp_free_i32(t1);
370 }
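/* Masking out bit 15 prevents a carry from the low halfword propagating into
   the high halfword, and the final xor restores the correct bit 15.  Example:
   0x0000ffff + 0x00000001 gives tmp = 0x8000 and (0x7fff + 0x0001) ^ 0x8000 = 0,
   i.e. the low halves wrap to zero while the high halves stay zero.  */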
371
372 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
373
374 /* Set CF to the top bit of var. */
375 static void gen_set_CF_bit31(TCGv var)
376 {
377 TCGv tmp = tcg_temp_new_i32();
378 tcg_gen_shri_i32(tmp, var, 31);
379 gen_set_CF(tmp);
380 tcg_temp_free_i32(tmp);
381 }
382
383 /* Set N and Z flags from var. */
384 static inline void gen_logic_CC(TCGv var)
385 {
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
387 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
388 }
389
390 /* T0 += T1 + CF. */
391 static void gen_adc(TCGv t0, TCGv t1)
392 {
393 TCGv tmp;
394 tcg_gen_add_i32(t0, t0, t1);
395 tmp = load_cpu_field(CF);
396 tcg_gen_add_i32(t0, t0, tmp);
397 tcg_temp_free_i32(tmp);
398 }
399
400 /* dest = T0 + T1 + CF. */
401 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
402 {
403 TCGv tmp;
404 tcg_gen_add_i32(dest, t0, t1);
405 tmp = load_cpu_field(CF);
406 tcg_gen_add_i32(dest, dest, tmp);
407 tcg_temp_free_i32(tmp);
408 }
409
410 /* dest = T0 - T1 + CF - 1. */
411 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
412 {
413 TCGv tmp;
414 tcg_gen_sub_i32(dest, t0, t1);
415 tmp = load_cpu_field(CF);
416 tcg_gen_add_i32(dest, dest, tmp);
417 tcg_gen_subi_i32(dest, dest, 1);
418 tcg_temp_free_i32(tmp);
419 }
420
421 /* FIXME: Implement this natively. */
422 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
423
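/* Set the carry flag to bit 'shift' of var, i.e. the last bit shifted out by
   an immediate shift.  */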
424 static void shifter_out_im(TCGv var, int shift)
425 {
426 TCGv tmp = tcg_temp_new_i32();
427 if (shift == 0) {
428 tcg_gen_andi_i32(tmp, var, 1);
429 } else {
430 tcg_gen_shri_i32(tmp, var, shift);
431 if (shift != 31)
432 tcg_gen_andi_i32(tmp, tmp, 1);
433 }
434 gen_set_CF(tmp);
435 tcg_temp_free_i32(tmp);
436 }
437
438 /* Shift by immediate.  Includes special handling for shift == 0:
    LSR/ASR by 0 encode a shift of 32, and ROR by 0 encodes RRX.  */
439 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
440 {
441 switch (shiftop) {
442 case 0: /* LSL */
443 if (shift != 0) {
444 if (flags)
445 shifter_out_im(var, 32 - shift);
446 tcg_gen_shli_i32(var, var, shift);
447 }
448 break;
449 case 1: /* LSR */
450 if (shift == 0) {
451 if (flags) {
452 tcg_gen_shri_i32(var, var, 31);
453 gen_set_CF(var);
454 }
455 tcg_gen_movi_i32(var, 0);
456 } else {
457 if (flags)
458 shifter_out_im(var, shift - 1);
459 tcg_gen_shri_i32(var, var, shift);
460 }
461 break;
462 case 2: /* ASR */
463 if (shift == 0)
464 shift = 32;
465 if (flags)
466 shifter_out_im(var, shift - 1);
467 if (shift == 32)
468 shift = 31;
469 tcg_gen_sari_i32(var, var, shift);
470 break;
471 case 3: /* ROR/RRX */
472 if (shift != 0) {
473 if (flags)
474 shifter_out_im(var, shift - 1);
475 tcg_gen_rotri_i32(var, var, shift); break;
476 } else {
477 TCGv tmp = load_cpu_field(CF);
478 if (flags)
479 shifter_out_im(var, 0);
480 tcg_gen_shri_i32(var, var, 1);
481 tcg_gen_shli_i32(tmp, tmp, 31);
482 tcg_gen_or_i32(var, var, tmp);
483 tcg_temp_free_i32(tmp);
484 }
485 }
486 }
487
488 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
489 TCGv shift, int flags)
490 {
491 if (flags) {
492 switch (shiftop) {
493 case 0: gen_helper_shl_cc(var, var, shift); break;
494 case 1: gen_helper_shr_cc(var, var, shift); break;
495 case 2: gen_helper_sar_cc(var, var, shift); break;
496 case 3: gen_helper_ror_cc(var, var, shift); break;
497 }
498 } else {
499 switch (shiftop) {
500 case 0: gen_helper_shl(var, var, shift); break;
501 case 1: gen_helper_shr(var, var, shift); break;
502 case 2: gen_helper_sar(var, var, shift); break;
503 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
504 tcg_gen_rotr_i32(var, var, shift); break;
505 }
506 }
507 tcg_temp_free_i32(shift);
508 }
509
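/* Parallel add/subtract (SADD16, UQSUB8, SHASX, ...): one instruction field
   selects the operation (add16/addsubx/subaddx/sub16/add8/sub8), the other
   selects the variant: s/u = signed/unsigned modulo arithmetic (these set the
   GE flags), q/uq = saturating, sh/uh = halving.  */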
510 #define PAS_OP(pfx) \
511 switch (op2) { \
512 case 0: gen_pas_helper(glue(pfx,add16)); break; \
513 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
514 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
515 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
516 case 4: gen_pas_helper(glue(pfx,add8)); break; \
517 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
518 }
519 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
520 {
521 TCGv_ptr tmp;
522
523 switch (op1) {
524 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
525 case 1:
526 tmp = tcg_temp_new_ptr();
527 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
528 PAS_OP(s)
529 tcg_temp_free_ptr(tmp);
530 break;
531 case 5:
532 tmp = tcg_temp_new_ptr();
533 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
534 PAS_OP(u)
535 tcg_temp_free_ptr(tmp);
536 break;
537 #undef gen_pas_helper
538 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
539 case 2:
540 PAS_OP(q);
541 break;
542 case 3:
543 PAS_OP(sh);
544 break;
545 case 6:
546 PAS_OP(uq);
547 break;
548 case 7:
549 PAS_OP(uh);
550 break;
551 #undef gen_pas_helper
552 }
553 }
554 #undef PAS_OP
555
556 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
557 #define PAS_OP(pfx) \
558 switch (op1) { \
559 case 0: gen_pas_helper(glue(pfx,add8)); break; \
560 case 1: gen_pas_helper(glue(pfx,add16)); break; \
561 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
562 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
563 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
564 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
565 }
566 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
567 {
568 TCGv_ptr tmp;
569
570 switch (op2) {
571 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
572 case 0:
573 tmp = tcg_temp_new_ptr();
574 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
575 PAS_OP(s)
576 tcg_temp_free_ptr(tmp);
577 break;
578 case 4:
579 tmp = tcg_temp_new_ptr();
580 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
581 PAS_OP(u)
582 tcg_temp_free_ptr(tmp);
583 break;
584 #undef gen_pas_helper
585 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
586 case 1:
587 PAS_OP(q);
588 break;
589 case 2:
590 PAS_OP(sh);
591 break;
592 case 5:
593 PAS_OP(uq);
594 break;
595 case 6:
596 PAS_OP(uh);
597 break;
598 #undef gen_pas_helper
599 }
600 }
601 #undef PAS_OP
602
603 static void gen_test_cc(int cc, int label)
604 {
605 TCGv tmp;
606 TCGv tmp2;
607 int inv;
608
609 switch (cc) {
610 case 0: /* eq: Z */
611 tmp = load_cpu_field(ZF);
612 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
613 break;
614 case 1: /* ne: !Z */
615 tmp = load_cpu_field(ZF);
616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
617 break;
618 case 2: /* cs: C */
619 tmp = load_cpu_field(CF);
620 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
621 break;
622 case 3: /* cc: !C */
623 tmp = load_cpu_field(CF);
624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
625 break;
626 case 4: /* mi: N */
627 tmp = load_cpu_field(NF);
628 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
629 break;
630 case 5: /* pl: !N */
631 tmp = load_cpu_field(NF);
632 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
633 break;
634 case 6: /* vs: V */
635 tmp = load_cpu_field(VF);
636 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
637 break;
638 case 7: /* vc: !V */
639 tmp = load_cpu_field(VF);
640 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
641 break;
642 case 8: /* hi: C && !Z */
643 inv = gen_new_label();
644 tmp = load_cpu_field(CF);
645 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
646 tcg_temp_free_i32(tmp);
647 tmp = load_cpu_field(ZF);
648 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
649 gen_set_label(inv);
650 break;
651 case 9: /* ls: !C || Z */
652 tmp = load_cpu_field(CF);
653 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
654 tcg_temp_free_i32(tmp);
655 tmp = load_cpu_field(ZF);
656 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
657 break;
658 case 10: /* ge: N == V -> N ^ V == 0 */
659 tmp = load_cpu_field(VF);
660 tmp2 = load_cpu_field(NF);
661 tcg_gen_xor_i32(tmp, tmp, tmp2);
662 tcg_temp_free_i32(tmp2);
663 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
664 break;
665 case 11: /* lt: N != V -> N ^ V != 0 */
666 tmp = load_cpu_field(VF);
667 tmp2 = load_cpu_field(NF);
668 tcg_gen_xor_i32(tmp, tmp, tmp2);
669 tcg_temp_free_i32(tmp2);
670 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
671 break;
672 case 12: /* gt: !Z && N == V */
673 inv = gen_new_label();
674 tmp = load_cpu_field(ZF);
675 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
676 tcg_temp_free_i32(tmp);
677 tmp = load_cpu_field(VF);
678 tmp2 = load_cpu_field(NF);
679 tcg_gen_xor_i32(tmp, tmp, tmp2);
680 tcg_temp_free_i32(tmp2);
681 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
682 gen_set_label(inv);
683 break;
684 case 13: /* le: Z || N != V */
685 tmp = load_cpu_field(ZF);
686 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
687 tcg_temp_free_i32(tmp);
688 tmp = load_cpu_field(VF);
689 tmp2 = load_cpu_field(NF);
690 tcg_gen_xor_i32(tmp, tmp, tmp2);
691 tcg_temp_free_i32(tmp2);
692 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
693 break;
694 default:
695 fprintf(stderr, "Bad condition code 0x%x\n", cc);
696 abort();
697 }
698 tcg_temp_free_i32(tmp);
699 }
700
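/* For each of the 16 data-processing opcodes, 1 if the flag-setting form sets
   N and Z from the logical result (rather than from an add/subtract).  */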
701 static const uint8_t table_logic_cc[16] = {
702 1, /* and */
703 1, /* xor */
704 0, /* sub */
705 0, /* rsb */
706 0, /* add */
707 0, /* adc */
708 0, /* sbc */
709 0, /* rsc */
710     1, /* tst */
711     1, /* teq */
712 0, /* cmp */
713 0, /* cmn */
714 1, /* orr */
715 1, /* mov */
716 1, /* bic */
717 1, /* mvn */
718 };
719
720 /* Set PC and Thumb state from an immediate address. */
721 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
722 {
723 TCGv tmp;
724
725 s->is_jmp = DISAS_UPDATE;
726 if (s->thumb != (addr & 1)) {
727 tmp = tcg_temp_new_i32();
728 tcg_gen_movi_i32(tmp, addr & 1);
729 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
730 tcg_temp_free_i32(tmp);
731 }
732 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
733 }
734
735 /* Set PC and Thumb state from var. var is marked as dead. */
736 static inline void gen_bx(DisasContext *s, TCGv var)
737 {
738 s->is_jmp = DISAS_UPDATE;
739 tcg_gen_andi_i32(cpu_R[15], var, ~1);
740 tcg_gen_andi_i32(var, var, 1);
741 store_cpu_field(var, thumb);
742 }
743
744 /* Variant of store_reg which uses branch&exchange logic when storing
745 to r15 in ARM architecture v7 and above. The source must be a temporary
746 and will be marked as dead. */
747 static inline void store_reg_bx(CPUState *env, DisasContext *s,
748 int reg, TCGv var)
749 {
750 if (reg == 15 && ENABLE_ARCH_7) {
751 gen_bx(s, var);
752 } else {
753 store_reg(s, reg, var);
754 }
755 }
756
757 /* Variant of store_reg which uses branch&exchange logic when storing
758 * to r15 in ARM architecture v5T and above. This is used for storing
759 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
760 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
761 static inline void store_reg_from_load(CPUState *env, DisasContext *s,
762 int reg, TCGv var)
763 {
764 if (reg == 15 && ENABLE_ARCH_5) {
765 gen_bx(s, var);
766 } else {
767 store_reg(s, reg, var);
768 }
769 }
770
771 static inline TCGv gen_ld8s(TCGv addr, int index)
772 {
773 TCGv tmp = tcg_temp_new_i32();
774 tcg_gen_qemu_ld8s(tmp, addr, index);
775 return tmp;
776 }
777 static inline TCGv gen_ld8u(TCGv addr, int index)
778 {
779 TCGv tmp = tcg_temp_new_i32();
780 tcg_gen_qemu_ld8u(tmp, addr, index);
781 return tmp;
782 }
783 static inline TCGv gen_ld16s(TCGv addr, int index)
784 {
785 TCGv tmp = tcg_temp_new_i32();
786 tcg_gen_qemu_ld16s(tmp, addr, index);
787 return tmp;
788 }
789 static inline TCGv gen_ld16u(TCGv addr, int index)
790 {
791 TCGv tmp = tcg_temp_new_i32();
792 tcg_gen_qemu_ld16u(tmp, addr, index);
793 return tmp;
794 }
795 static inline TCGv gen_ld32(TCGv addr, int index)
796 {
797 TCGv tmp = tcg_temp_new_i32();
798 tcg_gen_qemu_ld32u(tmp, addr, index);
799 return tmp;
800 }
801 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
802 {
803 TCGv_i64 tmp = tcg_temp_new_i64();
804 tcg_gen_qemu_ld64(tmp, addr, index);
805 return tmp;
806 }
807 static inline void gen_st8(TCGv val, TCGv addr, int index)
808 {
809 tcg_gen_qemu_st8(val, addr, index);
810 tcg_temp_free_i32(val);
811 }
812 static inline void gen_st16(TCGv val, TCGv addr, int index)
813 {
814 tcg_gen_qemu_st16(val, addr, index);
815 tcg_temp_free_i32(val);
816 }
817 static inline void gen_st32(TCGv val, TCGv addr, int index)
818 {
819 tcg_gen_qemu_st32(val, addr, index);
820 tcg_temp_free_i32(val);
821 }
822 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
823 {
824 tcg_gen_qemu_st64(val, addr, index);
825 tcg_temp_free_i64(val);
826 }
827
828 static inline void gen_set_pc_im(uint32_t val)
829 {
830 tcg_gen_movi_i32(cpu_R[15], val);
831 }
832
833 /* Force a TB lookup after an instruction that changes the CPU state. */
834 static inline void gen_lookup_tb(DisasContext *s)
835 {
836 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
837 s->is_jmp = DISAS_UPDATE;
838 }
839
840 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
841 TCGv var)
842 {
843 int val, rm, shift, shiftop;
844 TCGv offset;
845
846 if (!(insn & (1 << 25))) {
847 /* immediate */
848 val = insn & 0xfff;
849 if (!(insn & (1 << 23)))
850 val = -val;
851 if (val != 0)
852 tcg_gen_addi_i32(var, var, val);
853 } else {
854 /* shift/register */
855 rm = (insn) & 0xf;
856 shift = (insn >> 7) & 0x1f;
857 shiftop = (insn >> 5) & 3;
858 offset = load_reg(s, rm);
859 gen_arm_shift_im(offset, shiftop, shift, 0);
860 if (!(insn & (1 << 23)))
861 tcg_gen_sub_i32(var, var, offset);
862 else
863 tcg_gen_add_i32(var, var, offset);
864 tcg_temp_free_i32(offset);
865 }
866 }
867
868 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
869 int extra, TCGv var)
870 {
871 int val, rm;
872 TCGv offset;
873
874 if (insn & (1 << 22)) {
875 /* immediate */
876 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
877 if (!(insn & (1 << 23)))
878 val = -val;
879 val += extra;
880 if (val != 0)
881 tcg_gen_addi_i32(var, var, val);
882 } else {
883 /* register */
884 if (extra)
885 tcg_gen_addi_i32(var, var, extra);
886 rm = (insn) & 0xf;
887 offset = load_reg(s, rm);
888 if (!(insn & (1 << 23)))
889 tcg_gen_sub_i32(var, var, offset);
890 else
891 tcg_gen_add_i32(var, var, offset);
892 tcg_temp_free_i32(offset);
893 }
894 }
895
896 #define VFP_OP2(name) \
897 static inline void gen_vfp_##name(int dp) \
898 { \
899 if (dp) \
900 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
901 else \
902 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
903 }
904
905 VFP_OP2(add)
906 VFP_OP2(sub)
907 VFP_OP2(mul)
908 VFP_OP2(div)
909
910 #undef VFP_OP2
911
912 static inline void gen_vfp_abs(int dp)
913 {
914 if (dp)
915 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
916 else
917 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
918 }
919
920 static inline void gen_vfp_neg(int dp)
921 {
922 if (dp)
923 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
924 else
925 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
926 }
927
928 static inline void gen_vfp_sqrt(int dp)
929 {
930 if (dp)
931 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
932 else
933 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
934 }
935
936 static inline void gen_vfp_cmp(int dp)
937 {
938 if (dp)
939 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
940 else
941 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
942 }
943
944 static inline void gen_vfp_cmpe(int dp)
945 {
946 if (dp)
947 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
948 else
949 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
950 }
951
952 static inline void gen_vfp_F1_ld0(int dp)
953 {
954 if (dp)
955 tcg_gen_movi_i64(cpu_F1d, 0);
956 else
957 tcg_gen_movi_i32(cpu_F1s, 0);
958 }
959
960 static inline void gen_vfp_uito(int dp)
961 {
962 if (dp)
963 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
964 else
965 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
966 }
967
968 static inline void gen_vfp_sito(int dp)
969 {
970 if (dp)
971 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
972 else
973 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
974 }
975
976 static inline void gen_vfp_toui(int dp)
977 {
978 if (dp)
979 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
980 else
981 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
982 }
983
984 static inline void gen_vfp_touiz(int dp)
985 {
986 if (dp)
987 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
988 else
989 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
990 }
991
992 static inline void gen_vfp_tosi(int dp)
993 {
994 if (dp)
995 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
996 else
997 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
998 }
999
1000 static inline void gen_vfp_tosiz(int dp)
1001 {
1002 if (dp)
1003 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
1004 else
1005 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1006 }
1007
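/* Conversions between floating-point and fixed-point (VCVT with a fixed-point
   operand); the generated helpers take the number of fraction bits as the
   shift argument.  */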
1008 #define VFP_GEN_FIX(name) \
1009 static inline void gen_vfp_##name(int dp, int shift) \
1010 { \
1011 TCGv tmp_shift = tcg_const_i32(shift); \
1012 if (dp) \
1013 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
1014 else \
1015 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1016 tcg_temp_free_i32(tmp_shift); \
1017 }
1018 VFP_GEN_FIX(tosh)
1019 VFP_GEN_FIX(tosl)
1020 VFP_GEN_FIX(touh)
1021 VFP_GEN_FIX(toul)
1022 VFP_GEN_FIX(shto)
1023 VFP_GEN_FIX(slto)
1024 VFP_GEN_FIX(uhto)
1025 VFP_GEN_FIX(ulto)
1026 #undef VFP_GEN_FIX
1027
1028 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1029 {
1030 if (dp)
1031 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1032 else
1033 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1034 }
1035
1036 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1037 {
1038 if (dp)
1039 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1040 else
1041 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1042 }
1043
1044 static inline long
1045 vfp_reg_offset (int dp, int reg)
1046 {
1047 if (dp)
1048 return offsetof(CPUARMState, vfp.regs[reg]);
1049 else if (reg & 1) {
1050 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1051 + offsetof(CPU_DoubleU, l.upper);
1052 } else {
1053 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1054 + offsetof(CPU_DoubleU, l.lower);
1055 }
1056 }
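/* With dp == 0 this maps single-precision register Sn onto one 32-bit half of
   double register D(n/2): even-numbered Sn is the low word and odd-numbered Sn
   the high word, e.g. S5 is the upper half of D2.  */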
1057
1058 /* Return the offset of a 32-bit piece of a NEON register.
1059 zero is the least significant end of the register. */
1060 static inline long
1061 neon_reg_offset (int reg, int n)
1062 {
1063 int sreg;
1064 sreg = reg * 2 + n;
1065 return vfp_reg_offset(0, sreg);
1066 }
1067
1068 static TCGv neon_load_reg(int reg, int pass)
1069 {
1070 TCGv tmp = tcg_temp_new_i32();
1071 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1072 return tmp;
1073 }
1074
1075 static void neon_store_reg(int reg, int pass, TCGv var)
1076 {
1077 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1078 tcg_temp_free_i32(var);
1079 }
1080
1081 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1082 {
1083 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1084 }
1085
1086 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1087 {
1088 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1089 }
1090
1091 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1092 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1093 #define tcg_gen_st_f32 tcg_gen_st_i32
1094 #define tcg_gen_st_f64 tcg_gen_st_i64
1095
1096 static inline void gen_mov_F0_vreg(int dp, int reg)
1097 {
1098 if (dp)
1099 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1100 else
1101 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1102 }
1103
1104 static inline void gen_mov_F1_vreg(int dp, int reg)
1105 {
1106 if (dp)
1107 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1108 else
1109 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1110 }
1111
1112 static inline void gen_mov_vreg_F0(int dp, int reg)
1113 {
1114 if (dp)
1115 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1116 else
1117 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1118 }
1119
1120 #define ARM_CP_RW_BIT (1 << 20)
1121
1122 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1123 {
1124 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1125 }
1126
1127 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1128 {
1129 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1130 }
1131
1132 static inline TCGv iwmmxt_load_creg(int reg)
1133 {
1134 TCGv var = tcg_temp_new_i32();
1135 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1136 return var;
1137 }
1138
1139 static inline void iwmmxt_store_creg(int reg, TCGv var)
1140 {
1141 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1142 tcg_temp_free_i32(var);
1143 }
1144
1145 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1146 {
1147 iwmmxt_store_reg(cpu_M0, rn);
1148 }
1149
1150 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1151 {
1152 iwmmxt_load_reg(cpu_M0, rn);
1153 }
1154
1155 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1156 {
1157 iwmmxt_load_reg(cpu_V1, rn);
1158 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1159 }
1160
1161 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1162 {
1163 iwmmxt_load_reg(cpu_V1, rn);
1164 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1165 }
1166
1167 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1168 {
1169 iwmmxt_load_reg(cpu_V1, rn);
1170 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1171 }
1172
1173 #define IWMMXT_OP(name) \
1174 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1175 { \
1176 iwmmxt_load_reg(cpu_V1, rn); \
1177 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1178 }
1179
1180 #define IWMMXT_OP_SIZE(name) \
1181 IWMMXT_OP(name##b) \
1182 IWMMXT_OP(name##w) \
1183 IWMMXT_OP(name##l)
1184
1185 #define IWMMXT_OP_1(name) \
1186 static inline void gen_op_iwmmxt_##name##_M0(void) \
1187 { \
1188 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
1189 }
1190
1191 IWMMXT_OP(maddsq)
1192 IWMMXT_OP(madduq)
1193 IWMMXT_OP(sadb)
1194 IWMMXT_OP(sadw)
1195 IWMMXT_OP(mulslw)
1196 IWMMXT_OP(mulshw)
1197 IWMMXT_OP(mululw)
1198 IWMMXT_OP(muluhw)
1199 IWMMXT_OP(macsw)
1200 IWMMXT_OP(macuw)
1201
1202 IWMMXT_OP_SIZE(unpackl)
1203 IWMMXT_OP_SIZE(unpackh)
1204
1205 IWMMXT_OP_1(unpacklub)
1206 IWMMXT_OP_1(unpackluw)
1207 IWMMXT_OP_1(unpacklul)
1208 IWMMXT_OP_1(unpackhub)
1209 IWMMXT_OP_1(unpackhuw)
1210 IWMMXT_OP_1(unpackhul)
1211 IWMMXT_OP_1(unpacklsb)
1212 IWMMXT_OP_1(unpacklsw)
1213 IWMMXT_OP_1(unpacklsl)
1214 IWMMXT_OP_1(unpackhsb)
1215 IWMMXT_OP_1(unpackhsw)
1216 IWMMXT_OP_1(unpackhsl)
1217
1218 IWMMXT_OP_SIZE(cmpeq)
1219 IWMMXT_OP_SIZE(cmpgtu)
1220 IWMMXT_OP_SIZE(cmpgts)
1221
1222 IWMMXT_OP_SIZE(mins)
1223 IWMMXT_OP_SIZE(minu)
1224 IWMMXT_OP_SIZE(maxs)
1225 IWMMXT_OP_SIZE(maxu)
1226
1227 IWMMXT_OP_SIZE(subn)
1228 IWMMXT_OP_SIZE(addn)
1229 IWMMXT_OP_SIZE(subu)
1230 IWMMXT_OP_SIZE(addu)
1231 IWMMXT_OP_SIZE(subs)
1232 IWMMXT_OP_SIZE(adds)
1233
1234 IWMMXT_OP(avgb0)
1235 IWMMXT_OP(avgb1)
1236 IWMMXT_OP(avgw0)
1237 IWMMXT_OP(avgw1)
1238
1239 IWMMXT_OP(msadb)
1240
1241 IWMMXT_OP(packuw)
1242 IWMMXT_OP(packul)
1243 IWMMXT_OP(packuq)
1244 IWMMXT_OP(packsw)
1245 IWMMXT_OP(packsl)
1246 IWMMXT_OP(packsq)
1247
1248 static void gen_op_iwmmxt_set_mup(void)
1249 {
1250 TCGv tmp;
1251 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1252 tcg_gen_ori_i32(tmp, tmp, 2);
1253 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1254 }
1255
1256 static void gen_op_iwmmxt_set_cup(void)
1257 {
1258 TCGv tmp;
1259 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1260 tcg_gen_ori_i32(tmp, tmp, 1);
1261 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1262 }
1263
1264 static void gen_op_iwmmxt_setpsr_nz(void)
1265 {
1266 TCGv tmp = tcg_temp_new_i32();
1267 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1268 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1269 }
1270
1271 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1272 {
1273 iwmmxt_load_reg(cpu_V1, rn);
1274 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1275 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1276 }
1277
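/* Compute the address operand of an iwMMXt load/store into dest, handling the
   pre-indexed and post-indexed forms with optional base writeback.  Returns
   nonzero for an addressing mode that is not supported.  */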
1278 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1279 {
1280 int rd;
1281 uint32_t offset;
1282 TCGv tmp;
1283
1284 rd = (insn >> 16) & 0xf;
1285 tmp = load_reg(s, rd);
1286
1287 offset = (insn & 0xff) << ((insn >> 7) & 2);
1288 if (insn & (1 << 24)) {
1289 /* Pre indexed */
1290 if (insn & (1 << 23))
1291 tcg_gen_addi_i32(tmp, tmp, offset);
1292 else
1293 tcg_gen_addi_i32(tmp, tmp, -offset);
1294 tcg_gen_mov_i32(dest, tmp);
1295 if (insn & (1 << 21))
1296 store_reg(s, rd, tmp);
1297 else
1298 tcg_temp_free_i32(tmp);
1299 } else if (insn & (1 << 21)) {
1300 /* Post indexed */
1301 tcg_gen_mov_i32(dest, tmp);
1302 if (insn & (1 << 23))
1303 tcg_gen_addi_i32(tmp, tmp, offset);
1304 else
1305 tcg_gen_addi_i32(tmp, tmp, -offset);
1306 store_reg(s, rd, tmp);
1307 } else if (!(insn & (1 << 23)))
1308 return 1;
1309 return 0;
1310 }
1311
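/* Fetch the shift amount for an iwMMXt shift/rotate into dest, either from one
   of the wCGR control registers or from the low bits of a wR register, masked
   to the valid range.  Returns nonzero for an invalid register specifier.  */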
1312 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1313 {
1314 int rd = (insn >> 0) & 0xf;
1315 TCGv tmp;
1316
1317 if (insn & (1 << 8)) {
1318 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1319 return 1;
1320 } else {
1321 tmp = iwmmxt_load_creg(rd);
1322 }
1323 } else {
1324 tmp = tcg_temp_new_i32();
1325 iwmmxt_load_reg(cpu_V0, rd);
1326 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1327 }
1328 tcg_gen_andi_i32(tmp, tmp, mask);
1329 tcg_gen_mov_i32(dest, tmp);
1330 tcg_temp_free_i32(tmp);
1331 return 0;
1332 }
1333
1334 /* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
1335    (i.e. an undefined instruction).  */
1336 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1337 {
1338 int rd, wrd;
1339 int rdhi, rdlo, rd0, rd1, i;
1340 TCGv addr;
1341 TCGv tmp, tmp2, tmp3;
1342
1343 if ((insn & 0x0e000e00) == 0x0c000000) {
1344 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1345 wrd = insn & 0xf;
1346 rdlo = (insn >> 12) & 0xf;
1347 rdhi = (insn >> 16) & 0xf;
1348 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1349 iwmmxt_load_reg(cpu_V0, wrd);
1350 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1351 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1352 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1353 } else { /* TMCRR */
1354 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1355 iwmmxt_store_reg(cpu_V0, wrd);
1356 gen_op_iwmmxt_set_mup();
1357 }
1358 return 0;
1359 }
1360
1361 wrd = (insn >> 12) & 0xf;
1362 addr = tcg_temp_new_i32();
1363 if (gen_iwmmxt_address(s, insn, addr)) {
1364 tcg_temp_free_i32(addr);
1365 return 1;
1366 }
1367 if (insn & ARM_CP_RW_BIT) {
1368 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1369 tmp = tcg_temp_new_i32();
1370 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1371 iwmmxt_store_creg(wrd, tmp);
1372 } else {
1373 i = 1;
1374 if (insn & (1 << 8)) {
1375 if (insn & (1 << 22)) { /* WLDRD */
1376 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1377 i = 0;
1378 } else { /* WLDRW wRd */
1379 tmp = gen_ld32(addr, IS_USER(s));
1380 }
1381 } else {
1382 if (insn & (1 << 22)) { /* WLDRH */
1383 tmp = gen_ld16u(addr, IS_USER(s));
1384 } else { /* WLDRB */
1385 tmp = gen_ld8u(addr, IS_USER(s));
1386 }
1387 }
1388 if (i) {
1389 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1390 tcg_temp_free_i32(tmp);
1391 }
1392 gen_op_iwmmxt_movq_wRn_M0(wrd);
1393 }
1394 } else {
1395 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1396 tmp = iwmmxt_load_creg(wrd);
1397 gen_st32(tmp, addr, IS_USER(s));
1398 } else {
1399 gen_op_iwmmxt_movq_M0_wRn(wrd);
1400 tmp = tcg_temp_new_i32();
1401 if (insn & (1 << 8)) {
1402 if (insn & (1 << 22)) { /* WSTRD */
1403 tcg_temp_free_i32(tmp);
1404 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1405 } else { /* WSTRW wRd */
1406 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1407 gen_st32(tmp, addr, IS_USER(s));
1408 }
1409 } else {
1410 if (insn & (1 << 22)) { /* WSTRH */
1411 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1412 gen_st16(tmp, addr, IS_USER(s));
1413 } else { /* WSTRB */
1414 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1415 gen_st8(tmp, addr, IS_USER(s));
1416 }
1417 }
1418 }
1419 }
1420 tcg_temp_free_i32(addr);
1421 return 0;
1422 }
1423
1424 if ((insn & 0x0f000000) != 0x0e000000)
1425 return 1;
1426
1427 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1428 case 0x000: /* WOR */
1429 wrd = (insn >> 12) & 0xf;
1430 rd0 = (insn >> 0) & 0xf;
1431 rd1 = (insn >> 16) & 0xf;
1432 gen_op_iwmmxt_movq_M0_wRn(rd0);
1433 gen_op_iwmmxt_orq_M0_wRn(rd1);
1434 gen_op_iwmmxt_setpsr_nz();
1435 gen_op_iwmmxt_movq_wRn_M0(wrd);
1436 gen_op_iwmmxt_set_mup();
1437 gen_op_iwmmxt_set_cup();
1438 break;
1439 case 0x011: /* TMCR */
1440 if (insn & 0xf)
1441 return 1;
1442 rd = (insn >> 12) & 0xf;
1443 wrd = (insn >> 16) & 0xf;
1444 switch (wrd) {
1445 case ARM_IWMMXT_wCID:
1446 case ARM_IWMMXT_wCASF:
1447 break;
1448 case ARM_IWMMXT_wCon:
1449 gen_op_iwmmxt_set_cup();
1450 /* Fall through. */
1451 case ARM_IWMMXT_wCSSF:
1452 tmp = iwmmxt_load_creg(wrd);
1453 tmp2 = load_reg(s, rd);
1454 tcg_gen_andc_i32(tmp, tmp, tmp2);
1455 tcg_temp_free_i32(tmp2);
1456 iwmmxt_store_creg(wrd, tmp);
1457 break;
1458 case ARM_IWMMXT_wCGR0:
1459 case ARM_IWMMXT_wCGR1:
1460 case ARM_IWMMXT_wCGR2:
1461 case ARM_IWMMXT_wCGR3:
1462 gen_op_iwmmxt_set_cup();
1463 tmp = load_reg(s, rd);
1464 iwmmxt_store_creg(wrd, tmp);
1465 break;
1466 default:
1467 return 1;
1468 }
1469 break;
1470 case 0x100: /* WXOR */
1471 wrd = (insn >> 12) & 0xf;
1472 rd0 = (insn >> 0) & 0xf;
1473 rd1 = (insn >> 16) & 0xf;
1474 gen_op_iwmmxt_movq_M0_wRn(rd0);
1475 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1476 gen_op_iwmmxt_setpsr_nz();
1477 gen_op_iwmmxt_movq_wRn_M0(wrd);
1478 gen_op_iwmmxt_set_mup();
1479 gen_op_iwmmxt_set_cup();
1480 break;
1481 case 0x111: /* TMRC */
1482 if (insn & 0xf)
1483 return 1;
1484 rd = (insn >> 12) & 0xf;
1485 wrd = (insn >> 16) & 0xf;
1486 tmp = iwmmxt_load_creg(wrd);
1487 store_reg(s, rd, tmp);
1488 break;
1489 case 0x300: /* WANDN */
1490 wrd = (insn >> 12) & 0xf;
1491 rd0 = (insn >> 0) & 0xf;
1492 rd1 = (insn >> 16) & 0xf;
1493 gen_op_iwmmxt_movq_M0_wRn(rd0);
1494 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1495 gen_op_iwmmxt_andq_M0_wRn(rd1);
1496 gen_op_iwmmxt_setpsr_nz();
1497 gen_op_iwmmxt_movq_wRn_M0(wrd);
1498 gen_op_iwmmxt_set_mup();
1499 gen_op_iwmmxt_set_cup();
1500 break;
1501 case 0x200: /* WAND */
1502 wrd = (insn >> 12) & 0xf;
1503 rd0 = (insn >> 0) & 0xf;
1504 rd1 = (insn >> 16) & 0xf;
1505 gen_op_iwmmxt_movq_M0_wRn(rd0);
1506 gen_op_iwmmxt_andq_M0_wRn(rd1);
1507 gen_op_iwmmxt_setpsr_nz();
1508 gen_op_iwmmxt_movq_wRn_M0(wrd);
1509 gen_op_iwmmxt_set_mup();
1510 gen_op_iwmmxt_set_cup();
1511 break;
1512 case 0x810: case 0xa10: /* WMADD */
1513 wrd = (insn >> 12) & 0xf;
1514 rd0 = (insn >> 0) & 0xf;
1515 rd1 = (insn >> 16) & 0xf;
1516 gen_op_iwmmxt_movq_M0_wRn(rd0);
1517 if (insn & (1 << 21))
1518 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1519 else
1520 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1521 gen_op_iwmmxt_movq_wRn_M0(wrd);
1522 gen_op_iwmmxt_set_mup();
1523 break;
1524 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1525 wrd = (insn >> 12) & 0xf;
1526 rd0 = (insn >> 16) & 0xf;
1527 rd1 = (insn >> 0) & 0xf;
1528 gen_op_iwmmxt_movq_M0_wRn(rd0);
1529 switch ((insn >> 22) & 3) {
1530 case 0:
1531 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1532 break;
1533 case 1:
1534 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1535 break;
1536 case 2:
1537 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1538 break;
1539 case 3:
1540 return 1;
1541 }
1542 gen_op_iwmmxt_movq_wRn_M0(wrd);
1543 gen_op_iwmmxt_set_mup();
1544 gen_op_iwmmxt_set_cup();
1545 break;
1546 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1547 wrd = (insn >> 12) & 0xf;
1548 rd0 = (insn >> 16) & 0xf;
1549 rd1 = (insn >> 0) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0);
1551 switch ((insn >> 22) & 3) {
1552 case 0:
1553 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1554 break;
1555 case 1:
1556 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1557 break;
1558 case 2:
1559 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1560 break;
1561 case 3:
1562 return 1;
1563 }
1564 gen_op_iwmmxt_movq_wRn_M0(wrd);
1565 gen_op_iwmmxt_set_mup();
1566 gen_op_iwmmxt_set_cup();
1567 break;
1568 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1569 wrd = (insn >> 12) & 0xf;
1570 rd0 = (insn >> 16) & 0xf;
1571 rd1 = (insn >> 0) & 0xf;
1572 gen_op_iwmmxt_movq_M0_wRn(rd0);
1573 if (insn & (1 << 22))
1574 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1575 else
1576 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1577 if (!(insn & (1 << 20)))
1578 gen_op_iwmmxt_addl_M0_wRn(wrd);
1579 gen_op_iwmmxt_movq_wRn_M0(wrd);
1580 gen_op_iwmmxt_set_mup();
1581 break;
1582 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1583 wrd = (insn >> 12) & 0xf;
1584 rd0 = (insn >> 16) & 0xf;
1585 rd1 = (insn >> 0) & 0xf;
1586 gen_op_iwmmxt_movq_M0_wRn(rd0);
1587 if (insn & (1 << 21)) {
1588 if (insn & (1 << 20))
1589 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1590 else
1591 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1592 } else {
1593 if (insn & (1 << 20))
1594 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1595 else
1596 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1597 }
1598 gen_op_iwmmxt_movq_wRn_M0(wrd);
1599 gen_op_iwmmxt_set_mup();
1600 break;
1601 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1602 wrd = (insn >> 12) & 0xf;
1603 rd0 = (insn >> 16) & 0xf;
1604 rd1 = (insn >> 0) & 0xf;
1605 gen_op_iwmmxt_movq_M0_wRn(rd0);
1606 if (insn & (1 << 21))
1607 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1608 else
1609 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1610 if (!(insn & (1 << 20))) {
1611 iwmmxt_load_reg(cpu_V1, wrd);
1612 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1613 }
1614 gen_op_iwmmxt_movq_wRn_M0(wrd);
1615 gen_op_iwmmxt_set_mup();
1616 break;
1617 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1618 wrd = (insn >> 12) & 0xf;
1619 rd0 = (insn >> 16) & 0xf;
1620 rd1 = (insn >> 0) & 0xf;
1621 gen_op_iwmmxt_movq_M0_wRn(rd0);
1622 switch ((insn >> 22) & 3) {
1623 case 0:
1624 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1625 break;
1626 case 1:
1627 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1628 break;
1629 case 2:
1630 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1631 break;
1632 case 3:
1633 return 1;
1634 }
1635 gen_op_iwmmxt_movq_wRn_M0(wrd);
1636 gen_op_iwmmxt_set_mup();
1637 gen_op_iwmmxt_set_cup();
1638 break;
1639 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1640 wrd = (insn >> 12) & 0xf;
1641 rd0 = (insn >> 16) & 0xf;
1642 rd1 = (insn >> 0) & 0xf;
1643 gen_op_iwmmxt_movq_M0_wRn(rd0);
1644 if (insn & (1 << 22)) {
1645 if (insn & (1 << 20))
1646 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1647 else
1648 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1649 } else {
1650 if (insn & (1 << 20))
1651 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1652 else
1653 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1654 }
1655 gen_op_iwmmxt_movq_wRn_M0(wrd);
1656 gen_op_iwmmxt_set_mup();
1657 gen_op_iwmmxt_set_cup();
1658 break;
1659 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1660 wrd = (insn >> 12) & 0xf;
1661 rd0 = (insn >> 16) & 0xf;
1662 rd1 = (insn >> 0) & 0xf;
1663 gen_op_iwmmxt_movq_M0_wRn(rd0);
1664 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1665 tcg_gen_andi_i32(tmp, tmp, 7);
1666 iwmmxt_load_reg(cpu_V1, rd1);
1667 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1668 tcg_temp_free_i32(tmp);
1669 gen_op_iwmmxt_movq_wRn_M0(wrd);
1670 gen_op_iwmmxt_set_mup();
1671 break;
1672 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1673 if (((insn >> 6) & 3) == 3)
1674 return 1;
1675 rd = (insn >> 12) & 0xf;
1676 wrd = (insn >> 16) & 0xf;
1677 tmp = load_reg(s, rd);
1678 gen_op_iwmmxt_movq_M0_wRn(wrd);
1679 switch ((insn >> 6) & 3) {
1680 case 0:
1681 tmp2 = tcg_const_i32(0xff);
1682 tmp3 = tcg_const_i32((insn & 7) << 3);
1683 break;
1684 case 1:
1685 tmp2 = tcg_const_i32(0xffff);
1686 tmp3 = tcg_const_i32((insn & 3) << 4);
1687 break;
1688 case 2:
1689 tmp2 = tcg_const_i32(0xffffffff);
1690 tmp3 = tcg_const_i32((insn & 1) << 5);
1691 break;
1692 default:
1693 TCGV_UNUSED(tmp2);
1694 TCGV_UNUSED(tmp3);
1695 }
1696 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1697 tcg_temp_free(tmp3);
1698 tcg_temp_free(tmp2);
1699 tcg_temp_free_i32(tmp);
1700 gen_op_iwmmxt_movq_wRn_M0(wrd);
1701 gen_op_iwmmxt_set_mup();
1702 break;
1703 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1704 rd = (insn >> 12) & 0xf;
1705 wrd = (insn >> 16) & 0xf;
1706 if (rd == 15 || ((insn >> 22) & 3) == 3)
1707 return 1;
1708 gen_op_iwmmxt_movq_M0_wRn(wrd);
1709 tmp = tcg_temp_new_i32();
1710 switch ((insn >> 22) & 3) {
1711 case 0:
1712 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1713 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1714 if (insn & 8) {
1715 tcg_gen_ext8s_i32(tmp, tmp);
1716 } else {
1717 tcg_gen_andi_i32(tmp, tmp, 0xff);
1718 }
1719 break;
1720 case 1:
1721 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1722 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1723 if (insn & 8) {
1724 tcg_gen_ext16s_i32(tmp, tmp);
1725 } else {
1726 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1727 }
1728 break;
1729 case 2:
1730 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1731 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1732 break;
1733 }
1734 store_reg(s, rd, tmp);
1735 break;
1736 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1737 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1738 return 1;
1739 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1740 switch ((insn >> 22) & 3) {
1741 case 0:
1742 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1743 break;
1744 case 1:
1745 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1746 break;
1747 case 2:
1748 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1749 break;
1750 }
1751 tcg_gen_shli_i32(tmp, tmp, 28);
1752 gen_set_nzcv(tmp);
1753 tcg_temp_free_i32(tmp);
1754 break;
1755 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1756 if (((insn >> 6) & 3) == 3)
1757 return 1;
1758 rd = (insn >> 12) & 0xf;
1759 wrd = (insn >> 16) & 0xf;
1760 tmp = load_reg(s, rd);
1761 switch ((insn >> 6) & 3) {
1762 case 0:
1763 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1764 break;
1765 case 1:
1766 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1767 break;
1768 case 2:
1769 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1770 break;
1771 }
1772 tcg_temp_free_i32(tmp);
1773 gen_op_iwmmxt_movq_wRn_M0(wrd);
1774 gen_op_iwmmxt_set_mup();
1775 break;
1776 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1777 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1778 return 1;
1779 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1780 tmp2 = tcg_temp_new_i32();
1781 tcg_gen_mov_i32(tmp2, tmp);
1782 switch ((insn >> 22) & 3) {
1783 case 0:
1784 for (i = 0; i < 7; i ++) {
1785 tcg_gen_shli_i32(tmp2, tmp2, 4);
1786 tcg_gen_and_i32(tmp, tmp, tmp2);
1787 }
1788 break;
1789 case 1:
1790 for (i = 0; i < 3; i ++) {
1791 tcg_gen_shli_i32(tmp2, tmp2, 8);
1792 tcg_gen_and_i32(tmp, tmp, tmp2);
1793 }
1794 break;
1795 case 2:
1796 tcg_gen_shli_i32(tmp2, tmp2, 16);
1797 tcg_gen_and_i32(tmp, tmp, tmp2);
1798 break;
1799 }
1800 gen_set_nzcv(tmp);
1801 tcg_temp_free_i32(tmp2);
1802 tcg_temp_free_i32(tmp);
1803 break;
1804 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1805 wrd = (insn >> 12) & 0xf;
1806 rd0 = (insn >> 16) & 0xf;
1807 gen_op_iwmmxt_movq_M0_wRn(rd0);
1808 switch ((insn >> 22) & 3) {
1809 case 0:
1810 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1811 break;
1812 case 1:
1813 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1814 break;
1815 case 2:
1816 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1817 break;
1818 case 3:
1819 return 1;
1820 }
1821 gen_op_iwmmxt_movq_wRn_M0(wrd);
1822 gen_op_iwmmxt_set_mup();
1823 break;
1824 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1825 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1826 return 1;
1827 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1828 tmp2 = tcg_temp_new_i32();
1829 tcg_gen_mov_i32(tmp2, tmp);
1830 switch ((insn >> 22) & 3) {
1831 case 0:
1832 for (i = 0; i < 7; i ++) {
1833 tcg_gen_shli_i32(tmp2, tmp2, 4);
1834 tcg_gen_or_i32(tmp, tmp, tmp2);
1835 }
1836 break;
1837 case 1:
1838 for (i = 0; i < 3; i ++) {
1839 tcg_gen_shli_i32(tmp2, tmp2, 8);
1840 tcg_gen_or_i32(tmp, tmp, tmp2);
1841 }
1842 break;
1843 case 2:
1844 tcg_gen_shli_i32(tmp2, tmp2, 16);
1845 tcg_gen_or_i32(tmp, tmp, tmp2);
1846 break;
1847 }
1848 gen_set_nzcv(tmp);
1849 tcg_temp_free_i32(tmp2);
1850 tcg_temp_free_i32(tmp);
1851 break;
1852 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1853 rd = (insn >> 12) & 0xf;
1854 rd0 = (insn >> 16) & 0xf;
1855 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1856 return 1;
1857 gen_op_iwmmxt_movq_M0_wRn(rd0);
1858 tmp = tcg_temp_new_i32();
1859 switch ((insn >> 22) & 3) {
1860 case 0:
1861 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1862 break;
1863 case 1:
1864 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1865 break;
1866 case 2:
1867 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1868 break;
1869 }
1870 store_reg(s, rd, tmp);
1871 break;
1872 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1873 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1874 wrd = (insn >> 12) & 0xf;
1875 rd0 = (insn >> 16) & 0xf;
1876 rd1 = (insn >> 0) & 0xf;
1877 gen_op_iwmmxt_movq_M0_wRn(rd0);
1878 switch ((insn >> 22) & 3) {
1879 case 0:
1880 if (insn & (1 << 21))
1881 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1882 else
1883 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1884 break;
1885 case 1:
1886 if (insn & (1 << 21))
1887 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1888 else
1889 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1890 break;
1891 case 2:
1892 if (insn & (1 << 21))
1893 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1894 else
1895 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1896 break;
1897 case 3:
1898 return 1;
1899 }
1900 gen_op_iwmmxt_movq_wRn_M0(wrd);
1901 gen_op_iwmmxt_set_mup();
1902 gen_op_iwmmxt_set_cup();
1903 break;
1904 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1905 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1906 wrd = (insn >> 12) & 0xf;
1907 rd0 = (insn >> 16) & 0xf;
1908 gen_op_iwmmxt_movq_M0_wRn(rd0);
1909 switch ((insn >> 22) & 3) {
1910 case 0:
1911 if (insn & (1 << 21))
1912 gen_op_iwmmxt_unpacklsb_M0();
1913 else
1914 gen_op_iwmmxt_unpacklub_M0();
1915 break;
1916 case 1:
1917 if (insn & (1 << 21))
1918 gen_op_iwmmxt_unpacklsw_M0();
1919 else
1920 gen_op_iwmmxt_unpackluw_M0();
1921 break;
1922 case 2:
1923 if (insn & (1 << 21))
1924 gen_op_iwmmxt_unpacklsl_M0();
1925 else
1926 gen_op_iwmmxt_unpacklul_M0();
1927 break;
1928 case 3:
1929 return 1;
1930 }
1931 gen_op_iwmmxt_movq_wRn_M0(wrd);
1932 gen_op_iwmmxt_set_mup();
1933 gen_op_iwmmxt_set_cup();
1934 break;
1935 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1936 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1937 wrd = (insn >> 12) & 0xf;
1938 rd0 = (insn >> 16) & 0xf;
1939 gen_op_iwmmxt_movq_M0_wRn(rd0);
1940 switch ((insn >> 22) & 3) {
1941 case 0:
1942 if (insn & (1 << 21))
1943 gen_op_iwmmxt_unpackhsb_M0();
1944 else
1945 gen_op_iwmmxt_unpackhub_M0();
1946 break;
1947 case 1:
1948 if (insn & (1 << 21))
1949 gen_op_iwmmxt_unpackhsw_M0();
1950 else
1951 gen_op_iwmmxt_unpackhuw_M0();
1952 break;
1953 case 2:
1954 if (insn & (1 << 21))
1955 gen_op_iwmmxt_unpackhsl_M0();
1956 else
1957 gen_op_iwmmxt_unpackhul_M0();
1958 break;
1959 case 3:
1960 return 1;
1961 }
1962 gen_op_iwmmxt_movq_wRn_M0(wrd);
1963 gen_op_iwmmxt_set_mup();
1964 gen_op_iwmmxt_set_cup();
1965 break;
1966 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1967 case 0x214: case 0x614: case 0xa14: case 0xe14:
1968 if (((insn >> 22) & 3) == 0)
1969 return 1;
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 tmp = tcg_temp_new_i32();
1974 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1975 tcg_temp_free_i32(tmp);
1976 return 1;
1977 }
1978 switch ((insn >> 22) & 3) {
1979 case 1:
1980 gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
1981 break;
1982 case 2:
1983 gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
1984 break;
1985 case 3:
1986 gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
1987 break;
1988 }
1989 tcg_temp_free_i32(tmp);
1990 gen_op_iwmmxt_movq_wRn_M0(wrd);
1991 gen_op_iwmmxt_set_mup();
1992 gen_op_iwmmxt_set_cup();
1993 break;
1994 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1995 case 0x014: case 0x414: case 0x814: case 0xc14:
1996 if (((insn >> 22) & 3) == 0)
1997 return 1;
1998 wrd = (insn >> 12) & 0xf;
1999 rd0 = (insn >> 16) & 0xf;
2000 gen_op_iwmmxt_movq_M0_wRn(rd0);
2001 tmp = tcg_temp_new_i32();
2002 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2003 tcg_temp_free_i32(tmp);
2004 return 1;
2005 }
2006 switch ((insn >> 22) & 3) {
2007 case 1:
2008 gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
2009 break;
2010 case 2:
2011 gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
2012 break;
2013 case 3:
2014 gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
2015 break;
2016 }
2017 tcg_temp_free_i32(tmp);
2018 gen_op_iwmmxt_movq_wRn_M0(wrd);
2019 gen_op_iwmmxt_set_mup();
2020 gen_op_iwmmxt_set_cup();
2021 break;
2022 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2023 case 0x114: case 0x514: case 0x914: case 0xd14:
2024 if (((insn >> 22) & 3) == 0)
2025 return 1;
2026 wrd = (insn >> 12) & 0xf;
2027 rd0 = (insn >> 16) & 0xf;
2028 gen_op_iwmmxt_movq_M0_wRn(rd0);
2029 tmp = tcg_temp_new_i32();
2030 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2031 tcg_temp_free_i32(tmp);
2032 return 1;
2033 }
2034 switch ((insn >> 22) & 3) {
2035 case 1:
2036 gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
2037 break;
2038 case 2:
2039 gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
2040 break;
2041 case 3:
2042 gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
2043 break;
2044 }
2045 tcg_temp_free_i32(tmp);
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 gen_op_iwmmxt_set_cup();
2049 break;
2050 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2051 case 0x314: case 0x714: case 0xb14: case 0xf14:
2052 if (((insn >> 22) & 3) == 0)
2053 return 1;
2054 wrd = (insn >> 12) & 0xf;
2055 rd0 = (insn >> 16) & 0xf;
2056 gen_op_iwmmxt_movq_M0_wRn(rd0);
2057 tmp = tcg_temp_new_i32();
2058 switch ((insn >> 22) & 3) {
2059 case 1:
2060 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2061 tcg_temp_free_i32(tmp);
2062 return 1;
2063 }
2064 gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
2065 break;
2066 case 2:
2067 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2068 tcg_temp_free_i32(tmp);
2069 return 1;
2070 }
2071 gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
2072 break;
2073 case 3:
2074 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2075 tcg_temp_free_i32(tmp);
2076 return 1;
2077 }
2078 gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
2079 break;
2080 }
2081 tcg_temp_free_i32(tmp);
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
2086 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2087 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 rd1 = (insn >> 0) & 0xf;
2091 gen_op_iwmmxt_movq_M0_wRn(rd0);
2092 switch ((insn >> 22) & 3) {
2093 case 0:
2094 if (insn & (1 << 21))
2095 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2096 else
2097 gen_op_iwmmxt_minub_M0_wRn(rd1);
2098 break;
2099 case 1:
2100 if (insn & (1 << 21))
2101 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2102 else
2103 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2104 break;
2105 case 2:
2106 if (insn & (1 << 21))
2107 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2108 else
2109 gen_op_iwmmxt_minul_M0_wRn(rd1);
2110 break;
2111 case 3:
2112 return 1;
2113 }
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 break;
2117 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2118 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2119 wrd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
2121 rd1 = (insn >> 0) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
2123 switch ((insn >> 22) & 3) {
2124 case 0:
2125 if (insn & (1 << 21))
2126 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2127 else
2128 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2129 break;
2130 case 1:
2131 if (insn & (1 << 21))
2132 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2133 else
2134 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2135 break;
2136 case 2:
2137 if (insn & (1 << 21))
2138 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2139 else
2140 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2141 break;
2142 case 3:
2143 return 1;
2144 }
2145 gen_op_iwmmxt_movq_wRn_M0(wrd);
2146 gen_op_iwmmxt_set_mup();
2147 break;
2148 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2149 case 0x402: case 0x502: case 0x602: case 0x702:
2150 wrd = (insn >> 12) & 0xf;
2151 rd0 = (insn >> 16) & 0xf;
2152 rd1 = (insn >> 0) & 0xf;
2153 gen_op_iwmmxt_movq_M0_wRn(rd0);
2154 tmp = tcg_const_i32((insn >> 20) & 3);
2155 iwmmxt_load_reg(cpu_V1, rd1);
2156 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2157 tcg_temp_free(tmp);
2158 gen_op_iwmmxt_movq_wRn_M0(wrd);
2159 gen_op_iwmmxt_set_mup();
2160 break;
2161 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2162 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2163 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2164 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2165 wrd = (insn >> 12) & 0xf;
2166 rd0 = (insn >> 16) & 0xf;
2167 rd1 = (insn >> 0) & 0xf;
2168 gen_op_iwmmxt_movq_M0_wRn(rd0);
2169 switch ((insn >> 20) & 0xf) {
2170 case 0x0:
2171 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2172 break;
2173 case 0x1:
2174 gen_op_iwmmxt_subub_M0_wRn(rd1);
2175 break;
2176 case 0x3:
2177 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2178 break;
2179 case 0x4:
2180 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2181 break;
2182 case 0x5:
2183 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2184 break;
2185 case 0x7:
2186 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2187 break;
2188 case 0x8:
2189 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2190 break;
2191 case 0x9:
2192 gen_op_iwmmxt_subul_M0_wRn(rd1);
2193 break;
2194 case 0xb:
2195 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2196 break;
2197 default:
2198 return 1;
2199 }
2200 gen_op_iwmmxt_movq_wRn_M0(wrd);
2201 gen_op_iwmmxt_set_mup();
2202 gen_op_iwmmxt_set_cup();
2203 break;
2204 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2205 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2206 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2207 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2208 wrd = (insn >> 12) & 0xf;
2209 rd0 = (insn >> 16) & 0xf;
2210 gen_op_iwmmxt_movq_M0_wRn(rd0);
2211 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2212 gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
2213 tcg_temp_free(tmp);
2214 gen_op_iwmmxt_movq_wRn_M0(wrd);
2215 gen_op_iwmmxt_set_mup();
2216 gen_op_iwmmxt_set_cup();
2217 break;
2218 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2219 case 0x418: case 0x518: case 0x618: case 0x718:
2220 case 0x818: case 0x918: case 0xa18: case 0xb18:
2221 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2222 wrd = (insn >> 12) & 0xf;
2223 rd0 = (insn >> 16) & 0xf;
2224 rd1 = (insn >> 0) & 0xf;
2225 gen_op_iwmmxt_movq_M0_wRn(rd0);
2226 switch ((insn >> 20) & 0xf) {
2227 case 0x0:
2228 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2229 break;
2230 case 0x1:
2231 gen_op_iwmmxt_addub_M0_wRn(rd1);
2232 break;
2233 case 0x3:
2234 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2235 break;
2236 case 0x4:
2237 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2238 break;
2239 case 0x5:
2240 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2241 break;
2242 case 0x7:
2243 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2244 break;
2245 case 0x8:
2246 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2247 break;
2248 case 0x9:
2249 gen_op_iwmmxt_addul_M0_wRn(rd1);
2250 break;
2251 case 0xb:
2252 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2253 break;
2254 default:
2255 return 1;
2256 }
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 gen_op_iwmmxt_set_cup();
2260 break;
2261 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2262 case 0x408: case 0x508: case 0x608: case 0x708:
2263 case 0x808: case 0x908: case 0xa08: case 0xb08:
2264 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2265 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2266 return 1;
2267 wrd = (insn >> 12) & 0xf;
2268 rd0 = (insn >> 16) & 0xf;
2269 rd1 = (insn >> 0) & 0xf;
2270 gen_op_iwmmxt_movq_M0_wRn(rd0);
2271 switch ((insn >> 22) & 3) {
2272 case 1:
2273 if (insn & (1 << 21))
2274 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2275 else
2276 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2277 break;
2278 case 2:
2279 if (insn & (1 << 21))
2280 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2281 else
2282 gen_op_iwmmxt_packul_M0_wRn(rd1);
2283 break;
2284 case 3:
2285 if (insn & (1 << 21))
2286 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2287 else
2288 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2289 break;
2290 }
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
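/* TMIA, TMIAPH, TMIAxy: multiply two core registers and accumulate the
   result into the destination wMMX register. */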
2295 case 0x201: case 0x203: case 0x205: case 0x207:
2296 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2297 case 0x211: case 0x213: case 0x215: case 0x217:
2298 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2299 wrd = (insn >> 5) & 0xf;
2300 rd0 = (insn >> 12) & 0xf;
2301 rd1 = (insn >> 0) & 0xf;
2302 if (rd0 == 0xf || rd1 == 0xf)
2303 return 1;
2304 gen_op_iwmmxt_movq_M0_wRn(wrd);
2305 tmp = load_reg(s, rd0);
2306 tmp2 = load_reg(s, rd1);
2307 switch ((insn >> 16) & 0xf) {
2308 case 0x0: /* TMIA */
2309 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2310 break;
2311 case 0x8: /* TMIAPH */
2312 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2313 break;
2314 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2315 if (insn & (1 << 16))
2316 tcg_gen_shri_i32(tmp, tmp, 16);
2317 if (insn & (1 << 17))
2318 tcg_gen_shri_i32(tmp2, tmp2, 16);
2319 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2320 break;
2321 default:
2322 tcg_temp_free_i32(tmp2);
2323 tcg_temp_free_i32(tmp);
2324 return 1;
2325 }
2326 tcg_temp_free_i32(tmp2);
2327 tcg_temp_free_i32(tmp);
2328 gen_op_iwmmxt_movq_wRn_M0(wrd);
2329 gen_op_iwmmxt_set_mup();
2330 break;
2331 default:
2332 return 1;
2333 }
2334
2335 return 0;
2336 }
2337
2338 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2339 (i.e. an undefined instruction). */
2340 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2341 {
2342 int acc, rd0, rd1, rdhi, rdlo;
2343 TCGv tmp, tmp2;
2344
2345 if ((insn & 0x0ff00f10) == 0x0e200010) {
2346 /* Multiply with Internal Accumulate Format */
2347 rd0 = (insn >> 12) & 0xf;
2348 rd1 = insn & 0xf;
2349 acc = (insn >> 5) & 7;
2350
2351 if (acc != 0)
2352 return 1;
2353
2354 tmp = load_reg(s, rd0);
2355 tmp2 = load_reg(s, rd1);
2356 switch ((insn >> 16) & 0xf) {
2357 case 0x0: /* MIA */
2358 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2359 break;
2360 case 0x8: /* MIAPH */
2361 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2362 break;
2363 case 0xc: /* MIABB */
2364 case 0xd: /* MIABT */
2365 case 0xe: /* MIATB */
2366 case 0xf: /* MIATT */
2367 if (insn & (1 << 16))
2368 tcg_gen_shri_i32(tmp, tmp, 16);
2369 if (insn & (1 << 17))
2370 tcg_gen_shri_i32(tmp2, tmp2, 16);
2371 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2372 break;
2373 default:
2374 return 1;
2375 }
2376 tcg_temp_free_i32(tmp2);
2377 tcg_temp_free_i32(tmp);
2378
2379 gen_op_iwmmxt_movq_wRn_M0(acc);
2380 return 0;
2381 }
2382
2383 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2384 /* Internal Accumulator Access Format */
2385 rdhi = (insn >> 16) & 0xf;
2386 rdlo = (insn >> 12) & 0xf;
2387 acc = insn & 7;
2388
2389 if (acc != 0)
2390 return 1;
2391
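/* MRA/MAR: move the 40-bit accumulator to or from a pair of core
   registers; on MRA the unused top bits of rdhi are masked to zero. */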
2392 if (insn & ARM_CP_RW_BIT) { /* MRA */
2393 iwmmxt_load_reg(cpu_V0, acc);
2394 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2395 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2396 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2397 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2398 } else { /* MAR */
2399 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2400 iwmmxt_store_reg(cpu_V0, acc);
2401 }
2402 return 0;
2403 }
2404
2405 return 1;
2406 }
2407
2408 /* Disassemble a system coprocessor instruction. Return nonzero if the
2409 instruction is not defined. */
2410 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2411 {
2412 TCGv tmp, tmp2;
2413 uint32_t rd = (insn >> 12) & 0xf;
2414 uint32_t cp = (insn >> 8) & 0xf;
2415 if (IS_USER(s)) {
2416 return 1;
2417 }
2418
2419 if (insn & ARM_CP_RW_BIT) {
2420 if (!env->cp[cp].cp_read)
2421 return 1;
2422 gen_set_pc_im(s->pc);
2423 tmp = tcg_temp_new_i32();
2424 tmp2 = tcg_const_i32(insn);
2425 gen_helper_get_cp(tmp, cpu_env, tmp2);
2426 tcg_temp_free(tmp2);
2427 store_reg(s, rd, tmp);
2428 } else {
2429 if (!env->cp[cp].cp_write)
2430 return 1;
2431 gen_set_pc_im(s->pc);
2432 tmp = load_reg(s, rd);
2433 tmp2 = tcg_const_i32(insn);
2434 gen_helper_set_cp(cpu_env, tmp2, tmp);
2435 tcg_temp_free(tmp2);
2436 tcg_temp_free_i32(tmp);
2437 }
2438 return 0;
2439 }
2440
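/* Return nonzero if this cp15 access is permitted in user mode:
   accesses to the c13 TLS registers and the ISB/DSB/DMB barriers. */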
2441 static int cp15_user_ok(uint32_t insn)
2442 {
2443 int cpn = (insn >> 16) & 0xf;
2444 int cpm = insn & 0xf;
2445 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2446
2447 if (cpn == 13 && cpm == 0) {
2448 /* TLS register. */
2449 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2450 return 1;
2451 }
2452 if (cpn == 7) {
2453 /* ISB, DSB, DMB. */
2454 if ((cpm == 5 && op == 4)
2455 || (cpm == 10 && (op == 4 || op == 5)))
2456 return 1;
2457 }
2458 return 0;
2459 }
2460
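/* Handle MRC/MCR accesses to the cp15 c13 TLS registers. Returns nonzero
   if the access was handled here, zero to fall through to the generic
   cp15 handling. */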
2461 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2462 {
2463 TCGv tmp;
2464 int cpn = (insn >> 16) & 0xf;
2465 int cpm = insn & 0xf;
2466 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2467
2468 if (!arm_feature(env, ARM_FEATURE_V6K))
2469 return 0;
2470
2471 if (!(cpn == 13 && cpm == 0))
2472 return 0;
2473
2474 if (insn & ARM_CP_RW_BIT) {
2475 switch (op) {
2476 case 2:
2477 tmp = load_cpu_field(cp15.c13_tls1);
2478 break;
2479 case 3:
2480 tmp = load_cpu_field(cp15.c13_tls2);
2481 break;
2482 case 4:
2483 tmp = load_cpu_field(cp15.c13_tls3);
2484 break;
2485 default:
2486 return 0;
2487 }
2488 store_reg(s, rd, tmp);
2489
2490 } else {
2491 tmp = load_reg(s, rd);
2492 switch (op) {
2493 case 2:
2494 store_cpu_field(tmp, cp15.c13_tls1);
2495 break;
2496 case 3:
2497 store_cpu_field(tmp, cp15.c13_tls2);
2498 break;
2499 case 4:
2500 store_cpu_field(tmp, cp15.c13_tls3);
2501 break;
2502 default:
2503 tcg_temp_free_i32(tmp);
2504 return 0;
2505 }
2506 }
2507 return 1;
2508 }
2509
2510 /* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2511 instruction is not defined. */
2512 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2513 {
2514 uint32_t rd;
2515 TCGv tmp, tmp2;
2516
2517 /* M profile cores use memory mapped registers instead of cp15. */
2518 if (arm_feature(env, ARM_FEATURE_M))
2519 return 1;
2520
2521 if ((insn & (1 << 25)) == 0) {
2522 if (insn & (1 << 20)) {
2523 /* mrrc */
2524 return 1;
2525 }
2526 /* mcrr. Used for block cache operations, so implement as no-op. */
2527 return 0;
2528 }
2529 if ((insn & (1 << 4)) == 0) {
2530 /* cdp */
2531 return 1;
2532 }
2533 if (IS_USER(s) && !cp15_user_ok(insn)) {
2534 return 1;
2535 }
2536
2537 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2538 * instructions rather than a separate instruction.
2539 */
2540 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2541 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2542 * In v7, this must NOP.
2543 */
2544 if (!arm_feature(env, ARM_FEATURE_V7)) {
2545 /* Wait for interrupt. */
2546 gen_set_pc_im(s->pc);
2547 s->is_jmp = DISAS_WFI;
2548 }
2549 return 0;
2550 }
2551
2552 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2553 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2554 * so this is slightly over-broad.
2555 */
2556 if (!arm_feature(env, ARM_FEATURE_V6)) {
2557 /* Wait for interrupt. */
2558 gen_set_pc_im(s->pc);
2559 s->is_jmp = DISAS_WFI;
2560 return 0;
2561 }
2562 /* Otherwise fall through to handle via helper function.
2563 * In particular, on v7 and some v6 cores this is one of
2564 * the VA-PA registers.
2565 */
2566 }
2567
2568 rd = (insn >> 12) & 0xf;
2569
2570 if (cp15_tls_load_store(env, s, insn, rd))
2571 return 0;
2572
2573 tmp2 = tcg_const_i32(insn);
2574 if (insn & ARM_CP_RW_BIT) {
2575 tmp = tcg_temp_new_i32();
2576 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2577 /* If the destination register is r15 then the result sets the condition codes. */
2578 if (rd != 15)
2579 store_reg(s, rd, tmp);
2580 else
2581 tcg_temp_free_i32(tmp);
2582 } else {
2583 tmp = load_reg(s, rd);
2584 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2585 tcg_temp_free_i32(tmp);
2586 /* Normally we would always end the TB here, but Linux
2587 * arch/arm/mach-pxa/sleep.S expects two instructions following
2588 * an MMU enable to execute from cache. Imitate this behaviour. */
2589 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2590 (insn & 0x0fff0fff) != 0x0e010f10)
2591 gen_lookup_tb(s);
2592 }
2593 tcg_temp_free_i32(tmp2);
2594 return 0;
2595 }
2596
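/* Extract VFP register numbers from an instruction: the 4-bit field is
   combined with the extra D/N/M bit. For double registers the extra bit
   is only valid on VFP3 (32 double registers); otherwise it must be zero
   or the instruction is UNDEF. */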
2597 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2598 #define VFP_SREG(insn, bigbit, smallbit) \
2599 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2600 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2601 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2602 reg = (((insn) >> (bigbit)) & 0x0f) \
2603 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2604 } else { \
2605 if (insn & (1 << (smallbit))) \
2606 return 1; \
2607 reg = ((insn) >> (bigbit)) & 0x0f; \
2608 }} while (0)
2609
2610 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2611 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2612 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2613 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2614 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2615 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2616
2617 /* Move between integer and VFP cores. */
2618 static TCGv gen_vfp_mrs(void)
2619 {
2620 TCGv tmp = tcg_temp_new_i32();
2621 tcg_gen_mov_i32(tmp, cpu_F0s);
2622 return tmp;
2623 }
2624
2625 static void gen_vfp_msr(TCGv tmp)
2626 {
2627 tcg_gen_mov_i32(cpu_F0s, tmp);
2628 tcg_temp_free_i32(tmp);
2629 }
2630
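/* Replicate a byte or halfword of 'var' across the whole 32-bit value,
   as needed by VDUP and the load-and-replicate Neon loads. */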
2631 static void gen_neon_dup_u8(TCGv var, int shift)
2632 {
2633 TCGv tmp = tcg_temp_new_i32();
2634 if (shift)
2635 tcg_gen_shri_i32(var, var, shift);
2636 tcg_gen_ext8u_i32(var, var);
2637 tcg_gen_shli_i32(tmp, var, 8);
2638 tcg_gen_or_i32(var, var, tmp);
2639 tcg_gen_shli_i32(tmp, var, 16);
2640 tcg_gen_or_i32(var, var, tmp);
2641 tcg_temp_free_i32(tmp);
2642 }
2643
2644 static void gen_neon_dup_low16(TCGv var)
2645 {
2646 TCGv tmp = tcg_temp_new_i32();
2647 tcg_gen_ext16u_i32(var, var);
2648 tcg_gen_shli_i32(tmp, var, 16);
2649 tcg_gen_or_i32(var, var, tmp);
2650 tcg_temp_free_i32(tmp);
2651 }
2652
2653 static void gen_neon_dup_high16(TCGv var)
2654 {
2655 TCGv tmp = tcg_temp_new_i32();
2656 tcg_gen_andi_i32(var, var, 0xffff0000);
2657 tcg_gen_shri_i32(tmp, var, 16);
2658 tcg_gen_or_i32(var, var, tmp);
2659 tcg_temp_free_i32(tmp);
2660 }
2661
2662 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2663 {
2664 /* Load a single Neon element and replicate it into a 32-bit TCG reg */
2665 TCGv tmp;
2666 switch (size) {
2667 case 0:
2668 tmp = gen_ld8u(addr, IS_USER(s));
2669 gen_neon_dup_u8(tmp, 0);
2670 break;
2671 case 1:
2672 tmp = gen_ld16u(addr, IS_USER(s));
2673 gen_neon_dup_low16(tmp);
2674 break;
2675 case 2:
2676 tmp = gen_ld32(addr, IS_USER(s));
2677 break;
2678 default: /* Avoid compiler warnings. */
2679 abort();
2680 }
2681 return tmp;
2682 }
2683
2684 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2685 (i.e. an undefined instruction). */
2686 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2687 {
2688 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2689 int dp, veclen;
2690 TCGv addr;
2691 TCGv tmp;
2692 TCGv tmp2;
2693
2694 if (!arm_feature(env, ARM_FEATURE_VFP))
2695 return 1;
2696
2697 if (!s->vfp_enabled) {
2698 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2699 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2700 return 1;
2701 rn = (insn >> 16) & 0xf;
2702 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2703 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2704 return 1;
2705 }
2706 dp = ((insn & 0xf00) == 0xb00);
2707 switch ((insn >> 24) & 0xf) {
2708 case 0xe:
2709 if (insn & (1 << 4)) {
2710 /* single register transfer */
2711 rd = (insn >> 12) & 0xf;
2712 if (dp) {
2713 int size;
2714 int pass;
2715
2716 VFP_DREG_N(rn, insn);
2717 if (insn & 0xf)
2718 return 1;
2719 if (insn & 0x00c00060
2720 && !arm_feature(env, ARM_FEATURE_NEON))
2721 return 1;
2722
2723 pass = (insn >> 21) & 1;
2724 if (insn & (1 << 22)) {
2725 size = 0;
2726 offset = ((insn >> 5) & 3) * 8;
2727 } else if (insn & (1 << 5)) {
2728 size = 1;
2729 offset = (insn & (1 << 6)) ? 16 : 0;
2730 } else {
2731 size = 2;
2732 offset = 0;
2733 }
2734 if (insn & ARM_CP_RW_BIT) {
2735 /* vfp->arm */
2736 tmp = neon_load_reg(rn, pass);
2737 switch (size) {
2738 case 0:
2739 if (offset)
2740 tcg_gen_shri_i32(tmp, tmp, offset);
2741 if (insn & (1 << 23))
2742 gen_uxtb(tmp);
2743 else
2744 gen_sxtb(tmp);
2745 break;
2746 case 1:
2747 if (insn & (1 << 23)) {
2748 if (offset) {
2749 tcg_gen_shri_i32(tmp, tmp, 16);
2750 } else {
2751 gen_uxth(tmp);
2752 }
2753 } else {
2754 if (offset) {
2755 tcg_gen_sari_i32(tmp, tmp, 16);
2756 } else {
2757 gen_sxth(tmp);
2758 }
2759 }
2760 break;
2761 case 2:
2762 break;
2763 }
2764 store_reg(s, rd, tmp);
2765 } else {
2766 /* arm->vfp */
2767 tmp = load_reg(s, rd);
2768 if (insn & (1 << 23)) {
2769 /* VDUP */
2770 if (size == 0) {
2771 gen_neon_dup_u8(tmp, 0);
2772 } else if (size == 1) {
2773 gen_neon_dup_low16(tmp);
2774 }
2775 for (n = 0; n <= pass * 2; n++) {
2776 tmp2 = tcg_temp_new_i32();
2777 tcg_gen_mov_i32(tmp2, tmp);
2778 neon_store_reg(rn, n, tmp2);
2779 }
2780 neon_store_reg(rn, n, tmp);
2781 } else {
2782 /* VMOV */
2783 switch (size) {
2784 case 0:
2785 tmp2 = neon_load_reg(rn, pass);
2786 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2787 tcg_temp_free_i32(tmp2);
2788 break;
2789 case 1:
2790 tmp2 = neon_load_reg(rn, pass);
2791 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2792 tcg_temp_free_i32(tmp2);
2793 break;
2794 case 2:
2795 break;
2796 }
2797 neon_store_reg(rn, pass, tmp);
2798 }
2799 }
2800 } else { /* !dp */
2801 if ((insn & 0x6f) != 0x00)
2802 return 1;
2803 rn = VFP_SREG_N(insn);
2804 if (insn & ARM_CP_RW_BIT) {
2805 /* vfp->arm */
2806 if (insn & (1 << 21)) {
2807 /* system register */
2808 rn >>= 1;
2809
2810 switch (rn) {
2811 case ARM_VFP_FPSID:
2812 /* VFP2 allows access to FPSID from userspace.
2813 VFP3 restricts all ID registers to privileged
2814 accesses. */
2815 if (IS_USER(s)
2816 && arm_feature(env, ARM_FEATURE_VFP3))
2817 return 1;
2818 tmp = load_cpu_field(vfp.xregs[rn]);
2819 break;
2820 case ARM_VFP_FPEXC:
2821 if (IS_USER(s))
2822 return 1;
2823 tmp = load_cpu_field(vfp.xregs[rn]);
2824 break;
2825 case ARM_VFP_FPINST:
2826 case ARM_VFP_FPINST2:
2827 /* Not present in VFP3. */
2828 if (IS_USER(s)
2829 || arm_feature(env, ARM_FEATURE_VFP3))
2830 return 1;
2831 tmp = load_cpu_field(vfp.xregs[rn]);
2832 break;
2833 case ARM_VFP_FPSCR:
2834 if (rd == 15) {
2835 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2836 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2837 } else {
2838 tmp = tcg_temp_new_i32();
2839 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2840 }
2841 break;
2842 case ARM_VFP_MVFR0:
2843 case ARM_VFP_MVFR1:
2844 if (IS_USER(s)
2845 || !arm_feature(env, ARM_FEATURE_VFP3))
2846 return 1;
2847 tmp = load_cpu_field(vfp.xregs[rn]);
2848 break;
2849 default:
2850 return 1;
2851 }
2852 } else {
2853 gen_mov_F0_vreg(0, rn);
2854 tmp = gen_vfp_mrs();
2855 }
2856 if (rd == 15) {
2857 /* Set the 4 flag bits in the CPSR. */
2858 gen_set_nzcv(tmp);
2859 tcg_temp_free_i32(tmp);
2860 } else {
2861 store_reg(s, rd, tmp);
2862 }
2863 } else {
2864 /* arm->vfp */
2865 tmp = load_reg(s, rd);
2866 if (insn & (1 << 21)) {
2867 rn >>= 1;
2868 /* system register */
2869 switch (rn) {
2870 case ARM_VFP_FPSID:
2871 case ARM_VFP_MVFR0:
2872 case ARM_VFP_MVFR1:
2873 /* Writes are ignored. */
2874 break;
2875 case ARM_VFP_FPSCR:
2876 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2877 tcg_temp_free_i32(tmp);
2878 gen_lookup_tb(s);
2879 break;
2880 case ARM_VFP_FPEXC:
2881 if (IS_USER(s))
2882 return 1;
2883 /* TODO: VFP subarchitecture support.
2884 * For now, keep the EN bit only */
2885 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2886 store_cpu_field(tmp, vfp.xregs[rn]);
2887 gen_lookup_tb(s);
2888 break;
2889 case ARM_VFP_FPINST:
2890 case ARM_VFP_FPINST2:
2891 store_cpu_field(tmp, vfp.xregs[rn]);
2892 break;
2893 default:
2894 return 1;
2895 }
2896 } else {
2897 gen_vfp_msr(tmp);
2898 gen_mov_vreg_F0(0, rn);
2899 }
2900 }
2901 }
2902 } else {
2903 /* data processing */
2904 /* The opcode is in bits 23, 21, 20 and 6. */
2905 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2906 if (dp) {
2907 if (op == 15) {
2908 /* rn is opcode */
2909 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2910 } else {
2911 /* rn is register number */
2912 VFP_DREG_N(rn, insn);
2913 }
2914
2915 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2916 /* Integer or single precision destination. */
2917 rd = VFP_SREG_D(insn);
2918 } else {
2919 VFP_DREG_D(rd, insn);
2920 }
2921 if (op == 15 &&
2922 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2923 /* VCVT from int is always from an S reg regardless of the dp bit.
2924 * VCVT with an immediate frac_bits value has the same format as SREG_M.
2925 */
2926 rm = VFP_SREG_M(insn);
2927 } else {
2928 VFP_DREG_M(rm, insn);
2929 }
2930 } else {
2931 rn = VFP_SREG_N(insn);
2932 if (op == 15 && rn == 15) {
2933 /* Double precision destination. */
2934 VFP_DREG_D(rd, insn);
2935 } else {
2936 rd = VFP_SREG_D(insn);
2937 }
2938 /* NB that we implicitly rely on the encoding for the frac_bits
2939 * in VCVT of fixed to float being the same as that of an SREG_M
2940 */
2941 rm = VFP_SREG_M(insn);
2942 }
2943
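/* VFP short-vector handling: vec_len and vec_stride reflect the FPSCR
   LEN/STRIDE fields. An operation whose destination lies in the first
   register bank is scalar; otherwise it is repeated veclen times,
   stepping the registers by the stride within their bank. */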
2944 veclen = s->vec_len;
2945 if (op == 15 && rn > 3)
2946 veclen = 0;
2947
2948 /* Shut up compiler warnings. */
2949 delta_m = 0;
2950 delta_d = 0;
2951 bank_mask = 0;
2952
2953 if (veclen > 0) {
2954 if (dp)
2955 bank_mask = 0xc;
2956 else
2957 bank_mask = 0x18;
2958
2959 /* Figure out what type of vector operation this is. */
2960 if ((rd & bank_mask) == 0) {
2961 /* scalar */
2962 veclen = 0;
2963 } else {
2964 if (dp)
2965 delta_d = (s->vec_stride >> 1) + 1;
2966 else
2967 delta_d = s->vec_stride + 1;
2968
2969 if ((rm & bank_mask) == 0) {
2970 /* mixed scalar/vector */
2971 delta_m = 0;
2972 } else {
2973 /* vector */
2974 delta_m = delta_d;
2975 }
2976 }
2977 }
2978
2979 /* Load the initial operands. */
2980 if (op == 15) {
2981 switch (rn) {
2982 case 16:
2983 case 17:
2984 /* Integer source */
2985 gen_mov_F0_vreg(0, rm);
2986 break;
2987 case 8:
2988 case 9:
2989 /* Compare */
2990 gen_mov_F0_vreg(dp, rd);
2991 gen_mov_F1_vreg(dp, rm);
2992 break;
2993 case 10:
2994 case 11:
2995 /* Compare with zero */
2996 gen_mov_F0_vreg(dp, rd);
2997 gen_vfp_F1_ld0(dp);
2998 break;
2999 case 20:
3000 case 21:
3001 case 22:
3002 case 23:
3003 case 28:
3004 case 29:
3005 case 30:
3006 case 31:
3007 /* Source and destination the same. */
3008 gen_mov_F0_vreg(dp, rd);
3009 break;
3010 default:
3011 /* One source operand. */
3012 gen_mov_F0_vreg(dp, rm);
3013 break;
3014 }
3015 } else {
3016 /* Two source operands. */
3017 gen_mov_F0_vreg(dp, rn);
3018 gen_mov_F1_vreg(dp, rm);
3019 }
3020
3021 for (;;) {
3022 /* Perform the calculation. */
3023 switch (op) {
3024 case 0: /* mac: fd + (fn * fm) */
3025 gen_vfp_mul(dp);
3026 gen_mov_F1_vreg(dp, rd);
3027 gen_vfp_add(dp);
3028 break;
3029 case 1: /* nmac: fd - (fn * fm) */
3030 gen_vfp_mul(dp);
3031 gen_vfp_neg(dp);
3032 gen_mov_F1_vreg(dp, rd);
3033 gen_vfp_add(dp);
3034 break;
3035 case 2: /* msc: -fd + (fn * fm) */
3036 gen_vfp_mul(dp);
3037 gen_mov_F1_vreg(dp, rd);
3038 gen_vfp_sub(dp);
3039 break;
3040 case 3: /* nmsc: -fd - (fn * fm) */
3041 gen_vfp_mul(dp);
3042 gen_vfp_neg(dp);
3043 gen_mov_F1_vreg(dp, rd);
3044 gen_vfp_sub(dp);
3045 break;
3046 case 4: /* mul: fn * fm */
3047 gen_vfp_mul(dp);
3048 break;
3049 case 5: /* nmul: -(fn * fm) */
3050 gen_vfp_mul(dp);
3051 gen_vfp_neg(dp);
3052 break;
3053 case 6: /* add: fn + fm */
3054 gen_vfp_add(dp);
3055 break;
3056 case 7: /* sub: fn - fm */
3057 gen_vfp_sub(dp);
3058 break;
3059 case 8: /* div: fn / fm */
3060 gen_vfp_div(dp);
3061 break;
3062 case 14: /* fconst */
3063 if (!arm_feature(env, ARM_FEATURE_VFP3))
3064 return 1;
3065
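/* Expand the 8-bit encoded immediate (sign, exponent and 4-bit fraction)
   into a full single- or double-precision constant; for doubles only the
   top 32 bits can be nonzero. */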
3066 n = (insn << 12) & 0x80000000;
3067 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3068 if (dp) {
3069 if (i & 0x40)
3070 i |= 0x3f80;
3071 else
3072 i |= 0x4000;
3073 n |= i << 16;
3074 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3075 } else {
3076 if (i & 0x40)
3077 i |= 0x780;
3078 else
3079 i |= 0x800;
3080 n |= i << 19;
3081 tcg_gen_movi_i32(cpu_F0s, n);
3082 }
3083 break;
3084 case 15: /* extension space */
3085 switch (rn) {
3086 case 0: /* cpy */
3087 /* no-op */
3088 break;
3089 case 1: /* abs */
3090 gen_vfp_abs(dp);
3091 break;
3092 case 2: /* neg */
3093 gen_vfp_neg(dp);
3094 break;
3095 case 3: /* sqrt */
3096 gen_vfp_sqrt(dp);
3097 break;
3098 case 4: /* vcvtb.f32.f16 */
3099 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3100 return 1;
3101 tmp = gen_vfp_mrs();
3102 tcg_gen_ext16u_i32(tmp, tmp);
3103 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3104 tcg_temp_free_i32(tmp);
3105 break;
3106 case 5: /* vcvtt.f32.f16 */
3107 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3108 return 1;
3109 tmp = gen_vfp_mrs();
3110 tcg_gen_shri_i32(tmp, tmp, 16);
3111 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3112 tcg_temp_free_i32(tmp);
3113 break;
3114 case 6: /* vcvtb.f16.f32 */
3115 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3116 return 1;
3117 tmp = tcg_temp_new_i32();
3118 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3119 gen_mov_F0_vreg(0, rd);
3120 tmp2 = gen_vfp_mrs();
3121 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3122 tcg_gen_or_i32(tmp, tmp, tmp2);
3123 tcg_temp_free_i32(tmp2);
3124 gen_vfp_msr(tmp);
3125 break;
3126 case 7: /* vcvtt.f16.f32 */
3127 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3128 return 1;
3129 tmp = tcg_temp_new_i32();
3130 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3131 tcg_gen_shli_i32(tmp, tmp, 16);
3132 gen_mov_F0_vreg(0, rd);
3133 tmp2 = gen_vfp_mrs();
3134 tcg_gen_ext16u_i32(tmp2, tmp2);
3135 tcg_gen_or_i32(tmp, tmp, tmp2);
3136 tcg_temp_free_i32(tmp2);
3137 gen_vfp_msr(tmp);
3138 break;
3139 case 8: /* cmp */
3140 gen_vfp_cmp(dp);
3141 break;
3142 case 9: /* cmpe */
3143 gen_vfp_cmpe(dp);
3144 break;
3145 case 10: /* cmpz */
3146 gen_vfp_cmp(dp);
3147 break;
3148 case 11: /* cmpez */
3149 gen_vfp_F1_ld0(dp);
3150 gen_vfp_cmpe(dp);
3151 break;
3152 case 15: /* single<->double conversion */
3153 if (dp)
3154 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3155 else
3156 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3157 break;
3158 case 16: /* fuito */
3159 gen_vfp_uito(dp);
3160 break;
3161 case 17: /* fsito */
3162 gen_vfp_sito(dp);
3163 break;
3164 case 20: /* fshto */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
3167 gen_vfp_shto(dp, 16 - rm);
3168 break;
3169 case 21: /* fslto */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
3172 gen_vfp_slto(dp, 32 - rm);
3173 break;
3174 case 22: /* fuhto */
3175 if (!arm_feature(env, ARM_FEATURE_VFP3))
3176 return 1;
3177 gen_vfp_uhto(dp, 16 - rm);
3178 break;
3179 case 23: /* fulto */
3180 if (!arm_feature(env, ARM_FEATURE_VFP3))
3181 return 1;
3182 gen_vfp_ulto(dp, 32 - rm);
3183 break;
3184 case 24: /* ftoui */
3185 gen_vfp_toui(dp);
3186 break;
3187 case 25: /* ftouiz */
3188 gen_vfp_touiz(dp);
3189 break;
3190 case 26: /* ftosi */
3191 gen_vfp_tosi(dp);
3192 break;
3193 case 27: /* ftosiz */
3194 gen_vfp_tosiz(dp);
3195 break;
3196 case 28: /* ftosh */
3197 if (!arm_feature(env, ARM_FEATURE_VFP3))
3198 return 1;
3199 gen_vfp_tosh(dp, 16 - rm);
3200 break;
3201 case 29: /* ftosl */
3202 if (!arm_feature(env, ARM_FEATURE_VFP3))
3203 return 1;
3204 gen_vfp_tosl(dp, 32 - rm);
3205 break;
3206 case 30: /* ftouh */
3207 if (!arm_feature(env, ARM_FEATURE_VFP3))
3208 return 1;
3209 gen_vfp_touh(dp, 16 - rm);
3210 break;
3211 case 31: /* ftoul */
3212 if (!arm_feature(env, ARM_FEATURE_VFP3))
3213 return 1;
3214 gen_vfp_toul(dp, 32 - rm);
3215 break;
3216 default: /* undefined */
3217 printf ("rn:%d\n", rn);
3218 return 1;
3219 }
3220 break;
3221 default: /* undefined */
3222 printf ("op:%d\n", op);
3223 return 1;
3224 }
3225
3226 /* Write back the result. */
3227 if (op == 15 && (rn >= 8 && rn <= 11))
3228 ; /* Comparison, do nothing. */
3229 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3230 /* VCVT double to int: always integer result. */
3231 gen_mov_vreg_F0(0, rd);
3232 else if (op == 15 && rn == 15)
3233 /* conversion */
3234 gen_mov_vreg_F0(!dp, rd);
3235 else
3236 gen_mov_vreg_F0(dp, rd);
3237
3238 /* break out of the loop if we have finished */
3239 if (veclen == 0)
3240 break;
3241
3242 if (op == 15 && delta_m == 0) {
3243 /* single source one-many */
3244 while (veclen--) {
3245 rd = ((rd + delta_d) & (bank_mask - 1))
3246 | (rd & bank_mask);
3247 gen_mov_vreg_F0(dp, rd);
3248 }
3249 break;
3250 }
3251 /* Set up the next operands. */
3252 veclen--;
3253 rd = ((rd + delta_d) & (bank_mask - 1))
3254 | (rd & bank_mask);
3255
3256 if (op == 15) {
3257 /* One source operand. */
3258 rm = ((rm + delta_m) & (bank_mask - 1))
3259 | (rm & bank_mask);
3260 gen_mov_F0_vreg(dp, rm);
3261 } else {
3262 /* Two source operands. */
3263 rn = ((rn + delta_d) & (bank_mask - 1))
3264 | (rn & bank_mask);
3265 gen_mov_F0_vreg(dp, rn);
3266 if (delta_m) {
3267 rm = ((rm + delta_m) & (bank_mask - 1))
3268 | (rm & bank_mask);
3269 gen_mov_F1_vreg(dp, rm);
3270 }
3271 }
3272 }
3273 }
3274 break;
3275 case 0xc:
3276 case 0xd:
3277 if ((insn & 0x03e00000) == 0x00400000) {
3278 /* two-register transfer */
3279 rn = (insn >> 16) & 0xf;
3280 rd = (insn >> 12) & 0xf;
3281 if (dp) {
3282 VFP_DREG_M(rm, insn);
3283 } else {
3284 rm = VFP_SREG_M(insn);
3285 }
3286
3287 if (insn & ARM_CP_RW_BIT) {
3288 /* vfp->arm */
3289 if (dp) {
3290 gen_mov_F0_vreg(0, rm * 2);
3291 tmp = gen_vfp_mrs();
3292 store_reg(s, rd, tmp);
3293 gen_mov_F0_vreg(0, rm * 2 + 1);
3294 tmp = gen_vfp_mrs();
3295 store_reg(s, rn, tmp);
3296 } else {
3297 gen_mov_F0_vreg(0, rm);
3298 tmp = gen_vfp_mrs();
3299 store_reg(s, rd, tmp);
3300 gen_mov_F0_vreg(0, rm + 1);
3301 tmp = gen_vfp_mrs();
3302 store_reg(s, rn, tmp);
3303 }
3304 } else {
3305 /* arm->vfp */
3306 if (dp) {
3307 tmp = load_reg(s, rd);
3308 gen_vfp_msr(tmp);
3309 gen_mov_vreg_F0(0, rm * 2);
3310 tmp = load_reg(s, rn);
3311 gen_vfp_msr(tmp);
3312 gen_mov_vreg_F0(0, rm * 2 + 1);
3313 } else {
3314 tmp = load_reg(s, rd);
3315 gen_vfp_msr(tmp);
3316 gen_mov_vreg_F0(0, rm);
3317 tmp = load_reg(s, rn);
3318 gen_vfp_msr(tmp);
3319 gen_mov_vreg_F0(0, rm + 1);
3320 }
3321 }
3322 } else {
3323 /* Load/store */
3324 rn = (insn >> 16) & 0xf;
3325 if (dp)
3326 VFP_DREG_D(rd, insn);
3327 else
3328 rd = VFP_SREG_D(insn);
3329 if (s->thumb && rn == 15) {
3330 addr = tcg_temp_new_i32();
3331 tcg_gen_movi_i32(addr, s->pc & ~2);
3332 } else {
3333 addr = load_reg(s, rn);
3334 }
3335 if ((insn & 0x01200000) == 0x01000000) {
3336 /* Single load/store */
3337 offset = (insn & 0xff) << 2;
3338 if ((insn & (1 << 23)) == 0)
3339 offset = -offset;
3340 tcg_gen_addi_i32(addr, addr, offset);
3341 if (insn & (1 << 20)) {
3342 gen_vfp_ld(s, dp, addr);
3343 gen_mov_vreg_F0(dp, rd);
3344 } else {
3345 gen_mov_F0_vreg(dp, rd);
3346 gen_vfp_st(s, dp, addr);
3347 }
3348 tcg_temp_free_i32(addr);
3349 } else {
3350 /* load/store multiple */
3351 if (dp)
3352 n = (insn >> 1) & 0x7f;
3353 else
3354 n = insn & 0xff;
3355
3356 if (insn & (1 << 24)) /* pre-decrement */
3357 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3358
3359 if (dp)
3360 offset = 8;
3361 else
3362 offset = 4;
3363 for (i = 0; i < n; i++) {
3364 if (insn & ARM_CP_RW_BIT) {
3365 /* load */
3366 gen_vfp_ld(s, dp, addr);
3367 gen_mov_vreg_F0(dp, rd + i);
3368 } else {
3369 /* store */
3370 gen_mov_F0_vreg(dp, rd + i);
3371 gen_vfp_st(s, dp, addr);
3372 }
3373 tcg_gen_addi_i32(addr, addr, offset);
3374 }
3375 if (insn & (1 << 21)) {
3376 /* writeback */
3377 if (insn & (1 << 24))
3378 offset = -offset * n;
3379 else if (dp && (insn & 1))
3380 offset = 4;
3381 else
3382 offset = 0;
3383
3384 if (offset != 0)
3385 tcg_gen_addi_i32(addr, addr, offset);
3386 store_reg(s, rn, addr);
3387 } else {
3388 tcg_temp_free_i32(addr);
3389 }
3390 }
3391 }
3392 break;
3393 default:
3394 /* Should never happen. */
3395 return 1;
3396 }
3397 return 0;
3398 }
3399
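/* Branch to 'dest': use a chained goto_tb when the target is on the same
   guest page as this TB, otherwise set the PC and exit the TB. */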
3400 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3401 {
3402 TranslationBlock *tb;
3403
3404 tb = s->tb;
3405 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3406 tcg_gen_goto_tb(n);
3407 gen_set_pc_im(dest);
3408 tcg_gen_exit_tb((tcg_target_long)tb + n);
3409 } else {
3410 gen_set_pc_im(dest);
3411 tcg_gen_exit_tb(0);
3412 }
3413 }
3414
3415 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3416 {
3417 if (unlikely(s->singlestep_enabled)) {
3418 /* An indirect jump so that we still trigger the debug exception. */
3419 if (s->thumb)
3420 dest |= 1;
3421 gen_bx_im(s, dest);
3422 } else {
3423 gen_goto_tb(s, 0, dest);
3424 s->is_jmp = DISAS_TB_JUMP;
3425 }
3426 }
3427
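/* 16x16->32 signed multiply used by SMULxy/SMLAxy: x and y select the
   top or bottom halfword of each operand. */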
3428 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3429 {
3430 if (x)
3431 tcg_gen_sari_i32(t0, t0, 16);
3432 else
3433 gen_sxth(t0);
3434 if (y)
3435 tcg_gen_sari_i32(t1, t1, 16);
3436 else
3437 gen_sxth(t1);
3438 tcg_gen_mul_i32(t0, t0, t1);
3439 }
3440
3441 /* Return the mask of PSR bits set by an MSR instruction. */
3442 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3443 uint32_t mask;
3444
3445 mask = 0;
3446 if (flags & (1 << 0))
3447 mask |= 0xff;
3448 if (flags & (1 << 1))
3449 mask |= 0xff00;
3450 if (flags & (1 << 2))
3451 mask |= 0xff0000;
3452 if (flags & (1 << 3))
3453 mask |= 0xff000000;
3454
3455 /* Mask out undefined bits. */
3456 mask &= ~CPSR_RESERVED;
3457 if (!arm_feature(env, ARM_FEATURE_V4T))
3458 mask &= ~CPSR_T;
3459 if (!arm_feature(env, ARM_FEATURE_V5))
3460 mask &= ~CPSR_Q; /* V5TE in reality */
3461 if (!arm_feature(env, ARM_FEATURE_V6))
3462 mask &= ~(CPSR_E | CPSR_GE);
3463 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3464 mask &= ~CPSR_IT;
3465 /* Mask out execution state bits. */
3466 if (!spsr)
3467 mask &= ~CPSR_EXEC;
3468 /* Mask out privileged bits. */
3469 if (IS_USER(s))
3470 mask &= CPSR_USER;
3471 return mask;
3472 }
3473
3474 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3475 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3476 {
3477 TCGv tmp;
3478 if (spsr) {
3479 /* ??? This is also undefined in system mode. */
3480 if (IS_USER(s))
3481 return 1;
3482
3483 tmp = load_cpu_field(spsr);
3484 tcg_gen_andi_i32(tmp, tmp, ~mask);
3485 tcg_gen_andi_i32(t0, t0, mask);
3486 tcg_gen_or_i32(tmp, tmp, t0);
3487 store_cpu_field(tmp, spsr);
3488 } else {
3489 gen_set_cpsr(t0, mask);
3490 }
3491 tcg_temp_free_i32(t0);
3492 gen_lookup_tb(s);
3493 return 0;
3494 }
3495
3496 /* Returns nonzero if access to the PSR is not permitted. */
3497 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3498 {
3499 TCGv tmp;
3500 tmp = tcg_temp_new_i32();
3501 tcg_gen_movi_i32(tmp, val);
3502 return gen_set_psr(s, mask, spsr, tmp);
3503 }
3504
3505 /* Generate an old-style exception return. Marks pc as dead. */
3506 static void gen_exception_return(DisasContext *s, TCGv pc)
3507 {
3508 TCGv tmp;
3509 store_reg(s, 15, pc);
3510 tmp = load_cpu_field(spsr);
3511 gen_set_cpsr(tmp, 0xffffffff);
3512 tcg_temp_free_i32(tmp);
3513 s->is_jmp = DISAS_UPDATE;
3514 }
3515
3516 /* Generate a v6 exception return. Marks both values as dead. */
3517 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3518 {
3519 gen_set_cpsr(cpsr, 0xffffffff);
3520 tcg_temp_free_i32(cpsr);
3521 store_reg(s, 15, pc);
3522 s->is_jmp = DISAS_UPDATE;
3523 }
3524
3525 static inline void
3526 gen_set_condexec (DisasContext *s)
3527 {
3528 if (s->condexec_mask) {
3529 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3530 TCGv tmp = tcg_temp_new_i32();
3531 tcg_gen_movi_i32(tmp, val);
3532 store_cpu_field(tmp, condexec_bits);
3533 }
3534 }
3535
3536 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3537 {
3538 gen_set_condexec(s);
3539 gen_set_pc_im(s->pc - offset);
3540 gen_exception(excp);
3541 s->is_jmp = DISAS_JUMP;
3542 }
3543
3544 static void gen_nop_hint(DisasContext *s, int val)
3545 {
3546 switch (val) {
3547 case 3: /* wfi */
3548 gen_set_pc_im(s->pc);
3549 s->is_jmp = DISAS_WFI;
3550 break;
3551 case 2: /* wfe */
3552 case 4: /* sev */
3553 /* TODO: Implement SEV and WFE. May help SMP performance. */
3554 default: /* nop */
3555 break;
3556 }
3557 }
3558
3559 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3560
3561 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3562 {
3563 switch (size) {
3564 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3565 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3566 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3567 default: abort();
3568 }
3569 }
3570
3571 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3572 {
3573 switch (size) {
3574 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3575 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3576 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3577 default: return;
3578 }
3579 }
3580
3581 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3582 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3583 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3584 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3585 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3586
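/* Invoke the signed/unsigned 8/16/32-bit variant of a Neon integer helper
   according to 'size' and 'u'; the unused size == 3 encoding makes the
   caller return 1 (UNDEF). */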
3587 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3588 switch ((size << 1) | u) { \
3589 case 0: \
3590 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3591 break; \
3592 case 1: \
3593 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3594 break; \
3595 case 2: \
3596 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3597 break; \
3598 case 3: \
3599 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3600 break; \
3601 case 4: \
3602 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3603 break; \
3604 case 5: \
3605 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3606 break; \
3607 default: return 1; \
3608 }} while (0)
3609
3610 #define GEN_NEON_INTEGER_OP(name) do { \
3611 switch ((size << 1) | u) { \
3612 case 0: \
3613 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3614 break; \
3615 case 1: \
3616 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3617 break; \
3618 case 2: \
3619 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3620 break; \
3621 case 3: \
3622 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3623 break; \
3624 case 4: \
3625 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3626 break; \
3627 case 5: \
3628 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3629 break; \
3630 default: return 1; \
3631 }} while (0)
3632
3633 static TCGv neon_load_scratch(int scratch)
3634 {
3635 TCGv tmp = tcg_temp_new_i32();
3636 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3637 return tmp;
3638 }
3639
3640 static void neon_store_scratch(int scratch, TCGv var)
3641 {
3642 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3643 tcg_temp_free_i32(var);
3644 }
3645
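/* Load a Neon by-scalar operand; 16-bit elements are replicated across
   both halves of the returned 32-bit value. */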
3646 static inline TCGv neon_get_scalar(int size, int reg)
3647 {
3648 TCGv tmp;
3649 if (size == 1) {
3650 tmp = neon_load_reg(reg & 7, reg >> 4);
3651 if (reg & 8) {
3652 gen_neon_dup_high16(tmp);
3653 } else {
3654 gen_neon_dup_low16(tmp);
3655 }
3656 } else {
3657 tmp = neon_load_reg(reg & 15, reg >> 4);
3658 }
3659 return tmp;
3660 }
3661
3662 static int gen_neon_unzip(int rd, int rm, int size, int q)
3663 {
3664 TCGv tmp, tmp2;
3665 if (!q && size == 2) {
3666 return 1;
3667 }
3668 tmp = tcg_const_i32(rd);
3669 tmp2 = tcg_const_i32(rm);
3670 if (q) {
3671 switch (size) {
3672 case 0:
3673 gen_helper_neon_qunzip8(tmp, tmp2);
3674 break;
3675 case 1:
3676 gen_helper_neon_qunzip16(tmp, tmp2);
3677 break;
3678 case 2:
3679 gen_helper_neon_qunzip32(tmp, tmp2);
3680 break;
3681 default:
3682 abort();
3683 }
3684 } else {
3685 switch (size) {
3686 case 0:
3687 gen_helper_neon_unzip8(tmp, tmp2);
3688 break;
3689 case 1:
3690 gen_helper_neon_unzip16(tmp, tmp2);
3691 break;
3692 default:
3693 abort();
3694 }
3695 }
3696 tcg_temp_free_i32(tmp);
3697 tcg_temp_free_i32(tmp2);
3698 return 0;
3699 }
3700
3701 static int gen_neon_zip(int rd, int rm, int size, int q)
3702 {
3703 TCGv tmp, tmp2;
3704 if (!q && size == 2) {
3705 return 1;
3706 }
3707 tmp = tcg_const_i32(rd);
3708 tmp2 = tcg_const_i32(rm);
3709 if (q) {
3710 switch (size) {
3711 case 0:
3712 gen_helper_neon_qzip8(tmp, tmp2);
3713 break;
3714 case 1:
3715 gen_helper_neon_qzip16(tmp, tmp2);
3716 break;
3717 case 2:
3718 gen_helper_neon_qzip32(tmp, tmp2);
3719 break;
3720 default:
3721 abort();
3722 }
3723 } else {
3724 switch (size) {
3725 case 0:
3726 gen_helper_neon_zip8(tmp, tmp2);
3727 break;
3728 case 1:
3729 gen_helper_neon_zip16(tmp, tmp2);
3730 break;
3731 default:
3732 abort();
3733 }
3734 }
3735 tcg_temp_free_i32(tmp);
3736 tcg_temp_free_i32(tmp2);
3737 return 0;
3738 }
3739
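/* Element transpose (VTRN) of 8-bit and 16-bit lanes across a pair of
   32-bit values. */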
3740 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3741 {
3742 TCGv rd, tmp;
3743
3744 rd = tcg_temp_new_i32();
3745 tmp = tcg_temp_new_i32();
3746
3747 tcg_gen_shli_i32(rd, t0, 8);
3748 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3749 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3750 tcg_gen_or_i32(rd, rd, tmp);
3751
3752 tcg_gen_shri_i32(t1, t1, 8);
3753 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3754 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3755 tcg_gen_or_i32(t1, t1, tmp);
3756 tcg_gen_mov_i32(t0, rd);
3757
3758 tcg_temp_free_i32(tmp);
3759 tcg_temp_free_i32(rd);
3760 }
3761
3762 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3763 {
3764 TCGv rd, tmp;
3765
3766 rd = tcg_temp_new_i32();
3767 tmp = tcg_temp_new_i32();
3768
3769 tcg_gen_shli_i32(rd, t0, 16);
3770 tcg_gen_andi_i32(tmp, t1, 0xffff);
3771 tcg_gen_or_i32(rd, rd, tmp);
3772 tcg_gen_shri_i32(t1, t1, 16);
3773 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3774 tcg_gen_or_i32(t1, t1, tmp);
3775 tcg_gen_mov_i32(t0, rd);
3776
3777 tcg_temp_free_i32(tmp);
3778 tcg_temp_free_i32(rd);
3779 }
3780
3781
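/* Register count, interleave pattern and D-register spacing for each of
   the VLDn/VSTn "multiple structures" forms, indexed by the op field. */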
3782 static struct {
3783 int nregs;
3784 int interleave;
3785 int spacing;
3786 } neon_ls_element_type[11] = {
3787 {4, 4, 1},
3788 {4, 4, 2},
3789 {4, 1, 1},
3790 {4, 2, 1},
3791 {3, 3, 1},
3792 {3, 3, 2},
3793 {3, 1, 1},
3794 {1, 1, 1},
3795 {2, 2, 1},
3796 {2, 2, 2},
3797 {2, 1, 1}
3798 };
3799
3800 /* Translate a NEON load/store element instruction. Return nonzero if the
3801 instruction is invalid. */
3802 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3803 {
3804 int rd, rn, rm;
3805 int op;
3806 int nregs;
3807 int interleave;
3808 int spacing;
3809 int stride;
3810 int size;
3811 int reg;
3812 int pass;
3813 int load;
3814 int shift;
3815 int n;
3816 TCGv addr;
3817 TCGv tmp;
3818 TCGv tmp2;
3819 TCGv_i64 tmp64;
3820
3821 if (!s->vfp_enabled)
3822 return 1;
3823 VFP_DREG_D(rd, insn);
3824 rn = (insn >> 16) & 0xf;
3825 rm = insn & 0xf;
3826 load = (insn & (1 << 21)) != 0;
3827 if ((insn & (1 << 23)) == 0) {
3828 /* Load store all elements. */
3829 op = (insn >> 8) & 0xf;
3830 size = (insn >> 6) & 3;
3831 if (op > 10)
3832 return 1;
3833 nregs = neon_ls_element_type[op].nregs;
3834 interleave = neon_ls_element_type[op].interleave;
3835 spacing = neon_ls_element_type[op].spacing;
3836 if (size == 3 && (interleave | spacing) != 1)
3837 return 1;
3838 addr = tcg_temp_new_i32();
3839 load_reg_var(s, addr, rn);
3840 stride = (1 << size) * interleave;
3841 for (reg = 0; reg < nregs; reg++) {
3842 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3843 load_reg_var(s, addr, rn);
3844 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3845 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3846 load_reg_var(s, addr, rn);
3847 tcg_gen_addi_i32(addr, addr, 1 << size);
3848 }
3849 if (size == 3) {
3850 if (load) {
3851 tmp64 = gen_ld64(addr, IS_USER(s));
3852 neon_store_reg64(tmp64, rd);
3853 tcg_temp_free_i64(tmp64);
3854 } else {
3855 tmp64 = tcg_temp_new_i64();
3856 neon_load_reg64(tmp64, rd);
3857 gen_st64(tmp64, addr, IS_USER(s));
3858 }
3859 tcg_gen_addi_i32(addr, addr, stride);
3860 } else {
3861 for (pass = 0; pass < 2; pass++) {
3862 if (size == 2) {
3863 if (load) {
3864 tmp = gen_ld32(addr, IS_USER(s));
3865 neon_store_reg(rd, pass, tmp);
3866 } else {
3867 tmp = neon_load_reg(rd, pass);
3868 gen_st32(tmp, addr, IS_USER(s));
3869 }
3870 tcg_gen_addi_i32(addr, addr, stride);
3871 } else if (size == 1) {
3872 if (load) {
3873 tmp = gen_ld16u(addr, IS_USER(s));
3874 tcg_gen_addi_i32(addr, addr, stride);
3875 tmp2 = gen_ld16u(addr, IS_USER(s));
3876 tcg_gen_addi_i32(addr, addr, stride);
3877 tcg_gen_shli_i32(tmp2, tmp2, 16);
3878 tcg_gen_or_i32(tmp, tmp, tmp2);
3879 tcg_temp_free_i32(tmp2);
3880 neon_store_reg(rd, pass, tmp);
3881 } else {
3882 tmp = neon_load_reg(rd, pass);
3883 tmp2 = tcg_temp_new_i32();
3884 tcg_gen_shri_i32(tmp2, tmp, 16);
3885 gen_st16(tmp, addr, IS_USER(s));
3886 tcg_gen_addi_i32(addr, addr, stride);
3887 gen_st16(tmp2, addr, IS_USER(s));
3888 tcg_gen_addi_i32(addr, addr, stride);
3889 }
3890 } else /* size == 0 */ {
3891 if (load) {
3892 TCGV_UNUSED(tmp2);
3893 for (n = 0; n < 4; n++) {
3894 tmp = gen_ld8u(addr, IS_USER(s));
3895 tcg_gen_addi_i32(addr, addr, stride);
3896 if (n == 0) {
3897 tmp2 = tmp;
3898 } else {
3899 tcg_gen_shli_i32(tmp, tmp, n * 8);
3900 tcg_gen_or_i32(tmp2, tmp2, tmp);
3901 tcg_temp_free_i32(tmp);
3902 }
3903 }
3904 neon_store_reg(rd, pass, tmp2);
3905 } else {
3906 tmp2 = neon_load_reg(rd, pass);
3907 for (n = 0; n < 4; n++) {
3908 tmp = tcg_temp_new_i32();
3909 if (n == 0) {
3910 tcg_gen_mov_i32(tmp, tmp2);
3911 } else {
3912 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3913 }
3914 gen_st8(tmp, addr, IS_USER(s));
3915 tcg_gen_addi_i32(addr, addr, stride);
3916 }
3917 tcg_temp_free_i32(tmp2);
3918 }
3919 }
3920 }
3921 }
3922 rd += spacing;
3923 }
3924 tcg_temp_free_i32(addr);
3925 stride = nregs * 8;
3926 } else {
3927 size = (insn >> 10) & 3;
3928 if (size == 3) {
3929 /* Load single element to all lanes. */
3930 int a = (insn >> 4) & 1;
3931 if (!load) {
3932 return 1;
3933 }
3934 size = (insn >> 6) & 3;
3935 nregs = ((insn >> 8) & 3) + 1;
3936
3937 if (size == 3) {
3938 if (nregs != 4 || a == 0) {
3939 return 1;
3940 }
3941 /* For VLD4 size == 3, a == 1 means 32 bits at 16-byte alignment */
3942 size = 2;
3943 }
3944 if (nregs == 1 && a == 1 && size == 0) {
3945 return 1;
3946 }
3947 if (nregs == 3 && a == 1) {
3948 return 1;
3949 }
3950 addr = tcg_temp_new_i32();
3951 load_reg_var(s, addr, rn);
3952 if (nregs == 1) {
3953 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3954 tmp = gen_load_and_replicate(s, addr, size);
3955 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3956 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3957 if (insn & (1 << 5)) {
3958 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3959 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3960 }
3961 tcg_temp_free_i32(tmp);
3962 } else {
3963 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3964 stride = (insn & (1 << 5)) ? 2 : 1;
3965 for (reg = 0; reg < nregs; reg++) {
3966 tmp = gen_load_and_replicate(s, addr, size);
3967 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3968 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3969 tcg_temp_free_i32(tmp);
3970 tcg_gen_addi_i32(addr, addr, 1 << size);
3971 rd += stride;
3972 }
3973 }
3974 tcg_temp_free_i32(addr);
3975 stride = (1 << size) * nregs;
3976 } else {
3977 /* Single element. */
3978 pass = (insn >> 7) & 1;
3979 switch (size) {
3980 case 0:
3981 shift = ((insn >> 5) & 3) * 8;
3982 stride = 1;
3983 break;
3984 case 1:
3985 shift = ((insn >> 6) & 1) * 16;
3986 stride = (insn & (1 << 5)) ? 2 : 1;
3987 break;
3988 case 2:
3989 shift = 0;
3990 stride = (insn & (1 << 6)) ? 2 : 1;
3991 break;
3992 default:
3993 abort();
3994 }
3995 nregs = ((insn >> 8) & 3) + 1;
3996 addr = tcg_temp_new_i32();
3997 load_reg_var(s, addr, rn);
3998 for (reg = 0; reg < nregs; reg++) {
3999 if (load) {
4000 switch (size) {
4001 case 0:
4002 tmp = gen_ld8u(addr, IS_USER(s));
4003 break;
4004 case 1:
4005 tmp = gen_ld16u(addr, IS_USER(s));
4006 break;
4007 case 2:
4008 tmp = gen_ld32(addr, IS_USER(s));
4009 break;
4010 default: /* Avoid compiler warnings. */
4011 abort();
4012 }
4013 if (size != 2) {
4014 tmp2 = neon_load_reg(rd, pass);
4015 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
4016 tcg_temp_free_i32(tmp2);
4017 }
4018 neon_store_reg(rd, pass, tmp);
4019 } else { /* Store */
4020 tmp = neon_load_reg(rd, pass);
4021 if (shift)
4022 tcg_gen_shri_i32(tmp, tmp, shift);
4023 switch (size) {
4024 case 0:
4025 gen_st8(tmp, addr, IS_USER(s));
4026 break;
4027 case 1:
4028 gen_st16(tmp, addr, IS_USER(s));
4029 break;
4030 case 2:
4031 gen_st32(tmp, addr, IS_USER(s));
4032 break;
4033 }
4034 }
4035 rd += stride;
4036 tcg_gen_addi_i32(addr, addr, 1 << size);
4037 }
4038 tcg_temp_free_i32(addr);
4039 stride = nregs * (1 << size);
4040 }
4041 }
4042 if (rm != 15) {
4043 TCGv base;
4044
4045 base = load_reg(s, rn);
4046 if (rm == 13) {
4047 tcg_gen_addi_i32(base, base, stride);
4048 } else {
4049 TCGv index;
4050 index = load_reg(s, rm);
4051 tcg_gen_add_i32(base, base, index);
4052 tcg_temp_free_i32(index);
4053 }
4054 store_reg(s, rn, base);
4055 }
4056 return 0;
4057 }
4058
4059 /* Bitwise select: dest = c ? t : f. Clobbers t and f. */
4060 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4061 {
4062 tcg_gen_and_i32(t, t, c);
4063 tcg_gen_andc_i32(f, f, c);
4064 tcg_gen_or_i32(dest, t, f);
4065 }
4066
4067 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4068 {
4069 switch (size) {
4070 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4071 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4072 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4073 default: abort();
4074 }
4075 }
4076
4077 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4078 {
4079 switch (size) {
4080 case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
4081 case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
4082 case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
4083 default: abort();
4084 }
4085 }
4086
4087 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4088 {
4089 switch (size) {
4090 case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
4091 case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
4092 case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
4093 default: abort();
4094 }
4095 }
4096
4097 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4098 {
4099 switch (size) {
4100 case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
4101 case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
4102 case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
4103 default: abort();
4104 }
4105 }
4106
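/* Left shift used by the narrowing shift instructions: 'q' selects the
   rounding form and 'u' the unsigned form. */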
4107 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4108 int q, int u)
4109 {
4110 if (q) {
4111 if (u) {
4112 switch (size) {
4113 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4114 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4115 default: abort();
4116 }
4117 } else {
4118 switch (size) {
4119 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4120 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4121 default: abort();
4122 }
4123 }
4124 } else {
4125 if (u) {
4126 switch (size) {
4127 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4128 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4129 default: abort();
4130 }
4131 } else {
4132 switch (size) {
4133 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4134 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4135 default: abort();
4136 }
4137 }
4138 }
4139 }
4140
4141 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4142 {
4143 if (u) {
4144 switch (size) {
4145 case 0: gen_helper_neon_widen_u8(dest, src); break;
4146 case 1: gen_helper_neon_widen_u16(dest, src); break;
4147 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4148 default: abort();
4149 }
4150 } else {
4151 switch (size) {
4152 case 0: gen_helper_neon_widen_s8(dest, src); break;
4153 case 1: gen_helper_neon_widen_s16(dest, src); break;
4154 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4155 default: abort();
4156 }
4157 }
4158 tcg_temp_free_i32(src);
4159 }
4160
4161 static inline void gen_neon_addl(int size)
4162 {
4163 switch (size) {
4164 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4165 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4166 case 2: tcg_gen_add_i64(CPU_V001); break;
4167 default: abort();
4168 }
4169 }
4170
4171 static inline void gen_neon_subl(int size)
4172 {
4173 switch (size) {
4174 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4175 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4176 case 2: tcg_gen_sub_i64(CPU_V001); break;
4177 default: abort();
4178 }
4179 }
4180
4181 static inline void gen_neon_negl(TCGv_i64 var, int size)
4182 {
4183 switch (size) {
4184 case 0: gen_helper_neon_negl_u16(var, var); break;
4185 case 1: gen_helper_neon_negl_u32(var, var); break;
4186 case 2: gen_helper_neon_negl_u64(var, var); break;
4187 default: abort();
4188 }
4189 }
4190
4191 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4192 {
4193 switch (size) {
4194 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4195 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
4196 default: abort();
4197 }
4198 }
4199
4200 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4201 {
4202 TCGv_i64 tmp;
4203
4204 switch ((size << 1) | u) {
4205 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4206 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4207 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4208 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4209 case 4:
4210 tmp = gen_muls_i64_i32(a, b);
4211 tcg_gen_mov_i64(dest, tmp);
4212 tcg_temp_free_i64(tmp);
4213 break;
4214 case 5:
4215 tmp = gen_mulu_i64_i32(a, b);
4216 tcg_gen_mov_i64(dest, tmp);
4217 tcg_temp_free_i64(tmp);
4218 break;
4219 default: abort();
4220 }
4221
4222 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters,
4223 so free them here. */
4224 if (size < 2) {
4225 tcg_temp_free_i32(a);
4226 tcg_temp_free_i32(b);
4227 }
4228 }
4229
4230 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4231 {
4232 if (op) {
4233 if (u) {
4234 gen_neon_unarrow_sats(size, dest, src);
4235 } else {
4236 gen_neon_narrow(size, dest, src);
4237 }
4238 } else {
4239 if (u) {
4240 gen_neon_narrow_satu(size, dest, src);
4241 } else {
4242 gen_neon_narrow_sats(size, dest, src);
4243 }
4244 }
4245 }
4246
4247 /* Symbolic constants for op fields for Neon 3-register same-length.
4248 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4249 * table A7-9.
4250 */
4251 #define NEON_3R_VHADD 0
4252 #define NEON_3R_VQADD 1
4253 #define NEON_3R_VRHADD 2
4254 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4255 #define NEON_3R_VHSUB 4
4256 #define NEON_3R_VQSUB 5
4257 #define NEON_3R_VCGT 6
4258 #define NEON_3R_VCGE 7
4259 #define NEON_3R_VSHL 8
4260 #define NEON_3R_VQSHL 9
4261 #define NEON_3R_VRSHL 10
4262 #define NEON_3R_VQRSHL 11
4263 #define NEON_3R_VMAX 12
4264 #define NEON_3R_VMIN 13
4265 #define NEON_3R_VABD 14
4266 #define NEON_3R_VABA 15
4267 #define NEON_3R_VADD_VSUB 16
4268 #define NEON_3R_VTST_VCEQ 17
4269 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4270 #define NEON_3R_VMUL 19
4271 #define NEON_3R_VPMAX 20
4272 #define NEON_3R_VPMIN 21
4273 #define NEON_3R_VQDMULH_VQRDMULH 22
4274 #define NEON_3R_VPADD 23
4275 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4276 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4277 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4278 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4279 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4280 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4281
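/* As for neon_2rm_sizes[] below: bit n of each entry is set if the insn
 * allows element size n, so unallocated op values (all-zero entries)
 * always UNDEF.
 */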
4282 static const uint8_t neon_3r_sizes[] = {
4283 [NEON_3R_VHADD] = 0x7,
4284 [NEON_3R_VQADD] = 0xf,
4285 [NEON_3R_VRHADD] = 0x7,
4286 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4287 [NEON_3R_VHSUB] = 0x7,
4288 [NEON_3R_VQSUB] = 0xf,
4289 [NEON_3R_VCGT] = 0x7,
4290 [NEON_3R_VCGE] = 0x7,
4291 [NEON_3R_VSHL] = 0xf,
4292 [NEON_3R_VQSHL] = 0xf,
4293 [NEON_3R_VRSHL] = 0xf,
4294 [NEON_3R_VQRSHL] = 0xf,
4295 [NEON_3R_VMAX] = 0x7,
4296 [NEON_3R_VMIN] = 0x7,
4297 [NEON_3R_VABD] = 0x7,
4298 [NEON_3R_VABA] = 0x7,
4299 [NEON_3R_VADD_VSUB] = 0xf,
4300 [NEON_3R_VTST_VCEQ] = 0x7,
4301 [NEON_3R_VML] = 0x7,
4302 [NEON_3R_VMUL] = 0x7,
4303 [NEON_3R_VPMAX] = 0x7,
4304 [NEON_3R_VPMIN] = 0x7,
4305 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4306 [NEON_3R_VPADD] = 0x7,
4307 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4308 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4309 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4310 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4311 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4312 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4313 };
4314
4315 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4316 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4317 * table A7-13.
4318 */
4319 #define NEON_2RM_VREV64 0
4320 #define NEON_2RM_VREV32 1
4321 #define NEON_2RM_VREV16 2
4322 #define NEON_2RM_VPADDL 4
4323 #define NEON_2RM_VPADDL_U 5
4324 #define NEON_2RM_VCLS 8
4325 #define NEON_2RM_VCLZ 9
4326 #define NEON_2RM_VCNT 10
4327 #define NEON_2RM_VMVN 11
4328 #define NEON_2RM_VPADAL 12
4329 #define NEON_2RM_VPADAL_U 13
4330 #define NEON_2RM_VQABS 14
4331 #define NEON_2RM_VQNEG 15
4332 #define NEON_2RM_VCGT0 16
4333 #define NEON_2RM_VCGE0 17
4334 #define NEON_2RM_VCEQ0 18
4335 #define NEON_2RM_VCLE0 19
4336 #define NEON_2RM_VCLT0 20
4337 #define NEON_2RM_VABS 22
4338 #define NEON_2RM_VNEG 23
4339 #define NEON_2RM_VCGT0_F 24
4340 #define NEON_2RM_VCGE0_F 25
4341 #define NEON_2RM_VCEQ0_F 26
4342 #define NEON_2RM_VCLE0_F 27
4343 #define NEON_2RM_VCLT0_F 28
4344 #define NEON_2RM_VABS_F 30
4345 #define NEON_2RM_VNEG_F 31
4346 #define NEON_2RM_VSWP 32
4347 #define NEON_2RM_VTRN 33
4348 #define NEON_2RM_VUZP 34
4349 #define NEON_2RM_VZIP 35
4350 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4351 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4352 #define NEON_2RM_VSHLL 38
4353 #define NEON_2RM_VCVT_F16_F32 44
4354 #define NEON_2RM_VCVT_F32_F16 46
4355 #define NEON_2RM_VRECPE 56
4356 #define NEON_2RM_VRSQRTE 57
4357 #define NEON_2RM_VRECPE_F 58
4358 #define NEON_2RM_VRSQRTE_F 59
4359 #define NEON_2RM_VCVT_FS 60
4360 #define NEON_2RM_VCVT_FU 61
4361 #define NEON_2RM_VCVT_SF 62
4362 #define NEON_2RM_VCVT_UF 63
4363
4364 static int neon_2rm_is_float_op(int op)
4365 {
4366 /* Return true if this neon 2reg-misc op is float-to-float */
4367 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4368 op >= NEON_2RM_VRECPE_F);
4369 }
4370
4371 /* Each entry in this array has bit n set if the insn allows
4372 * size value n (otherwise it will UNDEF). Since unallocated
4373 * op values will have no bits set they always UNDEF.
4374 */
4375 static const uint8_t neon_2rm_sizes[] = {
4376 [NEON_2RM_VREV64] = 0x7,
4377 [NEON_2RM_VREV32] = 0x3,
4378 [NEON_2RM_VREV16] = 0x1,
4379 [NEON_2RM_VPADDL] = 0x7,
4380 [NEON_2RM_VPADDL_U] = 0x7,
4381 [NEON_2RM_VCLS] = 0x7,
4382 [NEON_2RM_VCLZ] = 0x7,
4383 [NEON_2RM_VCNT] = 0x1,
4384 [NEON_2RM_VMVN] = 0x1,
4385 [NEON_2RM_VPADAL] = 0x7,
4386 [NEON_2RM_VPADAL_U] = 0x7,
4387 [NEON_2RM_VQABS] = 0x7,
4388 [NEON_2RM_VQNEG] = 0x7,
4389 [NEON_2RM_VCGT0] = 0x7,
4390 [NEON_2RM_VCGE0] = 0x7,
4391 [NEON_2RM_VCEQ0] = 0x7,
4392 [NEON_2RM_VCLE0] = 0x7,
4393 [NEON_2RM_VCLT0] = 0x7,
4394 [NEON_2RM_VABS] = 0x7,
4395 [NEON_2RM_VNEG] = 0x7,
4396 [NEON_2RM_VCGT0_F] = 0x4,
4397 [NEON_2RM_VCGE0_F] = 0x4,
4398 [NEON_2RM_VCEQ0_F] = 0x4,
4399 [NEON_2RM_VCLE0_F] = 0x4,
4400 [NEON_2RM_VCLT0_F] = 0x4,
4401 [NEON_2RM_VABS_F] = 0x4,
4402 [NEON_2RM_VNEG_F] = 0x4,
4403 [NEON_2RM_VSWP] = 0x1,
4404 [NEON_2RM_VTRN] = 0x7,
4405 [NEON_2RM_VUZP] = 0x7,
4406 [NEON_2RM_VZIP] = 0x7,
4407 [NEON_2RM_VMOVN] = 0x7,
4408 [NEON_2RM_VQMOVN] = 0x7,
4409 [NEON_2RM_VSHLL] = 0x7,
4410 [NEON_2RM_VCVT_F16_F32] = 0x2,
4411 [NEON_2RM_VCVT_F32_F16] = 0x2,
4412 [NEON_2RM_VRECPE] = 0x4,
4413 [NEON_2RM_VRSQRTE] = 0x4,
4414 [NEON_2RM_VRECPE_F] = 0x4,
4415 [NEON_2RM_VRSQRTE_F] = 0x4,
4416 [NEON_2RM_VCVT_FS] = 0x4,
4417 [NEON_2RM_VCVT_FU] = 0x4,
4418 [NEON_2RM_VCVT_SF] = 0x4,
4419 [NEON_2RM_VCVT_UF] = 0x4,
4420 };
4421
4422 /* Translate a NEON data processing instruction. Return nonzero if the
4423 instruction is invalid.
4424 We process data in a mixture of 32-bit and 64-bit chunks.
4425 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4426
4427 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4428 {
4429 int op;
4430 int q;
4431 int rd, rn, rm;
4432 int size;
4433 int shift;
4434 int pass;
4435 int count;
4436 int pairwise;
4437 int u;
4438 uint32_t imm, mask;
4439 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4440 TCGv_i64 tmp64;
4441
4442 if (!s->vfp_enabled)
4443 return 1;
4444 q = (insn & (1 << 6)) != 0;
4445 u = (insn >> 24) & 1;
4446 VFP_DREG_D(rd, insn);
4447 VFP_DREG_N(rn, insn);
4448 VFP_DREG_M(rm, insn);
4449 size = (insn >> 20) & 3;
4450 if ((insn & (1 << 23)) == 0) {
4451 /* Three register same length. */
4452 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4453 /* Catch invalid op and bad size combinations: UNDEF */
4454 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4455 return 1;
4456 }
4457 /* All insns of this form UNDEF for either this condition or the
4458 * superset of cases "Q==1"; we catch the latter later.
4459 */
4460 if (q && ((rd | rn | rm) & 1)) {
4461 return 1;
4462 }
4463 if (size == 3 && op != NEON_3R_LOGIC) {
4464 /* 64-bit element instructions. */
4465 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4466 neon_load_reg64(cpu_V0, rn + pass);
4467 neon_load_reg64(cpu_V1, rm + pass);
4468 switch (op) {
4469 case NEON_3R_VQADD:
4470 if (u) {
4471 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
4472 } else {
4473 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
4474 }
4475 break;
4476 case NEON_3R_VQSUB:
4477 if (u) {
4478 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
4479 } else {
4480 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
4481 }
4482 break;
4483 case NEON_3R_VSHL:
4484 if (u) {
4485 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4486 } else {
4487 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4488 }
4489 break;
4490 case NEON_3R_VQSHL:
4491 if (u) {
4492 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
4493 } else {
4494 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
4495 }
4496 break;
4497 case NEON_3R_VRSHL:
4498 if (u) {
4499 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4500 } else {
4501 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4502 }
4503 break;
4504 case NEON_3R_VQRSHL:
4505 if (u) {
4506 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
4507 } else {
4508 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
4509 }
4510 break;
4511 case NEON_3R_VADD_VSUB:
4512 if (u) {
4513 tcg_gen_sub_i64(CPU_V001);
4514 } else {
4515 tcg_gen_add_i64(CPU_V001);
4516 }
4517 break;
4518 default:
4519 abort();
4520 }
4521 neon_store_reg64(cpu_V0, rd + pass);
4522 }
4523 return 0;
4524 }
4525 pairwise = 0;
4526 switch (op) {
4527 case NEON_3R_VSHL:
4528 case NEON_3R_VQSHL:
4529 case NEON_3R_VRSHL:
4530 case NEON_3R_VQRSHL:
4531 {
4532 int rtmp;
4533 /* Shift instruction operands are reversed. */
4534 rtmp = rn;
4535 rn = rm;
4536 rm = rtmp;
4537 }
4538 break;
4539 case NEON_3R_VPADD:
4540 if (u) {
4541 return 1;
4542 }
4543 /* Fall through */
4544 case NEON_3R_VPMAX:
4545 case NEON_3R_VPMIN:
4546 pairwise = 1;
4547 break;
4548 case NEON_3R_FLOAT_ARITH:
4549 pairwise = (u && size < 2); /* if VPADD (float) */
4550 break;
4551 case NEON_3R_FLOAT_MINMAX:
4552 pairwise = u; /* if VPMIN/VPMAX (float) */
4553 break;
4554 case NEON_3R_FLOAT_CMP:
4555 if (!u && size) {
4556 /* no encoding for U=0 C=1x */
4557 return 1;
4558 }
4559 break;
4560 case NEON_3R_FLOAT_ACMP:
4561 if (!u) {
4562 return 1;
4563 }
4564 break;
4565 case NEON_3R_VRECPS_VRSQRTS:
4566 if (u) {
4567 return 1;
4568 }
4569 break;
4570 case NEON_3R_VMUL:
4571 if (u && (size != 0)) {
4572 /* UNDEF on invalid size for polynomial subcase */
4573 return 1;
4574 }
4575 break;
4576 default:
4577 break;
4578 }
4579
4580 if (pairwise && q) {
4581 /* All the pairwise insns UNDEF if Q is set */
4582 return 1;
4583 }
4584
4585 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4586
4587 if (pairwise) {
4588 /* Pairwise. */
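/* For pairwise ops the first pass consumes both halves of Dn and the
 * second both halves of Dm; the Q forms UNDEF above, so there are
 * only ever two passes here.
 */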
4589 if (pass < 1) {
4590 tmp = neon_load_reg(rn, 0);
4591 tmp2 = neon_load_reg(rn, 1);
4592 } else {
4593 tmp = neon_load_reg(rm, 0);
4594 tmp2 = neon_load_reg(rm, 1);
4595 }
4596 } else {
4597 /* Elementwise. */
4598 tmp = neon_load_reg(rn, pass);
4599 tmp2 = neon_load_reg(rm, pass);
4600 }
4601 switch (op) {
4602 case NEON_3R_VHADD:
4603 GEN_NEON_INTEGER_OP(hadd);
4604 break;
4605 case NEON_3R_VQADD:
4606 GEN_NEON_INTEGER_OP(qadd);
4607 break;
4608 case NEON_3R_VRHADD:
4609 GEN_NEON_INTEGER_OP(rhadd);
4610 break;
4611 case NEON_3R_LOGIC: /* Logic ops. */
4612 switch ((u << 2) | size) {
4613 case 0: /* VAND */
4614 tcg_gen_and_i32(tmp, tmp, tmp2);
4615 break;
4616 case 1: /* BIC */
4617 tcg_gen_andc_i32(tmp, tmp, tmp2);
4618 break;
4619 case 2: /* VORR */
4620 tcg_gen_or_i32(tmp, tmp, tmp2);
4621 break;
4622 case 3: /* VORN */
4623 tcg_gen_orc_i32(tmp, tmp, tmp2);
4624 break;
4625 case 4: /* VEOR */
4626 tcg_gen_xor_i32(tmp, tmp, tmp2);
4627 break;
4628 case 5: /* VBSL */
4629 tmp3 = neon_load_reg(rd, pass);
4630 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4631 tcg_temp_free_i32(tmp3);
4632 break;
4633 case 6: /* VBIT */
4634 tmp3 = neon_load_reg(rd, pass);
4635 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4636 tcg_temp_free_i32(tmp3);
4637 break;
4638 case 7: /* VBIF */
4639 tmp3 = neon_load_reg(rd, pass);
4640 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4641 tcg_temp_free_i32(tmp3);
4642 break;
4643 }
4644 break;
4645 case NEON_3R_VHSUB:
4646 GEN_NEON_INTEGER_OP(hsub);
4647 break;
4648 case NEON_3R_VQSUB:
4649 GEN_NEON_INTEGER_OP(qsub);
4650 break;
4651 case NEON_3R_VCGT:
4652 GEN_NEON_INTEGER_OP(cgt);
4653 break;
4654 case NEON_3R_VCGE:
4655 GEN_NEON_INTEGER_OP(cge);
4656 break;
4657 case NEON_3R_VSHL:
4658 GEN_NEON_INTEGER_OP(shl);
4659 break;
4660 case NEON_3R_VQSHL:
4661 GEN_NEON_INTEGER_OP(qshl);
4662 break;
4663 case NEON_3R_VRSHL:
4664 GEN_NEON_INTEGER_OP(rshl);
4665 break;
4666 case NEON_3R_VQRSHL:
4667 GEN_NEON_INTEGER_OP(qrshl);
4668 break;
4669 case NEON_3R_VMAX:
4670 GEN_NEON_INTEGER_OP(max);
4671 break;
4672 case NEON_3R_VMIN:
4673 GEN_NEON_INTEGER_OP(min);
4674 break;
4675 case NEON_3R_VABD:
4676 GEN_NEON_INTEGER_OP(abd);
4677 break;
4678 case NEON_3R_VABA:
4679 GEN_NEON_INTEGER_OP(abd);
4680 tcg_temp_free_i32(tmp2);
4681 tmp2 = neon_load_reg(rd, pass);
4682 gen_neon_add(size, tmp, tmp2);
4683 break;
4684 case NEON_3R_VADD_VSUB:
4685 if (!u) { /* VADD */
4686 gen_neon_add(size, tmp, tmp2);
4687 } else { /* VSUB */
4688 switch (size) {
4689 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4690 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4691 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4692 default: abort();
4693 }
4694 }
4695 break;
4696 case NEON_3R_VTST_VCEQ:
4697 if (!u) { /* VTST */
4698 switch (size) {
4699 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4700 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4701 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4702 default: abort();
4703 }
4704 } else { /* VCEQ */
4705 switch (size) {
4706 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4707 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4708 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4709 default: abort();
4710 }
4711 }
4712 break;
4713 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
4714 switch (size) {
4715 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4716 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4717 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4718 default: abort();
4719 }
4720 tcg_temp_free_i32(tmp2);
4721 tmp2 = neon_load_reg(rd, pass);
4722 if (u) { /* VMLS */
4723 gen_neon_rsb(size, tmp, tmp2);
4724 } else { /* VMLA */
4725 gen_neon_add(size, tmp, tmp2);
4726 }
4727 break;
4728 case NEON_3R_VMUL:
4729 if (u) { /* polynomial */
4730 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4731 } else { /* Integer */
4732 switch (size) {
4733 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4734 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4735 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4736 default: abort();
4737 }
4738 }
4739 break;
4740 case NEON_3R_VPMAX:
4741 GEN_NEON_INTEGER_OP(pmax);
4742 break;
4743 case NEON_3R_VPMIN:
4744 GEN_NEON_INTEGER_OP(pmin);
4745 break;
4746 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4747 if (!u) { /* VQDMULH */
4748 switch (size) {
4749 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4750 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
4751 default: abort();
4752 }
4753 } else { /* VQRDMULH */
4754 switch (size) {
4755 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4756 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
4757 default: abort();
4758 }
4759 }
4760 break;
4761 case NEON_3R_VPADD:
4762 switch (size) {
4763 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4764 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4765 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4766 default: abort();
4767 }
4768 break;
4769 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4770 switch ((u << 2) | size) {
4771 case 0: /* VADD */
4772 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4773 break;
4774 case 2: /* VSUB */
4775 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4776 break;
4777 case 4: /* VPADD */
4778 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4779 break;
4780 case 6: /* VABD */
4781 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4782 break;
4783 default:
4784 abort();
4785 }
4786 break;
4787 case NEON_3R_FLOAT_MULTIPLY:
4788 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4789 if (!u) {
4790 tcg_temp_free_i32(tmp2);
4791 tmp2 = neon_load_reg(rd, pass);
4792 if (size == 0) {
4793 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4794 } else {
4795 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4796 }
4797 }
4798 break;
4799 case NEON_3R_FLOAT_CMP:
4800 if (!u) {
4801 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4802 } else {
4803 if (size == 0)
4804 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4805 else
4806 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4807 }
4808 break;
4809 case NEON_3R_FLOAT_ACMP:
4810 if (size == 0)
4811 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4812 else
4813 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4814 break;
4815 case NEON_3R_FLOAT_MINMAX:
4816 if (size == 0)
4817 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4818 else
4819 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4820 break;
4821 case NEON_3R_VRECPS_VRSQRTS:
4822 if (size == 0)
4823 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4824 else
4825 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4826 break;
4827 default:
4828 abort();
4829 }
4830 tcg_temp_free_i32(tmp2);
4831
4832 /* Save the result. For elementwise operations we can put it
4833 straight into the destination register. For pairwise operations
4834 we have to be careful to avoid clobbering the source operands. */
4835 if (pairwise && rd == rm) {
4836 neon_store_scratch(pass, tmp);
4837 } else {
4838 neon_store_reg(rd, pass, tmp);
4839 }
4840
4841 } /* for pass */
4842 if (pairwise && rd == rm) {
4843 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4844 tmp = neon_load_scratch(pass);
4845 neon_store_reg(rd, pass, tmp);
4846 }
4847 }
4848 /* End of 3 register same size operations. */
4849 } else if (insn & (1 << 4)) {
4850 if ((insn & 0x00380080) != 0) {
4851 /* Two registers and shift. */
4852 op = (insn >> 8) & 0xf;
4853 if (insn & (1 << 7)) {
4854 /* 64-bit shift. */
4855 if (op > 7) {
4856 return 1;
4857 }
4858 size = 3;
4859 } else {
4860 size = 2;
4861 while ((insn & (1 << (size + 19))) == 0)
4862 size--;
4863 }
4864 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4865 /* To avoid excessive duplication of ops we implement shift
4866 by immediate using the variable shift operations. */
4867 if (op < 8) {
4868 /* Shift by immediate:
4869 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VSLI, VQSHL, VQSHLU. */
4870 if (q && ((rd | rm) & 1)) {
4871 return 1;
4872 }
4873 if (!u && (op == 4 || op == 6)) {
4874 return 1;
4875 }
4876 /* Right shifts are encoded as N - shift, where N is the
4877 element size in bits. */
4878 if (op <= 4)
4879 shift = shift - (1 << (size + 3));
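/* The result is negative for a true right shift; the variable-shift
 * helpers treat a negative count as a right shift by its magnitude.
 * E.g. size==0 (8-bit elements) with an encoded field of 3 gives
 * shift = 3 - 8 = -5, i.e. a right shift by 5.
 */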
4880 if (size == 3) {
4881 count = q + 1;
4882 } else {
4883 count = q ? 4: 2;
4884 }
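/* Replicate the (possibly negative) shift count into every element
 * of a 32-bit word so it can be used as the per-lane operand of the
 * variable-shift helpers below.
 */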
4885 switch (size) {
4886 case 0:
4887 imm = (uint8_t) shift;
4888 imm |= imm << 8;
4889 imm |= imm << 16;
4890 break;
4891 case 1:
4892 imm = (uint16_t) shift;
4893 imm |= imm << 16;
4894 break;
4895 case 2:
4896 case 3:
4897 imm = shift;
4898 break;
4899 default:
4900 abort();
4901 }
4902
4903 for (pass = 0; pass < count; pass++) {
4904 if (size == 3) {
4905 neon_load_reg64(cpu_V0, rm + pass);
4906 tcg_gen_movi_i64(cpu_V1, imm);
4907 switch (op) {
4908 case 0: /* VSHR */
4909 case 1: /* VSRA */
4910 if (u)
4911 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4912 else
4913 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4914 break;
4915 case 2: /* VRSHR */
4916 case 3: /* VRSRA */
4917 if (u)
4918 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4919 else
4920 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4921 break;
4922 case 4: /* VSRI */
4923 case 5: /* VSHL, VSLI */
4924 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4925 break;
4926 case 6: /* VQSHLU */
4927 gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1);
4928 break;
4929 case 7: /* VQSHL */
4930 if (u) {
4931 gen_helper_neon_qshl_u64(cpu_V0,
4932 cpu_V0, cpu_V1);
4933 } else {
4934 gen_helper_neon_qshl_s64(cpu_V0,
4935 cpu_V0, cpu_V1);
4936 }
4937 break;
4938 }
4939 if (op == 1 || op == 3) {
4940 /* Accumulate. */
4941 neon_load_reg64(cpu_V1, rd + pass);
4942 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4943 } else if (op == 4 || (op == 5 && u)) {
4944 /* Insert */
4945 neon_load_reg64(cpu_V1, rd + pass);
4946 uint64_t mask;
4947 if (shift < -63 || shift > 63) {
4948 mask = 0;
4949 } else {
4950 if (op == 4) {
4951 mask = 0xffffffffffffffffull >> -shift;
4952 } else {
4953 mask = 0xffffffffffffffffull << shift;
4954 }
4955 }
4956 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4957 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
4958 }
4959 neon_store_reg64(cpu_V0, rd + pass);
4960 } else { /* size < 3 */
4961 /* Operands in tmp and tmp2. */
4962 tmp = neon_load_reg(rm, pass);
4963 tmp2 = tcg_temp_new_i32();
4964 tcg_gen_movi_i32(tmp2, imm);
4965 switch (op) {
4966 case 0: /* VSHR */
4967 case 1: /* VSRA */
4968 GEN_NEON_INTEGER_OP(shl);
4969 break;
4970 case 2: /* VRSHR */
4971 case 3: /* VRSRA */
4972 GEN_NEON_INTEGER_OP(rshl);
4973 break;
4974 case 4: /* VSRI */
4975 case 5: /* VSHL, VSLI */
4976 switch (size) {
4977 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4978 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4979 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4980 default: abort();
4981 }
4982 break;
4983 case 6: /* VQSHLU */
4984 switch (size) {
4985 case 0:
4986 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
4987 break;
4988 case 1:
4989 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
4990 break;
4991 case 2:
4992 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
4993 break;
4994 default:
4995 abort();
4996 }
4997 break;
4998 case 7: /* VQSHL */
4999 GEN_NEON_INTEGER_OP(qshl);
5000 break;
5001 }
5002 tcg_temp_free_i32(tmp2);
5003
5004 if (op == 1 || op == 3) {
5005 /* Accumulate. */
5006 tmp2 = neon_load_reg(rd, pass);
5007 gen_neon_add(size, tmp, tmp2);
5008 tcg_temp_free_i32(tmp2);
5009 } else if (op == 4 || (op == 5 && u)) {
5010 /* Insert */
5011 switch (size) {
5012 case 0:
5013 if (op == 4)
5014 mask = 0xff >> -shift;
5015 else
5016 mask = (uint8_t)(0xff << shift);
5017 mask |= mask << 8;
5018 mask |= mask << 16;
5019 break;
5020 case 1:
5021 if (op == 4)
5022 mask = 0xffff >> -shift;
5023 else
5024 mask = (uint16_t)(0xffff << shift);
5025 mask |= mask << 16;
5026 break;
5027 case 2:
5028 if (shift < -31 || shift > 31) {
5029 mask = 0;
5030 } else {
5031 if (op == 4)
5032 mask = 0xffffffffu >> -shift;
5033 else
5034 mask = 0xffffffffu << shift;
5035 }
5036 break;
5037 default:
5038 abort();
5039 }
5040 tmp2 = neon_load_reg(rd, pass);
5041 tcg_gen_andi_i32(tmp, tmp, mask);
5042 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5043 tcg_gen_or_i32(tmp, tmp, tmp2);
5044 tcg_temp_free_i32(tmp2);
5045 }
5046 neon_store_reg(rd, pass, tmp);
5047 }
5048 } /* for pass */
5049 } else if (op < 10) {
5050 /* Shift by immediate and narrow:
5051 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5052 int input_unsigned = (op == 8) ? !u : u;
5053 if (rm & 1) {
5054 return 1;
5055 }
5056 shift = shift - (1 << (size + 3));
5057 size++;
5058 if (size == 3) {
5059 tmp64 = tcg_const_i64(shift);
5060 neon_load_reg64(cpu_V0, rm);
5061 neon_load_reg64(cpu_V1, rm + 1);
5062 for (pass = 0; pass < 2; pass++) {
5063 TCGv_i64 in;
5064 if (pass == 0) {
5065 in = cpu_V0;
5066 } else {
5067 in = cpu_V1;
5068 }
5069 if (q) {
5070 if (input_unsigned) {
5071 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5072 } else {
5073 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5074 }
5075 } else {
5076 if (input_unsigned) {
5077 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5078 } else {
5079 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5080 }
5081 }
5082 tmp = tcg_temp_new_i32();
5083 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5084 neon_store_reg(rd, pass, tmp);
5085 } /* for pass */
5086 tcg_temp_free_i64(tmp64);
5087 } else {
5088 if (size == 1) {
5089 imm = (uint16_t)shift;
5090 imm |= imm << 16;
5091 } else {
5092 /* size == 2 */
5093 imm = (uint32_t)shift;
5094 }
5095 tmp2 = tcg_const_i32(imm);
5096 tmp4 = neon_load_reg(rm + 1, 0);
5097 tmp5 = neon_load_reg(rm + 1, 1);
5098 for (pass = 0; pass < 2; pass++) {
5099 if (pass == 0) {
5100 tmp = neon_load_reg(rm, 0);
5101 } else {
5102 tmp = tmp4;
5103 }
5104 gen_neon_shift_narrow(size, tmp, tmp2, q,
5105 input_unsigned);
5106 if (pass == 0) {
5107 tmp3 = neon_load_reg(rm, 1);
5108 } else {
5109 tmp3 = tmp5;
5110 }
5111 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5112 input_unsigned);
5113 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5114 tcg_temp_free_i32(tmp);
5115 tcg_temp_free_i32(tmp3);
5116 tmp = tcg_temp_new_i32();
5117 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5118 neon_store_reg(rd, pass, tmp);
5119 } /* for pass */
5120 tcg_temp_free_i32(tmp2);
5121 }
5122 } else if (op == 10) {
5123 /* VSHLL, VMOVL */
5124 if (q || (rd & 1)) {
5125 return 1;
5126 }
5127 tmp = neon_load_reg(rm, 0);
5128 tmp2 = neon_load_reg(rm, 1);
5129 for (pass = 0; pass < 2; pass++) {
5130 if (pass == 1)
5131 tmp = tmp2;
5132
5133 gen_neon_widen(cpu_V0, tmp, size, u);
5134
5135 if (shift != 0) {
5136 /* The shift is less than the width of the source
5137 type, so we can just shift the whole register. */
5138 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5139 /* Widen the result of shift: we need to clear
5140 * the potential overflow bits resulting from
5141 * left bits of the narrow input appearing as
5142 * right bits of the left-hand neighbouring narrow
5143 * input. */
5144 if (size < 2 || !u) {
5145 uint64_t imm64;
5146 if (size == 0) {
5147 imm = (0xffu >> (8 - shift));
5148 imm |= imm << 16;
5149 } else if (size == 1) {
5150 imm = 0xffff >> (16 - shift);
5151 } else {
5152 /* size == 2 */
5153 imm = 0xffffffff >> (32 - shift);
5154 }
5155 if (size < 2) {
5156 imm64 = imm | (((uint64_t)imm) << 32);
5157 } else {
5158 imm64 = imm;
5159 }
5160 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5161 }
5162 }
5163 neon_store_reg64(cpu_V0, rd + pass);
5164 }
5165 } else if (op >= 14) {
5166 /* VCVT fixed-point. */
5167 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5168 return 1;
5169 }
5170 /* We have already masked out the must-be-1 top bit of imm6,
5171 * hence this 32-shift where the ARM ARM has 64-imm6.
5172 */
5173 shift = 32 - shift;
5174 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5175 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5176 if (!(op & 1)) {
5177 if (u)
5178 gen_vfp_ulto(0, shift);
5179 else
5180 gen_vfp_slto(0, shift);
5181 } else {
5182 if (u)
5183 gen_vfp_toul(0, shift);
5184 else
5185 gen_vfp_tosl(0, shift);
5186 }
5187 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5188 }
5189 } else {
5190 return 1;
5191 }
5192 } else { /* (insn & 0x00380080) == 0 */
5193 int invert;
5194 if (q && (rd & 1)) {
5195 return 1;
5196 }
5197
5198 op = (insn >> 8) & 0xf;
5199 /* One register and immediate. */
5200 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5201 invert = (insn & (1 << 5)) != 0;
5202 /* Note that op = 2,3,4,5,6,7,10,11,12,13 with imm=0 is UNPREDICTABLE.
5203 * We choose to not special-case this and will behave as if a
5204 * valid constant encoding of 0 had been given.
5205 */
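/* Expand the 8-bit immediate into the 32-bit per-word constant: the
 * op (cmode) field selects which byte or halfword lanes it occupies,
 * ops 12/13 fill the vacated low bits with ones, and op 15 builds a
 * float-style constant. The op==14 invert (bits-to-bytes) form is
 * expanded per pass in the loop below.
 */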
5206 switch (op) {
5207 case 0: case 1:
5208 /* no-op */
5209 break;
5210 case 2: case 3:
5211 imm <<= 8;
5212 break;
5213 case 4: case 5:
5214 imm <<= 16;
5215 break;
5216 case 6: case 7:
5217 imm <<= 24;
5218 break;
5219 case 8: case 9:
5220 imm |= imm << 16;
5221 break;
5222 case 10: case 11:
5223 imm = (imm << 8) | (imm << 24);
5224 break;
5225 case 12:
5226 imm = (imm << 8) | 0xff;
5227 break;
5228 case 13:
5229 imm = (imm << 16) | 0xffff;
5230 break;
5231 case 14:
5232 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5233 if (invert)
5234 imm = ~imm;
5235 break;
5236 case 15:
5237 if (invert) {
5238 return 1;
5239 }
5240 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5241 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5242 break;
5243 }
5244 if (invert)
5245 imm = ~imm;
5246
5247 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5248 if (op & 1 && op < 12) {
5249 tmp = neon_load_reg(rd, pass);
5250 if (invert) {
5251 /* The immediate value has already been inverted, so
5252 BIC becomes AND. */
5253 tcg_gen_andi_i32(tmp, tmp, imm);
5254 } else {
5255 tcg_gen_ori_i32(tmp, tmp, imm);
5256 }
5257 } else {
5258 /* VMOV, VMVN. */
5259 tmp = tcg_temp_new_i32();
5260 if (op == 14 && invert) {
5261 int n;
5262 uint32_t val;
5263 val = 0;
5264 for (n = 0; n < 4; n++) {
5265 if (imm & (1 << (n + (pass & 1) * 4)))
5266 val |= 0xff << (n * 8);
5267 }
5268 tcg_gen_movi_i32(tmp, val);
5269 } else {
5270 tcg_gen_movi_i32(tmp, imm);
5271 }
5272 }
5273 neon_store_reg(rd, pass, tmp);
5274 }
5275 }
5276 } else { /* (insn & 0x00800010 == 0x00800000) */
5277 if (size != 3) {
5278 op = (insn >> 8) & 0xf;
5279 if ((insn & (1 << 6)) == 0) {
5280 /* Three registers of different lengths. */
5281 int src1_wide;
5282 int src2_wide;
5283 int prewiden;
5284 /* undefreq: bit 0 : UNDEF if size != 0
5285 * bit 1 : UNDEF if size == 0
5286 * bit 2 : UNDEF if U == 1
5287 * Note that [1:0] set implies 'always UNDEF'
5288 */
5289 int undefreq;
5290 /* prewiden, src1_wide, src2_wide, undefreq */
5291 static const int neon_3reg_wide[16][4] = {
5292 {1, 0, 0, 0}, /* VADDL */
5293 {1, 1, 0, 0}, /* VADDW */
5294 {1, 0, 0, 0}, /* VSUBL */
5295 {1, 1, 0, 0}, /* VSUBW */
5296 {0, 1, 1, 0}, /* VADDHN */
5297 {0, 0, 0, 0}, /* VABAL */
5298 {0, 1, 1, 0}, /* VSUBHN */
5299 {0, 0, 0, 0}, /* VABDL */
5300 {0, 0, 0, 0}, /* VMLAL */
5301 {0, 0, 0, 6}, /* VQDMLAL */
5302 {0, 0, 0, 0}, /* VMLSL */
5303 {0, 0, 0, 6}, /* VQDMLSL */
5304 {0, 0, 0, 0}, /* Integer VMULL */
5305 {0, 0, 0, 2}, /* VQDMULL */
5306 {0, 0, 0, 5}, /* Polynomial VMULL */
5307 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5308 };
5309
5310 prewiden = neon_3reg_wide[op][0];
5311 src1_wide = neon_3reg_wide[op][1];
5312 src2_wide = neon_3reg_wide[op][2];
5313 undefreq = neon_3reg_wide[op][3];
5314
5315 if (((undefreq & 1) && (size != 0)) ||
5316 ((undefreq & 2) && (size == 0)) ||
5317 ((undefreq & 4) && u)) {
5318 return 1;
5319 }
5320 if ((src1_wide && (rn & 1)) ||
5321 (src2_wide && (rm & 1)) ||
5322 (!src2_wide && (rd & 1))) {
5323 return 1;
5324 }
5325
5326 /* Avoid overlapping operands. Wide source operands are
5327 always aligned so will never overlap with wide
5328 destinations in problematic ways. */
5329 if (rd == rm && !src2_wide) {
5330 tmp = neon_load_reg(rm, 1);
5331 neon_store_scratch(2, tmp);
5332 } else if (rd == rn && !src1_wide) {
5333 tmp = neon_load_reg(rn, 1);
5334 neon_store_scratch(2, tmp);
5335 }
5336 TCGV_UNUSED(tmp3);
5337 for (pass = 0; pass < 2; pass++) {
5338 if (src1_wide) {
5339 neon_load_reg64(cpu_V0, rn + pass);
5340 TCGV_UNUSED(tmp);
5341 } else {
5342 if (pass == 1 && rd == rn) {
5343 tmp = neon_load_scratch(2);
5344 } else {
5345 tmp = neon_load_reg(rn, pass);
5346 }
5347 if (prewiden) {
5348 gen_neon_widen(cpu_V0, tmp, size, u);
5349 }
5350 }
5351 if (src2_wide) {
5352 neon_load_reg64(cpu_V1, rm + pass);
5353 TCGV_UNUSED(tmp2);
5354 } else {
5355 if (pass == 1 && rd == rm) {
5356 tmp2 = neon_load_scratch(2);
5357 } else {
5358 tmp2 = neon_load_reg(rm, pass);
5359 }
5360 if (prewiden) {
5361 gen_neon_widen(cpu_V1, tmp2, size, u);
5362 }
5363 }
5364 switch (op) {
5365 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5366 gen_neon_addl(size);
5367 break;
5368 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5369 gen_neon_subl(size);
5370 break;
5371 case 5: case 7: /* VABAL, VABDL */
5372 switch ((size << 1) | u) {
5373 case 0:
5374 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5375 break;
5376 case 1:
5377 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5378 break;
5379 case 2:
5380 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5381 break;
5382 case 3:
5383 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5384 break;
5385 case 4:
5386 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5387 break;
5388 case 5:
5389 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5390 break;
5391 default: abort();
5392 }
5393 tcg_temp_free_i32(tmp2);
5394 tcg_temp_free_i32(tmp);
5395 break;
5396 case 8: case 9: case 10: case 11: case 12: case 13:
5397 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5398 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5399 break;
5400 case 14: /* Polynomial VMULL */
5401 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5402 tcg_temp_free_i32(tmp2);
5403 tcg_temp_free_i32(tmp);
5404 break;
5405 default: /* 15 is RESERVED: caught earlier */
5406 abort();
5407 }
5408 if (op == 13) {
5409 /* VQDMULL */
5410 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5411 neon_store_reg64(cpu_V0, rd + pass);
5412 } else if (op == 5 || (op >= 8 && op <= 11)) {
5413 /* Accumulate. */
5414 neon_load_reg64(cpu_V1, rd + pass);
5415 switch (op) {
5416 case 10: /* VMLSL */
5417 gen_neon_negl(cpu_V0, size);
5418 /* Fall through */
5419 case 5: case 8: /* VABAL, VMLAL */
5420 gen_neon_addl(size);
5421 break;
5422 case 9: case 11: /* VQDMLAL, VQDMLSL */
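/* Double the product by saturating-adding it to itself, negate
 * for VQDMLSL, then saturating-accumulate into the destination.
 */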
5423 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5424 if (op == 11) {
5425 gen_neon_negl(cpu_V0, size);
5426 }
5427 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5428 break;
5429 default:
5430 abort();
5431 }
5432 neon_store_reg64(cpu_V0, rd + pass);
5433 } else if (op == 4 || op == 6) {
5434 /* Narrowing operation. */
5435 tmp = tcg_temp_new_i32();
5436 if (!u) {
5437 switch (size) {
5438 case 0:
5439 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5440 break;
5441 case 1:
5442 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5443 break;
5444 case 2:
5445 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5446 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5447 break;
5448 default: abort();
5449 }
5450 } else {
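/* Rounding narrow (VRADDHN/VRSUBHN): add the rounding constant
 * (half of the discarded low half) before taking the high half.
 */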
5451 switch (size) {
5452 case 0:
5453 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5454 break;
5455 case 1:
5456 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5457 break;
5458 case 2:
5459 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5460 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5461 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5462 break;
5463 default: abort();
5464 }
5465 }
5466 if (pass == 0) {
5467 tmp3 = tmp;
5468 } else {
5469 neon_store_reg(rd, 0, tmp3);
5470 neon_store_reg(rd, 1, tmp);
5471 }
5472 } else {
5473 /* Write back the result. */
5474 neon_store_reg64(cpu_V0, rd + pass);
5475 }
5476 }
5477 } else {
5478 /* Two registers and a scalar. NB that for ops of this form
5479 * the ARM ARM labels bit 24 as Q, but it is in our variable
5480 * 'u', not 'q'.
5481 */
5482 if (size == 0) {
5483 return 1;
5484 }
5485 switch (op) {
5486 case 1: /* Float VMLA scalar */
5487 case 5: /* Floating point VMLS scalar */
5488 case 9: /* Floating point VMUL scalar */
5489 if (size == 1) {
5490 return 1;
5491 }
5492 /* fall through */
5493 case 0: /* Integer VMLA scalar */
5494 case 4: /* Integer VMLS scalar */
5495 case 8: /* Integer VMUL scalar */
5496 case 12: /* VQDMULH scalar */
5497 case 13: /* VQRDMULH scalar */
5498 if (u && ((rd | rn) & 1)) {
5499 return 1;
5500 }
5501 tmp = neon_get_scalar(size, rm);
5502 neon_store_scratch(0, tmp);
5503 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5504 tmp = neon_load_scratch(0);
5505 tmp2 = neon_load_reg(rn, pass);
5506 if (op == 12) {
5507 if (size == 1) {
5508 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
5509 } else {
5510 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
5511 }
5512 } else if (op == 13) {
5513 if (size == 1) {
5514 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
5515 } else {
5516 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
5517 }
5518 } else if (op & 1) {
5519 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5520 } else {
5521 switch (size) {
5522 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5523 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5524 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5525 default: abort();
5526 }
5527 }
5528 tcg_temp_free_i32(tmp2);
5529 if (op < 8) {
5530 /* Accumulate. */
5531 tmp2 = neon_load_reg(rd, pass);
5532 switch (op) {
5533 case 0:
5534 gen_neon_add(size, tmp, tmp2);
5535 break;
5536 case 1:
5537 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5538 break;
5539 case 4:
5540 gen_neon_rsb(size, tmp, tmp2);
5541 break;
5542 case 5:
5543 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5544 break;
5545 default:
5546 abort();
5547 }
5548 tcg_temp_free_i32(tmp2);
5549 }
5550 neon_store_reg(rd, pass, tmp);
5551 }
5552 break;
5553 case 3: /* VQDMLAL scalar */
5554 case 7: /* VQDMLSL scalar */
5555 case 11: /* VQDMULL scalar */
5556 if (u == 1) {
5557 return 1;
5558 }
5559 /* fall through */
5560 case 2: /* VMLAL scalar */
5561 case 6: /* VMLSL scalar */
5562 case 10: /* VMULL scalar */
5563 if (rd & 1) {
5564 return 1;
5565 }
5566 tmp2 = neon_get_scalar(size, rm);
5567 /* We need a copy of tmp2 because gen_neon_mull
5568 * frees it during pass 0. */
5569 tmp4 = tcg_temp_new_i32();
5570 tcg_gen_mov_i32(tmp4, tmp2);
5571 tmp3 = neon_load_reg(rn, 1);
5572
5573 for (pass = 0; pass < 2; pass++) {
5574 if (pass == 0) {
5575 tmp = neon_load_reg(rn, 0);
5576 } else {
5577 tmp = tmp3;
5578 tmp2 = tmp4;
5579 }
5580 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5581 if (op != 11) {
5582 neon_load_reg64(cpu_V1, rd + pass);
5583 }
5584 switch (op) {
5585 case 6:
5586 gen_neon_negl(cpu_V0, size);
5587 /* Fall through */
5588 case 2:
5589 gen_neon_addl(size);
5590 break;
5591 case 3: case 7:
5592 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5593 if (op == 7) {
5594 gen_neon_negl(cpu_V0, size);
5595 }
5596 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5597 break;
5598 case 10:
5599 /* no-op */
5600 break;
5601 case 11:
5602 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5603 break;
5604 default:
5605 abort();
5606 }
5607 neon_store_reg64(cpu_V0, rd + pass);
5608 }
5609
5610
5611 break;
5612 default: /* 14 and 15 are RESERVED */
5613 return 1;
5614 }
5615 }
5616 } else { /* size == 3 */
5617 if (!u) {
5618 /* Extract. */
5619 imm = (insn >> 8) & 0xf;
5620
5621 if (imm > 7 && !q)
5622 return 1;
5623
5624 if (q && ((rd | rn | rm) & 1)) {
5625 return 1;
5626 }
5627
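/* imm is the byte offset into the concatenation of the source
 * registers at which the extracted result starts; Vn supplies the
 * low-order bytes.
 */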
5628 if (imm == 0) {
5629 neon_load_reg64(cpu_V0, rn);
5630 if (q) {
5631 neon_load_reg64(cpu_V1, rn + 1);
5632 }
5633 } else if (imm == 8) {
5634 neon_load_reg64(cpu_V0, rn + 1);
5635 if (q) {
5636 neon_load_reg64(cpu_V1, rm);
5637 }
5638 } else if (q) {
5639 tmp64 = tcg_temp_new_i64();
5640 if (imm < 8) {
5641 neon_load_reg64(cpu_V0, rn);
5642 neon_load_reg64(tmp64, rn + 1);
5643 } else {
5644 neon_load_reg64(cpu_V0, rn + 1);
5645 neon_load_reg64(tmp64, rm);
5646 }
5647 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5648 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5649 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5650 if (imm < 8) {
5651 neon_load_reg64(cpu_V1, rm);
5652 } else {
5653 neon_load_reg64(cpu_V1, rm + 1);
5654 imm -= 8;
5655 }
5656 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5657 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5658 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5659 tcg_temp_free_i64(tmp64);
5660 } else {
5661 /* BUGFIX */
5662 neon_load_reg64(cpu_V0, rn);
5663 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5664 neon_load_reg64(cpu_V1, rm);
5665 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5666 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5667 }
5668 neon_store_reg64(cpu_V0, rd);
5669 if (q) {
5670 neon_store_reg64(cpu_V1, rd + 1);
5671 }
5672 } else if ((insn & (1 << 11)) == 0) {
5673 /* Two register misc. */
5674 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5675 size = (insn >> 18) & 3;
5676 /* UNDEF for unknown op values and bad op-size combinations */
5677 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5678 return 1;
5679 }
5680 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5681 q && ((rm | rd) & 1)) {
5682 return 1;
5683 }
5684 switch (op) {
5685 case NEON_2RM_VREV64:
5686 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5687 tmp = neon_load_reg(rm, pass * 2);
5688 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5689 switch (size) {
5690 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5691 case 1: gen_swap_half(tmp); break;
5692 case 2: /* no-op */ break;
5693 default: abort();
5694 }
5695 neon_store_reg(rd, pass * 2 + 1, tmp);
5696 if (size == 2) {
5697 neon_store_reg(rd, pass * 2, tmp2);
5698 } else {
5699 switch (size) {
5700 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5701 case 1: gen_swap_half(tmp2); break;
5702 default: abort();
5703 }
5704 neon_store_reg(rd, pass * 2, tmp2);
5705 }
5706 }
5707 break;
5708 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5709 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
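/* Widen both halves of the source, add adjacent element pairs, and
 * for VPADAL also accumulate into the existing destination value.
 */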
5710 for (pass = 0; pass < q + 1; pass++) {
5711 tmp = neon_load_reg(rm, pass * 2);
5712 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5713 tmp = neon_load_reg(rm, pass * 2 + 1);
5714 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5715 switch (size) {
5716 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5717 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5718 case 2: tcg_gen_add_i64(CPU_V001); break;
5719 default: abort();
5720 }
5721 if (op >= NEON_2RM_VPADAL) {
5722 /* Accumulate. */
5723 neon_load_reg64(cpu_V1, rd + pass);
5724 gen_neon_addl(size);
5725 }
5726 neon_store_reg64(cpu_V0, rd + pass);
5727 }
5728 break;
5729 case NEON_2RM_VTRN:
5730 if (size == 2) {
5731 int n;
5732 for (n = 0; n < (q ? 4 : 2); n += 2) {
5733 tmp = neon_load_reg(rm, n);
5734 tmp2 = neon_load_reg(rd, n + 1);
5735 neon_store_reg(rm, n, tmp2);
5736 neon_store_reg(rd, n + 1, tmp);
5737 }
5738 } else {
5739 goto elementwise;
5740 }
5741 break;
5742 case NEON_2RM_VUZP:
5743 if (gen_neon_unzip(rd, rm, size, q)) {
5744 return 1;
5745 }
5746 break;
5747 case NEON_2RM_VZIP:
5748 if (gen_neon_zip(rd, rm, size, q)) {
5749 return 1;
5750 }
5751 break;
5752 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5753 /* also VQMOVUN; op field and mnemonics don't line up */
5754 if (rm & 1) {
5755 return 1;
5756 }
5757 TCGV_UNUSED(tmp2);
5758 for (pass = 0; pass < 2; pass++) {
5759 neon_load_reg64(cpu_V0, rm + pass);
5760 tmp = tcg_temp_new_i32();
5761 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5762 tmp, cpu_V0);
5763 if (pass == 0) {
5764 tmp2 = tmp;
5765 } else {
5766 neon_store_reg(rd, 0, tmp2);
5767 neon_store_reg(rd, 1, tmp);
5768 }
5769 }
5770 break;
5771 case NEON_2RM_VSHLL:
5772 if (q || (rd & 1)) {
5773 return 1;
5774 }
5775 tmp = neon_load_reg(rm, 0);
5776 tmp2 = neon_load_reg(rm, 1);
5777 for (pass = 0; pass < 2; pass++) {
5778 if (pass == 1)
5779 tmp = tmp2;
5780 gen_neon_widen(cpu_V0, tmp, size, 1);
5781 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5782 neon_store_reg64(cpu_V0, rd + pass);
5783 }
5784 break;
5785 case NEON_2RM_VCVT_F16_F32:
5786 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5787 q || (rm & 1)) {
5788 return 1;
5789 }
5790 tmp = tcg_temp_new_i32();
5791 tmp2 = tcg_temp_new_i32();
5792 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5793 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5794 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5795 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5796 tcg_gen_shli_i32(tmp2, tmp2, 16);
5797 tcg_gen_or_i32(tmp2, tmp2, tmp);
5798 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5799 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5800 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5801 neon_store_reg(rd, 0, tmp2);
5802 tmp2 = tcg_temp_new_i32();
5803 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5804 tcg_gen_shli_i32(tmp2, tmp2, 16);
5805 tcg_gen_or_i32(tmp2, tmp2, tmp);
5806 neon_store_reg(rd, 1, tmp2);
5807 tcg_temp_free_i32(tmp);
5808 break;
5809 case NEON_2RM_VCVT_F32_F16:
5810 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5811 q || (rd & 1)) {
5812 return 1;
5813 }
5814 tmp3 = tcg_temp_new_i32();
5815 tmp = neon_load_reg(rm, 0);
5816 tmp2 = neon_load_reg(rm, 1);
5817 tcg_gen_ext16u_i32(tmp3, tmp);
5818 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5819 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5820 tcg_gen_shri_i32(tmp3, tmp, 16);
5821 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5822 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5823 tcg_temp_free_i32(tmp);
5824 tcg_gen_ext16u_i32(tmp3, tmp2);
5825 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5826 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5827 tcg_gen_shri_i32(tmp3, tmp2, 16);
5828 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5829 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5830 tcg_temp_free_i32(tmp2);
5831 tcg_temp_free_i32(tmp3);
5832 break;
5833 default:
5834 elementwise:
5835 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5836 if (neon_2rm_is_float_op(op)) {
5837 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5838 neon_reg_offset(rm, pass));
5839 TCGV_UNUSED(tmp);
5840 } else {
5841 tmp = neon_load_reg(rm, pass);
5842 }
5843 switch (op) {
5844 case NEON_2RM_VREV32:
5845 switch (size) {
5846 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5847 case 1: gen_swap_half(tmp); break;
5848 default: abort();
5849 }
5850 break;
5851 case NEON_2RM_VREV16:
5852 gen_rev16(tmp);
5853 break;
5854 case NEON_2RM_VCLS:
5855 switch (size) {
5856 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5857 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5858 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5859 default: abort();
5860 }
5861 break;
5862 case NEON_2RM_VCLZ:
5863 switch (size) {
5864 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5865 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5866 case 2: gen_helper_clz(tmp, tmp); break;
5867 default: abort();
5868 }
5869 break;
5870 case NEON_2RM_VCNT:
5871 gen_helper_neon_cnt_u8(tmp, tmp);
5872 break;
5873 case NEON_2RM_VMVN:
5874 tcg_gen_not_i32(tmp, tmp);
5875 break;
5876 case NEON_2RM_VQABS:
5877 switch (size) {
5878 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
5879 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
5880 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
5881 default: abort();
5882 }
5883 break;
5884 case NEON_2RM_VQNEG:
5885 switch (size) {
5886 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
5887 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
5888 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
5889 default: abort();
5890 }
5891 break;
5892 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
5893 tmp2 = tcg_const_i32(0);
5894 switch(size) {
5895 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5896 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5897 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5898 default: abort();
5899 }
5900 tcg_temp_free(tmp2);
5901 if (op == NEON_2RM_VCLE0) {
5902 tcg_gen_not_i32(tmp, tmp);
5903 }
5904 break;
5905 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
5906 tmp2 = tcg_const_i32(0);
5907 switch(size) {
5908 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5909 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5910 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5911 default: abort();
5912 }
5913 tcg_temp_free(tmp2);
5914 if (op == NEON_2RM_VCLT0) {
5915 tcg_gen_not_i32(tmp, tmp);
5916 }
5917 break;
5918 case NEON_2RM_VCEQ0:
5919 tmp2 = tcg_const_i32(0);
5920 switch(size) {
5921 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5922 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5923 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5924 default: abort();
5925 }
5926 tcg_temp_free(tmp2);
5927 break;
5928 case NEON_2RM_VABS:
5929 switch(size) {
5930 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5931 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5932 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5933 default: abort();
5934 }
5935 break;
5936 case NEON_2RM_VNEG:
5937 tmp2 = tcg_const_i32(0);
5938 gen_neon_rsb(size, tmp, tmp2);
5939 tcg_temp_free(tmp2);
5940 break;
5941 case NEON_2RM_VCGT0_F:
5942 tmp2 = tcg_const_i32(0);
5943 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5944 tcg_temp_free(tmp2);
5945 break;
5946 case NEON_2RM_VCGE0_F:
5947 tmp2 = tcg_const_i32(0);
5948 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5949 tcg_temp_free(tmp2);
5950 break;
5951 case NEON_2RM_VCEQ0_F:
5952 tmp2 = tcg_const_i32(0);
5953 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5954 tcg_temp_free(tmp2);
5955 break;
5956 case NEON_2RM_VCLE0_F:
5957 tmp2 = tcg_const_i32(0);
5958 gen_helper_neon_cge_f32(tmp, tmp2, tmp);
5959 tcg_temp_free(tmp2);
5960 break;
5961 case NEON_2RM_VCLT0_F:
5962 tmp2 = tcg_const_i32(0);
5963 gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
5964 tcg_temp_free(tmp2);
5965 break;
5966 case NEON_2RM_VABS_F:
5967 gen_vfp_abs(0);
5968 break;
5969 case NEON_2RM_VNEG_F:
5970 gen_vfp_neg(0);
5971 break;
5972 case NEON_2RM_VSWP:
5973 tmp2 = neon_load_reg(rd, pass);
5974 neon_store_reg(rm, pass, tmp2);
5975 break;
5976 case NEON_2RM_VTRN:
5977 tmp2 = neon_load_reg(rd, pass);
5978 switch (size) {
5979 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5980 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5981 default: abort();
5982 }
5983 neon_store_reg(rm, pass, tmp2);
5984 break;
5985 case NEON_2RM_VRECPE:
5986 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5987 break;
5988 case NEON_2RM_VRSQRTE:
5989 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5990 break;
5991 case NEON_2RM_VRECPE_F:
5992 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5993 break;
5994 case NEON_2RM_VRSQRTE_F:
5995 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5996 break;
5997 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5998 gen_vfp_sito(0);
5999 break;
6000 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6001 gen_vfp_uito(0);
6002 break;
6003 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6004 gen_vfp_tosiz(0);
6005 break;
6006 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6007 gen_vfp_touiz(0);
6008 break;
6009 default:
6010 /* Reserved op values were caught by the
6011 * neon_2rm_sizes[] check earlier.
6012 */
6013 abort();
6014 }
6015 if (neon_2rm_is_float_op(op)) {
6016 tcg_gen_st_f32(cpu_F0s, cpu_env,
6017 neon_reg_offset(rd, pass));
6018 } else {
6019 neon_store_reg(rd, pass, tmp);
6020 }
6021 }
6022 break;
6023 }
6024 } else if ((insn & (1 << 10)) == 0) {
6025 /* VTBL, VTBX. */
6026 int n = ((insn >> 5) & 0x18) + 8;
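/* n is the table length in bytes: (len + 1) * 8. Bit 6 selects VTBX,
 * where out-of-range indices leave destination bytes unchanged, so
 * the old destination value is passed to the helper; for VTBL zeroes
 * are passed instead.
 */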
6027 if (insn & (1 << 6)) {
6028 tmp = neon_load_reg(rd, 0);
6029 } else {
6030 tmp = tcg_temp_new_i32();
6031 tcg_gen_movi_i32(tmp, 0);
6032 }
6033 tmp2 = neon_load_reg(rm, 0);
6034 tmp4 = tcg_const_i32(rn);
6035 tmp5 = tcg_const_i32(n);
6036 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
6037 tcg_temp_free_i32(tmp);
6038 if (insn & (1 << 6)) {
6039 tmp = neon_load_reg(rd, 1);
6040 } else {
6041 tmp = tcg_temp_new_i32();
6042 tcg_gen_movi_i32(tmp, 0);
6043 }
6044 tmp3 = neon_load_reg(rm, 1);
6045 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
6046 tcg_temp_free_i32(tmp5);
6047 tcg_temp_free_i32(tmp4);
6048 neon_store_reg(rd, 0, tmp2);
6049 neon_store_reg(rd, 1, tmp3);
6050 tcg_temp_free_i32(tmp);
6051 } else if ((insn & 0x380) == 0) {
6052 /* VDUP */
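/* The imm4 field encodes both the scalar size and its index: bit 16
 * set selects an 8-bit element indexed by bits [19:17], bit 17 set a
 * 16-bit element indexed by bits [19:18], otherwise a 32-bit element
 * selected by bit 19 (which also picks the source word loaded below).
 */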
6053 if (insn & (1 << 19)) {
6054 tmp = neon_load_reg(rm, 1);
6055 } else {
6056 tmp = neon_load_reg(rm, 0);
6057 }
6058 if (insn & (1 << 16)) {
6059 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6060 } else if (insn & (1 << 17)) {
6061 if ((insn >> 18) & 1)
6062 gen_neon_dup_high16(tmp);
6063 else
6064 gen_neon_dup_low16(tmp);
6065 }
6066 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6067 tmp2 = tcg_temp_new_i32();
6068 tcg_gen_mov_i32(tmp2, tmp);
6069 neon_store_reg(rd, pass, tmp2);
6070 }
6071 tcg_temp_free_i32(tmp);
6072 } else {
6073 return 1;
6074 }
6075 }
6076 }
6077 return 0;
6078 }
6079
6080 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
6081 {
6082 int crn = (insn >> 16) & 0xf;
6083 int crm = insn & 0xf;
6084 int op1 = (insn >> 21) & 7;
6085 int op2 = (insn >> 5) & 7;
6086 int rt = (insn >> 12) & 0xf;
6087 TCGv tmp;
6088
6089 /* Minimal set of debug registers, since we don't support debug */
6090 if (op1 == 0 && crn == 0 && op2 == 0) {
6091 switch (crm) {
6092 case 0:
6093 /* DBGDIDR: just RAZ. In particular this means the
6094 * "debug architecture version" bits will read as
6095 * a reserved value, which should cause Linux to
6096 * not try to use the debug hardware.
6097 */
6098 tmp = tcg_const_i32(0);
6099 store_reg(s, rt, tmp);
6100 return 0;
6101 case 1:
6102 case 2:
6103 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6104 * don't implement memory mapped debug components
6105 */
6106 if (ENABLE_ARCH_7) {
6107 tmp = tcg_const_i32(0);
6108 store_reg(s, rt, tmp);
6109 return 0;
6110 }
6111 break;
6112 default:
6113 break;
6114 }
6115 }
6116
6117 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6118 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6119 /* TEECR */
6120 if (IS_USER(s))
6121 return 1;
6122 tmp = load_cpu_field(teecr);
6123 store_reg(s, rt, tmp);
6124 return 0;
6125 }
6126 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6127 /* TEEHBR */
6128 if (IS_USER(s) && (env->teecr & 1))
6129 return 1;
6130 tmp = load_cpu_field(teehbr);
6131 store_reg(s, rt, tmp);
6132 return 0;
6133 }
6134 }
6135 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
6136 op1, crn, crm, op2);
6137 return 1;
6138 }
6139
6140 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6141 {
6142 int crn = (insn >> 16) & 0xf;
6143 int crm = insn & 0xf;
6144 int op1 = (insn >> 21) & 7;
6145 int op2 = (insn >> 5) & 7;
6146 int rt = (insn >> 12) & 0xf;
6147 TCGv tmp;
6148
6149 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6150 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6151 /* TEECR */
6152 if (IS_USER(s))
6153 return 1;
6154 tmp = load_reg(s, rt);
6155 gen_helper_set_teecr(cpu_env, tmp);
6156 tcg_temp_free_i32(tmp);
6157 return 0;
6158 }
6159 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6160 /* TEEHBR */
6161 if (IS_USER(s) && (env->teecr & 1))
6162 return 1;
6163 tmp = load_reg(s, rt);
6164 store_cpu_field(tmp, teehbr);
6165 return 0;
6166 }
6167 }
6168 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6169 op1, crn, crm, op2);
6170 return 1;
6171 }
6172
6173 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6174 {
6175 int cpnum;
6176
6177 cpnum = (insn >> 8) & 0xf;
6178 if (arm_feature(env, ARM_FEATURE_XSCALE)
6179 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6180 return 1;
6181
6182 switch (cpnum) {
6183 case 0:
6184 case 1:
6185 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6186 return disas_iwmmxt_insn(env, s, insn);
6187 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6188 return disas_dsp_insn(env, s, insn);
6189 }
6190 return 1;
6191 case 10:
6192 case 11:
6193 return disas_vfp_insn (env, s, insn);
6194 case 14:
6195 /* Coprocessors 7-15 are architecturally reserved by ARM.
6196 Unfortunately Intel decided to ignore this. */
6197 if (arm_feature(env, ARM_FEATURE_XSCALE))
6198 goto board;
6199 if (insn & (1 << 20))
6200 return disas_cp14_read(env, s, insn);
6201 else
6202 return disas_cp14_write(env, s, insn);
6203 case 15:
6204 return disas_cp15_insn (env, s, insn);
6205 default:
6206 board:
6207 /* Unknown coprocessor. See if the board has hooked it. */
6208 return disas_cp_insn (env, s, insn);
6209 }
6210 }
6211
6212
6213 /* Store a 64-bit value to a register pair. Clobbers val. */
6214 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6215 {
6216 TCGv tmp;
6217 tmp = tcg_temp_new_i32();
6218 tcg_gen_trunc_i64_i32(tmp, val);
6219 store_reg(s, rlow, tmp);
6220 tmp = tcg_temp_new_i32();
6221 tcg_gen_shri_i64(val, val, 32);
6222 tcg_gen_trunc_i64_i32(tmp, val);
6223 store_reg(s, rhigh, tmp);
6224 }
6225
6226 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6227 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6228 {
6229 TCGv_i64 tmp;
6230 TCGv tmp2;
6231
6232 /* Load value and extend to 64 bits. */
6233 tmp = tcg_temp_new_i64();
6234 tmp2 = load_reg(s, rlow);
6235 tcg_gen_extu_i32_i64(tmp, tmp2);
6236 tcg_temp_free_i32(tmp2);
6237 tcg_gen_add_i64(val, val, tmp);
6238 tcg_temp_free_i64(tmp);
6239 }
6240
6241 /* load and add a 64-bit value from a register pair. */
6242 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6243 {
6244 TCGv_i64 tmp;
6245 TCGv tmpl;
6246 TCGv tmph;
6247
6248 /* Load 64-bit value rd:rn. */
6249 tmpl = load_reg(s, rlow);
6250 tmph = load_reg(s, rhigh);
6251 tmp = tcg_temp_new_i64();
6252 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6253 tcg_temp_free_i32(tmpl);
6254 tcg_temp_free_i32(tmph);
6255 tcg_gen_add_i64(val, val, tmp);
6256 tcg_temp_free_i64(tmp);
6257 }
6258
6259 /* Set N and Z flags from a 64-bit value. */
6260 static void gen_logicq_cc(TCGv_i64 val)
6261 {
6262 TCGv tmp = tcg_temp_new_i32();
6263 gen_helper_logicq_cc(tmp, val);
6264 gen_logic_CC(tmp);
6265 tcg_temp_free_i32(tmp);
6266 }
6267
6268 /* Load/Store exclusive instructions are implemented by remembering
6269 the value/address loaded, and seeing if these are the same
6270 when the store is performed. This should be sufficient to implement
6271 the architecturally mandated semantics, and avoids having to monitor
6272 regular stores.
6273
6274 In system emulation mode only one CPU will be running at once, so
6275 this sequence is effectively atomic. In user emulation mode we
6276 throw an exception and handle the atomic operation elsewhere. */
6277 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6278 TCGv addr, int size)
6279 {
6280 TCGv tmp;
6281
6282 switch (size) {
6283 case 0:
6284 tmp = gen_ld8u(addr, IS_USER(s));
6285 break;
6286 case 1:
6287 tmp = gen_ld16u(addr, IS_USER(s));
6288 break;
6289 case 2:
6290 case 3:
6291 tmp = gen_ld32(addr, IS_USER(s));
6292 break;
6293 default:
6294 abort();
6295 }
6296 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6297 store_reg(s, rt, tmp);
6298 if (size == 3) {
6299 TCGv tmp2 = tcg_temp_new_i32();
6300 tcg_gen_addi_i32(tmp2, addr, 4);
6301 tmp = gen_ld32(tmp2, IS_USER(s));
6302 tcg_temp_free_i32(tmp2);
6303 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6304 store_reg(s, rt2, tmp);
6305 }
6306 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6307 }
6308
6309 static void gen_clrex(DisasContext *s)
6310 {
6311 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6312 }
6313
6314 #ifdef CONFIG_USER_ONLY
6315 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6316 TCGv addr, int size)
6317 {
6318 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6319 tcg_gen_movi_i32(cpu_exclusive_info,
6320 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6321 gen_exception_insn(s, 4, EXCP_STREX);
6322 }
6323 #else
6324 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6325 TCGv addr, int size)
6326 {
6327 TCGv tmp;
6328 int done_label;
6329 int fail_label;
6330
6331 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6332 [addr] = {Rt};
6333 {Rd} = 0;
6334 } else {
6335 {Rd} = 1;
6336 } */
6337 fail_label = gen_new_label();
6338 done_label = gen_new_label();
6339 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6340 switch (size) {
6341 case 0:
6342 tmp = gen_ld8u(addr, IS_USER(s));
6343 break;
6344 case 1:
6345 tmp = gen_ld16u(addr, IS_USER(s));
6346 break;
6347 case 2:
6348 case 3:
6349 tmp = gen_ld32(addr, IS_USER(s));
6350 break;
6351 default:
6352 abort();
6353 }
6354 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6355 tcg_temp_free_i32(tmp);
6356 if (size == 3) {
6357 TCGv tmp2 = tcg_temp_new_i32();
6358 tcg_gen_addi_i32(tmp2, addr, 4);
6359 tmp = gen_ld32(tmp2, IS_USER(s));
6360 tcg_temp_free_i32(tmp2);
6361 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6362 tcg_temp_free_i32(tmp);
6363 }
6364 tmp = load_reg(s, rt);
6365 switch (size) {
6366 case 0:
6367 gen_st8(tmp, addr, IS_USER(s));
6368 break;
6369 case 1:
6370 gen_st16(tmp, addr, IS_USER(s));
6371 break;
6372 case 2:
6373 case 3:
6374 gen_st32(tmp, addr, IS_USER(s));
6375 break;
6376 default:
6377 abort();
6378 }
6379 if (size == 3) {
6380 tcg_gen_addi_i32(addr, addr, 4);
6381 tmp = load_reg(s, rt2);
6382 gen_st32(tmp, addr, IS_USER(s));
6383 }
6384 tcg_gen_movi_i32(cpu_R[rd], 0);
6385 tcg_gen_br(done_label);
6386 gen_set_label(fail_label);
6387 tcg_gen_movi_i32(cpu_R[rd], 1);
6388 gen_set_label(done_label);
6389 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6390 }
6391 #endif
6392
6393 static void disas_arm_insn(CPUState * env, DisasContext *s)
6394 {
6395 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6396 TCGv tmp;
6397 TCGv tmp2;
6398 TCGv tmp3;
6399 TCGv addr;
6400 TCGv_i64 tmp64;
6401
6402 insn = ldl_code(s->pc);
6403 s->pc += 4;
6404
6405 /* M variants do not implement ARM mode. */
6406 if (IS_M(env))
6407 goto illegal_op;
6408 cond = insn >> 28;
6409 if (cond == 0xf){
6410 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6411 * choose to UNDEF. In ARMv5 and above the space is used
6412 * for miscellaneous unconditional instructions.
6413 */
6414 ARCH(5);
6415
6416 /* Unconditional instructions. */
6417 if (((insn >> 25) & 7) == 1) {
6418 /* NEON Data processing. */
6419 if (!arm_feature(env, ARM_FEATURE_NEON))
6420 goto illegal_op;
6421
6422 if (disas_neon_data_insn(env, s, insn))
6423 goto illegal_op;
6424 return;
6425 }
6426 if ((insn & 0x0f100000) == 0x04000000) {
6427 /* NEON load/store. */
6428 if (!arm_feature(env, ARM_FEATURE_NEON))
6429 goto illegal_op;
6430
6431 if (disas_neon_ls_insn(env, s, insn))
6432 goto illegal_op;
6433 return;
6434 }
6435 if (((insn & 0x0f30f000) == 0x0510f000) ||
6436 ((insn & 0x0f30f010) == 0x0710f000)) {
6437 if ((insn & (1 << 22)) == 0) {
6438 /* PLDW; v7MP */
6439 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6440 goto illegal_op;
6441 }
6442 }
6443 /* Otherwise PLD; v5TE+ */
6444 ARCH(5TE);
6445 return;
6446 }
6447 if (((insn & 0x0f70f000) == 0x0450f000) ||
6448 ((insn & 0x0f70f010) == 0x0650f000)) {
6449 ARCH(7);
6450 return; /* PLI; V7 */
6451 }
6452 if (((insn & 0x0f700000) == 0x04100000) ||
6453 ((insn & 0x0f700010) == 0x06100000)) {
6454 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6455 goto illegal_op;
6456 }
6457 return; /* v7MP: Unallocated memory hint: must NOP */
6458 }
6459
6460 if ((insn & 0x0ffffdff) == 0x01010000) {
6461 ARCH(6);
6462 /* setend */
6463 if (insn & (1 << 9)) {
6464 /* BE8 mode not implemented. */
6465 goto illegal_op;
6466 }
6467 return;
6468 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6469 switch ((insn >> 4) & 0xf) {
6470 case 1: /* clrex */
6471 ARCH(6K);
6472 gen_clrex(s);
6473 return;
6474 case 4: /* dsb */
6475 case 5: /* dmb */
6476 case 6: /* isb */
6477 ARCH(7);
6478 /* We don't emulate caches so these are a no-op. */
6479 return;
6480 default:
6481 goto illegal_op;
6482 }
6483 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6484 /* srs */
6485 int32_t offset;
6486 if (IS_USER(s))
6487 goto illegal_op;
6488 ARCH(6);
6489 op1 = (insn & 0x1f);
6490 addr = tcg_temp_new_i32();
6491 tmp = tcg_const_i32(op1);
6492 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6493 tcg_temp_free_i32(tmp);
6494 i = (insn >> 23) & 3;
6495 switch (i) {
6496 case 0: offset = -4; break; /* DA */
6497 case 1: offset = 0; break; /* IA */
6498 case 2: offset = -8; break; /* DB */
6499 case 3: offset = 4; break; /* IB */
6500 default: abort();
6501 }
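/* These offsets leave addr at the lower of the two words written
   (r14 first, then SPSR): DA -> Rn - 4, IA -> Rn, DB -> Rn - 8,
   IB -> Rn + 4.  */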
6502 if (offset)
6503 tcg_gen_addi_i32(addr, addr, offset);
6504 tmp = load_reg(s, 14);
6505 gen_st32(tmp, addr, 0);
6506 tmp = load_cpu_field(spsr);
6507 tcg_gen_addi_i32(addr, addr, 4);
6508 gen_st32(tmp, addr, 0);
6509 if (insn & (1 << 21)) {
6510 /* Base writeback. */
6511 switch (i) {
6512 case 0: offset = -8; break;
6513 case 1: offset = 4; break;
6514 case 2: offset = -4; break;
6515 case 3: offset = 0; break;
6516 default: abort();
6517 }
6518 if (offset)
6519 tcg_gen_addi_i32(addr, addr, offset);
6520 tmp = tcg_const_i32(op1);
6521 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6522 tcg_temp_free_i32(tmp);
6523 tcg_temp_free_i32(addr);
6524 } else {
6525 tcg_temp_free_i32(addr);
6526 }
6527 return;
6528 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6529 /* rfe */
6530 int32_t offset;
6531 if (IS_USER(s))
6532 goto illegal_op;
6533 ARCH(6);
6534 rn = (insn >> 16) & 0xf;
6535 addr = load_reg(s, rn);
6536 i = (insn >> 23) & 3;
6537 switch (i) {
6538 case 0: offset = -4; break; /* DA */
6539 case 1: offset = 0; break; /* IA */
6540 case 2: offset = -8; break; /* DB */
6541 case 3: offset = 4; break; /* IB */
6542 default: abort();
6543 }
6544 if (offset)
6545 tcg_gen_addi_i32(addr, addr, offset);
6546 /* Load PC into tmp and CPSR into tmp2. */
6547 tmp = gen_ld32(addr, 0);
6548 tcg_gen_addi_i32(addr, addr, 4);
6549 tmp2 = gen_ld32(addr, 0);
6550 if (insn & (1 << 21)) {
6551 /* Base writeback. */
6552 switch (i) {
6553 case 0: offset = -8; break;
6554 case 1: offset = 4; break;
6555 case 2: offset = -4; break;
6556 case 3: offset = 0; break;
6557 default: abort();
6558 }
6559 if (offset)
6560 tcg_gen_addi_i32(addr, addr, offset);
6561 store_reg(s, rn, addr);
6562 } else {
6563 tcg_temp_free_i32(addr);
6564 }
6565 gen_rfe(s, tmp, tmp2);
6566 return;
6567 } else if ((insn & 0x0e000000) == 0x0a000000) {
6568 /* branch link and change to thumb (blx <offset>) */
6569 int32_t offset;
6570
6571 val = (uint32_t)s->pc;
6572 tmp = tcg_temp_new_i32();
6573 tcg_gen_movi_i32(tmp, val);
6574 store_reg(s, 14, tmp);
6575 /* Sign-extend the 24-bit offset */
6576 offset = (((int32_t)insn) << 8) >> 8;
6577 /* offset * 4 + bit24 * 2 + (thumb bit) */
6578 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6579 /* pipeline offset */
6580 val += 4;
6581 /* protected by ARCH(5); above, near the start of uncond block */
6582 gen_bx_im(s, val);
6583 return;
6584 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6585 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6586 /* iWMMXt register transfer. */
6587 if (env->cp15.c15_cpar & (1 << 1))
6588 if (!disas_iwmmxt_insn(env, s, insn))
6589 return;
6590 }
6591 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6592 /* Coprocessor double register transfer. */
6593 ARCH(5TE);
6594 } else if ((insn & 0x0f000010) == 0x0e000010) {
6595 /* Additional coprocessor register transfer. */
6596 } else if ((insn & 0x0ff10020) == 0x01000000) {
6597 uint32_t mask;
6598 uint32_t val;
6599 /* cps (privileged) */
6600 if (IS_USER(s))
6601 return;
6602 mask = val = 0;
6603 if (insn & (1 << 19)) {
6604 if (insn & (1 << 8))
6605 mask |= CPSR_A;
6606 if (insn & (1 << 7))
6607 mask |= CPSR_I;
6608 if (insn & (1 << 6))
6609 mask |= CPSR_F;
6610 if (insn & (1 << 18))
6611 val |= mask;
6612 }
6613 if (insn & (1 << 17)) {
6614 mask |= CPSR_M;
6615 val |= (insn & 0x1f);
6616 }
6617 if (mask) {
6618 gen_set_psr_im(s, mask, 0, val);
6619 }
6620 return;
6621 }
6622 goto illegal_op;
6623 }
6624 if (cond != 0xe) {
6625 /* If the condition is not "always", generate a conditional jump to the
6626 next instruction.  */
6627 s->condlabel = gen_new_label();
6628 gen_test_cc(cond ^ 1, s->condlabel);
6629 s->condjmp = 1;
6630 }
6631 if ((insn & 0x0f900000) == 0x03000000) {
6632 if ((insn & (1 << 21)) == 0) {
6633 ARCH(6T2);
6634 rd = (insn >> 12) & 0xf;
6635 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
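/* val is the 16-bit immediate imm4:imm12 from insn[19:16] and
   insn[11:0].  E.g. MOVW r0, #0x1234 followed by MOVT r0, #0x5678
   leaves r0 == 0x56781234: MOVW zero-extends, MOVT only replaces
   the top halfword.  */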
6636 if ((insn & (1 << 22)) == 0) {
6637 /* MOVW */
6638 tmp = tcg_temp_new_i32();
6639 tcg_gen_movi_i32(tmp, val);
6640 } else {
6641 /* MOVT */
6642 tmp = load_reg(s, rd);
6643 tcg_gen_ext16u_i32(tmp, tmp);
6644 tcg_gen_ori_i32(tmp, tmp, val << 16);
6645 }
6646 store_reg(s, rd, tmp);
6647 } else {
6648 if (((insn >> 12) & 0xf) != 0xf)
6649 goto illegal_op;
6650 if (((insn >> 16) & 0xf) == 0) {
6651 gen_nop_hint(s, insn & 0xff);
6652 } else {
6653 /* CPSR = immediate */
6654 val = insn & 0xff;
6655 shift = ((insn >> 8) & 0xf) * 2;
6656 if (shift)
6657 val = (val >> shift) | (val << (32 - shift));
6658 i = ((insn & (1 << 22)) != 0);
6659 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6660 goto illegal_op;
6661 }
6662 }
6663 } else if ((insn & 0x0f900000) == 0x01000000
6664 && (insn & 0x00000090) != 0x00000090) {
6665 /* miscellaneous instructions */
6666 op1 = (insn >> 21) & 3;
6667 sh = (insn >> 4) & 0xf;
6668 rm = insn & 0xf;
6669 switch (sh) {
6670 case 0x0: /* move program status register */
6671 if (op1 & 1) {
6672 /* PSR = reg */
6673 tmp = load_reg(s, rm);
6674 i = ((op1 & 2) != 0);
6675 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6676 goto illegal_op;
6677 } else {
6678 /* reg = PSR */
6679 rd = (insn >> 12) & 0xf;
6680 if (op1 & 2) {
6681 if (IS_USER(s))
6682 goto illegal_op;
6683 tmp = load_cpu_field(spsr);
6684 } else {
6685 tmp = tcg_temp_new_i32();
6686 gen_helper_cpsr_read(tmp);
6687 }
6688 store_reg(s, rd, tmp);
6689 }
6690 break;
6691 case 0x1:
6692 if (op1 == 1) {
6693 /* branch/exchange thumb (bx). */
6694 ARCH(4T);
6695 tmp = load_reg(s, rm);
6696 gen_bx(s, tmp);
6697 } else if (op1 == 3) {
6698 /* clz */
6699 ARCH(5);
6700 rd = (insn >> 12) & 0xf;
6701 tmp = load_reg(s, rm);
6702 gen_helper_clz(tmp, tmp);
6703 store_reg(s, rd, tmp);
6704 } else {
6705 goto illegal_op;
6706 }
6707 break;
6708 case 0x2:
6709 if (op1 == 1) {
6710 ARCH(5J); /* bxj */
6711 /* Trivial implementation equivalent to bx. */
6712 tmp = load_reg(s, rm);
6713 gen_bx(s, tmp);
6714 } else {
6715 goto illegal_op;
6716 }
6717 break;
6718 case 0x3:
6719 if (op1 != 1)
6720 goto illegal_op;
6721
6722 ARCH(5);
6723 /* branch link/exchange thumb (blx) */
6724 tmp = load_reg(s, rm);
6725 tmp2 = tcg_temp_new_i32();
6726 tcg_gen_movi_i32(tmp2, s->pc);
6727 store_reg(s, 14, tmp2);
6728 gen_bx(s, tmp);
6729 break;
6730 case 0x5: /* saturating add/subtract */
6731 ARCH(5TE);
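/* op1 selects QADD (0), QSUB (1), QDADD (2) or QDSUB (3):
   bit 1 doubles (with saturation) the second operand first,
   bit 0 selects subtract instead of add.  */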
6732 rd = (insn >> 12) & 0xf;
6733 rn = (insn >> 16) & 0xf;
6734 tmp = load_reg(s, rm);
6735 tmp2 = load_reg(s, rn);
6736 if (op1 & 2)
6737 gen_helper_double_saturate(tmp2, tmp2);
6738 if (op1 & 1)
6739 gen_helper_sub_saturate(tmp, tmp, tmp2);
6740 else
6741 gen_helper_add_saturate(tmp, tmp, tmp2);
6742 tcg_temp_free_i32(tmp2);
6743 store_reg(s, rd, tmp);
6744 break;
6745 case 7:
6746 /* SMC instruction (op1 == 3)
6747 and undefined instructions (op1 == 0 || op1 == 2)
6748 will trap */
6749 if (op1 != 1) {
6750 goto illegal_op;
6751 }
6752 /* bkpt */
6753 ARCH(5);
6754 gen_exception_insn(s, 4, EXCP_BKPT);
6755 break;
6756 case 0x8: /* signed multiply */
6757 case 0xa:
6758 case 0xc:
6759 case 0xe:
6760 ARCH(5TE);
6761 rs = (insn >> 8) & 0xf;
6762 rn = (insn >> 12) & 0xf;
6763 rd = (insn >> 16) & 0xf;
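/* op1 distinguishes the halfword multiplies: 0 = SMLA<x><y>,
   1 = SMLAW<y>/SMULW<y> (32 x 16 -> top 32 bits), 2 = SMLAL<x><y>,
   3 = SMUL<x><y>; bits 5 and 6 (sh) select the operand halfwords.  */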
6764 if (op1 == 1) {
6765 /* (32 * 16) >> 16 */
6766 tmp = load_reg(s, rm);
6767 tmp2 = load_reg(s, rs);
6768 if (sh & 4)
6769 tcg_gen_sari_i32(tmp2, tmp2, 16);
6770 else
6771 gen_sxth(tmp2);
6772 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6773 tcg_gen_shri_i64(tmp64, tmp64, 16);
6774 tmp = tcg_temp_new_i32();
6775 tcg_gen_trunc_i64_i32(tmp, tmp64);
6776 tcg_temp_free_i64(tmp64);
6777 if ((sh & 2) == 0) {
6778 tmp2 = load_reg(s, rn);
6779 gen_helper_add_setq(tmp, tmp, tmp2);
6780 tcg_temp_free_i32(tmp2);
6781 }
6782 store_reg(s, rd, tmp);
6783 } else {
6784 /* 16 * 16 */
6785 tmp = load_reg(s, rm);
6786 tmp2 = load_reg(s, rs);
6787 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6788 tcg_temp_free_i32(tmp2);
6789 if (op1 == 2) {
6790 tmp64 = tcg_temp_new_i64();
6791 tcg_gen_ext_i32_i64(tmp64, tmp);
6792 tcg_temp_free_i32(tmp);
6793 gen_addq(s, tmp64, rn, rd);
6794 gen_storeq_reg(s, rn, rd, tmp64);
6795 tcg_temp_free_i64(tmp64);
6796 } else {
6797 if (op1 == 0) {
6798 tmp2 = load_reg(s, rn);
6799 gen_helper_add_setq(tmp, tmp, tmp2);
6800 tcg_temp_free_i32(tmp2);
6801 }
6802 store_reg(s, rd, tmp);
6803 }
6804 }
6805 break;
6806 default:
6807 goto illegal_op;
6808 }
6809 } else if (((insn & 0x0e000000) == 0 &&
6810 (insn & 0x00000090) != 0x90) ||
6811 ((insn & 0x0e000000) == (1 << 25))) {
6812 int set_cc, logic_cc, shiftop;
6813
6814 op1 = (insn >> 21) & 0xf;
6815 set_cc = (insn >> 20) & 1;
6816 logic_cc = table_logic_cc[op1] & set_cc;
6817
6818 /* data processing instruction */
6819 if (insn & (1 << 25)) {
6820 /* immediate operand */
6821 val = insn & 0xff;
6822 shift = ((insn >> 8) & 0xf) * 2;
6823 if (shift) {
6824 val = (val >> shift) | (val << (32 - shift));
6825 }
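/* The ARM immediate is an 8-bit value rotated right by twice the
   4-bit rotate field, e.g. imm8 = 0x01 with rotate = 1 encodes
   0x40000000 (1 ror 2).  */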
6826 tmp2 = tcg_temp_new_i32();
6827 tcg_gen_movi_i32(tmp2, val);
6828 if (logic_cc && shift) {
6829 gen_set_CF_bit31(tmp2);
6830 }
6831 } else {
6832 /* register */
6833 rm = (insn) & 0xf;
6834 tmp2 = load_reg(s, rm);
6835 shiftop = (insn >> 5) & 3;
6836 if (!(insn & (1 << 4))) {
6837 shift = (insn >> 7) & 0x1f;
6838 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6839 } else {
6840 rs = (insn >> 8) & 0xf;
6841 tmp = load_reg(s, rs);
6842 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6843 }
6844 }
6845 if (op1 != 0x0f && op1 != 0x0d) {
6846 rn = (insn >> 16) & 0xf;
6847 tmp = load_reg(s, rn);
6848 } else {
6849 TCGV_UNUSED(tmp);
6850 }
6851 rd = (insn >> 12) & 0xf;
6852 switch(op1) {
6853 case 0x00:
6854 tcg_gen_and_i32(tmp, tmp, tmp2);
6855 if (logic_cc) {
6856 gen_logic_CC(tmp);
6857 }
6858 store_reg_bx(env, s, rd, tmp);
6859 break;
6860 case 0x01:
6861 tcg_gen_xor_i32(tmp, tmp, tmp2);
6862 if (logic_cc) {
6863 gen_logic_CC(tmp);
6864 }
6865 store_reg_bx(env, s, rd, tmp);
6866 break;
6867 case 0x02:
6868 if (set_cc && rd == 15) {
6869 /* SUBS r15, ... is used for exception return. */
6870 if (IS_USER(s)) {
6871 goto illegal_op;
6872 }
6873 gen_helper_sub_cc(tmp, tmp, tmp2);
6874 gen_exception_return(s, tmp);
6875 } else {
6876 if (set_cc) {
6877 gen_helper_sub_cc(tmp, tmp, tmp2);
6878 } else {
6879 tcg_gen_sub_i32(tmp, tmp, tmp2);
6880 }
6881 store_reg_bx(env, s, rd, tmp);
6882 }
6883 break;
6884 case 0x03:
6885 if (set_cc) {
6886 gen_helper_sub_cc(tmp, tmp2, tmp);
6887 } else {
6888 tcg_gen_sub_i32(tmp, tmp2, tmp);
6889 }
6890 store_reg_bx(env, s, rd, tmp);
6891 break;
6892 case 0x04:
6893 if (set_cc) {
6894 gen_helper_add_cc(tmp, tmp, tmp2);
6895 } else {
6896 tcg_gen_add_i32(tmp, tmp, tmp2);
6897 }
6898 store_reg_bx(env, s, rd, tmp);
6899 break;
6900 case 0x05:
6901 if (set_cc) {
6902 gen_helper_adc_cc(tmp, tmp, tmp2);
6903 } else {
6904 gen_add_carry(tmp, tmp, tmp2);
6905 }
6906 store_reg_bx(env, s, rd, tmp);
6907 break;
6908 case 0x06:
6909 if (set_cc) {
6910 gen_helper_sbc_cc(tmp, tmp, tmp2);
6911 } else {
6912 gen_sub_carry(tmp, tmp, tmp2);
6913 }
6914 store_reg_bx(env, s, rd, tmp);
6915 break;
6916 case 0x07:
6917 if (set_cc) {
6918 gen_helper_sbc_cc(tmp, tmp2, tmp);
6919 } else {
6920 gen_sub_carry(tmp, tmp2, tmp);
6921 }
6922 store_reg_bx(env, s, rd, tmp);
6923 break;
6924 case 0x08:
6925 if (set_cc) {
6926 tcg_gen_and_i32(tmp, tmp, tmp2);
6927 gen_logic_CC(tmp);
6928 }
6929 tcg_temp_free_i32(tmp);
6930 break;
6931 case 0x09:
6932 if (set_cc) {
6933 tcg_gen_xor_i32(tmp, tmp, tmp2);
6934 gen_logic_CC(tmp);
6935 }
6936 tcg_temp_free_i32(tmp);
6937 break;
6938 case 0x0a:
6939 if (set_cc) {
6940 gen_helper_sub_cc(tmp, tmp, tmp2);
6941 }
6942 tcg_temp_free_i32(tmp);
6943 break;
6944 case 0x0b:
6945 if (set_cc) {
6946 gen_helper_add_cc(tmp, tmp, tmp2);
6947 }
6948 tcg_temp_free_i32(tmp);
6949 break;
6950 case 0x0c:
6951 tcg_gen_or_i32(tmp, tmp, tmp2);
6952 if (logic_cc) {
6953 gen_logic_CC(tmp);
6954 }
6955 store_reg_bx(env, s, rd, tmp);
6956 break;
6957 case 0x0d:
6958 if (logic_cc && rd == 15) {
6959 /* MOVS r15, ... is used for exception return. */
6960 if (IS_USER(s)) {
6961 goto illegal_op;
6962 }
6963 gen_exception_return(s, tmp2);
6964 } else {
6965 if (logic_cc) {
6966 gen_logic_CC(tmp2);
6967 }
6968 store_reg_bx(env, s, rd, tmp2);
6969 }
6970 break;
6971 case 0x0e:
6972 tcg_gen_andc_i32(tmp, tmp, tmp2);
6973 if (logic_cc) {
6974 gen_logic_CC(tmp);
6975 }
6976 store_reg_bx(env, s, rd, tmp);
6977 break;
6978 default:
6979 case 0x0f:
6980 tcg_gen_not_i32(tmp2, tmp2);
6981 if (logic_cc) {
6982 gen_logic_CC(tmp2);
6983 }
6984 store_reg_bx(env, s, rd, tmp2);
6985 break;
6986 }
6987 if (op1 != 0x0f && op1 != 0x0d) {
6988 tcg_temp_free_i32(tmp2);
6989 }
6990 } else {
6991 /* other instructions */
6992 op1 = (insn >> 24) & 0xf;
6993 switch(op1) {
6994 case 0x0:
6995 case 0x1:
6996 /* multiplies, extra load/stores */
6997 sh = (insn >> 5) & 3;
6998 if (sh == 0) {
6999 if (op1 == 0x0) {
7000 rd = (insn >> 16) & 0xf;
7001 rn = (insn >> 12) & 0xf;
7002 rs = (insn >> 8) & 0xf;
7003 rm = (insn) & 0xf;
7004 op1 = (insn >> 20) & 0xf;
7005 switch (op1) {
7006 case 0: case 1: case 2: case 3: case 6:
7007 /* 32 bit mul */
7008 tmp = load_reg(s, rs);
7009 tmp2 = load_reg(s, rm);
7010 tcg_gen_mul_i32(tmp, tmp, tmp2);
7011 tcg_temp_free_i32(tmp2);
7012 if (insn & (1 << 22)) {
7013 /* Subtract (mls) */
7014 ARCH(6T2);
7015 tmp2 = load_reg(s, rn);
7016 tcg_gen_sub_i32(tmp, tmp2, tmp);
7017 tcg_temp_free_i32(tmp2);
7018 } else if (insn & (1 << 21)) {
7019 /* Add */
7020 tmp2 = load_reg(s, rn);
7021 tcg_gen_add_i32(tmp, tmp, tmp2);
7022 tcg_temp_free_i32(tmp2);
7023 }
7024 if (insn & (1 << 20))
7025 gen_logic_CC(tmp);
7026 store_reg(s, rd, tmp);
7027 break;
7028 case 4:
7029 /* 64 bit mul double accumulate (UMAAL) */
7030 ARCH(6);
7031 tmp = load_reg(s, rs);
7032 tmp2 = load_reg(s, rm);
7033 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7034 gen_addq_lo(s, tmp64, rn);
7035 gen_addq_lo(s, tmp64, rd);
7036 gen_storeq_reg(s, rn, rd, tmp64);
7037 tcg_temp_free_i64(tmp64);
7038 break;
7039 case 8: case 9: case 10: case 11:
7040 case 12: case 13: case 14: case 15:
7041 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7042 tmp = load_reg(s, rs);
7043 tmp2 = load_reg(s, rm);
7044 if (insn & (1 << 22)) {
7045 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7046 } else {
7047 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7048 }
7049 if (insn & (1 << 21)) { /* mult accumulate */
7050 gen_addq(s, tmp64, rn, rd);
7051 }
7052 if (insn & (1 << 20)) {
7053 gen_logicq_cc(tmp64);
7054 }
7055 gen_storeq_reg(s, rn, rd, tmp64);
7056 tcg_temp_free_i64(tmp64);
7057 break;
7058 default:
7059 goto illegal_op;
7060 }
7061 } else {
7062 rn = (insn >> 16) & 0xf;
7063 rd = (insn >> 12) & 0xf;
7064 if (insn & (1 << 23)) {
7065 /* load/store exclusive */
7066 op1 = (insn >> 21) & 0x3;
7067 if (op1)
7068 ARCH(6K);
7069 else
7070 ARCH(6);
7071 addr = tcg_temp_local_new_i32();
7072 load_reg_var(s, addr, rn);
7073 if (insn & (1 << 20)) {
7074 switch (op1) {
7075 case 0: /* ldrex */
7076 gen_load_exclusive(s, rd, 15, addr, 2);
7077 break;
7078 case 1: /* ldrexd */
7079 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7080 break;
7081 case 2: /* ldrexb */
7082 gen_load_exclusive(s, rd, 15, addr, 0);
7083 break;
7084 case 3: /* ldrexh */
7085 gen_load_exclusive(s, rd, 15, addr, 1);
7086 break;
7087 default:
7088 abort();
7089 }
7090 } else {
7091 rm = insn & 0xf;
7092 switch (op1) {
7093 case 0: /* strex */
7094 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7095 break;
7096 case 1: /* strexd */
7097 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7098 break;
7099 case 2: /* strexb */
7100 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7101 break;
7102 case 3: /* strexh */
7103 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7104 break;
7105 default:
7106 abort();
7107 }
7108 }
7109 tcg_temp_free(addr);
7110 } else {
7111 /* SWP instruction */
7112 rm = (insn) & 0xf;
7113
7114 /* ??? This is not really atomic. However, we know
7115 we never have multiple CPUs running in parallel,
7116 so it is good enough. */
7117 addr = load_reg(s, rn);
7118 tmp = load_reg(s, rm);
7119 if (insn & (1 << 22)) {
7120 tmp2 = gen_ld8u(addr, IS_USER(s));
7121 gen_st8(tmp, addr, IS_USER(s));
7122 } else {
7123 tmp2 = gen_ld32(addr, IS_USER(s));
7124 gen_st32(tmp, addr, IS_USER(s));
7125 }
7126 tcg_temp_free_i32(addr);
7127 store_reg(s, rd, tmp2);
7128 }
7129 }
7130 } else {
7131 int address_offset;
7132 int load;
7133 /* Misc load/store */
7134 rn = (insn >> 16) & 0xf;
7135 rd = (insn >> 12) & 0xf;
7136 addr = load_reg(s, rn);
7137 if (insn & (1 << 24))
7138 gen_add_datah_offset(s, insn, 0, addr);
7139 address_offset = 0;
7140 if (insn & (1 << 20)) {
7141 /* load */
7142 switch(sh) {
7143 case 1:
7144 tmp = gen_ld16u(addr, IS_USER(s));
7145 break;
7146 case 2:
7147 tmp = gen_ld8s(addr, IS_USER(s));
7148 break;
7149 default:
7150 case 3:
7151 tmp = gen_ld16s(addr, IS_USER(s));
7152 break;
7153 }
7154 load = 1;
7155 } else if (sh & 2) {
7156 ARCH(5TE);
7157 /* doubleword */
7158 if (sh & 1) {
7159 /* store */
7160 tmp = load_reg(s, rd);
7161 gen_st32(tmp, addr, IS_USER(s));
7162 tcg_gen_addi_i32(addr, addr, 4);
7163 tmp = load_reg(s, rd + 1);
7164 gen_st32(tmp, addr, IS_USER(s));
7165 load = 0;
7166 } else {
7167 /* load */
7168 tmp = gen_ld32(addr, IS_USER(s));
7169 store_reg(s, rd, tmp);
7170 tcg_gen_addi_i32(addr, addr, 4);
7171 tmp = gen_ld32(addr, IS_USER(s));
7172 rd++;
7173 load = 1;
7174 }
7175 address_offset = -4;
7176 } else {
7177 /* store */
7178 tmp = load_reg(s, rd);
7179 gen_st16(tmp, addr, IS_USER(s));
7180 load = 0;
7181 }
7182 /* Perform base writeback before the loaded value to
7183 ensure correct behavior with overlapping index registers.
7184 ldrd with base writeback is undefined if the
7185 destination and index registers overlap. */
7186 if (!(insn & (1 << 24))) {
7187 gen_add_datah_offset(s, insn, address_offset, addr);
7188 store_reg(s, rn, addr);
7189 } else if (insn & (1 << 21)) {
7190 if (address_offset)
7191 tcg_gen_addi_i32(addr, addr, address_offset);
7192 store_reg(s, rn, addr);
7193 } else {
7194 tcg_temp_free_i32(addr);
7195 }
7196 if (load) {
7197 /* Complete the load. */
7198 store_reg(s, rd, tmp);
7199 }
7200 }
7201 break;
7202 case 0x4:
7203 case 0x5:
7204 goto do_ldst;
7205 case 0x6:
7206 case 0x7:
7207 if (insn & (1 << 4)) {
7208 ARCH(6);
7209 /* Armv6 Media instructions. */
7210 rm = insn & 0xf;
7211 rn = (insn >> 16) & 0xf;
7212 rd = (insn >> 12) & 0xf;
7213 rs = (insn >> 8) & 0xf;
7214 switch ((insn >> 23) & 3) {
7215 case 0: /* Parallel add/subtract. */
7216 op1 = (insn >> 20) & 7;
7217 tmp = load_reg(s, rn);
7218 tmp2 = load_reg(s, rm);
7219 sh = (insn >> 5) & 7;
7220 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7221 goto illegal_op;
7222 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7223 tcg_temp_free_i32(tmp2);
7224 store_reg(s, rd, tmp);
7225 break;
7226 case 1:
7227 if ((insn & 0x00700020) == 0) {
7228 /* Halfword pack. */
7229 tmp = load_reg(s, rn);
7230 tmp2 = load_reg(s, rm);
7231 shift = (insn >> 7) & 0x1f;
7232 if (insn & (1 << 6)) {
7233 /* pkhtb */
7234 if (shift == 0)
7235 shift = 31;
7236 tcg_gen_sari_i32(tmp2, tmp2, shift);
7237 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7238 tcg_gen_ext16u_i32(tmp2, tmp2);
7239 } else {
7240 /* pkhbt */
7241 if (shift)
7242 tcg_gen_shli_i32(tmp2, tmp2, shift);
7243 tcg_gen_ext16u_i32(tmp, tmp);
7244 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7245 }
7246 tcg_gen_or_i32(tmp, tmp, tmp2);
7247 tcg_temp_free_i32(tmp2);
7248 store_reg(s, rd, tmp);
7249 } else if ((insn & 0x00200020) == 0x00200000) {
7250 /* [us]sat */
7251 tmp = load_reg(s, rm);
7252 shift = (insn >> 7) & 0x1f;
7253 if (insn & (1 << 6)) {
7254 if (shift == 0)
7255 shift = 31;
7256 tcg_gen_sari_i32(tmp, tmp, shift);
7257 } else {
7258 tcg_gen_shli_i32(tmp, tmp, shift);
7259 }
7260 sh = (insn >> 16) & 0x1f;
7261 tmp2 = tcg_const_i32(sh);
7262 if (insn & (1 << 22))
7263 gen_helper_usat(tmp, tmp, tmp2);
7264 else
7265 gen_helper_ssat(tmp, tmp, tmp2);
7266 tcg_temp_free_i32(tmp2);
7267 store_reg(s, rd, tmp);
7268 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7269 /* [us]sat16 */
7270 tmp = load_reg(s, rm);
7271 sh = (insn >> 16) & 0x1f;
7272 tmp2 = tcg_const_i32(sh);
7273 if (insn & (1 << 22))
7274 gen_helper_usat16(tmp, tmp, tmp2);
7275 else
7276 gen_helper_ssat16(tmp, tmp, tmp2);
7277 tcg_temp_free_i32(tmp2);
7278 store_reg(s, rd, tmp);
7279 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7280 /* Select bytes. */
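/* SEL picks each result byte from Rn or Rm according to the
   GE[3:0] flags left by a preceding parallel add/subtract.  */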
7281 tmp = load_reg(s, rn);
7282 tmp2 = load_reg(s, rm);
7283 tmp3 = tcg_temp_new_i32();
7284 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7285 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7286 tcg_temp_free_i32(tmp3);
7287 tcg_temp_free_i32(tmp2);
7288 store_reg(s, rd, tmp);
7289 } else if ((insn & 0x000003e0) == 0x00000060) {
7290 tmp = load_reg(s, rm);
7291 shift = (insn >> 10) & 3;
7292 /* ??? In many cases it's not necessary to do a
7293 rotate, a shift is sufficient. */
7294 if (shift != 0)
7295 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7296 op1 = (insn >> 20) & 7;
7297 switch (op1) {
7298 case 0: gen_sxtb16(tmp); break;
7299 case 2: gen_sxtb(tmp); break;
7300 case 3: gen_sxth(tmp); break;
7301 case 4: gen_uxtb16(tmp); break;
7302 case 6: gen_uxtb(tmp); break;
7303 case 7: gen_uxth(tmp); break;
7304 default: goto illegal_op;
7305 }
7306 if (rn != 15) {
7307 tmp2 = load_reg(s, rn);
7308 if ((op1 & 3) == 0) {
7309 gen_add16(tmp, tmp2);
7310 } else {
7311 tcg_gen_add_i32(tmp, tmp, tmp2);
7312 tcg_temp_free_i32(tmp2);
7313 }
7314 }
7315 store_reg(s, rd, tmp);
7316 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7317 /* rev */
7318 tmp = load_reg(s, rm);
7319 if (insn & (1 << 22)) {
7320 if (insn & (1 << 7)) {
7321 gen_revsh(tmp);
7322 } else {
7323 ARCH(6T2);
7324 gen_helper_rbit(tmp, tmp);
7325 }
7326 } else {
7327 if (insn & (1 << 7))
7328 gen_rev16(tmp);
7329 else
7330 tcg_gen_bswap32_i32(tmp, tmp);
7331 }
7332 store_reg(s, rd, tmp);
7333 } else {
7334 goto illegal_op;
7335 }
7336 break;
7337 case 2: /* Multiplies (Type 3). */
7338 tmp = load_reg(s, rm);
7339 tmp2 = load_reg(s, rs);
7340 if (insn & (1 << 20)) {
7341 /* Signed multiply most significant [accumulate].
7342 (SMMUL, SMMLA, SMMLS) */
7343 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7344
7345 if (rd != 15) {
7346 tmp = load_reg(s, rd);
7347 if (insn & (1 << 6)) {
7348 tmp64 = gen_subq_msw(tmp64, tmp);
7349 } else {
7350 tmp64 = gen_addq_msw(tmp64, tmp);
7351 }
7352 }
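/* Bit 5 is the R (round) bit: SMMULR/SMMLAR/SMMLSR add 0x80000000
   before the high word is extracted, rounding to nearest instead
   of truncating.  */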
7353 if (insn & (1 << 5)) {
7354 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7355 }
7356 tcg_gen_shri_i64(tmp64, tmp64, 32);
7357 tmp = tcg_temp_new_i32();
7358 tcg_gen_trunc_i64_i32(tmp, tmp64);
7359 tcg_temp_free_i64(tmp64);
7360 store_reg(s, rn, tmp);
7361 } else {
7362 if (insn & (1 << 5))
7363 gen_swap_half(tmp2);
7364 gen_smul_dual(tmp, tmp2);
7365 if (insn & (1 << 6)) {
7366 /* This subtraction cannot overflow. */
7367 tcg_gen_sub_i32(tmp, tmp, tmp2);
7368 } else {
7369 /* This addition cannot overflow 32 bits;
7370 * however it may overflow considered as a signed
7371 * operation, in which case we must set the Q flag.
7372 */
7373 gen_helper_add_setq(tmp, tmp, tmp2);
7374 }
7375 tcg_temp_free_i32(tmp2);
7376 if (insn & (1 << 22)) {
7377 /* smlald, smlsld */
7378 tmp64 = tcg_temp_new_i64();
7379 tcg_gen_ext_i32_i64(tmp64, tmp);
7380 tcg_temp_free_i32(tmp);
7381 gen_addq(s, tmp64, rd, rn);
7382 gen_storeq_reg(s, rd, rn, tmp64);
7383 tcg_temp_free_i64(tmp64);
7384 } else {
7385 /* smuad, smusd, smlad, smlsd */
7386 if (rd != 15)
7387 {
7388 tmp2 = load_reg(s, rd);
7389 gen_helper_add_setq(tmp, tmp, tmp2);
7390 tcg_temp_free_i32(tmp2);
7391 }
7392 store_reg(s, rn, tmp);
7393 }
7394 }
7395 break;
7396 case 3:
7397 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7398 switch (op1) {
7399 case 0: /* Unsigned sum of absolute differences. */
7400 ARCH(6);
7401 tmp = load_reg(s, rm);
7402 tmp2 = load_reg(s, rs);
7403 gen_helper_usad8(tmp, tmp, tmp2);
7404 tcg_temp_free_i32(tmp2);
7405 if (rd != 15) {
7406 tmp2 = load_reg(s, rd);
7407 tcg_gen_add_i32(tmp, tmp, tmp2);
7408 tcg_temp_free_i32(tmp2);
7409 }
7410 store_reg(s, rn, tmp);
7411 break;
7412 case 0x20: case 0x24: case 0x28: case 0x2c:
7413 /* Bitfield insert/clear. */
7414 ARCH(6T2);
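/* lsb is insn[11:7], msb is insn[20:16]; the field width is
   msb + 1 - lsb, and rm == 15 encodes BFC (insert zeroes).
   E.g. BFI r0, r1, #8, #4 copies r1[3:0] into r0[11:8].  */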
7415 shift = (insn >> 7) & 0x1f;
7416 i = (insn >> 16) & 0x1f;
7417 i = i + 1 - shift;
7418 if (rm == 15) {
7419 tmp = tcg_temp_new_i32();
7420 tcg_gen_movi_i32(tmp, 0);
7421 } else {
7422 tmp = load_reg(s, rm);
7423 }
7424 if (i != 32) {
7425 tmp2 = load_reg(s, rd);
7426 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7427 tcg_temp_free_i32(tmp2);
7428 }
7429 store_reg(s, rd, tmp);
7430 break;
7431 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7432 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7433 ARCH(6T2);
7434 tmp = load_reg(s, rm);
7435 shift = (insn >> 7) & 0x1f;
7436 i = ((insn >> 16) & 0x1f) + 1;
7437 if (shift + i > 32)
7438 goto illegal_op;
7439 if (i < 32) {
7440 if (op1 & 0x20) {
7441 gen_ubfx(tmp, shift, (1u << i) - 1);
7442 } else {
7443 gen_sbfx(tmp, shift, i);
7444 }
7445 }
7446 store_reg(s, rd, tmp);
7447 break;
7448 default:
7449 goto illegal_op;
7450 }
7451 break;
7452 }
7453 break;
7454 }
7455 do_ldst:
7456 /* Check for undefined extension instructions
7457 * per the ARM Bible, i.e.:
7458 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7459 */
7460 sh = (0xf << 20) | (0xf << 4);
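/* i.e. sh == 0x00f000f0, covering the two 1111 groups (bits 23:20
   and 7:4) of the pattern above; op1 == 0x7 checks bits 27:24.  */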
7461 if (op1 == 0x7 && ((insn & sh) == sh))
7462 {
7463 goto illegal_op;
7464 }
7465 /* load/store byte/word */
7466 rn = (insn >> 16) & 0xf;
7467 rd = (insn >> 12) & 0xf;
7468 tmp2 = load_reg(s, rn);
7469 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7470 if (insn & (1 << 24))
7471 gen_add_data_offset(s, insn, tmp2);
7472 if (insn & (1 << 20)) {
7473 /* load */
7474 if (insn & (1 << 22)) {
7475 tmp = gen_ld8u(tmp2, i);
7476 } else {
7477 tmp = gen_ld32(tmp2, i);
7478 }
7479 } else {
7480 /* store */
7481 tmp = load_reg(s, rd);
7482 if (insn & (1 << 22))
7483 gen_st8(tmp, tmp2, i);
7484 else
7485 gen_st32(tmp, tmp2, i);
7486 }
7487 if (!(insn & (1 << 24))) {
7488 gen_add_data_offset(s, insn, tmp2);
7489 store_reg(s, rn, tmp2);
7490 } else if (insn & (1 << 21)) {
7491 store_reg(s, rn, tmp2);
7492 } else {
7493 tcg_temp_free_i32(tmp2);
7494 }
7495 if (insn & (1 << 20)) {
7496 /* Complete the load. */
7497 store_reg_from_load(env, s, rd, tmp);
7498 }
7499 break;
7500 case 0x08:
7501 case 0x09:
7502 {
7503 int j, n, user, loaded_base;
7504 TCGv loaded_var;
7505 /* load/store multiple words */
7506 /* XXX: store correct base if write back */
7507 user = 0;
7508 if (insn & (1 << 22)) {
7509 if (IS_USER(s))
7510 goto illegal_op; /* only usable in supervisor mode */
7511
7512 if ((insn & (1 << 15)) == 0)
7513 user = 1;
7514 }
7515 rn = (insn >> 16) & 0xf;
7516 addr = load_reg(s, rn);
7517
7518 /* compute total size */
7519 loaded_base = 0;
7520 TCGV_UNUSED(loaded_var);
7521 n = 0;
7522 for(i=0;i<16;i++) {
7523 if (insn & (1 << i))
7524 n++;
7525 }
7526 /* XXX: test invalid n == 0 case ? */
7527 if (insn & (1 << 23)) {
7528 if (insn & (1 << 24)) {
7529 /* pre increment */
7530 tcg_gen_addi_i32(addr, addr, 4);
7531 } else {
7532 /* post increment */
7533 }
7534 } else {
7535 if (insn & (1 << 24)) {
7536 /* pre decrement */
7537 tcg_gen_addi_i32(addr, addr, -(n * 4));
7538 } else {
7539 /* post decrement */
7540 if (n != 1)
7541 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7542 }
7543 }
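/* addr now points at the lowest word transferred:
   IA: Rn, IB: Rn + 4, DB: Rn - 4*n, DA: Rn - 4*n + 4.
   The writeback value is recomputed after the transfer loop.  */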
7544 j = 0;
7545 for(i=0;i<16;i++) {
7546 if (insn & (1 << i)) {
7547 if (insn & (1 << 20)) {
7548 /* load */
7549 tmp = gen_ld32(addr, IS_USER(s));
7550 if (user) {
7551 tmp2 = tcg_const_i32(i);
7552 gen_helper_set_user_reg(tmp2, tmp);
7553 tcg_temp_free_i32(tmp2);
7554 tcg_temp_free_i32(tmp);
7555 } else if (i == rn) {
7556 loaded_var = tmp;
7557 loaded_base = 1;
7558 } else {
7559 store_reg_from_load(env, s, i, tmp);
7560 }
7561 } else {
7562 /* store */
7563 if (i == 15) {
7564 /* special case: r15 = PC + 8 */
7565 val = (long)s->pc + 4;
7566 tmp = tcg_temp_new_i32();
7567 tcg_gen_movi_i32(tmp, val);
7568 } else if (user) {
7569 tmp = tcg_temp_new_i32();
7570 tmp2 = tcg_const_i32(i);
7571 gen_helper_get_user_reg(tmp, tmp2);
7572 tcg_temp_free_i32(tmp2);
7573 } else {
7574 tmp = load_reg(s, i);
7575 }
7576 gen_st32(tmp, addr, IS_USER(s));
7577 }
7578 j++;
7579 /* no need to add after the last transfer */
7580 if (j != n)
7581 tcg_gen_addi_i32(addr, addr, 4);
7582 }
7583 }
7584 if (insn & (1 << 21)) {
7585 /* write back */
7586 if (insn & (1 << 23)) {
7587 if (insn & (1 << 24)) {
7588 /* pre increment */
7589 } else {
7590 /* post increment */
7591 tcg_gen_addi_i32(addr, addr, 4);
7592 }
7593 } else {
7594 if (insn & (1 << 24)) {
7595 /* pre decrement */
7596 if (n != 1)
7597 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7598 } else {
7599 /* post decrement */
7600 tcg_gen_addi_i32(addr, addr, -(n * 4));
7601 }
7602 }
7603 store_reg(s, rn, addr);
7604 } else {
7605 tcg_temp_free_i32(addr);
7606 }
7607 if (loaded_base) {
7608 store_reg(s, rn, loaded_var);
7609 }
7610 if ((insn & (1 << 22)) && !user) {
7611 /* Restore CPSR from SPSR. */
7612 tmp = load_cpu_field(spsr);
7613 gen_set_cpsr(tmp, 0xffffffff);
7614 tcg_temp_free_i32(tmp);
7615 s->is_jmp = DISAS_UPDATE;
7616 }
7617 }
7618 break;
7619 case 0xa:
7620 case 0xb:
7621 {
7622 int32_t offset;
7623
7624 /* branch (and link) */
7625 val = (int32_t)s->pc;
7626 if (insn & (1 << 24)) {
7627 tmp = tcg_temp_new_i32();
7628 tcg_gen_movi_i32(tmp, val);
7629 store_reg(s, 14, tmp);
7630 }
7631 offset = (((int32_t)insn << 8) >> 8);
7632 val += (offset << 2) + 4;
7633 gen_jmp(s, val);
7634 }
7635 break;
7636 case 0xc:
7637 case 0xd:
7638 case 0xe:
7639 /* Coprocessor. */
7640 if (disas_coproc_insn(env, s, insn))
7641 goto illegal_op;
7642 break;
7643 case 0xf:
7644 /* swi */
7645 gen_set_pc_im(s->pc);
7646 s->is_jmp = DISAS_SWI;
7647 break;
7648 default:
7649 illegal_op:
7650 gen_exception_insn(s, 4, EXCP_UDEF);
7651 break;
7652 }
7653 }
7654 }
7655
7656 /* Return true if this is a Thumb-2 logical op. */
7657 static int
7658 thumb2_logic_op(int op)
7659 {
7660 return (op < 8);
7661 }
7662
7663 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7664 then set condition code flags based on the result of the operation.
7665 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7666 to the high bit of T1.
7667 Returns zero if the opcode is valid. */
7668
7669 static int
7670 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7671 {
7672 int logic_cc;
7673
7674 logic_cc = 0;
7675 switch (op) {
7676 case 0: /* and */
7677 tcg_gen_and_i32(t0, t0, t1);
7678 logic_cc = conds;
7679 break;
7680 case 1: /* bic */
7681 tcg_gen_andc_i32(t0, t0, t1);
7682 logic_cc = conds;
7683 break;
7684 case 2: /* orr */
7685 tcg_gen_or_i32(t0, t0, t1);
7686 logic_cc = conds;
7687 break;
7688 case 3: /* orn */
7689 tcg_gen_orc_i32(t0, t0, t1);
7690 logic_cc = conds;
7691 break;
7692 case 4: /* eor */
7693 tcg_gen_xor_i32(t0, t0, t1);
7694 logic_cc = conds;
7695 break;
7696 case 8: /* add */
7697 if (conds)
7698 gen_helper_add_cc(t0, t0, t1);
7699 else
7700 tcg_gen_add_i32(t0, t0, t1);
7701 break;
7702 case 10: /* adc */
7703 if (conds)
7704 gen_helper_adc_cc(t0, t0, t1);
7705 else
7706 gen_adc(t0, t1);
7707 break;
7708 case 11: /* sbc */
7709 if (conds)
7710 gen_helper_sbc_cc(t0, t0, t1);
7711 else
7712 gen_sub_carry(t0, t0, t1);
7713 break;
7714 case 13: /* sub */
7715 if (conds)
7716 gen_helper_sub_cc(t0, t0, t1);
7717 else
7718 tcg_gen_sub_i32(t0, t0, t1);
7719 break;
7720 case 14: /* rsb */
7721 if (conds)
7722 gen_helper_sub_cc(t0, t1, t0);
7723 else
7724 tcg_gen_sub_i32(t0, t1, t0);
7725 break;
7726 default: /* 5, 6, 7, 9, 12, 15. */
7727 return 1;
7728 }
7729 if (logic_cc) {
7730 gen_logic_CC(t0);
7731 if (shifter_out)
7732 gen_set_CF_bit31(t1);
7733 }
7734 return 0;
7735 }
7736
7737 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7738 is not legal. */
7739 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7740 {
7741 uint32_t insn, imm, shift, offset;
7742 uint32_t rd, rn, rm, rs;
7743 TCGv tmp;
7744 TCGv tmp2;
7745 TCGv tmp3;
7746 TCGv addr;
7747 TCGv_i64 tmp64;
7748 int op;
7749 int shiftop;
7750 int conds;
7751 int logic_cc;
7752
7753 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7754 || arm_feature (env, ARM_FEATURE_M))) {
7755 /* Thumb-1 cores may need to treat bl and blx as a pair of
7756 16-bit instructions to get correct prefetch abort behavior. */
7757 insn = insn_hw1;
7758 if ((insn & (1 << 12)) == 0) {
7759 ARCH(5);
7760 /* Second half of blx. */
7761 offset = ((insn & 0x7ff) << 1);
7762 tmp = load_reg(s, 14);
7763 tcg_gen_addi_i32(tmp, tmp, offset);
7764 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7765
7766 tmp2 = tcg_temp_new_i32();
7767 tcg_gen_movi_i32(tmp2, s->pc | 1);
7768 store_reg(s, 14, tmp2);
7769 gen_bx(s, tmp);
7770 return 0;
7771 }
7772 if (insn & (1 << 11)) {
7773 /* Second half of bl. */
7774 offset = ((insn & 0x7ff) << 1) | 1;
7775 tmp = load_reg(s, 14);
7776 tcg_gen_addi_i32(tmp, tmp, offset);
7777
7778 tmp2 = tcg_temp_new_i32();
7779 tcg_gen_movi_i32(tmp2, s->pc | 1);
7780 store_reg(s, 14, tmp2);
7781 gen_bx(s, tmp);
7782 return 0;
7783 }
7784 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7785 /* Instruction spans a page boundary. Implement it as two
7786 16-bit instructions in case the second half causes a
7787 prefetch abort. */
7788 offset = ((int32_t)insn << 21) >> 9;
7789 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7790 return 0;
7791 }
7792 /* Fall through to 32-bit decode. */
7793 }
7794
7795 insn = lduw_code(s->pc);
7796 s->pc += 2;
7797 insn |= (uint32_t)insn_hw1 << 16;
7798
7799 if ((insn & 0xf800e800) != 0xf000e800) {
7800 ARCH(6T2);
7801 }
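/* Only the 16-bit bl/blx prefix+suffix pair (hw1 = 11110...,
   hw2 = 111x1...) predates Thumb-2; any other encoding reaching
   this point is a 32-bit instruction and needs 6T2.  */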
7802
7803 rn = (insn >> 16) & 0xf;
7804 rs = (insn >> 12) & 0xf;
7805 rd = (insn >> 8) & 0xf;
7806 rm = insn & 0xf;
7807 switch ((insn >> 25) & 0xf) {
7808 case 0: case 1: case 2: case 3:
7809 /* 16-bit instructions. Should never happen. */
7810 abort();
7811 case 4:
7812 if (insn & (1 << 22)) {
7813 /* Other load/store, table branch. */
7814 if (insn & 0x01200000) {
7815 /* Load/store doubleword. */
7816 if (rn == 15) {
7817 addr = tcg_temp_new_i32();
7818 tcg_gen_movi_i32(addr, s->pc & ~3);
7819 } else {
7820 addr = load_reg(s, rn);
7821 }
7822 offset = (insn & 0xff) * 4;
7823 if ((insn & (1 << 23)) == 0)
7824 offset = -offset;
7825 if (insn & (1 << 24)) {
7826 tcg_gen_addi_i32(addr, addr, offset);
7827 offset = 0;
7828 }
7829 if (insn & (1 << 20)) {
7830 /* ldrd */
7831 tmp = gen_ld32(addr, IS_USER(s));
7832 store_reg(s, rs, tmp);
7833 tcg_gen_addi_i32(addr, addr, 4);
7834 tmp = gen_ld32(addr, IS_USER(s));
7835 store_reg(s, rd, tmp);
7836 } else {
7837 /* strd */
7838 tmp = load_reg(s, rs);
7839 gen_st32(tmp, addr, IS_USER(s));
7840 tcg_gen_addi_i32(addr, addr, 4);
7841 tmp = load_reg(s, rd);
7842 gen_st32(tmp, addr, IS_USER(s));
7843 }
7844 if (insn & (1 << 21)) {
7845 /* Base writeback. */
7846 if (rn == 15)
7847 goto illegal_op;
7848 tcg_gen_addi_i32(addr, addr, offset - 4);
7849 store_reg(s, rn, addr);
7850 } else {
7851 tcg_temp_free_i32(addr);
7852 }
7853 } else if ((insn & (1 << 23)) == 0) {
7854 /* Load/store exclusive word. */
7855 addr = tcg_temp_local_new();
7856 load_reg_var(s, addr, rn);
7857 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7858 if (insn & (1 << 20)) {
7859 gen_load_exclusive(s, rs, 15, addr, 2);
7860 } else {
7861 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7862 }
7863 tcg_temp_free(addr);
7864 } else if ((insn & (1 << 6)) == 0) {
7865 /* Table Branch. */
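/* tbb/tbh index a table of halfword branch offsets at [Rn + Rm]
   (tbb) or [Rn + 2*Rm] (tbh); the loaded entry is doubled and
   added to the PC to form the branch target.  */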
7866 if (rn == 15) {
7867 addr = tcg_temp_new_i32();
7868 tcg_gen_movi_i32(addr, s->pc);
7869 } else {
7870 addr = load_reg(s, rn);
7871 }
7872 tmp = load_reg(s, rm);
7873 tcg_gen_add_i32(addr, addr, tmp);
7874 if (insn & (1 << 4)) {
7875 /* tbh */
7876 tcg_gen_add_i32(addr, addr, tmp);
7877 tcg_temp_free_i32(tmp);
7878 tmp = gen_ld16u(addr, IS_USER(s));
7879 } else { /* tbb */
7880 tcg_temp_free_i32(tmp);
7881 tmp = gen_ld8u(addr, IS_USER(s));
7882 }
7883 tcg_temp_free_i32(addr);
7884 tcg_gen_shli_i32(tmp, tmp, 1);
7885 tcg_gen_addi_i32(tmp, tmp, s->pc);
7886 store_reg(s, 15, tmp);
7887 } else {
7888 /* Load/store exclusive byte/halfword/doubleword. */
7889 ARCH(7);
7890 op = (insn >> 4) & 0x3;
7891 if (op == 2) {
7892 goto illegal_op;
7893 }
7894 addr = tcg_temp_local_new();
7895 load_reg_var(s, addr, rn);
7896 if (insn & (1 << 20)) {
7897 gen_load_exclusive(s, rs, rd, addr, op);
7898 } else {
7899 gen_store_exclusive(s, rm, rs, rd, addr, op);
7900 }
7901 tcg_temp_free(addr);
7902 }
7903 } else {
7904 /* Load/store multiple, RFE, SRS. */
7905 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7906 /* Not available in user mode. */
7907 if (IS_USER(s))
7908 goto illegal_op;
7909 if (insn & (1 << 20)) {
7910 /* rfe */
7911 addr = load_reg(s, rn);
7912 if ((insn & (1 << 24)) == 0)
7913 tcg_gen_addi_i32(addr, addr, -8);
7914 /* Load PC into tmp and CPSR into tmp2. */
7915 tmp = gen_ld32(addr, 0);
7916 tcg_gen_addi_i32(addr, addr, 4);
7917 tmp2 = gen_ld32(addr, 0);
7918 if (insn & (1 << 21)) {
7919 /* Base writeback. */
7920 if (insn & (1 << 24)) {
7921 tcg_gen_addi_i32(addr, addr, 4);
7922 } else {
7923 tcg_gen_addi_i32(addr, addr, -4);
7924 }
7925 store_reg(s, rn, addr);
7926 } else {
7927 tcg_temp_free_i32(addr);
7928 }
7929 gen_rfe(s, tmp, tmp2);
7930 } else {
7931 /* srs */
7932 op = (insn & 0x1f);
7933 addr = tcg_temp_new_i32();
7934 tmp = tcg_const_i32(op);
7935 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7936 tcg_temp_free_i32(tmp);
7937 if ((insn & (1 << 24)) == 0) {
7938 tcg_gen_addi_i32(addr, addr, -8);
7939 }
7940 tmp = load_reg(s, 14);
7941 gen_st32(tmp, addr, 0);
7942 tcg_gen_addi_i32(addr, addr, 4);
7943 tmp = tcg_temp_new_i32();
7944 gen_helper_cpsr_read(tmp);
7945 gen_st32(tmp, addr, 0);
7946 if (insn & (1 << 21)) {
7947 if ((insn & (1 << 24)) == 0) {
7948 tcg_gen_addi_i32(addr, addr, -4);
7949 } else {
7950 tcg_gen_addi_i32(addr, addr, 4);
7951 }
7952 tmp = tcg_const_i32(op);
7953 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7954 tcg_temp_free_i32(tmp);
7955 } else {
7956 tcg_temp_free_i32(addr);
7957 }
7958 }
7959 } else {
7960 int i;
7961 /* Load/store multiple. */
7962 addr = load_reg(s, rn);
7963 offset = 0;
7964 for (i = 0; i < 16; i++) {
7965 if (insn & (1 << i))
7966 offset += 4;
7967 }
7968 if (insn & (1 << 24)) {
7969 tcg_gen_addi_i32(addr, addr, -offset);
7970 }
7971
7972 for (i = 0; i < 16; i++) {
7973 if ((insn & (1 << i)) == 0)
7974 continue;
7975 if (insn & (1 << 20)) {
7976 /* Load. */
7977 tmp = gen_ld32(addr, IS_USER(s));
7978 if (i == 15) {
7979 gen_bx(s, tmp);
7980 } else {
7981 store_reg(s, i, tmp);
7982 }
7983 } else {
7984 /* Store. */
7985 tmp = load_reg(s, i);
7986 gen_st32(tmp, addr, IS_USER(s));
7987 }
7988 tcg_gen_addi_i32(addr, addr, 4);
7989 }
7990 if (insn & (1 << 21)) {
7991 /* Base register writeback. */
7992 if (insn & (1 << 24)) {
7993 tcg_gen_addi_i32(addr, addr, -offset);
7994 }
7995 /* Fault if writeback register is in register list. */
7996 if (insn & (1 << rn))
7997 goto illegal_op;
7998 store_reg(s, rn, addr);
7999 } else {
8000 tcg_temp_free_i32(addr);
8001 }
8002 }
8003 }
8004 break;
8005 case 5:
8006
8007 op = (insn >> 21) & 0xf;
8008 if (op == 6) {
8009 /* Halfword pack. */
8010 tmp = load_reg(s, rn);
8011 tmp2 = load_reg(s, rm);
8012 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8013 if (insn & (1 << 5)) {
8014 /* pkhtb */
8015 if (shift == 0)
8016 shift = 31;
8017 tcg_gen_sari_i32(tmp2, tmp2, shift);
8018 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8019 tcg_gen_ext16u_i32(tmp2, tmp2);
8020 } else {
8021 /* pkhbt */
8022 if (shift)
8023 tcg_gen_shli_i32(tmp2, tmp2, shift);
8024 tcg_gen_ext16u_i32(tmp, tmp);
8025 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8026 }
8027 tcg_gen_or_i32(tmp, tmp, tmp2);
8028 tcg_temp_free_i32(tmp2);
8029 store_reg(s, rd, tmp);
8030 } else {
8031 /* Data processing register constant shift. */
8032 if (rn == 15) {
8033 tmp = tcg_temp_new_i32();
8034 tcg_gen_movi_i32(tmp, 0);
8035 } else {
8036 tmp = load_reg(s, rn);
8037 }
8038 tmp2 = load_reg(s, rm);
8039
8040 shiftop = (insn >> 4) & 3;
8041 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8042 conds = (insn & (1 << 20)) != 0;
8043 logic_cc = (conds && thumb2_logic_op(op));
8044 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8045 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8046 goto illegal_op;
8047 tcg_temp_free_i32(tmp2);
8048 if (rd != 15) {
8049 store_reg(s, rd, tmp);
8050 } else {
8051 tcg_temp_free_i32(tmp);
8052 }
8053 }
8054 break;
8055 case 13: /* Misc data processing. */
8056 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8057 if (op < 4 && (insn & 0xf000) != 0xf000)
8058 goto illegal_op;
8059 switch (op) {
8060 case 0: /* Register controlled shift. */
8061 tmp = load_reg(s, rn);
8062 tmp2 = load_reg(s, rm);
8063 if ((insn & 0x70) != 0)
8064 goto illegal_op;
8065 op = (insn >> 21) & 3;
8066 logic_cc = (insn & (1 << 20)) != 0;
8067 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8068 if (logic_cc)
8069 gen_logic_CC(tmp);
8070 store_reg_bx(env, s, rd, tmp);
8071 break;
8072 case 1: /* Sign/zero extend. */
8073 tmp = load_reg(s, rm);
8074 shift = (insn >> 4) & 3;
8075 /* ??? In many cases it's not necessary to do a
8076 rotate, a shift is sufficient. */
8077 if (shift != 0)
8078 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8079 op = (insn >> 20) & 7;
8080 switch (op) {
8081 case 0: gen_sxth(tmp); break;
8082 case 1: gen_uxth(tmp); break;
8083 case 2: gen_sxtb16(tmp); break;
8084 case 3: gen_uxtb16(tmp); break;
8085 case 4: gen_sxtb(tmp); break;
8086 case 5: gen_uxtb(tmp); break;
8087 default: goto illegal_op;
8088 }
8089 if (rn != 15) {
8090 tmp2 = load_reg(s, rn);
8091 if ((op >> 1) == 1) {
8092 gen_add16(tmp, tmp2);
8093 } else {
8094 tcg_gen_add_i32(tmp, tmp, tmp2);
8095 tcg_temp_free_i32(tmp2);
8096 }
8097 }
8098 store_reg(s, rd, tmp);
8099 break;
8100 case 2: /* SIMD add/subtract. */
8101 op = (insn >> 20) & 7;
8102 shift = (insn >> 4) & 7;
8103 if ((op & 3) == 3 || (shift & 3) == 3)
8104 goto illegal_op;
8105 tmp = load_reg(s, rn);
8106 tmp2 = load_reg(s, rm);
8107 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8108 tcg_temp_free_i32(tmp2);
8109 store_reg(s, rd, tmp);
8110 break;
8111 case 3: /* Other data processing. */
8112 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8113 if (op < 4) {
8114 /* Saturating add/subtract. */
8115 tmp = load_reg(s, rn);
8116 tmp2 = load_reg(s, rm);
8117 if (op & 1)
8118 gen_helper_double_saturate(tmp, tmp);
8119 if (op & 2)
8120 gen_helper_sub_saturate(tmp, tmp2, tmp);
8121 else
8122 gen_helper_add_saturate(tmp, tmp, tmp2);
8123 tcg_temp_free_i32(tmp2);
8124 } else {
8125 tmp = load_reg(s, rn);
8126 switch (op) {
8127 case 0x0a: /* rbit */
8128 gen_helper_rbit(tmp, tmp);
8129 break;
8130 case 0x08: /* rev */
8131 tcg_gen_bswap32_i32(tmp, tmp);
8132 break;
8133 case 0x09: /* rev16 */
8134 gen_rev16(tmp);
8135 break;
8136 case 0x0b: /* revsh */
8137 gen_revsh(tmp);
8138 break;
8139 case 0x10: /* sel */
8140 tmp2 = load_reg(s, rm);
8141 tmp3 = tcg_temp_new_i32();
8142 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
8143 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8144 tcg_temp_free_i32(tmp3);
8145 tcg_temp_free_i32(tmp2);
8146 break;
8147 case 0x18: /* clz */
8148 gen_helper_clz(tmp, tmp);
8149 break;
8150 default:
8151 goto illegal_op;
8152 }
8153 }
8154 store_reg(s, rd, tmp);
8155 break;
8156 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8157 op = (insn >> 4) & 0xf;
8158 tmp = load_reg(s, rn);
8159 tmp2 = load_reg(s, rm);
8160 switch ((insn >> 20) & 7) {
8161 case 0: /* 32 x 32 -> 32 */
8162 tcg_gen_mul_i32(tmp, tmp, tmp2);
8163 tcg_temp_free_i32(tmp2);
8164 if (rs != 15) {
8165 tmp2 = load_reg(s, rs);
8166 if (op)
8167 tcg_gen_sub_i32(tmp, tmp2, tmp);
8168 else
8169 tcg_gen_add_i32(tmp, tmp, tmp2);
8170 tcg_temp_free_i32(tmp2);
8171 }
8172 break;
8173 case 1: /* 16 x 16 -> 32 */
8174 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8175 tcg_temp_free_i32(tmp2);
8176 if (rs != 15) {
8177 tmp2 = load_reg(s, rs);
8178 gen_helper_add_setq(tmp, tmp, tmp2);
8179 tcg_temp_free_i32(tmp2);
8180 }
8181 break;
8182 case 2: /* Dual multiply add. */
8183 case 4: /* Dual multiply subtract. */
8184 if (op)
8185 gen_swap_half(tmp2);
8186 gen_smul_dual(tmp, tmp2);
8187 if (insn & (1 << 22)) {
8188 /* This subtraction cannot overflow. */
8189 tcg_gen_sub_i32(tmp, tmp, tmp2);
8190 } else {
8191 /* This addition cannot overflow 32 bits;
8192 * however it may overflow considered as a signed
8193 * operation, in which case we must set the Q flag.
8194 */
8195 gen_helper_add_setq(tmp, tmp, tmp2);
8196 }
8197 tcg_temp_free_i32(tmp2);
8198 if (rs != 15)
8199 {
8200 tmp2 = load_reg(s, rs);
8201 gen_helper_add_setq(tmp, tmp, tmp2);
8202 tcg_temp_free_i32(tmp2);
8203 }
8204 break;
8205 case 3: /* 32 * 16 -> 32msb */
8206 if (op)
8207 tcg_gen_sari_i32(tmp2, tmp2, 16);
8208 else
8209 gen_sxth(tmp2);
8210 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8211 tcg_gen_shri_i64(tmp64, tmp64, 16);
8212 tmp = tcg_temp_new_i32();
8213 tcg_gen_trunc_i64_i32(tmp, tmp64);
8214 tcg_temp_free_i64(tmp64);
8215 if (rs != 15)
8216 {
8217 tmp2 = load_reg(s, rs);
8218 gen_helper_add_setq(tmp, tmp, tmp2);
8219 tcg_temp_free_i32(tmp2);
8220 }
8221 break;
8222 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8223 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8224 if (rs != 15) {
8225 tmp = load_reg(s, rs);
8226 if (insn & (1 << 20)) {
8227 tmp64 = gen_addq_msw(tmp64, tmp);
8228 } else {
8229 tmp64 = gen_subq_msw(tmp64, tmp);
8230 }
8231 }
8232 if (insn & (1 << 4)) {
8233 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8234 }
8235 tcg_gen_shri_i64(tmp64, tmp64, 32);
8236 tmp = tcg_temp_new_i32();
8237 tcg_gen_trunc_i64_i32(tmp, tmp64);
8238 tcg_temp_free_i64(tmp64);
8239 break;
8240 case 7: /* Unsigned sum of absolute differences. */
8241 gen_helper_usad8(tmp, tmp, tmp2);
8242 tcg_temp_free_i32(tmp2);
8243 if (rs != 15) {
8244 tmp2 = load_reg(s, rs);
8245 tcg_gen_add_i32(tmp, tmp, tmp2);
8246 tcg_temp_free_i32(tmp2);
8247 }
8248 break;
8249 }
8250 store_reg(s, rd, tmp);
8251 break;
8252 case 6: case 7: /* 64-bit multiply, Divide. */
8253 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8254 tmp = load_reg(s, rn);
8255 tmp2 = load_reg(s, rm);
8256 if ((op & 0x50) == 0x10) {
8257 /* sdiv, udiv */
8258 if (!arm_feature(env, ARM_FEATURE_DIV))
8259 goto illegal_op;
8260 if (op & 0x20)
8261 gen_helper_udiv(tmp, tmp, tmp2);
8262 else
8263 gen_helper_sdiv(tmp, tmp, tmp2);
8264 tcg_temp_free_i32(tmp2);
8265 store_reg(s, rd, tmp);
8266 } else if ((op & 0xe) == 0xc) {
8267 /* Dual multiply accumulate long. */
8268 if (op & 1)
8269 gen_swap_half(tmp2);
8270 gen_smul_dual(tmp, tmp2);
8271 if (op & 0x10) {
8272 tcg_gen_sub_i32(tmp, tmp, tmp2);
8273 } else {
8274 tcg_gen_add_i32(tmp, tmp, tmp2);
8275 }
8276 tcg_temp_free_i32(tmp2);
8277 /* BUGFIX */
8278 tmp64 = tcg_temp_new_i64();
8279 tcg_gen_ext_i32_i64(tmp64, tmp);
8280 tcg_temp_free_i32(tmp);
8281 gen_addq(s, tmp64, rs, rd);
8282 gen_storeq_reg(s, rs, rd, tmp64);
8283 tcg_temp_free_i64(tmp64);
8284 } else {
8285 if (op & 0x20) {
8286 /* Unsigned 64-bit multiply */
8287 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8288 } else {
8289 if (op & 8) {
8290 /* smlalxy */
8291 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8292 tcg_temp_free_i32(tmp2);
8293 tmp64 = tcg_temp_new_i64();
8294 tcg_gen_ext_i32_i64(tmp64, tmp);
8295 tcg_temp_free_i32(tmp);
8296 } else {
8297 /* Signed 64-bit multiply */
8298 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8299 }
8300 }
8301 if (op & 4) {
8302 /* umaal */
8303 gen_addq_lo(s, tmp64, rs);
8304 gen_addq_lo(s, tmp64, rd);
8305 } else if (op & 0x40) {
8306 /* 64-bit accumulate. */
8307 gen_addq(s, tmp64, rs, rd);
8308 }
8309 gen_storeq_reg(s, rs, rd, tmp64);
8310 tcg_temp_free_i64(tmp64);
8311 }
8312 break;
8313 }
8314 break;
8315 case 6: case 7: case 14: case 15:
8316 /* Coprocessor. */
8317 if (((insn >> 24) & 3) == 3) {
8318 /* Translate into the equivalent ARM encoding. */
8319 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8320 if (disas_neon_data_insn(env, s, insn))
8321 goto illegal_op;
8322 } else {
8323 if (insn & (1 << 28))
8324 goto illegal_op;
8325 if (disas_coproc_insn (env, s, insn))
8326 goto illegal_op;
8327 }
8328 break;
8329 case 8: case 9: case 10: case 11:
8330 if (insn & (1 << 15)) {
8331 /* Branches, misc control. */
8332 if (insn & 0x5000) {
8333 /* Unconditional branch. */
8334 /* signextend(hw1[10:0]) -> offset[:12]. */
8335 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8336 /* hw1[10:0] -> offset[11:1]. */
8337 offset |= (insn & 0x7ff) << 1;
8338 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8339 offset[24:22] already have the same value because of the
8340 sign extension above. */
8341 offset ^= ((~insn) & (1 << 13)) << 10;
8342 offset ^= ((~insn) & (1 << 11)) << 11;
8343
8344 if (insn & (1 << 14)) {
8345 /* Branch and link. */
8346 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8347 }
8348
8349 offset += s->pc;
8350 if (insn & (1 << 12)) {
8351 /* b/bl */
8352 gen_jmp(s, offset);
8353 } else {
8354 /* blx */
8355 offset &= ~(uint32_t)2;
8356 /* thumb2 bx, no need to check */
8357 gen_bx_im(s, offset);
8358 }
8359 } else if (((insn >> 23) & 7) == 7) {
8360 /* Misc control */
8361 if (insn & (1 << 13))
8362 goto illegal_op;
8363
8364 if (insn & (1 << 26)) {
8365 /* Secure monitor call (v6Z) */
8366 goto illegal_op; /* not implemented. */
8367 } else {
8368 op = (insn >> 20) & 7;
8369 switch (op) {
8370 case 0: /* msr cpsr. */
8371 if (IS_M(env)) {
8372 tmp = load_reg(s, rn);
8373 addr = tcg_const_i32(insn & 0xff);
8374 gen_helper_v7m_msr(cpu_env, addr, tmp);
8375 tcg_temp_free_i32(addr);
8376 tcg_temp_free_i32(tmp);
8377 gen_lookup_tb(s);
8378 break;
8379 }
8380 /* fall through */
8381 case 1: /* msr spsr. */
8382 if (IS_M(env))
8383 goto illegal_op;
8384 tmp = load_reg(s, rn);
8385 if (gen_set_psr(s,
8386 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8387 op == 1, tmp))
8388 goto illegal_op;
8389 break;
8390 case 2: /* cps, nop-hint. */
8391 if (((insn >> 8) & 7) == 0) {
8392 gen_nop_hint(s, insn & 0xff);
8393 }
8394 /* Implemented as NOP in user mode. */
8395 if (IS_USER(s))
8396 break;
8397 offset = 0;
8398 imm = 0;
8399 if (insn & (1 << 10)) {
8400 if (insn & (1 << 7))
8401 offset |= CPSR_A;
8402 if (insn & (1 << 6))
8403 offset |= CPSR_I;
8404 if (insn & (1 << 5))
8405 offset |= CPSR_F;
8406 if (insn & (1 << 9))
8407 imm = CPSR_A | CPSR_I | CPSR_F;
8408 }
8409 if (insn & (1 << 8)) {
8410 offset |= 0x1f;
8411 imm |= (insn & 0x1f);
8412 }
8413 if (offset) {
8414 gen_set_psr_im(s, offset, 0, imm);
8415 }
8416 break;
8417 case 3: /* Special control operations. */
8418 ARCH(7);
8419 op = (insn >> 4) & 0xf;
8420 switch (op) {
8421 case 2: /* clrex */
8422 gen_clrex(s);
8423 break;
8424 case 4: /* dsb */
8425 case 5: /* dmb */
8426 case 6: /* isb */
8427 /* These execute as NOPs. */
8428 break;
8429 default:
8430 goto illegal_op;
8431 }
8432 break;
8433 case 4: /* bxj */
8434 /* Trivial implementation equivalent to bx. */
8435 tmp = load_reg(s, rn);
8436 gen_bx(s, tmp);
8437 break;
8438 case 5: /* Exception return. */
8439 if (IS_USER(s)) {
8440 goto illegal_op;
8441 }
8442 if (rn != 14 || rd != 15) {
8443 goto illegal_op;
8444 }
8445 tmp = load_reg(s, rn);
8446 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8447 gen_exception_return(s, tmp);
8448 break;
8449 case 6: /* mrs cpsr. */
8450 tmp = tcg_temp_new_i32();
8451 if (IS_M(env)) {
8452 addr = tcg_const_i32(insn & 0xff);
8453 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8454 tcg_temp_free_i32(addr);
8455 } else {
8456 gen_helper_cpsr_read(tmp);
8457 }
8458 store_reg(s, rd, tmp);
8459 break;
8460 case 7: /* mrs spsr. */
8461 /* Not accessible in user mode. */
8462 if (IS_USER(s) || IS_M(env))
8463 goto illegal_op;
8464 tmp = load_cpu_field(spsr);
8465 store_reg(s, rd, tmp);
8466 break;
8467 }
8468 }
8469 } else {
8470 /* Conditional branch. */
8471 op = (insn >> 22) & 0xf;
8472 /* Generate a conditional jump to next instruction. */
8473 s->condlabel = gen_new_label();
8474 gen_test_cc(op ^ 1, s->condlabel);
8475 s->condjmp = 1;
8476
8477 /* offset[11:1] = insn[10:0] */
8478 offset = (insn & 0x7ff) << 1;
8479 /* offset[17:12] = insn[21:16]. */
8480 offset |= (insn & 0x003f0000) >> 4;
8481 /* offset[31:20] = insn[26]. */
8482 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8483 /* offset[18] = insn[13]. */
8484 offset |= (insn & (1 << 13)) << 5;
8485 /* offset[19] = insn[11]. */
8486 offset |= (insn & (1 << 11)) << 8;
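/* The assembled value is the conditional-branch (T3) immediate:
   SignExtend(S:J2:J1:imm6:imm11:0).  Unlike B/BL above, J1 and J2
   are used directly rather than inverted. */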
8487
8488 /* jump to the offset */
8489 gen_jmp(s, s->pc + offset);
8490 }
8491 } else {
8492 /* Data processing immediate. */
8493 if (insn & (1 << 25)) {
8494 if (insn & (1 << 24)) {
8495 if (insn & (1 << 20))
8496 goto illegal_op;
8497 /* Bitfield/Saturate. */
8498 op = (insn >> 21) & 7;
8499 imm = insn & 0x1f;
8500 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
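/* shift is the lsb/shift amount: insn[7:6] supplies its low two
   bits and insn[14:12] the high three; imm (insn[4:0]) is the
   width/msb or saturate field, depending on op. */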
8501 if (rn == 15) {
8502 tmp = tcg_temp_new_i32();
8503 tcg_gen_movi_i32(tmp, 0);
8504 } else {
8505 tmp = load_reg(s, rn);
8506 }
8507 switch (op) {
8508 case 2: /* Signed bitfield extract. */
8509 imm++;
8510 if (shift + imm > 32)
8511 goto illegal_op;
8512 if (imm < 32)
8513 gen_sbfx(tmp, shift, imm);
8514 break;
8515 case 6: /* Unsigned bitfield extract. */
8516 imm++;
8517 if (shift + imm > 32)
8518 goto illegal_op;
8519 if (imm < 32)
8520 gen_ubfx(tmp, shift, (1u << imm) - 1);
8521 break;
8522 case 3: /* Bitfield insert/clear. */
8523 if (imm < shift)
8524 goto illegal_op;
8525 imm = imm + 1 - shift;
8526 if (imm != 32) {
8527 tmp2 = load_reg(s, rd);
8528 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8529 tcg_temp_free_i32(tmp2);
8530 }
8531 break;
8532 case 7:
8533 goto illegal_op;
8534 default: /* Saturate. */
8535 if (shift) {
8536 if (op & 1)
8537 tcg_gen_sari_i32(tmp, tmp, shift);
8538 else
8539 tcg_gen_shli_i32(tmp, tmp, shift);
8540 }
8541 tmp2 = tcg_const_i32(imm);
8542 if (op & 4) {
8543 /* Unsigned. */
8544 if ((op & 1) && shift == 0)
8545 gen_helper_usat16(tmp, tmp, tmp2);
8546 else
8547 gen_helper_usat(tmp, tmp, tmp2);
8548 } else {
8549 /* Signed. */
8550 if ((op & 1) && shift == 0)
8551 gen_helper_ssat16(tmp, tmp, tmp2);
8552 else
8553 gen_helper_ssat(tmp, tmp, tmp2);
8554 }
8555 tcg_temp_free_i32(tmp2);
8556 break;
8557 }
8558 store_reg(s, rd, tmp);
8559 } else {
8560 imm = ((insn & 0x04000000) >> 15)
8561 | ((insn & 0x7000) >> 4) | (insn & 0xff);
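/* imm is assembled from i:imm3:imm8 (insn bits 26, 14:12 and 7:0). */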
8562 if (insn & (1 << 22)) {
8563 /* 16-bit immediate. */
8564 imm |= (insn >> 4) & 0xf000;
8565 if (insn & (1 << 23)) {
8566 /* movt */
8567 tmp = load_reg(s, rd);
8568 tcg_gen_ext16u_i32(tmp, tmp);
8569 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8570 } else {
8571 /* movw */
8572 tmp = tcg_temp_new_i32();
8573 tcg_gen_movi_i32(tmp, imm);
8574 }
8575 } else {
8576 /* Add/sub 12-bit immediate. */
8577 if (rn == 15) {
8578 offset = s->pc & ~(uint32_t)3;
8579 if (insn & (1 << 23))
8580 offset -= imm;
8581 else
8582 offset += imm;
8583 tmp = tcg_temp_new_i32();
8584 tcg_gen_movi_i32(tmp, offset);
8585 } else {
8586 tmp = load_reg(s, rn);
8587 if (insn & (1 << 23))
8588 tcg_gen_subi_i32(tmp, tmp, imm);
8589 else
8590 tcg_gen_addi_i32(tmp, tmp, imm);
8591 }
8592 }
8593 store_reg(s, rd, tmp);
8594 }
8595 } else {
8596 int shifter_out = 0;
8597 /* modified 12-bit immediate. */
8598 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8599 imm = (insn & 0xff);
8600 switch (shift) {
8601 case 0: /* XY */
8602 /* Nothing to do. */
8603 break;
8604 case 1: /* 00XY00XY */
8605 imm |= imm << 16;
8606 break;
8607 case 2: /* XY00XY00 */
8608 imm |= imm << 16;
8609 imm <<= 8;
8610 break;
8611 case 3: /* XYXYXYXY */
8612 imm |= imm << 16;
8613 imm |= imm << 8;
8614 break;
8615 default: /* Rotated constant. */
8616 shift = (shift << 1) | (imm >> 7);
8617 imm |= 0x80;
8618 imm = imm << (32 - shift);
8619 shifter_out = 1;
8620 break;
8621 }
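/* Examples: i:imm3:imm8 = 0x1ab expands to 0x00ab00ab and 0x3ab to
   0xabababab; an i:imm3 field of 4 or more takes the rotated-constant
   path handled by the default case above. */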
8622 tmp2 = tcg_temp_new_i32();
8623 tcg_gen_movi_i32(tmp2, imm);
8624 rn = (insn >> 16) & 0xf;
8625 if (rn == 15) {
8626 tmp = tcg_temp_new_i32();
8627 tcg_gen_movi_i32(tmp, 0);
8628 } else {
8629 tmp = load_reg(s, rn);
8630 }
8631 op = (insn >> 21) & 0xf;
8632 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8633 shifter_out, tmp, tmp2))
8634 goto illegal_op;
8635 tcg_temp_free_i32(tmp2);
8636 rd = (insn >> 8) & 0xf;
8637 if (rd != 15) {
8638 store_reg(s, rd, tmp);
8639 } else {
8640 tcg_temp_free_i32(tmp);
8641 }
8642 }
8643 }
8644 break;
8645 case 12: /* Load/store single data item. */
8646 {
8647 int postinc = 0;
8648 int writeback = 0;
8649 int user;
8650 if ((insn & 0x01100000) == 0x01000000) {
8651 if (disas_neon_ls_insn(env, s, insn))
8652 goto illegal_op;
8653 break;
8654 }
8655 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
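/* op[1:0] is the size (0=byte, 1=halfword, 2=word) from insn[22:21];
   op[2] is the sign-extend bit from insn[24]. */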
8656 if (rs == 15) {
8657 if (!(insn & (1 << 20))) {
8658 goto illegal_op;
8659 }
8660 if (op != 2) {
8661 /* Byte or halfword load space with dest == r15: memory hints.
8662 * Catch them early so we don't emit pointless addressing code.
8663 * This space is a mix of:
8664 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8665 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8666 * cores)
8667 * unallocated hints, which must be treated as NOPs
8668 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8669 * which is easiest for the decoding logic
8670 * Some space which must UNDEF
8671 */
8672 int op1 = (insn >> 23) & 3;
8673 int op2 = (insn >> 6) & 0x3f;
8674 if (op & 2) {
8675 goto illegal_op;
8676 }
8677 if (rn == 15) {
8678 /* UNPREDICTABLE or unallocated hint */
8679 return 0;
8680 }
8681 if (op1 & 1) {
8682 return 0; /* PLD* or unallocated hint */
8683 }
8684 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8685 return 0; /* PLD* or unallocated hint */
8686 }
8687 /* UNDEF space, or an UNPREDICTABLE */
8688 return 1;
8689 }
8690 }
8691 user = IS_USER(s);
8692 if (rn == 15) {
8693 addr = tcg_temp_new_i32();
8694 /* PC relative. */
8695 /* s->pc has already been incremented by 4. */
8696 imm = s->pc & 0xfffffffc;
8697 if (insn & (1 << 23))
8698 imm += insn & 0xfff;
8699 else
8700 imm -= insn & 0xfff;
8701 tcg_gen_movi_i32(addr, imm);
8702 } else {
8703 addr = load_reg(s, rn);
8704 if (insn & (1 << 23)) {
8705 /* Positive offset. */
8706 imm = insn & 0xfff;
8707 tcg_gen_addi_i32(addr, addr, imm);
8708 } else {
8709 imm = insn & 0xff;
8710 switch ((insn >> 8) & 0xf) {
8711 case 0x0: /* Shifted Register. */
8712 shift = (insn >> 4) & 0xf;
8713 if (shift > 3) {
8714 tcg_temp_free_i32(addr);
8715 goto illegal_op;
8716 }
8717 tmp = load_reg(s, rm);
8718 if (shift)
8719 tcg_gen_shli_i32(tmp, tmp, shift);
8720 tcg_gen_add_i32(addr, addr, tmp);
8721 tcg_temp_free_i32(tmp);
8722 break;
8723 case 0xc: /* Negative offset. */
8724 tcg_gen_addi_i32(addr, addr, -imm);
8725 break;
8726 case 0xe: /* User privilege. */
8727 tcg_gen_addi_i32(addr, addr, imm);
8728 user = 1;
8729 break;
8730 case 0x9: /* Post-decrement. */
8731 imm = -imm;
8732 /* Fall through. */
8733 case 0xb: /* Post-increment. */
8734 postinc = 1;
8735 writeback = 1;
8736 break;
8737 case 0xd: /* Pre-decrement. */
8738 imm = -imm;
8739 /* Fall through. */
8740 case 0xf: /* Pre-increment. */
8741 tcg_gen_addi_i32(addr, addr, imm);
8742 writeback = 1;
8743 break;
8744 default:
8745 tcg_temp_free_i32(addr);
8746 goto illegal_op;
8747 }
8748 }
8749 }
8750 if (insn & (1 << 20)) {
8751 /* Load. */
8752 switch (op) {
8753 case 0: tmp = gen_ld8u(addr, user); break;
8754 case 4: tmp = gen_ld8s(addr, user); break;
8755 case 1: tmp = gen_ld16u(addr, user); break;
8756 case 5: tmp = gen_ld16s(addr, user); break;
8757 case 2: tmp = gen_ld32(addr, user); break;
8758 default:
8759 tcg_temp_free_i32(addr);
8760 goto illegal_op;
8761 }
8762 if (rs == 15) {
8763 gen_bx(s, tmp);
8764 } else {
8765 store_reg(s, rs, tmp);
8766 }
8767 } else {
8768 /* Store. */
8769 tmp = load_reg(s, rs);
8770 switch (op) {
8771 case 0: gen_st8(tmp, addr, user); break;
8772 case 1: gen_st16(tmp, addr, user); break;
8773 case 2: gen_st32(tmp, addr, user); break;
8774 default:
8775 tcg_temp_free_i32(addr);
8776 goto illegal_op;
8777 }
8778 }
8779 if (postinc)
8780 tcg_gen_addi_i32(addr, addr, imm);
8781 if (writeback) {
8782 store_reg(s, rn, addr);
8783 } else {
8784 tcg_temp_free_i32(addr);
8785 }
8786 }
8787 break;
8788 default:
8789 goto illegal_op;
8790 }
8791 return 0;
8792 illegal_op:
8793 return 1;
8794 }
8795
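/* Illustrative sketch (hypothetical helper, not referenced elsewhere
 * in this file): the Thumb-2 modified-immediate expansion that the
 * "modified 12-bit immediate" decode above performs inline, written
 * out as a standalone function.  The argument is the i:imm3:imm8
 * field assembled from insn bits 26, 14:12 and 7:0.
 */
static inline uint32_t thumb_expand_imm_example(uint32_t imm12)
{
    uint32_t imm8 = imm12 & 0xff;

    switch ((imm12 >> 8) & 0xf) {
    case 0:                              /* 000000XY */
        return imm8;
    case 1:                              /* 00XY00XY */
        return imm8 | (imm8 << 16);
    case 2:                              /* XY00XY00 */
        return (imm8 << 8) | (imm8 << 24);
    case 3:                              /* XYXYXYXY */
        return imm8 | (imm8 << 8) | (imm8 << 16) | (imm8 << 24);
    default:
        {
            /* Rotated constant: (imm8 | 0x80) rotated right by
               imm12[11:7].  The rotation is always >= 8 here, so a
               plain left shift by (32 - rotation) is equivalent. */
            uint32_t rot = (imm12 >> 7) & 0x1f;
            return (imm8 | 0x80) << (32 - rot);
        }
    }
}
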
8796 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8797 {
8798 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8799 int32_t offset;
8800 int i;
8801 TCGv tmp;
8802 TCGv tmp2;
8803 TCGv addr;
8804
8805 if (s->condexec_mask) {
8806 cond = s->condexec_cond;
8807 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8808 s->condlabel = gen_new_label();
8809 gen_test_cc(cond ^ 1, s->condlabel);
8810 s->condjmp = 1;
8811 }
8812 }
8813
8814 insn = lduw_code(s->pc);
8815 s->pc += 2;
8816
8817 switch (insn >> 12) {
8818 case 0: case 1:
8819
8820 rd = insn & 7;
8821 op = (insn >> 11) & 3;
8822 if (op == 3) {
8823 /* add/subtract */
8824 rn = (insn >> 3) & 7;
8825 tmp = load_reg(s, rn);
8826 if (insn & (1 << 10)) {
8827 /* immediate */
8828 tmp2 = tcg_temp_new_i32();
8829 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8830 } else {
8831 /* reg */
8832 rm = (insn >> 6) & 7;
8833 tmp2 = load_reg(s, rm);
8834 }
8835 if (insn & (1 << 9)) {
8836 if (s->condexec_mask)
8837 tcg_gen_sub_i32(tmp, tmp, tmp2);
8838 else
8839 gen_helper_sub_cc(tmp, tmp, tmp2);
8840 } else {
8841 if (s->condexec_mask)
8842 tcg_gen_add_i32(tmp, tmp, tmp2);
8843 else
8844 gen_helper_add_cc(tmp, tmp, tmp2);
8845 }
8846 tcg_temp_free_i32(tmp2);
8847 store_reg(s, rd, tmp);
8848 } else {
8849 /* shift immediate */
8850 rm = (insn >> 3) & 7;
8851 shift = (insn >> 6) & 0x1f;
8852 tmp = load_reg(s, rm);
8853 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8854 if (!s->condexec_mask)
8855 gen_logic_CC(tmp);
8856 store_reg(s, rd, tmp);
8857 }
8858 break;
8859 case 2: case 3:
8860 /* arithmetic large immediate */
8861 op = (insn >> 11) & 3;
8862 rd = (insn >> 8) & 0x7;
8863 if (op == 0) { /* mov */
8864 tmp = tcg_temp_new_i32();
8865 tcg_gen_movi_i32(tmp, insn & 0xff);
8866 if (!s->condexec_mask)
8867 gen_logic_CC(tmp);
8868 store_reg(s, rd, tmp);
8869 } else {
8870 tmp = load_reg(s, rd);
8871 tmp2 = tcg_temp_new_i32();
8872 tcg_gen_movi_i32(tmp2, insn & 0xff);
8873 switch (op) {
8874 case 1: /* cmp */
8875 gen_helper_sub_cc(tmp, tmp, tmp2);
8876 tcg_temp_free_i32(tmp);
8877 tcg_temp_free_i32(tmp2);
8878 break;
8879 case 2: /* add */
8880 if (s->condexec_mask)
8881 tcg_gen_add_i32(tmp, tmp, tmp2);
8882 else
8883 gen_helper_add_cc(tmp, tmp, tmp2);
8884 tcg_temp_free_i32(tmp2);
8885 store_reg(s, rd, tmp);
8886 break;
8887 case 3: /* sub */
8888 if (s->condexec_mask)
8889 tcg_gen_sub_i32(tmp, tmp, tmp2);
8890 else
8891 gen_helper_sub_cc(tmp, tmp, tmp2);
8892 tcg_temp_free_i32(tmp2);
8893 store_reg(s, rd, tmp);
8894 break;
8895 }
8896 }
8897 break;
8898 case 4:
8899 if (insn & (1 << 11)) {
8900 rd = (insn >> 8) & 7;
8901 /* load pc-relative. Bit 1 of PC is ignored. */
8902 val = s->pc + 2 + ((insn & 0xff) * 4);
8903 val &= ~(uint32_t)2;
8904 addr = tcg_temp_new_i32();
8905 tcg_gen_movi_i32(addr, val);
8906 tmp = gen_ld32(addr, IS_USER(s));
8907 tcg_temp_free_i32(addr);
8908 store_reg(s, rd, tmp);
8909 break;
8910 }
8911 if (insn & (1 << 10)) {
8912 /* data processing extended or blx */
8913 rd = (insn & 7) | ((insn >> 4) & 8);
8914 rm = (insn >> 3) & 0xf;
8915 op = (insn >> 8) & 3;
8916 switch (op) {
8917 case 0: /* add */
8918 tmp = load_reg(s, rd);
8919 tmp2 = load_reg(s, rm);
8920 tcg_gen_add_i32(tmp, tmp, tmp2);
8921 tcg_temp_free_i32(tmp2);
8922 store_reg(s, rd, tmp);
8923 break;
8924 case 1: /* cmp */
8925 tmp = load_reg(s, rd);
8926 tmp2 = load_reg(s, rm);
8927 gen_helper_sub_cc(tmp, tmp, tmp2);
8928 tcg_temp_free_i32(tmp2);
8929 tcg_temp_free_i32(tmp);
8930 break;
8931 case 2: /* mov/cpy */
8932 tmp = load_reg(s, rm);
8933 store_reg(s, rd, tmp);
8934 break;
8935 case 3:/* branch [and link] exchange thumb register */
8936 tmp = load_reg(s, rm);
8937 if (insn & (1 << 7)) {
8938 ARCH(5);
8939 val = (uint32_t)s->pc | 1;
8940 tmp2 = tcg_temp_new_i32();
8941 tcg_gen_movi_i32(tmp2, val);
8942 store_reg(s, 14, tmp2);
8943 }
8944 /* already thumb, no need to check */
8945 gen_bx(s, tmp);
8946 break;
8947 }
8948 break;
8949 }
8950
8951 /* data processing register */
8952 rd = insn & 7;
8953 rm = (insn >> 3) & 7;
8954 op = (insn >> 6) & 0xf;
8955 if (op == 2 || op == 3 || op == 4 || op == 7) {
8956 /* the shift/rotate ops want the operands backwards */
8957 val = rm;
8958 rm = rd;
8959 rd = val;
8960 val = 1;
8961 } else {
8962 val = 0;
8963 }
8964
8965 if (op == 9) { /* neg */
8966 tmp = tcg_temp_new_i32();
8967 tcg_gen_movi_i32(tmp, 0);
8968 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8969 tmp = load_reg(s, rd);
8970 } else {
8971 TCGV_UNUSED(tmp);
8972 }
8973
8974 tmp2 = load_reg(s, rm);
8975 switch (op) {
8976 case 0x0: /* and */
8977 tcg_gen_and_i32(tmp, tmp, tmp2);
8978 if (!s->condexec_mask)
8979 gen_logic_CC(tmp);
8980 break;
8981 case 0x1: /* eor */
8982 tcg_gen_xor_i32(tmp, tmp, tmp2);
8983 if (!s->condexec_mask)
8984 gen_logic_CC(tmp);
8985 break;
8986 case 0x2: /* lsl */
8987 if (s->condexec_mask) {
8988 gen_helper_shl(tmp2, tmp2, tmp);
8989 } else {
8990 gen_helper_shl_cc(tmp2, tmp2, tmp);
8991 gen_logic_CC(tmp2);
8992 }
8993 break;
8994 case 0x3: /* lsr */
8995 if (s->condexec_mask) {
8996 gen_helper_shr(tmp2, tmp2, tmp);
8997 } else {
8998 gen_helper_shr_cc(tmp2, tmp2, tmp);
8999 gen_logic_CC(tmp2);
9000 }
9001 break;
9002 case 0x4: /* asr */
9003 if (s->condexec_mask) {
9004 gen_helper_sar(tmp2, tmp2, tmp);
9005 } else {
9006 gen_helper_sar_cc(tmp2, tmp2, tmp);
9007 gen_logic_CC(tmp2);
9008 }
9009 break;
9010 case 0x5: /* adc */
9011 if (s->condexec_mask)
9012 gen_adc(tmp, tmp2);
9013 else
9014 gen_helper_adc_cc(tmp, tmp, tmp2);
9015 break;
9016 case 0x6: /* sbc */
9017 if (s->condexec_mask)
9018 gen_sub_carry(tmp, tmp, tmp2);
9019 else
9020 gen_helper_sbc_cc(tmp, tmp, tmp2);
9021 break;
9022 case 0x7: /* ror */
9023 if (s->condexec_mask) {
9024 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9025 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9026 } else {
9027 gen_helper_ror_cc(tmp2, tmp2, tmp);
9028 gen_logic_CC(tmp2);
9029 }
9030 break;
9031 case 0x8: /* tst */
9032 tcg_gen_and_i32(tmp, tmp, tmp2);
9033 gen_logic_CC(tmp);
9034 rd = 16;
9035 break;
9036 case 0x9: /* neg */
9037 if (s->condexec_mask)
9038 tcg_gen_neg_i32(tmp, tmp2);
9039 else
9040 gen_helper_sub_cc(tmp, tmp, tmp2);
9041 break;
9042 case 0xa: /* cmp */
9043 gen_helper_sub_cc(tmp, tmp, tmp2);
9044 rd = 16;
9045 break;
9046 case 0xb: /* cmn */
9047 gen_helper_add_cc(tmp, tmp, tmp2);
9048 rd = 16;
9049 break;
9050 case 0xc: /* orr */
9051 tcg_gen_or_i32(tmp, tmp, tmp2);
9052 if (!s->condexec_mask)
9053 gen_logic_CC(tmp);
9054 break;
9055 case 0xd: /* mul */
9056 tcg_gen_mul_i32(tmp, tmp, tmp2);
9057 if (!s->condexec_mask)
9058 gen_logic_CC(tmp);
9059 break;
9060 case 0xe: /* bic */
9061 tcg_gen_andc_i32(tmp, tmp, tmp2);
9062 if (!s->condexec_mask)
9063 gen_logic_CC(tmp);
9064 break;
9065 case 0xf: /* mvn */
9066 tcg_gen_not_i32(tmp2, tmp2);
9067 if (!s->condexec_mask)
9068 gen_logic_CC(tmp2);
9069 val = 1;
9070 rm = rd;
9071 break;
9072 }
9073 if (rd != 16) {
9074 if (val) {
9075 store_reg(s, rm, tmp2);
9076 if (op != 0xf)
9077 tcg_temp_free_i32(tmp);
9078 } else {
9079 store_reg(s, rd, tmp);
9080 tcg_temp_free_i32(tmp2);
9081 }
9082 } else {
9083 tcg_temp_free_i32(tmp);
9084 tcg_temp_free_i32(tmp2);
9085 }
9086 break;
9087
9088 case 5:
9089 /* load/store register offset. */
9090 rd = insn & 7;
9091 rn = (insn >> 3) & 7;
9092 rm = (insn >> 6) & 7;
9093 op = (insn >> 9) & 7;
9094 addr = load_reg(s, rn);
9095 tmp = load_reg(s, rm);
9096 tcg_gen_add_i32(addr, addr, tmp);
9097 tcg_temp_free_i32(tmp);
9098
9099 if (op < 3) /* store */
9100 tmp = load_reg(s, rd);
9101
9102 switch (op) {
9103 case 0: /* str */
9104 gen_st32(tmp, addr, IS_USER(s));
9105 break;
9106 case 1: /* strh */
9107 gen_st16(tmp, addr, IS_USER(s));
9108 break;
9109 case 2: /* strb */
9110 gen_st8(tmp, addr, IS_USER(s));
9111 break;
9112 case 3: /* ldrsb */
9113 tmp = gen_ld8s(addr, IS_USER(s));
9114 break;
9115 case 4: /* ldr */
9116 tmp = gen_ld32(addr, IS_USER(s));
9117 break;
9118 case 5: /* ldrh */
9119 tmp = gen_ld16u(addr, IS_USER(s));
9120 break;
9121 case 6: /* ldrb */
9122 tmp = gen_ld8u(addr, IS_USER(s));
9123 break;
9124 case 7: /* ldrsh */
9125 tmp = gen_ld16s(addr, IS_USER(s));
9126 break;
9127 }
9128 if (op >= 3) /* load */
9129 store_reg(s, rd, tmp);
9130 tcg_temp_free_i32(addr);
9131 break;
9132
9133 case 6:
9134 /* load/store word immediate offset */
9135 rd = insn & 7;
9136 rn = (insn >> 3) & 7;
9137 addr = load_reg(s, rn);
9138 val = (insn >> 4) & 0x7c;
9139 tcg_gen_addi_i32(addr, addr, val);
9140
9141 if (insn & (1 << 11)) {
9142 /* load */
9143 tmp = gen_ld32(addr, IS_USER(s));
9144 store_reg(s, rd, tmp);
9145 } else {
9146 /* store */
9147 tmp = load_reg(s, rd);
9148 gen_st32(tmp, addr, IS_USER(s));
9149 }
9150 tcg_temp_free_i32(addr);
9151 break;
9152
9153 case 7:
9154 /* load/store byte immediate offset */
9155 rd = insn & 7;
9156 rn = (insn >> 3) & 7;
9157 addr = load_reg(s, rn);
9158 val = (insn >> 6) & 0x1f;
9159 tcg_gen_addi_i32(addr, addr, val);
9160
9161 if (insn & (1 << 11)) {
9162 /* load */
9163 tmp = gen_ld8u(addr, IS_USER(s));
9164 store_reg(s, rd, tmp);
9165 } else {
9166 /* store */
9167 tmp = load_reg(s, rd);
9168 gen_st8(tmp, addr, IS_USER(s));
9169 }
9170 tcg_temp_free_i32(addr);
9171 break;
9172
9173 case 8:
9174 /* load/store halfword immediate offset */
9175 rd = insn & 7;
9176 rn = (insn >> 3) & 7;
9177 addr = load_reg(s, rn);
9178 val = (insn >> 5) & 0x3e;
9179 tcg_gen_addi_i32(addr, addr, val);
9180
9181 if (insn & (1 << 11)) {
9182 /* load */
9183 tmp = gen_ld16u(addr, IS_USER(s));
9184 store_reg(s, rd, tmp);
9185 } else {
9186 /* store */
9187 tmp = load_reg(s, rd);
9188 gen_st16(tmp, addr, IS_USER(s));
9189 }
9190 tcg_temp_free_i32(addr);
9191 break;
9192
9193 case 9:
9194 /* load/store from stack */
9195 rd = (insn >> 8) & 7;
9196 addr = load_reg(s, 13);
9197 val = (insn & 0xff) * 4;
9198 tcg_gen_addi_i32(addr, addr, val);
9199
9200 if (insn & (1 << 11)) {
9201 /* load */
9202 tmp = gen_ld32(addr, IS_USER(s));
9203 store_reg(s, rd, tmp);
9204 } else {
9205 /* store */
9206 tmp = load_reg(s, rd);
9207 gen_st32(tmp, addr, IS_USER(s));
9208 }
9209 tcg_temp_free_i32(addr);
9210 break;
9211
9212 case 10:
9213 /* add to high reg */
9214 rd = (insn >> 8) & 7;
9215 if (insn & (1 << 11)) {
9216 /* SP */
9217 tmp = load_reg(s, 13);
9218 } else {
9219 /* PC. bit 1 is ignored. */
9220 tmp = tcg_temp_new_i32();
9221 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9222 }
9223 val = (insn & 0xff) * 4;
9224 tcg_gen_addi_i32(tmp, tmp, val);
9225 store_reg(s, rd, tmp);
9226 break;
9227
9228 case 11:
9229 /* misc */
9230 op = (insn >> 8) & 0xf;
9231 switch (op) {
9232 case 0:
9233 /* adjust stack pointer */
9234 tmp = load_reg(s, 13);
9235 val = (insn & 0x7f) * 4;
9236 if (insn & (1 << 7))
9237 val = -(int32_t)val;
9238 tcg_gen_addi_i32(tmp, tmp, val);
9239 store_reg(s, 13, tmp);
9240 break;
9241
9242 case 2: /* sign/zero extend. */
9243 ARCH(6);
9244 rd = insn & 7;
9245 rm = (insn >> 3) & 7;
9246 tmp = load_reg(s, rm);
9247 switch ((insn >> 6) & 3) {
9248 case 0: gen_sxth(tmp); break;
9249 case 1: gen_sxtb(tmp); break;
9250 case 2: gen_uxth(tmp); break;
9251 case 3: gen_uxtb(tmp); break;
9252 }
9253 store_reg(s, rd, tmp);
9254 break;
9255 case 4: case 5: case 0xc: case 0xd:
9256 /* push/pop */
9257 addr = load_reg(s, 13);
9258 if (insn & (1 << 8))
9259 offset = 4;
9260 else
9261 offset = 0;
9262 for (i = 0; i < 8; i++) {
9263 if (insn & (1 << i))
9264 offset += 4;
9265 }
9266 if ((insn & (1 << 11)) == 0) {
9267 tcg_gen_addi_i32(addr, addr, -offset);
9268 }
9269 for (i = 0; i < 8; i++) {
9270 if (insn & (1 << i)) {
9271 if (insn & (1 << 11)) {
9272 /* pop */
9273 tmp = gen_ld32(addr, IS_USER(s));
9274 store_reg(s, i, tmp);
9275 } else {
9276 /* push */
9277 tmp = load_reg(s, i);
9278 gen_st32(tmp, addr, IS_USER(s));
9279 }
9280 /* advance to the next address. */
9281 tcg_gen_addi_i32(addr, addr, 4);
9282 }
9283 }
9284 TCGV_UNUSED(tmp);
9285 if (insn & (1 << 8)) {
9286 if (insn & (1 << 11)) {
9287 /* pop pc */
9288 tmp = gen_ld32(addr, IS_USER(s));
9289 /* don't set the pc until the rest of the instruction
9290 has completed */
9291 } else {
9292 /* push lr */
9293 tmp = load_reg(s, 14);
9294 gen_st32(tmp, addr, IS_USER(s));
9295 }
9296 tcg_gen_addi_i32(addr, addr, 4);
9297 }
9298 if ((insn & (1 << 11)) == 0) {
9299 tcg_gen_addi_i32(addr, addr, -offset);
9300 }
9301 /* write back the new stack pointer */
9302 store_reg(s, 13, addr);
9303 /* set the new PC value */
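/* (insn & 0x0900) == 0x0900 means this was a POP (bit 11) whose
   register list included the PC (bit 8), so tmp holds the word
   loaded for the PC above. */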
9304 if ((insn & 0x0900) == 0x0900) {
9305 store_reg_from_load(env, s, 15, tmp);
9306 }
9307 break;
9308
9309 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9310 rm = insn & 7;
9311 tmp = load_reg(s, rm);
9312 s->condlabel = gen_new_label();
9313 s->condjmp = 1;
9314 if (insn & (1 << 11))
9315 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9316 else
9317 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9318 tcg_temp_free_i32(tmp);
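/* The branch target is i:imm5:'0' (insn[9] and insn[7:3]), zero
   extended and added to the instruction's PC value (address + 4). */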
9319 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9320 val = (uint32_t)s->pc + 2;
9321 val += offset;
9322 gen_jmp(s, val);
9323 break;
9324
9325 case 15: /* IT, nop-hint. */
9326 if ((insn & 0xf) == 0) {
9327 gen_nop_hint(s, (insn >> 4) & 0xf);
9328 break;
9329 }
9330 /* If Then. */
9331 s->condexec_cond = (insn >> 4) & 0xe;
9332 s->condexec_mask = insn & 0x1f;
9333 /* No actual code generated for this insn, just set up state. */
9334 break;
9335
9336 case 0xe: /* bkpt */
9337 ARCH(5);
9338 gen_exception_insn(s, 2, EXCP_BKPT);
9339 break;
9340
9341 case 0xa: /* rev */
9342 ARCH(6);
9343 rn = (insn >> 3) & 0x7;
9344 rd = insn & 0x7;
9345 tmp = load_reg(s, rn);
9346 switch ((insn >> 6) & 3) {
9347 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9348 case 1: gen_rev16(tmp); break;
9349 case 3: gen_revsh(tmp); break;
9350 default: goto illegal_op;
9351 }
9352 store_reg(s, rd, tmp);
9353 break;
9354
9355 case 6: /* cps */
9356 ARCH(6);
9357 if (IS_USER(s))
9358 break;
9359 if (IS_M(env)) {
9360 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9361 /* PRIMASK */
9362 if (insn & 1) {
9363 addr = tcg_const_i32(16);
9364 gen_helper_v7m_msr(cpu_env, addr, tmp);
9365 tcg_temp_free_i32(addr);
9366 }
9367 /* FAULTMASK */
9368 if (insn & 2) {
9369 addr = tcg_const_i32(17);
9370 gen_helper_v7m_msr(cpu_env, addr, tmp);
9371 tcg_temp_free_i32(addr);
9372 }
9373 tcg_temp_free_i32(tmp);
9374 gen_lookup_tb(s);
9375 } else {
9376 if (insn & (1 << 4))
9377 shift = CPSR_A | CPSR_I | CPSR_F;
9378 else
9379 shift = 0;
9380 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9381 }
9382 break;
9383
9384 default:
9385 goto undef;
9386 }
9387 break;
9388
9389 case 12:
9390 /* load/store multiple */
9391 rn = (insn >> 8) & 0x7;
9392 addr = load_reg(s, rn);
9393 for (i = 0; i < 8; i++) {
9394 if (insn & (1 << i)) {
9395 if (insn & (1 << 11)) {
9396 /* load */
9397 tmp = gen_ld32(addr, IS_USER(s));
9398 store_reg(s, i, tmp);
9399 } else {
9400 /* store */
9401 tmp = load_reg(s, i);
9402 gen_st32(tmp, addr, IS_USER(s));
9403 }
9404 /* advance to the next address */
9405 tcg_gen_addi_i32(addr, addr, 4);
9406 }
9407 }
9408 /* Base register writeback. */
9409 if ((insn & (1 << rn)) == 0) {
9410 store_reg(s, rn, addr);
9411 } else {
9412 tcg_temp_free_i32(addr);
9413 }
9414 break;
9415
9416 case 13:
9417 /* conditional branch or swi */
9418 cond = (insn >> 8) & 0xf;
9419 if (cond == 0xe)
9420 goto undef;
9421
9422 if (cond == 0xf) {
9423 /* swi */
9424 gen_set_pc_im(s->pc);
9425 s->is_jmp = DISAS_SWI;
9426 break;
9427 }
9428 /* generate a conditional jump to next instruction */
9429 s->condlabel = gen_new_label();
9430 gen_test_cc(cond ^ 1, s->condlabel);
9431 s->condjmp = 1;
9432
9433 /* jump to the offset */
9434 val = (uint32_t)s->pc + 2;
9435 offset = ((int32_t)insn << 24) >> 24;
9436 val += offset << 1;
9437 gen_jmp(s, val);
9438 break;
9439
9440 case 14:
9441 if (insn & (1 << 11)) {
9442 if (disas_thumb2_insn(env, s, insn))
9443 goto undef32;
9444 break;
9445 }
9446 /* unconditional branch */
9447 val = (uint32_t)s->pc;
9448 offset = ((int32_t)insn << 21) >> 21;
9449 val += (offset << 1) + 2;
9450 gen_jmp(s, val);
9451 break;
9452
9453 case 15:
9454 if (disas_thumb2_insn(env, s, insn))
9455 goto undef32;
9456 break;
9457 }
9458 return;
9459 undef32:
9460 gen_exception_insn(s, 4, EXCP_UDEF);
9461 return;
9462 illegal_op:
9463 undef:
9464 gen_exception_insn(s, 2, EXCP_UDEF);
9465 }
9466
9467 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9468 basic block 'tb'. If search_pc is TRUE, also generate PC
9469 information for each intermediate instruction. */
9470 static inline void gen_intermediate_code_internal(CPUState *env,
9471 TranslationBlock *tb,
9472 int search_pc)
9473 {
9474 DisasContext dc1, *dc = &dc1;
9475 CPUBreakpoint *bp;
9476 uint16_t *gen_opc_end;
9477 int j, lj;
9478 target_ulong pc_start;
9479 uint32_t next_page_start;
9480 int num_insns;
9481 int max_insns;
9482
9483 /* generate intermediate code */
9484 pc_start = tb->pc;
9485
9486 dc->tb = tb;
9487
9488 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9489
9490 dc->is_jmp = DISAS_NEXT;
9491 dc->pc = pc_start;
9492 dc->singlestep_enabled = env->singlestep_enabled;
9493 dc->condjmp = 0;
9494 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9495 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9496 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9497 #if !defined(CONFIG_USER_ONLY)
9498 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9499 #endif
9500 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9501 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9502 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9503 cpu_F0s = tcg_temp_new_i32();
9504 cpu_F1s = tcg_temp_new_i32();
9505 cpu_F0d = tcg_temp_new_i64();
9506 cpu_F1d = tcg_temp_new_i64();
9507 cpu_V0 = cpu_F0d;
9508 cpu_V1 = cpu_F1d;
9509 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9510 cpu_M0 = tcg_temp_new_i64();
9511 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9512 lj = -1;
9513 num_insns = 0;
9514 max_insns = tb->cflags & CF_COUNT_MASK;
9515 if (max_insns == 0)
9516 max_insns = CF_COUNT_MASK;
9517
9518 gen_icount_start();
9519
9520 tcg_clear_temp_count();
9521
9522 /* A note on handling of the condexec (IT) bits:
9523 *
9524 * We want to avoid the overhead of having to write the updated condexec
9525 * bits back to the CPUState for every instruction in an IT block. So:
9526 * (1) if the condexec bits are not already zero then we write
9527 * zero back into the CPUState now. This avoids complications trying
9528 * to do it at the end of the block. (For example if we don't do this
9529 * it's hard to identify whether we can safely skip writing condexec
9530 * at the end of the TB, which we definitely want to do for the case
9531 * where a TB doesn't do anything with the IT state at all.)
9532 * (2) if we are going to leave the TB then we call gen_set_condexec()
9533 * which will write the correct value into CPUState if zero is wrong.
9534 * This is done both for leaving the TB at the end, and for leaving
9535 * it because of an exception we know will happen, which is done in
9536 * gen_exception_insn(). The latter is necessary because we need to
9537 * leave the TB with the PC/IT state just prior to execution of the
9538 * instruction which caused the exception.
9539 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9540 * then the CPUState will be wrong and we need to reset it.
9541 * This is handled in the same way as restoration of the
9542 * PC in these situations: we will be called again with search_pc=1
9543 * and generate a mapping of the condexec bits for each PC in
9544 * gen_opc_condexec_bits[]. gen_pc_load[] then uses this to restore
9545 * the condexec bits.
9546 *
9547 * Note that there are no instructions which can read the condexec
9548 * bits, and none which can write non-static values to them, so
9549 * we don't need to care about whether CPUState is correct in the
9550 * middle of a TB.
9551 */
9552
9553 /* Reset the conditional execution bits immediately. This avoids
9554 complications trying to do it at the end of the block. */
9555 if (dc->condexec_mask || dc->condexec_cond)
9556 {
9557 TCGv tmp = tcg_temp_new_i32();
9558 tcg_gen_movi_i32(tmp, 0);
9559 store_cpu_field(tmp, condexec_bits);
9560 }
9561 do {
9562 #ifdef CONFIG_USER_ONLY
9563 /* Intercept jump to the magic kernel page. */
9564 if (dc->pc >= 0xffff0000) {
9565 /* We always get here via a jump, so we know we are not in a
9566 conditional execution block. */
9567 gen_exception(EXCP_KERNEL_TRAP);
9568 dc->is_jmp = DISAS_UPDATE;
9569 break;
9570 }
9571 #else
9572 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9573 /* We always get here via a jump, so we know we are not in a
9574 conditional execution block. */
9575 gen_exception(EXCP_EXCEPTION_EXIT);
9576 dc->is_jmp = DISAS_UPDATE;
9577 break;
9578 }
9579 #endif
9580
9581 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9582 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9583 if (bp->pc == dc->pc) {
9584 gen_exception_insn(dc, 0, EXCP_DEBUG);
9585 /* Advance PC so that clearing the breakpoint will
9586 invalidate this TB. */
9587 dc->pc += 2;
9588 goto done_generating;
9589 break;
9590 }
9591 }
9592 }
9593 if (search_pc) {
9594 j = gen_opc_ptr - gen_opc_buf;
9595 if (lj < j) {
9596 lj++;
9597 while (lj < j)
9598 gen_opc_instr_start[lj++] = 0;
9599 }
9600 gen_opc_pc[lj] = dc->pc;
9601 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
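/* Packed in the same cond[7:4]:mask[3:0] layout as the TB flags
   and env->condexec_bits, so gen_pc_load() can restore it directly. */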
9602 gen_opc_instr_start[lj] = 1;
9603 gen_opc_icount[lj] = num_insns;
9604 }
9605
9606 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9607 gen_io_start();
9608
9609 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9610 tcg_gen_debug_insn_start(dc->pc);
9611 }
9612
9613 if (dc->thumb) {
9614 disas_thumb_insn(env, dc);
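/* Advance the IT state: condexec_mask holds firstcond[0] plus the
   4-bit IT mask.  Bit 4 supplies the condition's low bit for the
   next instruction, then the mask shifts left one place; the
   trailing 1 written by the IT instruction acts as an end marker,
   and once it has been shifted out the mask reads as zero and the
   block is finished. */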
9615 if (dc->condexec_mask) {
9616 dc->condexec_cond = (dc->condexec_cond & 0xe)
9617 | ((dc->condexec_mask >> 4) & 1);
9618 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9619 if (dc->condexec_mask == 0) {
9620 dc->condexec_cond = 0;
9621 }
9622 }
9623 } else {
9624 disas_arm_insn(env, dc);
9625 }
9626
9627 if (dc->condjmp && !dc->is_jmp) {
9628 gen_set_label(dc->condlabel);
9629 dc->condjmp = 0;
9630 }
9631
9632 if (tcg_check_temp_count()) {
9633 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9634 }
9635
9636 /* Translation stops when a conditional branch is encountered.
9637 * Otherwise the subsequent code could get translated several times.
9638 * Also stop translation when a page boundary is reached. This
9639 * ensures prefetch aborts occur at the right place. */
9640 num_insns ++;
9641 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9642 !env->singlestep_enabled &&
9643 !singlestep &&
9644 dc->pc < next_page_start &&
9645 num_insns < max_insns);
9646
9647 if (tb->cflags & CF_LAST_IO) {
9648 if (dc->condjmp) {
9649 /* FIXME: This can theoretically happen with self-modifying
9650 code. */
9651 cpu_abort(env, "IO on conditional branch instruction");
9652 }
9653 gen_io_end();
9654 }
9655
9656 /* At this stage dc->condjmp will only be set when the skipped
9657 instruction was a conditional branch or trap, and the PC has
9658 already been written. */
9659 if (unlikely(env->singlestep_enabled)) {
9660 /* Make sure the pc is updated, and raise a debug exception. */
9661 if (dc->condjmp) {
9662 gen_set_condexec(dc);
9663 if (dc->is_jmp == DISAS_SWI) {
9664 gen_exception(EXCP_SWI);
9665 } else {
9666 gen_exception(EXCP_DEBUG);
9667 }
9668 gen_set_label(dc->condlabel);
9669 }
9670 if (dc->condjmp || !dc->is_jmp) {
9671 gen_set_pc_im(dc->pc);
9672 dc->condjmp = 0;
9673 }
9674 gen_set_condexec(dc);
9675 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9676 gen_exception(EXCP_SWI);
9677 } else {
9678 /* FIXME: Single stepping a WFI insn will not halt
9679 the CPU. */
9680 gen_exception(EXCP_DEBUG);
9681 }
9682 } else {
9683 /* While branches must always occur at the end of an IT block,
9684 there are a few other things that can cause us to terminate
9685 the TB in the middle of an IT block:
9686 - Exception generating instructions (bkpt, swi, undefined).
9687 - Page boundaries.
9688 - Hardware watchpoints.
9689 Hardware breakpoints have already been handled and skip this code.
9690 */
9691 gen_set_condexec(dc);
9692 switch (dc->is_jmp) {
9693 case DISAS_NEXT:
9694 gen_goto_tb(dc, 1, dc->pc);
9695 break;
9696 default:
9697 case DISAS_JUMP:
9698 case DISAS_UPDATE:
9699 /* indicate that the hash table must be used to find the next TB */
9700 tcg_gen_exit_tb(0);
9701 break;
9702 case DISAS_TB_JUMP:
9703 /* nothing more to generate */
9704 break;
9705 case DISAS_WFI:
9706 gen_helper_wfi();
9707 break;
9708 case DISAS_SWI:
9709 gen_exception(EXCP_SWI);
9710 break;
9711 }
9712 if (dc->condjmp) {
9713 gen_set_label(dc->condlabel);
9714 gen_set_condexec(dc);
9715 gen_goto_tb(dc, 1, dc->pc);
9716 dc->condjmp = 0;
9717 }
9718 }
9719
9720 done_generating:
9721 gen_icount_end(tb, num_insns);
9722 *gen_opc_ptr = INDEX_op_end;
9723
9724 #ifdef DEBUG_DISAS
9725 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9726 qemu_log("----------------\n");
9727 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9728 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
9729 qemu_log("\n");
9730 }
9731 #endif
9732 if (search_pc) {
9733 j = gen_opc_ptr - gen_opc_buf;
9734 lj++;
9735 while (lj <= j)
9736 gen_opc_instr_start[lj++] = 0;
9737 } else {
9738 tb->size = dc->pc - pc_start;
9739 tb->icount = num_insns;
9740 }
9741 }
9742
9743 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
9744 {
9745 gen_intermediate_code_internal(env, tb, 0);
9746 }
9747
9748 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
9749 {
9750 gen_intermediate_code_internal(env, tb, 1);
9751 }
9752
9753 static const char *cpu_mode_names[16] = {
9754 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9755 "???", "???", "???", "und", "???", "???", "???", "sys"
9756 };
9757
9758 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
9759 int flags)
9760 {
9761 int i;
9762 #if 0
9763 union {
9764 uint32_t i;
9765 float s;
9766 } s0, s1;
9767 CPU_DoubleU d;
9768 /* ??? This assumes float64 and double have the same layout.
9769 Oh well, it's only debug dumps. */
9770 union {
9771 float64 f64;
9772 double d;
9773 } d0;
9774 #endif
9775 uint32_t psr;
9776
9777 for (i = 0; i < 16; i++) {
9778 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9779 if ((i % 4) == 3)
9780 cpu_fprintf(f, "\n");
9781 else
9782 cpu_fprintf(f, " ");
9783 }
9784 psr = cpsr_read(env);
9785 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9786 psr,
9787 psr & (1 << 31) ? 'N' : '-',
9788 psr & (1 << 30) ? 'Z' : '-',
9789 psr & (1 << 29) ? 'C' : '-',
9790 psr & (1 << 28) ? 'V' : '-',
9791 psr & CPSR_T ? 'T' : 'A',
9792 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
9793
9794 #if 0
9795 for (i = 0; i < 16; i++) {
9796 d.d = env->vfp.regs[i];
9797 s0.i = d.l.lower;
9798 s1.i = d.l.upper;
9799 d0.f64 = d.d;
9800 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9801 i * 2, (int)s0.i, s0.s,
9802 i * 2 + 1, (int)s1.i, s1.s,
9803 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
9804 d0.d);
9805 }
9806 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
9807 #endif
9808 }
9809
9810 void gen_pc_load(CPUState *env, TranslationBlock *tb,
9811 unsigned long searched_pc, int pc_pos, void *puc)
9812 {
9813 env->regs[15] = gen_opc_pc[pc_pos];
9814 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
9815 }