qemu.git / target-arm / translate.c
target-arm: Don't leak TCG temp for UNDEFs in Neon load/store space
/*
 * ARM translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"
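
/* A note on the double include above: helpers.h expands differently
   depending on GEN_HELPER. The first include declares the helper function
   prototypes; with GEN_HELPER defined to 1 it expands again into the
   gen_helper_* wrappers that emit TCG calls. arm_translate_init() below
   includes it a third time with GEN_HELPER 2, which in this tree expands
   into the code that registers each helper with TCG. */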

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
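
/* The decoders below use ARCH(x) to bail out to their illegal_op label
   (and so to UNDEF handling) when the required architecture feature is
   not present on the CPU being emulated. */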

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    int condlabel;
    /* Thumb-2 conditional execution bits. */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated. */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME: These should be removed. */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register. The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword. */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
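
/* A worked example of the masks above: for var = 0xAABBCCDD,
   (var >> 8) & 0x00ff00ff = 0x00AA00CC brings each halfword's high byte
   down, (var << 8) & 0xff00ff00 = 0xBB00DD00 moves each low byte up, and
   the OR yields 0xBBAADDCC, i.e. both halfwords byteswapped in place
   (the REV16 instruction). */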

/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract. */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract. */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
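
/* The xori/subi pair above is the classic branch-free sign extension of a
   width-bit value x: (x ^ signbit) - signbit. E.g. for width = 8 and
   x = 0xff, 0xff ^ 0x80 = 0x7f and 0x7f - 0x80 = 0xffffffff = -1. When
   shift + width == 32 the arithmetic shift has already brought the sign
   bit down, so the masking step is skipped. */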

/* Bitfield insertion. Insert val into base. Clobbers base and val. */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
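
/* Here mask is the unshifted field mask: e.g. shift = 8, mask = 0xff
   replaces bits 15:8 of base with the low byte of val, by clearing the
   field in base and ORing in the masked, shifted val. */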

/* Return (b << 32) + a. Mark inputs as dead. */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply. */
/* 32x32->64 multiply. Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords. */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
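
/* Why this works: clearing bit 15 of both operands guarantees that the
   low-halfword addition cannot carry into bit 16, so the two halfword
   sums stay independent within a single 32-bit add. The final xor with
   (t0 ^ t1) & 0x8000 then reconstructs the true bit 15 of the low sum:
   the sum bit without its carry-out, which a real 16-bit add would also
   have discarded. */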

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF. */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}
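
/* This is the ARM SBC/RSC semantic: the carry flag is an inverted borrow,
   so subtract-with-carry is T0 - T1 - (1 - CF), i.e. T0 - T1 + CF - 1. */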

/* FIXME: Implement this natively. */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

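/* Set CF to bit <shift> of var, i.e. the last bit shifted out by the
   immediate shift cases in gen_arm_shift_im() below. */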
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Shift by immediate. Includes special handling for shift == 0. */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
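
/* In the ARM encoding, op1 selects the variant (the s, q, sh, u, uq, uh
   helper prefixes) and op2 the operation (add16, addsubx, ..., sub8).
   Only the flag-setting s and u variants take the extra pointer to the
   GE flags in CPUState; the saturating and halving forms do not touch GE. */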

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

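/* Emit a branch to label taken when ARM condition code cc holds. This
   relies on how the port stores the flags: ZF holds a value that is zero
   exactly when Z is set, NF and VF keep their flag in bit 31 (hence the
   signed LT/GE compares against 0 below), and CF is 0 or 1. */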
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}

static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
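
/* Indexed by the 4-bit data-processing opcode; nonzero entries mark the
   logical ops (entries 8 and 9, "andl"/"xorl", are TST and TEQ), whose S
   forms set only N and Z from the result. The arithmetic ops must also
   compute C and V, so they are handled separately. */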

/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var. var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
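
/* This is the BX interworking rule: bit 0 of the branch target selects the
   instruction set (1 = Thumb, 0 = ARM) and is cleared out of the PC. */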

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
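
/* In all of the wrappers above, index is the MMU mode index for the
   access (callers pass IS_USER(s)), and the store wrappers free their
   value temporaries so callers can treat the value as dead. */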

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
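
/* This decodes ARM addressing mode 2: bit 25 (I) selects a 12-bit
   immediate versus a shifted register offset, and bit 23 (U) selects
   whether the offset is added to or subtracted from the base.
   gen_add_datah_offset() below does the same for addressing mode 3,
   whose 8-bit immediate is split across the instruction. */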

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    if (dp)                                                           \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name)                                               \
static inline void gen_vfp_##name(int dp, int shift)                    \
{                                                                       \
    TCGv tmp_shift = tcg_const_i32(shift);                              \
    if (dp)                                                             \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env); \
    else                                                                \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env); \
    tcg_temp_free_i32(tmp_shift);                                       \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
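
/* Naming for the fixed-point conversions above: to<x> converts F0 from
   float to fixed point and <x>to converts back, where s/u is signed or
   unsigned and h/l is a 16- or 32-bit fixed-point value; shift gives the
   number of fraction bits. E.g. gen_vfp_tosh() is float to signed
   halfword fixed point (the VCVT fixed-point forms). */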

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
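
/* The single-precision registers alias the double bank: S<2n> is the low
   word of D<n> and S<2n+1> the high word, which is why a single-precision
   register number is halved and the CPU_DoubleU upper/lower offset added. */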

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT (1 << 20)
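
/* Bit 20 is the L (direction) bit of the coprocessor transfer and
   load/store encodings: set for reads/loads (TMRRC, WLDR...), clear for
   writes/stores. */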

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
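
/* In wCon, bit 1 (MUP) records an update to an iwMMXt data register and
   bit 0 (CUP) an update to a control register; the decoder below calls
   these after each insn that modifies the corresponding state. */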

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
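
/* Standard coprocessor load/store addressing: P (bit 24) selects pre-
   versus post-indexing, U (bit 23) add versus subtract, and W (bit 21)
   base writeback; the P=0, W=0, U=0 combination is rejected as
   undefined. The 8-bit offset is scaled by 4 when bit 8 of the insn is
   set. */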

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
   (i.e. an undefined instruction). */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {         /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                            /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {          /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {     /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                    /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {     /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {                    /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {          /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {     /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {                    /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {     /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {                    /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

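    /* The decode key below packs insn bits 23:20 into the top hex digit
       and bits 11:4 into the low two, so e.g. case 0x011 (TMCR) means
       bits 23:20 = 0x0, bits 11:8 = 0x1 and bits 7:4 = 0x1. */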
    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                             /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                             /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through. */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                             /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                             /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                             /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                             /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                                 /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:         /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:         /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:         /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:         /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:         /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:         /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:         /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:         /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:         /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:         /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:         /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:         /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:         /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:         /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:         /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:         /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:         /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:         /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:         /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:         /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:         /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:         /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:         /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:         /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:         /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:         /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:         /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
2193 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2194 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2195 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2196 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2197 wrd = (insn >> 12) & 0xf;
2198 rd0 = (insn >> 16) & 0xf;
2199 gen_op_iwmmxt_movq_M0_wRn(rd0);
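/* The 8-bit lane-select immediate is split between insn[19:16] and insn[3:0]. */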
2200 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2201 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2202 tcg_temp_free(tmp);
2203 gen_op_iwmmxt_movq_wRn_M0(wrd);
2204 gen_op_iwmmxt_set_mup();
2205 gen_op_iwmmxt_set_cup();
2206 break;
2207 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2208 case 0x418: case 0x518: case 0x618: case 0x718:
2209 case 0x818: case 0x918: case 0xa18: case 0xb18:
2210 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2211 wrd = (insn >> 12) & 0xf;
2212 rd0 = (insn >> 16) & 0xf;
2213 rd1 = (insn >> 0) & 0xf;
2214 gen_op_iwmmxt_movq_M0_wRn(rd0);
2215 switch ((insn >> 20) & 0xf) {
2216 case 0x0:
2217 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2218 break;
2219 case 0x1:
2220 gen_op_iwmmxt_addub_M0_wRn(rd1);
2221 break;
2222 case 0x3:
2223 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2224 break;
2225 case 0x4:
2226 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2227 break;
2228 case 0x5:
2229 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2230 break;
2231 case 0x7:
2232 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2233 break;
2234 case 0x8:
2235 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2236 break;
2237 case 0x9:
2238 gen_op_iwmmxt_addul_M0_wRn(rd1);
2239 break;
2240 case 0xb:
2241 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2242 break;
2243 default:
2244 return 1;
2245 }
2246 gen_op_iwmmxt_movq_wRn_M0(wrd);
2247 gen_op_iwmmxt_set_mup();
2248 gen_op_iwmmxt_set_cup();
2249 break;
2250 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2251 case 0x408: case 0x508: case 0x608: case 0x708:
2252 case 0x808: case 0x908: case 0xa08: case 0xb08:
2253 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2254 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2255 return 1;
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 rd1 = (insn >> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0);
2260 switch ((insn >> 22) & 3) {
2261 case 1:
2262 if (insn & (1 << 21))
2263 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2264 else
2265 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2266 break;
2267 case 2:
2268 if (insn & (1 << 21))
2269 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2270 else
2271 gen_op_iwmmxt_packul_M0_wRn(rd1);
2272 break;
2273 case 3:
2274 if (insn & (1 << 21))
2275 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2276 else
2277 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2278 break;
2279 }
2280 gen_op_iwmmxt_movq_wRn_M0(wrd);
2281 gen_op_iwmmxt_set_mup();
2282 gen_op_iwmmxt_set_cup();
2283 break;
2284 case 0x201: case 0x203: case 0x205: case 0x207:
2285 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2286 case 0x211: case 0x213: case 0x215: case 0x217:
2287 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2288 wrd = (insn >> 5) & 0xf;
2289 rd0 = (insn >> 12) & 0xf;
2290 rd1 = (insn >> 0) & 0xf;
2291 if (rd0 == 0xf || rd1 == 0xf)
2292 return 1;
2293 gen_op_iwmmxt_movq_M0_wRn(wrd);
2294 tmp = load_reg(s, rd0);
2295 tmp2 = load_reg(s, rd1);
2296 switch ((insn >> 16) & 0xf) {
2297 case 0x0: /* TMIA */
2298 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2299 break;
2300 case 0x8: /* TMIAPH */
2301 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2302 break;
2303 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2304 if (insn & (1 << 16))
2305 tcg_gen_shri_i32(tmp, tmp, 16);
2306 if (insn & (1 << 17))
2307 tcg_gen_shri_i32(tmp2, tmp2, 16);
2308 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2309 break;
2310 default:
2311 tcg_temp_free_i32(tmp2);
2312 tcg_temp_free_i32(tmp);
2313 return 1;
2314 }
2315 tcg_temp_free_i32(tmp2);
2316 tcg_temp_free_i32(tmp);
2317 gen_op_iwmmxt_movq_wRn_M0(wrd);
2318 gen_op_iwmmxt_set_mup();
2319 break;
2320 default:
2321 return 1;
2322 }
2323
2324 return 0;
2325 }
2326
2327 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2328 (i.e. an undefined instruction). */
2329 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2330 {
2331 int acc, rd0, rd1, rdhi, rdlo;
2332 TCGv tmp, tmp2;
2333
2334 if ((insn & 0x0ff00f10) == 0x0e200010) {
2335 /* Multiply with Internal Accumulate Format */
2336 rd0 = (insn >> 12) & 0xf;
2337 rd1 = insn & 0xf;
2338 acc = (insn >> 5) & 7;
2339
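/* Only acc0 is implemented, although the acc field allows for up to
   eight accumulators. */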
2340 if (acc != 0)
2341 return 1;
2342
2343 tmp = load_reg(s, rd0);
2344 tmp2 = load_reg(s, rd1);
2345 switch ((insn >> 16) & 0xf) {
2346 case 0x0: /* MIA */
2347 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2348 break;
2349 case 0x8: /* MIAPH */
2350 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2351 break;
2352 case 0xc: /* MIABB */
2353 case 0xd: /* MIABT */
2354 case 0xe: /* MIATB */
2355 case 0xf: /* MIATT */
2356 if (insn & (1 << 16))
2357 tcg_gen_shri_i32(tmp, tmp, 16);
2358 if (insn & (1 << 17))
2359 tcg_gen_shri_i32(tmp2, tmp2, 16);
2360 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2361 break;
2362 default:
2363 return 1;
2364 }
2365 tcg_temp_free_i32(tmp2);
2366 tcg_temp_free_i32(tmp);
2367
2368 gen_op_iwmmxt_movq_wRn_M0(acc);
2369 return 0;
2370 }
2371
2372 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2373 /* Internal Accumulator Access Format */
2374 rdhi = (insn >> 16) & 0xf;
2375 rdlo = (insn >> 12) & 0xf;
2376 acc = insn & 7;
2377
2378 if (acc != 0)
2379 return 1;
2380
2381 if (insn & ARM_CP_RW_BIT) { /* MRA */
2382 iwmmxt_load_reg(cpu_V0, acc);
2383 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2384 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2385 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
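/* The XScale accumulator is only 40 bits wide, so keep just acc[39:32] in rdhi. */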
2386 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2387 } else { /* MAR */
2388 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2389 iwmmxt_store_reg(cpu_V0, acc);
2390 }
2391 return 0;
2392 }
2393
2394 return 1;
2395 }
2396
2397 /* Disassemble a system coprocessor instruction. Return nonzero if the
2398 instruction is not defined. */
2399 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2400 {
2401 TCGv tmp, tmp2;
2402 uint32_t rd = (insn >> 12) & 0xf;
2403 uint32_t cp = (insn >> 8) & 0xf;
2404 if (IS_USER(s)) {
2405 return 1;
2406 }
2407
2408 if (insn & ARM_CP_RW_BIT) {
2409 if (!env->cp[cp].cp_read)
2410 return 1;
2411 gen_set_pc_im(s->pc);
2412 tmp = tcg_temp_new_i32();
2413 tmp2 = tcg_const_i32(insn);
2414 gen_helper_get_cp(tmp, cpu_env, tmp2);
2415 tcg_temp_free(tmp2);
2416 store_reg(s, rd, tmp);
2417 } else {
2418 if (!env->cp[cp].cp_write)
2419 return 1;
2420 gen_set_pc_im(s->pc);
2421 tmp = load_reg(s, rd);
2422 tmp2 = tcg_const_i32(insn);
2423 gen_helper_set_cp(cpu_env, tmp2, tmp);
2424 tcg_temp_free(tmp2);
2425 tcg_temp_free_i32(tmp);
2426 }
2427 return 0;
2428 }
2429
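/* Return nonzero if this cp15 access is permitted in user mode. */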
2430 static int cp15_user_ok(uint32_t insn)
2431 {
2432 int cpn = (insn >> 16) & 0xf;
2433 int cpm = insn & 0xf;
2434 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2435
2436 if (cpn == 13 && cpm == 0) {
2437 /* TLS register. */
2438 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2439 return 1;
2440 }
2441 if (cpn == 7) {
2442 /* ISB, DSB, DMB. */
2443 if ((cpm == 5 && op == 4)
2444 || (cpm == 10 && (op == 4 || op == 5)))
2445 return 1;
2446 }
2447 return 0;
2448 }
2449
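/* Handle the V6K thread ID registers (0,c13,c0,{2,3,4}: TPIDRURW,
 * TPIDRURO and TPIDRPRW). Returns nonzero if the access was handled.
 */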
2450 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2451 {
2452 TCGv tmp;
2453 int cpn = (insn >> 16) & 0xf;
2454 int cpm = insn & 0xf;
2455 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2456
2457 if (!arm_feature(env, ARM_FEATURE_V6K))
2458 return 0;
2459
2460 if (!(cpn == 13 && cpm == 0))
2461 return 0;
2462
2463 if (insn & ARM_CP_RW_BIT) {
2464 switch (op) {
2465 case 2:
2466 tmp = load_cpu_field(cp15.c13_tls1);
2467 break;
2468 case 3:
2469 tmp = load_cpu_field(cp15.c13_tls2);
2470 break;
2471 case 4:
2472 tmp = load_cpu_field(cp15.c13_tls3);
2473 break;
2474 default:
2475 return 0;
2476 }
2477 store_reg(s, rd, tmp);
2478
2479 } else {
2480 tmp = load_reg(s, rd);
2481 switch (op) {
2482 case 2:
2483 store_cpu_field(tmp, cp15.c13_tls1);
2484 break;
2485 case 3:
2486 store_cpu_field(tmp, cp15.c13_tls2);
2487 break;
2488 case 4:
2489 store_cpu_field(tmp, cp15.c13_tls3);
2490 break;
2491 default:
2492 tcg_temp_free_i32(tmp);
2493 return 0;
2494 }
2495 }
2496 return 1;
2497 }
2498
2499 /* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2500 instruction is not defined. */
2501 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2502 {
2503 uint32_t rd;
2504 TCGv tmp, tmp2;
2505
2506 /* M profile cores use memory mapped registers instead of cp15. */
2507 if (arm_feature(env, ARM_FEATURE_M))
2508 return 1;
2509
2510 if ((insn & (1 << 25)) == 0) {
2511 if (insn & (1 << 20)) {
2512 /* mrrc */
2513 return 1;
2514 }
2515 /* mcrr. Used for block cache operations, so implement as no-op. */
2516 return 0;
2517 }
2518 if ((insn & (1 << 4)) == 0) {
2519 /* cdp */
2520 return 1;
2521 }
2522 if (IS_USER(s) && !cp15_user_ok(insn)) {
2523 return 1;
2524 }
2525
2526 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2527 * instructions rather than a separate instruction.
2528 */
2529 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2530 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2531 * In v7, this must be a NOP.
2532 */
2533 if (!arm_feature(env, ARM_FEATURE_V7)) {
2534 /* Wait for interrupt. */
2535 gen_set_pc_im(s->pc);
2536 s->is_jmp = DISAS_WFI;
2537 }
2538 return 0;
2539 }
2540
2541 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2542 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2543 * so this is slightly over-broad.
2544 */
2545 if (!arm_feature(env, ARM_FEATURE_V6)) {
2546 /* Wait for interrupt. */
2547 gen_set_pc_im(s->pc);
2548 s->is_jmp = DISAS_WFI;
2549 return 0;
2550 }
2551 /* Otherwise fall through to handle via helper function.
2552 * In particular, on v7 and some v6 cores this is one of
2553 * the VA-PA registers.
2554 */
2555 }
2556
2557 rd = (insn >> 12) & 0xf;
2558
2559 if (cp15_tls_load_store(env, s, insn, rd))
2560 return 0;
2561
2562 tmp2 = tcg_const_i32(insn);
2563 if (insn & ARM_CP_RW_BIT) {
2564 tmp = tcg_temp_new_i32();
2565 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2566 /* If the destination register is r15 then the condition codes are set. */
2567 if (rd != 15)
2568 store_reg(s, rd, tmp);
2569 else
2570 tcg_temp_free_i32(tmp);
2571 } else {
2572 tmp = load_reg(s, rd);
2573 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2574 tcg_temp_free_i32(tmp);
2575 /* Normally we would always end the TB here, but Linux
2576 * arch/arm/mach-pxa/sleep.S expects two instructions following
2577 * an MMU enable to execute from cache. Imitate this behaviour. */
2578 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2579 (insn & 0x0fff0fff) != 0x0e010f10)
2580 gen_lookup_tb(s);
2581 }
2582 tcg_temp_free_i32(tmp2);
2583 return 0;
2584 }
2585
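/* VFP register numbers in an instruction are encoded as a 4-bit field
 * plus one extra bit. For single precision registers the extra bit is
 * the LSB (S0..S31), so the 4-bit field supplies reg[4:1]; for double
 * precision registers on VFP3 it is the MSB (D0..D31). Pre-VFP3 cores
 * only have D0..D15, so a set extra bit is UNDEF there.
 * E.g. VFP_DREG_D with insn[15:12] = 0x3 and insn[22] = 1 gives D19.
 */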
2586 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2587 #define VFP_SREG(insn, bigbit, smallbit) \
2588 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2589 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2590 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2591 reg = (((insn) >> (bigbit)) & 0x0f) \
2592 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2593 } else { \
2594 if (insn & (1 << (smallbit))) \
2595 return 1; \
2596 reg = ((insn) >> (bigbit)) & 0x0f; \
2597 }} while (0)
2598
2599 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2600 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2601 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2602 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2603 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2604 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2605
2606 /* Move between integer and VFP cores. */
2607 static TCGv gen_vfp_mrs(void)
2608 {
2609 TCGv tmp = tcg_temp_new_i32();
2610 tcg_gen_mov_i32(tmp, cpu_F0s);
2611 return tmp;
2612 }
2613
2614 static void gen_vfp_msr(TCGv tmp)
2615 {
2616 tcg_gen_mov_i32(cpu_F0s, tmp);
2617 tcg_temp_free_i32(tmp);
2618 }
2619
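/* Replicate the byte at bit offset `shift' of var across all four byte
 * lanes, e.g. 0x000000ab -> 0xabababab. */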
2620 static void gen_neon_dup_u8(TCGv var, int shift)
2621 {
2622 TCGv tmp = tcg_temp_new_i32();
2623 if (shift)
2624 tcg_gen_shri_i32(var, var, shift);
2625 tcg_gen_ext8u_i32(var, var);
2626 tcg_gen_shli_i32(tmp, var, 8);
2627 tcg_gen_or_i32(var, var, tmp);
2628 tcg_gen_shli_i32(tmp, var, 16);
2629 tcg_gen_or_i32(var, var, tmp);
2630 tcg_temp_free_i32(tmp);
2631 }
2632
2633 static void gen_neon_dup_low16(TCGv var)
2634 {
2635 TCGv tmp = tcg_temp_new_i32();
2636 tcg_gen_ext16u_i32(var, var);
2637 tcg_gen_shli_i32(tmp, var, 16);
2638 tcg_gen_or_i32(var, var, tmp);
2639 tcg_temp_free_i32(tmp);
2640 }
2641
2642 static void gen_neon_dup_high16(TCGv var)
2643 {
2644 TCGv tmp = tcg_temp_new_i32();
2645 tcg_gen_andi_i32(var, var, 0xffff0000);
2646 tcg_gen_shri_i32(tmp, var, 16);
2647 tcg_gen_or_i32(var, var, tmp);
2648 tcg_temp_free_i32(tmp);
2649 }
2650
2651 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2652 {
2653 /* Load a single Neon element and replicate it into a 32-bit TCG reg. */
2654 TCGv tmp;
2655 switch (size) {
2656 case 0:
2657 tmp = gen_ld8u(addr, IS_USER(s));
2658 gen_neon_dup_u8(tmp, 0);
2659 break;
2660 case 1:
2661 tmp = gen_ld16u(addr, IS_USER(s));
2662 gen_neon_dup_low16(tmp);
2663 break;
2664 case 2:
2665 tmp = gen_ld32(addr, IS_USER(s));
2666 break;
2667 default: /* Avoid compiler warnings. */
2668 abort();
2669 }
2670 return tmp;
2671 }
2672
2673 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2674 (i.e. an undefined instruction). */
2675 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2676 {
2677 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2678 int dp, veclen;
2679 TCGv addr;
2680 TCGv tmp;
2681 TCGv tmp2;
2682
2683 if (!arm_feature(env, ARM_FEATURE_VFP))
2684 return 1;
2685
2686 if (!s->vfp_enabled) {
2687 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2688 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2689 return 1;
2690 rn = (insn >> 16) & 0xf;
2691 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2692 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2693 return 1;
2694 }
2695 dp = ((insn & 0xf00) == 0xb00);
2696 switch ((insn >> 24) & 0xf) {
2697 case 0xe:
2698 if (insn & (1 << 4)) {
2699 /* single register transfer */
2700 rd = (insn >> 12) & 0xf;
2701 if (dp) {
2702 int size;
2703 int pass;
2704
2705 VFP_DREG_N(rn, insn);
2706 if (insn & 0xf)
2707 return 1;
2708 if (insn & 0x00c00060
2709 && !arm_feature(env, ARM_FEATURE_NEON))
2710 return 1;
2711
2712 pass = (insn >> 21) & 1;
2713 if (insn & (1 << 22)) {
2714 size = 0;
2715 offset = ((insn >> 5) & 3) * 8;
2716 } else if (insn & (1 << 5)) {
2717 size = 1;
2718 offset = (insn & (1 << 6)) ? 16 : 0;
2719 } else {
2720 size = 2;
2721 offset = 0;
2722 }
2723 if (insn & ARM_CP_RW_BIT) {
2724 /* vfp->arm */
2725 tmp = neon_load_reg(rn, pass);
2726 switch (size) {
2727 case 0:
2728 if (offset)
2729 tcg_gen_shri_i32(tmp, tmp, offset);
2730 if (insn & (1 << 23))
2731 gen_uxtb(tmp);
2732 else
2733 gen_sxtb(tmp);
2734 break;
2735 case 1:
2736 if (insn & (1 << 23)) {
2737 if (offset) {
2738 tcg_gen_shri_i32(tmp, tmp, 16);
2739 } else {
2740 gen_uxth(tmp);
2741 }
2742 } else {
2743 if (offset) {
2744 tcg_gen_sari_i32(tmp, tmp, 16);
2745 } else {
2746 gen_sxth(tmp);
2747 }
2748 }
2749 break;
2750 case 2:
2751 break;
2752 }
2753 store_reg(s, rd, tmp);
2754 } else {
2755 /* arm->vfp */
2756 tmp = load_reg(s, rd);
2757 if (insn & (1 << 23)) {
2758 /* VDUP */
2759 if (size == 0) {
2760 gen_neon_dup_u8(tmp, 0);
2761 } else if (size == 1) {
2762 gen_neon_dup_low16(tmp);
2763 }
2764 for (n = 0; n <= pass * 2; n++) {
2765 tmp2 = tcg_temp_new_i32();
2766 tcg_gen_mov_i32(tmp2, tmp);
2767 neon_store_reg(rn, n, tmp2);
2768 }
2769 neon_store_reg(rn, n, tmp);
2770 } else {
2771 /* VMOV */
2772 switch (size) {
2773 case 0:
2774 tmp2 = neon_load_reg(rn, pass);
2775 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2776 tcg_temp_free_i32(tmp2);
2777 break;
2778 case 1:
2779 tmp2 = neon_load_reg(rn, pass);
2780 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2781 tcg_temp_free_i32(tmp2);
2782 break;
2783 case 2:
2784 break;
2785 }
2786 neon_store_reg(rn, pass, tmp);
2787 }
2788 }
2789 } else { /* !dp */
2790 if ((insn & 0x6f) != 0x00)
2791 return 1;
2792 rn = VFP_SREG_N(insn);
2793 if (insn & ARM_CP_RW_BIT) {
2794 /* vfp->arm */
2795 if (insn & (1 << 21)) {
2796 /* system register */
2797 rn >>= 1;
2798
2799 switch (rn) {
2800 case ARM_VFP_FPSID:
2801 /* VFP2 allows access to FPSID from userspace;
2802 VFP3 restricts all ID registers to privileged
2803 accesses. */
2804 if (IS_USER(s)
2805 && arm_feature(env, ARM_FEATURE_VFP3))
2806 return 1;
2807 tmp = load_cpu_field(vfp.xregs[rn]);
2808 break;
2809 case ARM_VFP_FPEXC:
2810 if (IS_USER(s))
2811 return 1;
2812 tmp = load_cpu_field(vfp.xregs[rn]);
2813 break;
2814 case ARM_VFP_FPINST:
2815 case ARM_VFP_FPINST2:
2816 /* Not present in VFP3. */
2817 if (IS_USER(s)
2818 || arm_feature(env, ARM_FEATURE_VFP3))
2819 return 1;
2820 tmp = load_cpu_field(vfp.xregs[rn]);
2821 break;
2822 case ARM_VFP_FPSCR:
2823 if (rd == 15) {
2824 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2825 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2826 } else {
2827 tmp = tcg_temp_new_i32();
2828 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2829 }
2830 break;
2831 case ARM_VFP_MVFR0:
2832 case ARM_VFP_MVFR1:
2833 if (IS_USER(s)
2834 || !arm_feature(env, ARM_FEATURE_VFP3))
2835 return 1;
2836 tmp = load_cpu_field(vfp.xregs[rn]);
2837 break;
2838 default:
2839 return 1;
2840 }
2841 } else {
2842 gen_mov_F0_vreg(0, rn);
2843 tmp = gen_vfp_mrs();
2844 }
2845 if (rd == 15) {
2846 /* Set the 4 flag bits in the CPSR. */
2847 gen_set_nzcv(tmp);
2848 tcg_temp_free_i32(tmp);
2849 } else {
2850 store_reg(s, rd, tmp);
2851 }
2852 } else {
2853 /* arm->vfp */
2854 tmp = load_reg(s, rd);
2855 if (insn & (1 << 21)) {
2856 rn >>= 1;
2857 /* system register */
2858 switch (rn) {
2859 case ARM_VFP_FPSID:
2860 case ARM_VFP_MVFR0:
2861 case ARM_VFP_MVFR1:
2862 /* Writes are ignored. */
2863 break;
2864 case ARM_VFP_FPSCR:
2865 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2866 tcg_temp_free_i32(tmp);
2867 gen_lookup_tb(s);
2868 break;
2869 case ARM_VFP_FPEXC:
2870 if (IS_USER(s))
2871 return 1;
2872 /* TODO: VFP subarchitecture support.
2873 * For now, keep only the EN bit. */
2874 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2875 store_cpu_field(tmp, vfp.xregs[rn]);
2876 gen_lookup_tb(s);
2877 break;
2878 case ARM_VFP_FPINST:
2879 case ARM_VFP_FPINST2:
2880 store_cpu_field(tmp, vfp.xregs[rn]);
2881 break;
2882 default:
2883 return 1;
2884 }
2885 } else {
2886 gen_vfp_msr(tmp);
2887 gen_mov_vreg_F0(0, rn);
2888 }
2889 }
2890 }
2891 } else {
2892 /* data processing */
2893 /* The opcode is in bits 23, 21, 20 and 6. */
2894 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2895 if (dp) {
2896 if (op == 15) {
2897 /* rn is opcode */
2898 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2899 } else {
2900 /* rn is register number */
2901 VFP_DREG_N(rn, insn);
2902 }
2903
2904 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2905 /* Integer or single precision destination. */
2906 rd = VFP_SREG_D(insn);
2907 } else {
2908 VFP_DREG_D(rd, insn);
2909 }
2910 if (op == 15 &&
2911 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2912 /* VCVT from int is always from S reg regardless of dp bit.
2913 * VCVT with immediate frac_bits has same format as SREG_M
2914 */
2915 rm = VFP_SREG_M(insn);
2916 } else {
2917 VFP_DREG_M(rm, insn);
2918 }
2919 } else {
2920 rn = VFP_SREG_N(insn);
2921 if (op == 15 && rn == 15) {
2922 /* Double precision destination. */
2923 VFP_DREG_D(rd, insn);
2924 } else {
2925 rd = VFP_SREG_D(insn);
2926 }
2927 /* NB that we implicitly rely on the encoding for the frac_bits
2928 * in VCVT of fixed to float being the same as that of an SREG_M
2929 */
2930 rm = VFP_SREG_M(insn);
2931 }
2932
2933 veclen = s->vec_len;
2934 if (op == 15 && rn > 3)
2935 veclen = 0;
2936
2937 /* Shut up compiler warnings. */
2938 delta_m = 0;
2939 delta_d = 0;
2940 bank_mask = 0;
2941
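/* VFP short vector operations: the register file is divided into banks
 * (of eight single precision or four double precision registers), with
 * the vector length and stride taken from FPSCR (s->vec_len and
 * s->vec_stride). A destination in bank 0 makes the operation scalar;
 * otherwise it repeats, stepping by the stride and wrapping within the
 * bank. A source operand in bank 0 stays fixed (mixed scalar/vector).
 */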
2942 if (veclen > 0) {
2943 if (dp)
2944 bank_mask = 0xc;
2945 else
2946 bank_mask = 0x18;
2947
2948 /* Figure out what type of vector operation this is. */
2949 if ((rd & bank_mask) == 0) {
2950 /* scalar */
2951 veclen = 0;
2952 } else {
2953 if (dp)
2954 delta_d = (s->vec_stride >> 1) + 1;
2955 else
2956 delta_d = s->vec_stride + 1;
2957
2958 if ((rm & bank_mask) == 0) {
2959 /* mixed scalar/vector */
2960 delta_m = 0;
2961 } else {
2962 /* vector */
2963 delta_m = delta_d;
2964 }
2965 }
2966 }
2967
2968 /* Load the initial operands. */
2969 if (op == 15) {
2970 switch (rn) {
2971 case 16:
2972 case 17:
2973 /* Integer source */
2974 gen_mov_F0_vreg(0, rm);
2975 break;
2976 case 8:
2977 case 9:
2978 /* Compare */
2979 gen_mov_F0_vreg(dp, rd);
2980 gen_mov_F1_vreg(dp, rm);
2981 break;
2982 case 10:
2983 case 11:
2984 /* Compare with zero */
2985 gen_mov_F0_vreg(dp, rd);
2986 gen_vfp_F1_ld0(dp);
2987 break;
2988 case 20:
2989 case 21:
2990 case 22:
2991 case 23:
2992 case 28:
2993 case 29:
2994 case 30:
2995 case 31:
2996 /* Source and destination the same. */
2997 gen_mov_F0_vreg(dp, rd);
2998 break;
2999 default:
3000 /* One source operand. */
3001 gen_mov_F0_vreg(dp, rm);
3002 break;
3003 }
3004 } else {
3005 /* Two source operands. */
3006 gen_mov_F0_vreg(dp, rn);
3007 gen_mov_F1_vreg(dp, rm);
3008 }
3009
3010 for (;;) {
3011 /* Perform the calculation. */
3012 switch (op) {
3013 case 0: /* mac: fd + (fn * fm) */
3014 gen_vfp_mul(dp);
3015 gen_mov_F1_vreg(dp, rd);
3016 gen_vfp_add(dp);
3017 break;
3018 case 1: /* nmac: fd - (fn * fm) */
3019 gen_vfp_mul(dp);
3020 gen_vfp_neg(dp);
3021 gen_mov_F1_vreg(dp, rd);
3022 gen_vfp_add(dp);
3023 break;
3024 case 2: /* msc: -fd + (fn * fm) */
3025 gen_vfp_mul(dp);
3026 gen_mov_F1_vreg(dp, rd);
3027 gen_vfp_sub(dp);
3028 break;
3029 case 3: /* nmsc: -fd - (fn * fm) */
3030 gen_vfp_mul(dp);
3031 gen_vfp_neg(dp);
3032 gen_mov_F1_vreg(dp, rd);
3033 gen_vfp_sub(dp);
3034 break;
3035 case 4: /* mul: fn * fm */
3036 gen_vfp_mul(dp);
3037 break;
3038 case 5: /* nmul: -(fn * fm) */
3039 gen_vfp_mul(dp);
3040 gen_vfp_neg(dp);
3041 break;
3042 case 6: /* add: fn + fm */
3043 gen_vfp_add(dp);
3044 break;
3045 case 7: /* sub: fn - fm */
3046 gen_vfp_sub(dp);
3047 break;
3048 case 8: /* div: fn / fm */
3049 gen_vfp_div(dp);
3050 break;
3051 case 14: /* fconst */
3052 if (!arm_feature(env, ARM_FEATURE_VFP3))
3053 return 1;
3054
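/* Expand the VFPv3 immediate abcdefgh (a = insn[19], bcd = insn[18:16],
 * efgh = insn[3:0]) into an IEEE value: sign = a, exponent = NOT(b)
 * followed by b replicated (5 times for single, 8 for double) then cd,
 * fraction = efgh followed by zeros.
 */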
3055 n = (insn << 12) & 0x80000000;
3056 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3057 if (dp) {
3058 if (i & 0x40)
3059 i |= 0x3f80;
3060 else
3061 i |= 0x4000;
3062 n |= i << 16;
3063 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3064 } else {
3065 if (i & 0x40)
3066 i |= 0x780;
3067 else
3068 i |= 0x800;
3069 n |= i << 19;
3070 tcg_gen_movi_i32(cpu_F0s, n);
3071 }
3072 break;
3073 case 15: /* extension space */
3074 switch (rn) {
3075 case 0: /* cpy */
3076 /* no-op */
3077 break;
3078 case 1: /* abs */
3079 gen_vfp_abs(dp);
3080 break;
3081 case 2: /* neg */
3082 gen_vfp_neg(dp);
3083 break;
3084 case 3: /* sqrt */
3085 gen_vfp_sqrt(dp);
3086 break;
3087 case 4: /* vcvtb.f32.f16 */
3088 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3089 return 1;
3090 tmp = gen_vfp_mrs();
3091 tcg_gen_ext16u_i32(tmp, tmp);
3092 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3093 tcg_temp_free_i32(tmp);
3094 break;
3095 case 5: /* vcvtt.f32.f16 */
3096 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3097 return 1;
3098 tmp = gen_vfp_mrs();
3099 tcg_gen_shri_i32(tmp, tmp, 16);
3100 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3101 tcg_temp_free_i32(tmp);
3102 break;
3103 case 6: /* vcvtb.f16.f32 */
3104 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3105 return 1;
3106 tmp = tcg_temp_new_i32();
3107 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3108 gen_mov_F0_vreg(0, rd);
3109 tmp2 = gen_vfp_mrs();
3110 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3111 tcg_gen_or_i32(tmp, tmp, tmp2);
3112 tcg_temp_free_i32(tmp2);
3113 gen_vfp_msr(tmp);
3114 break;
3115 case 7: /* vcvtt.f16.f32 */
3116 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3117 return 1;
3118 tmp = tcg_temp_new_i32();
3119 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3120 tcg_gen_shli_i32(tmp, tmp, 16);
3121 gen_mov_F0_vreg(0, rd);
3122 tmp2 = gen_vfp_mrs();
3123 tcg_gen_ext16u_i32(tmp2, tmp2);
3124 tcg_gen_or_i32(tmp, tmp, tmp2);
3125 tcg_temp_free_i32(tmp2);
3126 gen_vfp_msr(tmp);
3127 break;
3128 case 8: /* cmp */
3129 gen_vfp_cmp(dp);
3130 break;
3131 case 9: /* cmpe */
3132 gen_vfp_cmpe(dp);
3133 break;
3134 case 10: /* cmpz */
3135 gen_vfp_cmp(dp);
3136 break;
3137 case 11: /* cmpez */
3138 gen_vfp_F1_ld0(dp);
3139 gen_vfp_cmpe(dp);
3140 break;
3141 case 15: /* single<->double conversion */
3142 if (dp)
3143 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3144 else
3145 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3146 break;
3147 case 16: /* fuito */
3148 gen_vfp_uito(dp);
3149 break;
3150 case 17: /* fsito */
3151 gen_vfp_sito(dp);
3152 break;
3153 case 20: /* fshto */
3154 if (!arm_feature(env, ARM_FEATURE_VFP3))
3155 return 1;
3156 gen_vfp_shto(dp, 16 - rm);
3157 break;
3158 case 21: /* fslto */
3159 if (!arm_feature(env, ARM_FEATURE_VFP3))
3160 return 1;
3161 gen_vfp_slto(dp, 32 - rm);
3162 break;
3163 case 22: /* fuhto */
3164 if (!arm_feature(env, ARM_FEATURE_VFP3))
3165 return 1;
3166 gen_vfp_uhto(dp, 16 - rm);
3167 break;
3168 case 23: /* fulto */
3169 if (!arm_feature(env, ARM_FEATURE_VFP3))
3170 return 1;
3171 gen_vfp_ulto(dp, 32 - rm);
3172 break;
3173 case 24: /* ftoui */
3174 gen_vfp_toui(dp);
3175 break;
3176 case 25: /* ftouiz */
3177 gen_vfp_touiz(dp);
3178 break;
3179 case 26: /* ftosi */
3180 gen_vfp_tosi(dp);
3181 break;
3182 case 27: /* ftosiz */
3183 gen_vfp_tosiz(dp);
3184 break;
3185 case 28: /* ftosh */
3186 if (!arm_feature(env, ARM_FEATURE_VFP3))
3187 return 1;
3188 gen_vfp_tosh(dp, 16 - rm);
3189 break;
3190 case 29: /* ftosl */
3191 if (!arm_feature(env, ARM_FEATURE_VFP3))
3192 return 1;
3193 gen_vfp_tosl(dp, 32 - rm);
3194 break;
3195 case 30: /* ftouh */
3196 if (!arm_feature(env, ARM_FEATURE_VFP3))
3197 return 1;
3198 gen_vfp_touh(dp, 16 - rm);
3199 break;
3200 case 31: /* ftoul */
3201 if (!arm_feature(env, ARM_FEATURE_VFP3))
3202 return 1;
3203 gen_vfp_toul(dp, 32 - rm);
3204 break;
3205 default: /* undefined */
3206 printf ("rn:%d\n", rn);
3207 return 1;
3208 }
3209 break;
3210 default: /* undefined */
3211 printf ("op:%d\n", op);
3212 return 1;
3213 }
3214
3215 /* Write back the result. */
3216 if (op == 15 && (rn >= 8 && rn <= 11))
3217 ; /* Comparison, do nothing. */
3218 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3219 /* VCVT double to int: always integer result. */
3220 gen_mov_vreg_F0(0, rd);
3221 else if (op == 15 && rn == 15)
3222 /* conversion */
3223 gen_mov_vreg_F0(!dp, rd);
3224 else
3225 gen_mov_vreg_F0(dp, rd);
3226
3227 /* break out of the loop if we have finished */
3228 if (veclen == 0)
3229 break;
3230
3231 if (op == 15 && delta_m == 0) {
3232 /* single source one-many */
3233 while (veclen--) {
3234 rd = ((rd + delta_d) & (bank_mask - 1))
3235 | (rd & bank_mask);
3236 gen_mov_vreg_F0(dp, rd);
3237 }
3238 break;
3239 }
3240 /* Set up the next operands. */
3241 veclen--;
3242 rd = ((rd + delta_d) & (bank_mask - 1))
3243 | (rd & bank_mask);
3244
3245 if (op == 15) {
3246 /* One source operand. */
3247 rm = ((rm + delta_m) & (bank_mask - 1))
3248 | (rm & bank_mask);
3249 gen_mov_F0_vreg(dp, rm);
3250 } else {
3251 /* Two source operands. */
3252 rn = ((rn + delta_d) & (bank_mask - 1))
3253 | (rn & bank_mask);
3254 gen_mov_F0_vreg(dp, rn);
3255 if (delta_m) {
3256 rm = ((rm + delta_m) & (bank_mask - 1))
3257 | (rm & bank_mask);
3258 gen_mov_F1_vreg(dp, rm);
3259 }
3260 }
3261 }
3262 }
3263 break;
3264 case 0xc:
3265 case 0xd:
3266 if ((insn & 0x03e00000) == 0x00400000) {
3267 /* two-register transfer */
3268 rn = (insn >> 16) & 0xf;
3269 rd = (insn >> 12) & 0xf;
3270 if (dp) {
3271 VFP_DREG_M(rm, insn);
3272 } else {
3273 rm = VFP_SREG_M(insn);
3274 }
3275
3276 if (insn & ARM_CP_RW_BIT) {
3277 /* vfp->arm */
3278 if (dp) {
3279 gen_mov_F0_vreg(0, rm * 2);
3280 tmp = gen_vfp_mrs();
3281 store_reg(s, rd, tmp);
3282 gen_mov_F0_vreg(0, rm * 2 + 1);
3283 tmp = gen_vfp_mrs();
3284 store_reg(s, rn, tmp);
3285 } else {
3286 gen_mov_F0_vreg(0, rm);
3287 tmp = gen_vfp_mrs();
3288 store_reg(s, rd, tmp);
3289 gen_mov_F0_vreg(0, rm + 1);
3290 tmp = gen_vfp_mrs();
3291 store_reg(s, rn, tmp);
3292 }
3293 } else {
3294 /* arm->vfp */
3295 if (dp) {
3296 tmp = load_reg(s, rd);
3297 gen_vfp_msr(tmp);
3298 gen_mov_vreg_F0(0, rm * 2);
3299 tmp = load_reg(s, rn);
3300 gen_vfp_msr(tmp);
3301 gen_mov_vreg_F0(0, rm * 2 + 1);
3302 } else {
3303 tmp = load_reg(s, rd);
3304 gen_vfp_msr(tmp);
3305 gen_mov_vreg_F0(0, rm);
3306 tmp = load_reg(s, rn);
3307 gen_vfp_msr(tmp);
3308 gen_mov_vreg_F0(0, rm + 1);
3309 }
3310 }
3311 } else {
3312 /* Load/store */
3313 rn = (insn >> 16) & 0xf;
3314 if (dp)
3315 VFP_DREG_D(rd, insn);
3316 else
3317 rd = VFP_SREG_D(insn);
3318 if (s->thumb && rn == 15) {
3319 addr = tcg_temp_new_i32();
3320 tcg_gen_movi_i32(addr, s->pc & ~2);
3321 } else {
3322 addr = load_reg(s, rn);
3323 }
3324 if ((insn & 0x01200000) == 0x01000000) {
3325 /* Single load/store */
3326 offset = (insn & 0xff) << 2;
3327 if ((insn & (1 << 23)) == 0)
3328 offset = -offset;
3329 tcg_gen_addi_i32(addr, addr, offset);
3330 if (insn & (1 << 20)) {
3331 gen_vfp_ld(s, dp, addr);
3332 gen_mov_vreg_F0(dp, rd);
3333 } else {
3334 gen_mov_F0_vreg(dp, rd);
3335 gen_vfp_st(s, dp, addr);
3336 }
3337 tcg_temp_free_i32(addr);
3338 } else {
3339 /* load/store multiple */
3340 if (dp)
3341 n = (insn >> 1) & 0x7f;
3342 else
3343 n = insn & 0xff;
3344
3345 if (insn & (1 << 24)) /* pre-decrement */
3346 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3347
3348 if (dp)
3349 offset = 8;
3350 else
3351 offset = 4;
3352 for (i = 0; i < n; i++) {
3353 if (insn & ARM_CP_RW_BIT) {
3354 /* load */
3355 gen_vfp_ld(s, dp, addr);
3356 gen_mov_vreg_F0(dp, rd + i);
3357 } else {
3358 /* store */
3359 gen_mov_F0_vreg(dp, rd + i);
3360 gen_vfp_st(s, dp, addr);
3361 }
3362 tcg_gen_addi_i32(addr, addr, offset);
3363 }
3364 if (insn & (1 << 21)) {
3365 /* writeback */
3366 if (insn & (1 << 24))
3367 offset = -offset * n;
3368 else if (dp && (insn & 1))
3369 offset = 4;
3370 else
3371 offset = 0;
3372
3373 if (offset != 0)
3374 tcg_gen_addi_i32(addr, addr, offset);
3375 store_reg(s, rn, addr);
3376 } else {
3377 tcg_temp_free_i32(addr);
3378 }
3379 }
3380 }
3381 break;
3382 default:
3383 /* Should never happen. */
3384 return 1;
3385 }
3386 return 0;
3387 }
3388
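/* Jump to a TB, chaining directly to it only if the destination lies on
 * the same guest page as this TB; cross-page jumps must go back to the
 * main loop so that a change in the page mapping is noticed.
 */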
3389 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3390 {
3391 TranslationBlock *tb;
3392
3393 tb = s->tb;
3394 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3395 tcg_gen_goto_tb(n);
3396 gen_set_pc_im(dest);
3397 tcg_gen_exit_tb((long)tb + n);
3398 } else {
3399 gen_set_pc_im(dest);
3400 tcg_gen_exit_tb(0);
3401 }
3402 }
3403
3404 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3405 {
3406 if (unlikely(s->singlestep_enabled)) {
3407 /* An indirect jump so that we still trigger the debug exception. */
3408 if (s->thumb)
3409 dest |= 1;
3410 gen_bx_im(s, dest);
3411 } else {
3412 gen_goto_tb(s, 0, dest);
3413 s->is_jmp = DISAS_TB_JUMP;
3414 }
3415 }
3416
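/* Signed 16x16->32 multiply of selected halves of t0 and t1 (the SMULxy
 * family): x and y pick the top (1) or bottom (0) halfword of each operand.
 */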
3417 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3418 {
3419 if (x)
3420 tcg_gen_sari_i32(t0, t0, 16);
3421 else
3422 gen_sxth(t0);
3423 if (y)
3424 tcg_gen_sari_i32(t1, t1, 16);
3425 else
3426 gen_sxth(t1);
3427 tcg_gen_mul_i32(t0, t0, t1);
3428 }
3429
3430 /* Return the mask of PSR bits set by a MSR instruction. */
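/* flags bits 0..3 correspond to the instruction's c, x, s and f field mask bits. */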
3431 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3432 uint32_t mask;
3433
3434 mask = 0;
3435 if (flags & (1 << 0))
3436 mask |= 0xff;
3437 if (flags & (1 << 1))
3438 mask |= 0xff00;
3439 if (flags & (1 << 2))
3440 mask |= 0xff0000;
3441 if (flags & (1 << 3))
3442 mask |= 0xff000000;
3443
3444 /* Mask out undefined bits. */
3445 mask &= ~CPSR_RESERVED;
3446 if (!arm_feature(env, ARM_FEATURE_V6))
3447 mask &= ~(CPSR_E | CPSR_GE);
3448 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3449 mask &= ~CPSR_IT;
3450 /* Mask out execution state bits. */
3451 if (!spsr)
3452 mask &= ~CPSR_EXEC;
3453 /* Mask out privileged bits. */
3454 if (IS_USER(s))
3455 mask &= CPSR_USER;
3456 return mask;
3457 }
3458
3459 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3460 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3461 {
3462 TCGv tmp;
3463 if (spsr) {
3464 /* ??? This is also undefined in system mode. */
3465 if (IS_USER(s))
3466 return 1;
3467
3468 tmp = load_cpu_field(spsr);
3469 tcg_gen_andi_i32(tmp, tmp, ~mask);
3470 tcg_gen_andi_i32(t0, t0, mask);
3471 tcg_gen_or_i32(tmp, tmp, t0);
3472 store_cpu_field(tmp, spsr);
3473 } else {
3474 gen_set_cpsr(t0, mask);
3475 }
3476 tcg_temp_free_i32(t0);
3477 gen_lookup_tb(s);
3478 return 0;
3479 }
3480
3481 /* Returns nonzero if access to the PSR is not permitted. */
3482 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3483 {
3484 TCGv tmp;
3485 tmp = tcg_temp_new_i32();
3486 tcg_gen_movi_i32(tmp, val);
3487 return gen_set_psr(s, mask, spsr, tmp);
3488 }
3489
3490 /* Generate an old-style exception return. Marks pc as dead. */
3491 static void gen_exception_return(DisasContext *s, TCGv pc)
3492 {
3493 TCGv tmp;
3494 store_reg(s, 15, pc);
3495 tmp = load_cpu_field(spsr);
3496 gen_set_cpsr(tmp, 0xffffffff);
3497 tcg_temp_free_i32(tmp);
3498 s->is_jmp = DISAS_UPDATE;
3499 }
3500
3501 /* Generate a v6 exception return. Marks both values as dead. */
3502 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3503 {
3504 gen_set_cpsr(cpsr, 0xffffffff);
3505 tcg_temp_free_i32(cpsr);
3506 store_reg(s, 15, pc);
3507 s->is_jmp = DISAS_UPDATE;
3508 }
3509
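/* Write the current IT-state back to env so that an exception raised by
 * the generated code is taken with the correct condexec bits. */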
3510 static inline void
3511 gen_set_condexec (DisasContext *s)
3512 {
3513 if (s->condexec_mask) {
3514 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3515 TCGv tmp = tcg_temp_new_i32();
3516 tcg_gen_movi_i32(tmp, val);
3517 store_cpu_field(tmp, condexec_bits);
3518 }
3519 }
3520
3521 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3522 {
3523 gen_set_condexec(s);
3524 gen_set_pc_im(s->pc - offset);
3525 gen_exception(excp);
3526 s->is_jmp = DISAS_JUMP;
3527 }
3528
3529 static void gen_nop_hint(DisasContext *s, int val)
3530 {
3531 switch (val) {
3532 case 3: /* wfi */
3533 gen_set_pc_im(s->pc);
3534 s->is_jmp = DISAS_WFI;
3535 break;
3536 case 2: /* wfe */
3537 case 4: /* sev */
3538 /* TODO: Implement SEV and WFE. May help SMP performance. */
3539 default: /* nop */
3540 break;
3541 }
3542 }
3543
3544 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3545
3546 static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
3547 {
3548 switch (size) {
3549 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3550 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3551 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3552 default: return 1;
3553 }
3554 return 0;
3555 }
3556
3557 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3558 {
3559 switch (size) {
3560 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3561 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3562 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3563 default: return;
3564 }
3565 }
3566
3567 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3568 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3569 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3570 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3571 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3572
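/* Expand to the signed/unsigned 8/16/32-bit variant of a Neon helper,
 * selected by the enclosing `size' and `u'; the _ENV form is for helpers
 * that also take cpu_env (e.g. the saturating ops). */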
3573 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3574 switch ((size << 1) | u) { \
3575 case 0: \
3576 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3577 break; \
3578 case 1: \
3579 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3580 break; \
3581 case 2: \
3582 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3583 break; \
3584 case 3: \
3585 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3586 break; \
3587 case 4: \
3588 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3589 break; \
3590 case 5: \
3591 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3592 break; \
3593 default: return 1; \
3594 }} while (0)
3595
3596 #define GEN_NEON_INTEGER_OP(name) do { \
3597 switch ((size << 1) | u) { \
3598 case 0: \
3599 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3600 break; \
3601 case 1: \
3602 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3603 break; \
3604 case 2: \
3605 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3606 break; \
3607 case 3: \
3608 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3609 break; \
3610 case 4: \
3611 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3612 break; \
3613 case 5: \
3614 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3615 break; \
3616 default: return 1; \
3617 }} while (0)
3618
3619 static TCGv neon_load_scratch(int scratch)
3620 {
3621 TCGv tmp = tcg_temp_new_i32();
3622 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3623 return tmp;
3624 }
3625
3626 static void neon_store_scratch(int scratch, TCGv var)
3627 {
3628 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3629 tcg_temp_free_i32(var);
3630 }
3631
3632 static inline TCGv neon_get_scalar(int size, int reg)
3633 {
3634 TCGv tmp;
3635 if (size == 1) {
3636 tmp = neon_load_reg(reg & 7, reg >> 4);
3637 if (reg & 8) {
3638 gen_neon_dup_high16(tmp);
3639 } else {
3640 gen_neon_dup_low16(tmp);
3641 }
3642 } else {
3643 tmp = neon_load_reg(reg & 15, reg >> 4);
3644 }
3645 return tmp;
3646 }
3647
3648 static int gen_neon_unzip(int rd, int rm, int size, int q)
3649 {
3650 TCGv tmp, tmp2;
3651 if (size == 3 || (!q && size == 2)) {
3652 return 1;
3653 }
3654 tmp = tcg_const_i32(rd);
3655 tmp2 = tcg_const_i32(rm);
3656 if (q) {
3657 switch (size) {
3658 case 0:
3659 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3660 break;
3661 case 1:
3662 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3663 break;
3664 case 2:
3665 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3666 break;
3667 default:
3668 abort();
3669 }
3670 } else {
3671 switch (size) {
3672 case 0:
3673 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3674 break;
3675 case 1:
3676 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3677 break;
3678 default:
3679 abort();
3680 }
3681 }
3682 tcg_temp_free_i32(tmp);
3683 tcg_temp_free_i32(tmp2);
3684 return 0;
3685 }
3686
3687 static int gen_neon_zip(int rd, int rm, int size, int q)
3688 {
3689 TCGv tmp, tmp2;
3690 if (size == 3 || (!q && size == 2)) {
3691 return 1;
3692 }
3693 tmp = tcg_const_i32(rd);
3694 tmp2 = tcg_const_i32(rm);
3695 if (q) {
3696 switch (size) {
3697 case 0:
3698 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3699 break;
3700 case 1:
3701 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3702 break;
3703 case 2:
3704 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3705 break;
3706 default:
3707 abort();
3708 }
3709 } else {
3710 switch (size) {
3711 case 0:
3712 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3713 break;
3714 case 1:
3715 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3716 break;
3717 default:
3718 abort();
3719 }
3720 }
3721 tcg_temp_free_i32(tmp);
3722 tcg_temp_free_i32(tmp2);
3723 return 0;
3724 }
3725
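/* 2x2 transpose of adjacent element pairs within 32-bit chunks (VTRN):
 * on return t0 holds the even-indexed elements of both inputs and t1
 * the odd-indexed elements. */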
3726 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3727 {
3728 TCGv rd, tmp;
3729
3730 rd = tcg_temp_new_i32();
3731 tmp = tcg_temp_new_i32();
3732
3733 tcg_gen_shli_i32(rd, t0, 8);
3734 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3735 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3736 tcg_gen_or_i32(rd, rd, tmp);
3737
3738 tcg_gen_shri_i32(t1, t1, 8);
3739 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3740 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3741 tcg_gen_or_i32(t1, t1, tmp);
3742 tcg_gen_mov_i32(t0, rd);
3743
3744 tcg_temp_free_i32(tmp);
3745 tcg_temp_free_i32(rd);
3746 }
3747
3748 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3749 {
3750 TCGv rd, tmp;
3751
3752 rd = tcg_temp_new_i32();
3753 tmp = tcg_temp_new_i32();
3754
3755 tcg_gen_shli_i32(rd, t0, 16);
3756 tcg_gen_andi_i32(tmp, t1, 0xffff);
3757 tcg_gen_or_i32(rd, rd, tmp);
3758 tcg_gen_shri_i32(t1, t1, 16);
3759 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3760 tcg_gen_or_i32(t1, t1, tmp);
3761 tcg_gen_mov_i32(t0, rd);
3762
3763 tcg_temp_free_i32(tmp);
3764 tcg_temp_free_i32(rd);
3765 }
3766
3767
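/* Properties of the VLDn/VSTn multiple-structures forms, indexed by the
 * op field: the number of D registers accessed, the element interleave
 * factor and the spacing between the registers written. */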
3768 static struct {
3769 int nregs;
3770 int interleave;
3771 int spacing;
3772 } neon_ls_element_type[11] = {
3773 {4, 4, 1},
3774 {4, 4, 2},
3775 {4, 1, 1},
3776 {4, 2, 1},
3777 {3, 3, 1},
3778 {3, 3, 2},
3779 {3, 1, 1},
3780 {1, 1, 1},
3781 {2, 2, 1},
3782 {2, 2, 2},
3783 {2, 1, 1}
3784 };
3785
3786 /* Translate a NEON load/store element instruction. Return nonzero if the
3787 instruction is invalid. */
3788 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3789 {
3790 int rd, rn, rm;
3791 int op;
3792 int nregs;
3793 int interleave;
3794 int spacing;
3795 int stride;
3796 int size;
3797 int reg;
3798 int pass;
3799 int load;
3800 int shift;
3801 int n;
3802 TCGv addr;
3803 TCGv tmp;
3804 TCGv tmp2;
3805 TCGv_i64 tmp64;
3806
3807 if (!s->vfp_enabled)
3808 return 1;
3809 VFP_DREG_D(rd, insn);
3810 rn = (insn >> 16) & 0xf;
3811 rm = insn & 0xf;
3812 load = (insn & (1 << 21)) != 0;
3813 if ((insn & (1 << 23)) == 0) {
3814 /* Load store all elements. */
3815 op = (insn >> 8) & 0xf;
3816 size = (insn >> 6) & 3;
3817 if (op > 10)
3818 return 1;
3819 nregs = neon_ls_element_type[op].nregs;
3820 interleave = neon_ls_element_type[op].interleave;
3821 spacing = neon_ls_element_type[op].spacing;
3822 if (size == 3 && (interleave | spacing) != 1)
3823 return 1;
3824 addr = tcg_temp_new_i32();
3825 load_reg_var(s, addr, rn);
3826 stride = (1 << size) * interleave;
3827 for (reg = 0; reg < nregs; reg++) {
3828 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3829 load_reg_var(s, addr, rn);
3830 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3831 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3832 load_reg_var(s, addr, rn);
3833 tcg_gen_addi_i32(addr, addr, 1 << size);
3834 }
3835 if (size == 3) {
3836 if (load) {
3837 tmp64 = gen_ld64(addr, IS_USER(s));
3838 neon_store_reg64(tmp64, rd);
3839 tcg_temp_free_i64(tmp64);
3840 } else {
3841 tmp64 = tcg_temp_new_i64();
3842 neon_load_reg64(tmp64, rd);
3843 gen_st64(tmp64, addr, IS_USER(s));
3844 }
3845 tcg_gen_addi_i32(addr, addr, stride);
3846 } else {
3847 for (pass = 0; pass < 2; pass++) {
3848 if (size == 2) {
3849 if (load) {
3850 tmp = gen_ld32(addr, IS_USER(s));
3851 neon_store_reg(rd, pass, tmp);
3852 } else {
3853 tmp = neon_load_reg(rd, pass);
3854 gen_st32(tmp, addr, IS_USER(s));
3855 }
3856 tcg_gen_addi_i32(addr, addr, stride);
3857 } else if (size == 1) {
3858 if (load) {
3859 tmp = gen_ld16u(addr, IS_USER(s));
3860 tcg_gen_addi_i32(addr, addr, stride);
3861 tmp2 = gen_ld16u(addr, IS_USER(s));
3862 tcg_gen_addi_i32(addr, addr, stride);
3863 tcg_gen_shli_i32(tmp2, tmp2, 16);
3864 tcg_gen_or_i32(tmp, tmp, tmp2);
3865 tcg_temp_free_i32(tmp2);
3866 neon_store_reg(rd, pass, tmp);
3867 } else {
3868 tmp = neon_load_reg(rd, pass);
3869 tmp2 = tcg_temp_new_i32();
3870 tcg_gen_shri_i32(tmp2, tmp, 16);
3871 gen_st16(tmp, addr, IS_USER(s));
3872 tcg_gen_addi_i32(addr, addr, stride);
3873 gen_st16(tmp2, addr, IS_USER(s));
3874 tcg_gen_addi_i32(addr, addr, stride);
3875 }
3876 } else /* size == 0 */ {
3877 if (load) {
3878 TCGV_UNUSED(tmp2);
3879 for (n = 0; n < 4; n++) {
3880 tmp = gen_ld8u(addr, IS_USER(s));
3881 tcg_gen_addi_i32(addr, addr, stride);
3882 if (n == 0) {
3883 tmp2 = tmp;
3884 } else {
3885 tcg_gen_shli_i32(tmp, tmp, n * 8);
3886 tcg_gen_or_i32(tmp2, tmp2, tmp);
3887 tcg_temp_free_i32(tmp);
3888 }
3889 }
3890 neon_store_reg(rd, pass, tmp2);
3891 } else {
3892 tmp2 = neon_load_reg(rd, pass);
3893 for (n = 0; n < 4; n++) {
3894 tmp = tcg_temp_new_i32();
3895 if (n == 0) {
3896 tcg_gen_mov_i32(tmp, tmp2);
3897 } else {
3898 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3899 }
3900 gen_st8(tmp, addr, IS_USER(s));
3901 tcg_gen_addi_i32(addr, addr, stride);
3902 }
3903 tcg_temp_free_i32(tmp2);
3904 }
3905 }
3906 }
3907 }
3908 rd += spacing;
3909 }
3910 tcg_temp_free_i32(addr);
3911 stride = nregs * 8;
3912 } else {
3913 size = (insn >> 10) & 3;
3914 if (size == 3) {
3915 /* Load single element to all lanes. */
3916 int a = (insn >> 4) & 1;
3917 if (!load) {
3918 return 1;
3919 }
3920 size = (insn >> 6) & 3;
3921 nregs = ((insn >> 8) & 3) + 1;
3922
3923 if (size == 3) {
3924 if (nregs != 4 || a == 0) {
3925 return 1;
3926 }
3927 /* For VLD4, size == 3 and a == 1 mean 32 bits at 16-byte alignment. */
3928 size = 2;
3929 }
3930 if (nregs == 1 && a == 1 && size == 0) {
3931 return 1;
3932 }
3933 if (nregs == 3 && a == 1) {
3934 return 1;
3935 }
3936 addr = tcg_temp_new_i32();
3937 load_reg_var(s, addr, rn);
3938 if (nregs == 1) {
3939 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3940 tmp = gen_load_and_replicate(s, addr, size);
3941 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3942 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3943 if (insn & (1 << 5)) {
3944 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3945 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3946 }
3947 tcg_temp_free_i32(tmp);
3948 } else {
3949 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3950 stride = (insn & (1 << 5)) ? 2 : 1;
3951 for (reg = 0; reg < nregs; reg++) {
3952 tmp = gen_load_and_replicate(s, addr, size);
3953 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3954 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3955 tcg_temp_free_i32(tmp);
3956 tcg_gen_addi_i32(addr, addr, 1 << size);
3957 rd += stride;
3958 }
3959 }
3960 tcg_temp_free_i32(addr);
3961 stride = (1 << size) * nregs;
3962 } else {
3963 /* Single element. */
3964 pass = (insn >> 7) & 1;
3965 switch (size) {
3966 case 0:
3967 shift = ((insn >> 5) & 3) * 8;
3968 stride = 1;
3969 break;
3970 case 1:
3971 shift = ((insn >> 6) & 1) * 16;
3972 stride = (insn & (1 << 5)) ? 2 : 1;
3973 break;
3974 case 2:
3975 shift = 0;
3976 stride = (insn & (1 << 6)) ? 2 : 1;
3977 break;
3978 default:
3979 abort();
3980 }
3981 nregs = ((insn >> 8) & 3) + 1;
3982 addr = tcg_temp_new_i32();
3983 load_reg_var(s, addr, rn);
3984 for (reg = 0; reg < nregs; reg++) {
3985 if (load) {
3986 switch (size) {
3987 case 0:
3988 tmp = gen_ld8u(addr, IS_USER(s));
3989 break;
3990 case 1:
3991 tmp = gen_ld16u(addr, IS_USER(s));
3992 break;
3993 case 2:
3994 tmp = gen_ld32(addr, IS_USER(s));
3995 break;
3996 default: /* Avoid compiler warnings. */
3997 abort();
3998 }
3999 if (size != 2) {
4000 tmp2 = neon_load_reg(rd, pass);
4001 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
4002 tcg_temp_free_i32(tmp2);
4003 }
4004 neon_store_reg(rd, pass, tmp);
4005 } else { /* Store */
4006 tmp = neon_load_reg(rd, pass);
4007 if (shift)
4008 tcg_gen_shri_i32(tmp, tmp, shift);
4009 switch (size) {
4010 case 0:
4011 gen_st8(tmp, addr, IS_USER(s));
4012 break;
4013 case 1:
4014 gen_st16(tmp, addr, IS_USER(s));
4015 break;
4016 case 2:
4017 gen_st32(tmp, addr, IS_USER(s));
4018 break;
4019 }
4020 }
4021 rd += stride;
4022 tcg_gen_addi_i32(addr, addr, 1 << size);
4023 }
4024 tcg_temp_free_i32(addr);
4025 stride = nregs * (1 << size);
4026 }
4027 }
4028 if (rm != 15) {
4029 TCGv base;
4030
4031 base = load_reg(s, rn);
4032 if (rm == 13) {
4033 tcg_gen_addi_i32(base, base, stride);
4034 } else {
4035 TCGv index;
4036 index = load_reg(s, rm);
4037 tcg_gen_add_i32(base, base, index);
4038 tcg_temp_free_i32(index);
4039 }
4040 store_reg(s, rn, base);
4041 }
4042 return 0;
4043 }
4044
4045 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4046 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4047 {
4048 tcg_gen_and_i32(t, t, c);
4049 tcg_gen_andc_i32(f, f, c);
4050 tcg_gen_or_i32(dest, t, f);
4051 }
4052
4053 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4054 {
4055 switch (size) {
4056 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4057 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4058 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4059 default: abort();
4060 }
4061 }
4062
4063 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4064 {
4065 switch (size) {
4066 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4067 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4068 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4069 default: abort();
4070 }
4071 }
4072
4073 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4074 {
4075 switch (size) {
4076 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4077 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4078 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4079 default: abort();
4080 }
4081 }
4082
4083 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4084 {
4085 switch (size) {
4086 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4087 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4088 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4089 default: abort();
4090 }
4091 }
4092
4093 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4094 int q, int u)
4095 {
4096 if (q) {
4097 if (u) {
4098 switch (size) {
4099 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4100 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4101 default: abort();
4102 }
4103 } else {
4104 switch (size) {
4105 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4106 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4107 default: abort();
4108 }
4109 }
4110 } else {
4111 if (u) {
4112 switch (size) {
4113 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4114 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4115 default: abort();
4116 }
4117 } else {
4118 switch (size) {
4119 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4120 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4121 default: abort();
4122 }
4123 }
4124 }
4125 }
4126
4127 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4128 {
4129 if (u) {
4130 switch (size) {
4131 case 0: gen_helper_neon_widen_u8(dest, src); break;
4132 case 1: gen_helper_neon_widen_u16(dest, src); break;
4133 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4134 default: abort();
4135 }
4136 } else {
4137 switch (size) {
4138 case 0: gen_helper_neon_widen_s8(dest, src); break;
4139 case 1: gen_helper_neon_widen_s16(dest, src); break;
4140 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4141 default: abort();
4142 }
4143 }
4144 tcg_temp_free_i32(src);
4145 }
4146
4147 static inline void gen_neon_addl(int size)
4148 {
4149 switch (size) {
4150 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4151 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4152 case 2: tcg_gen_add_i64(CPU_V001); break;
4153 default: abort();
4154 }
4155 }
4156
4157 static inline void gen_neon_subl(int size)
4158 {
4159 switch (size) {
4160 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4161 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4162 case 2: tcg_gen_sub_i64(CPU_V001); break;
4163 default: abort();
4164 }
4165 }
4166
4167 static inline void gen_neon_negl(TCGv_i64 var, int size)
4168 {
4169 switch (size) {
4170 case 0: gen_helper_neon_negl_u16(var, var); break;
4171 case 1: gen_helper_neon_negl_u32(var, var); break;
4172 case 2: gen_helper_neon_negl_u64(var, var); break;
4173 default: abort();
4174 }
4175 }
4176
4177 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4178 {
4179 switch (size) {
4180 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4181 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4182 default: abort();
4183 }
4184 }
4185
4186 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4187 {
4188 TCGv_i64 tmp;
4189
4190 switch ((size << 1) | u) {
4191 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4192 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4193 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4194 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4195 case 4:
4196 tmp = gen_muls_i64_i32(a, b);
4197 tcg_gen_mov_i64(dest, tmp);
4198 tcg_temp_free_i64(tmp);
4199 break;
4200 case 5:
4201 tmp = gen_mulu_i64_i32(a, b);
4202 tcg_gen_mov_i64(dest, tmp);
4203 tcg_temp_free_i64(tmp);
4204 break;
4205 default: abort();
4206 }
4207
4208 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters,
4209 so free them here. */
4210 if (size < 2) {
4211 tcg_temp_free_i32(a);
4212 tcg_temp_free_i32(b);
4213 }
4214 }
4215
4216 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4217 {
4218 if (op) {
4219 if (u) {
4220 gen_neon_unarrow_sats(size, dest, src);
4221 } else {
4222 gen_neon_narrow(size, dest, src);
4223 }
4224 } else {
4225 if (u) {
4226 gen_neon_narrow_satu(size, dest, src);
4227 } else {
4228 gen_neon_narrow_sats(size, dest, src);
4229 }
4230 }
4231 }
4232
4233 /* Translate a NEON data processing instruction. Return nonzero if the
4234 instruction is invalid.
4235 We process data in a mixture of 32-bit and 64-bit chunks.
4236 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4237
4238 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4239 {
4240 int op;
4241 int q;
4242 int rd, rn, rm;
4243 int size;
4244 int shift;
4245 int pass;
4246 int count;
4247 int pairwise;
4248 int u;
4249 int n;
4250 uint32_t imm, mask;
4251 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4252 TCGv_i64 tmp64;
4253
4254 if (!s->vfp_enabled)
4255 return 1;
4256 q = (insn & (1 << 6)) != 0;
4257 u = (insn >> 24) & 1;
4258 VFP_DREG_D(rd, insn);
4259 VFP_DREG_N(rn, insn);
4260 VFP_DREG_M(rm, insn);
4261 size = (insn >> 20) & 3;
4262 if ((insn & (1 << 23)) == 0) {
4263 /* Three register same length. */
4264 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4265 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4266 || op == 10 || op == 11 || op == 16)) {
4267 /* 64-bit element instructions. */
4268 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4269 neon_load_reg64(cpu_V0, rn + pass);
4270 neon_load_reg64(cpu_V1, rm + pass);
4271 switch (op) {
4272 case 1: /* VQADD */
4273 if (u) {
4274 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4275 cpu_V0, cpu_V1);
4276 } else {
4277 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4278 cpu_V0, cpu_V1);
4279 }
4280 break;
4281 case 5: /* VQSUB */
4282 if (u) {
4283 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4284 cpu_V0, cpu_V1);
4285 } else {
4286 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4287 cpu_V0, cpu_V1);
4288 }
4289 break;
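/* For VSHL and friends the shift count comes from Vn and the
   value to shift from Vm, so the helper operands below are
   deliberately reversed. */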
4290 case 8: /* VSHL */
4291 if (u) {
4292 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4293 } else {
4294 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4295 }
4296 break;
4297 case 9: /* VQSHL */
4298 if (u) {
4299 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4300 cpu_V1, cpu_V0);
4301 } else {
4302 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4303 cpu_V1, cpu_V0);
4304 }
4305 break;
4306 case 10: /* VRSHL */
4307 if (u) {
4308 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4309 } else {
4310 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4311 }
4312 break;
4313 case 11: /* VQRSHL */
4314 if (u) {
4315 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4316 cpu_V1, cpu_V0);
4317 } else {
4318 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4319 cpu_V1, cpu_V0);
4320 }
4321 break;
4322 case 16:
4323 if (u) {
4324 tcg_gen_sub_i64(CPU_V001);
4325 } else {
4326 tcg_gen_add_i64(CPU_V001);
4327 }
4328 break;
4329 default:
4330 abort();
4331 }
4332 neon_store_reg64(cpu_V0, rd + pass);
4333 }
4334 return 0;
4335 }
4336 switch (op) {
4337 case 8: /* VSHL */
4338 case 9: /* VQSHL */
4339 case 10: /* VRSHL */
4340 case 11: /* VQRSHL */
4341 {
4342 int rtmp;
4343 /* Shift instruction operands are reversed. */
4344 rtmp = rn;
4345 rn = rm;
4346 rm = rtmp;
4347 pairwise = 0;
4348 }
4349 break;
4350 case 20: /* VPMAX */
4351 case 21: /* VPMIN */
4352 case 23: /* VPADD */
4353 pairwise = 1;
4354 break;
4355 case 26: /* VPADD (float) */
4356 pairwise = (u && size < 2);
4357 break;
4358 case 30: /* VPMIN/VPMAX (float) */
4359 pairwise = u;
4360 break;
4361 default:
4362 pairwise = 0;
4363 break;
4364 }
4365
4366 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4367
4368 if (pairwise) {
4369 /* Pairwise. */
4370 if (q)
4371 n = (pass & 1) * 2;
4372 else
4373 n = 0;
4374 if (pass < q + 1) {
4375 tmp = neon_load_reg(rn, n);
4376 tmp2 = neon_load_reg(rn, n + 1);
4377 } else {
4378 tmp = neon_load_reg(rm, n);
4379 tmp2 = neon_load_reg(rm, n + 1);
4380 }
4381 } else {
4382 /* Elementwise. */
4383 tmp = neon_load_reg(rn, pass);
4384 tmp2 = neon_load_reg(rm, pass);
4385 }
4386 switch (op) {
4387 case 0: /* VHADD */
4388 GEN_NEON_INTEGER_OP(hadd);
4389 break;
4390 case 1: /* VQADD */
4391 GEN_NEON_INTEGER_OP_ENV(qadd);
4392 break;
4393 case 2: /* VRHADD */
4394 GEN_NEON_INTEGER_OP(rhadd);
4395 break;
4396 case 3: /* Logic ops. */
4397 switch ((u << 2) | size) {
4398 case 0: /* VAND */
4399 tcg_gen_and_i32(tmp, tmp, tmp2);
4400 break;
4401 case 1: /* BIC */
4402 tcg_gen_andc_i32(tmp, tmp, tmp2);
4403 break;
4404 case 2: /* VORR */
4405 tcg_gen_or_i32(tmp, tmp, tmp2);
4406 break;
4407 case 3: /* VORN */
4408 tcg_gen_orc_i32(tmp, tmp, tmp2);
4409 break;
4410 case 4: /* VEOR */
4411 tcg_gen_xor_i32(tmp, tmp, tmp2);
4412 break;
4413 case 5: /* VBSL */
4414 tmp3 = neon_load_reg(rd, pass);
4415 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4416 tcg_temp_free_i32(tmp3);
4417 break;
4418 case 6: /* VBIT */
4419 tmp3 = neon_load_reg(rd, pass);
4420 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4421 tcg_temp_free_i32(tmp3);
4422 break;
4423 case 7: /* VBIF */
4424 tmp3 = neon_load_reg(rd, pass);
4425 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4426 tcg_temp_free_i32(tmp3);
4427 break;
4428 }
4429 break;
4430 case 4: /* VHSUB */
4431 GEN_NEON_INTEGER_OP(hsub);
4432 break;
4433 case 5: /* VQSUB */
4434 GEN_NEON_INTEGER_OP_ENV(qsub);
4435 break;
4436 case 6: /* VCGT */
4437 GEN_NEON_INTEGER_OP(cgt);
4438 break;
4439 case 7: /* VCGE */
4440 GEN_NEON_INTEGER_OP(cge);
4441 break;
4442 case 8: /* VSHL */
4443 GEN_NEON_INTEGER_OP(shl);
4444 break;
4445 case 9: /* VQSHL */
4446 GEN_NEON_INTEGER_OP_ENV(qshl);
4447 break;
4448 case 10: /* VRSHL */
4449 GEN_NEON_INTEGER_OP(rshl);
4450 break;
4451 case 11: /* VQRSHL */
4452 GEN_NEON_INTEGER_OP_ENV(qrshl);
4453 break;
4454 case 12: /* VMAX */
4455 GEN_NEON_INTEGER_OP(max);
4456 break;
4457 case 13: /* VMIN */
4458 GEN_NEON_INTEGER_OP(min);
4459 break;
4460 case 14: /* VABD */
4461 GEN_NEON_INTEGER_OP(abd);
4462 break;
4463 case 15: /* VABA */
4464 GEN_NEON_INTEGER_OP(abd);
4465 tcg_temp_free_i32(tmp2);
4466 tmp2 = neon_load_reg(rd, pass);
4467 gen_neon_add(size, tmp, tmp2);
4468 break;
4469 case 16:
4470 if (!u) { /* VADD */
4471 if (gen_neon_add(size, tmp, tmp2))
4472 return 1;
4473 } else { /* VSUB */
4474 switch (size) {
4475 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4476 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4477 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4478 default: return 1;
4479 }
4480 }
4481 break;
4482 case 17:
4483 if (!u) { /* VTST */
4484 switch (size) {
4485 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4486 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4487 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4488 default: return 1;
4489 }
4490 } else { /* VCEQ */
4491 switch (size) {
4492 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4493 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4494 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4495 default: return 1;
4496 }
4497 }
4498 break;
4499 case 18: /* Multiply. */
4500 switch (size) {
4501 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4502 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4503 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4504 default: return 1;
4505 }
4506 tcg_temp_free_i32(tmp2);
4507 tmp2 = neon_load_reg(rd, pass);
4508 if (u) { /* VMLS */
4509 gen_neon_rsb(size, tmp, tmp2);
4510 } else { /* VMLA */
4511 gen_neon_add(size, tmp, tmp2);
4512 }
4513 break;
4514 case 19: /* VMUL */
4515 if (u) { /* polynomial */
4516 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4517 } else { /* Integer */
4518 switch (size) {
4519 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4520 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4521 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4522 default: return 1;
4523 }
4524 }
4525 break;
4526 case 20: /* VPMAX */
4527 GEN_NEON_INTEGER_OP(pmax);
4528 break;
4529 case 21: /* VPMIN */
4530 GEN_NEON_INTEGER_OP(pmin);
4531 break;
4532 case 22: /* Multiply high. */
4533 if (!u) { /* VQDMULH */
4534 switch (size) {
4535 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4536 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4537 default: return 1;
4538 }
4539 } else { /* VQRDMULH */
4540 switch (size) {
4541 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4542 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4543 default: return 1;
4544 }
4545 }
4546 break;
4547 case 23: /* VPADD */
4548 if (u)
4549 return 1;
4550 switch (size) {
4551 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4552 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4553 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4554 default: return 1;
4555 }
4556 break;
4557 case 26: /* Floating point arithmetic. */
4558 switch ((u << 2) | size) {
4559 case 0: /* VADD */
4560 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4561 break;
4562 case 2: /* VSUB */
4563 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4564 break;
4565 case 4: /* VPADD */
4566 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4567 break;
4568 case 6: /* VABD */
4569 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4570 break;
4571 default:
4572 return 1;
4573 }
4574 break;
4575 case 27: /* Float multiply. */
4576 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4577 if (!u) {
4578 tcg_temp_free_i32(tmp2);
4579 tmp2 = neon_load_reg(rd, pass);
4580 if (size == 0) {
4581 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4582 } else {
4583 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4584 }
4585 }
4586 break;
4587 case 28: /* Float compare. */
4588 if (!u) {
4589 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4590 } else {
4591 if (size == 0)
4592 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4593 else
4594 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4595 }
4596 break;
4597 case 29: /* Float compare absolute. */
4598 if (!u)
4599 return 1;
4600 if (size == 0)
4601 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4602 else
4603 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4604 break;
4605 case 30: /* Float min/max. */
4606 if (size == 0)
4607 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4608 else
4609 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4610 break;
4611 case 31:
4612 if (size == 0)
4613 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4614 else
4615 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4616 break;
4617 default:
4618 abort();
4619 }
4620 tcg_temp_free_i32(tmp2);
4621
4622 /* Save the result. For elementwise operations we can put it
4623 straight into the destination register. For pairwise operations
4624 we have to be careful to avoid clobbering the source operands. */
4625 if (pairwise && rd == rm) {
4626 neon_store_scratch(pass, tmp);
4627 } else {
4628 neon_store_reg(rd, pass, tmp);
4629 }
4630
4631 } /* for pass */
4632 if (pairwise && rd == rm) {
4633 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4634 tmp = neon_load_scratch(pass);
4635 neon_store_reg(rd, pass, tmp);
4636 }
4637 }
4638 /* End of 3 register same size operations. */
4639 } else if (insn & (1 << 4)) {
4640 if ((insn & 0x00380080) != 0) {
4641 /* Two registers and shift. */
4642 op = (insn >> 8) & 0xf;
4643 if (insn & (1 << 7)) {
4644 /* 64-bit shift. */
4645 size = 3;
4646 } else {
4647 size = 2;
4648 while ((insn & (1 << (size + 19))) == 0)
4649 size--;
4650 }
4651 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4652 /* To avoid excessive duplication of ops we implement shift
4653 by immediate using the variable shift operations. */
4654 if (op < 8) {
4655 /* Shift by immediate:
4656 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4657 /* Right shifts are encoded as N - shift, where N is the
4658 element size in bits. */
4659 if (op <= 4)
4660 shift = shift - (1 << (size + 3));
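/* The adjusted count is negative for right shifts; the variable
   shift helpers interpret a negative count as a right shift. */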
4661 if (size == 3) {
4662 count = q + 1;
4663 } else {
4664 count = q ? 4: 2;
4665 }
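/* For sizes 0 and 1, splat the element shift count across a
   32-bit word so a single helper call shifts every lane. */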
4666 switch (size) {
4667 case 0:
4668 imm = (uint8_t) shift;
4669 imm |= imm << 8;
4670 imm |= imm << 16;
4671 break;
4672 case 1:
4673 imm = (uint16_t) shift;
4674 imm |= imm << 16;
4675 break;
4676 case 2:
4677 case 3:
4678 imm = shift;
4679 break;
4680 default:
4681 abort();
4682 }
4683
4684 for (pass = 0; pass < count; pass++) {
4685 if (size == 3) {
4686 neon_load_reg64(cpu_V0, rm + pass);
4687 tcg_gen_movi_i64(cpu_V1, imm);
4688 switch (op) {
4689 case 0: /* VSHR */
4690 case 1: /* VSRA */
4691 if (u)
4692 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4693 else
4694 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4695 break;
4696 case 2: /* VRSHR */
4697 case 3: /* VRSRA */
4698 if (u)
4699 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4700 else
4701 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4702 break;
4703 case 4: /* VSRI */
4704 if (!u)
4705 return 1;
4706 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4707 break;
4708 case 5: /* VSHL, VSLI */
4709 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4710 break;
4711 case 6: /* VQSHLU */
4712 if (u) {
4713 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4714 cpu_V0, cpu_V1);
4715 } else {
4716 return 1;
4717 }
4718 break;
4719 case 7: /* VQSHL */
4720 if (u) {
4721 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4722 cpu_V0, cpu_V1);
4723 } else {
4724 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4725 cpu_V0, cpu_V1);
4726 }
4727 break;
4728 }
4729 if (op == 1 || op == 3) {
4730 /* Accumulate. */
4731 neon_load_reg64(cpu_V1, rd + pass);
4732 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4733 } else if (op == 4 || (op == 5 && u)) {
4734 /* Insert */
4735 neon_load_reg64(cpu_V1, rd + pass);
4736 uint64_t mask;
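/* shift is negative for VSRI; mask selects the bits of the
   result that come from the shifted value, the rest are kept
   from the destination. */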
4737 if (shift < -63 || shift > 63) {
4738 mask = 0;
4739 } else {
4740 if (op == 4) {
4741 mask = 0xffffffffffffffffull >> -shift;
4742 } else {
4743 mask = 0xffffffffffffffffull << shift;
4744 }
4745 }
4746 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4747 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
4748 }
4749 neon_store_reg64(cpu_V0, rd + pass);
4750 } else { /* size < 3 */
4751 /* Operands in T0 and T1. */
4752 tmp = neon_load_reg(rm, pass);
4753 tmp2 = tcg_temp_new_i32();
4754 tcg_gen_movi_i32(tmp2, imm);
4755 switch (op) {
4756 case 0: /* VSHR */
4757 case 1: /* VSRA */
4758 GEN_NEON_INTEGER_OP(shl);
4759 break;
4760 case 2: /* VRSHR */
4761 case 3: /* VRSRA */
4762 GEN_NEON_INTEGER_OP(rshl);
4763 break;
4764 case 4: /* VSRI */
4765 if (!u)
4766 return 1;
4767 GEN_NEON_INTEGER_OP(shl);
4768 break;
4769 case 5: /* VSHL, VSLI */
4770 switch (size) {
4771 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4772 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4773 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4774 default: return 1;
4775 }
4776 break;
4777 case 6: /* VQSHLU */
4778 if (!u) {
4779 return 1;
4780 }
4781 switch (size) {
4782 case 0:
4783 gen_helper_neon_qshlu_s8(tmp, cpu_env,
4784 tmp, tmp2);
4785 break;
4786 case 1:
4787 gen_helper_neon_qshlu_s16(tmp, cpu_env,
4788 tmp, tmp2);
4789 break;
4790 case 2:
4791 gen_helper_neon_qshlu_s32(tmp, cpu_env,
4792 tmp, tmp2);
4793 break;
4794 default:
4795 return 1;
4796 }
4797 break;
4798 case 7: /* VQSHL */
4799 GEN_NEON_INTEGER_OP_ENV(qshl);
4800 break;
4801 }
4802 tcg_temp_free_i32(tmp2);
4803
4804 if (op == 1 || op == 3) {
4805 /* Accumulate. */
4806 tmp2 = neon_load_reg(rd, pass);
4807 gen_neon_add(size, tmp, tmp2);
4808 tcg_temp_free_i32(tmp2);
4809 } else if (op == 4 || (op == 5 && u)) {
4810 /* Insert */
4811 switch (size) {
4812 case 0:
4813 if (op == 4)
4814 mask = 0xff >> -shift;
4815 else
4816 mask = (uint8_t)(0xff << shift);
4817 mask |= mask << 8;
4818 mask |= mask << 16;
4819 break;
4820 case 1:
4821 if (op == 4)
4822 mask = 0xffff >> -shift;
4823 else
4824 mask = (uint16_t)(0xffff << shift);
4825 mask |= mask << 16;
4826 break;
4827 case 2:
4828 if (shift < -31 || shift > 31) {
4829 mask = 0;
4830 } else {
4831 if (op == 4)
4832 mask = 0xffffffffu >> -shift;
4833 else
4834 mask = 0xffffffffu << shift;
4835 }
4836 break;
4837 default:
4838 abort();
4839 }
4840 tmp2 = neon_load_reg(rd, pass);
4841 tcg_gen_andi_i32(tmp, tmp, mask);
4842 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
4843 tcg_gen_or_i32(tmp, tmp, tmp2);
4844 tcg_temp_free_i32(tmp2);
4845 }
4846 neon_store_reg(rd, pass, tmp);
4847 }
4848 } /* for pass */
4849 } else if (op < 10) {
4850 /* Shift by immediate and narrow:
4851 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4852 int input_unsigned = (op == 8) ? !u : u;
4853
4854 shift = shift - (1 << (size + 3));
4855 size++;
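/* The source elements are twice the width of the results, so
   bump size to describe the inputs. */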
4856 if (size == 3) {
4857 tmp64 = tcg_const_i64(shift);
4858 neon_load_reg64(cpu_V0, rm);
4859 neon_load_reg64(cpu_V1, rm + 1);
4860 for (pass = 0; pass < 2; pass++) {
4861 TCGv_i64 in;
4862 if (pass == 0) {
4863 in = cpu_V0;
4864 } else {
4865 in = cpu_V1;
4866 }
4867 if (q) {
4868 if (input_unsigned) {
4869 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
4870 } else {
4871 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
4872 }
4873 } else {
4874 if (input_unsigned) {
4875 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
4876 } else {
4877 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
4878 }
4879 }
4880 tmp = tcg_temp_new_i32();
4881 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4882 neon_store_reg(rd, pass, tmp);
4883 } /* for pass */
4884 tcg_temp_free_i64(tmp64);
4885 } else {
4886 if (size == 1) {
4887 imm = (uint16_t)shift;
4888 imm |= imm << 16;
4889 } else {
4890 /* size == 2 */
4891 imm = (uint32_t)shift;
4892 }
4893 tmp2 = tcg_const_i32(imm);
4894 tmp4 = neon_load_reg(rm + 1, 0);
4895 tmp5 = neon_load_reg(rm + 1, 1);
4896 for (pass = 0; pass < 2; pass++) {
4897 if (pass == 0) {
4898 tmp = neon_load_reg(rm, 0);
4899 } else {
4900 tmp = tmp4;
4901 }
4902 gen_neon_shift_narrow(size, tmp, tmp2, q,
4903 input_unsigned);
4904 if (pass == 0) {
4905 tmp3 = neon_load_reg(rm, 1);
4906 } else {
4907 tmp3 = tmp5;
4908 }
4909 gen_neon_shift_narrow(size, tmp3, tmp2, q,
4910 input_unsigned);
4911 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4912 tcg_temp_free_i32(tmp);
4913 tcg_temp_free_i32(tmp3);
4914 tmp = tcg_temp_new_i32();
4915 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4916 neon_store_reg(rd, pass, tmp);
4917 } /* for pass */
4918 tcg_temp_free_i32(tmp2);
4919 }
4920 } else if (op == 10) {
4921 /* VSHLL */
4922 if (q || size == 3)
4923 return 1;
4924 tmp = neon_load_reg(rm, 0);
4925 tmp2 = neon_load_reg(rm, 1);
4926 for (pass = 0; pass < 2; pass++) {
4927 if (pass == 1)
4928 tmp = tmp2;
4929
4930 gen_neon_widen(cpu_V0, tmp, size, u);
4931
4932 if (shift != 0) {
4933 /* The shift is less than the width of the source
4934 type, so we can just shift the whole register. */
4935 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4936 /* After shifting we need to clear the potential
4937 * overflow bits resulting from the high (left)
4938 * bits of each narrow input appearing as the
4939 * low (right) bits of its more significant
4940 * neighbour in the widened result. */
4941 if (size < 2 || !u) {
4942 uint64_t imm64;
4943 if (size == 0) {
4944 imm = (0xffu >> (8 - shift));
4945 imm |= imm << 16;
4946 } else if (size == 1) {
4947 imm = 0xffff >> (16 - shift);
4948 } else {
4949 /* size == 2 */
4950 imm = 0xffffffff >> (32 - shift);
4951 }
4952 if (size < 2) {
4953 imm64 = imm | (((uint64_t)imm) << 32);
4954 } else {
4955 imm64 = imm;
4956 }
4957 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4958 }
4959 }
4960 neon_store_reg64(cpu_V0, rd + pass);
4961 }
4962 } else if (op >= 14) {
4963 /* VCVT fixed-point. */
4964 /* We have already masked out the must-be-1 top bit of imm6,
4965 * hence this 32-shift where the ARM ARM has 64-imm6.
4966 */
4967 shift = 32 - shift;
4968 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4969 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4970 if (!(op & 1)) {
4971 if (u)
4972 gen_vfp_ulto(0, shift);
4973 else
4974 gen_vfp_slto(0, shift);
4975 } else {
4976 if (u)
4977 gen_vfp_toul(0, shift);
4978 else
4979 gen_vfp_tosl(0, shift);
4980 }
4981 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4982 }
4983 } else {
4984 return 1;
4985 }
4986 } else { /* (insn & 0x00380080) == 0 */
4987 int invert;
4988
4989 op = (insn >> 8) & 0xf;
4990 /* One register and immediate. */
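/* The 8 immediate bits are scattered in the encoding: bit 7 is
   insn[24] (u), bits 6:4 are insn[18:16], bits 3:0 are insn[3:0]. */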
4991 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4992 invert = (insn & (1 << 5)) != 0;
4993 switch (op) {
4994 case 0: case 1:
4995 /* no-op */
4996 break;
4997 case 2: case 3:
4998 imm <<= 8;
4999 break;
5000 case 4: case 5:
5001 imm <<= 16;
5002 break;
5003 case 6: case 7:
5004 imm <<= 24;
5005 break;
5006 case 8: case 9:
5007 imm |= imm << 16;
5008 break;
5009 case 10: case 11:
5010 imm = (imm << 8) | (imm << 24);
5011 break;
5012 case 12:
5013 imm = (imm << 8) | 0xff;
5014 break;
5015 case 13:
5016 imm = (imm << 16) | 0xffff;
5017 break;
5018 case 14:
5019 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5020 if (invert)
5021 imm = ~imm;
5022 break;
5023 case 15:
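/* Float VMOV immediate: expand abcdefgh to the single-precision
   pattern aBbbbbbc defgh000 00000000 00000000, where B = NOT(b). */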
5024 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5025 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5026 break;
5027 }
5028 if (invert)
5029 imm = ~imm;
5030
5031 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5032 if (op & 1 && op < 12) {
5033 tmp = neon_load_reg(rd, pass);
5034 if (invert) {
5035 /* The immediate value has already been inverted, so
5036 BIC becomes AND. */
5037 tcg_gen_andi_i32(tmp, tmp, imm);
5038 } else {
5039 tcg_gen_ori_i32(tmp, tmp, imm);
5040 }
5041 } else {
5042 /* VMOV, VMVN. */
5043 tmp = tcg_temp_new_i32();
5044 if (op == 14 && invert) {
5045 uint32_t val;
5046 val = 0;
5047 for (n = 0; n < 4; n++) {
5048 if (imm & (1 << (n + (pass & 1) * 4)))
5049 val |= 0xff << (n * 8);
5050 }
5051 tcg_gen_movi_i32(tmp, val);
5052 } else {
5053 tcg_gen_movi_i32(tmp, imm);
5054 }
5055 }
5056 neon_store_reg(rd, pass, tmp);
5057 }
5058 }
5059 } else { /* (insn & 0x00800010) == 0x00800000 */
5060 if (size != 3) {
5061 op = (insn >> 8) & 0xf;
5062 if ((insn & (1 << 6)) == 0) {
5063 /* Three registers of different lengths. */
5064 int src1_wide;
5065 int src2_wide;
5066 int prewiden;
5067 /* prewiden, src1_wide, src2_wide */
5068 static const int neon_3reg_wide[16][3] = {
5069 {1, 0, 0}, /* VADDL */
5070 {1, 1, 0}, /* VADDW */
5071 {1, 0, 0}, /* VSUBL */
5072 {1, 1, 0}, /* VSUBW */
5073 {0, 1, 1}, /* VADDHN */
5074 {0, 0, 0}, /* VABAL */
5075 {0, 1, 1}, /* VSUBHN */
5076 {0, 0, 0}, /* VABDL */
5077 {0, 0, 0}, /* VMLAL */
5078 {0, 0, 0}, /* VQDMLAL */
5079 {0, 0, 0}, /* VMLSL */
5080 {0, 0, 0}, /* VQDMLSL */
5081 {0, 0, 0}, /* Integer VMULL */
5082 {0, 0, 0}, /* VQDMULL */
5083 {0, 0, 0} /* Polynomial VMULL */
5084 };
5085
5086 prewiden = neon_3reg_wide[op][0];
5087 src1_wide = neon_3reg_wide[op][1];
5088 src2_wide = neon_3reg_wide[op][2];
5089
5090 if (size == 0 && (op == 9 || op == 11 || op == 13))
5091 return 1;
5092
5093 /* Avoid overlapping operands. Wide source operands are
5094 always aligned so will never overlap with wide
5095 destinations in problematic ways. */
5096 if (rd == rm && !src2_wide) {
5097 tmp = neon_load_reg(rm, 1);
5098 neon_store_scratch(2, tmp);
5099 } else if (rd == rn && !src1_wide) {
5100 tmp = neon_load_reg(rn, 1);
5101 neon_store_scratch(2, tmp);
5102 }
5103 TCGV_UNUSED(tmp3);
5104 for (pass = 0; pass < 2; pass++) {
5105 if (src1_wide) {
5106 neon_load_reg64(cpu_V0, rn + pass);
5107 TCGV_UNUSED(tmp);
5108 } else {
5109 if (pass == 1 && rd == rn) {
5110 tmp = neon_load_scratch(2);
5111 } else {
5112 tmp = neon_load_reg(rn, pass);
5113 }
5114 if (prewiden) {
5115 gen_neon_widen(cpu_V0, tmp, size, u);
5116 }
5117 }
5118 if (src2_wide) {
5119 neon_load_reg64(cpu_V1, rm + pass);
5120 TCGV_UNUSED(tmp2);
5121 } else {
5122 if (pass == 1 && rd == rm) {
5123 tmp2 = neon_load_scratch(2);
5124 } else {
5125 tmp2 = neon_load_reg(rm, pass);
5126 }
5127 if (prewiden) {
5128 gen_neon_widen(cpu_V1, tmp2, size, u);
5129 }
5130 }
5131 switch (op) {
5132 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5133 gen_neon_addl(size);
5134 break;
5135 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5136 gen_neon_subl(size);
5137 break;
5138 case 5: case 7: /* VABAL, VABDL */
5139 switch ((size << 1) | u) {
5140 case 0:
5141 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5142 break;
5143 case 1:
5144 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5145 break;
5146 case 2:
5147 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5148 break;
5149 case 3:
5150 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5151 break;
5152 case 4:
5153 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5154 break;
5155 case 5:
5156 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5157 break;
5158 default: abort();
5159 }
5160 tcg_temp_free_i32(tmp2);
5161 tcg_temp_free_i32(tmp);
5162 break;
5163 case 8: case 9: case 10: case 11: case 12: case 13:
5164 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5165 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5166 break;
5167 case 14: /* Polynomial VMULL */
5168 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5169 tcg_temp_free_i32(tmp2);
5170 tcg_temp_free_i32(tmp);
5171 break;
5172 default: /* 15 is RESERVED. */
5173 return 1;
5174 }
5175 if (op == 13) {
5176 /* VQDMULL */
5177 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5178 neon_store_reg64(cpu_V0, rd + pass);
5179 } else if (op == 5 || (op >= 8 && op <= 11)) {
5180 /* Accumulate. */
5181 neon_load_reg64(cpu_V1, rd + pass);
5182 switch (op) {
5183 case 10: /* VMLSL */
5184 gen_neon_negl(cpu_V0, size);
5185 /* Fall through */
5186 case 5: case 8: /* VABAL, VMLAL */
5187 gen_neon_addl(size);
5188 break;
5189 case 9: case 11: /* VQDMLAL, VQDMLSL */
5190 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5191 if (op == 11) {
5192 gen_neon_negl(cpu_V0, size);
5193 }
5194 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5195 break;
5196 default:
5197 abort();
5198 }
5199 neon_store_reg64(cpu_V0, rd + pass);
5200 } else if (op == 4 || op == 6) {
5201 /* Narrowing operation. */
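/* VADDHN/VSUBHN keep the high half of each double-width result;
   the u variants (VRADDHN/VRSUBHN) round before narrowing. */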
5202 tmp = tcg_temp_new_i32();
5203 if (!u) {
5204 switch (size) {
5205 case 0:
5206 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5207 break;
5208 case 1:
5209 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5210 break;
5211 case 2:
5212 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5213 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5214 break;
5215 default: abort();
5216 }
5217 } else {
5218 switch (size) {
5219 case 0:
5220 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5221 break;
5222 case 1:
5223 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5224 break;
5225 case 2:
5226 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5227 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5228 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5229 break;
5230 default: abort();
5231 }
5232 }
5233 if (pass == 0) {
5234 tmp3 = tmp;
5235 } else {
5236 neon_store_reg(rd, 0, tmp3);
5237 neon_store_reg(rd, 1, tmp);
5238 }
5239 } else {
5240 /* Write back the result. */
5241 neon_store_reg64(cpu_V0, rd + pass);
5242 }
5243 }
5244 } else {
5245 /* Two registers and a scalar. */
5246 switch (op) {
5247 case 0: /* Integer VMLA scalar */
5248 case 1: /* Float VMLA scalar */
5249 case 4: /* Integer VMLS scalar */
5250 case 5: /* Floating point VMLS scalar */
5251 case 8: /* Integer VMUL scalar */
5252 case 9: /* Floating point VMUL scalar */
5253 case 12: /* VQDMULH scalar */
5254 case 13: /* VQRDMULH scalar */
5255 tmp = neon_get_scalar(size, rm);
5256 neon_store_scratch(0, tmp);
5257 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5258 tmp = neon_load_scratch(0);
5259 tmp2 = neon_load_reg(rn, pass);
5260 if (op == 12) {
5261 if (size == 1) {
5262 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5263 } else {
5264 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5265 }
5266 } else if (op == 13) {
5267 if (size == 1) {
5268 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5269 } else {
5270 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5271 }
5272 } else if (op & 1) {
5273 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5274 } else {
5275 switch (size) {
5276 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5277 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5278 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5279 default: return 1;
5280 }
5281 }
5282 tcg_temp_free_i32(tmp2);
5283 if (op < 8) {
5284 /* Accumulate. */
5285 tmp2 = neon_load_reg(rd, pass);
5286 switch (op) {
5287 case 0:
5288 gen_neon_add(size, tmp, tmp2);
5289 break;
5290 case 1:
5291 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5292 break;
5293 case 4:
5294 gen_neon_rsb(size, tmp, tmp2);
5295 break;
5296 case 5:
5297 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5298 break;
5299 default:
5300 abort();
5301 }
5302 tcg_temp_free_i32(tmp2);
5303 }
5304 neon_store_reg(rd, pass, tmp);
5305 }
5306 break;
5307 case 2: /* VMLAL scalar */
5308 case 3: /* VQDMLAL scalar */
5309 case 6: /* VMLSL scalar */
5310 case 7: /* VQDMLSL scalar */
5311 case 10: /* VMULL scalar */
5312 case 11: /* VQDMULL scalar */
5313 if (size == 0 && (op == 3 || op == 7 || op == 11))
5314 return 1;
5315
5316 tmp2 = neon_get_scalar(size, rm);
5317 /* We need a copy of tmp2 because gen_neon_mull
5318 * deletes it during pass 0. */
5319 tmp4 = tcg_temp_new_i32();
5320 tcg_gen_mov_i32(tmp4, tmp2);
5321 tmp3 = neon_load_reg(rn, 1);
5322
5323 for (pass = 0; pass < 2; pass++) {
5324 if (pass == 0) {
5325 tmp = neon_load_reg(rn, 0);
5326 } else {
5327 tmp = tmp3;
5328 tmp2 = tmp4;
5329 }
5330 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5331 if (op != 11) {
5332 neon_load_reg64(cpu_V1, rd + pass);
5333 }
5334 switch (op) {
5335 case 6:
5336 gen_neon_negl(cpu_V0, size);
5337 /* Fall through */
5338 case 2:
5339 gen_neon_addl(size);
5340 break;
5341 case 3: case 7:
5342 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5343 if (op == 7) {
5344 gen_neon_negl(cpu_V0, size);
5345 }
5346 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5347 break;
5348 case 10:
5349 /* no-op */
5350 break;
5351 case 11:
5352 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5353 break;
5354 default:
5355 abort();
5356 }
5357 neon_store_reg64(cpu_V0, rd + pass);
5358 }
5359
5360
5361 break;
5362 default: /* 14 and 15 are RESERVED */
5363 return 1;
5364 }
5365 }
5366 } else { /* size == 3 */
5367 if (!u) {
5368 /* Extract. */
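/* VEXT: the result is the byte string Vm:Vn shifted right by
   imm bytes, assembled here with 64-bit shifts and ORs. */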
5369 imm = (insn >> 8) & 0xf;
5370
5371 if (imm > 7 && !q)
5372 return 1;
5373
5374 if (imm == 0) {
5375 neon_load_reg64(cpu_V0, rn);
5376 if (q) {
5377 neon_load_reg64(cpu_V1, rn + 1);
5378 }
5379 } else if (imm == 8) {
5380 neon_load_reg64(cpu_V0, rn + 1);
5381 if (q) {
5382 neon_load_reg64(cpu_V1, rm);
5383 }
5384 } else if (q) {
5385 tmp64 = tcg_temp_new_i64();
5386 if (imm < 8) {
5387 neon_load_reg64(cpu_V0, rn);
5388 neon_load_reg64(tmp64, rn + 1);
5389 } else {
5390 neon_load_reg64(cpu_V0, rn + 1);
5391 neon_load_reg64(tmp64, rm);
5392 }
5393 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5394 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5395 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5396 if (imm < 8) {
5397 neon_load_reg64(cpu_V1, rm);
5398 } else {
5399 neon_load_reg64(cpu_V1, rm + 1);
5400 imm -= 8;
5401 }
5402 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5403 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5404 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5405 tcg_temp_free_i64(tmp64);
5406 } else {
5407 /* BUGFIX */
5408 neon_load_reg64(cpu_V0, rn);
5409 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5410 neon_load_reg64(cpu_V1, rm);
5411 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5412 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5413 }
5414 neon_store_reg64(cpu_V0, rd);
5415 if (q) {
5416 neon_store_reg64(cpu_V1, rd + 1);
5417 }
5418 } else if ((insn & (1 << 11)) == 0) {
5419 /* Two register misc. */
5420 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5421 size = (insn >> 18) & 3;
5422 switch (op) {
5423 case 0: /* VREV64 */
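/* Reverse the elements within each doubleword: swap within each
   word as needed, then store the two words exchanged. */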
5424 if (size == 3)
5425 return 1;
5426 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5427 tmp = neon_load_reg(rm, pass * 2);
5428 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5429 switch (size) {
5430 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5431 case 1: gen_swap_half(tmp); break;
5432 case 2: /* no-op */ break;
5433 default: abort();
5434 }
5435 neon_store_reg(rd, pass * 2 + 1, tmp);
5436 if (size == 2) {
5437 neon_store_reg(rd, pass * 2, tmp2);
5438 } else {
5439 switch (size) {
5440 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5441 case 1: gen_swap_half(tmp2); break;
5442 default: abort();
5443 }
5444 neon_store_reg(rd, pass * 2, tmp2);
5445 }
5446 }
5447 break;
5448 case 4: case 5: /* VPADDL */
5449 case 12: case 13: /* VPADAL */
5450 if (size == 3)
5451 return 1;
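/* Widen both words of the source, pairwise-add adjacent elements
   into double-width lanes, then for VPADAL accumulate into rd. */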
5452 for (pass = 0; pass < q + 1; pass++) {
5453 tmp = neon_load_reg(rm, pass * 2);
5454 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5455 tmp = neon_load_reg(rm, pass * 2 + 1);
5456 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5457 switch (size) {
5458 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5459 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5460 case 2: tcg_gen_add_i64(CPU_V001); break;
5461 default: abort();
5462 }
5463 if (op >= 12) {
5464 /* Accumulate. */
5465 neon_load_reg64(cpu_V1, rd + pass);
5466 gen_neon_addl(size);
5467 }
5468 neon_store_reg64(cpu_V0, rd + pass);
5469 }
5470 break;
5471 case 33: /* VTRN */
5472 if (size == 2) {
5473 for (n = 0; n < (q ? 4 : 2); n += 2) {
5474 tmp = neon_load_reg(rm, n);
5475 tmp2 = neon_load_reg(rd, n + 1);
5476 neon_store_reg(rm, n, tmp2);
5477 neon_store_reg(rd, n + 1, tmp);
5478 }
5479 } else {
5480 goto elementwise;
5481 }
5482 break;
5483 case 34: /* VUZP */
5484 if (gen_neon_unzip(rd, rm, size, q)) {
5485 return 1;
5486 }
5487 break;
5488 case 35: /* VZIP */
5489 if (gen_neon_zip(rd, rm, size, q)) {
5490 return 1;
5491 }
5492 break;
5493 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5494 if (size == 3)
5495 return 1;
5496 TCGV_UNUSED(tmp2);
5497 for (pass = 0; pass < 2; pass++) {
5498 neon_load_reg64(cpu_V0, rm + pass);
5499 tmp = tcg_temp_new_i32();
5500 gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
5501 if (pass == 0) {
5502 tmp2 = tmp;
5503 } else {
5504 neon_store_reg(rd, 0, tmp2);
5505 neon_store_reg(rd, 1, tmp);
5506 }
5507 }
5508 break;
5509 case 38: /* VSHLL */
5510 if (q || size == 3)
5511 return 1;
5512 tmp = neon_load_reg(rm, 0);
5513 tmp2 = neon_load_reg(rm, 1);
5514 for (pass = 0; pass < 2; pass++) {
5515 if (pass == 1)
5516 tmp = tmp2;
5517 gen_neon_widen(cpu_V0, tmp, size, 1);
5518 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5519 neon_store_reg64(cpu_V0, rd + pass);
5520 }
5521 break;
5522 case 44: /* VCVT.F16.F32 */
5523 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5524 return 1;
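/* Convert four single-precision inputs to half precision and
   pack the results in pairs into two 32-bit words. */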
5525 tmp = tcg_temp_new_i32();
5526 tmp2 = tcg_temp_new_i32();
5527 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5528 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5529 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5530 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5531 tcg_gen_shli_i32(tmp2, tmp2, 16);
5532 tcg_gen_or_i32(tmp2, tmp2, tmp);
5533 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5534 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5535 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5536 neon_store_reg(rd, 0, tmp2);
5537 tmp2 = tcg_temp_new_i32();
5538 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5539 tcg_gen_shli_i32(tmp2, tmp2, 16);
5540 tcg_gen_or_i32(tmp2, tmp2, tmp);
5541 neon_store_reg(rd, 1, tmp2);
5542 tcg_temp_free_i32(tmp);
5543 break;
5544 case 46: /* VCVT.F32.F16 */
5545 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5546 return 1;
5547 tmp3 = tcg_temp_new_i32();
5548 tmp = neon_load_reg(rm, 0);
5549 tmp2 = neon_load_reg(rm, 1);
5550 tcg_gen_ext16u_i32(tmp3, tmp);
5551 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5552 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5553 tcg_gen_shri_i32(tmp3, tmp, 16);
5554 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5555 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5556 tcg_temp_free_i32(tmp);
5557 tcg_gen_ext16u_i32(tmp3, tmp2);
5558 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5559 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5560 tcg_gen_shri_i32(tmp3, tmp2, 16);
5561 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5562 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5563 tcg_temp_free_i32(tmp2);
5564 tcg_temp_free_i32(tmp3);
5565 break;
5566 default:
5567 elementwise:
5568 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5569 if (op == 30 || op == 31 || op >= 58) {
5570 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5571 neon_reg_offset(rm, pass));
5572 TCGV_UNUSED(tmp);
5573 } else {
5574 tmp = neon_load_reg(rm, pass);
5575 }
5576 switch (op) {
5577 case 1: /* VREV32 */
5578 switch (size) {
5579 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5580 case 1: gen_swap_half(tmp); break;
5581 default: return 1;
5582 }
5583 break;
5584 case 2: /* VREV16 */
5585 if (size != 0)
5586 return 1;
5587 gen_rev16(tmp);
5588 break;
5589 case 8: /* CLS */
5590 switch (size) {
5591 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5592 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5593 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5594 default: return 1;
5595 }
5596 break;
5597 case 9: /* CLZ */
5598 switch (size) {
5599 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5600 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5601 case 2: gen_helper_clz(tmp, tmp); break;
5602 default: return 1;
5603 }
5604 break;
5605 case 10: /* CNT */
5606 if (size != 0)
5607 return 1;
5608 gen_helper_neon_cnt_u8(tmp, tmp);
5609 break;
5610 case 11: /* VNOT */
5611 if (size != 0)
5612 return 1;
5613 tcg_gen_not_i32(tmp, tmp);
5614 break;
5615 case 14: /* VQABS */
5616 switch (size) {
5617 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5618 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5619 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
5620 default: return 1;
5621 }
5622 break;
5623 case 15: /* VQNEG */
5624 switch (size) {
5625 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5626 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5627 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
5628 default: return 1;
5629 }
5630 break;
5631 case 16: case 19: /* VCGT #0, VCLE #0 */
5632 tmp2 = tcg_const_i32(0);
5633 switch(size) {
5634 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5635 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5636 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5637 default: return 1;
5638 }
5639 tcg_temp_free(tmp2);
5640 if (op == 19)
5641 tcg_gen_not_i32(tmp, tmp);
5642 break;
5643 case 17: case 20: /* VCGE #0, VCLT #0 */
5644 tmp2 = tcg_const_i32(0);
5645 switch(size) {
5646 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5647 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5648 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5649 default: return 1;
5650 }
5651 tcg_temp_free(tmp2);
5652 if (op == 20)
5653 tcg_gen_not_i32(tmp, tmp);
5654 break;
5655 case 18: /* VCEQ #0 */
5656 tmp2 = tcg_const_i32(0);
5657 switch(size) {
5658 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5659 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5660 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5661 default: return 1;
5662 }
5663 tcg_temp_free(tmp2);
5664 break;
5665 case 22: /* VABS */
5666 switch(size) {
5667 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5668 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5669 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5670 default: return 1;
5671 }
5672 break;
5673 case 23: /* VNEG */
5674 if (size == 3)
5675 return 1;
5676 tmp2 = tcg_const_i32(0);
5677 gen_neon_rsb(size, tmp, tmp2);
5678 tcg_temp_free(tmp2);
5679 break;
5680 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5681 tmp2 = tcg_const_i32(0);
5682 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5683 tcg_temp_free(tmp2);
5684 if (op == 27)
5685 tcg_gen_not_i32(tmp, tmp);
5686 break;
5687 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5688 tmp2 = tcg_const_i32(0);
5689 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5690 tcg_temp_free(tmp2);
5691 if (op == 28)
5692 tcg_gen_not_i32(tmp, tmp);
5693 break;
5694 case 26: /* Float VCEQ #0 */
5695 tmp2 = tcg_const_i32(0);
5696 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5697 tcg_temp_free(tmp2);
5698 break;
5699 case 30: /* Float VABS */
5700 gen_vfp_abs(0);
5701 break;
5702 case 31: /* Float VNEG */
5703 gen_vfp_neg(0);
5704 break;
5705 case 32: /* VSWP */
5706 tmp2 = neon_load_reg(rd, pass);
5707 neon_store_reg(rm, pass, tmp2);
5708 break;
5709 case 33: /* VTRN */
5710 tmp2 = neon_load_reg(rd, pass);
5711 switch (size) {
5712 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5713 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5714 case 2: abort();
5715 default: return 1;
5716 }
5717 neon_store_reg(rm, pass, tmp2);
5718 break;
5719 case 56: /* Integer VRECPE */
5720 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5721 break;
5722 case 57: /* Integer VRSQRTE */
5723 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5724 break;
5725 case 58: /* Float VRECPE */
5726 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5727 break;
5728 case 59: /* Float VRSQRTE */
5729 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5730 break;
5731 case 60: /* VCVT.F32.S32 */
5732 gen_vfp_sito(0);
5733 break;
5734 case 61: /* VCVT.F32.U32 */
5735 gen_vfp_uito(0);
5736 break;
5737 case 62: /* VCVT.S32.F32 */
5738 gen_vfp_tosiz(0);
5739 break;
5740 case 63: /* VCVT.U32.F32 */
5741 gen_vfp_touiz(0);
5742 break;
5743 default:
5744 /* Reserved: 21, 29, 39-56 */
5745 return 1;
5746 }
5747 if (op == 30 || op == 31 || op >= 58) {
5748 tcg_gen_st_f32(cpu_F0s, cpu_env,
5749 neon_reg_offset(rd, pass));
5750 } else {
5751 neon_store_reg(rd, pass, tmp);
5752 }
5753 }
5754 break;
5755 }
5756 } else if ((insn & (1 << 10)) == 0) {
5757 /* VTBL, VTBX. */
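/* Table length in bytes: n = (len + 1) * 8 with len in insn[9:8].
   Bit 6 distinguishes VTBX (out-of-range indices leave the
   destination) from VTBL (out-of-range indices give zero). */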
5758 n = ((insn >> 5) & 0x18) + 8;
5759 if (insn & (1 << 6)) {
5760 tmp = neon_load_reg(rd, 0);
5761 } else {
5762 tmp = tcg_temp_new_i32();
5763 tcg_gen_movi_i32(tmp, 0);
5764 }
5765 tmp2 = neon_load_reg(rm, 0);
5766 tmp4 = tcg_const_i32(rn);
5767 tmp5 = tcg_const_i32(n);
5768 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5769 tcg_temp_free_i32(tmp);
5770 if (insn & (1 << 6)) {
5771 tmp = neon_load_reg(rd, 1);
5772 } else {
5773 tmp = tcg_temp_new_i32();
5774 tcg_gen_movi_i32(tmp, 0);
5775 }
5776 tmp3 = neon_load_reg(rm, 1);
5777 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5778 tcg_temp_free_i32(tmp5);
5779 tcg_temp_free_i32(tmp4);
5780 neon_store_reg(rd, 0, tmp2);
5781 neon_store_reg(rd, 1, tmp3);
5782 tcg_temp_free_i32(tmp);
5783 } else if ((insn & 0x380) == 0) {
5784 /* VDUP */
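/* imm4 in insn[19:16] encodes the scalar size and index:
   xxx1 = byte, xx10 = halfword, x100 = word; bit 19 also picks
   the source word loaded here. */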
5785 if (insn & (1 << 19)) {
5786 tmp = neon_load_reg(rm, 1);
5787 } else {
5788 tmp = neon_load_reg(rm, 0);
5789 }
5790 if (insn & (1 << 16)) {
5791 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5792 } else if (insn & (1 << 17)) {
5793 if ((insn >> 18) & 1)
5794 gen_neon_dup_high16(tmp);
5795 else
5796 gen_neon_dup_low16(tmp);
5797 }
5798 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5799 tmp2 = tcg_temp_new_i32();
5800 tcg_gen_mov_i32(tmp2, tmp);
5801 neon_store_reg(rd, pass, tmp2);
5802 }
5803 tcg_temp_free_i32(tmp);
5804 } else {
5805 return 1;
5806 }
5807 }
5808 }
5809 return 0;
5810 }
5811
5812 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5813 {
5814 int crn = (insn >> 16) & 0xf;
5815 int crm = insn & 0xf;
5816 int op1 = (insn >> 21) & 7;
5817 int op2 = (insn >> 5) & 7;
5818 int rt = (insn >> 12) & 0xf;
5819 TCGv tmp;
5820
5821 /* Minimal set of debug registers, since we don't support debug */
5822 if (op1 == 0 && crn == 0 && op2 == 0) {
5823 switch (crm) {
5824 case 0:
5825 /* DBGDIDR: just RAZ. In particular this means the
5826 * "debug architecture version" bits will read as
5827 * a reserved value, which should cause Linux to
5828 * not try to use the debug hardware.
5829 */
5830 tmp = tcg_const_i32(0);
5831 store_reg(s, rt, tmp);
5832 return 0;
5833 case 1:
5834 case 2:
5835 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
5836 * don't implement memory mapped debug components
5837 */
5838 if (ENABLE_ARCH_7) {
5839 tmp = tcg_const_i32(0);
5840 store_reg(s, rt, tmp);
5841 return 0;
5842 }
5843 break;
5844 default:
5845 break;
5846 }
5847 }
5848
5849 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5850 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5851 /* TEECR */
5852 if (IS_USER(s))
5853 return 1;
5854 tmp = load_cpu_field(teecr);
5855 store_reg(s, rt, tmp);
5856 return 0;
5857 }
5858 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5859 /* TEEHBR */
5860 if (IS_USER(s) && (env->teecr & 1))
5861 return 1;
5862 tmp = load_cpu_field(teehbr);
5863 store_reg(s, rt, tmp);
5864 return 0;
5865 }
5866 }
5867 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5868 op1, crn, crm, op2);
5869 return 1;
5870 }
5871
5872 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5873 {
5874 int crn = (insn >> 16) & 0xf;
5875 int crm = insn & 0xf;
5876 int op1 = (insn >> 21) & 7;
5877 int op2 = (insn >> 5) & 7;
5878 int rt = (insn >> 12) & 0xf;
5879 TCGv tmp;
5880
5881 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5882 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5883 /* TEECR */
5884 if (IS_USER(s))
5885 return 1;
5886 tmp = load_reg(s, rt);
5887 gen_helper_set_teecr(cpu_env, tmp);
5888 tcg_temp_free_i32(tmp);
5889 return 0;
5890 }
5891 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5892 /* TEEHBR */
5893 if (IS_USER(s) && (env->teecr & 1))
5894 return 1;
5895 tmp = load_reg(s, rt);
5896 store_cpu_field(tmp, teehbr);
5897 return 0;
5898 }
5899 }
5900 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5901 op1, crn, crm, op2);
5902 return 1;
5903 }
5904
5905 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5906 {
5907 int cpnum;
5908
5909 cpnum = (insn >> 8) & 0xf;
5910 if (arm_feature(env, ARM_FEATURE_XSCALE)
5911 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5912 return 1;
5913
5914 switch (cpnum) {
5915 case 0:
5916 case 1:
5917 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5918 return disas_iwmmxt_insn(env, s, insn);
5919 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5920 return disas_dsp_insn(env, s, insn);
5921 }
5922 return 1;
5923 case 10:
5924 case 11:
5925 return disas_vfp_insn (env, s, insn);
5926 case 14:
5927 /* Coprocessors 7-15 are architecturally reserved by ARM.
5928 Unfortunately Intel decided to ignore this. */
5929 if (arm_feature(env, ARM_FEATURE_XSCALE))
5930 goto board;
5931 if (insn & (1 << 20))
5932 return disas_cp14_read(env, s, insn);
5933 else
5934 return disas_cp14_write(env, s, insn);
5935 case 15:
5936 return disas_cp15_insn (env, s, insn);
5937 default:
5938 board:
5939 /* Unknown coprocessor. See if the board has hooked it. */
5940 return disas_cp_insn (env, s, insn);
5941 }
5942 }
5943
5944
5945 /* Store a 64-bit value to a register pair. Clobbers val. */
5946 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5947 {
5948 TCGv tmp;
5949 tmp = tcg_temp_new_i32();
5950 tcg_gen_trunc_i64_i32(tmp, val);
5951 store_reg(s, rlow, tmp);
5952 tmp = tcg_temp_new_i32();
5953 tcg_gen_shri_i64(val, val, 32);
5954 tcg_gen_trunc_i64_i32(tmp, val);
5955 store_reg(s, rhigh, tmp);
5956 }
5957
5958 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5959 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5960 {
5961 TCGv_i64 tmp;
5962 TCGv tmp2;
5963
5964 /* Load value and extend to 64 bits. */
5965 tmp = tcg_temp_new_i64();
5966 tmp2 = load_reg(s, rlow);
5967 tcg_gen_extu_i32_i64(tmp, tmp2);
5968 tcg_temp_free_i32(tmp2);
5969 tcg_gen_add_i64(val, val, tmp);
5970 tcg_temp_free_i64(tmp);
5971 }
5972
5973 /* load and add a 64-bit value from a register pair. */
5974 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5975 {
5976 TCGv_i64 tmp;
5977 TCGv tmpl;
5978 TCGv tmph;
5979
5980 /* Load 64-bit value rd:rn. */
5981 tmpl = load_reg(s, rlow);
5982 tmph = load_reg(s, rhigh);
5983 tmp = tcg_temp_new_i64();
5984 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5985 tcg_temp_free_i32(tmpl);
5986 tcg_temp_free_i32(tmph);
5987 tcg_gen_add_i64(val, val, tmp);
5988 tcg_temp_free_i64(tmp);
5989 }
5990
5991 /* Set N and Z flags from a 64-bit value. */
5992 static void gen_logicq_cc(TCGv_i64 val)
5993 {
5994 TCGv tmp = tcg_temp_new_i32();
5995 gen_helper_logicq_cc(tmp, val);
5996 gen_logic_CC(tmp);
5997 tcg_temp_free_i32(tmp);
5998 }
5999
6000 /* Load/Store exclusive instructions are implemented by remembering
6001 the value/address loaded, and seeing if these are the same
6002 when the store is performed. This should be sufficient to implement
6003 the architecturally mandated semantics, and avoids having to monitor
6004 regular stores.
6005
6006 In system emulation mode only one CPU will be running at once, so
6007 this sequence is effectively atomic. In user emulation mode we
6008 throw an exception and handle the atomic operation elsewhere. */
6009 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6010 TCGv addr, int size)
6011 {
6012 TCGv tmp;
6013
6014 switch (size) {
6015 case 0:
6016 tmp = gen_ld8u(addr, IS_USER(s));
6017 break;
6018 case 1:
6019 tmp = gen_ld16u(addr, IS_USER(s));
6020 break;
6021 case 2:
6022 case 3:
6023 tmp = gen_ld32(addr, IS_USER(s));
6024 break;
6025 default:
6026 abort();
6027 }
6028 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6029 store_reg(s, rt, tmp);
6030 if (size == 3) {
6031 TCGv tmp2 = tcg_temp_new_i32();
6032 tcg_gen_addi_i32(tmp2, addr, 4);
6033 tmp = gen_ld32(tmp2, IS_USER(s));
6034 tcg_temp_free_i32(tmp2);
6035 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6036 store_reg(s, rt2, tmp);
6037 }
6038 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6039 }
6040
6041 static void gen_clrex(DisasContext *s)
6042 {
6043 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6044 }
6045
6046 #ifdef CONFIG_USER_ONLY
6047 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6048 TCGv addr, int size)
6049 {
6050 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6051 tcg_gen_movi_i32(cpu_exclusive_info,
6052 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6053 gen_exception_insn(s, 4, EXCP_STREX);
6054 }
6055 #else
6056 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6057 TCGv addr, int size)
6058 {
6059 TCGv tmp;
6060 int done_label;
6061 int fail_label;
6062
6063 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6064 [addr] = {Rt};
6065 {Rd} = 0;
6066 } else {
6067 {Rd} = 1;
6068 } */
6069 fail_label = gen_new_label();
6070 done_label = gen_new_label();
6071 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6072 switch (size) {
6073 case 0:
6074 tmp = gen_ld8u(addr, IS_USER(s));
6075 break;
6076 case 1:
6077 tmp = gen_ld16u(addr, IS_USER(s));
6078 break;
6079 case 2:
6080 case 3:
6081 tmp = gen_ld32(addr, IS_USER(s));
6082 break;
6083 default:
6084 abort();
6085 }
6086 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6087 tcg_temp_free_i32(tmp);
6088 if (size == 3) {
6089 TCGv tmp2 = tcg_temp_new_i32();
6090 tcg_gen_addi_i32(tmp2, addr, 4);
6091 tmp = gen_ld32(tmp2, IS_USER(s));
6092 tcg_temp_free_i32(tmp2);
6093 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6094 tcg_temp_free_i32(tmp);
6095 }
6096 tmp = load_reg(s, rt);
6097 switch (size) {
6098 case 0:
6099 gen_st8(tmp, addr, IS_USER(s));
6100 break;
6101 case 1:
6102 gen_st16(tmp, addr, IS_USER(s));
6103 break;
6104 case 2:
6105 case 3:
6106 gen_st32(tmp, addr, IS_USER(s));
6107 break;
6108 default:
6109 abort();
6110 }
6111 if (size == 3) {
6112 tcg_gen_addi_i32(addr, addr, 4);
6113 tmp = load_reg(s, rt2);
6114 gen_st32(tmp, addr, IS_USER(s));
6115 }
6116 tcg_gen_movi_i32(cpu_R[rd], 0);
6117 tcg_gen_br(done_label);
6118 gen_set_label(fail_label);
6119 tcg_gen_movi_i32(cpu_R[rd], 1);
6120 gen_set_label(done_label);
6121 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6122 }
6123 #endif
6124
6125 static void disas_arm_insn(CPUState * env, DisasContext *s)
6126 {
6127 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6128 TCGv tmp;
6129 TCGv tmp2;
6130 TCGv tmp3;
6131 TCGv addr;
6132 TCGv_i64 tmp64;
6133
6134 insn = ldl_code(s->pc);
6135 s->pc += 4;
6136
6137 /* M variants do not implement ARM mode. */
6138 if (IS_M(env))
6139 goto illegal_op;
6140 cond = insn >> 28;
6141 if (cond == 0xf){
6142 /* Unconditional instructions. */
6143 if (((insn >> 25) & 7) == 1) {
6144 /* NEON Data processing. */
6145 if (!arm_feature(env, ARM_FEATURE_NEON))
6146 goto illegal_op;
6147
6148 if (disas_neon_data_insn(env, s, insn))
6149 goto illegal_op;
6150 return;
6151 }
6152 if ((insn & 0x0f100000) == 0x04000000) {
6153 /* NEON load/store. */
6154 if (!arm_feature(env, ARM_FEATURE_NEON))
6155 goto illegal_op;
6156
6157 if (disas_neon_ls_insn(env, s, insn))
6158 goto illegal_op;
6159 return;
6160 }
6161 if (((insn & 0x0f30f000) == 0x0510f000) ||
6162 ((insn & 0x0f30f010) == 0x0710f000)) {
6163 if ((insn & (1 << 22)) == 0) {
6164 /* PLDW; v7MP */
6165 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6166 goto illegal_op;
6167 }
6168 }
6169 /* Otherwise PLD; v5TE+ */
6170 return;
6171 }
6172 if (((insn & 0x0f70f000) == 0x0450f000) ||
6173 ((insn & 0x0f70f010) == 0x0650f000)) {
6174 ARCH(7);
6175 return; /* PLI; V7 */
6176 }
6177 if (((insn & 0x0f700000) == 0x04100000) ||
6178 ((insn & 0x0f700010) == 0x06100000)) {
6179 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6180 goto illegal_op;
6181 }
6182 return; /* v7MP: Unallocated memory hint: must NOP */
6183 }
6184
6185 if ((insn & 0x0ffffdff) == 0x01010000) {
6186 ARCH(6);
6187 /* setend */
6188 if (insn & (1 << 9)) {
6189 /* BE8 mode not implemented. */
6190 goto illegal_op;
6191 }
6192 return;
6193 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6194 switch ((insn >> 4) & 0xf) {
6195 case 1: /* clrex */
6196 ARCH(6K);
6197 gen_clrex(s);
6198 return;
6199 case 4: /* dsb */
6200 case 5: /* dmb */
6201 case 6: /* isb */
6202 ARCH(7);
6203 /* We don't emulate caches so these are a no-op. */
6204 return;
6205 default:
6206 goto illegal_op;
6207 }
6208 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6209 /* srs */
6210 int32_t offset;
6211 if (IS_USER(s))
6212 goto illegal_op;
6213 ARCH(6);
6214 op1 = (insn & 0x1f);
6215 addr = tcg_temp_new_i32();
6216 tmp = tcg_const_i32(op1);
6217 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6218 tcg_temp_free_i32(tmp);
6219 i = (insn >> 23) & 3;
6220 switch (i) {
6221 case 0: offset = -4; break; /* DA */
6222 case 1: offset = 0; break; /* IA */
6223 case 2: offset = -8; break; /* DB */
6224 case 3: offset = 4; break; /* IB */
6225 default: abort();
6226 }
6227 if (offset)
6228 tcg_gen_addi_i32(addr, addr, offset);
6229 tmp = load_reg(s, 14);
6230 gen_st32(tmp, addr, 0);
6231 tmp = load_cpu_field(spsr);
6232 tcg_gen_addi_i32(addr, addr, 4);
6233 gen_st32(tmp, addr, 0);
6234 if (insn & (1 << 21)) {
6235 /* Base writeback. */
6236 switch (i) {
6237 case 0: offset = -8; break;
6238 case 1: offset = 4; break;
6239 case 2: offset = -4; break;
6240 case 3: offset = 0; break;
6241 default: abort();
6242 }
6243 if (offset)
6244 tcg_gen_addi_i32(addr, addr, offset);
6245 tmp = tcg_const_i32(op1);
6246 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6247 tcg_temp_free_i32(tmp);
6248 tcg_temp_free_i32(addr);
6249 } else {
6250 tcg_temp_free_i32(addr);
6251 }
6252 return;
6253 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6254 /* rfe */
6255 int32_t offset;
6256 if (IS_USER(s))
6257 goto illegal_op;
6258 ARCH(6);
6259 rn = (insn >> 16) & 0xf;
6260 addr = load_reg(s, rn);
6261 i = (insn >> 23) & 3;
6262 switch (i) {
6263 case 0: offset = -4; break; /* DA */
6264 case 1: offset = 0; break; /* IA */
6265 case 2: offset = -8; break; /* DB */
6266 case 3: offset = 4; break; /* IB */
6267 default: abort();
6268 }
6269 if (offset)
6270 tcg_gen_addi_i32(addr, addr, offset);
6271 /* Load PC into tmp and CPSR into tmp2. */
6272 tmp = gen_ld32(addr, 0);
6273 tcg_gen_addi_i32(addr, addr, 4);
6274 tmp2 = gen_ld32(addr, 0);
6275 if (insn & (1 << 21)) {
6276 /* Base writeback. */
6277 switch (i) {
6278 case 0: offset = -8; break;
6279 case 1: offset = 4; break;
6280 case 2: offset = -4; break;
6281 case 3: offset = 0; break;
6282 default: abort();
6283 }
6284 if (offset)
6285 tcg_gen_addi_i32(addr, addr, offset);
6286 store_reg(s, rn, addr);
6287 } else {
6288 tcg_temp_free_i32(addr);
6289 }
6290 gen_rfe(s, tmp, tmp2);
6291 return;
6292 } else if ((insn & 0x0e000000) == 0x0a000000) {
6293 /* branch link and change to thumb (blx <offset>) */
6294 int32_t offset;
6295
6296 val = (uint32_t)s->pc;
6297 tmp = tcg_temp_new_i32();
6298 tcg_gen_movi_i32(tmp, val);
6299 store_reg(s, 14, tmp);
6300 /* Sign-extend the 24-bit offset */
6301 offset = (((int32_t)insn) << 8) >> 8;
6302 /* offset * 4 + bit24 * 2 + (thumb bit) */
6303 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6304 /* pipeline offset */
6305 val += 4;
6306 gen_bx_im(s, val);
6307 return;
6308 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6309 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6310 /* iWMMXt register transfer. */
6311 if (env->cp15.c15_cpar & (1 << 1))
6312 if (!disas_iwmmxt_insn(env, s, insn))
6313 return;
6314 }
6315 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6316 /* Coprocessor double register transfer. */
6317 } else if ((insn & 0x0f000010) == 0x0e000010) {
6318 /* Additional coprocessor register transfer. */
6319 } else if ((insn & 0x0ff10020) == 0x01000000) {
6320 uint32_t mask;
6321 uint32_t val;
6322 /* cps (privileged) */
6323 if (IS_USER(s))
6324 return;
6325 mask = val = 0;
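     /* imod = insn[19:18]: imod[1] set means the A/I/F masks selected by
        insn[8:6] are affected, imod[0] picks set (disable) or clear (enable).
        insn[17] additionally requests a mode change to insn[4:0]. */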
6326 if (insn & (1 << 19)) {
6327 if (insn & (1 << 8))
6328 mask |= CPSR_A;
6329 if (insn & (1 << 7))
6330 mask |= CPSR_I;
6331 if (insn & (1 << 6))
6332 mask |= CPSR_F;
6333 if (insn & (1 << 18))
6334 val |= mask;
6335 }
6336 if (insn & (1 << 17)) {
6337 mask |= CPSR_M;
6338 val |= (insn & 0x1f);
6339 }
6340 if (mask) {
6341 gen_set_psr_im(s, mask, 0, val);
6342 }
6343 return;
6344 }
6345 goto illegal_op;
6346 }
6347 if (cond != 0xe) {
6348 /* If the condition is not AL, generate a conditional jump to
6349 the next instruction. */
6350 s->condlabel = gen_new_label();
6351 gen_test_cc(cond ^ 1, s->condlabel);
6352 s->condjmp = 1;
6353 }
6354 if ((insn & 0x0f900000) == 0x03000000) {
6355 if ((insn & (1 << 21)) == 0) {
6356 ARCH(6T2);
6357 rd = (insn >> 12) & 0xf;
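     /* imm16 = imm4:imm12, i.e. insn[19:16]:insn[11:0]. */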
6358 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6359 if ((insn & (1 << 22)) == 0) {
6360 /* MOVW */
6361 tmp = tcg_temp_new_i32();
6362 tcg_gen_movi_i32(tmp, val);
6363 } else {
6364 /* MOVT */
6365 tmp = load_reg(s, rd);
6366 tcg_gen_ext16u_i32(tmp, tmp);
6367 tcg_gen_ori_i32(tmp, tmp, val << 16);
6368 }
6369 store_reg(s, rd, tmp);
6370 } else {
6371 if (((insn >> 12) & 0xf) != 0xf)
6372 goto illegal_op;
6373 if (((insn >> 16) & 0xf) == 0) {
6374 gen_nop_hint(s, insn & 0xff);
6375 } else {
6376 /* CPSR = immediate */
6377 val = insn & 0xff;
6378 shift = ((insn >> 8) & 0xf) * 2;
6379 if (shift)
6380 val = (val >> shift) | (val << (32 - shift));
6381 i = ((insn & (1 << 22)) != 0);
6382 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6383 goto illegal_op;
6384 }
6385 }
6386 } else if ((insn & 0x0f900000) == 0x01000000
6387 && (insn & 0x00000090) != 0x00000090) {
6388 /* miscellaneous instructions */
6389 op1 = (insn >> 21) & 3;
6390 sh = (insn >> 4) & 0xf;
6391 rm = insn & 0xf;
6392 switch (sh) {
6393 case 0x0: /* move program status register */
6394 if (op1 & 1) {
6395 /* PSR = reg */
6396 tmp = load_reg(s, rm);
6397 i = ((op1 & 2) != 0);
6398 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6399 goto illegal_op;
6400 } else {
6401 /* reg = PSR */
6402 rd = (insn >> 12) & 0xf;
6403 if (op1 & 2) {
6404 if (IS_USER(s))
6405 goto illegal_op;
6406 tmp = load_cpu_field(spsr);
6407 } else {
6408 tmp = tcg_temp_new_i32();
6409 gen_helper_cpsr_read(tmp);
6410 }
6411 store_reg(s, rd, tmp);
6412 }
6413 break;
6414 case 0x1:
6415 if (op1 == 1) {
6416 /* branch/exchange thumb (bx). */
6417 tmp = load_reg(s, rm);
6418 gen_bx(s, tmp);
6419 } else if (op1 == 3) {
6420 /* clz */
6421 rd = (insn >> 12) & 0xf;
6422 tmp = load_reg(s, rm);
6423 gen_helper_clz(tmp, tmp);
6424 store_reg(s, rd, tmp);
6425 } else {
6426 goto illegal_op;
6427 }
6428 break;
6429 case 0x2:
6430 if (op1 == 1) {
6431 ARCH(5J); /* bxj */
6432 /* Trivial implementation equivalent to bx. */
6433 tmp = load_reg(s, rm);
6434 gen_bx(s, tmp);
6435 } else {
6436 goto illegal_op;
6437 }
6438 break;
6439 case 0x3:
6440 if (op1 != 1)
6441 goto illegal_op;
6442
6443 /* branch link/exchange thumb (blx) */
6444 tmp = load_reg(s, rm);
6445 tmp2 = tcg_temp_new_i32();
6446 tcg_gen_movi_i32(tmp2, s->pc);
6447 store_reg(s, 14, tmp2);
6448 gen_bx(s, tmp);
6449 break;
6450 case 0x5: /* saturating add/subtract */
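     /* qadd/qsub/qdadd/qdsub: op1 bit 0 selects subtract; op1 bit 1
        saturate-doubles rn first. */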
6451 rd = (insn >> 12) & 0xf;
6452 rn = (insn >> 16) & 0xf;
6453 tmp = load_reg(s, rm);
6454 tmp2 = load_reg(s, rn);
6455 if (op1 & 2)
6456 gen_helper_double_saturate(tmp2, tmp2);
6457 if (op1 & 1)
6458 gen_helper_sub_saturate(tmp, tmp, tmp2);
6459 else
6460 gen_helper_add_saturate(tmp, tmp, tmp2);
6461 tcg_temp_free_i32(tmp2);
6462 store_reg(s, rd, tmp);
6463 break;
6464 case 7:
6465 /* SMC instruction (op1 == 3)
6466 and undefined instructions (op1 == 0 || op1 == 2)
6467 will trap */
6468 if (op1 != 1) {
6469 goto illegal_op;
6470 }
6471 /* bkpt */
6472 gen_exception_insn(s, 4, EXCP_BKPT);
6473 break;
6474 case 0x8: /* signed multiply */
6475 case 0xa:
6476 case 0xc:
6477 case 0xe:
6478 rs = (insn >> 8) & 0xf;
6479 rn = (insn >> 12) & 0xf;
6480 rd = (insn >> 16) & 0xf;
6481 if (op1 == 1) {
6482 /* (32 * 16) >> 16 */
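     /* smlaw<y> (bit 5 clear, accumulates rn) or smulw<y>; bit 6 selects
        the top or bottom halfword of rs. */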
6483 tmp = load_reg(s, rm);
6484 tmp2 = load_reg(s, rs);
6485 if (sh & 4)
6486 tcg_gen_sari_i32(tmp2, tmp2, 16);
6487 else
6488 gen_sxth(tmp2);
6489 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6490 tcg_gen_shri_i64(tmp64, tmp64, 16);
6491 tmp = tcg_temp_new_i32();
6492 tcg_gen_trunc_i64_i32(tmp, tmp64);
6493 tcg_temp_free_i64(tmp64);
6494 if ((sh & 2) == 0) {
6495 tmp2 = load_reg(s, rn);
6496 gen_helper_add_setq(tmp, tmp, tmp2);
6497 tcg_temp_free_i32(tmp2);
6498 }
6499 store_reg(s, rd, tmp);
6500 } else {
6501 /* 16 * 16 */
6502 tmp = load_reg(s, rm);
6503 tmp2 = load_reg(s, rs);
6504 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6505 tcg_temp_free_i32(tmp2);
6506 if (op1 == 2) {
6507 tmp64 = tcg_temp_new_i64();
6508 tcg_gen_ext_i32_i64(tmp64, tmp);
6509 tcg_temp_free_i32(tmp);
6510 gen_addq(s, tmp64, rn, rd);
6511 gen_storeq_reg(s, rn, rd, tmp64);
6512 tcg_temp_free_i64(tmp64);
6513 } else {
6514 if (op1 == 0) {
6515 tmp2 = load_reg(s, rn);
6516 gen_helper_add_setq(tmp, tmp, tmp2);
6517 tcg_temp_free_i32(tmp2);
6518 }
6519 store_reg(s, rd, tmp);
6520 }
6521 }
6522 break;
6523 default:
6524 goto illegal_op;
6525 }
6526 } else if (((insn & 0x0e000000) == 0 &&
6527 (insn & 0x00000090) != 0x90) ||
6528 ((insn & 0x0e000000) == (1 << 25))) {
6529 int set_cc, logic_cc, shiftop;
6530
6531 op1 = (insn >> 21) & 0xf;
6532 set_cc = (insn >> 20) & 1;
6533 logic_cc = table_logic_cc[op1] & set_cc;
6534
6535 /* data processing instruction */
6536 if (insn & (1 << 25)) {
6537 /* immediate operand */
6538 val = insn & 0xff;
6539 shift = ((insn >> 8) & 0xf) * 2;
6540 if (shift) {
6541 val = (val >> shift) | (val << (32 - shift));
6542 }
6543 tmp2 = tcg_temp_new_i32();
6544 tcg_gen_movi_i32(tmp2, val);
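     /* With a nonzero rotation, bit 31 of the rotated immediate is the
        shifter carry-out for flag-setting logical ops. */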
6545 if (logic_cc && shift) {
6546 gen_set_CF_bit31(tmp2);
6547 }
6548 } else {
6549 /* register */
6550 rm = (insn) & 0xf;
6551 tmp2 = load_reg(s, rm);
6552 shiftop = (insn >> 5) & 3;
6553 if (!(insn & (1 << 4))) {
6554 shift = (insn >> 7) & 0x1f;
6555 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6556 } else {
6557 rs = (insn >> 8) & 0xf;
6558 tmp = load_reg(s, rs);
6559 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6560 }
6561 }
6562 if (op1 != 0x0f && op1 != 0x0d) {
6563 rn = (insn >> 16) & 0xf;
6564 tmp = load_reg(s, rn);
6565 } else {
6566 TCGV_UNUSED(tmp);
6567 }
6568 rd = (insn >> 12) & 0xf;
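     /* op1: 0=and 1=eor 2=sub 3=rsb 4=add 5=adc 6=sbc 7=rsc
             8=tst 9=teq a=cmp b=cmn c=orr d=mov e=bic f=mvn */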
6569 switch(op1) {
6570 case 0x00:
6571 tcg_gen_and_i32(tmp, tmp, tmp2);
6572 if (logic_cc) {
6573 gen_logic_CC(tmp);
6574 }
6575 store_reg_bx(env, s, rd, tmp);
6576 break;
6577 case 0x01:
6578 tcg_gen_xor_i32(tmp, tmp, tmp2);
6579 if (logic_cc) {
6580 gen_logic_CC(tmp);
6581 }
6582 store_reg_bx(env, s, rd, tmp);
6583 break;
6584 case 0x02:
6585 if (set_cc && rd == 15) {
6586 /* SUBS r15, ... is used for exception return. */
6587 if (IS_USER(s)) {
6588 goto illegal_op;
6589 }
6590 gen_helper_sub_cc(tmp, tmp, tmp2);
6591 gen_exception_return(s, tmp);
6592 } else {
6593 if (set_cc) {
6594 gen_helper_sub_cc(tmp, tmp, tmp2);
6595 } else {
6596 tcg_gen_sub_i32(tmp, tmp, tmp2);
6597 }
6598 store_reg_bx(env, s, rd, tmp);
6599 }
6600 break;
6601 case 0x03:
6602 if (set_cc) {
6603 gen_helper_sub_cc(tmp, tmp2, tmp);
6604 } else {
6605 tcg_gen_sub_i32(tmp, tmp2, tmp);
6606 }
6607 store_reg_bx(env, s, rd, tmp);
6608 break;
6609 case 0x04:
6610 if (set_cc) {
6611 gen_helper_add_cc(tmp, tmp, tmp2);
6612 } else {
6613 tcg_gen_add_i32(tmp, tmp, tmp2);
6614 }
6615 store_reg_bx(env, s, rd, tmp);
6616 break;
6617 case 0x05:
6618 if (set_cc) {
6619 gen_helper_adc_cc(tmp, tmp, tmp2);
6620 } else {
6621 gen_add_carry(tmp, tmp, tmp2);
6622 }
6623 store_reg_bx(env, s, rd, tmp);
6624 break;
6625 case 0x06:
6626 if (set_cc) {
6627 gen_helper_sbc_cc(tmp, tmp, tmp2);
6628 } else {
6629 gen_sub_carry(tmp, tmp, tmp2);
6630 }
6631 store_reg_bx(env, s, rd, tmp);
6632 break;
6633 case 0x07:
6634 if (set_cc) {
6635 gen_helper_sbc_cc(tmp, tmp2, tmp);
6636 } else {
6637 gen_sub_carry(tmp, tmp2, tmp);
6638 }
6639 store_reg_bx(env, s, rd, tmp);
6640 break;
6641 case 0x08:
6642 if (set_cc) {
6643 tcg_gen_and_i32(tmp, tmp, tmp2);
6644 gen_logic_CC(tmp);
6645 }
6646 tcg_temp_free_i32(tmp);
6647 break;
6648 case 0x09:
6649 if (set_cc) {
6650 tcg_gen_xor_i32(tmp, tmp, tmp2);
6651 gen_logic_CC(tmp);
6652 }
6653 tcg_temp_free_i32(tmp);
6654 break;
6655 case 0x0a:
6656 if (set_cc) {
6657 gen_helper_sub_cc(tmp, tmp, tmp2);
6658 }
6659 tcg_temp_free_i32(tmp);
6660 break;
6661 case 0x0b:
6662 if (set_cc) {
6663 gen_helper_add_cc(tmp, tmp, tmp2);
6664 }
6665 tcg_temp_free_i32(tmp);
6666 break;
6667 case 0x0c:
6668 tcg_gen_or_i32(tmp, tmp, tmp2);
6669 if (logic_cc) {
6670 gen_logic_CC(tmp);
6671 }
6672 store_reg_bx(env, s, rd, tmp);
6673 break;
6674 case 0x0d:
6675 if (logic_cc && rd == 15) {
6676 /* MOVS r15, ... is used for exception return. */
6677 if (IS_USER(s)) {
6678 goto illegal_op;
6679 }
6680 gen_exception_return(s, tmp2);
6681 } else {
6682 if (logic_cc) {
6683 gen_logic_CC(tmp2);
6684 }
6685 store_reg_bx(env, s, rd, tmp2);
6686 }
6687 break;
6688 case 0x0e:
6689 tcg_gen_andc_i32(tmp, tmp, tmp2);
6690 if (logic_cc) {
6691 gen_logic_CC(tmp);
6692 }
6693 store_reg_bx(env, s, rd, tmp);
6694 break;
6695 default:
6696 case 0x0f:
6697 tcg_gen_not_i32(tmp2, tmp2);
6698 if (logic_cc) {
6699 gen_logic_CC(tmp2);
6700 }
6701 store_reg_bx(env, s, rd, tmp2);
6702 break;
6703 }
6704 if (op1 != 0x0f && op1 != 0x0d) {
6705 tcg_temp_free_i32(tmp2);
6706 }
6707 } else {
6708 /* other instructions */
6709 op1 = (insn >> 24) & 0xf;
6710 switch(op1) {
6711 case 0x0:
6712 case 0x1:
6713 /* multiplies, extra load/stores */
6714 sh = (insn >> 5) & 3;
6715 if (sh == 0) {
6716 if (op1 == 0x0) {
6717 rd = (insn >> 16) & 0xf;
6718 rn = (insn >> 12) & 0xf;
6719 rs = (insn >> 8) & 0xf;
6720 rm = (insn) & 0xf;
6721 op1 = (insn >> 20) & 0xf;
6722 switch (op1) {
6723 case 0: case 1: case 2: case 3: case 6:
6724 /* 32 bit mul */
6725 tmp = load_reg(s, rs);
6726 tmp2 = load_reg(s, rm);
6727 tcg_gen_mul_i32(tmp, tmp, tmp2);
6728 tcg_temp_free_i32(tmp2);
6729 if (insn & (1 << 22)) {
6730 /* Subtract (mls) */
6731 ARCH(6T2);
6732 tmp2 = load_reg(s, rn);
6733 tcg_gen_sub_i32(tmp, tmp2, tmp);
6734 tcg_temp_free_i32(tmp2);
6735 } else if (insn & (1 << 21)) {
6736 /* Add */
6737 tmp2 = load_reg(s, rn);
6738 tcg_gen_add_i32(tmp, tmp, tmp2);
6739 tcg_temp_free_i32(tmp2);
6740 }
6741 if (insn & (1 << 20))
6742 gen_logic_CC(tmp);
6743 store_reg(s, rd, tmp);
6744 break;
6745 case 4:
6746 /* 64 bit mul double accumulate (UMAAL) */
6747 ARCH(6);
6748 tmp = load_reg(s, rs);
6749 tmp2 = load_reg(s, rm);
6750 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6751 gen_addq_lo(s, tmp64, rn);
6752 gen_addq_lo(s, tmp64, rd);
6753 gen_storeq_reg(s, rn, rd, tmp64);
6754 tcg_temp_free_i64(tmp64);
6755 break;
6756 case 8: case 9: case 10: case 11:
6757 case 12: case 13: case 14: case 15:
6758 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6759 tmp = load_reg(s, rs);
6760 tmp2 = load_reg(s, rm);
6761 if (insn & (1 << 22)) {
6762 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6763 } else {
6764 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6765 }
6766 if (insn & (1 << 21)) { /* mult accumulate */
6767 gen_addq(s, tmp64, rn, rd);
6768 }
6769 if (insn & (1 << 20)) {
6770 gen_logicq_cc(tmp64);
6771 }
6772 gen_storeq_reg(s, rn, rd, tmp64);
6773 tcg_temp_free_i64(tmp64);
6774 break;
6775 default:
6776 goto illegal_op;
6777 }
6778 } else {
6779 rn = (insn >> 16) & 0xf;
6780 rd = (insn >> 12) & 0xf;
6781 if (insn & (1 << 23)) {
6782 /* load/store exclusive */
6783 op1 = (insn >> 21) & 0x3;
6784 if (op1)
6785 ARCH(6K);
6786 else
6787 ARCH(6);
6788 addr = tcg_temp_local_new_i32();
6789 load_reg_var(s, addr, rn);
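     /* The last argument is log2 of the transfer size (0 = byte,
        1 = halfword, 2 = word, 3 = register pair); an rt2 of 15 means
        no second register. */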
6790 if (insn & (1 << 20)) {
6791 switch (op1) {
6792 case 0: /* ldrex */
6793 gen_load_exclusive(s, rd, 15, addr, 2);
6794 break;
6795 case 1: /* ldrexd */
6796 gen_load_exclusive(s, rd, rd + 1, addr, 3);
6797 break;
6798 case 2: /* ldrexb */
6799 gen_load_exclusive(s, rd, 15, addr, 0);
6800 break;
6801 case 3: /* ldrexh */
6802 gen_load_exclusive(s, rd, 15, addr, 1);
6803 break;
6804 default:
6805 abort();
6806 }
6807 } else {
6808 rm = insn & 0xf;
6809 switch (op1) {
6810 case 0: /* strex */
6811 gen_store_exclusive(s, rd, rm, 15, addr, 2);
6812 break;
6813 case 1: /* strexd */
6814 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
6815 break;
6816 case 2: /* strexb */
6817 gen_store_exclusive(s, rd, rm, 15, addr, 0);
6818 break;
6819 case 3: /* strexh */
6820 gen_store_exclusive(s, rd, rm, 15, addr, 1);
6821 break;
6822 default:
6823 abort();
6824 }
6825 }
6826 tcg_temp_free(addr);
6827 } else {
6828 /* SWP instruction */
6829 rm = (insn) & 0xf;
6830
6831 /* ??? This is not really atomic. However, we know
6832 we never have multiple CPUs running in parallel,
6833 so it is good enough. */
6834 addr = load_reg(s, rn);
6835 tmp = load_reg(s, rm);
6836 if (insn & (1 << 22)) {
6837 tmp2 = gen_ld8u(addr, IS_USER(s));
6838 gen_st8(tmp, addr, IS_USER(s));
6839 } else {
6840 tmp2 = gen_ld32(addr, IS_USER(s));
6841 gen_st32(tmp, addr, IS_USER(s));
6842 }
6843 tcg_temp_free_i32(addr);
6844 store_reg(s, rd, tmp2);
6845 }
6846 }
6847 } else {
6848 int address_offset;
6849 int load;
6850 /* Misc load/store */
6851 rn = (insn >> 16) & 0xf;
6852 rd = (insn >> 12) & 0xf;
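     /* sh = insn[6:5]. Loads: 1 = ldrh, 2 = ldrsb, 3 = ldrsh.
        With the L bit clear: 1 = strh, 2 = ldrd, 3 = strd. */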
6853 addr = load_reg(s, rn);
6854 if (insn & (1 << 24))
6855 gen_add_datah_offset(s, insn, 0, addr);
6856 address_offset = 0;
6857 if (insn & (1 << 20)) {
6858 /* load */
6859 switch(sh) {
6860 case 1:
6861 tmp = gen_ld16u(addr, IS_USER(s));
6862 break;
6863 case 2:
6864 tmp = gen_ld8s(addr, IS_USER(s));
6865 break;
6866 default:
6867 case 3:
6868 tmp = gen_ld16s(addr, IS_USER(s));
6869 break;
6870 }
6871 load = 1;
6872 } else if (sh & 2) {
6873 /* doubleword */
6874 if (sh & 1) {
6875 /* store */
6876 tmp = load_reg(s, rd);
6877 gen_st32(tmp, addr, IS_USER(s));
6878 tcg_gen_addi_i32(addr, addr, 4);
6879 tmp = load_reg(s, rd + 1);
6880 gen_st32(tmp, addr, IS_USER(s));
6881 load = 0;
6882 } else {
6883 /* load */
6884 tmp = gen_ld32(addr, IS_USER(s));
6885 store_reg(s, rd, tmp);
6886 tcg_gen_addi_i32(addr, addr, 4);
6887 tmp = gen_ld32(addr, IS_USER(s));
6888 rd++;
6889 load = 1;
6890 }
6891 address_offset = -4;
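     /* addr now points at the second word, so bias the writeback
        below by -4 to restore the original base first. */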
6892 } else {
6893 /* store */
6894 tmp = load_reg(s, rd);
6895 gen_st16(tmp, addr, IS_USER(s));
6896 load = 0;
6897 }
6898 /* Perform base writeback before the loaded value to
6899 ensure correct behavior with overlapping index registers.
6900 ldrd with base writeback is undefined if the
6901 destination and index registers overlap. */
6902 if (!(insn & (1 << 24))) {
6903 gen_add_datah_offset(s, insn, address_offset, addr);
6904 store_reg(s, rn, addr);
6905 } else if (insn & (1 << 21)) {
6906 if (address_offset)
6907 tcg_gen_addi_i32(addr, addr, address_offset);
6908 store_reg(s, rn, addr);
6909 } else {
6910 tcg_temp_free_i32(addr);
6911 }
6912 if (load) {
6913 /* Complete the load. */
6914 store_reg(s, rd, tmp);
6915 }
6916 }
6917 break;
6918 case 0x4:
6919 case 0x5:
6920 goto do_ldst;
6921 case 0x6:
6922 case 0x7:
6923 if (insn & (1 << 4)) {
6924 ARCH(6);
6925 /* ARMv6 Media instructions. */
6926 rm = insn & 0xf;
6927 rn = (insn >> 16) & 0xf;
6928 rd = (insn >> 12) & 0xf;
6929 rs = (insn >> 8) & 0xf;
6930 switch ((insn >> 23) & 3) {
6931 case 0: /* Parallel add/subtract. */
6932 op1 = (insn >> 20) & 7;
6933 tmp = load_reg(s, rn);
6934 tmp2 = load_reg(s, rm);
6935 sh = (insn >> 5) & 7;
6936 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6937 goto illegal_op;
6938 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6939 tcg_temp_free_i32(tmp2);
6940 store_reg(s, rd, tmp);
6941 break;
6942 case 1:
6943 if ((insn & 0x00700020) == 0) {
6944 /* Halfword pack. */
6945 tmp = load_reg(s, rn);
6946 tmp2 = load_reg(s, rm);
6947 shift = (insn >> 7) & 0x1f;
6948 if (insn & (1 << 6)) {
6949 /* pkhtb */
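     /* An encoded shift of 0 means ASR #32; shifting by 31 instead
        yields the same sign-replicated result. */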
6950 if (shift == 0)
6951 shift = 31;
6952 tcg_gen_sari_i32(tmp2, tmp2, shift);
6953 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6954 tcg_gen_ext16u_i32(tmp2, tmp2);
6955 } else {
6956 /* pkhbt */
6957 if (shift)
6958 tcg_gen_shli_i32(tmp2, tmp2, shift);
6959 tcg_gen_ext16u_i32(tmp, tmp);
6960 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6961 }
6962 tcg_gen_or_i32(tmp, tmp, tmp2);
6963 tcg_temp_free_i32(tmp2);
6964 store_reg(s, rd, tmp);
6965 } else if ((insn & 0x00200020) == 0x00200000) {
6966 /* [us]sat */
6967 tmp = load_reg(s, rm);
6968 shift = (insn >> 7) & 0x1f;
6969 if (insn & (1 << 6)) {
6970 if (shift == 0)
6971 shift = 31;
6972 tcg_gen_sari_i32(tmp, tmp, shift);
6973 } else {
6974 tcg_gen_shli_i32(tmp, tmp, shift);
6975 }
6976 sh = (insn >> 16) & 0x1f;
6977 tmp2 = tcg_const_i32(sh);
6978 if (insn & (1 << 22))
6979 gen_helper_usat(tmp, tmp, tmp2);
6980 else
6981 gen_helper_ssat(tmp, tmp, tmp2);
6982 tcg_temp_free_i32(tmp2);
6983 store_reg(s, rd, tmp);
6984 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6985 /* [us]sat16 */
6986 tmp = load_reg(s, rm);
6987 sh = (insn >> 16) & 0x1f;
6988 tmp2 = tcg_const_i32(sh);
6989 if (insn & (1 << 22))
6990 gen_helper_usat16(tmp, tmp, tmp2);
6991 else
6992 gen_helper_ssat16(tmp, tmp, tmp2);
6993 tcg_temp_free_i32(tmp2);
6994 store_reg(s, rd, tmp);
6995 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6996 /* Select bytes. */
6997 tmp = load_reg(s, rn);
6998 tmp2 = load_reg(s, rm);
6999 tmp3 = tcg_temp_new_i32();
7000 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7001 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7002 tcg_temp_free_i32(tmp3);
7003 tcg_temp_free_i32(tmp2);
7004 store_reg(s, rd, tmp);
7005 } else if ((insn & 0x000003e0) == 0x00000060) {
7006 tmp = load_reg(s, rm);
7007 shift = (insn >> 10) & 3;
7008 /* ??? In many cases it's not necessary to do a
7009 rotate, a shift is sufficient. */
7010 if (shift != 0)
7011 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7012 op1 = (insn >> 20) & 7;
7013 switch (op1) {
7014 case 0: gen_sxtb16(tmp); break;
7015 case 2: gen_sxtb(tmp); break;
7016 case 3: gen_sxth(tmp); break;
7017 case 4: gen_uxtb16(tmp); break;
7018 case 6: gen_uxtb(tmp); break;
7019 case 7: gen_uxth(tmp); break;
7020 default: goto illegal_op;
7021 }
7022 if (rn != 15) {
7023 tmp2 = load_reg(s, rn);
7024 if ((op1 & 3) == 0) {
7025 gen_add16(tmp, tmp2);
7026 } else {
7027 tcg_gen_add_i32(tmp, tmp, tmp2);
7028 tcg_temp_free_i32(tmp2);
7029 }
7030 }
7031 store_reg(s, rd, tmp);
7032 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7033 /* rev */
7034 tmp = load_reg(s, rm);
7035 if (insn & (1 << 22)) {
7036 if (insn & (1 << 7)) {
7037 gen_revsh(tmp);
7038 } else {
7039 ARCH(6T2);
7040 gen_helper_rbit(tmp, tmp);
7041 }
7042 } else {
7043 if (insn & (1 << 7))
7044 gen_rev16(tmp);
7045 else
7046 tcg_gen_bswap32_i32(tmp, tmp);
7047 }
7048 store_reg(s, rd, tmp);
7049 } else {
7050 goto illegal_op;
7051 }
7052 break;
7053 case 2: /* Multiplies (Type 3). */
7054 tmp = load_reg(s, rm);
7055 tmp2 = load_reg(s, rs);
7056 if (insn & (1 << 20)) {
7057 /* Signed multiply most significant [accumulate].
7058 (SMMUL, SMMLA, SMMLS) */
7059 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7060
7061 if (rd != 15) {
7062 tmp = load_reg(s, rd);
7063 if (insn & (1 << 6)) {
7064 tmp64 = gen_subq_msw(tmp64, tmp);
7065 } else {
7066 tmp64 = gen_addq_msw(tmp64, tmp);
7067 }
7068 }
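     /* Bit 5 is the R (rounding) flag: add 0x80000000 before taking
        the top 32 bits. */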
7069 if (insn & (1 << 5)) {
7070 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7071 }
7072 tcg_gen_shri_i64(tmp64, tmp64, 32);
7073 tmp = tcg_temp_new_i32();
7074 tcg_gen_trunc_i64_i32(tmp, tmp64);
7075 tcg_temp_free_i64(tmp64);
7076 store_reg(s, rn, tmp);
7077 } else {
7078 if (insn & (1 << 5))
7079 gen_swap_half(tmp2);
7080 gen_smul_dual(tmp, tmp2);
7081 if (insn & (1 << 6)) {
7082 /* This subtraction cannot overflow. */
7083 tcg_gen_sub_i32(tmp, tmp, tmp2);
7084 } else {
7085 /* This addition cannot overflow 32 bits;
7086 * however it may overflow considered as a signed
7087 * operation, in which case we must set the Q flag.
7088 */
7089 gen_helper_add_setq(tmp, tmp, tmp2);
7090 }
7091 tcg_temp_free_i32(tmp2);
7092 if (insn & (1 << 22)) {
7093 /* smlald, smlsld */
7094 tmp64 = tcg_temp_new_i64();
7095 tcg_gen_ext_i32_i64(tmp64, tmp);
7096 tcg_temp_free_i32(tmp);
7097 gen_addq(s, tmp64, rd, rn);
7098 gen_storeq_reg(s, rd, rn, tmp64);
7099 tcg_temp_free_i64(tmp64);
7100 } else {
7101 /* smuad, smusd, smlad, smlsd */
7102 if (rd != 15)
7103 {
7104 tmp2 = load_reg(s, rd);
7105 gen_helper_add_setq(tmp, tmp, tmp2);
7106 tcg_temp_free_i32(tmp2);
7107 }
7108 store_reg(s, rn, tmp);
7109 }
7110 }
7111 break;
7112 case 3:
7113 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7114 switch (op1) {
7115 case 0: /* Unsigned sum of absolute differences. */
7116 ARCH(6);
7117 tmp = load_reg(s, rm);
7118 tmp2 = load_reg(s, rs);
7119 gen_helper_usad8(tmp, tmp, tmp2);
7120 tcg_temp_free_i32(tmp2);
7121 if (rd != 15) {
7122 tmp2 = load_reg(s, rd);
7123 tcg_gen_add_i32(tmp, tmp, tmp2);
7124 tcg_temp_free_i32(tmp2);
7125 }
7126 store_reg(s, rn, tmp);
7127 break;
7128 case 0x20: case 0x24: case 0x28: case 0x2c:
7129 /* Bitfield insert/clear. */
7130 ARCH(6T2);
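     /* shift is the lsb; i becomes the field width (msb + 1 - lsb).
        rm == 15 encodes bfc, which inserts zeros. */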
7131 shift = (insn >> 7) & 0x1f;
7132 i = (insn >> 16) & 0x1f;
7133 i = i + 1 - shift;
7134 if (rm == 15) {
7135 tmp = tcg_temp_new_i32();
7136 tcg_gen_movi_i32(tmp, 0);
7137 } else {
7138 tmp = load_reg(s, rm);
7139 }
7140 if (i != 32) {
7141 tmp2 = load_reg(s, rd);
7142 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7143 tcg_temp_free_i32(tmp2);
7144 }
7145 store_reg(s, rd, tmp);
7146 break;
7147 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7148 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7149 ARCH(6T2);
7150 tmp = load_reg(s, rm);
7151 shift = (insn >> 7) & 0x1f;
7152 i = ((insn >> 16) & 0x1f) + 1;
7153 if (shift + i > 32)
7154 goto illegal_op;
7155 if (i < 32) {
7156 if (op1 & 0x20) {
7157 gen_ubfx(tmp, shift, (1u << i) - 1);
7158 } else {
7159 gen_sbfx(tmp, shift, i);
7160 }
7161 }
7162 store_reg(s, rd, tmp);
7163 break;
7164 default:
7165 goto illegal_op;
7166 }
7167 break;
7168 }
7169 break;
7170 }
7171 do_ldst:
7172 /* Check for undefined extension instructions
7173 * per the ARM Bible, i.e.:
7174 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7175 */
7176 sh = (0xf << 20) | (0xf << 4);
7177 if (op1 == 0x7 && ((insn & sh) == sh))
7178 {
7179 goto illegal_op;
7180 }
7181 /* load/store byte/word */
7182 rn = (insn >> 16) & 0xf;
7183 rd = (insn >> 12) & 0xf;
7184 tmp2 = load_reg(s, rn);
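     /* P == 0 with W == 1 selects the translated (T) variants
        (ldrt/ldrbt/strt/strbt), which always make a user-mode access. */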
7185 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7186 if (insn & (1 << 24))
7187 gen_add_data_offset(s, insn, tmp2);
7188 if (insn & (1 << 20)) {
7189 /* load */
7190 if (insn & (1 << 22)) {
7191 tmp = gen_ld8u(tmp2, i);
7192 } else {
7193 tmp = gen_ld32(tmp2, i);
7194 }
7195 } else {
7196 /* store */
7197 tmp = load_reg(s, rd);
7198 if (insn & (1 << 22))
7199 gen_st8(tmp, tmp2, i);
7200 else
7201 gen_st32(tmp, tmp2, i);
7202 }
7203 if (!(insn & (1 << 24))) {
7204 gen_add_data_offset(s, insn, tmp2);
7205 store_reg(s, rn, tmp2);
7206 } else if (insn & (1 << 21)) {
7207 store_reg(s, rn, tmp2);
7208 } else {
7209 tcg_temp_free_i32(tmp2);
7210 }
7211 if (insn & (1 << 20)) {
7212 /* Complete the load. */
7213 if (rd == 15)
7214 gen_bx(s, tmp);
7215 else
7216 store_reg(s, rd, tmp);
7217 }
7218 break;
7219 case 0x08:
7220 case 0x09:
7221 {
7222 int j, n, user, loaded_base;
7223 TCGv loaded_var;
7224 /* load/store multiple words */
7225 /* XXX: store the correct base value if writeback is enabled */
7226 user = 0;
7227 if (insn & (1 << 22)) {
7228 if (IS_USER(s))
7229 goto illegal_op; /* only usable in supervisor mode */
7230
7231 if ((insn & (1 << 15)) == 0)
7232 user = 1;
7233 }
7234 rn = (insn >> 16) & 0xf;
7235 addr = load_reg(s, rn);
7236
7237 /* compute total size */
7238 loaded_base = 0;
7239 TCGV_UNUSED(loaded_var);
7240 n = 0;
7241 for(i=0;i<16;i++) {
7242 if (insn & (1 << i))
7243 n++;
7244 }
7245 /* XXX: test invalid n == 0 case ? */
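     /* The transfer loop below always walks upwards; the decrement
        forms are handled by biasing addr down to the lowest address
        used before starting. */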
7246 if (insn & (1 << 23)) {
7247 if (insn & (1 << 24)) {
7248 /* pre increment */
7249 tcg_gen_addi_i32(addr, addr, 4);
7250 } else {
7251 /* post increment */
7252 }
7253 } else {
7254 if (insn & (1 << 24)) {
7255 /* pre decrement */
7256 tcg_gen_addi_i32(addr, addr, -(n * 4));
7257 } else {
7258 /* post decrement */
7259 if (n != 1)
7260 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7261 }
7262 }
7263 j = 0;
7264 for(i=0;i<16;i++) {
7265 if (insn & (1 << i)) {
7266 if (insn & (1 << 20)) {
7267 /* load */
7268 tmp = gen_ld32(addr, IS_USER(s));
7269 if (i == 15) {
7270 gen_bx(s, tmp);
7271 } else if (user) {
7272 tmp2 = tcg_const_i32(i);
7273 gen_helper_set_user_reg(tmp2, tmp);
7274 tcg_temp_free_i32(tmp2);
7275 tcg_temp_free_i32(tmp);
7276 } else if (i == rn) {
7277 loaded_var = tmp;
7278 loaded_base = 1;
7279 } else {
7280 store_reg(s, i, tmp);
7281 }
7282 } else {
7283 /* store */
7284 if (i == 15) {
7285 /* special case: r15 = PC + 8 */
7286 val = (long)s->pc + 4;
7287 tmp = tcg_temp_new_i32();
7288 tcg_gen_movi_i32(tmp, val);
7289 } else if (user) {
7290 tmp = tcg_temp_new_i32();
7291 tmp2 = tcg_const_i32(i);
7292 gen_helper_get_user_reg(tmp, tmp2);
7293 tcg_temp_free_i32(tmp2);
7294 } else {
7295 tmp = load_reg(s, i);
7296 }
7297 gen_st32(tmp, addr, IS_USER(s));
7298 }
7299 j++;
7300 /* no need to add after the last transfer */
7301 if (j != n)
7302 tcg_gen_addi_i32(addr, addr, 4);
7303 }
7304 }
7305 if (insn & (1 << 21)) {
7306 /* write back */
7307 if (insn & (1 << 23)) {
7308 if (insn & (1 << 24)) {
7309 /* pre increment */
7310 } else {
7311 /* post increment */
7312 tcg_gen_addi_i32(addr, addr, 4);
7313 }
7314 } else {
7315 if (insn & (1 << 24)) {
7316 /* pre decrement */
7317 if (n != 1)
7318 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7319 } else {
7320 /* post decrement */
7321 tcg_gen_addi_i32(addr, addr, -(n * 4));
7322 }
7323 }
7324 store_reg(s, rn, addr);
7325 } else {
7326 tcg_temp_free_i32(addr);
7327 }
7328 if (loaded_base) {
7329 store_reg(s, rn, loaded_var);
7330 }
7331 if ((insn & (1 << 22)) && !user) {
7332 /* Restore CPSR from SPSR. */
7333 tmp = load_cpu_field(spsr);
7334 gen_set_cpsr(tmp, 0xffffffff);
7335 tcg_temp_free_i32(tmp);
7336 s->is_jmp = DISAS_UPDATE;
7337 }
7338 }
7339 break;
7340 case 0xa:
7341 case 0xb:
7342 {
7343 int32_t offset;
7344
7345 /* branch (and link) */
7346 val = (int32_t)s->pc;
7347 if (insn & (1 << 24)) {
7348 tmp = tcg_temp_new_i32();
7349 tcg_gen_movi_i32(tmp, val);
7350 store_reg(s, 14, tmp);
7351 }
7352 offset = (((int32_t)insn << 8) >> 8);
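     /* The target is PC + 8 + (imm24 << 2); s->pc supplies the
        first +4, the second is added here. */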
7353 val += (offset << 2) + 4;
7354 gen_jmp(s, val);
7355 }
7356 break;
7357 case 0xc:
7358 case 0xd:
7359 case 0xe:
7360 /* Coprocessor. */
7361 if (disas_coproc_insn(env, s, insn))
7362 goto illegal_op;
7363 break;
7364 case 0xf:
7365 /* swi */
7366 gen_set_pc_im(s->pc);
7367 s->is_jmp = DISAS_SWI;
7368 break;
7369 default:
7370 illegal_op:
7371 gen_exception_insn(s, 4, EXCP_UDEF);
7372 break;
7373 }
7374 }
7375 }
7376
7377 /* Return true if this is a Thumb-2 logical op. */
7378 static int
7379 thumb2_logic_op(int op)
7380 {
7381 return (op < 8);
7382 }
7383
7384 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7385 then set condition code flags based on the result of the operation.
7386 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7387 to the high bit of T1.
7388 Returns zero if the opcode is valid. */
7389
7390 static int
7391 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7392 {
7393 int logic_cc;
7394
7395 logic_cc = 0;
7396 switch (op) {
7397 case 0: /* and */
7398 tcg_gen_and_i32(t0, t0, t1);
7399 logic_cc = conds;
7400 break;
7401 case 1: /* bic */
7402 tcg_gen_andc_i32(t0, t0, t1);
7403 logic_cc = conds;
7404 break;
7405 case 2: /* orr */
7406 tcg_gen_or_i32(t0, t0, t1);
7407 logic_cc = conds;
7408 break;
7409 case 3: /* orn */
7410 tcg_gen_orc_i32(t0, t0, t1);
7411 logic_cc = conds;
7412 break;
7413 case 4: /* eor */
7414 tcg_gen_xor_i32(t0, t0, t1);
7415 logic_cc = conds;
7416 break;
7417 case 8: /* add */
7418 if (conds)
7419 gen_helper_add_cc(t0, t0, t1);
7420 else
7421 tcg_gen_add_i32(t0, t0, t1);
7422 break;
7423 case 10: /* adc */
7424 if (conds)
7425 gen_helper_adc_cc(t0, t0, t1);
7426 else
7427 gen_adc(t0, t1);
7428 break;
7429 case 11: /* sbc */
7430 if (conds)
7431 gen_helper_sbc_cc(t0, t0, t1);
7432 else
7433 gen_sub_carry(t0, t0, t1);
7434 break;
7435 case 13: /* sub */
7436 if (conds)
7437 gen_helper_sub_cc(t0, t0, t1);
7438 else
7439 tcg_gen_sub_i32(t0, t0, t1);
7440 break;
7441 case 14: /* rsb */
7442 if (conds)
7443 gen_helper_sub_cc(t0, t1, t0);
7444 else
7445 tcg_gen_sub_i32(t0, t1, t0);
7446 break;
7447 default: /* 5, 6, 7, 9, 12, 15. */
7448 return 1;
7449 }
7450 if (logic_cc) {
7451 gen_logic_CC(t0);
7452 if (shifter_out)
7453 gen_set_CF_bit31(t1);
7454 }
7455 return 0;
7456 }
7457
7458 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7459 is not legal. */
7460 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7461 {
7462 uint32_t insn, imm, shift, offset;
7463 uint32_t rd, rn, rm, rs;
7464 TCGv tmp;
7465 TCGv tmp2;
7466 TCGv tmp3;
7467 TCGv addr;
7468 TCGv_i64 tmp64;
7469 int op;
7470 int shiftop;
7471 int conds;
7472 int logic_cc;
7473
7474 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7475 || arm_feature (env, ARM_FEATURE_M))) {
7476 /* Thumb-1 cores may need to treat bl and blx as a pair of
7477 16-bit instructions to get correct prefetch abort behavior. */
7478 insn = insn_hw1;
7479 if ((insn & (1 << 12)) == 0) {
7480 /* Second half of blx. */
7481 offset = ((insn & 0x7ff) << 1);
7482 tmp = load_reg(s, 14);
7483 tcg_gen_addi_i32(tmp, tmp, offset);
7484 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7485
7486 tmp2 = tcg_temp_new_i32();
7487 tcg_gen_movi_i32(tmp2, s->pc | 1);
7488 store_reg(s, 14, tmp2);
7489 gen_bx(s, tmp);
7490 return 0;
7491 }
7492 if (insn & (1 << 11)) {
7493 /* Second half of bl. */
7494 offset = ((insn & 0x7ff) << 1) | 1;
7495 tmp = load_reg(s, 14);
7496 tcg_gen_addi_i32(tmp, tmp, offset);
7497
7498 tmp2 = tcg_temp_new_i32();
7499 tcg_gen_movi_i32(tmp2, s->pc | 1);
7500 store_reg(s, 14, tmp2);
7501 gen_bx(s, tmp);
7502 return 0;
7503 }
7504 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7505 /* Instruction spans a page boundary. Implement it as two
7506 16-bit instructions in case the second half causes a
7507 prefetch abort. */
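     /* First half of bl/blx: LR accumulates PC + (signextend(imm11) << 12),
        where the PC value is s->pc + 2 (this halfword's address + 4). */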
7508 offset = ((int32_t)insn << 21) >> 9;
7509 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7510 return 0;
7511 }
7512 /* Fall through to 32-bit decode. */
7513 }
7514
7515 insn = lduw_code(s->pc);
7516 s->pc += 2;
7517 insn |= (uint32_t)insn_hw1 << 16;
7518
7519 if ((insn & 0xf800e800) != 0xf000e800) {
7520 ARCH(6T2);
7521 }
7522
7523 rn = (insn >> 16) & 0xf;
7524 rs = (insn >> 12) & 0xf;
7525 rd = (insn >> 8) & 0xf;
7526 rm = insn & 0xf;
7527 switch ((insn >> 25) & 0xf) {
7528 case 0: case 1: case 2: case 3:
7529 /* 16-bit instructions. Should never happen. */
7530 abort();
7531 case 4:
7532 if (insn & (1 << 22)) {
7533 /* Other load/store, table branch. */
7534 if (insn & 0x01200000) {
7535 /* Load/store doubleword. */
7536 if (rn == 15) {
7537 addr = tcg_temp_new_i32();
7538 tcg_gen_movi_i32(addr, s->pc & ~3);
7539 } else {
7540 addr = load_reg(s, rn);
7541 }
7542 offset = (insn & 0xff) * 4;
7543 if ((insn & (1 << 23)) == 0)
7544 offset = -offset;
7545 if (insn & (1 << 24)) {
7546 tcg_gen_addi_i32(addr, addr, offset);
7547 offset = 0;
7548 }
7549 if (insn & (1 << 20)) {
7550 /* ldrd */
7551 tmp = gen_ld32(addr, IS_USER(s));
7552 store_reg(s, rs, tmp);
7553 tcg_gen_addi_i32(addr, addr, 4);
7554 tmp = gen_ld32(addr, IS_USER(s));
7555 store_reg(s, rd, tmp);
7556 } else {
7557 /* strd */
7558 tmp = load_reg(s, rs);
7559 gen_st32(tmp, addr, IS_USER(s));
7560 tcg_gen_addi_i32(addr, addr, 4);
7561 tmp = load_reg(s, rd);
7562 gen_st32(tmp, addr, IS_USER(s));
7563 }
7564 if (insn & (1 << 21)) {
7565 /* Base writeback. */
7566 if (rn == 15)
7567 goto illegal_op;
7568 tcg_gen_addi_i32(addr, addr, offset - 4);
7569 store_reg(s, rn, addr);
7570 } else {
7571 tcg_temp_free_i32(addr);
7572 }
7573 } else if ((insn & (1 << 23)) == 0) {
7574 /* Load/store exclusive word. */
7575 addr = tcg_temp_local_new();
7576 load_reg_var(s, addr, rn);
7577 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7578 if (insn & (1 << 20)) {
7579 gen_load_exclusive(s, rs, 15, addr, 2);
7580 } else {
7581 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7582 }
7583 tcg_temp_free(addr);
7584 } else if ((insn & (1 << 6)) == 0) {
7585 /* Table Branch. */
7586 if (rn == 15) {
7587 addr = tcg_temp_new_i32();
7588 tcg_gen_movi_i32(addr, s->pc);
7589 } else {
7590 addr = load_reg(s, rn);
7591 }
7592 tmp = load_reg(s, rm);
7593 tcg_gen_add_i32(addr, addr, tmp);
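     /* tbh indexes a table of halfwords, so rm is added a second
        time to scale the index by 2. */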
7594 if (insn & (1 << 4)) {
7595 /* tbh */
7596 tcg_gen_add_i32(addr, addr, tmp);
7597 tcg_temp_free_i32(tmp);
7598 tmp = gen_ld16u(addr, IS_USER(s));
7599 } else { /* tbb */
7600 tcg_temp_free_i32(tmp);
7601 tmp = gen_ld8u(addr, IS_USER(s));
7602 }
7603 tcg_temp_free_i32(addr);
7604 tcg_gen_shli_i32(tmp, tmp, 1);
7605 tcg_gen_addi_i32(tmp, tmp, s->pc);
7606 store_reg(s, 15, tmp);
7607 } else {
7608 /* Load/store exclusive byte/halfword/doubleword. */
7609 ARCH(7);
7610 op = (insn >> 4) & 0x3;
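     /* op = insn[5:4]: 0 = byte, 1 = halfword, 3 = doubleword pair;
        2 is unallocated. */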
7611 if (op == 2) {
7612 goto illegal_op;
7613 }
7614 addr = tcg_temp_local_new();
7615 load_reg_var(s, addr, rn);
7616 if (insn & (1 << 20)) {
7617 gen_load_exclusive(s, rs, rd, addr, op);
7618 } else {
7619 gen_store_exclusive(s, rm, rs, rd, addr, op);
7620 }
7621 tcg_temp_free(addr);
7622 }
7623 } else {
7624 /* Load/store multiple, RFE, SRS. */
7625 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7626 /* Not available in user mode. */
7627 if (IS_USER(s))
7628 goto illegal_op;
7629 if (insn & (1 << 20)) {
7630 /* rfe */
7631 addr = load_reg(s, rn);
7632 if ((insn & (1 << 24)) == 0)
7633 tcg_gen_addi_i32(addr, addr, -8);
7634 /* Load PC into tmp and CPSR into tmp2. */
7635 tmp = gen_ld32(addr, 0);
7636 tcg_gen_addi_i32(addr, addr, 4);
7637 tmp2 = gen_ld32(addr, 0);
7638 if (insn & (1 << 21)) {
7639 /* Base writeback. */
7640 if (insn & (1 << 24)) {
7641 tcg_gen_addi_i32(addr, addr, 4);
7642 } else {
7643 tcg_gen_addi_i32(addr, addr, -4);
7644 }
7645 store_reg(s, rn, addr);
7646 } else {
7647 tcg_temp_free_i32(addr);
7648 }
7649 gen_rfe(s, tmp, tmp2);
7650 } else {
7651 /* srs */
7652 op = (insn & 0x1f);
7653 addr = tcg_temp_new_i32();
7654 tmp = tcg_const_i32(op);
7655 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7656 tcg_temp_free_i32(tmp);
7657 if ((insn & (1 << 24)) == 0) {
7658 tcg_gen_addi_i32(addr, addr, -8);
7659 }
7660 tmp = load_reg(s, 14);
7661 gen_st32(tmp, addr, 0);
7662 tcg_gen_addi_i32(addr, addr, 4);
7663 tmp = tcg_temp_new_i32();
7664 gen_helper_cpsr_read(tmp);
7665 gen_st32(tmp, addr, 0);
7666 if (insn & (1 << 21)) {
7667 if ((insn & (1 << 24)) == 0) {
7668 tcg_gen_addi_i32(addr, addr, -4);
7669 } else {
7670 tcg_gen_addi_i32(addr, addr, 4);
7671 }
7672 tmp = tcg_const_i32(op);
7673 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7674 tcg_temp_free_i32(tmp);
7675 } else {
7676 tcg_temp_free_i32(addr);
7677 }
7678 }
7679 } else {
7680 int i;
7681 /* Load/store multiple. */
7682 addr = load_reg(s, rn);
7683 offset = 0;
7684 for (i = 0; i < 16; i++) {
7685 if (insn & (1 << i))
7686 offset += 4;
7687 }
7688 if (insn & (1 << 24)) {
7689 tcg_gen_addi_i32(addr, addr, -offset);
7690 }
7691
7692 for (i = 0; i < 16; i++) {
7693 if ((insn & (1 << i)) == 0)
7694 continue;
7695 if (insn & (1 << 20)) {
7696 /* Load. */
7697 tmp = gen_ld32(addr, IS_USER(s));
7698 if (i == 15) {
7699 gen_bx(s, tmp);
7700 } else {
7701 store_reg(s, i, tmp);
7702 }
7703 } else {
7704 /* Store. */
7705 tmp = load_reg(s, i);
7706 gen_st32(tmp, addr, IS_USER(s));
7707 }
7708 tcg_gen_addi_i32(addr, addr, 4);
7709 }
7710 if (insn & (1 << 21)) {
7711 /* Base register writeback. */
7712 if (insn & (1 << 24)) {
7713 tcg_gen_addi_i32(addr, addr, -offset);
7714 }
7715 /* Fault if writeback register is in register list. */
7716 if (insn & (1 << rn))
7717 goto illegal_op;
7718 store_reg(s, rn, addr);
7719 } else {
7720 tcg_temp_free_i32(addr);
7721 }
7722 }
7723 }
7724 break;
7725 case 5:
7726
7727 op = (insn >> 21) & 0xf;
7728 if (op == 6) {
7729 /* Halfword pack. */
7730 tmp = load_reg(s, rn);
7731 tmp2 = load_reg(s, rm);
7732 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7733 if (insn & (1 << 5)) {
7734 /* pkhtb */
7735 if (shift == 0)
7736 shift = 31;
7737 tcg_gen_sari_i32(tmp2, tmp2, shift);
7738 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7739 tcg_gen_ext16u_i32(tmp2, tmp2);
7740 } else {
7741 /* pkhbt */
7742 if (shift)
7743 tcg_gen_shli_i32(tmp2, tmp2, shift);
7744 tcg_gen_ext16u_i32(tmp, tmp);
7745 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7746 }
7747 tcg_gen_or_i32(tmp, tmp, tmp2);
7748 tcg_temp_free_i32(tmp2);
7749 store_reg(s, rd, tmp);
7750 } else {
7751 /* Data processing register constant shift. */
7752 if (rn == 15) {
7753 tmp = tcg_temp_new_i32();
7754 tcg_gen_movi_i32(tmp, 0);
7755 } else {
7756 tmp = load_reg(s, rn);
7757 }
7758 tmp2 = load_reg(s, rm);
7759
7760 shiftop = (insn >> 4) & 3;
7761 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7762 conds = (insn & (1 << 20)) != 0;
7763 logic_cc = (conds && thumb2_logic_op(op));
7764 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7765 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7766 goto illegal_op;
7767 tcg_temp_free_i32(tmp2);
7768 if (rd != 15) {
7769 store_reg(s, rd, tmp);
7770 } else {
7771 tcg_temp_free_i32(tmp);
7772 }
7773 }
7774 break;
7775 case 13: /* Misc data processing. */
7776 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7777 if (op < 4 && (insn & 0xf000) != 0xf000)
7778 goto illegal_op;
7779 switch (op) {
7780 case 0: /* Register controlled shift. */
7781 tmp = load_reg(s, rn);
7782 tmp2 = load_reg(s, rm);
7783 if ((insn & 0x70) != 0)
7784 goto illegal_op;
7785 op = (insn >> 21) & 3;
7786 logic_cc = (insn & (1 << 20)) != 0;
7787 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7788 if (logic_cc)
7789 gen_logic_CC(tmp);
7790 store_reg_bx(env, s, rd, tmp);
7791 break;
7792 case 1: /* Sign/zero extend. */
7793 tmp = load_reg(s, rm);
7794 shift = (insn >> 4) & 3;
7795 /* ??? In many cases it's not necessary to do a
7796 rotate, a shift is sufficient. */
7797 if (shift != 0)
7798 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7799 op = (insn >> 20) & 7;
7800 switch (op) {
7801 case 0: gen_sxth(tmp); break;
7802 case 1: gen_uxth(tmp); break;
7803 case 2: gen_sxtb16(tmp); break;
7804 case 3: gen_uxtb16(tmp); break;
7805 case 4: gen_sxtb(tmp); break;
7806 case 5: gen_uxtb(tmp); break;
7807 default: goto illegal_op;
7808 }
7809 if (rn != 15) {
7810 tmp2 = load_reg(s, rn);
7811 if ((op >> 1) == 1) {
7812 gen_add16(tmp, tmp2);
7813 } else {
7814 tcg_gen_add_i32(tmp, tmp, tmp2);
7815 tcg_temp_free_i32(tmp2);
7816 }
7817 }
7818 store_reg(s, rd, tmp);
7819 break;
7820 case 2: /* SIMD add/subtract. */
7821 op = (insn >> 20) & 7;
7822 shift = (insn >> 4) & 7;
7823 if ((op & 3) == 3 || (shift & 3) == 3)
7824 goto illegal_op;
7825 tmp = load_reg(s, rn);
7826 tmp2 = load_reg(s, rm);
7827 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7828 tcg_temp_free_i32(tmp2);
7829 store_reg(s, rd, tmp);
7830 break;
7831 case 3: /* Other data processing. */
7832 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7833 if (op < 4) {
7834 /* Saturating add/subtract. */
7835 tmp = load_reg(s, rn);
7836 tmp2 = load_reg(s, rm);
7837 if (op & 1)
7838 gen_helper_double_saturate(tmp, tmp);
7839 if (op & 2)
7840 gen_helper_sub_saturate(tmp, tmp2, tmp);
7841 else
7842 gen_helper_add_saturate(tmp, tmp, tmp2);
7843 tcg_temp_free_i32(tmp2);
7844 } else {
7845 tmp = load_reg(s, rn);
7846 switch (op) {
7847 case 0x0a: /* rbit */
7848 gen_helper_rbit(tmp, tmp);
7849 break;
7850 case 0x08: /* rev */
7851 tcg_gen_bswap32_i32(tmp, tmp);
7852 break;
7853 case 0x09: /* rev16 */
7854 gen_rev16(tmp);
7855 break;
7856 case 0x0b: /* revsh */
7857 gen_revsh(tmp);
7858 break;
7859 case 0x10: /* sel */
7860 tmp2 = load_reg(s, rm);
7861 tmp3 = tcg_temp_new_i32();
7862 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7863 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7864 tcg_temp_free_i32(tmp3);
7865 tcg_temp_free_i32(tmp2);
7866 break;
7867 case 0x18: /* clz */
7868 gen_helper_clz(tmp, tmp);
7869 break;
7870 default:
7871 goto illegal_op;
7872 }
7873 }
7874 store_reg(s, rd, tmp);
7875 break;
7876 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7877 op = (insn >> 4) & 0xf;
7878 tmp = load_reg(s, rn);
7879 tmp2 = load_reg(s, rm);
7880 switch ((insn >> 20) & 7) {
7881 case 0: /* 32 x 32 -> 32 */
7882 tcg_gen_mul_i32(tmp, tmp, tmp2);
7883 tcg_temp_free_i32(tmp2);
7884 if (rs != 15) {
7885 tmp2 = load_reg(s, rs);
7886 if (op)
7887 tcg_gen_sub_i32(tmp, tmp2, tmp);
7888 else
7889 tcg_gen_add_i32(tmp, tmp, tmp2);
7890 tcg_temp_free_i32(tmp2);
7891 }
7892 break;
7893 case 1: /* 16 x 16 -> 32 */
7894 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7895 tcg_temp_free_i32(tmp2);
7896 if (rs != 15) {
7897 tmp2 = load_reg(s, rs);
7898 gen_helper_add_setq(tmp, tmp, tmp2);
7899 tcg_temp_free_i32(tmp2);
7900 }
7901 break;
7902 case 2: /* Dual multiply add. */
7903 case 4: /* Dual multiply subtract. */
7904 if (op)
7905 gen_swap_half(tmp2);
7906 gen_smul_dual(tmp, tmp2);
7907 if (insn & (1 << 22)) {
7908 /* This subtraction cannot overflow. */
7909 tcg_gen_sub_i32(tmp, tmp, tmp2);
7910 } else {
7911 /* This addition cannot overflow 32 bits;
7912 * however it may overflow considered as a signed
7913 * operation, in which case we must set the Q flag.
7914 */
7915 gen_helper_add_setq(tmp, tmp, tmp2);
7916 }
7917 tcg_temp_free_i32(tmp2);
7918 if (rs != 15)
7919 {
7920 tmp2 = load_reg(s, rs);
7921 gen_helper_add_setq(tmp, tmp, tmp2);
7922 tcg_temp_free_i32(tmp2);
7923 }
7924 break;
7925 case 3: /* 32 * 16 -> 32msb */
7926 if (op)
7927 tcg_gen_sari_i32(tmp2, tmp2, 16);
7928 else
7929 gen_sxth(tmp2);
7930 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7931 tcg_gen_shri_i64(tmp64, tmp64, 16);
7932 tmp = tcg_temp_new_i32();
7933 tcg_gen_trunc_i64_i32(tmp, tmp64);
7934 tcg_temp_free_i64(tmp64);
7935 if (rs != 15)
7936 {
7937 tmp2 = load_reg(s, rs);
7938 gen_helper_add_setq(tmp, tmp, tmp2);
7939 tcg_temp_free_i32(tmp2);
7940 }
7941 break;
7942 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7943 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7944 if (rs != 15) {
7945 tmp = load_reg(s, rs);
7946 if (insn & (1 << 20)) {
7947 tmp64 = gen_addq_msw(tmp64, tmp);
7948 } else {
7949 tmp64 = gen_subq_msw(tmp64, tmp);
7950 }
7951 }
7952 if (insn & (1 << 4)) {
7953 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7954 }
7955 tcg_gen_shri_i64(tmp64, tmp64, 32);
7956 tmp = tcg_temp_new_i32();
7957 tcg_gen_trunc_i64_i32(tmp, tmp64);
7958 tcg_temp_free_i64(tmp64);
7959 break;
7960 case 7: /* Unsigned sum of absolute differences. */
7961 gen_helper_usad8(tmp, tmp, tmp2);
7962 tcg_temp_free_i32(tmp2);
7963 if (rs != 15) {
7964 tmp2 = load_reg(s, rs);
7965 tcg_gen_add_i32(tmp, tmp, tmp2);
7966 tcg_temp_free_i32(tmp2);
7967 }
7968 break;
7969 }
7970 store_reg(s, rd, tmp);
7971 break;
7972 case 6: case 7: /* 64-bit multiply, Divide. */
7973 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7974 tmp = load_reg(s, rn);
7975 tmp2 = load_reg(s, rm);
7976 if ((op & 0x50) == 0x10) {
7977 /* sdiv, udiv */
7978 if (!arm_feature(env, ARM_FEATURE_DIV))
7979 goto illegal_op;
7980 if (op & 0x20)
7981 gen_helper_udiv(tmp, tmp, tmp2);
7982 else
7983 gen_helper_sdiv(tmp, tmp, tmp2);
7984 tcg_temp_free_i32(tmp2);
7985 store_reg(s, rd, tmp);
7986 } else if ((op & 0xe) == 0xc) {
7987 /* Dual multiply accumulate long. */
7988 if (op & 1)
7989 gen_swap_half(tmp2);
7990 gen_smul_dual(tmp, tmp2);
7991 if (op & 0x10) {
7992 tcg_gen_sub_i32(tmp, tmp, tmp2);
7993 } else {
7994 tcg_gen_add_i32(tmp, tmp, tmp2);
7995 }
7996 tcg_temp_free_i32(tmp2);
7997 /* BUGFIX */
7998 tmp64 = tcg_temp_new_i64();
7999 tcg_gen_ext_i32_i64(tmp64, tmp);
8000 tcg_temp_free_i32(tmp);
8001 gen_addq(s, tmp64, rs, rd);
8002 gen_storeq_reg(s, rs, rd, tmp64);
8003 tcg_temp_free_i64(tmp64);
8004 } else {
8005 if (op & 0x20) {
8006 /* Unsigned 64-bit multiply */
8007 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8008 } else {
8009 if (op & 8) {
8010 /* smlalxy */
8011 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8012 tcg_temp_free_i32(tmp2);
8013 tmp64 = tcg_temp_new_i64();
8014 tcg_gen_ext_i32_i64(tmp64, tmp);
8015 tcg_temp_free_i32(tmp);
8016 } else {
8017 /* Signed 64-bit multiply */
8018 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8019 }
8020 }
8021 if (op & 4) {
8022 /* umaal */
8023 gen_addq_lo(s, tmp64, rs);
8024 gen_addq_lo(s, tmp64, rd);
8025 } else if (op & 0x40) {
8026 /* 64-bit accumulate. */
8027 gen_addq(s, tmp64, rs, rd);
8028 }
8029 gen_storeq_reg(s, rs, rd, tmp64);
8030 tcg_temp_free_i64(tmp64);
8031 }
8032 break;
8033 }
8034 break;
8035 case 6: case 7: case 14: case 15:
8036 /* Coprocessor. */
8037 if (((insn >> 24) & 3) == 3) {
8038 /* Translate into the equivalent ARM encoding. */
8039 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8040 if (disas_neon_data_insn(env, s, insn))
8041 goto illegal_op;
8042 } else {
8043 if (insn & (1 << 28))
8044 goto illegal_op;
8045 if (disas_coproc_insn (env, s, insn))
8046 goto illegal_op;
8047 }
8048 break;
8049 case 8: case 9: case 10: case 11:
8050 if (insn & (1 << 15)) {
8051 /* Branches, misc control. */
8052 if (insn & 0x5000) {
8053 /* Unconditional branch. */
8054 /* signextend(hw1[10:0]) -> offset[:12]. */
8055 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8056 /* hw1[10:0] -> offset[11:1]. */
8057 offset |= (insn & 0x7ff) << 1;
8058 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8059 offset[24:22] already have the same value because of the
8060 sign extension above. */
8061 offset ^= ((~insn) & (1 << 13)) << 10;
8062 offset ^= ((~insn) & (1 << 11)) << 11;
8063
8064 if (insn & (1 << 14)) {
8065 /* Branch and link. */
8066 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8067 }
8068
8069 offset += s->pc;
8070 if (insn & (1 << 12)) {
8071 /* b/bl */
8072 gen_jmp(s, offset);
8073 } else {
8074 /* blx */
8075 offset &= ~(uint32_t)2;
8076 gen_bx_im(s, offset);
8077 }
8078 } else if (((insn >> 23) & 7) == 7) {
8079 /* Misc control */
8080 if (insn & (1 << 13))
8081 goto illegal_op;
8082
8083 if (insn & (1 << 26)) {
8084 /* Secure monitor call (v6Z) */
8085 goto illegal_op; /* not implemented. */
8086 } else {
8087 op = (insn >> 20) & 7;
8088 switch (op) {
8089 case 0: /* msr cpsr. */
8090 if (IS_M(env)) {
8091 tmp = load_reg(s, rn);
8092 addr = tcg_const_i32(insn & 0xff);
8093 gen_helper_v7m_msr(cpu_env, addr, tmp);
8094 tcg_temp_free_i32(addr);
8095 tcg_temp_free_i32(tmp);
8096 gen_lookup_tb(s);
8097 break;
8098 }
8099 /* fall through */
8100 case 1: /* msr spsr. */
8101 if (IS_M(env))
8102 goto illegal_op;
8103 tmp = load_reg(s, rn);
8104 if (gen_set_psr(s,
8105 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8106 op == 1, tmp))
8107 goto illegal_op;
8108 break;
8109 case 2: /* cps, nop-hint. */
8110 if (((insn >> 8) & 7) == 0) {
8111 gen_nop_hint(s, insn & 0xff);
8112 }
8113 /* Implemented as NOP in user mode. */
8114 if (IS_USER(s))
8115 break;
8116 offset = 0;
8117 imm = 0;
8118 if (insn & (1 << 10)) {
8119 if (insn & (1 << 7))
8120 offset |= CPSR_A;
8121 if (insn & (1 << 6))
8122 offset |= CPSR_I;
8123 if (insn & (1 << 5))
8124 offset |= CPSR_F;
8125 if (insn & (1 << 9))
8126 imm = CPSR_A | CPSR_I | CPSR_F;
8127 }
8128 if (insn & (1 << 8)) {
8129 offset |= 0x1f;
8130 imm |= (insn & 0x1f);
8131 }
8132 if (offset) {
8133 gen_set_psr_im(s, offset, 0, imm);
8134 }
8135 break;
8136 case 3: /* Special control operations. */
8137 ARCH(7);
8138 op = (insn >> 4) & 0xf;
8139 switch (op) {
8140 case 2: /* clrex */
8141 gen_clrex(s);
8142 break;
8143 case 4: /* dsb */
8144 case 5: /* dmb */
8145 case 6: /* isb */
8146 /* These execute as NOPs. */
8147 break;
8148 default:
8149 goto illegal_op;
8150 }
8151 break;
8152 case 4: /* bxj */
8153 /* Trivial implementation equivalent to bx. */
8154 tmp = load_reg(s, rn);
8155 gen_bx(s, tmp);
8156 break;
8157 case 5: /* Exception return. */
8158 if (IS_USER(s)) {
8159 goto illegal_op;
8160 }
8161 if (rn != 14 || rd != 15) {
8162 goto illegal_op;
8163 }
8164 tmp = load_reg(s, rn);
8165 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8166 gen_exception_return(s, tmp);
8167 break;
8168 case 6: /* mrs cpsr. */
8169 tmp = tcg_temp_new_i32();
8170 if (IS_M(env)) {
8171 addr = tcg_const_i32(insn & 0xff);
8172 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8173 tcg_temp_free_i32(addr);
8174 } else {
8175 gen_helper_cpsr_read(tmp);
8176 }
8177 store_reg(s, rd, tmp);
8178 break;
8179 case 7: /* mrs spsr. */
8180 /* Not accessible in user mode. */
8181 if (IS_USER(s) || IS_M(env))
8182 goto illegal_op;
8183 tmp = load_cpu_field(spsr);
8184 store_reg(s, rd, tmp);
8185 break;
8186 }
8187 }
8188 } else {
8189 /* Conditional branch. */
8190 op = (insn >> 22) & 0xf;
8191 /* Generate a conditional jump to next instruction. */
8192 s->condlabel = gen_new_label();
8193 gen_test_cc(op ^ 1, s->condlabel);
8194 s->condjmp = 1;
8195
8196 /* offset[11:1] = insn[10:0] */
8197 offset = (insn & 0x7ff) << 1;
8198 /* offset[17:12] = insn[21:16]. */
8199 offset |= (insn & 0x003f0000) >> 4;
8200 /* offset[31:20] = insn[26]. */
8201 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8202 /* offset[18] = insn[13]. */
8203 offset |= (insn & (1 << 13)) << 5;
8204 /* offset[19] = insn[11]. */
8205 offset |= (insn & (1 << 11)) << 8;
8206
8207 /* jump to the offset */
8208 gen_jmp(s, s->pc + offset);
8209 }
8210 } else {
8211 /* Data processing immediate. */
8212 if (insn & (1 << 25)) {
8213 if (insn & (1 << 24)) {
8214 if (insn & (1 << 20))
8215 goto illegal_op;
8216 /* Bitfield/Saturate. */
8217 op = (insn >> 21) & 7;
8218 imm = insn & 0x1f;
8219 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8220 if (rn == 15) {
8221 tmp = tcg_temp_new_i32();
8222 tcg_gen_movi_i32(tmp, 0);
8223 } else {
8224 tmp = load_reg(s, rn);
8225 }
8226 switch (op) {
8227 case 2: /* Signed bitfield extract. */
8228 imm++;
8229 if (shift + imm > 32)
8230 goto illegal_op;
8231 if (imm < 32)
8232 gen_sbfx(tmp, shift, imm);
8233 break;
8234 case 6: /* Unsigned bitfield extract. */
8235 imm++;
8236 if (shift + imm > 32)
8237 goto illegal_op;
8238 if (imm < 32)
8239 gen_ubfx(tmp, shift, (1u << imm) - 1);
8240 break;
8241 case 3: /* Bitfield insert/clear. */
8242 if (imm < shift)
8243 goto illegal_op;
8244 imm = imm + 1 - shift;
8245 if (imm != 32) {
8246 tmp2 = load_reg(s, rd);
8247 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8248 tcg_temp_free_i32(tmp2);
8249 }
8250 break;
8251 case 7:
8252 goto illegal_op;
8253 default: /* Saturate. */
8254 if (shift) {
8255 if (op & 1)
8256 tcg_gen_sari_i32(tmp, tmp, shift);
8257 else
8258 tcg_gen_shli_i32(tmp, tmp, shift);
8259 }
8260 tmp2 = tcg_const_i32(imm);
8261 if (op & 4) {
8262 /* Unsigned. */
8263 if ((op & 1) && shift == 0)
8264 gen_helper_usat16(tmp, tmp, tmp2);
8265 else
8266 gen_helper_usat(tmp, tmp, tmp2);
8267 } else {
8268 /* Signed. */
8269 if ((op & 1) && shift == 0)
8270 gen_helper_ssat16(tmp, tmp, tmp2);
8271 else
8272 gen_helper_ssat(tmp, tmp, tmp2);
8273 }
8274 tcg_temp_free_i32(tmp2);
8275 break;
8276 }
8277 store_reg(s, rd, tmp);
8278 } else {
8279 imm = ((insn & 0x04000000) >> 15)
8280 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8281 if (insn & (1 << 22)) {
8282 /* 16-bit immediate. */
8283 imm |= (insn >> 4) & 0xf000;
8284 if (insn & (1 << 23)) {
8285 /* movt */
8286 tmp = load_reg(s, rd);
8287 tcg_gen_ext16u_i32(tmp, tmp);
8288 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8289 } else {
8290 /* movw */
8291 tmp = tcg_temp_new_i32();
8292 tcg_gen_movi_i32(tmp, imm);
8293 }
8294 } else {
8295 /* Add/sub 12-bit immediate. */
8296 if (rn == 15) {
8297 offset = s->pc & ~(uint32_t)3;
8298 if (insn & (1 << 23))
8299 offset -= imm;
8300 else
8301 offset += imm;
8302 tmp = tcg_temp_new_i32();
8303 tcg_gen_movi_i32(tmp, offset);
8304 } else {
8305 tmp = load_reg(s, rn);
8306 if (insn & (1 << 23))
8307 tcg_gen_subi_i32(tmp, tmp, imm);
8308 else
8309 tcg_gen_addi_i32(tmp, tmp, imm);
8310 }
8311 }
8312 store_reg(s, rd, tmp);
8313 }
8314 } else {
8315 int shifter_out = 0;
8316 /* modified 12-bit immediate. */
8317 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8318 imm = (insn & 0xff);
8319 switch (shift) {
8320 case 0: /* XY */
8321 /* Nothing to do. */
8322 break;
8323 case 1: /* 00XY00XY */
8324 imm |= imm << 16;
8325 break;
8326 case 2: /* XY00XY00 */
8327 imm |= imm << 16;
8328 imm <<= 8;
8329 break;
8330 case 3: /* XYXYXYXY */
8331 imm |= imm << 16;
8332 imm |= imm << 8;
8333 break;
8334 default: /* Rotated constant. */
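     /* The 5-bit rotation is i:imm3:imm8[7] and the constant is imm8
        with bit 7 forced to 1, rotated right by it. The rotation is
        always >= 8 here, so a left shift by (32 - shift) implements
        the ROR: e.g. imm8 = 0x55 with rotation 9 gives 0xd5 << 23
        = 0x6a800000. */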
8335 shift = (shift << 1) | (imm >> 7);
8336 imm |= 0x80;
8337 imm = imm << (32 - shift);
8338 shifter_out = 1;
8339 break;
8340 }
8341 tmp2 = tcg_temp_new_i32();
8342 tcg_gen_movi_i32(tmp2, imm);
8343 rn = (insn >> 16) & 0xf;
8344 if (rn == 15) {
8345 tmp = tcg_temp_new_i32();
8346 tcg_gen_movi_i32(tmp, 0);
8347 } else {
8348 tmp = load_reg(s, rn);
8349 }
8350 op = (insn >> 21) & 0xf;
8351 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8352 shifter_out, tmp, tmp2))
8353 goto illegal_op;
8354 tcg_temp_free_i32(tmp2);
8355 rd = (insn >> 8) & 0xf;
8356 if (rd != 15) {
8357 store_reg(s, rd, tmp);
8358 } else {
8359 tcg_temp_free_i32(tmp);
8360 }
8361 }
8362 }
8363 break;
8364 case 12: /* Load/store single data item. */
8365 {
8366 int postinc = 0;
8367 int writeback = 0;
8368 int user;
8369 if ((insn & 0x01100000) == 0x01000000) {
8370 if (disas_neon_ls_insn(env, s, insn))
8371 goto illegal_op;
8372 break;
8373 }
8374 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8375 if (rs == 15) {
8376 if (!(insn & (1 << 20))) {
8377 goto illegal_op;
8378 }
8379 if (op != 2) {
8380 /* Byte or halfword load space with dest == r15: memory hints.
8381 * Catch them early so we don't emit pointless addressing code.
8382 * This space is a mix of:
8383 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8384 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8385 * cores)
8386 * unallocated hints, which must be treated as NOPs
8387 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8388 * which is easiest for the decoding logic
8389 * Some space which must UNDEF
8390 */
8391 int op1 = (insn >> 23) & 3;
8392 int op2 = (insn >> 6) & 0x3f;
8393 if (op & 2) {
8394 goto illegal_op;
8395 }
8396 if (rn == 15) {
8397 /* UNPREDICTABLE or unallocated hint */
8398 return 0;
8399 }
8400 if (op1 & 1) {
8401 return 0; /* PLD* or unallocated hint */
8402 }
8403 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8404 return 0; /* PLD* or unallocated hint */
8405 }
8406 /* UNDEF space, or an UNPREDICTABLE */
8407 return 1;
8408 }
8409 }
8410 user = IS_USER(s);
8411 if (rn == 15) {
8412 addr = tcg_temp_new_i32();
8413 /* PC relative. */
8414 /* s->pc has already been incremented by 4. */
8415 imm = s->pc & 0xfffffffc;
8416 if (insn & (1 << 23))
8417 imm += insn & 0xfff;
8418 else
8419 imm -= insn & 0xfff;
8420 tcg_gen_movi_i32(addr, imm);
8421 } else {
8422 addr = load_reg(s, rn);
8423 if (insn & (1 << 23)) {
8424 /* Positive offset. */
8425 imm = insn & 0xfff;
8426 tcg_gen_addi_i32(addr, addr, imm);
8427 } else {
8428 imm = insn & 0xff;
8429 switch ((insn >> 8) & 0xf) {
8430 case 0x0: /* Shifted Register. */
8431 shift = (insn >> 4) & 0xf;
8432 if (shift > 3) {
8433 tcg_temp_free_i32(addr);
8434 goto illegal_op;
8435 }
8436 tmp = load_reg(s, rm);
8437 if (shift)
8438 tcg_gen_shli_i32(tmp, tmp, shift);
8439 tcg_gen_add_i32(addr, addr, tmp);
8440 tcg_temp_free_i32(tmp);
8441 break;
8442 case 0xc: /* Negative offset. */
8443 tcg_gen_addi_i32(addr, addr, -imm);
8444 break;
8445 case 0xe: /* User privilege. */
8446 tcg_gen_addi_i32(addr, addr, imm);
8447 user = 1;
8448 break;
8449 case 0x9: /* Post-decrement. */
8450 imm = -imm;
8451 /* Fall through. */
8452 case 0xb: /* Post-increment. */
8453 postinc = 1;
8454 writeback = 1;
8455 break;
8456 case 0xd: /* Pre-decrement. */
8457 imm = -imm;
8458 /* Fall through. */
8459 case 0xf: /* Pre-increment. */
8460 tcg_gen_addi_i32(addr, addr, imm);
8461 writeback = 1;
8462 break;
8463 default:
8464 tcg_temp_free_i32(addr);
8465 goto illegal_op;
8466 }
8467 }
8468 }
8469 if (insn & (1 << 20)) {
8470 /* Load. */
8471 switch (op) {
8472 case 0: tmp = gen_ld8u(addr, user); break;
8473 case 4: tmp = gen_ld8s(addr, user); break;
8474 case 1: tmp = gen_ld16u(addr, user); break;
8475 case 5: tmp = gen_ld16s(addr, user); break;
8476 case 2: tmp = gen_ld32(addr, user); break;
8477 default:
8478 tcg_temp_free_i32(addr);
8479 goto illegal_op;
8480 }
8481 if (rs == 15) {
8482 gen_bx(s, tmp);
8483 } else {
8484 store_reg(s, rs, tmp);
8485 }
8486 } else {
8487 /* Store. */
8488 tmp = load_reg(s, rs);
8489 switch (op) {
8490 case 0: gen_st8(tmp, addr, user); break;
8491 case 1: gen_st16(tmp, addr, user); break;
8492 case 2: gen_st32(tmp, addr, user); break;
8493 default:
8494 tcg_temp_free_i32(addr);
8495 goto illegal_op;
8496 }
8497 }
8498 if (postinc)
8499 tcg_gen_addi_i32(addr, addr, imm);
8500 if (writeback) {
8501 store_reg(s, rn, addr);
8502 } else {
8503 tcg_temp_free_i32(addr);
8504 }
8505 }
8506 break;
8507 default:
8508 goto illegal_op;
8509 }
8510 return 0;
8511 illegal_op:
8512 return 1;
8513 }
8514
8515 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8516 {
8517 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8518 int32_t offset;
8519 int i;
8520 TCGv tmp;
8521 TCGv tmp2;
8522 TCGv addr;
8523
8524 if (s->condexec_mask) {
8525 cond = s->condexec_cond;
8526 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8527 s->condlabel = gen_new_label();
8528 gen_test_cc(cond ^ 1, s->condlabel);
8529 s->condjmp = 1;
8530 }
8531 }
8532
8533 insn = lduw_code(s->pc);
8534 s->pc += 2;
8535
8536 switch (insn >> 12) {
8537 case 0: case 1:
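        /* shift (immediate), add and subtract: op selects lsl/lsr/asr,
           or, for op == 3, the register/immediate add and subtract forms. */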
        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative. Bit 1 of PC is ignored. */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_helper_sub_cc(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3: /* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                gen_bx(s, tmp);
                break;
            }
            break;
        }

        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc(tmp, tmp2);
            else
                gen_helper_adc_cc(tmp, tmp, tmp2);
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
            else
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_helper_sub_cc(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_helper_sub_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_helper_add_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;

    case 5:
        /* load/store register offset. */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        tcg_temp_free_i32(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
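        /* The 5-bit immediate in bits [10:6] is a word offset; the mask
           below extracts it already scaled by 4. */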
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
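        /* Byte accesses take the 5-bit immediate in bits [10:6] unscaled. */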
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
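        /* Halfword accesses scale the 5-bit immediate in bits [10:6] by 2;
           the mask below extracts it pre-scaled. */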
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored. */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;

        case 2: /* sign/zero extend. */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address. */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
                gen_bx(s, tmp);
            break;

        case 1: case 3: case 9: case 11: /* cbz/cbnz (compare and branch) */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
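            /* Assemble the branch offset: imm5 comes from bits [7:3] and
               the high bit i from bit 9, giving a halfword-scaled
               (i:imm5:'0') displacement. */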
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint. */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then. */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
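            /* condexec_cond holds the top three bits of the first condition;
               the low bit travels as the top bit of the 5-bit mask and is
               pulled in as the block advances (see the IT advance code in
               gen_intermediate_code_internal). */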
            /* No actual code generated for this insn, just set up state. */
            break;

        case 0xe: /* bkpt */
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;

        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                /* PRIMASK */
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                /* FAULTMASK */
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                tcg_temp_free_i32(tmp);
                gen_lookup_tb(s);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;
                gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback: skipped if the base register is in the
           register list. */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
        break;

    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
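        /* Sign-extend the 8-bit immediate; it counts halfwords, hence
           the left shift by one below. */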
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
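        /* Sign-extend the 11-bit immediate; it counts halfwords, and the
           extra +2 completes the Thumb PC-plus-4 bias. */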
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF);
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF);
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
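    /* Note: the IT mask is kept pre-shifted one bit to the left of its
       CPUState encoding; it is shifted back down when recorded in
       gen_opc_condexec_bits[] below. */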
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (e.g. a data abort on a load)
     * then the CPUState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. gen_pc_load() then uses this to restore
     * the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page. */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so we know we are not in a
               conditional execution block. */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so we know we are not in a
               conditional execution block. */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
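            /* Record the IT state alongside the PC so that both can be
               restored if we exit this TB unexpectedly mid-block (see the
               condexec note above). */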
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4)
                                        | (dc->condexec_mask >> 1);
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
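                /* Advance the IT state machine: shift the next condition
                   bit out of the mask into the low bit of condexec_cond. */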
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place. */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU. */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps. */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}

void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}