1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
30 #include "tcg-op.h"
31 #include "qemu-log.h"
32
33 #include "helpers.h"
34 #define GEN_HELPER 1
35 #include "helpers.h"
36
37 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
38 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
39 /* currently all emulated v5 cores are also v5TE, so don't bother */
40 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
41 #define ENABLE_ARCH_5J 0
42 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
43 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
44 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
45 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
46
47 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
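      /* e.g. ARCH(6T2) makes the decoder treat the instruction as illegal
         on cores without the Thumb-2 feature. */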
48
49 /* internal defines */
50 typedef struct DisasContext {
51 target_ulong pc;
52 int is_jmp;
53 /* Nonzero if this instruction has been conditionally skipped. */
54 int condjmp;
55 /* The label that will be jumped to when the instruction is skipped. */
56 int condlabel;
  58     /* Thumb-2 conditional execution bits. */
58 int condexec_mask;
59 int condexec_cond;
60 struct TranslationBlock *tb;
61 int singlestep_enabled;
62 int thumb;
63 #if !defined(CONFIG_USER_ONLY)
64 int user;
65 #endif
66 int vfp_enabled;
67 int vec_len;
68 int vec_stride;
69 } DisasContext;
70
71 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
72
73 #if defined(CONFIG_USER_ONLY)
74 #define IS_USER(s) 1
75 #else
76 #define IS_USER(s) (s->user)
77 #endif
78
79 /* These instructions trap after executing, so defer them until after the
  80    conditional execution state has been updated. */
81 #define DISAS_WFI 4
82 #define DISAS_SWI 5
83
84 static TCGv_ptr cpu_env;
85 /* We reuse the same 64-bit temporaries for efficiency. */
86 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
87 static TCGv_i32 cpu_R[16];
88 static TCGv_i32 cpu_exclusive_addr;
89 static TCGv_i32 cpu_exclusive_val;
90 static TCGv_i32 cpu_exclusive_high;
91 #ifdef CONFIG_USER_ONLY
92 static TCGv_i32 cpu_exclusive_test;
93 static TCGv_i32 cpu_exclusive_info;
94 #endif
95
96 /* FIXME: These should be removed. */
97 static TCGv cpu_F0s, cpu_F1s;
98 static TCGv_i64 cpu_F0d, cpu_F1d;
99
100 #include "gen-icount.h"
101
102 static const char *regnames[] =
103 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
105
106 /* initialize TCG globals. */
107 void arm_translate_init(void)
108 {
109 int i;
110
111 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
112
113 for (i = 0; i < 16; i++) {
114 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUState, regs[i]),
116 regnames[i]);
117 }
118 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_addr), "exclusive_addr");
120 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUState, exclusive_val), "exclusive_val");
122 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUState, exclusive_high), "exclusive_high");
124 #ifdef CONFIG_USER_ONLY
125 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUState, exclusive_test), "exclusive_test");
127 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUState, exclusive_info), "exclusive_info");
129 #endif
130
131 #define GEN_HELPER 2
132 #include "helpers.h"
133 }
134
135 static inline TCGv load_cpu_offset(int offset)
136 {
137 TCGv tmp = tcg_temp_new_i32();
138 tcg_gen_ld_i32(tmp, cpu_env, offset);
139 return tmp;
140 }
141
142 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
143
144 static inline void store_cpu_offset(TCGv var, int offset)
145 {
146 tcg_gen_st_i32(var, cpu_env, offset);
147 tcg_temp_free_i32(var);
148 }
149
150 #define store_cpu_field(var, name) \
151 store_cpu_offset(var, offsetof(CPUState, name))
152
153 /* Set a variable to the value of a CPU register. */
154 static void load_reg_var(DisasContext *s, TCGv var, int reg)
155 {
156 if (reg == 15) {
157 uint32_t addr;
 158         /* normally, since we have already advanced the PC past this insn, we only need to add one insn's length */
159 if (s->thumb)
160 addr = (long)s->pc + 2;
161 else
162 addr = (long)s->pc + 4;
163 tcg_gen_movi_i32(var, addr);
164 } else {
165 tcg_gen_mov_i32(var, cpu_R[reg]);
166 }
167 }
168
169 /* Create a new temporary and set it to the value of a CPU register. */
170 static inline TCGv load_reg(DisasContext *s, int reg)
171 {
172 TCGv tmp = tcg_temp_new_i32();
173 load_reg_var(s, tmp, reg);
174 return tmp;
175 }
176
177 /* Set a CPU register. The source must be a temporary and will be
178 marked as dead. */
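      /* Writes to r15 clear bit 0 and force the TB to end with a jump. */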
179 static void store_reg(DisasContext *s, int reg, TCGv var)
180 {
181 if (reg == 15) {
182 tcg_gen_andi_i32(var, var, ~1);
183 s->is_jmp = DISAS_JUMP;
184 }
185 tcg_gen_mov_i32(cpu_R[reg], var);
186 tcg_temp_free_i32(var);
187 }
188
189 /* Value extensions. */
190 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
191 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
192 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
193 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
194
195 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
196 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
197
198
199 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
200 {
201 TCGv tmp_mask = tcg_const_i32(mask);
202 gen_helper_cpsr_write(var, tmp_mask);
203 tcg_temp_free_i32(tmp_mask);
204 }
205 /* Set NZCV flags from the high 4 bits of var. */
206 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
207
208 static void gen_exception(int excp)
209 {
210 TCGv tmp = tcg_temp_new_i32();
211 tcg_gen_movi_i32(tmp, excp);
212 gen_helper_exception(tmp);
213 tcg_temp_free_i32(tmp);
214 }
215
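      /* Dual signed 16x16->32 multiply: on return a holds the product of the
         low halfwords and b holds the product of the high halfwords, both
         treated as signed.  Both inputs are clobbered. */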
216 static void gen_smul_dual(TCGv a, TCGv b)
217 {
218 TCGv tmp1 = tcg_temp_new_i32();
219 TCGv tmp2 = tcg_temp_new_i32();
220 tcg_gen_ext16s_i32(tmp1, a);
221 tcg_gen_ext16s_i32(tmp2, b);
222 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
223 tcg_temp_free_i32(tmp2);
224 tcg_gen_sari_i32(a, a, 16);
225 tcg_gen_sari_i32(b, b, 16);
226 tcg_gen_mul_i32(b, b, a);
227 tcg_gen_mov_i32(a, tmp1);
228 tcg_temp_free_i32(tmp1);
229 }
230
231 /* Byteswap each halfword. */
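      /* i.e.  tmp = (var >> 8) & 0x00ff00ff;
               var = (var << 8) & 0xff00ff00;
               var |= tmp;  */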
232 static void gen_rev16(TCGv var)
233 {
234 TCGv tmp = tcg_temp_new_i32();
235 tcg_gen_shri_i32(tmp, var, 8);
236 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
237 tcg_gen_shli_i32(var, var, 8);
238 tcg_gen_andi_i32(var, var, 0xff00ff00);
239 tcg_gen_or_i32(var, var, tmp);
240 tcg_temp_free_i32(tmp);
241 }
242
243 /* Byteswap low halfword and sign extend. */
244 static void gen_revsh(TCGv var)
245 {
246 tcg_gen_ext16u_i32(var, var);
247 tcg_gen_bswap16_i32(var, var);
248 tcg_gen_ext16s_i32(var, var);
249 }
250
251 /* Unsigned bitfield extract. */
252 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
253 {
254 if (shift)
255 tcg_gen_shri_i32(var, var, shift);
256 tcg_gen_andi_i32(var, var, mask);
257 }
258
259 /* Signed bitfield extract. */
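      /* Shift the field down to bit 0, then sign extend it: for an n-bit
         value x, (x ^ (1 << (n - 1))) - (1 << (n - 1)) yields the sign
         extended result. */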
260 static void gen_sbfx(TCGv var, int shift, int width)
261 {
262 uint32_t signbit;
263
264 if (shift)
265 tcg_gen_sari_i32(var, var, shift);
266 if (shift + width < 32) {
267 signbit = 1u << (width - 1);
268 tcg_gen_andi_i32(var, var, (1u << width) - 1);
269 tcg_gen_xori_i32(var, var, signbit);
270 tcg_gen_subi_i32(var, var, signbit);
271 }
272 }
273
274 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
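      /* i.e.  dest = (base & ~(mask << shift)) | ((val & mask) << shift); */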
275 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
276 {
277 tcg_gen_andi_i32(val, val, mask);
278 tcg_gen_shli_i32(val, val, shift);
279 tcg_gen_andi_i32(base, base, ~(mask << shift));
280 tcg_gen_or_i32(dest, base, val);
281 }
282
 283 /* Return (b << 32) + a. Mark inputs as dead. */
284 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
285 {
286 TCGv_i64 tmp64 = tcg_temp_new_i64();
287
288 tcg_gen_extu_i32_i64(tmp64, b);
289 tcg_temp_free_i32(b);
290 tcg_gen_shli_i64(tmp64, tmp64, 32);
291 tcg_gen_add_i64(a, tmp64, a);
292
293 tcg_temp_free_i64(tmp64);
294 return a;
295 }
296
297 /* Return (b << 32) - a. Mark inputs as dead. */
298 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
299 {
300 TCGv_i64 tmp64 = tcg_temp_new_i64();
301
302 tcg_gen_extu_i32_i64(tmp64, b);
303 tcg_temp_free_i32(b);
304 tcg_gen_shli_i64(tmp64, tmp64, 32);
305 tcg_gen_sub_i64(a, tmp64, a);
306
307 tcg_temp_free_i64(tmp64);
308 return a;
309 }
310
311 /* FIXME: Most targets have native widening multiplication.
312 It would be good to use that instead of a full wide multiply. */
313 /* 32x32->64 multiply. Marks inputs as dead. */
314 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
315 {
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
318
319 tcg_gen_extu_i32_i64(tmp1, a);
320 tcg_temp_free_i32(a);
321 tcg_gen_extu_i32_i64(tmp2, b);
322 tcg_temp_free_i32(b);
323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
324 tcg_temp_free_i64(tmp2);
325 return tmp1;
326 }
327
328 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
329 {
330 TCGv_i64 tmp1 = tcg_temp_new_i64();
331 TCGv_i64 tmp2 = tcg_temp_new_i64();
332
333 tcg_gen_ext_i32_i64(tmp1, a);
334 tcg_temp_free_i32(a);
335 tcg_gen_ext_i32_i64(tmp2, b);
336 tcg_temp_free_i32(b);
337 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
338 tcg_temp_free_i64(tmp2);
339 return tmp1;
340 }
341
342 /* Swap low and high halfwords. */
343 static void gen_swap_half(TCGv var)
344 {
345 TCGv tmp = tcg_temp_new_i32();
346 tcg_gen_shri_i32(tmp, var, 16);
347 tcg_gen_shli_i32(var, var, 16);
348 tcg_gen_or_i32(var, var, tmp);
349 tcg_temp_free_i32(tmp);
350 }
351
 352 /* Dual 16-bit add.  The result is placed in t0; t1 is marked as dead.
353 tmp = (t0 ^ t1) & 0x8000;
354 t0 &= ~0x8000;
355 t1 &= ~0x8000;
356 t0 = (t0 + t1) ^ tmp;
357 */
358
359 static void gen_add16(TCGv t0, TCGv t1)
360 {
361 TCGv tmp = tcg_temp_new_i32();
362 tcg_gen_xor_i32(tmp, t0, t1);
363 tcg_gen_andi_i32(tmp, tmp, 0x8000);
364 tcg_gen_andi_i32(t0, t0, ~0x8000);
365 tcg_gen_andi_i32(t1, t1, ~0x8000);
366 tcg_gen_add_i32(t0, t0, t1);
367 tcg_gen_xor_i32(t0, t0, tmp);
368 tcg_temp_free_i32(tmp);
369 tcg_temp_free_i32(t1);
370 }
371
372 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
373
374 /* Set CF to the top bit of var. */
375 static void gen_set_CF_bit31(TCGv var)
376 {
377 TCGv tmp = tcg_temp_new_i32();
378 tcg_gen_shri_i32(tmp, var, 31);
379 gen_set_CF(tmp);
380 tcg_temp_free_i32(tmp);
381 }
382
383 /* Set N and Z flags from var. */
384 static inline void gen_logic_CC(TCGv var)
385 {
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
387 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
388 }
389
390 /* T0 += T1 + CF. */
391 static void gen_adc(TCGv t0, TCGv t1)
392 {
393 TCGv tmp;
394 tcg_gen_add_i32(t0, t0, t1);
395 tmp = load_cpu_field(CF);
396 tcg_gen_add_i32(t0, t0, tmp);
397 tcg_temp_free_i32(tmp);
398 }
399
400 /* dest = T0 + T1 + CF. */
401 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
402 {
403 TCGv tmp;
404 tcg_gen_add_i32(dest, t0, t1);
405 tmp = load_cpu_field(CF);
406 tcg_gen_add_i32(dest, dest, tmp);
407 tcg_temp_free_i32(tmp);
408 }
409
410 /* dest = T0 - T1 + CF - 1. */
411 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
412 {
413 TCGv tmp;
414 tcg_gen_sub_i32(dest, t0, t1);
415 tmp = load_cpu_field(CF);
416 tcg_gen_add_i32(dest, dest, tmp);
417 tcg_gen_subi_i32(dest, dest, 1);
418 tcg_temp_free_i32(tmp);
419 }
420
421 /* FIXME: Implement this natively. */
422 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
423
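      /* Set the carry flag to bit <shift> of var, i.e. the last bit shifted
         out by an immediate shift. */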
424 static void shifter_out_im(TCGv var, int shift)
425 {
426 TCGv tmp = tcg_temp_new_i32();
427 if (shift == 0) {
428 tcg_gen_andi_i32(tmp, var, 1);
429 } else {
430 tcg_gen_shri_i32(tmp, var, shift);
431 if (shift != 31)
432 tcg_gen_andi_i32(tmp, tmp, 1);
433 }
434 gen_set_CF(tmp);
435 tcg_temp_free_i32(tmp);
436 }
437
438 /* Shift by immediate. Includes special handling for shift == 0. */
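      /* In the encoding a shift amount of 0 means LSR #32 or ASR #32 for the
         LSR and ASR cases, RRX (rotate right by one through carry) for the
         rotate case, and a plain move for LSL. */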
439 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
440 {
441 switch (shiftop) {
442 case 0: /* LSL */
443 if (shift != 0) {
444 if (flags)
445 shifter_out_im(var, 32 - shift);
446 tcg_gen_shli_i32(var, var, shift);
447 }
448 break;
449 case 1: /* LSR */
450 if (shift == 0) {
451 if (flags) {
452 tcg_gen_shri_i32(var, var, 31);
453 gen_set_CF(var);
454 }
455 tcg_gen_movi_i32(var, 0);
456 } else {
457 if (flags)
458 shifter_out_im(var, shift - 1);
459 tcg_gen_shri_i32(var, var, shift);
460 }
461 break;
462 case 2: /* ASR */
463 if (shift == 0)
464 shift = 32;
465 if (flags)
466 shifter_out_im(var, shift - 1);
467 if (shift == 32)
468 shift = 31;
469 tcg_gen_sari_i32(var, var, shift);
470 break;
471 case 3: /* ROR/RRX */
472 if (shift != 0) {
473 if (flags)
474 shifter_out_im(var, shift - 1);
475 tcg_gen_rotri_i32(var, var, shift); break;
476 } else {
477 TCGv tmp = load_cpu_field(CF);
478 if (flags)
479 shifter_out_im(var, 0);
480 tcg_gen_shri_i32(var, var, 1);
481 tcg_gen_shli_i32(tmp, tmp, 31);
482 tcg_gen_or_i32(var, var, tmp);
483 tcg_temp_free_i32(tmp);
484 }
485 }
 486 }
487
488 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
489 TCGv shift, int flags)
490 {
491 if (flags) {
492 switch (shiftop) {
493 case 0: gen_helper_shl_cc(var, var, shift); break;
494 case 1: gen_helper_shr_cc(var, var, shift); break;
495 case 2: gen_helper_sar_cc(var, var, shift); break;
496 case 3: gen_helper_ror_cc(var, var, shift); break;
497 }
498 } else {
499 switch (shiftop) {
500 case 0: gen_helper_shl(var, var, shift); break;
501 case 1: gen_helper_shr(var, var, shift); break;
502 case 2: gen_helper_sar(var, var, shift); break;
503 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
504 tcg_gen_rotr_i32(var, var, shift); break;
505 }
506 }
507 tcg_temp_free_i32(shift);
508 }
509
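      /* ARM-encoded parallel add/subtract.  op1 selects the variant
         (1 = signed, 2 = saturating, 3 = signed halving, 5 = unsigned,
         6 = unsigned saturating, 7 = unsigned halving) and op2 selects the
         operation.  The plain signed and unsigned variants also update the
         GE flags, so their helpers take a pointer to env->GE. */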
510 #define PAS_OP(pfx) \
511 switch (op2) { \
512 case 0: gen_pas_helper(glue(pfx,add16)); break; \
513 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
514 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
515 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
516 case 4: gen_pas_helper(glue(pfx,add8)); break; \
517 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
518 }
519 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
520 {
521 TCGv_ptr tmp;
522
523 switch (op1) {
524 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
525 case 1:
526 tmp = tcg_temp_new_ptr();
527 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
528 PAS_OP(s)
529 tcg_temp_free_ptr(tmp);
530 break;
531 case 5:
532 tmp = tcg_temp_new_ptr();
533 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
534 PAS_OP(u)
535 tcg_temp_free_ptr(tmp);
536 break;
537 #undef gen_pas_helper
538 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
539 case 2:
540 PAS_OP(q);
541 break;
542 case 3:
543 PAS_OP(sh);
544 break;
545 case 6:
546 PAS_OP(uq);
547 break;
548 case 7:
549 PAS_OP(uh);
550 break;
551 #undef gen_pas_helper
552 }
553 }
554 #undef PAS_OP
555
 556 /* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings for the parallel add/subtract instructions. */
557 #define PAS_OP(pfx) \
558 switch (op1) { \
559 case 0: gen_pas_helper(glue(pfx,add8)); break; \
560 case 1: gen_pas_helper(glue(pfx,add16)); break; \
561 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
562 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
563 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
564 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
565 }
566 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
567 {
568 TCGv_ptr tmp;
569
570 switch (op2) {
571 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
572 case 0:
573 tmp = tcg_temp_new_ptr();
574 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
575 PAS_OP(s)
576 tcg_temp_free_ptr(tmp);
577 break;
578 case 4:
579 tmp = tcg_temp_new_ptr();
580 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
581 PAS_OP(u)
582 tcg_temp_free_ptr(tmp);
583 break;
584 #undef gen_pas_helper
585 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
586 case 1:
587 PAS_OP(q);
588 break;
589 case 2:
590 PAS_OP(sh);
591 break;
592 case 5:
593 PAS_OP(uq);
594 break;
595 case 6:
596 PAS_OP(uh);
597 break;
598 #undef gen_pas_helper
599 }
600 }
601 #undef PAS_OP
602
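      /* Generate a branch to <label> if condition <cc> holds.  The flags are
         kept in decomposed form: ZF holds a value that is zero iff Z is set,
         NF and VF hold the N and V flags in bit 31, and CF holds 0 or 1. */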
603 static void gen_test_cc(int cc, int label)
604 {
605 TCGv tmp;
606 TCGv tmp2;
607 int inv;
608
609 switch (cc) {
610 case 0: /* eq: Z */
611 tmp = load_cpu_field(ZF);
612 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
613 break;
614 case 1: /* ne: !Z */
615 tmp = load_cpu_field(ZF);
616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
617 break;
618 case 2: /* cs: C */
619 tmp = load_cpu_field(CF);
620 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
621 break;
622 case 3: /* cc: !C */
623 tmp = load_cpu_field(CF);
624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
625 break;
626 case 4: /* mi: N */
627 tmp = load_cpu_field(NF);
628 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
629 break;
630 case 5: /* pl: !N */
631 tmp = load_cpu_field(NF);
632 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
633 break;
634 case 6: /* vs: V */
635 tmp = load_cpu_field(VF);
636 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
637 break;
638 case 7: /* vc: !V */
639 tmp = load_cpu_field(VF);
640 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
641 break;
642 case 8: /* hi: C && !Z */
643 inv = gen_new_label();
644 tmp = load_cpu_field(CF);
645 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
646 tcg_temp_free_i32(tmp);
647 tmp = load_cpu_field(ZF);
648 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
649 gen_set_label(inv);
650 break;
651 case 9: /* ls: !C || Z */
652 tmp = load_cpu_field(CF);
653 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
654 tcg_temp_free_i32(tmp);
655 tmp = load_cpu_field(ZF);
656 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
657 break;
658 case 10: /* ge: N == V -> N ^ V == 0 */
659 tmp = load_cpu_field(VF);
660 tmp2 = load_cpu_field(NF);
661 tcg_gen_xor_i32(tmp, tmp, tmp2);
662 tcg_temp_free_i32(tmp2);
663 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
664 break;
665 case 11: /* lt: N != V -> N ^ V != 0 */
666 tmp = load_cpu_field(VF);
667 tmp2 = load_cpu_field(NF);
668 tcg_gen_xor_i32(tmp, tmp, tmp2);
669 tcg_temp_free_i32(tmp2);
670 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
671 break;
672 case 12: /* gt: !Z && N == V */
673 inv = gen_new_label();
674 tmp = load_cpu_field(ZF);
675 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
676 tcg_temp_free_i32(tmp);
677 tmp = load_cpu_field(VF);
678 tmp2 = load_cpu_field(NF);
679 tcg_gen_xor_i32(tmp, tmp, tmp2);
680 tcg_temp_free_i32(tmp2);
681 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
682 gen_set_label(inv);
683 break;
684 case 13: /* le: Z || N != V */
685 tmp = load_cpu_field(ZF);
686 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
687 tcg_temp_free_i32(tmp);
688 tmp = load_cpu_field(VF);
689 tmp2 = load_cpu_field(NF);
690 tcg_gen_xor_i32(tmp, tmp, tmp2);
691 tcg_temp_free_i32(tmp2);
692 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
693 break;
694 default:
695 fprintf(stderr, "Bad condition code 0x%x\n", cc);
696 abort();
697 }
698 tcg_temp_free_i32(tmp);
699 }
700
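      /* Indexed by data processing opcode: nonzero for the logical operations,
         which set N and Z from the result and C from the shifter carry-out
         rather than from an arithmetic add or subtract. */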
701 static const uint8_t table_logic_cc[16] = {
702 1, /* and */
703 1, /* xor */
704 0, /* sub */
705 0, /* rsb */
706 0, /* add */
707 0, /* adc */
708 0, /* sbc */
709 0, /* rsc */
710 1, /* andl */
711 1, /* xorl */
712 0, /* cmp */
713 0, /* cmn */
714 1, /* orr */
715 1, /* mov */
716 1, /* bic */
717 1, /* mvn */
718 };
719
720 /* Set PC and Thumb state from an immediate address. */
721 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
722 {
723 TCGv tmp;
724
725 s->is_jmp = DISAS_UPDATE;
726 if (s->thumb != (addr & 1)) {
727 tmp = tcg_temp_new_i32();
728 tcg_gen_movi_i32(tmp, addr & 1);
729 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
730 tcg_temp_free_i32(tmp);
731 }
732 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
733 }
734
735 /* Set PC and Thumb state from var. var is marked as dead. */
736 static inline void gen_bx(DisasContext *s, TCGv var)
737 {
738 s->is_jmp = DISAS_UPDATE;
739 tcg_gen_andi_i32(cpu_R[15], var, ~1);
740 tcg_gen_andi_i32(var, var, 1);
741 store_cpu_field(var, thumb);
742 }
743
744 /* Variant of store_reg which uses branch&exchange logic when storing
745 to r15 in ARM architecture v7 and above. The source must be a temporary
746 and will be marked as dead. */
747 static inline void store_reg_bx(CPUState *env, DisasContext *s,
748 int reg, TCGv var)
749 {
750 if (reg == 15 && ENABLE_ARCH_7) {
751 gen_bx(s, var);
752 } else {
753 store_reg(s, reg, var);
754 }
755 }
756
757 /* Variant of store_reg which uses branch&exchange logic when storing
758 * to r15 in ARM architecture v5T and above. This is used for storing
759 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
760 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
761 static inline void store_reg_from_load(CPUState *env, DisasContext *s,
762 int reg, TCGv var)
763 {
764 if (reg == 15 && ENABLE_ARCH_5) {
765 gen_bx(s, var);
766 } else {
767 store_reg(s, reg, var);
768 }
769 }
770
771 static inline TCGv gen_ld8s(TCGv addr, int index)
772 {
773 TCGv tmp = tcg_temp_new_i32();
774 tcg_gen_qemu_ld8s(tmp, addr, index);
775 return tmp;
776 }
777 static inline TCGv gen_ld8u(TCGv addr, int index)
778 {
779 TCGv tmp = tcg_temp_new_i32();
780 tcg_gen_qemu_ld8u(tmp, addr, index);
781 return tmp;
782 }
783 static inline TCGv gen_ld16s(TCGv addr, int index)
784 {
785 TCGv tmp = tcg_temp_new_i32();
786 tcg_gen_qemu_ld16s(tmp, addr, index);
787 return tmp;
788 }
789 static inline TCGv gen_ld16u(TCGv addr, int index)
790 {
791 TCGv tmp = tcg_temp_new_i32();
792 tcg_gen_qemu_ld16u(tmp, addr, index);
793 return tmp;
794 }
795 static inline TCGv gen_ld32(TCGv addr, int index)
796 {
797 TCGv tmp = tcg_temp_new_i32();
798 tcg_gen_qemu_ld32u(tmp, addr, index);
799 return tmp;
800 }
801 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
802 {
803 TCGv_i64 tmp = tcg_temp_new_i64();
804 tcg_gen_qemu_ld64(tmp, addr, index);
805 return tmp;
806 }
807 static inline void gen_st8(TCGv val, TCGv addr, int index)
808 {
809 tcg_gen_qemu_st8(val, addr, index);
810 tcg_temp_free_i32(val);
811 }
812 static inline void gen_st16(TCGv val, TCGv addr, int index)
813 {
814 tcg_gen_qemu_st16(val, addr, index);
815 tcg_temp_free_i32(val);
816 }
817 static inline void gen_st32(TCGv val, TCGv addr, int index)
818 {
819 tcg_gen_qemu_st32(val, addr, index);
820 tcg_temp_free_i32(val);
821 }
822 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
823 {
824 tcg_gen_qemu_st64(val, addr, index);
825 tcg_temp_free_i64(val);
826 }
827
828 static inline void gen_set_pc_im(uint32_t val)
829 {
830 tcg_gen_movi_i32(cpu_R[15], val);
831 }
832
833 /* Force a TB lookup after an instruction that changes the CPU state. */
834 static inline void gen_lookup_tb(DisasContext *s)
835 {
836 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
837 s->is_jmp = DISAS_UPDATE;
838 }
839
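      /* Apply the offset part of an ARM load/store (addressing mode 2) to var:
         either a 12-bit immediate or a shifted register, added or subtracted
         according to the U bit (bit 23). */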
840 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
841 TCGv var)
842 {
843 int val, rm, shift, shiftop;
844 TCGv offset;
845
846 if (!(insn & (1 << 25))) {
847 /* immediate */
848 val = insn & 0xfff;
849 if (!(insn & (1 << 23)))
850 val = -val;
851 if (val != 0)
852 tcg_gen_addi_i32(var, var, val);
853 } else {
854 /* shift/register */
855 rm = (insn) & 0xf;
856 shift = (insn >> 7) & 0x1f;
857 shiftop = (insn >> 5) & 3;
858 offset = load_reg(s, rm);
859 gen_arm_shift_im(offset, shiftop, shift, 0);
860 if (!(insn & (1 << 23)))
861 tcg_gen_sub_i32(var, var, offset);
862 else
863 tcg_gen_add_i32(var, var, offset);
864 tcg_temp_free_i32(offset);
865 }
866 }
867
868 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
869 int extra, TCGv var)
870 {
871 int val, rm;
872 TCGv offset;
873
874 if (insn & (1 << 22)) {
875 /* immediate */
876 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
877 if (!(insn & (1 << 23)))
878 val = -val;
879 val += extra;
880 if (val != 0)
881 tcg_gen_addi_i32(var, var, val);
882 } else {
883 /* register */
884 if (extra)
885 tcg_gen_addi_i32(var, var, extra);
886 rm = (insn) & 0xf;
887 offset = load_reg(s, rm);
888 if (!(insn & (1 << 23)))
889 tcg_gen_sub_i32(var, var, offset);
890 else
891 tcg_gen_add_i32(var, var, offset);
892 tcg_temp_free_i32(offset);
893 }
894 }
895
896 #define VFP_OP2(name) \
897 static inline void gen_vfp_##name(int dp) \
898 { \
899 if (dp) \
900 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
901 else \
902 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
903 }
904
905 VFP_OP2(add)
906 VFP_OP2(sub)
907 VFP_OP2(mul)
908 VFP_OP2(div)
909
910 #undef VFP_OP2
911
912 static inline void gen_vfp_abs(int dp)
913 {
914 if (dp)
915 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
916 else
917 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
918 }
919
920 static inline void gen_vfp_neg(int dp)
921 {
922 if (dp)
923 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
924 else
925 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
926 }
927
928 static inline void gen_vfp_sqrt(int dp)
929 {
930 if (dp)
931 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
932 else
933 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
934 }
935
936 static inline void gen_vfp_cmp(int dp)
937 {
938 if (dp)
939 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
940 else
941 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
942 }
943
944 static inline void gen_vfp_cmpe(int dp)
945 {
946 if (dp)
947 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
948 else
949 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
950 }
951
952 static inline void gen_vfp_F1_ld0(int dp)
953 {
954 if (dp)
955 tcg_gen_movi_i64(cpu_F1d, 0);
956 else
957 tcg_gen_movi_i32(cpu_F1s, 0);
958 }
959
960 static inline void gen_vfp_uito(int dp)
961 {
962 if (dp)
963 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
964 else
965 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
966 }
967
968 static inline void gen_vfp_sito(int dp)
969 {
970 if (dp)
971 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
972 else
973 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
974 }
975
976 static inline void gen_vfp_toui(int dp)
977 {
978 if (dp)
979 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
980 else
981 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
982 }
983
984 static inline void gen_vfp_touiz(int dp)
985 {
986 if (dp)
987 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
988 else
989 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
990 }
991
992 static inline void gen_vfp_tosi(int dp)
993 {
994 if (dp)
995 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
996 else
997 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
998 }
999
1000 static inline void gen_vfp_tosiz(int dp)
1001 {
1002 if (dp)
1003 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
1004 else
1005 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1006 }
1007
1008 #define VFP_GEN_FIX(name) \
1009 static inline void gen_vfp_##name(int dp, int shift) \
1010 { \
1011 TCGv tmp_shift = tcg_const_i32(shift); \
1012 if (dp) \
1013 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
1014 else \
1015 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1016 tcg_temp_free_i32(tmp_shift); \
1017 }
1018 VFP_GEN_FIX(tosh)
1019 VFP_GEN_FIX(tosl)
1020 VFP_GEN_FIX(touh)
1021 VFP_GEN_FIX(toul)
1022 VFP_GEN_FIX(shto)
1023 VFP_GEN_FIX(slto)
1024 VFP_GEN_FIX(uhto)
1025 VFP_GEN_FIX(ulto)
1026 #undef VFP_GEN_FIX
1027
1028 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1029 {
1030 if (dp)
1031 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1032 else
1033 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1034 }
1035
1036 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1037 {
1038 if (dp)
1039 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1040 else
1041 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1042 }
1043
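      /* Return the byte offset of VFP register <reg> within CPUARMState.
         Single precision register n is stored as one half of double
         precision register n >> 1. */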
1044 static inline long
1045 vfp_reg_offset (int dp, int reg)
1046 {
1047 if (dp)
1048 return offsetof(CPUARMState, vfp.regs[reg]);
1049 else if (reg & 1) {
1050 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1051 + offsetof(CPU_DoubleU, l.upper);
1052 } else {
1053 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1054 + offsetof(CPU_DoubleU, l.lower);
1055 }
1056 }
1057
1058 /* Return the offset of a 32-bit piece of a NEON register.
1059    Zero is the least significant end of the register. */
1060 static inline long
1061 neon_reg_offset (int reg, int n)
1062 {
1063 int sreg;
1064 sreg = reg * 2 + n;
1065 return vfp_reg_offset(0, sreg);
1066 }
1067
1068 static TCGv neon_load_reg(int reg, int pass)
1069 {
1070 TCGv tmp = tcg_temp_new_i32();
1071 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1072 return tmp;
1073 }
1074
1075 static void neon_store_reg(int reg, int pass, TCGv var)
1076 {
1077 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1078 tcg_temp_free_i32(var);
1079 }
1080
1081 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1082 {
1083 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1084 }
1085
1086 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1087 {
1088 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1089 }
1090
1091 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1092 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1093 #define tcg_gen_st_f32 tcg_gen_st_i32
1094 #define tcg_gen_st_f64 tcg_gen_st_i64
1095
1096 static inline void gen_mov_F0_vreg(int dp, int reg)
1097 {
1098 if (dp)
1099 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1100 else
1101 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1102 }
1103
1104 static inline void gen_mov_F1_vreg(int dp, int reg)
1105 {
1106 if (dp)
1107 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1108 else
1109 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1110 }
1111
1112 static inline void gen_mov_vreg_F0(int dp, int reg)
1113 {
1114 if (dp)
1115 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1116 else
1117 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1118 }
1119
1120 #define ARM_CP_RW_BIT (1 << 20)
1121
1122 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1123 {
1124 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1125 }
1126
1127 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1128 {
1129 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1130 }
1131
1132 static inline TCGv iwmmxt_load_creg(int reg)
1133 {
1134 TCGv var = tcg_temp_new_i32();
1135 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1136 return var;
1137 }
1138
1139 static inline void iwmmxt_store_creg(int reg, TCGv var)
1140 {
1141 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1142 tcg_temp_free_i32(var);
1143 }
1144
1145 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1146 {
1147 iwmmxt_store_reg(cpu_M0, rn);
1148 }
1149
1150 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1151 {
1152 iwmmxt_load_reg(cpu_M0, rn);
1153 }
1154
1155 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1156 {
1157 iwmmxt_load_reg(cpu_V1, rn);
1158 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1159 }
1160
1161 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1162 {
1163 iwmmxt_load_reg(cpu_V1, rn);
1164 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1165 }
1166
1167 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1168 {
1169 iwmmxt_load_reg(cpu_V1, rn);
1170 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1171 }
1172
1173 #define IWMMXT_OP(name) \
1174 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1175 { \
1176 iwmmxt_load_reg(cpu_V1, rn); \
1177 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1178 }
1179
1180 #define IWMMXT_OP_SIZE(name) \
1181 IWMMXT_OP(name##b) \
1182 IWMMXT_OP(name##w) \
1183 IWMMXT_OP(name##l)
1184
1185 #define IWMMXT_OP_1(name) \
1186 static inline void gen_op_iwmmxt_##name##_M0(void) \
1187 { \
1188 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
1189 }
1190
1191 IWMMXT_OP(maddsq)
1192 IWMMXT_OP(madduq)
1193 IWMMXT_OP(sadb)
1194 IWMMXT_OP(sadw)
1195 IWMMXT_OP(mulslw)
1196 IWMMXT_OP(mulshw)
1197 IWMMXT_OP(mululw)
1198 IWMMXT_OP(muluhw)
1199 IWMMXT_OP(macsw)
1200 IWMMXT_OP(macuw)
1201
1202 IWMMXT_OP_SIZE(unpackl)
1203 IWMMXT_OP_SIZE(unpackh)
1204
1205 IWMMXT_OP_1(unpacklub)
1206 IWMMXT_OP_1(unpackluw)
1207 IWMMXT_OP_1(unpacklul)
1208 IWMMXT_OP_1(unpackhub)
1209 IWMMXT_OP_1(unpackhuw)
1210 IWMMXT_OP_1(unpackhul)
1211 IWMMXT_OP_1(unpacklsb)
1212 IWMMXT_OP_1(unpacklsw)
1213 IWMMXT_OP_1(unpacklsl)
1214 IWMMXT_OP_1(unpackhsb)
1215 IWMMXT_OP_1(unpackhsw)
1216 IWMMXT_OP_1(unpackhsl)
1217
1218 IWMMXT_OP_SIZE(cmpeq)
1219 IWMMXT_OP_SIZE(cmpgtu)
1220 IWMMXT_OP_SIZE(cmpgts)
1221
1222 IWMMXT_OP_SIZE(mins)
1223 IWMMXT_OP_SIZE(minu)
1224 IWMMXT_OP_SIZE(maxs)
1225 IWMMXT_OP_SIZE(maxu)
1226
1227 IWMMXT_OP_SIZE(subn)
1228 IWMMXT_OP_SIZE(addn)
1229 IWMMXT_OP_SIZE(subu)
1230 IWMMXT_OP_SIZE(addu)
1231 IWMMXT_OP_SIZE(subs)
1232 IWMMXT_OP_SIZE(adds)
1233
1234 IWMMXT_OP(avgb0)
1235 IWMMXT_OP(avgb1)
1236 IWMMXT_OP(avgw0)
1237 IWMMXT_OP(avgw1)
1238
1239 IWMMXT_OP(msadb)
1240
1241 IWMMXT_OP(packuw)
1242 IWMMXT_OP(packul)
1243 IWMMXT_OP(packuq)
1244 IWMMXT_OP(packsw)
1245 IWMMXT_OP(packsl)
1246 IWMMXT_OP(packsq)
1247
1248 static void gen_op_iwmmxt_set_mup(void)
1249 {
1250 TCGv tmp;
1251 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1252 tcg_gen_ori_i32(tmp, tmp, 2);
1253 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1254 }
1255
1256 static void gen_op_iwmmxt_set_cup(void)
1257 {
1258 TCGv tmp;
1259 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1260 tcg_gen_ori_i32(tmp, tmp, 1);
1261 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1262 }
1263
1264 static void gen_op_iwmmxt_setpsr_nz(void)
1265 {
1266 TCGv tmp = tcg_temp_new_i32();
1267 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1268 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1269 }
1270
1271 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1272 {
1273 iwmmxt_load_reg(cpu_V1, rn);
1274 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1275 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1276 }
1277
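      /* Compute the address for an iwMMXt load/store into dest.  The 8-bit
         immediate offset is scaled by 4 for word/doubleword accesses, and
         pre- or post-indexed writeback to the base register is applied as
         encoded.  Returns nonzero for an invalid addressing mode. */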
1278 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1279 {
1280 int rd;
1281 uint32_t offset;
1282 TCGv tmp;
1283
1284 rd = (insn >> 16) & 0xf;
1285 tmp = load_reg(s, rd);
1286
1287 offset = (insn & 0xff) << ((insn >> 7) & 2);
1288 if (insn & (1 << 24)) {
1289 /* Pre indexed */
1290 if (insn & (1 << 23))
1291 tcg_gen_addi_i32(tmp, tmp, offset);
1292 else
1293 tcg_gen_addi_i32(tmp, tmp, -offset);
1294 tcg_gen_mov_i32(dest, tmp);
1295 if (insn & (1 << 21))
1296 store_reg(s, rd, tmp);
1297 else
1298 tcg_temp_free_i32(tmp);
1299 } else if (insn & (1 << 21)) {
1300 /* Post indexed */
1301 tcg_gen_mov_i32(dest, tmp);
1302 if (insn & (1 << 23))
1303 tcg_gen_addi_i32(tmp, tmp, offset);
1304 else
1305 tcg_gen_addi_i32(tmp, tmp, -offset);
1306 store_reg(s, rd, tmp);
1307 } else if (!(insn & (1 << 23)))
1308 return 1;
1309 return 0;
1310 }
1311
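      /* Load the shift count for an iwMMXt shift instruction into dest,
         either from one of the wCGR0-wCGR3 control registers or from the
         low word of the specified wR register, masked with <mask>.
         Returns nonzero for an invalid register. */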
1312 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1313 {
1314 int rd = (insn >> 0) & 0xf;
1315 TCGv tmp;
1316
1317 if (insn & (1 << 8)) {
1318 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1319 return 1;
1320 } else {
1321 tmp = iwmmxt_load_creg(rd);
1322 }
1323 } else {
1324 tmp = tcg_temp_new_i32();
1325 iwmmxt_load_reg(cpu_V0, rd);
1326 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1327 }
1328 tcg_gen_andi_i32(tmp, tmp, mask);
1329 tcg_gen_mov_i32(dest, tmp);
1330 tcg_temp_free_i32(tmp);
1331 return 0;
1332 }
1333
1334 /* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
1335    (i.e. an undefined instruction). */
1336 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1337 {
1338 int rd, wrd;
1339 int rdhi, rdlo, rd0, rd1, i;
1340 TCGv addr;
1341 TCGv tmp, tmp2, tmp3;
1342
1343 if ((insn & 0x0e000e00) == 0x0c000000) {
1344 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1345 wrd = insn & 0xf;
1346 rdlo = (insn >> 12) & 0xf;
1347 rdhi = (insn >> 16) & 0xf;
1348 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1349 iwmmxt_load_reg(cpu_V0, wrd);
1350 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1351 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1352 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1353 } else { /* TMCRR */
1354 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1355 iwmmxt_store_reg(cpu_V0, wrd);
1356 gen_op_iwmmxt_set_mup();
1357 }
1358 return 0;
1359 }
1360
1361 wrd = (insn >> 12) & 0xf;
1362 addr = tcg_temp_new_i32();
1363 if (gen_iwmmxt_address(s, insn, addr)) {
1364 tcg_temp_free_i32(addr);
1365 return 1;
1366 }
1367 if (insn & ARM_CP_RW_BIT) {
1368 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1369 tmp = tcg_temp_new_i32();
1370 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1371 iwmmxt_store_creg(wrd, tmp);
1372 } else {
1373 i = 1;
1374 if (insn & (1 << 8)) {
1375 if (insn & (1 << 22)) { /* WLDRD */
1376 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1377 i = 0;
1378 } else { /* WLDRW wRd */
1379 tmp = gen_ld32(addr, IS_USER(s));
1380 }
1381 } else {
1382 if (insn & (1 << 22)) { /* WLDRH */
1383 tmp = gen_ld16u(addr, IS_USER(s));
1384 } else { /* WLDRB */
1385 tmp = gen_ld8u(addr, IS_USER(s));
1386 }
1387 }
1388 if (i) {
1389 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1390 tcg_temp_free_i32(tmp);
1391 }
1392 gen_op_iwmmxt_movq_wRn_M0(wrd);
1393 }
1394 } else {
1395 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1396 tmp = iwmmxt_load_creg(wrd);
1397 gen_st32(tmp, addr, IS_USER(s));
1398 } else {
1399 gen_op_iwmmxt_movq_M0_wRn(wrd);
1400 tmp = tcg_temp_new_i32();
1401 if (insn & (1 << 8)) {
1402 if (insn & (1 << 22)) { /* WSTRD */
1403 tcg_temp_free_i32(tmp);
1404 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1405 } else { /* WSTRW wRd */
1406 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1407 gen_st32(tmp, addr, IS_USER(s));
1408 }
1409 } else {
1410 if (insn & (1 << 22)) { /* WSTRH */
1411 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1412 gen_st16(tmp, addr, IS_USER(s));
1413 } else { /* WSTRB */
1414 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1415 gen_st8(tmp, addr, IS_USER(s));
1416 }
1417 }
1418 }
1419 }
1420 tcg_temp_free_i32(addr);
1421 return 0;
1422 }
1423
1424 if ((insn & 0x0f000000) != 0x0e000000)
1425 return 1;
1426
1427 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1428 case 0x000: /* WOR */
1429 wrd = (insn >> 12) & 0xf;
1430 rd0 = (insn >> 0) & 0xf;
1431 rd1 = (insn >> 16) & 0xf;
1432 gen_op_iwmmxt_movq_M0_wRn(rd0);
1433 gen_op_iwmmxt_orq_M0_wRn(rd1);
1434 gen_op_iwmmxt_setpsr_nz();
1435 gen_op_iwmmxt_movq_wRn_M0(wrd);
1436 gen_op_iwmmxt_set_mup();
1437 gen_op_iwmmxt_set_cup();
1438 break;
1439 case 0x011: /* TMCR */
1440 if (insn & 0xf)
1441 return 1;
1442 rd = (insn >> 12) & 0xf;
1443 wrd = (insn >> 16) & 0xf;
1444 switch (wrd) {
1445 case ARM_IWMMXT_wCID:
1446 case ARM_IWMMXT_wCASF:
1447 break;
1448 case ARM_IWMMXT_wCon:
1449 gen_op_iwmmxt_set_cup();
1450 /* Fall through. */
1451 case ARM_IWMMXT_wCSSF:
1452 tmp = iwmmxt_load_creg(wrd);
1453 tmp2 = load_reg(s, rd);
1454 tcg_gen_andc_i32(tmp, tmp, tmp2);
1455 tcg_temp_free_i32(tmp2);
1456 iwmmxt_store_creg(wrd, tmp);
1457 break;
1458 case ARM_IWMMXT_wCGR0:
1459 case ARM_IWMMXT_wCGR1:
1460 case ARM_IWMMXT_wCGR2:
1461 case ARM_IWMMXT_wCGR3:
1462 gen_op_iwmmxt_set_cup();
1463 tmp = load_reg(s, rd);
1464 iwmmxt_store_creg(wrd, tmp);
1465 break;
1466 default:
1467 return 1;
1468 }
1469 break;
1470 case 0x100: /* WXOR */
1471 wrd = (insn >> 12) & 0xf;
1472 rd0 = (insn >> 0) & 0xf;
1473 rd1 = (insn >> 16) & 0xf;
1474 gen_op_iwmmxt_movq_M0_wRn(rd0);
1475 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1476 gen_op_iwmmxt_setpsr_nz();
1477 gen_op_iwmmxt_movq_wRn_M0(wrd);
1478 gen_op_iwmmxt_set_mup();
1479 gen_op_iwmmxt_set_cup();
1480 break;
1481 case 0x111: /* TMRC */
1482 if (insn & 0xf)
1483 return 1;
1484 rd = (insn >> 12) & 0xf;
1485 wrd = (insn >> 16) & 0xf;
1486 tmp = iwmmxt_load_creg(wrd);
1487 store_reg(s, rd, tmp);
1488 break;
1489 case 0x300: /* WANDN */
1490 wrd = (insn >> 12) & 0xf;
1491 rd0 = (insn >> 0) & 0xf;
1492 rd1 = (insn >> 16) & 0xf;
1493 gen_op_iwmmxt_movq_M0_wRn(rd0);
1494 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1495 gen_op_iwmmxt_andq_M0_wRn(rd1);
1496 gen_op_iwmmxt_setpsr_nz();
1497 gen_op_iwmmxt_movq_wRn_M0(wrd);
1498 gen_op_iwmmxt_set_mup();
1499 gen_op_iwmmxt_set_cup();
1500 break;
1501 case 0x200: /* WAND */
1502 wrd = (insn >> 12) & 0xf;
1503 rd0 = (insn >> 0) & 0xf;
1504 rd1 = (insn >> 16) & 0xf;
1505 gen_op_iwmmxt_movq_M0_wRn(rd0);
1506 gen_op_iwmmxt_andq_M0_wRn(rd1);
1507 gen_op_iwmmxt_setpsr_nz();
1508 gen_op_iwmmxt_movq_wRn_M0(wrd);
1509 gen_op_iwmmxt_set_mup();
1510 gen_op_iwmmxt_set_cup();
1511 break;
1512 case 0x810: case 0xa10: /* WMADD */
1513 wrd = (insn >> 12) & 0xf;
1514 rd0 = (insn >> 0) & 0xf;
1515 rd1 = (insn >> 16) & 0xf;
1516 gen_op_iwmmxt_movq_M0_wRn(rd0);
1517 if (insn & (1 << 21))
1518 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1519 else
1520 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1521 gen_op_iwmmxt_movq_wRn_M0(wrd);
1522 gen_op_iwmmxt_set_mup();
1523 break;
1524 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1525 wrd = (insn >> 12) & 0xf;
1526 rd0 = (insn >> 16) & 0xf;
1527 rd1 = (insn >> 0) & 0xf;
1528 gen_op_iwmmxt_movq_M0_wRn(rd0);
1529 switch ((insn >> 22) & 3) {
1530 case 0:
1531 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1532 break;
1533 case 1:
1534 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1535 break;
1536 case 2:
1537 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1538 break;
1539 case 3:
1540 return 1;
1541 }
1542 gen_op_iwmmxt_movq_wRn_M0(wrd);
1543 gen_op_iwmmxt_set_mup();
1544 gen_op_iwmmxt_set_cup();
1545 break;
1546 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1547 wrd = (insn >> 12) & 0xf;
1548 rd0 = (insn >> 16) & 0xf;
1549 rd1 = (insn >> 0) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0);
1551 switch ((insn >> 22) & 3) {
1552 case 0:
1553 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1554 break;
1555 case 1:
1556 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1557 break;
1558 case 2:
1559 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1560 break;
1561 case 3:
1562 return 1;
1563 }
1564 gen_op_iwmmxt_movq_wRn_M0(wrd);
1565 gen_op_iwmmxt_set_mup();
1566 gen_op_iwmmxt_set_cup();
1567 break;
1568 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1569 wrd = (insn >> 12) & 0xf;
1570 rd0 = (insn >> 16) & 0xf;
1571 rd1 = (insn >> 0) & 0xf;
1572 gen_op_iwmmxt_movq_M0_wRn(rd0);
1573 if (insn & (1 << 22))
1574 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1575 else
1576 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1577 if (!(insn & (1 << 20)))
1578 gen_op_iwmmxt_addl_M0_wRn(wrd);
1579 gen_op_iwmmxt_movq_wRn_M0(wrd);
1580 gen_op_iwmmxt_set_mup();
1581 break;
1582 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1583 wrd = (insn >> 12) & 0xf;
1584 rd0 = (insn >> 16) & 0xf;
1585 rd1 = (insn >> 0) & 0xf;
1586 gen_op_iwmmxt_movq_M0_wRn(rd0);
1587 if (insn & (1 << 21)) {
1588 if (insn & (1 << 20))
1589 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1590 else
1591 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1592 } else {
1593 if (insn & (1 << 20))
1594 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1595 else
1596 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1597 }
1598 gen_op_iwmmxt_movq_wRn_M0(wrd);
1599 gen_op_iwmmxt_set_mup();
1600 break;
1601 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1602 wrd = (insn >> 12) & 0xf;
1603 rd0 = (insn >> 16) & 0xf;
1604 rd1 = (insn >> 0) & 0xf;
1605 gen_op_iwmmxt_movq_M0_wRn(rd0);
1606 if (insn & (1 << 21))
1607 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1608 else
1609 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1610 if (!(insn & (1 << 20))) {
1611 iwmmxt_load_reg(cpu_V1, wrd);
1612 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1613 }
1614 gen_op_iwmmxt_movq_wRn_M0(wrd);
1615 gen_op_iwmmxt_set_mup();
1616 break;
1617 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1618 wrd = (insn >> 12) & 0xf;
1619 rd0 = (insn >> 16) & 0xf;
1620 rd1 = (insn >> 0) & 0xf;
1621 gen_op_iwmmxt_movq_M0_wRn(rd0);
1622 switch ((insn >> 22) & 3) {
1623 case 0:
1624 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1625 break;
1626 case 1:
1627 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1628 break;
1629 case 2:
1630 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1631 break;
1632 case 3:
1633 return 1;
1634 }
1635 gen_op_iwmmxt_movq_wRn_M0(wrd);
1636 gen_op_iwmmxt_set_mup();
1637 gen_op_iwmmxt_set_cup();
1638 break;
1639 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1640 wrd = (insn >> 12) & 0xf;
1641 rd0 = (insn >> 16) & 0xf;
1642 rd1 = (insn >> 0) & 0xf;
1643 gen_op_iwmmxt_movq_M0_wRn(rd0);
1644 if (insn & (1 << 22)) {
1645 if (insn & (1 << 20))
1646 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1647 else
1648 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1649 } else {
1650 if (insn & (1 << 20))
1651 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1652 else
1653 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1654 }
1655 gen_op_iwmmxt_movq_wRn_M0(wrd);
1656 gen_op_iwmmxt_set_mup();
1657 gen_op_iwmmxt_set_cup();
1658 break;
1659 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1660 wrd = (insn >> 12) & 0xf;
1661 rd0 = (insn >> 16) & 0xf;
1662 rd1 = (insn >> 0) & 0xf;
1663 gen_op_iwmmxt_movq_M0_wRn(rd0);
1664 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1665 tcg_gen_andi_i32(tmp, tmp, 7);
1666 iwmmxt_load_reg(cpu_V1, rd1);
1667 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1668 tcg_temp_free_i32(tmp);
1669 gen_op_iwmmxt_movq_wRn_M0(wrd);
1670 gen_op_iwmmxt_set_mup();
1671 break;
1672 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1673 if (((insn >> 6) & 3) == 3)
1674 return 1;
1675 rd = (insn >> 12) & 0xf;
1676 wrd = (insn >> 16) & 0xf;
1677 tmp = load_reg(s, rd);
1678 gen_op_iwmmxt_movq_M0_wRn(wrd);
1679 switch ((insn >> 6) & 3) {
1680 case 0:
1681 tmp2 = tcg_const_i32(0xff);
1682 tmp3 = tcg_const_i32((insn & 7) << 3);
1683 break;
1684 case 1:
1685 tmp2 = tcg_const_i32(0xffff);
1686 tmp3 = tcg_const_i32((insn & 3) << 4);
1687 break;
1688 case 2:
1689 tmp2 = tcg_const_i32(0xffffffff);
1690 tmp3 = tcg_const_i32((insn & 1) << 5);
1691 break;
1692 default:
1693 TCGV_UNUSED(tmp2);
1694 TCGV_UNUSED(tmp3);
1695 }
1696 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1697 tcg_temp_free(tmp3);
1698 tcg_temp_free(tmp2);
1699 tcg_temp_free_i32(tmp);
1700 gen_op_iwmmxt_movq_wRn_M0(wrd);
1701 gen_op_iwmmxt_set_mup();
1702 break;
1703 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1704 rd = (insn >> 12) & 0xf;
1705 wrd = (insn >> 16) & 0xf;
1706 if (rd == 15 || ((insn >> 22) & 3) == 3)
1707 return 1;
1708 gen_op_iwmmxt_movq_M0_wRn(wrd);
1709 tmp = tcg_temp_new_i32();
1710 switch ((insn >> 22) & 3) {
1711 case 0:
1712 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1713 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1714 if (insn & 8) {
1715 tcg_gen_ext8s_i32(tmp, tmp);
1716 } else {
1717 tcg_gen_andi_i32(tmp, tmp, 0xff);
1718 }
1719 break;
1720 case 1:
1721 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1722 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1723 if (insn & 8) {
1724 tcg_gen_ext16s_i32(tmp, tmp);
1725 } else {
1726 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1727 }
1728 break;
1729 case 2:
1730 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1731 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1732 break;
1733 }
1734 store_reg(s, rd, tmp);
1735 break;
1736 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1737 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1738 return 1;
1739 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1740 switch ((insn >> 22) & 3) {
1741 case 0:
1742 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1743 break;
1744 case 1:
1745 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1746 break;
1747 case 2:
1748 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1749 break;
1750 }
1751 tcg_gen_shli_i32(tmp, tmp, 28);
1752 gen_set_nzcv(tmp);
1753 tcg_temp_free_i32(tmp);
1754 break;
1755 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1756 if (((insn >> 6) & 3) == 3)
1757 return 1;
1758 rd = (insn >> 12) & 0xf;
1759 wrd = (insn >> 16) & 0xf;
1760 tmp = load_reg(s, rd);
1761 switch ((insn >> 6) & 3) {
1762 case 0:
1763 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1764 break;
1765 case 1:
1766 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1767 break;
1768 case 2:
1769 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1770 break;
1771 }
1772 tcg_temp_free_i32(tmp);
1773 gen_op_iwmmxt_movq_wRn_M0(wrd);
1774 gen_op_iwmmxt_set_mup();
1775 break;
1776 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1777 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1778 return 1;
1779 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1780 tmp2 = tcg_temp_new_i32();
1781 tcg_gen_mov_i32(tmp2, tmp);
1782 switch ((insn >> 22) & 3) {
1783 case 0:
1784 for (i = 0; i < 7; i ++) {
1785 tcg_gen_shli_i32(tmp2, tmp2, 4);
1786 tcg_gen_and_i32(tmp, tmp, tmp2);
1787 }
1788 break;
1789 case 1:
1790 for (i = 0; i < 3; i ++) {
1791 tcg_gen_shli_i32(tmp2, tmp2, 8);
1792 tcg_gen_and_i32(tmp, tmp, tmp2);
1793 }
1794 break;
1795 case 2:
1796 tcg_gen_shli_i32(tmp2, tmp2, 16);
1797 tcg_gen_and_i32(tmp, tmp, tmp2);
1798 break;
1799 }
1800 gen_set_nzcv(tmp);
1801 tcg_temp_free_i32(tmp2);
1802 tcg_temp_free_i32(tmp);
1803 break;
1804 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1805 wrd = (insn >> 12) & 0xf;
1806 rd0 = (insn >> 16) & 0xf;
1807 gen_op_iwmmxt_movq_M0_wRn(rd0);
1808 switch ((insn >> 22) & 3) {
1809 case 0:
1810 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1811 break;
1812 case 1:
1813 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1814 break;
1815 case 2:
1816 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1817 break;
1818 case 3:
1819 return 1;
1820 }
1821 gen_op_iwmmxt_movq_wRn_M0(wrd);
1822 gen_op_iwmmxt_set_mup();
1823 break;
1824 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1825 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1826 return 1;
1827 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1828 tmp2 = tcg_temp_new_i32();
1829 tcg_gen_mov_i32(tmp2, tmp);
1830 switch ((insn >> 22) & 3) {
1831 case 0:
1832 for (i = 0; i < 7; i ++) {
1833 tcg_gen_shli_i32(tmp2, tmp2, 4);
1834 tcg_gen_or_i32(tmp, tmp, tmp2);
1835 }
1836 break;
1837 case 1:
1838 for (i = 0; i < 3; i ++) {
1839 tcg_gen_shli_i32(tmp2, tmp2, 8);
1840 tcg_gen_or_i32(tmp, tmp, tmp2);
1841 }
1842 break;
1843 case 2:
1844 tcg_gen_shli_i32(tmp2, tmp2, 16);
1845 tcg_gen_or_i32(tmp, tmp, tmp2);
1846 break;
1847 }
1848 gen_set_nzcv(tmp);
1849 tcg_temp_free_i32(tmp2);
1850 tcg_temp_free_i32(tmp);
1851 break;
1852 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1853 rd = (insn >> 12) & 0xf;
1854 rd0 = (insn >> 16) & 0xf;
1855 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1856 return 1;
1857 gen_op_iwmmxt_movq_M0_wRn(rd0);
1858 tmp = tcg_temp_new_i32();
1859 switch ((insn >> 22) & 3) {
1860 case 0:
1861 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1862 break;
1863 case 1:
1864 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1865 break;
1866 case 2:
1867 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1868 break;
1869 }
1870 store_reg(s, rd, tmp);
1871 break;
1872 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1873 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1874 wrd = (insn >> 12) & 0xf;
1875 rd0 = (insn >> 16) & 0xf;
1876 rd1 = (insn >> 0) & 0xf;
1877 gen_op_iwmmxt_movq_M0_wRn(rd0);
1878 switch ((insn >> 22) & 3) {
1879 case 0:
1880 if (insn & (1 << 21))
1881 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1882 else
1883 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1884 break;
1885 case 1:
1886 if (insn & (1 << 21))
1887 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1888 else
1889 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1890 break;
1891 case 2:
1892 if (insn & (1 << 21))
1893 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1894 else
1895 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1896 break;
1897 case 3:
1898 return 1;
1899 }
1900 gen_op_iwmmxt_movq_wRn_M0(wrd);
1901 gen_op_iwmmxt_set_mup();
1902 gen_op_iwmmxt_set_cup();
1903 break;
1904 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1905 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1906 wrd = (insn >> 12) & 0xf;
1907 rd0 = (insn >> 16) & 0xf;
1908 gen_op_iwmmxt_movq_M0_wRn(rd0);
1909 switch ((insn >> 22) & 3) {
1910 case 0:
1911 if (insn & (1 << 21))
1912 gen_op_iwmmxt_unpacklsb_M0();
1913 else
1914 gen_op_iwmmxt_unpacklub_M0();
1915 break;
1916 case 1:
1917 if (insn & (1 << 21))
1918 gen_op_iwmmxt_unpacklsw_M0();
1919 else
1920 gen_op_iwmmxt_unpackluw_M0();
1921 break;
1922 case 2:
1923 if (insn & (1 << 21))
1924 gen_op_iwmmxt_unpacklsl_M0();
1925 else
1926 gen_op_iwmmxt_unpacklul_M0();
1927 break;
1928 case 3:
1929 return 1;
1930 }
1931 gen_op_iwmmxt_movq_wRn_M0(wrd);
1932 gen_op_iwmmxt_set_mup();
1933 gen_op_iwmmxt_set_cup();
1934 break;
1935 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1936 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1937 wrd = (insn >> 12) & 0xf;
1938 rd0 = (insn >> 16) & 0xf;
1939 gen_op_iwmmxt_movq_M0_wRn(rd0);
1940 switch ((insn >> 22) & 3) {
1941 case 0:
1942 if (insn & (1 << 21))
1943 gen_op_iwmmxt_unpackhsb_M0();
1944 else
1945 gen_op_iwmmxt_unpackhub_M0();
1946 break;
1947 case 1:
1948 if (insn & (1 << 21))
1949 gen_op_iwmmxt_unpackhsw_M0();
1950 else
1951 gen_op_iwmmxt_unpackhuw_M0();
1952 break;
1953 case 2:
1954 if (insn & (1 << 21))
1955 gen_op_iwmmxt_unpackhsl_M0();
1956 else
1957 gen_op_iwmmxt_unpackhul_M0();
1958 break;
1959 case 3:
1960 return 1;
1961 }
1962 gen_op_iwmmxt_movq_wRn_M0(wrd);
1963 gen_op_iwmmxt_set_mup();
1964 gen_op_iwmmxt_set_cup();
1965 break;
1966 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1967 case 0x214: case 0x614: case 0xa14: case 0xe14:
1968 if (((insn >> 22) & 3) == 0)
1969 return 1;
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 tmp = tcg_temp_new_i32();
1974 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1975 tcg_temp_free_i32(tmp);
1976 return 1;
1977 }
1978 switch ((insn >> 22) & 3) {
1979 case 1:
1980 gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
1981 break;
1982 case 2:
1983 gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
1984 break;
1985 case 3:
1986 gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
1987 break;
1988 }
1989 tcg_temp_free_i32(tmp);
1990 gen_op_iwmmxt_movq_wRn_M0(wrd);
1991 gen_op_iwmmxt_set_mup();
1992 gen_op_iwmmxt_set_cup();
1993 break;
1994 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1995 case 0x014: case 0x414: case 0x814: case 0xc14:
1996 if (((insn >> 22) & 3) == 0)
1997 return 1;
1998 wrd = (insn >> 12) & 0xf;
1999 rd0 = (insn >> 16) & 0xf;
2000 gen_op_iwmmxt_movq_M0_wRn(rd0);
2001 tmp = tcg_temp_new_i32();
2002 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2003 tcg_temp_free_i32(tmp);
2004 return 1;
2005 }
2006 switch ((insn >> 22) & 3) {
2007 case 1:
2008 gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
2009 break;
2010 case 2:
2011 gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
2012 break;
2013 case 3:
2014 gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
2015 break;
2016 }
2017 tcg_temp_free_i32(tmp);
2018 gen_op_iwmmxt_movq_wRn_M0(wrd);
2019 gen_op_iwmmxt_set_mup();
2020 gen_op_iwmmxt_set_cup();
2021 break;
2022 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2023 case 0x114: case 0x514: case 0x914: case 0xd14:
2024 if (((insn >> 22) & 3) == 0)
2025 return 1;
2026 wrd = (insn >> 12) & 0xf;
2027 rd0 = (insn >> 16) & 0xf;
2028 gen_op_iwmmxt_movq_M0_wRn(rd0);
2029 tmp = tcg_temp_new_i32();
2030 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2031 tcg_temp_free_i32(tmp);
2032 return 1;
2033 }
2034 switch ((insn >> 22) & 3) {
2035 case 1:
2036 gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
2037 break;
2038 case 2:
2039 gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
2040 break;
2041 case 3:
2042 gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
2043 break;
2044 }
2045 tcg_temp_free_i32(tmp);
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 gen_op_iwmmxt_set_cup();
2049 break;
2050 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2051 case 0x314: case 0x714: case 0xb14: case 0xf14:
2052 if (((insn >> 22) & 3) == 0)
2053 return 1;
2054 wrd = (insn >> 12) & 0xf;
2055 rd0 = (insn >> 16) & 0xf;
2056 gen_op_iwmmxt_movq_M0_wRn(rd0);
2057 tmp = tcg_temp_new_i32();
2058 switch ((insn >> 22) & 3) {
2059 case 1:
2060 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2061 tcg_temp_free_i32(tmp);
2062 return 1;
2063 }
2064 gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
2065 break;
2066 case 2:
2067 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2068 tcg_temp_free_i32(tmp);
2069 return 1;
2070 }
2071 gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
2072 break;
2073 case 3:
2074 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2075 tcg_temp_free_i32(tmp);
2076 return 1;
2077 }
2078 gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
2079 break;
2080 }
2081 tcg_temp_free_i32(tmp);
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
2086 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2087 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 rd1 = (insn >> 0) & 0xf;
2091 gen_op_iwmmxt_movq_M0_wRn(rd0);
2092 switch ((insn >> 22) & 3) {
2093 case 0:
2094 if (insn & (1 << 21))
2095 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2096 else
2097 gen_op_iwmmxt_minub_M0_wRn(rd1);
2098 break;
2099 case 1:
2100 if (insn & (1 << 21))
2101 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2102 else
2103 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2104 break;
2105 case 2:
2106 if (insn & (1 << 21))
2107 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2108 else
2109 gen_op_iwmmxt_minul_M0_wRn(rd1);
2110 break;
2111 case 3:
2112 return 1;
2113 }
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 break;
2117 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2118 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2119 wrd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
2121 rd1 = (insn >> 0) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
2123 switch ((insn >> 22) & 3) {
2124 case 0:
2125 if (insn & (1 << 21))
2126 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2127 else
2128 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2129 break;
2130 case 1:
2131 if (insn & (1 << 21))
2132 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2133 else
2134 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2135 break;
2136 case 2:
2137 if (insn & (1 << 21))
2138 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2139 else
2140 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2141 break;
2142 case 3:
2143 return 1;
2144 }
2145 gen_op_iwmmxt_movq_wRn_M0(wrd);
2146 gen_op_iwmmxt_set_mup();
2147 break;
2148 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2149 case 0x402: case 0x502: case 0x602: case 0x702:
2150 wrd = (insn >> 12) & 0xf;
2151 rd0 = (insn >> 16) & 0xf;
2152 rd1 = (insn >> 0) & 0xf;
2153 gen_op_iwmmxt_movq_M0_wRn(rd0);
2154 tmp = tcg_const_i32((insn >> 20) & 3);
2155 iwmmxt_load_reg(cpu_V1, rd1);
2156 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2157 tcg_temp_free(tmp);
2158 gen_op_iwmmxt_movq_wRn_M0(wrd);
2159 gen_op_iwmmxt_set_mup();
2160 break;
2161 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2162 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2163 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2164 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2165 wrd = (insn >> 12) & 0xf;
2166 rd0 = (insn >> 16) & 0xf;
2167 rd1 = (insn >> 0) & 0xf;
2168 gen_op_iwmmxt_movq_M0_wRn(rd0);
2169 switch ((insn >> 20) & 0xf) {
2170 case 0x0:
2171 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2172 break;
2173 case 0x1:
2174 gen_op_iwmmxt_subub_M0_wRn(rd1);
2175 break;
2176 case 0x3:
2177 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2178 break;
2179 case 0x4:
2180 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2181 break;
2182 case 0x5:
2183 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2184 break;
2185 case 0x7:
2186 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2187 break;
2188 case 0x8:
2189 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2190 break;
2191 case 0x9:
2192 gen_op_iwmmxt_subul_M0_wRn(rd1);
2193 break;
2194 case 0xb:
2195 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2196 break;
2197 default:
2198 return 1;
2199 }
2200 gen_op_iwmmxt_movq_wRn_M0(wrd);
2201 gen_op_iwmmxt_set_mup();
2202 gen_op_iwmmxt_set_cup();
2203 break;
2204 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2205 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2206 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2207 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2208 wrd = (insn >> 12) & 0xf;
2209 rd0 = (insn >> 16) & 0xf;
2210 gen_op_iwmmxt_movq_M0_wRn(rd0);
2211 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2212 gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
2213 tcg_temp_free(tmp);
2214 gen_op_iwmmxt_movq_wRn_M0(wrd);
2215 gen_op_iwmmxt_set_mup();
2216 gen_op_iwmmxt_set_cup();
2217 break;
2218 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2219 case 0x418: case 0x518: case 0x618: case 0x718:
2220 case 0x818: case 0x918: case 0xa18: case 0xb18:
2221 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2222 wrd = (insn >> 12) & 0xf;
2223 rd0 = (insn >> 16) & 0xf;
2224 rd1 = (insn >> 0) & 0xf;
2225 gen_op_iwmmxt_movq_M0_wRn(rd0);
2226 switch ((insn >> 20) & 0xf) {
2227 case 0x0:
2228 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2229 break;
2230 case 0x1:
2231 gen_op_iwmmxt_addub_M0_wRn(rd1);
2232 break;
2233 case 0x3:
2234 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2235 break;
2236 case 0x4:
2237 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2238 break;
2239 case 0x5:
2240 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2241 break;
2242 case 0x7:
2243 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2244 break;
2245 case 0x8:
2246 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2247 break;
2248 case 0x9:
2249 gen_op_iwmmxt_addul_M0_wRn(rd1);
2250 break;
2251 case 0xb:
2252 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2253 break;
2254 default:
2255 return 1;
2256 }
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 gen_op_iwmmxt_set_cup();
2260 break;
2261 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2262 case 0x408: case 0x508: case 0x608: case 0x708:
2263 case 0x808: case 0x908: case 0xa08: case 0xb08:
2264 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2265 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2266 return 1;
2267 wrd = (insn >> 12) & 0xf;
2268 rd0 = (insn >> 16) & 0xf;
2269 rd1 = (insn >> 0) & 0xf;
2270 gen_op_iwmmxt_movq_M0_wRn(rd0);
2271 switch ((insn >> 22) & 3) {
2272 case 1:
2273 if (insn & (1 << 21))
2274 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2275 else
2276 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2277 break;
2278 case 2:
2279 if (insn & (1 << 21))
2280 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2281 else
2282 gen_op_iwmmxt_packul_M0_wRn(rd1);
2283 break;
2284 case 3:
2285 if (insn & (1 << 21))
2286 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2287 else
2288 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2289 break;
2290 }
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x201: case 0x203: case 0x205: case 0x207:
2296 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2297 case 0x211: case 0x213: case 0x215: case 0x217:
2298 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2299 wrd = (insn >> 5) & 0xf;
2300 rd0 = (insn >> 12) & 0xf;
2301 rd1 = (insn >> 0) & 0xf;
2302 if (rd0 == 0xf || rd1 == 0xf)
2303 return 1;
2304 gen_op_iwmmxt_movq_M0_wRn(wrd);
2305 tmp = load_reg(s, rd0);
2306 tmp2 = load_reg(s, rd1);
2307 switch ((insn >> 16) & 0xf) {
2308 case 0x0: /* TMIA */
2309 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2310 break;
2311 case 0x8: /* TMIAPH */
2312 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2313 break;
2314 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2315 if (insn & (1 << 16))
2316 tcg_gen_shri_i32(tmp, tmp, 16);
2317 if (insn & (1 << 17))
2318 tcg_gen_shri_i32(tmp2, tmp2, 16);
2319 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2320 break;
2321 default:
2322 tcg_temp_free_i32(tmp2);
2323 tcg_temp_free_i32(tmp);
2324 return 1;
2325 }
2326 tcg_temp_free_i32(tmp2);
2327 tcg_temp_free_i32(tmp);
2328 gen_op_iwmmxt_movq_wRn_M0(wrd);
2329 gen_op_iwmmxt_set_mup();
2330 break;
2331 default:
2332 return 1;
2333 }
2334
2335 return 0;
2336 }
2337
2338 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2339 (i.e. an undefined instruction). */
2340 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2341 {
2342 int acc, rd0, rd1, rdhi, rdlo;
2343 TCGv tmp, tmp2;
2344
2345 if ((insn & 0x0ff00f10) == 0x0e200010) {
2346 /* Multiply with Internal Accumulate Format */
2347 rd0 = (insn >> 12) & 0xf;
2348 rd1 = insn & 0xf;
2349 acc = (insn >> 5) & 7;
2350
2351 if (acc != 0)
2352 return 1;
2353
2354 tmp = load_reg(s, rd0);
2355 tmp2 = load_reg(s, rd1);
2356 switch ((insn >> 16) & 0xf) {
2357 case 0x0: /* MIA */
2358 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2359 break;
2360 case 0x8: /* MIAPH */
2361 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2362 break;
2363 case 0xc: /* MIABB */
2364 case 0xd: /* MIABT */
2365 case 0xe: /* MIATB */
2366 case 0xf: /* MIATT */
2367 if (insn & (1 << 16))
2368 tcg_gen_shri_i32(tmp, tmp, 16);
2369 if (insn & (1 << 17))
2370 tcg_gen_shri_i32(tmp2, tmp2, 16);
2371 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2372 break;
2373 default:
2374 return 1;
2375 }
2376 tcg_temp_free_i32(tmp2);
2377 tcg_temp_free_i32(tmp);
2378
2379 gen_op_iwmmxt_movq_wRn_M0(acc);
2380 return 0;
2381 }
2382
2383 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2384 /* Internal Accumulator Access Format */
2385 rdhi = (insn >> 16) & 0xf;
2386 rdlo = (insn >> 12) & 0xf;
2387 acc = insn & 7;
2388
2389 if (acc != 0)
2390 return 1;
2391
2392 if (insn & ARM_CP_RW_BIT) { /* MRA */
2393 iwmmxt_load_reg(cpu_V0, acc);
2394 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2395 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2396 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2397 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2398 } else { /* MAR */
2399 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2400 iwmmxt_store_reg(cpu_V0, acc);
2401 }
2402 return 0;
2403 }
2404
2405 return 1;
2406 }
2407
2408 /* Disassemble a system coprocessor instruction. Return nonzero if the
2409 instruction is not defined. */
2410 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2411 {
2412 TCGv tmp, tmp2;
2413 uint32_t rd = (insn >> 12) & 0xf;
2414 uint32_t cp = (insn >> 8) & 0xf;
2415 if (IS_USER(s)) {
2416 return 1;
2417 }
2418
2419 if (insn & ARM_CP_RW_BIT) {
2420 if (!env->cp[cp].cp_read)
2421 return 1;
2422 gen_set_pc_im(s->pc);
2423 tmp = tcg_temp_new_i32();
2424 tmp2 = tcg_const_i32(insn);
2425 gen_helper_get_cp(tmp, cpu_env, tmp2);
2426 tcg_temp_free(tmp2);
2427 store_reg(s, rd, tmp);
2428 } else {
2429 if (!env->cp[cp].cp_write)
2430 return 1;
2431 gen_set_pc_im(s->pc);
2432 tmp = load_reg(s, rd);
2433 tmp2 = tcg_const_i32(insn);
2434 gen_helper_set_cp(cpu_env, tmp2, tmp);
2435 tcg_temp_free(tmp2);
2436 tcg_temp_free_i32(tmp);
2437 }
2438 return 0;
2439 }
2440
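/* Return nonzero if this cp15 access is permitted in user mode: only the
 * c13,c0 TLS registers (opcode2 2 for read/write, opcode2 3 read-only) and
 * the c7 barrier operations (ISB, DSB, DMB) are allowed; everything else
 * must trap.
 */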
2441 static int cp15_user_ok(uint32_t insn)
2442 {
2443 int cpn = (insn >> 16) & 0xf;
2444 int cpm = insn & 0xf;
2445 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2446
2447 if (cpn == 13 && cpm == 0) {
2448 /* TLS register. */
2449 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2450 return 1;
2451 }
2452 if (cpn == 7) {
2453 /* ISB, DSB, DMB. */
2454 if ((cpm == 5 && op == 4)
2455 || (cpm == 10 && (op == 4 || op == 5)))
2456 return 1;
2457 }
2458 return 0;
2459 }
2460
2461 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2462 {
2463 TCGv tmp;
2464 int cpn = (insn >> 16) & 0xf;
2465 int cpm = insn & 0xf;
2466 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2467
2468 if (!arm_feature(env, ARM_FEATURE_V6K))
2469 return 0;
2470
2471 if (!(cpn == 13 && cpm == 0))
2472 return 0;
2473
2474 if (insn & ARM_CP_RW_BIT) {
2475 switch (op) {
2476 case 2:
2477 tmp = load_cpu_field(cp15.c13_tls1);
2478 break;
2479 case 3:
2480 tmp = load_cpu_field(cp15.c13_tls2);
2481 break;
2482 case 4:
2483 tmp = load_cpu_field(cp15.c13_tls3);
2484 break;
2485 default:
2486 return 0;
2487 }
2488 store_reg(s, rd, tmp);
2489
2490 } else {
2491 tmp = load_reg(s, rd);
2492 switch (op) {
2493 case 2:
2494 store_cpu_field(tmp, cp15.c13_tls1);
2495 break;
2496 case 3:
2497 store_cpu_field(tmp, cp15.c13_tls2);
2498 break;
2499 case 4:
2500 store_cpu_field(tmp, cp15.c13_tls3);
2501 break;
2502 default:
2503 tcg_temp_free_i32(tmp);
2504 return 0;
2505 }
2506 }
2507 return 1;
2508 }
2509
2510 /* Disassemble a system coprocessor (cp15) instruction. Return nonzero if
2511 the instruction is not defined. */
2512 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2513 {
2514 uint32_t rd;
2515 TCGv tmp, tmp2;
2516
2517 /* M profile cores use memory mapped registers instead of cp15. */
2518 if (arm_feature(env, ARM_FEATURE_M))
2519 return 1;
2520
2521 if ((insn & (1 << 25)) == 0) {
2522 if (insn & (1 << 20)) {
2523 /* mrrc */
2524 return 1;
2525 }
2526 /* mcrr. Used for block cache operations, so implement as no-op. */
2527 return 0;
2528 }
2529 if ((insn & (1 << 4)) == 0) {
2530 /* cdp */
2531 return 1;
2532 }
2533 if (IS_USER(s) && !cp15_user_ok(insn)) {
2534 return 1;
2535 }
2536
2537 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2538 * instructions rather than a separate instruction.
2539 */
2540 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2541 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2542 * In v7, this must NOP.
2543 */
2544 if (!arm_feature(env, ARM_FEATURE_V7)) {
2545 /* Wait for interrupt. */
2546 gen_set_pc_im(s->pc);
2547 s->is_jmp = DISAS_WFI;
2548 }
2549 return 0;
2550 }
2551
2552 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2553 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2554 * so this is slightly over-broad.
2555 */
2556 if (!arm_feature(env, ARM_FEATURE_V6)) {
2557 /* Wait for interrupt. */
2558 gen_set_pc_im(s->pc);
2559 s->is_jmp = DISAS_WFI;
2560 return 0;
2561 }
2562 /* Otherwise fall through to handle via helper function.
2563 * In particular, on v7 and some v6 cores this is one of
2564 * the VA-PA registers.
2565 */
2566 }
2567
2568 rd = (insn >> 12) & 0xf;
2569
2570 if (cp15_tls_load_store(env, s, insn, rd))
2571 return 0;
2572
2573 tmp2 = tcg_const_i32(insn);
2574 if (insn & ARM_CP_RW_BIT) {
2575 tmp = tcg_temp_new_i32();
2576 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2577 /* If the destination register is r15 then set the condition codes. */
2578 if (rd != 15)
2579 store_reg(s, rd, tmp);
2580 else
2581 tcg_temp_free_i32(tmp);
2582 } else {
2583 tmp = load_reg(s, rd);
2584 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2585 tcg_temp_free_i32(tmp);
2586 /* Normally we would always end the TB here, but Linux
2587 * arch/arm/mach-pxa/sleep.S expects two instructions following
2588 * an MMU enable to execute from cache. Imitate this behaviour. */
2589 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2590 (insn & 0x0fff0fff) != 0x0e010f10)
2591 gen_lookup_tb(s);
2592 }
2593 tcg_temp_free_i32(tmp2);
2594 return 0;
2595 }
2596
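/* Extract VFP single/double register numbers from an instruction.
 * A single-precision register number is five bits: the 4-bit field
 * shifted up by one plus the extra low bit, e.g. Sd = (bits[15:12] << 1) | bit[22].
 * A double-precision register number keeps the 4-bit field as the low
 * bits; on VFP3 (32 D registers) the extra bit becomes bit 4,
 * e.g. Dd = (bit[22] << 4) | bits[15:12], while pre-VFP3 the extra bit
 * must be zero or the instruction is undefined.
 */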
2597 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2598 #define VFP_SREG(insn, bigbit, smallbit) \
2599 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2600 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2601 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2602 reg = (((insn) >> (bigbit)) & 0x0f) \
2603 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2604 } else { \
2605 if (insn & (1 << (smallbit))) \
2606 return 1; \
2607 reg = ((insn) >> (bigbit)) & 0x0f; \
2608 }} while (0)
2609
2610 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2611 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2612 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2613 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2614 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2615 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2616
2617 /* Move between integer and VFP cores. */
2618 static TCGv gen_vfp_mrs(void)
2619 {
2620 TCGv tmp = tcg_temp_new_i32();
2621 tcg_gen_mov_i32(tmp, cpu_F0s);
2622 return tmp;
2623 }
2624
2625 static void gen_vfp_msr(TCGv tmp)
2626 {
2627 tcg_gen_mov_i32(cpu_F0s, tmp);
2628 tcg_temp_free_i32(tmp);
2629 }
2630
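/* Duplicate the byte at bit position 'shift' (0, 8, 16 or 24) of 'var'
 * into all four byte lanes of the 32-bit value.
 */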
2631 static void gen_neon_dup_u8(TCGv var, int shift)
2632 {
2633 TCGv tmp = tcg_temp_new_i32();
2634 if (shift)
2635 tcg_gen_shri_i32(var, var, shift);
2636 tcg_gen_ext8u_i32(var, var);
2637 tcg_gen_shli_i32(tmp, var, 8);
2638 tcg_gen_or_i32(var, var, tmp);
2639 tcg_gen_shli_i32(tmp, var, 16);
2640 tcg_gen_or_i32(var, var, tmp);
2641 tcg_temp_free_i32(tmp);
2642 }
2643
2644 static void gen_neon_dup_low16(TCGv var)
2645 {
2646 TCGv tmp = tcg_temp_new_i32();
2647 tcg_gen_ext16u_i32(var, var);
2648 tcg_gen_shli_i32(tmp, var, 16);
2649 tcg_gen_or_i32(var, var, tmp);
2650 tcg_temp_free_i32(tmp);
2651 }
2652
2653 static void gen_neon_dup_high16(TCGv var)
2654 {
2655 TCGv tmp = tcg_temp_new_i32();
2656 tcg_gen_andi_i32(var, var, 0xffff0000);
2657 tcg_gen_shri_i32(tmp, var, 16);
2658 tcg_gen_or_i32(var, var, tmp);
2659 tcg_temp_free_i32(tmp);
2660 }
2661
2662 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2663 {
2664 /* Load a single Neon element and replicate it into a 32-bit TCG reg */
2665 TCGv tmp;
2666 switch (size) {
2667 case 0:
2668 tmp = gen_ld8u(addr, IS_USER(s));
2669 gen_neon_dup_u8(tmp, 0);
2670 break;
2671 case 1:
2672 tmp = gen_ld16u(addr, IS_USER(s));
2673 gen_neon_dup_low16(tmp);
2674 break;
2675 case 2:
2676 tmp = gen_ld32(addr, IS_USER(s));
2677 break;
2678 default: /* Avoid compiler warnings. */
2679 abort();
2680 }
2681 return tmp;
2682 }
2683
2684 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2685 (i.e. an undefined instruction). */
2686 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2687 {
2688 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2689 int dp, veclen;
2690 TCGv addr;
2691 TCGv tmp;
2692 TCGv tmp2;
2693
2694 if (!arm_feature(env, ARM_FEATURE_VFP))
2695 return 1;
2696
2697 if (!s->vfp_enabled) {
2698 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2699 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2700 return 1;
2701 rn = (insn >> 16) & 0xf;
2702 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2703 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2704 return 1;
2705 }
2706 dp = ((insn & 0xf00) == 0xb00);
2707 switch ((insn >> 24) & 0xf) {
2708 case 0xe:
2709 if (insn & (1 << 4)) {
2710 /* single register transfer */
2711 rd = (insn >> 12) & 0xf;
2712 if (dp) {
2713 int size;
2714 int pass;
2715
2716 VFP_DREG_N(rn, insn);
2717 if (insn & 0xf)
2718 return 1;
2719 if (insn & 0x00c00060
2720 && !arm_feature(env, ARM_FEATURE_NEON))
2721 return 1;
2722
2723 pass = (insn >> 21) & 1;
2724 if (insn & (1 << 22)) {
2725 size = 0;
2726 offset = ((insn >> 5) & 3) * 8;
2727 } else if (insn & (1 << 5)) {
2728 size = 1;
2729 offset = (insn & (1 << 6)) ? 16 : 0;
2730 } else {
2731 size = 2;
2732 offset = 0;
2733 }
2734 if (insn & ARM_CP_RW_BIT) {
2735 /* vfp->arm */
2736 tmp = neon_load_reg(rn, pass);
2737 switch (size) {
2738 case 0:
2739 if (offset)
2740 tcg_gen_shri_i32(tmp, tmp, offset);
2741 if (insn & (1 << 23))
2742 gen_uxtb(tmp);
2743 else
2744 gen_sxtb(tmp);
2745 break;
2746 case 1:
2747 if (insn & (1 << 23)) {
2748 if (offset) {
2749 tcg_gen_shri_i32(tmp, tmp, 16);
2750 } else {
2751 gen_uxth(tmp);
2752 }
2753 } else {
2754 if (offset) {
2755 tcg_gen_sari_i32(tmp, tmp, 16);
2756 } else {
2757 gen_sxth(tmp);
2758 }
2759 }
2760 break;
2761 case 2:
2762 break;
2763 }
2764 store_reg(s, rd, tmp);
2765 } else {
2766 /* arm->vfp */
2767 tmp = load_reg(s, rd);
2768 if (insn & (1 << 23)) {
2769 /* VDUP */
2770 if (size == 0) {
2771 gen_neon_dup_u8(tmp, 0);
2772 } else if (size == 1) {
2773 gen_neon_dup_low16(tmp);
2774 }
2775 for (n = 0; n <= pass * 2; n++) {
2776 tmp2 = tcg_temp_new_i32();
2777 tcg_gen_mov_i32(tmp2, tmp);
2778 neon_store_reg(rn, n, tmp2);
2779 }
2780 neon_store_reg(rn, n, tmp);
2781 } else {
2782 /* VMOV */
2783 switch (size) {
2784 case 0:
2785 tmp2 = neon_load_reg(rn, pass);
2786 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2787 tcg_temp_free_i32(tmp2);
2788 break;
2789 case 1:
2790 tmp2 = neon_load_reg(rn, pass);
2791 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2792 tcg_temp_free_i32(tmp2);
2793 break;
2794 case 2:
2795 break;
2796 }
2797 neon_store_reg(rn, pass, tmp);
2798 }
2799 }
2800 } else { /* !dp */
2801 if ((insn & 0x6f) != 0x00)
2802 return 1;
2803 rn = VFP_SREG_N(insn);
2804 if (insn & ARM_CP_RW_BIT) {
2805 /* vfp->arm */
2806 if (insn & (1 << 21)) {
2807 /* system register */
2808 rn >>= 1;
2809
2810 switch (rn) {
2811 case ARM_VFP_FPSID:
2812 /* VFP2 allows access to FPSID from userspace.
2813 VFP3 restricts all id registers to privileged
2814 accesses. */
2815 if (IS_USER(s)
2816 && arm_feature(env, ARM_FEATURE_VFP3))
2817 return 1;
2818 tmp = load_cpu_field(vfp.xregs[rn]);
2819 break;
2820 case ARM_VFP_FPEXC:
2821 if (IS_USER(s))
2822 return 1;
2823 tmp = load_cpu_field(vfp.xregs[rn]);
2824 break;
2825 case ARM_VFP_FPINST:
2826 case ARM_VFP_FPINST2:
2827 /* Not present in VFP3. */
2828 if (IS_USER(s)
2829 || arm_feature(env, ARM_FEATURE_VFP3))
2830 return 1;
2831 tmp = load_cpu_field(vfp.xregs[rn]);
2832 break;
2833 case ARM_VFP_FPSCR:
2834 if (rd == 15) {
2835 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2836 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2837 } else {
2838 tmp = tcg_temp_new_i32();
2839 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2840 }
2841 break;
2842 case ARM_VFP_MVFR0:
2843 case ARM_VFP_MVFR1:
2844 if (IS_USER(s)
2845 || !arm_feature(env, ARM_FEATURE_VFP3))
2846 return 1;
2847 tmp = load_cpu_field(vfp.xregs[rn]);
2848 break;
2849 default:
2850 return 1;
2851 }
2852 } else {
2853 gen_mov_F0_vreg(0, rn);
2854 tmp = gen_vfp_mrs();
2855 }
2856 if (rd == 15) {
2857 /* Set the 4 flag bits in the CPSR. */
2858 gen_set_nzcv(tmp);
2859 tcg_temp_free_i32(tmp);
2860 } else {
2861 store_reg(s, rd, tmp);
2862 }
2863 } else {
2864 /* arm->vfp */
2865 tmp = load_reg(s, rd);
2866 if (insn & (1 << 21)) {
2867 rn >>= 1;
2868 /* system register */
2869 switch (rn) {
2870 case ARM_VFP_FPSID:
2871 case ARM_VFP_MVFR0:
2872 case ARM_VFP_MVFR1:
2873 /* Writes are ignored. */
2874 break;
2875 case ARM_VFP_FPSCR:
2876 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2877 tcg_temp_free_i32(tmp);
2878 gen_lookup_tb(s);
2879 break;
2880 case ARM_VFP_FPEXC:
2881 if (IS_USER(s))
2882 return 1;
2883 /* TODO: VFP subarchitecture support.
2884 * For now, keep the EN bit only */
2885 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2886 store_cpu_field(tmp, vfp.xregs[rn]);
2887 gen_lookup_tb(s);
2888 break;
2889 case ARM_VFP_FPINST:
2890 case ARM_VFP_FPINST2:
2891 store_cpu_field(tmp, vfp.xregs[rn]);
2892 break;
2893 default:
2894 return 1;
2895 }
2896 } else {
2897 gen_vfp_msr(tmp);
2898 gen_mov_vreg_F0(0, rn);
2899 }
2900 }
2901 }
2902 } else {
2903 /* data processing */
2904 /* The opcode is in bits 23, 21, 20 and 6. */
2905 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2906 if (dp) {
2907 if (op == 15) {
2908 /* rn is opcode */
2909 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2910 } else {
2911 /* rn is register number */
2912 VFP_DREG_N(rn, insn);
2913 }
2914
2915 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2916 /* Integer or single precision destination. */
2917 rd = VFP_SREG_D(insn);
2918 } else {
2919 VFP_DREG_D(rd, insn);
2920 }
2921 if (op == 15 &&
2922 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2923 /* VCVT from int is always from S reg regardless of dp bit.
2924 * VCVT with immediate frac_bits has same format as SREG_M
2925 */
2926 rm = VFP_SREG_M(insn);
2927 } else {
2928 VFP_DREG_M(rm, insn);
2929 }
2930 } else {
2931 rn = VFP_SREG_N(insn);
2932 if (op == 15 && rn == 15) {
2933 /* Double precision destination. */
2934 VFP_DREG_D(rd, insn);
2935 } else {
2936 rd = VFP_SREG_D(insn);
2937 }
2938 /* NB that we implicitly rely on the encoding for the frac_bits
2939 * in VCVT of fixed to float being the same as that of an SREG_M
2940 */
2941 rm = VFP_SREG_M(insn);
2942 }
2943
2944 veclen = s->vec_len;
2945 if (op == 15 && rn > 3)
2946 veclen = 0;
2947
2948 /* Shut up compiler warnings. */
2949 delta_m = 0;
2950 delta_d = 0;
2951 bank_mask = 0;
2952
2953 if (veclen > 0) {
2954 if (dp)
2955 bank_mask = 0xc;
2956 else
2957 bank_mask = 0x18;
2958
2959 /* Figure out what type of vector operation this is. */
2960 if ((rd & bank_mask) == 0) {
2961 /* scalar */
2962 veclen = 0;
2963 } else {
2964 if (dp)
2965 delta_d = (s->vec_stride >> 1) + 1;
2966 else
2967 delta_d = s->vec_stride + 1;
2968
2969 if ((rm & bank_mask) == 0) {
2970 /* mixed scalar/vector */
2971 delta_m = 0;
2972 } else {
2973 /* vector */
2974 delta_m = delta_d;
2975 }
2976 }
2977 }
2978
2979 /* Load the initial operands. */
2980 if (op == 15) {
2981 switch (rn) {
2982 case 16:
2983 case 17:
2984 /* Integer source */
2985 gen_mov_F0_vreg(0, rm);
2986 break;
2987 case 8:
2988 case 9:
2989 /* Compare */
2990 gen_mov_F0_vreg(dp, rd);
2991 gen_mov_F1_vreg(dp, rm);
2992 break;
2993 case 10:
2994 case 11:
2995 /* Compare with zero */
2996 gen_mov_F0_vreg(dp, rd);
2997 gen_vfp_F1_ld0(dp);
2998 break;
2999 case 20:
3000 case 21:
3001 case 22:
3002 case 23:
3003 case 28:
3004 case 29:
3005 case 30:
3006 case 31:
3007 /* Source and destination the same. */
3008 gen_mov_F0_vreg(dp, rd);
3009 break;
3010 default:
3011 /* One source operand. */
3012 gen_mov_F0_vreg(dp, rm);
3013 break;
3014 }
3015 } else {
3016 /* Two source operands. */
3017 gen_mov_F0_vreg(dp, rn);
3018 gen_mov_F1_vreg(dp, rm);
3019 }
3020
3021 for (;;) {
3022 /* Perform the calculation. */
3023 switch (op) {
3024 case 0: /* mac: fd + (fn * fm) */
3025 gen_vfp_mul(dp);
3026 gen_mov_F1_vreg(dp, rd);
3027 gen_vfp_add(dp);
3028 break;
3029 case 1: /* nmac: fd - (fn * fm) */
3030 gen_vfp_mul(dp);
3031 gen_vfp_neg(dp);
3032 gen_mov_F1_vreg(dp, rd);
3033 gen_vfp_add(dp);
3034 break;
3035 case 2: /* msc: -fd + (fn * fm) */
3036 gen_vfp_mul(dp);
3037 gen_mov_F1_vreg(dp, rd);
3038 gen_vfp_sub(dp);
3039 break;
3040 case 3: /* nmsc: -fd - (fn * fm) */
3041 gen_vfp_mul(dp);
3042 gen_vfp_neg(dp);
3043 gen_mov_F1_vreg(dp, rd);
3044 gen_vfp_sub(dp);
3045 break;
3046 case 4: /* mul: fn * fm */
3047 gen_vfp_mul(dp);
3048 break;
3049 case 5: /* nmul: -(fn * fm) */
3050 gen_vfp_mul(dp);
3051 gen_vfp_neg(dp);
3052 break;
3053 case 6: /* add: fn + fm */
3054 gen_vfp_add(dp);
3055 break;
3056 case 7: /* sub: fn - fm */
3057 gen_vfp_sub(dp);
3058 break;
3059 case 8: /* div: fn / fm */
3060 gen_vfp_div(dp);
3061 break;
3062 case 14: /* fconst */
3063 if (!arm_feature(env, ARM_FEATURE_VFP3))
3064 return 1;
3065
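/* Expand the 8-bit encoded immediate (sign a = bit 19, bcdefgh =
 * bits [18:16] and [3:0]) into a full single- or double-precision
 * constant: sign : NOT(b) : replicated b : cdefgh : zeroes, as per
 * the ARM ARM VFPExpandImm() rule.
 */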
3066 n = (insn << 12) & 0x80000000;
3067 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3068 if (dp) {
3069 if (i & 0x40)
3070 i |= 0x3f80;
3071 else
3072 i |= 0x4000;
3073 n |= i << 16;
3074 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3075 } else {
3076 if (i & 0x40)
3077 i |= 0x780;
3078 else
3079 i |= 0x800;
3080 n |= i << 19;
3081 tcg_gen_movi_i32(cpu_F0s, n);
3082 }
3083 break;
3084 case 15: /* extension space */
3085 switch (rn) {
3086 case 0: /* cpy */
3087 /* no-op */
3088 break;
3089 case 1: /* abs */
3090 gen_vfp_abs(dp);
3091 break;
3092 case 2: /* neg */
3093 gen_vfp_neg(dp);
3094 break;
3095 case 3: /* sqrt */
3096 gen_vfp_sqrt(dp);
3097 break;
3098 case 4: /* vcvtb.f32.f16 */
3099 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3100 return 1;
3101 tmp = gen_vfp_mrs();
3102 tcg_gen_ext16u_i32(tmp, tmp);
3103 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3104 tcg_temp_free_i32(tmp);
3105 break;
3106 case 5: /* vcvtt.f32.f16 */
3107 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3108 return 1;
3109 tmp = gen_vfp_mrs();
3110 tcg_gen_shri_i32(tmp, tmp, 16);
3111 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3112 tcg_temp_free_i32(tmp);
3113 break;
3114 case 6: /* vcvtb.f16.f32 */
3115 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3116 return 1;
3117 tmp = tcg_temp_new_i32();
3118 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3119 gen_mov_F0_vreg(0, rd);
3120 tmp2 = gen_vfp_mrs();
3121 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3122 tcg_gen_or_i32(tmp, tmp, tmp2);
3123 tcg_temp_free_i32(tmp2);
3124 gen_vfp_msr(tmp);
3125 break;
3126 case 7: /* vcvtt.f16.f32 */
3127 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3128 return 1;
3129 tmp = tcg_temp_new_i32();
3130 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3131 tcg_gen_shli_i32(tmp, tmp, 16);
3132 gen_mov_F0_vreg(0, rd);
3133 tmp2 = gen_vfp_mrs();
3134 tcg_gen_ext16u_i32(tmp2, tmp2);
3135 tcg_gen_or_i32(tmp, tmp, tmp2);
3136 tcg_temp_free_i32(tmp2);
3137 gen_vfp_msr(tmp);
3138 break;
3139 case 8: /* cmp */
3140 gen_vfp_cmp(dp);
3141 break;
3142 case 9: /* cmpe */
3143 gen_vfp_cmpe(dp);
3144 break;
3145 case 10: /* cmpz */
3146 gen_vfp_cmp(dp);
3147 break;
3148 case 11: /* cmpez */
3149 gen_vfp_F1_ld0(dp);
3150 gen_vfp_cmpe(dp);
3151 break;
3152 case 15: /* single<->double conversion */
3153 if (dp)
3154 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3155 else
3156 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3157 break;
3158 case 16: /* fuito */
3159 gen_vfp_uito(dp);
3160 break;
3161 case 17: /* fsito */
3162 gen_vfp_sito(dp);
3163 break;
3164 case 20: /* fshto */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
3167 gen_vfp_shto(dp, 16 - rm);
3168 break;
3169 case 21: /* fslto */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
3172 gen_vfp_slto(dp, 32 - rm);
3173 break;
3174 case 22: /* fuhto */
3175 if (!arm_feature(env, ARM_FEATURE_VFP3))
3176 return 1;
3177 gen_vfp_uhto(dp, 16 - rm);
3178 break;
3179 case 23: /* fulto */
3180 if (!arm_feature(env, ARM_FEATURE_VFP3))
3181 return 1;
3182 gen_vfp_ulto(dp, 32 - rm);
3183 break;
3184 case 24: /* ftoui */
3185 gen_vfp_toui(dp);
3186 break;
3187 case 25: /* ftouiz */
3188 gen_vfp_touiz(dp);
3189 break;
3190 case 26: /* ftosi */
3191 gen_vfp_tosi(dp);
3192 break;
3193 case 27: /* ftosiz */
3194 gen_vfp_tosiz(dp);
3195 break;
3196 case 28: /* ftosh */
3197 if (!arm_feature(env, ARM_FEATURE_VFP3))
3198 return 1;
3199 gen_vfp_tosh(dp, 16 - rm);
3200 break;
3201 case 29: /* ftosl */
3202 if (!arm_feature(env, ARM_FEATURE_VFP3))
3203 return 1;
3204 gen_vfp_tosl(dp, 32 - rm);
3205 break;
3206 case 30: /* ftouh */
3207 if (!arm_feature(env, ARM_FEATURE_VFP3))
3208 return 1;
3209 gen_vfp_touh(dp, 16 - rm);
3210 break;
3211 case 31: /* ftoul */
3212 if (!arm_feature(env, ARM_FEATURE_VFP3))
3213 return 1;
3214 gen_vfp_toul(dp, 32 - rm);
3215 break;
3216 default: /* undefined */
3217 printf ("rn:%d\n", rn);
3218 return 1;
3219 }
3220 break;
3221 default: /* undefined */
3222 printf ("op:%d\n", op);
3223 return 1;
3224 }
3225
3226 /* Write back the result. */
3227 if (op == 15 && (rn >= 8 && rn <= 11))
3228 ; /* Comparison, do nothing. */
3229 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3230 /* VCVT double to int: always integer result. */
3231 gen_mov_vreg_F0(0, rd);
3232 else if (op == 15 && rn == 15)
3233 /* conversion */
3234 gen_mov_vreg_F0(!dp, rd);
3235 else
3236 gen_mov_vreg_F0(dp, rd);
3237
3238 /* break out of the loop if we have finished */
3239 if (veclen == 0)
3240 break;
3241
3242 if (op == 15 && delta_m == 0) {
3243 /* single source one-many */
3244 while (veclen--) {
3245 rd = ((rd + delta_d) & (bank_mask - 1))
3246 | (rd & bank_mask);
3247 gen_mov_vreg_F0(dp, rd);
3248 }
3249 break;
3250 }
3251 /* Set up the next operands. */
3252 veclen--;
3253 rd = ((rd + delta_d) & (bank_mask - 1))
3254 | (rd & bank_mask);
3255
3256 if (op == 15) {
3257 /* One source operand. */
3258 rm = ((rm + delta_m) & (bank_mask - 1))
3259 | (rm & bank_mask);
3260 gen_mov_F0_vreg(dp, rm);
3261 } else {
3262 /* Two source operands. */
3263 rn = ((rn + delta_d) & (bank_mask - 1))
3264 | (rn & bank_mask);
3265 gen_mov_F0_vreg(dp, rn);
3266 if (delta_m) {
3267 rm = ((rm + delta_m) & (bank_mask - 1))
3268 | (rm & bank_mask);
3269 gen_mov_F1_vreg(dp, rm);
3270 }
3271 }
3272 }
3273 }
3274 break;
3275 case 0xc:
3276 case 0xd:
3277 if ((insn & 0x03e00000) == 0x00400000) {
3278 /* two-register transfer */
3279 rn = (insn >> 16) & 0xf;
3280 rd = (insn >> 12) & 0xf;
3281 if (dp) {
3282 VFP_DREG_M(rm, insn);
3283 } else {
3284 rm = VFP_SREG_M(insn);
3285 }
3286
3287 if (insn & ARM_CP_RW_BIT) {
3288 /* vfp->arm */
3289 if (dp) {
3290 gen_mov_F0_vreg(0, rm * 2);
3291 tmp = gen_vfp_mrs();
3292 store_reg(s, rd, tmp);
3293 gen_mov_F0_vreg(0, rm * 2 + 1);
3294 tmp = gen_vfp_mrs();
3295 store_reg(s, rn, tmp);
3296 } else {
3297 gen_mov_F0_vreg(0, rm);
3298 tmp = gen_vfp_mrs();
3299 store_reg(s, rd, tmp);
3300 gen_mov_F0_vreg(0, rm + 1);
3301 tmp = gen_vfp_mrs();
3302 store_reg(s, rn, tmp);
3303 }
3304 } else {
3305 /* arm->vfp */
3306 if (dp) {
3307 tmp = load_reg(s, rd);
3308 gen_vfp_msr(tmp);
3309 gen_mov_vreg_F0(0, rm * 2);
3310 tmp = load_reg(s, rn);
3311 gen_vfp_msr(tmp);
3312 gen_mov_vreg_F0(0, rm * 2 + 1);
3313 } else {
3314 tmp = load_reg(s, rd);
3315 gen_vfp_msr(tmp);
3316 gen_mov_vreg_F0(0, rm);
3317 tmp = load_reg(s, rn);
3318 gen_vfp_msr(tmp);
3319 gen_mov_vreg_F0(0, rm + 1);
3320 }
3321 }
3322 } else {
3323 /* Load/store */
3324 rn = (insn >> 16) & 0xf;
3325 if (dp)
3326 VFP_DREG_D(rd, insn);
3327 else
3328 rd = VFP_SREG_D(insn);
3329 if (s->thumb && rn == 15) {
3330 addr = tcg_temp_new_i32();
3331 tcg_gen_movi_i32(addr, s->pc & ~2);
3332 } else {
3333 addr = load_reg(s, rn);
3334 }
3335 if ((insn & 0x01200000) == 0x01000000) {
3336 /* Single load/store */
3337 offset = (insn & 0xff) << 2;
3338 if ((insn & (1 << 23)) == 0)
3339 offset = -offset;
3340 tcg_gen_addi_i32(addr, addr, offset);
3341 if (insn & (1 << 20)) {
3342 gen_vfp_ld(s, dp, addr);
3343 gen_mov_vreg_F0(dp, rd);
3344 } else {
3345 gen_mov_F0_vreg(dp, rd);
3346 gen_vfp_st(s, dp, addr);
3347 }
3348 tcg_temp_free_i32(addr);
3349 } else {
3350 /* load/store multiple */
3351 if (dp)
3352 n = (insn >> 1) & 0x7f;
3353 else
3354 n = insn & 0xff;
3355
3356 if (insn & (1 << 24)) /* pre-decrement */
3357 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3358
3359 if (dp)
3360 offset = 8;
3361 else
3362 offset = 4;
3363 for (i = 0; i < n; i++) {
3364 if (insn & ARM_CP_RW_BIT) {
3365 /* load */
3366 gen_vfp_ld(s, dp, addr);
3367 gen_mov_vreg_F0(dp, rd + i);
3368 } else {
3369 /* store */
3370 gen_mov_F0_vreg(dp, rd + i);
3371 gen_vfp_st(s, dp, addr);
3372 }
3373 tcg_gen_addi_i32(addr, addr, offset);
3374 }
3375 if (insn & (1 << 21)) {
3376 /* writeback */
3377 if (insn & (1 << 24))
3378 offset = -offset * n;
3379 else if (dp && (insn & 1))
3380 offset = 4;
3381 else
3382 offset = 0;
3383
3384 if (offset != 0)
3385 tcg_gen_addi_i32(addr, addr, offset);
3386 store_reg(s, rn, addr);
3387 } else {
3388 tcg_temp_free_i32(addr);
3389 }
3390 }
3391 }
3392 break;
3393 default:
3394 /* Should never happen. */
3395 return 1;
3396 }
3397 return 0;
3398 }
3399
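/* Emit code to jump to 'dest'. If the destination lies in the same guest
 * page as this TB we can use a direct (chainable) goto_tb; otherwise we
 * must just set the PC and return to the main loop, since we may not
 * chain across a page boundary.
 */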
3400 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3401 {
3402 TranslationBlock *tb;
3403
3404 tb = s->tb;
3405 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3406 tcg_gen_goto_tb(n);
3407 gen_set_pc_im(dest);
3408 tcg_gen_exit_tb((tcg_target_long)tb + n);
3409 } else {
3410 gen_set_pc_im(dest);
3411 tcg_gen_exit_tb(0);
3412 }
3413 }
3414
3415 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3416 {
3417 if (unlikely(s->singlestep_enabled)) {
3418 /* An indirect jump so that we still trigger the debug exception. */
3419 if (s->thumb)
3420 dest |= 1;
3421 gen_bx_im(s, dest);
3422 } else {
3423 gen_goto_tb(s, 0, dest);
3424 s->is_jmp = DISAS_TB_JUMP;
3425 }
3426 }
3427
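/* Generate a 16x16->32 signed multiply in the SMULxy style: x and y
 * select the top halfword (1) or the sign-extended bottom halfword (0)
 * of t0 and t1 respectively.
 */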
3428 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3429 {
3430 if (x)
3431 tcg_gen_sari_i32(t0, t0, 16);
3432 else
3433 gen_sxth(t0);
3434 if (y)
3435 tcg_gen_sari_i32(t1, t1, 16);
3436 else
3437 gen_sxth(t1);
3438 tcg_gen_mul_i32(t0, t0, t1);
3439 }
3440
3441 /* Return the mask of PSR bits set by a MSR instruction. */
3442 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3443 uint32_t mask;
3444
3445 mask = 0;
3446 if (flags & (1 << 0))
3447 mask |= 0xff;
3448 if (flags & (1 << 1))
3449 mask |= 0xff00;
3450 if (flags & (1 << 2))
3451 mask |= 0xff0000;
3452 if (flags & (1 << 3))
3453 mask |= 0xff000000;
3454
3455 /* Mask out undefined bits. */
3456 mask &= ~CPSR_RESERVED;
3457 if (!arm_feature(env, ARM_FEATURE_V4T))
3458 mask &= ~CPSR_T;
3459 if (!arm_feature(env, ARM_FEATURE_V5))
3460 mask &= ~CPSR_Q; /* V5TE in reality */
3461 if (!arm_feature(env, ARM_FEATURE_V6))
3462 mask &= ~(CPSR_E | CPSR_GE);
3463 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3464 mask &= ~CPSR_IT;
3465 /* Mask out execution state bits. */
3466 if (!spsr)
3467 mask &= ~CPSR_EXEC;
3468 /* Mask out privileged bits. */
3469 if (IS_USER(s))
3470 mask &= CPSR_USER;
3471 return mask;
3472 }
3473
3474 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3475 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3476 {
3477 TCGv tmp;
3478 if (spsr) {
3479 /* ??? This is also undefined in system mode. */
3480 if (IS_USER(s))
3481 return 1;
3482
3483 tmp = load_cpu_field(spsr);
3484 tcg_gen_andi_i32(tmp, tmp, ~mask);
3485 tcg_gen_andi_i32(t0, t0, mask);
3486 tcg_gen_or_i32(tmp, tmp, t0);
3487 store_cpu_field(tmp, spsr);
3488 } else {
3489 gen_set_cpsr(t0, mask);
3490 }
3491 tcg_temp_free_i32(t0);
3492 gen_lookup_tb(s);
3493 return 0;
3494 }
3495
3496 /* Returns nonzero if access to the PSR is not permitted. */
3497 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3498 {
3499 TCGv tmp;
3500 tmp = tcg_temp_new_i32();
3501 tcg_gen_movi_i32(tmp, val);
3502 return gen_set_psr(s, mask, spsr, tmp);
3503 }
3504
3505 /* Generate an old-style exception return. Marks pc as dead. */
3506 static void gen_exception_return(DisasContext *s, TCGv pc)
3507 {
3508 TCGv tmp;
3509 store_reg(s, 15, pc);
3510 tmp = load_cpu_field(spsr);
3511 gen_set_cpsr(tmp, 0xffffffff);
3512 tcg_temp_free_i32(tmp);
3513 s->is_jmp = DISAS_UPDATE;
3514 }
3515
3516 /* Generate a v6 exception return. Marks both values as dead. */
3517 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3518 {
3519 gen_set_cpsr(cpsr, 0xffffffff);
3520 tcg_temp_free_i32(cpsr);
3521 store_reg(s, 15, pc);
3522 s->is_jmp = DISAS_UPDATE;
3523 }
3524
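/* If we are inside a Thumb IT block, write the current condexec state
 * back into the CPU state so that an exception taken here restarts with
 * the correct IT bits.
 */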
3525 static inline void
3526 gen_set_condexec (DisasContext *s)
3527 {
3528 if (s->condexec_mask) {
3529 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3530 TCGv tmp = tcg_temp_new_i32();
3531 tcg_gen_movi_i32(tmp, val);
3532 store_cpu_field(tmp, condexec_bits);
3533 }
3534 }
3535
3536 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3537 {
3538 gen_set_condexec(s);
3539 gen_set_pc_im(s->pc - offset);
3540 gen_exception(excp);
3541 s->is_jmp = DISAS_JUMP;
3542 }
3543
3544 static void gen_nop_hint(DisasContext *s, int val)
3545 {
3546 switch (val) {
3547 case 3: /* wfi */
3548 gen_set_pc_im(s->pc);
3549 s->is_jmp = DISAS_WFI;
3550 break;
3551 case 2: /* wfe */
3552 case 4: /* sev */
3553 /* TODO: Implement SEV and WFE. May help SMP performance. */
3554 default: /* nop */
3555 break;
3556 }
3557 }
3558
3559 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3560
3561 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3562 {
3563 switch (size) {
3564 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3565 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3566 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3567 default: abort();
3568 }
3569 }
3570
3571 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3572 {
3573 switch (size) {
3574 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3575 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3576 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3577 default: return;
3578 }
3579 }
3580
3581 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3582 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3583 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3584 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3585 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3586
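/* Invoke the size- and signedness-specific Neon helper for a binary op.
 * These macros expect 'size', 'u', 'tmp' and 'tmp2' to be in scope and
 * dispatch on (size << 1) | u to pick the s8/u8/s16/u16/s32/u32 helper;
 * size == 3 (64-bit) falls through to 'return 1' (UNDEF). The _ENV
 * variant additionally passes cpu_env for helpers that need CPU state,
 * e.g. to set the saturation flag.
 */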
3587 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3588 switch ((size << 1) | u) { \
3589 case 0: \
3590 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3591 break; \
3592 case 1: \
3593 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3594 break; \
3595 case 2: \
3596 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3597 break; \
3598 case 3: \
3599 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3600 break; \
3601 case 4: \
3602 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3603 break; \
3604 case 5: \
3605 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3606 break; \
3607 default: return 1; \
3608 }} while (0)
3609
3610 #define GEN_NEON_INTEGER_OP(name) do { \
3611 switch ((size << 1) | u) { \
3612 case 0: \
3613 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3614 break; \
3615 case 1: \
3616 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3617 break; \
3618 case 2: \
3619 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3620 break; \
3621 case 3: \
3622 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3623 break; \
3624 case 4: \
3625 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3626 break; \
3627 case 5: \
3628 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3629 break; \
3630 default: return 1; \
3631 }} while (0)
3632
3633 static TCGv neon_load_scratch(int scratch)
3634 {
3635 TCGv tmp = tcg_temp_new_i32();
3636 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3637 return tmp;
3638 }
3639
3640 static void neon_store_scratch(int scratch, TCGv var)
3641 {
3642 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3643 tcg_temp_free_i32(var);
3644 }
3645
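/* Fetch a Neon scalar and replicate it into all lanes of a 32-bit value.
 * For 16-bit scalars, bits [2:0] of 'reg' select the D register, bit 3
 * the halfword and bit 4 the word within the register; for 32-bit
 * scalars, bits [3:0] select the register and bit 4 the word.
 */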
3646 static inline TCGv neon_get_scalar(int size, int reg)
3647 {
3648 TCGv tmp;
3649 if (size == 1) {
3650 tmp = neon_load_reg(reg & 7, reg >> 4);
3651 if (reg & 8) {
3652 gen_neon_dup_high16(tmp);
3653 } else {
3654 gen_neon_dup_low16(tmp);
3655 }
3656 } else {
3657 tmp = neon_load_reg(reg & 15, reg >> 4);
3658 }
3659 return tmp;
3660 }
3661
3662 static int gen_neon_unzip(int rd, int rm, int size, int q)
3663 {
3664 TCGv tmp, tmp2;
3665 if (size == 3 || (!q && size == 2)) {
3666 return 1;
3667 }
3668 tmp = tcg_const_i32(rd);
3669 tmp2 = tcg_const_i32(rm);
3670 if (q) {
3671 switch (size) {
3672 case 0:
3673 gen_helper_neon_qunzip8(tmp, tmp2);
3674 break;
3675 case 1:
3676 gen_helper_neon_qunzip16(tmp, tmp2);
3677 break;
3678 case 2:
3679 gen_helper_neon_qunzip32(tmp, tmp2);
3680 break;
3681 default:
3682 abort();
3683 }
3684 } else {
3685 switch (size) {
3686 case 0:
3687 gen_helper_neon_unzip8(tmp, tmp2);
3688 break;
3689 case 1:
3690 gen_helper_neon_unzip16(tmp, tmp2);
3691 break;
3692 default:
3693 abort();
3694 }
3695 }
3696 tcg_temp_free_i32(tmp);
3697 tcg_temp_free_i32(tmp2);
3698 return 0;
3699 }
3700
3701 static int gen_neon_zip(int rd, int rm, int size, int q)
3702 {
3703 TCGv tmp, tmp2;
3704 if (size == 3 || (!q && size == 2)) {
3705 return 1;
3706 }
3707 tmp = tcg_const_i32(rd);
3708 tmp2 = tcg_const_i32(rm);
3709 if (q) {
3710 switch (size) {
3711 case 0:
3712 gen_helper_neon_qzip8(tmp, tmp2);
3713 break;
3714 case 1:
3715 gen_helper_neon_qzip16(tmp, tmp2);
3716 break;
3717 case 2:
3718 gen_helper_neon_qzip32(tmp, tmp2);
3719 break;
3720 default:
3721 abort();
3722 }
3723 } else {
3724 switch (size) {
3725 case 0:
3726 gen_helper_neon_zip8(tmp, tmp2);
3727 break;
3728 case 1:
3729 gen_helper_neon_zip16(tmp, tmp2);
3730 break;
3731 default:
3732 abort();
3733 }
3734 }
3735 tcg_temp_free_i32(tmp);
3736 tcg_temp_free_i32(tmp2);
3737 return 0;
3738 }
3739
3740 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3741 {
3742 TCGv rd, tmp;
3743
3744 rd = tcg_temp_new_i32();
3745 tmp = tcg_temp_new_i32();
3746
3747 tcg_gen_shli_i32(rd, t0, 8);
3748 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3749 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3750 tcg_gen_or_i32(rd, rd, tmp);
3751
3752 tcg_gen_shri_i32(t1, t1, 8);
3753 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3754 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3755 tcg_gen_or_i32(t1, t1, tmp);
3756 tcg_gen_mov_i32(t0, rd);
3757
3758 tcg_temp_free_i32(tmp);
3759 tcg_temp_free_i32(rd);
3760 }
3761
3762 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3763 {
3764 TCGv rd, tmp;
3765
3766 rd = tcg_temp_new_i32();
3767 tmp = tcg_temp_new_i32();
3768
3769 tcg_gen_shli_i32(rd, t0, 16);
3770 tcg_gen_andi_i32(tmp, t1, 0xffff);
3771 tcg_gen_or_i32(rd, rd, tmp);
3772 tcg_gen_shri_i32(t1, t1, 16);
3773 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3774 tcg_gen_or_i32(t1, t1, tmp);
3775 tcg_gen_mov_i32(t0, rd);
3776
3777 tcg_temp_free_i32(tmp);
3778 tcg_temp_free_i32(rd);
3779 }
3780
3781
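/* Layout of the "load/store all elements" forms of VLDn/VSTn, indexed by
 * the op field (insn bits [11:8]): number of registers transferred,
 * interleave pattern between them, and spacing between D registers.
 */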
3782 static struct {
3783 int nregs;
3784 int interleave;
3785 int spacing;
3786 } neon_ls_element_type[11] = {
3787 {4, 4, 1},
3788 {4, 4, 2},
3789 {4, 1, 1},
3790 {4, 2, 1},
3791 {3, 3, 1},
3792 {3, 3, 2},
3793 {3, 1, 1},
3794 {1, 1, 1},
3795 {2, 2, 1},
3796 {2, 2, 2},
3797 {2, 1, 1}
3798 };
3799
3800 /* Translate a NEON load/store element instruction. Return nonzero if the
3801 instruction is invalid. */
3802 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3803 {
3804 int rd, rn, rm;
3805 int op;
3806 int nregs;
3807 int interleave;
3808 int spacing;
3809 int stride;
3810 int size;
3811 int reg;
3812 int pass;
3813 int load;
3814 int shift;
3815 int n;
3816 TCGv addr;
3817 TCGv tmp;
3818 TCGv tmp2;
3819 TCGv_i64 tmp64;
3820
3821 if (!s->vfp_enabled)
3822 return 1;
3823 VFP_DREG_D(rd, insn);
3824 rn = (insn >> 16) & 0xf;
3825 rm = insn & 0xf;
3826 load = (insn & (1 << 21)) != 0;
3827 if ((insn & (1 << 23)) == 0) {
3828 /* Load store all elements. */
3829 op = (insn >> 8) & 0xf;
3830 size = (insn >> 6) & 3;
3831 if (op > 10)
3832 return 1;
3833 nregs = neon_ls_element_type[op].nregs;
3834 interleave = neon_ls_element_type[op].interleave;
3835 spacing = neon_ls_element_type[op].spacing;
3836 if (size == 3 && (interleave | spacing) != 1)
3837 return 1;
3838 addr = tcg_temp_new_i32();
3839 load_reg_var(s, addr, rn);
3840 stride = (1 << size) * interleave;
3841 for (reg = 0; reg < nregs; reg++) {
3842 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3843 load_reg_var(s, addr, rn);
3844 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3845 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3846 load_reg_var(s, addr, rn);
3847 tcg_gen_addi_i32(addr, addr, 1 << size);
3848 }
3849 if (size == 3) {
3850 if (load) {
3851 tmp64 = gen_ld64(addr, IS_USER(s));
3852 neon_store_reg64(tmp64, rd);
3853 tcg_temp_free_i64(tmp64);
3854 } else {
3855 tmp64 = tcg_temp_new_i64();
3856 neon_load_reg64(tmp64, rd);
3857 gen_st64(tmp64, addr, IS_USER(s));
3858 }
3859 tcg_gen_addi_i32(addr, addr, stride);
3860 } else {
3861 for (pass = 0; pass < 2; pass++) {
3862 if (size == 2) {
3863 if (load) {
3864 tmp = gen_ld32(addr, IS_USER(s));
3865 neon_store_reg(rd, pass, tmp);
3866 } else {
3867 tmp = neon_load_reg(rd, pass);
3868 gen_st32(tmp, addr, IS_USER(s));
3869 }
3870 tcg_gen_addi_i32(addr, addr, stride);
3871 } else if (size == 1) {
3872 if (load) {
3873 tmp = gen_ld16u(addr, IS_USER(s));
3874 tcg_gen_addi_i32(addr, addr, stride);
3875 tmp2 = gen_ld16u(addr, IS_USER(s));
3876 tcg_gen_addi_i32(addr, addr, stride);
3877 tcg_gen_shli_i32(tmp2, tmp2, 16);
3878 tcg_gen_or_i32(tmp, tmp, tmp2);
3879 tcg_temp_free_i32(tmp2);
3880 neon_store_reg(rd, pass, tmp);
3881 } else {
3882 tmp = neon_load_reg(rd, pass);
3883 tmp2 = tcg_temp_new_i32();
3884 tcg_gen_shri_i32(tmp2, tmp, 16);
3885 gen_st16(tmp, addr, IS_USER(s));
3886 tcg_gen_addi_i32(addr, addr, stride);
3887 gen_st16(tmp2, addr, IS_USER(s));
3888 tcg_gen_addi_i32(addr, addr, stride);
3889 }
3890 } else /* size == 0 */ {
3891 if (load) {
3892 TCGV_UNUSED(tmp2);
3893 for (n = 0; n < 4; n++) {
3894 tmp = gen_ld8u(addr, IS_USER(s));
3895 tcg_gen_addi_i32(addr, addr, stride);
3896 if (n == 0) {
3897 tmp2 = tmp;
3898 } else {
3899 tcg_gen_shli_i32(tmp, tmp, n * 8);
3900 tcg_gen_or_i32(tmp2, tmp2, tmp);
3901 tcg_temp_free_i32(tmp);
3902 }
3903 }
3904 neon_store_reg(rd, pass, tmp2);
3905 } else {
3906 tmp2 = neon_load_reg(rd, pass);
3907 for (n = 0; n < 4; n++) {
3908 tmp = tcg_temp_new_i32();
3909 if (n == 0) {
3910 tcg_gen_mov_i32(tmp, tmp2);
3911 } else {
3912 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3913 }
3914 gen_st8(tmp, addr, IS_USER(s));
3915 tcg_gen_addi_i32(addr, addr, stride);
3916 }
3917 tcg_temp_free_i32(tmp2);
3918 }
3919 }
3920 }
3921 }
3922 rd += spacing;
3923 }
3924 tcg_temp_free_i32(addr);
3925 stride = nregs * 8;
3926 } else {
3927 size = (insn >> 10) & 3;
3928 if (size == 3) {
3929 /* Load single element to all lanes. */
3930 int a = (insn >> 4) & 1;
3931 if (!load) {
3932 return 1;
3933 }
3934 size = (insn >> 6) & 3;
3935 nregs = ((insn >> 8) & 3) + 1;
3936
3937 if (size == 3) {
3938 if (nregs != 4 || a == 0) {
3939 return 1;
3940 }
3941 /* For VLD4, size == 3 with a == 1 means 32 bits at 16-byte alignment */
3942 size = 2;
3943 }
3944 if (nregs == 1 && a == 1 && size == 0) {
3945 return 1;
3946 }
3947 if (nregs == 3 && a == 1) {
3948 return 1;
3949 }
3950 addr = tcg_temp_new_i32();
3951 load_reg_var(s, addr, rn);
3952 if (nregs == 1) {
3953 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3954 tmp = gen_load_and_replicate(s, addr, size);
3955 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3956 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3957 if (insn & (1 << 5)) {
3958 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3959 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3960 }
3961 tcg_temp_free_i32(tmp);
3962 } else {
3963 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3964 stride = (insn & (1 << 5)) ? 2 : 1;
3965 for (reg = 0; reg < nregs; reg++) {
3966 tmp = gen_load_and_replicate(s, addr, size);
3967 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3968 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3969 tcg_temp_free_i32(tmp);
3970 tcg_gen_addi_i32(addr, addr, 1 << size);
3971 rd += stride;
3972 }
3973 }
3974 tcg_temp_free_i32(addr);
3975 stride = (1 << size) * nregs;
3976 } else {
3977 /* Single element. */
3978 pass = (insn >> 7) & 1;
3979 switch (size) {
3980 case 0:
3981 shift = ((insn >> 5) & 3) * 8;
3982 stride = 1;
3983 break;
3984 case 1:
3985 shift = ((insn >> 6) & 1) * 16;
3986 stride = (insn & (1 << 5)) ? 2 : 1;
3987 break;
3988 case 2:
3989 shift = 0;
3990 stride = (insn & (1 << 6)) ? 2 : 1;
3991 break;
3992 default:
3993 abort();
3994 }
3995 nregs = ((insn >> 8) & 3) + 1;
3996 addr = tcg_temp_new_i32();
3997 load_reg_var(s, addr, rn);
3998 for (reg = 0; reg < nregs; reg++) {
3999 if (load) {
4000 switch (size) {
4001 case 0:
4002 tmp = gen_ld8u(addr, IS_USER(s));
4003 break;
4004 case 1:
4005 tmp = gen_ld16u(addr, IS_USER(s));
4006 break;
4007 case 2:
4008 tmp = gen_ld32(addr, IS_USER(s));
4009 break;
4010 default: /* Avoid compiler warnings. */
4011 abort();
4012 }
4013 if (size != 2) {
4014 tmp2 = neon_load_reg(rd, pass);
4015 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
4016 tcg_temp_free_i32(tmp2);
4017 }
4018 neon_store_reg(rd, pass, tmp);
4019 } else { /* Store */
4020 tmp = neon_load_reg(rd, pass);
4021 if (shift)
4022 tcg_gen_shri_i32(tmp, tmp, shift);
4023 switch (size) {
4024 case 0:
4025 gen_st8(tmp, addr, IS_USER(s));
4026 break;
4027 case 1:
4028 gen_st16(tmp, addr, IS_USER(s));
4029 break;
4030 case 2:
4031 gen_st32(tmp, addr, IS_USER(s));
4032 break;
4033 }
4034 }
4035 rd += stride;
4036 tcg_gen_addi_i32(addr, addr, 1 << size);
4037 }
4038 tcg_temp_free_i32(addr);
4039 stride = nregs * (1 << size);
4040 }
4041 }
4042 if (rm != 15) {
4043 TCGv base;
4044
4045 base = load_reg(s, rn);
4046 if (rm == 13) {
4047 tcg_gen_addi_i32(base, base, stride);
4048 } else {
4049 TCGv index;
4050 index = load_reg(s, rm);
4051 tcg_gen_add_i32(base, base, index);
4052 tcg_temp_free_i32(index);
4053 }
4054 store_reg(s, rn, base);
4055 }
4056 return 0;
4057 }
4058
4059 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4060 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4061 {
4062 tcg_gen_and_i32(t, t, c);
4063 tcg_gen_andc_i32(f, f, c);
4064 tcg_gen_or_i32(dest, t, f);
4065 }
4066
4067 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4068 {
4069 switch (size) {
4070 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4071 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4072 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4073 default: abort();
4074 }
4075 }
4076
4077 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4078 {
4079 switch (size) {
4080 case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
4081 case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
4082 case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
4083 default: abort();
4084 }
4085 }
4086
4087 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4088 {
4089 switch (size) {
4090 case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
4091 case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
4092 case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
4093 default: abort();
4094 }
4095 }
4096
4097 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4098 {
4099 switch (size) {
4100 case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
4101 case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
4102 case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
4103 default: abort();
4104 }
4105 }
4106
4107 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4108 int q, int u)
4109 {
4110 if (q) {
4111 if (u) {
4112 switch (size) {
4113 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4114 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4115 default: abort();
4116 }
4117 } else {
4118 switch (size) {
4119 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4120 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4121 default: abort();
4122 }
4123 }
4124 } else {
4125 if (u) {
4126 switch (size) {
4127 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4128 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4129 default: abort();
4130 }
4131 } else {
4132 switch (size) {
4133 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4134 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4135 default: abort();
4136 }
4137 }
4138 }
4139 }
4140
4141 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4142 {
4143 if (u) {
4144 switch (size) {
4145 case 0: gen_helper_neon_widen_u8(dest, src); break;
4146 case 1: gen_helper_neon_widen_u16(dest, src); break;
4147 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4148 default: abort();
4149 }
4150 } else {
4151 switch (size) {
4152 case 0: gen_helper_neon_widen_s8(dest, src); break;
4153 case 1: gen_helper_neon_widen_s16(dest, src); break;
4154 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4155 default: abort();
4156 }
4157 }
4158 tcg_temp_free_i32(src);
4159 }
4160
4161 static inline void gen_neon_addl(int size)
4162 {
4163 switch (size) {
4164 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4165 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4166 case 2: tcg_gen_add_i64(CPU_V001); break;
4167 default: abort();
4168 }
4169 }
4170
4171 static inline void gen_neon_subl(int size)
4172 {
4173 switch (size) {
4174 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4175 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4176 case 2: tcg_gen_sub_i64(CPU_V001); break;
4177 default: abort();
4178 }
4179 }
4180
4181 static inline void gen_neon_negl(TCGv_i64 var, int size)
4182 {
4183 switch (size) {
4184 case 0: gen_helper_neon_negl_u16(var, var); break;
4185 case 1: gen_helper_neon_negl_u32(var, var); break;
4186 case 2: gen_helper_neon_negl_u64(var, var); break;
4187 default: abort();
4188 }
4189 }
4190
4191 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4192 {
4193 switch (size) {
4194 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4195 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
4196 default: abort();
4197 }
4198 }
4199
4200 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4201 {
4202 TCGv_i64 tmp;
4203
4204 switch ((size << 1) | u) {
4205 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4206 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4207 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4208 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4209 case 4:
4210 tmp = gen_muls_i64_i32(a, b);
4211 tcg_gen_mov_i64(dest, tmp);
4212 tcg_temp_free_i64(tmp);
4213 break;
4214 case 5:
4215 tmp = gen_mulu_i64_i32(a, b);
4216 tcg_gen_mov_i64(dest, tmp);
4217 tcg_temp_free_i64(tmp);
4218 break;
4219 default: abort();
4220 }
4221
4222 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4223 Don't forget to clean them now. */
4224 if (size < 2) {
4225 tcg_temp_free_i32(a);
4226 tcg_temp_free_i32(b);
4227 }
4228 }
4229
4230 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4231 {
4232 if (op) {
4233 if (u) {
4234 gen_neon_unarrow_sats(size, dest, src);
4235 } else {
4236 gen_neon_narrow(size, dest, src);
4237 }
4238 } else {
4239 if (u) {
4240 gen_neon_narrow_satu(size, dest, src);
4241 } else {
4242 gen_neon_narrow_sats(size, dest, src);
4243 }
4244 }
4245 }
4246
4247 /* Symbolic constants for op fields for Neon 3-register same-length.
4248 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4249 * table A7-9.
4250 */
4251 #define NEON_3R_VHADD 0
4252 #define NEON_3R_VQADD 1
4253 #define NEON_3R_VRHADD 2
4254 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4255 #define NEON_3R_VHSUB 4
4256 #define NEON_3R_VQSUB 5
4257 #define NEON_3R_VCGT 6
4258 #define NEON_3R_VCGE 7
4259 #define NEON_3R_VSHL 8
4260 #define NEON_3R_VQSHL 9
4261 #define NEON_3R_VRSHL 10
4262 #define NEON_3R_VQRSHL 11
4263 #define NEON_3R_VMAX 12
4264 #define NEON_3R_VMIN 13
4265 #define NEON_3R_VABD 14
4266 #define NEON_3R_VABA 15
4267 #define NEON_3R_VADD_VSUB 16
4268 #define NEON_3R_VTST_VCEQ 17
4269 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4270 #define NEON_3R_VMUL 19
4271 #define NEON_3R_VPMAX 20
4272 #define NEON_3R_VPMIN 21
4273 #define NEON_3R_VQDMULH_VQRDMULH 22
4274 #define NEON_3R_VPADD 23
4275 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4276 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4277 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4278 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4279 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4280 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4281
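/* Each entry is a bitmask of the size values (insn bits [21:20]) that are
 * valid for that op: bit n set means size == n is allowed. 0x7 covers the
 * 8/16/32-bit integer element forms; 0xf additionally allows size == 3,
 * which selects the 64-bit element path handled separately below. The
 * float ops use 0x5 because size bit 0 must be zero there and size bit 1
 * encodes the operation, as noted per entry.
 */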
4282 static const uint8_t neon_3r_sizes[] = {
4283 [NEON_3R_VHADD] = 0x7,
4284 [NEON_3R_VQADD] = 0xf,
4285 [NEON_3R_VRHADD] = 0x7,
4286 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4287 [NEON_3R_VHSUB] = 0x7,
4288 [NEON_3R_VQSUB] = 0xf,
4289 [NEON_3R_VCGT] = 0x7,
4290 [NEON_3R_VCGE] = 0x7,
4291 [NEON_3R_VSHL] = 0xf,
4292 [NEON_3R_VQSHL] = 0xf,
4293 [NEON_3R_VRSHL] = 0xf,
4294 [NEON_3R_VQRSHL] = 0xf,
4295 [NEON_3R_VMAX] = 0x7,
4296 [NEON_3R_VMIN] = 0x7,
4297 [NEON_3R_VABD] = 0x7,
4298 [NEON_3R_VABA] = 0x7,
4299 [NEON_3R_VADD_VSUB] = 0xf,
4300 [NEON_3R_VTST_VCEQ] = 0x7,
4301 [NEON_3R_VML] = 0x7,
4302 [NEON_3R_VMUL] = 0x7,
4303 [NEON_3R_VPMAX] = 0x7,
4304 [NEON_3R_VPMIN] = 0x7,
4305 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4306 [NEON_3R_VPADD] = 0x7,
4307 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4308 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4309 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4310 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4311 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4312 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4313 };
4314
4315 /* Translate a NEON data processing instruction. Return nonzero if the
4316 instruction is invalid.
4317 We process data in a mixture of 32-bit and 64-bit chunks.
4318 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
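/* Each pass below handles one 32-bit chunk of the vector: a D register
 * (Q == 0) is processed in two passes and a Q register (Q == 1) in four,
 * while the 64-bit element cases instead go through the cpu_V0/cpu_V1
 * temporaries with one pass per D register.
 */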
4319
4320 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4321 {
4322 int op;
4323 int q;
4324 int rd, rn, rm;
4325 int size;
4326 int shift;
4327 int pass;
4328 int count;
4329 int pairwise;
4330 int u;
4331 uint32_t imm, mask;
4332 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4333 TCGv_i64 tmp64;
4334
4335 if (!s->vfp_enabled)
4336 return 1;
4337 q = (insn & (1 << 6)) != 0;
4338 u = (insn >> 24) & 1;
4339 VFP_DREG_D(rd, insn);
4340 VFP_DREG_N(rn, insn);
4341 VFP_DREG_M(rm, insn);
4342 size = (insn >> 20) & 3;
4343 if ((insn & (1 << 23)) == 0) {
4344 /* Three register same length. */
4345 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4346 /* Catch invalid op and bad size combinations: UNDEF */
4347 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4348 return 1;
4349 }
4350 /* All insns of this form UNDEF for either this condition or the
4351 * superset of cases "Q==1"; we catch the latter later.
4352 */
4353 if (q && ((rd | rn | rm) & 1)) {
4354 return 1;
4355 }
4356 if (size == 3 && op != NEON_3R_LOGIC) {
4357 /* 64-bit element instructions. */
4358 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4359 neon_load_reg64(cpu_V0, rn + pass);
4360 neon_load_reg64(cpu_V1, rm + pass);
4361 switch (op) {
4362 case NEON_3R_VQADD:
4363 if (u) {
4364 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
4365 } else {
4366 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
4367 }
4368 break;
4369 case NEON_3R_VQSUB:
4370 if (u) {
4371 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
4372 } else {
4373 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
4374 }
4375 break;
4376 case NEON_3R_VSHL:
4377 if (u) {
4378 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4379 } else {
4380 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4381 }
4382 break;
4383 case NEON_3R_VQSHL:
4384 if (u) {
4385 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
4386 } else {
4387 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
4388 }
4389 break;
4390 case NEON_3R_VRSHL:
4391 if (u) {
4392 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4393 } else {
4394 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4395 }
4396 break;
4397 case NEON_3R_VQRSHL:
4398 if (u) {
4399 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
4400 } else {
4401 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
4402 }
4403 break;
4404 case NEON_3R_VADD_VSUB:
4405 if (u) {
4406 tcg_gen_sub_i64(CPU_V001);
4407 } else {
4408 tcg_gen_add_i64(CPU_V001);
4409 }
4410 break;
4411 default:
4412 abort();
4413 }
4414 neon_store_reg64(cpu_V0, rd + pass);
4415 }
4416 return 0;
4417 }
4418 pairwise = 0;
4419 switch (op) {
4420 case NEON_3R_VSHL:
4421 case NEON_3R_VQSHL:
4422 case NEON_3R_VRSHL:
4423 case NEON_3R_VQRSHL:
4424 {
4425 int rtmp;
4426 /* Shift instruction operands are reversed. */
4427 rtmp = rn;
4428 rn = rm;
4429 rm = rtmp;
4430 }
4431 break;
4432 case NEON_3R_VPADD:
4433 if (u) {
4434 return 1;
4435 }
4436 /* Fall through */
4437 case NEON_3R_VPMAX:
4438 case NEON_3R_VPMIN:
4439 pairwise = 1;
4440 break;
4441 case NEON_3R_FLOAT_ARITH:
4442 pairwise = (u && size < 2); /* if VPADD (float) */
4443 break;
4444 case NEON_3R_FLOAT_MINMAX:
4445 pairwise = u; /* if VPMIN/VPMAX (float) */
4446 break;
4447 case NEON_3R_FLOAT_CMP:
4448 if (!u && size) {
4449 /* no encoding for U=0 C=1x */
4450 return 1;
4451 }
4452 break;
4453 case NEON_3R_FLOAT_ACMP:
4454 if (!u) {
4455 return 1;
4456 }
4457 break;
4458 case NEON_3R_VRECPS_VRSQRTS:
4459 if (u) {
4460 return 1;
4461 }
4462 break;
4463 case NEON_3R_VMUL:
4464 if (u && (size != 0)) {
4465 /* UNDEF on invalid size for polynomial subcase */
4466 return 1;
4467 }
4468 break;
4469 default:
4470 break;
4471 }
4472
4473 if (pairwise && q) {
4474 /* All the pairwise insns UNDEF if Q is set */
4475 return 1;
4476 }
4477
4478 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4479
4480 if (pairwise) {
4481 /* Pairwise. */
4482 if (pass < 1) {
4483 tmp = neon_load_reg(rn, 0);
4484 tmp2 = neon_load_reg(rn, 1);
4485 } else {
4486 tmp = neon_load_reg(rm, 0);
4487 tmp2 = neon_load_reg(rm, 1);
4488 }
4489 } else {
4490 /* Elementwise. */
4491 tmp = neon_load_reg(rn, pass);
4492 tmp2 = neon_load_reg(rm, pass);
4493 }
4494 switch (op) {
4495 case NEON_3R_VHADD:
4496 GEN_NEON_INTEGER_OP(hadd);
4497 break;
4498 case NEON_3R_VQADD:
4499 GEN_NEON_INTEGER_OP(qadd);
4500 break;
4501 case NEON_3R_VRHADD:
4502 GEN_NEON_INTEGER_OP(rhadd);
4503 break;
4504 case NEON_3R_LOGIC: /* Logic ops. */
4505 switch ((u << 2) | size) {
4506 case 0: /* VAND */
4507 tcg_gen_and_i32(tmp, tmp, tmp2);
4508 break;
4509 case 1: /* BIC */
4510 tcg_gen_andc_i32(tmp, tmp, tmp2);
4511 break;
4512 case 2: /* VORR */
4513 tcg_gen_or_i32(tmp, tmp, tmp2);
4514 break;
4515 case 3: /* VORN */
4516 tcg_gen_orc_i32(tmp, tmp, tmp2);
4517 break;
4518 case 4: /* VEOR */
4519 tcg_gen_xor_i32(tmp, tmp, tmp2);
4520 break;
4521 case 5: /* VBSL */
4522 tmp3 = neon_load_reg(rd, pass);
4523 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4524 tcg_temp_free_i32(tmp3);
4525 break;
4526 case 6: /* VBIT */
4527 tmp3 = neon_load_reg(rd, pass);
4528 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4529 tcg_temp_free_i32(tmp3);
4530 break;
4531 case 7: /* VBIF */
4532 tmp3 = neon_load_reg(rd, pass);
4533 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4534 tcg_temp_free_i32(tmp3);
4535 break;
4536 }
4537 break;
4538 case NEON_3R_VHSUB:
4539 GEN_NEON_INTEGER_OP(hsub);
4540 break;
4541 case NEON_3R_VQSUB:
4542 GEN_NEON_INTEGER_OP(qsub);
4543 break;
4544 case NEON_3R_VCGT:
4545 GEN_NEON_INTEGER_OP(cgt);
4546 break;
4547 case NEON_3R_VCGE:
4548 GEN_NEON_INTEGER_OP(cge);
4549 break;
4550 case NEON_3R_VSHL:
4551 GEN_NEON_INTEGER_OP(shl);
4552 break;
4553 case NEON_3R_VQSHL:
4554 GEN_NEON_INTEGER_OP(qshl);
4555 break;
4556 case NEON_3R_VRSHL:
4557 GEN_NEON_INTEGER_OP(rshl);
4558 break;
4559 case NEON_3R_VQRSHL:
4560 GEN_NEON_INTEGER_OP(qrshl);
4561 break;
4562 case NEON_3R_VMAX:
4563 GEN_NEON_INTEGER_OP(max);
4564 break;
4565 case NEON_3R_VMIN:
4566 GEN_NEON_INTEGER_OP(min);
4567 break;
4568 case NEON_3R_VABD:
4569 GEN_NEON_INTEGER_OP(abd);
4570 break;
4571 case NEON_3R_VABA:
4572 GEN_NEON_INTEGER_OP(abd);
4573 tcg_temp_free_i32(tmp2);
4574 tmp2 = neon_load_reg(rd, pass);
4575 gen_neon_add(size, tmp, tmp2);
4576 break;
4577 case NEON_3R_VADD_VSUB:
4578 if (!u) { /* VADD */
4579 gen_neon_add(size, tmp, tmp2);
4580 } else { /* VSUB */
4581 switch (size) {
4582 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4583 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4584 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4585 default: abort();
4586 }
4587 }
4588 break;
4589 case NEON_3R_VTST_VCEQ:
4590 if (!u) { /* VTST */
4591 switch (size) {
4592 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4593 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4594 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4595 default: abort();
4596 }
4597 } else { /* VCEQ */
4598 switch (size) {
4599 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4600 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4601 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4602 default: abort();
4603 }
4604 }
4605 break;
4606 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
4607 switch (size) {
4608 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4609 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4610 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4611 default: abort();
4612 }
4613 tcg_temp_free_i32(tmp2);
4614 tmp2 = neon_load_reg(rd, pass);
4615 if (u) { /* VMLS */
4616 gen_neon_rsb(size, tmp, tmp2);
4617 } else { /* VMLA */
4618 gen_neon_add(size, tmp, tmp2);
4619 }
4620 break;
4621 case NEON_3R_VMUL:
4622 if (u) { /* polynomial */
4623 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4624 } else { /* Integer */
4625 switch (size) {
4626 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4627 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4628 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4629 default: abort();
4630 }
4631 }
4632 break;
4633 case NEON_3R_VPMAX:
4634 GEN_NEON_INTEGER_OP(pmax);
4635 break;
4636 case NEON_3R_VPMIN:
4637 GEN_NEON_INTEGER_OP(pmin);
4638 break;
4639 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4640 if (!u) { /* VQDMULH */
4641 switch (size) {
4642 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4643 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
4644 default: abort();
4645 }
4646 } else { /* VQRDMULH */
4647 switch (size) {
4648 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4649 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
4650 default: abort();
4651 }
4652 }
4653 break;
4654 case NEON_3R_VPADD:
4655 switch (size) {
4656 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4657 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4658 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4659 default: abort();
4660 }
4661 break;
4662 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4663 switch ((u << 2) | size) {
4664 case 0: /* VADD */
4665 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4666 break;
4667 case 2: /* VSUB */
4668 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4669 break;
4670 case 4: /* VPADD */
4671 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4672 break;
4673 case 6: /* VABD */
4674 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4675 break;
4676 default:
4677 abort();
4678 }
4679 break;
4680 case NEON_3R_FLOAT_MULTIPLY:
4681 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4682 if (!u) {
4683 tcg_temp_free_i32(tmp2);
4684 tmp2 = neon_load_reg(rd, pass);
4685 if (size == 0) {
4686 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4687 } else {
4688 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4689 }
4690 }
4691 break;
4692 case NEON_3R_FLOAT_CMP:
4693 if (!u) {
4694 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4695 } else {
4696 if (size == 0)
4697 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4698 else
4699 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4700 }
4701 break;
4702 case NEON_3R_FLOAT_ACMP:
4703 if (size == 0)
4704 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4705 else
4706 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4707 break;
4708 case NEON_3R_FLOAT_MINMAX:
4709 if (size == 0)
4710 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4711 else
4712 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4713 break;
4714 case NEON_3R_VRECPS_VRSQRTS:
4715 if (size == 0)
4716 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4717 else
4718 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4719 break;
4720 default:
4721 abort();
4722 }
4723 tcg_temp_free_i32(tmp2);
4724
4725 /* Save the result. For elementwise operations we can put it
4726 straight into the destination register. For pairwise operations
4727 we have to be careful to avoid clobbering the source operands. */
4728 if (pairwise && rd == rm) {
4729 neon_store_scratch(pass, tmp);
4730 } else {
4731 neon_store_reg(rd, pass, tmp);
4732 }
4733
4734 } /* for pass */
4735 if (pairwise && rd == rm) {
4736 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4737 tmp = neon_load_scratch(pass);
4738 neon_store_reg(rd, pass, tmp);
4739 }
4740 }
4741 /* End of 3 register same size operations. */
4742 } else if (insn & (1 << 4)) {
4743 if ((insn & 0x00380080) != 0) {
4744 /* Two registers and shift. */
4745 op = (insn >> 8) & 0xf;
4746 if (insn & (1 << 7)) {
4747 /* 64-bit shift. */
4748 if (op > 7) {
4749 return 1;
4750 }
4751 size = 3;
4752 } else {
4753 size = 2;
4754 while ((insn & (1 << (size + 19))) == 0)
4755 size--;
4756 }
4757 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4758 /* To avoid excessive duplication of ops we implement shift
4759 by immediate using the variable shift operations. */
4760 if (op < 8) {
4761 /* Shift by immediate:
4762 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4763 if (q && ((rd | rm) & 1)) {
4764 return 1;
4765 }
4766 if (!u && (op == 4 || op == 6)) {
4767 return 1;
4768 }
4769 /* Right shifts are encoded as N - shift, where N is the
4770 element size in bits. */
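/* For example, a 32-bit VSHR #8 (size == 2) is extracted above as
 * shift = 32 - 8 = 24; the subtraction below turns that into -8, and
 * the variable-shift helpers used here treat a negative count as a
 * right shift, which is how the immediate forms reuse them.
 */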
4771 if (op <= 4)
4772 shift = shift - (1 << (size + 3));
4773 if (size == 3) {
4774 count = q + 1;
4775 } else {
4776 count = q ? 4: 2;
4777 }
4778 switch (size) {
4779 case 0:
4780 imm = (uint8_t) shift;
4781 imm |= imm << 8;
4782 imm |= imm << 16;
4783 break;
4784 case 1:
4785 imm = (uint16_t) shift;
4786 imm |= imm << 16;
4787 break;
4788 case 2:
4789 case 3:
4790 imm = shift;
4791 break;
4792 default:
4793 abort();
4794 }
4795
4796 for (pass = 0; pass < count; pass++) {
4797 if (size == 3) {
4798 neon_load_reg64(cpu_V0, rm + pass);
4799 tcg_gen_movi_i64(cpu_V1, imm);
4800 switch (op) {
4801 case 0: /* VSHR */
4802 case 1: /* VSRA */
4803 if (u)
4804 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4805 else
4806 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4807 break;
4808 case 2: /* VRSHR */
4809 case 3: /* VRSRA */
4810 if (u)
4811 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4812 else
4813 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4814 break;
4815 case 4: /* VSRI */
4816 case 5: /* VSHL, VSLI */
4817 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4818 break;
4819 case 6: /* VQSHLU */
4820 gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1);
4821 break;
4822 case 7: /* VQSHL */
4823 if (u) {
4824 gen_helper_neon_qshl_u64(cpu_V0,
4825 cpu_V0, cpu_V1);
4826 } else {
4827 gen_helper_neon_qshl_s64(cpu_V0,
4828 cpu_V0, cpu_V1);
4829 }
4830 break;
4831 }
4832 if (op == 1 || op == 3) {
4833 /* Accumulate. */
4834 neon_load_reg64(cpu_V1, rd + pass);
4835 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4836 } else if (op == 4 || (op == 5 && u)) {
4837 /* Insert */
4838 neon_load_reg64(cpu_V1, rd + pass);
4839 uint64_t mask;
4840 if (shift < -63 || shift > 63) {
4841 mask = 0;
4842 } else {
4843 if (op == 4) {
4844 mask = 0xffffffffffffffffull >> -shift;
4845 } else {
4846 mask = 0xffffffffffffffffull << shift;
4847 }
4848 }
4849 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4850 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
4851 }
4852 neon_store_reg64(cpu_V0, rd + pass);
4853 } else { /* size < 3 */
4854 /* Operands in tmp and tmp2. */
4855 tmp = neon_load_reg(rm, pass);
4856 tmp2 = tcg_temp_new_i32();
4857 tcg_gen_movi_i32(tmp2, imm);
4858 switch (op) {
4859 case 0: /* VSHR */
4860 case 1: /* VSRA */
4861 GEN_NEON_INTEGER_OP(shl);
4862 break;
4863 case 2: /* VRSHR */
4864 case 3: /* VRSRA */
4865 GEN_NEON_INTEGER_OP(rshl);
4866 break;
4867 case 4: /* VSRI */
4868 case 5: /* VSHL, VSLI */
4869 switch (size) {
4870 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4871 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4872 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4873 default: abort();
4874 }
4875 break;
4876 case 6: /* VQSHLU */
4877 switch (size) {
4878 case 0:
4879 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
4880 break;
4881 case 1:
4882 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
4883 break;
4884 case 2:
4885 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
4886 break;
4887 default:
4888 abort();
4889 }
4890 break;
4891 case 7: /* VQSHL */
4892 GEN_NEON_INTEGER_OP(qshl);
4893 break;
4894 }
4895 tcg_temp_free_i32(tmp2);
4896
4897 if (op == 1 || op == 3) {
4898 /* Accumulate. */
4899 tmp2 = neon_load_reg(rd, pass);
4900 gen_neon_add(size, tmp, tmp2);
4901 tcg_temp_free_i32(tmp2);
4902 } else if (op == 4 || (op == 5 && u)) {
4903 /* Insert */
4904 switch (size) {
4905 case 0:
4906 if (op == 4)
4907 mask = 0xff >> -shift;
4908 else
4909 mask = (uint8_t)(0xff << shift);
4910 mask |= mask << 8;
4911 mask |= mask << 16;
4912 break;
4913 case 1:
4914 if (op == 4)
4915 mask = 0xffff >> -shift;
4916 else
4917 mask = (uint16_t)(0xffff << shift);
4918 mask |= mask << 16;
4919 break;
4920 case 2:
4921 if (shift < -31 || shift > 31) {
4922 mask = 0;
4923 } else {
4924 if (op == 4)
4925 mask = 0xffffffffu >> -shift;
4926 else
4927 mask = 0xffffffffu << shift;
4928 }
4929 break;
4930 default:
4931 abort();
4932 }
4933 tmp2 = neon_load_reg(rd, pass);
4934 tcg_gen_andi_i32(tmp, tmp, mask);
4935 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
4936 tcg_gen_or_i32(tmp, tmp, tmp2);
4937 tcg_temp_free_i32(tmp2);
4938 }
4939 neon_store_reg(rd, pass, tmp);
4940 }
4941 } /* for pass */
4942 } else if (op < 10) {
4943 /* Shift by immediate and narrow:
4944 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4945 int input_unsigned = (op == 8) ? !u : u;
4946 if (rm & 1) {
4947 return 1;
4948 }
4949 shift = shift - (1 << (size + 3));
4950 size++;
4951 if (size == 3) {
4952 tmp64 = tcg_const_i64(shift);
4953 neon_load_reg64(cpu_V0, rm);
4954 neon_load_reg64(cpu_V1, rm + 1);
4955 for (pass = 0; pass < 2; pass++) {
4956 TCGv_i64 in;
4957 if (pass == 0) {
4958 in = cpu_V0;
4959 } else {
4960 in = cpu_V1;
4961 }
4962 if (q) {
4963 if (input_unsigned) {
4964 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
4965 } else {
4966 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
4967 }
4968 } else {
4969 if (input_unsigned) {
4970 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
4971 } else {
4972 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
4973 }
4974 }
4975 tmp = tcg_temp_new_i32();
4976 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4977 neon_store_reg(rd, pass, tmp);
4978 } /* for pass */
4979 tcg_temp_free_i64(tmp64);
4980 } else {
4981 if (size == 1) {
4982 imm = (uint16_t)shift;
4983 imm |= imm << 16;
4984 } else {
4985 /* size == 2 */
4986 imm = (uint32_t)shift;
4987 }
4988 tmp2 = tcg_const_i32(imm);
4989 tmp4 = neon_load_reg(rm + 1, 0);
4990 tmp5 = neon_load_reg(rm + 1, 1);
4991 for (pass = 0; pass < 2; pass++) {
4992 if (pass == 0) {
4993 tmp = neon_load_reg(rm, 0);
4994 } else {
4995 tmp = tmp4;
4996 }
4997 gen_neon_shift_narrow(size, tmp, tmp2, q,
4998 input_unsigned);
4999 if (pass == 0) {
5000 tmp3 = neon_load_reg(rm, 1);
5001 } else {
5002 tmp3 = tmp5;
5003 }
5004 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5005 input_unsigned);
5006 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5007 tcg_temp_free_i32(tmp);
5008 tcg_temp_free_i32(tmp3);
5009 tmp = tcg_temp_new_i32();
5010 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5011 neon_store_reg(rd, pass, tmp);
5012 } /* for pass */
5013 tcg_temp_free_i32(tmp2);
5014 }
5015 } else if (op == 10) {
5016 /* VSHLL, VMOVL */
5017 if (q || (rd & 1)) {
5018 return 1;
5019 }
5020 tmp = neon_load_reg(rm, 0);
5021 tmp2 = neon_load_reg(rm, 1);
5022 for (pass = 0; pass < 2; pass++) {
5023 if (pass == 1)
5024 tmp = tmp2;
5025
5026 gen_neon_widen(cpu_V0, tmp, size, u);
5027
5028 if (shift != 0) {
5029 /* The shift is less than the width of the source
5030 type, so we can just shift the whole register. */
5031 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5032 /* Widen the result of shift: we need to clear
5033 * the potential overflow bits resulting from
5034 * left bits of the narrow input appearing as
5035 * right bits of the left-hand neighbouring narrow
5036 * input. */
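/* For example, with size == 0 and shift == 3 the mask computed below is
 * imm64 == 0x0007000700070007, so the AND clears the low three bits of
 * each widened 16-bit lane, which would otherwise hold bits shifted in
 * from the element to its right.
 */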
5037 if (size < 2 || !u) {
5038 uint64_t imm64;
5039 if (size == 0) {
5040 imm = (0xffu >> (8 - shift));
5041 imm |= imm << 16;
5042 } else if (size == 1) {
5043 imm = 0xffff >> (16 - shift);
5044 } else {
5045 /* size == 2 */
5046 imm = 0xffffffff >> (32 - shift);
5047 }
5048 if (size < 2) {
5049 imm64 = imm | (((uint64_t)imm) << 32);
5050 } else {
5051 imm64 = imm;
5052 }
5053 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5054 }
5055 }
5056 neon_store_reg64(cpu_V0, rd + pass);
5057 }
5058 } else if (op >= 14) {
5059 /* VCVT fixed-point. */
5060 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5061 return 1;
5062 }
5063 /* We have already masked out the must-be-1 top bit of imm6,
5064 * hence this 32-shift where the ARM ARM has 64-imm6.
5065 */
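/* (shift was extracted above as the low five bits of imm6, so
 * 32 - shift here equals the architectural 64 - imm6.)
 */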
5066 shift = 32 - shift;
5067 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5068 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5069 if (!(op & 1)) {
5070 if (u)
5071 gen_vfp_ulto(0, shift);
5072 else
5073 gen_vfp_slto(0, shift);
5074 } else {
5075 if (u)
5076 gen_vfp_toul(0, shift);
5077 else
5078 gen_vfp_tosl(0, shift);
5079 }
5080 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5081 }
5082 } else {
5083 return 1;
5084 }
5085 } else { /* (insn & 0x00380080) == 0 */
5086 int invert;
5087
5088 op = (insn >> 8) & 0xf;
5089 /* One register and immediate. */
5090 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
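/* This reassembles the 8-bit immediate abcdefgh from insn bit 24 (a),
 * bits [18:16] (bcd) and bits [3:0] (efgh); the switch over op (the
 * cmode field) below then expands it to the 32-bit per-pass value.
 */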
5091 invert = (insn & (1 << 5)) != 0;
5092 switch (op) {
5093 case 0: case 1:
5094 /* no-op */
5095 break;
5096 case 2: case 3:
5097 imm <<= 8;
5098 break;
5099 case 4: case 5:
5100 imm <<= 16;
5101 break;
5102 case 6: case 7:
5103 imm <<= 24;
5104 break;
5105 case 8: case 9:
5106 imm |= imm << 16;
5107 break;
5108 case 10: case 11:
5109 imm = (imm << 8) | (imm << 24);
5110 break;
5111 case 12:
5112 imm = (imm << 8) | 0xff;
5113 break;
5114 case 13:
5115 imm = (imm << 16) | 0xffff;
5116 break;
5117 case 14:
5118 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5119 if (invert)
5120 imm = ~imm;
5121 break;
5122 case 15:
5123 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5124 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5125 break;
5126 }
5127 if (invert)
5128 imm = ~imm;
5129
5130 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5131 if (op & 1 && op < 12) {
5132 tmp = neon_load_reg(rd, pass);
5133 if (invert) {
5134 /* The immediate value has already been inverted, so
5135 BIC becomes AND. */
5136 tcg_gen_andi_i32(tmp, tmp, imm);
5137 } else {
5138 tcg_gen_ori_i32(tmp, tmp, imm);
5139 }
5140 } else {
5141 /* VMOV, VMVN. */
5142 tmp = tcg_temp_new_i32();
5143 if (op == 14 && invert) {
5144 int n;
5145 uint32_t val;
5146 val = 0;
5147 for (n = 0; n < 4; n++) {
5148 if (imm & (1 << (n + (pass & 1) * 4)))
5149 val |= 0xff << (n * 8);
5150 }
5151 tcg_gen_movi_i32(tmp, val);
5152 } else {
5153 tcg_gen_movi_i32(tmp, imm);
5154 }
5155 }
5156 neon_store_reg(rd, pass, tmp);
5157 }
5158 }
5159 } else { /* (insn & 0x00800010) == 0x00800000 */
5160 if (size != 3) {
5161 op = (insn >> 8) & 0xf;
5162 if ((insn & (1 << 6)) == 0) {
5163 /* Three registers of different lengths. */
5164 int src1_wide;
5165 int src2_wide;
5166 int prewiden;
5167 /* prewiden, src1_wide, src2_wide */
5168 static const int neon_3reg_wide[16][3] = {
5169 {1, 0, 0}, /* VADDL */
5170 {1, 1, 0}, /* VADDW */
5171 {1, 0, 0}, /* VSUBL */
5172 {1, 1, 0}, /* VSUBW */
5173 {0, 1, 1}, /* VADDHN */
5174 {0, 0, 0}, /* VABAL */
5175 {0, 1, 1}, /* VSUBHN */
5176 {0, 0, 0}, /* VABDL */
5177 {0, 0, 0}, /* VMLAL */
5178 {0, 0, 0}, /* VQDMLAL */
5179 {0, 0, 0}, /* VMLSL */
5180 {0, 0, 0}, /* VQDMLSL */
5181 {0, 0, 0}, /* Integer VMULL */
5182 {0, 0, 0}, /* VQDMULL */
5183 {0, 0, 0} /* Polynomial VMULL */
5184 };
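/* prewiden means a doubleword source is widened to 64-bit elements before
 * the operation; src1_wide/src2_wide mean that operand is already a wide
 * value and is loaded with neon_load_reg64 instead.
 */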
5185
5186 prewiden = neon_3reg_wide[op][0];
5187 src1_wide = neon_3reg_wide[op][1];
5188 src2_wide = neon_3reg_wide[op][2];
5189
5190 if (size == 0 && (op == 9 || op == 11 || op == 13))
5191 return 1;
5192
5193 /* Avoid overlapping operands. Wide source operands are
5194 always aligned so will never overlap with wide
5195 destinations in problematic ways. */
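/* For example, when rd == rn and the first source is narrow, pass 0
 * stores a 64-bit result over that D register before pass 1 has read
 * its second half, so the half is parked in a scratch slot here and
 * reloaded inside the loop.
 */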
5196 if (rd == rm && !src2_wide) {
5197 tmp = neon_load_reg(rm, 1);
5198 neon_store_scratch(2, tmp);
5199 } else if (rd == rn && !src1_wide) {
5200 tmp = neon_load_reg(rn, 1);
5201 neon_store_scratch(2, tmp);
5202 }
5203 TCGV_UNUSED(tmp3);
5204 for (pass = 0; pass < 2; pass++) {
5205 if (src1_wide) {
5206 neon_load_reg64(cpu_V0, rn + pass);
5207 TCGV_UNUSED(tmp);
5208 } else {
5209 if (pass == 1 && rd == rn) {
5210 tmp = neon_load_scratch(2);
5211 } else {
5212 tmp = neon_load_reg(rn, pass);
5213 }
5214 if (prewiden) {
5215 gen_neon_widen(cpu_V0, tmp, size, u);
5216 }
5217 }
5218 if (src2_wide) {
5219 neon_load_reg64(cpu_V1, rm + pass);
5220 TCGV_UNUSED(tmp2);
5221 } else {
5222 if (pass == 1 && rd == rm) {
5223 tmp2 = neon_load_scratch(2);
5224 } else {
5225 tmp2 = neon_load_reg(rm, pass);
5226 }
5227 if (prewiden) {
5228 gen_neon_widen(cpu_V1, tmp2, size, u);
5229 }
5230 }
5231 switch (op) {
5232 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5233 gen_neon_addl(size);
5234 break;
5235 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5236 gen_neon_subl(size);
5237 break;
5238 case 5: case 7: /* VABAL, VABDL */
5239 switch ((size << 1) | u) {
5240 case 0:
5241 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5242 break;
5243 case 1:
5244 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5245 break;
5246 case 2:
5247 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5248 break;
5249 case 3:
5250 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5251 break;
5252 case 4:
5253 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5254 break;
5255 case 5:
5256 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5257 break;
5258 default: abort();
5259 }
5260 tcg_temp_free_i32(tmp2);
5261 tcg_temp_free_i32(tmp);
5262 break;
5263 case 8: case 9: case 10: case 11: case 12: case 13:
5264 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5265 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5266 break;
5267 case 14: /* Polynomial VMULL */
5268 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5269 tcg_temp_free_i32(tmp2);
5270 tcg_temp_free_i32(tmp);
5271 break;
5272 default: /* 15 is RESERVED. */
5273 return 1;
5274 }
5275 if (op == 13) {
5276 /* VQDMULL */
5277 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5278 neon_store_reg64(cpu_V0, rd + pass);
5279 } else if (op == 5 || (op >= 8 && op <= 11)) {
5280 /* Accumulate. */
5281 neon_load_reg64(cpu_V1, rd + pass);
5282 switch (op) {
5283 case 10: /* VMLSL */
5284 gen_neon_negl(cpu_V0, size);
5285 /* Fall through */
5286 case 5: case 8: /* VABAL, VMLAL */
5287 gen_neon_addl(size);
5288 break;
5289 case 9: case 11: /* VQDMLAL, VQDMLSL */
5290 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5291 if (op == 11) {
5292 gen_neon_negl(cpu_V0, size);
5293 }
5294 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5295 break;
5296 default:
5297 abort();
5298 }
5299 neon_store_reg64(cpu_V0, rd + pass);
5300 } else if (op == 4 || op == 6) {
5301 /* Narrowing operation. */
5302 tmp = tcg_temp_new_i32();
5303 if (!u) {
5304 switch (size) {
5305 case 0:
5306 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5307 break;
5308 case 1:
5309 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5310 break;
5311 case 2:
5312 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5313 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5314 break;
5315 default: abort();
5316 }
5317 } else {
5318 switch (size) {
5319 case 0:
5320 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5321 break;
5322 case 1:
5323 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5324 break;
5325 case 2:
5326 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5327 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5328 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5329 break;
5330 default: abort();
5331 }
5332 }
5333 if (pass == 0) {
5334 tmp3 = tmp;
5335 } else {
5336 neon_store_reg(rd, 0, tmp3);
5337 neon_store_reg(rd, 1, tmp);
5338 }
5339 } else {
5340 /* Write back the result. */
5341 neon_store_reg64(cpu_V0, rd + pass);
5342 }
5343 }
5344 } else {
5345 /* Two registers and a scalar. */
5346 switch (op) {
5347 case 0: /* Integer VMLA scalar */
5348 case 1: /* Float VMLA scalar */
5349 case 4: /* Integer VMLS scalar */
5350 case 5: /* Floating point VMLS scalar */
5351 case 8: /* Integer VMUL scalar */
5352 case 9: /* Floating point VMUL scalar */
5353 case 12: /* VQDMULH scalar */
5354 case 13: /* VQRDMULH scalar */
5355 tmp = neon_get_scalar(size, rm);
5356 neon_store_scratch(0, tmp);
5357 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5358 tmp = neon_load_scratch(0);
5359 tmp2 = neon_load_reg(rn, pass);
5360 if (op == 12) {
5361 if (size == 1) {
5362 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
5363 } else {
5364 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
5365 }
5366 } else if (op == 13) {
5367 if (size == 1) {
5368 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
5369 } else {
5370 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
5371 }
5372 } else if (op & 1) {
5373 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5374 } else {
5375 switch (size) {
5376 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5377 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5378 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5379 default: return 1;
5380 }
5381 }
5382 tcg_temp_free_i32(tmp2);
5383 if (op < 8) {
5384 /* Accumulate. */
5385 tmp2 = neon_load_reg(rd, pass);
5386 switch (op) {
5387 case 0:
5388 gen_neon_add(size, tmp, tmp2);
5389 break;
5390 case 1:
5391 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5392 break;
5393 case 4:
5394 gen_neon_rsb(size, tmp, tmp2);
5395 break;
5396 case 5:
5397 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5398 break;
5399 default:
5400 abort();
5401 }
5402 tcg_temp_free_i32(tmp2);
5403 }
5404 neon_store_reg(rd, pass, tmp);
5405 }
5406 break;
5407 case 2: /* VMLAL scalar */
5408 case 3: /* VQDMLAL scalar */
5409 case 6: /* VMLSL scalar */
5410 case 7: /* VQDMLSL scalar */
5411 case 10: /* VMULL scalar */
5412 case 11: /* VQDMULL scalar */
5413 if (size == 0 && (op == 3 || op == 7 || op == 11))
5414 return 1;
5415
5416 tmp2 = neon_get_scalar(size, rm);
5417 /* We need a copy of tmp2 because gen_neon_mull
5418 * deletes it during pass 0. */
5419 tmp4 = tcg_temp_new_i32();
5420 tcg_gen_mov_i32(tmp4, tmp2);
5421 tmp3 = neon_load_reg(rn, 1);
5422
5423 for (pass = 0; pass < 2; pass++) {
5424 if (pass == 0) {
5425 tmp = neon_load_reg(rn, 0);
5426 } else {
5427 tmp = tmp3;
5428 tmp2 = tmp4;
5429 }
5430 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5431 if (op != 11) {
5432 neon_load_reg64(cpu_V1, rd + pass);
5433 }
5434 switch (op) {
5435 case 6:
5436 gen_neon_negl(cpu_V0, size);
5437 /* Fall through */
5438 case 2:
5439 gen_neon_addl(size);
5440 break;
5441 case 3: case 7:
5442 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5443 if (op == 7) {
5444 gen_neon_negl(cpu_V0, size);
5445 }
5446 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5447 break;
5448 case 10:
5449 /* no-op */
5450 break;
5451 case 11:
5452 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5453 break;
5454 default:
5455 abort();
5456 }
5457 neon_store_reg64(cpu_V0, rd + pass);
5458 }
5459
5460
5461 break;
5462 default: /* 14 and 15 are RESERVED */
5463 return 1;
5464 }
5465 }
5466 } else { /* size == 3 */
5467 if (!u) {
5468 /* Extract. */
5469 imm = (insn >> 8) & 0xf;
5470
5471 if (imm > 7 && !q)
5472 return 1;
5473
5474 if (imm == 0) {
5475 neon_load_reg64(cpu_V0, rn);
5476 if (q) {
5477 neon_load_reg64(cpu_V1, rn + 1);
5478 }
5479 } else if (imm == 8) {
5480 neon_load_reg64(cpu_V0, rn + 1);
5481 if (q) {
5482 neon_load_reg64(cpu_V1, rm);
5483 }
5484 } else if (q) {
5485 tmp64 = tcg_temp_new_i64();
5486 if (imm < 8) {
5487 neon_load_reg64(cpu_V0, rn);
5488 neon_load_reg64(tmp64, rn + 1);
5489 } else {
5490 neon_load_reg64(cpu_V0, rn + 1);
5491 neon_load_reg64(tmp64, rm);
5492 }
5493 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5494 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5495 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5496 if (imm < 8) {
5497 neon_load_reg64(cpu_V1, rm);
5498 } else {
5499 neon_load_reg64(cpu_V1, rm + 1);
5500 imm -= 8;
5501 }
5502 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5503 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5504 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5505 tcg_temp_free_i64(tmp64);
5506 } else {
5507 /* BUGFIX */
5508 neon_load_reg64(cpu_V0, rn);
5509 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5510 neon_load_reg64(cpu_V1, rm);
5511 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5512 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5513 }
5514 neon_store_reg64(cpu_V0, rd);
5515 if (q) {
5516 neon_store_reg64(cpu_V1, rd + 1);
5517 }
5518 } else if ((insn & (1 << 11)) == 0) {
5519 /* Two register misc. */
5520 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5521 size = (insn >> 18) & 3;
5522 switch (op) {
5523 case 0: /* VREV64 */
5524 if (size == 3)
5525 return 1;
5526 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5527 tmp = neon_load_reg(rm, pass * 2);
5528 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5529 switch (size) {
5530 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5531 case 1: gen_swap_half(tmp); break;
5532 case 2: /* no-op */ break;
5533 default: abort();
5534 }
5535 neon_store_reg(rd, pass * 2 + 1, tmp);
5536 if (size == 2) {
5537 neon_store_reg(rd, pass * 2, tmp2);
5538 } else {
5539 switch (size) {
5540 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5541 case 1: gen_swap_half(tmp2); break;
5542 default: abort();
5543 }
5544 neon_store_reg(rd, pass * 2, tmp2);
5545 }
5546 }
5547 break;
5548 case 4: case 5: /* VPADDL */
5549 case 12: case 13: /* VPADAL */
5550 if (size == 3)
5551 return 1;
5552 for (pass = 0; pass < q + 1; pass++) {
5553 tmp = neon_load_reg(rm, pass * 2);
5554 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5555 tmp = neon_load_reg(rm, pass * 2 + 1);
5556 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5557 switch (size) {
5558 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5559 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5560 case 2: tcg_gen_add_i64(CPU_V001); break;
5561 default: abort();
5562 }
5563 if (op >= 12) {
5564 /* Accumulate. */
5565 neon_load_reg64(cpu_V1, rd + pass);
5566 gen_neon_addl(size);
5567 }
5568 neon_store_reg64(cpu_V0, rd + pass);
5569 }
5570 break;
5571 case 33: /* VTRN */
5572 if (size == 2) {
5573 int n;
5574 for (n = 0; n < (q ? 4 : 2); n += 2) {
5575 tmp = neon_load_reg(rm, n);
5576 tmp2 = neon_load_reg(rd, n + 1);
5577 neon_store_reg(rm, n, tmp2);
5578 neon_store_reg(rd, n + 1, tmp);
5579 }
5580 } else {
5581 goto elementwise;
5582 }
5583 break;
5584 case 34: /* VUZP */
5585 if (gen_neon_unzip(rd, rm, size, q)) {
5586 return 1;
5587 }
5588 break;
5589 case 35: /* VZIP */
5590 if (gen_neon_zip(rd, rm, size, q)) {
5591 return 1;
5592 }
5593 break;
5594 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5595 if (size == 3)
5596 return 1;
5597 TCGV_UNUSED(tmp2);
5598 for (pass = 0; pass < 2; pass++) {
5599 neon_load_reg64(cpu_V0, rm + pass);
5600 tmp = tcg_temp_new_i32();
5601 gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
5602 if (pass == 0) {
5603 tmp2 = tmp;
5604 } else {
5605 neon_store_reg(rd, 0, tmp2);
5606 neon_store_reg(rd, 1, tmp);
5607 }
5608 }
5609 break;
5610 case 38: /* VSHLL */
5611 if (q || size == 3)
5612 return 1;
5613 tmp = neon_load_reg(rm, 0);
5614 tmp2 = neon_load_reg(rm, 1);
5615 for (pass = 0; pass < 2; pass++) {
5616 if (pass == 1)
5617 tmp = tmp2;
5618 gen_neon_widen(cpu_V0, tmp, size, 1);
5619 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5620 neon_store_reg64(cpu_V0, rd + pass);
5621 }
5622 break;
5623 case 44: /* VCVT.F16.F32 */
5624 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5625 return 1;
5626 tmp = tcg_temp_new_i32();
5627 tmp2 = tcg_temp_new_i32();
5628 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5629 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5630 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5631 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5632 tcg_gen_shli_i32(tmp2, tmp2, 16);
5633 tcg_gen_or_i32(tmp2, tmp2, tmp);
5634 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5635 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5636 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5637 neon_store_reg(rd, 0, tmp2);
5638 tmp2 = tcg_temp_new_i32();
5639 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5640 tcg_gen_shli_i32(tmp2, tmp2, 16);
5641 tcg_gen_or_i32(tmp2, tmp2, tmp);
5642 neon_store_reg(rd, 1, tmp2);
5643 tcg_temp_free_i32(tmp);
5644 break;
5645 case 46: /* VCVT.F32.F16 */
5646 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5647 return 1;
5648 tmp3 = tcg_temp_new_i32();
5649 tmp = neon_load_reg(rm, 0);
5650 tmp2 = neon_load_reg(rm, 1);
5651 tcg_gen_ext16u_i32(tmp3, tmp);
5652 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5653 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5654 tcg_gen_shri_i32(tmp3, tmp, 16);
5655 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5656 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5657 tcg_temp_free_i32(tmp);
5658 tcg_gen_ext16u_i32(tmp3, tmp2);
5659 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5660 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5661 tcg_gen_shri_i32(tmp3, tmp2, 16);
5662 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5663 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5664 tcg_temp_free_i32(tmp2);
5665 tcg_temp_free_i32(tmp3);
5666 break;
5667 default:
5668 elementwise:
5669 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5670 if (op == 30 || op == 31 || op >= 58) {
5671 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5672 neon_reg_offset(rm, pass));
5673 TCGV_UNUSED(tmp);
5674 } else {
5675 tmp = neon_load_reg(rm, pass);
5676 }
5677 switch (op) {
5678 case 1: /* VREV32 */
5679 switch (size) {
5680 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5681 case 1: gen_swap_half(tmp); break;
5682 default: return 1;
5683 }
5684 break;
5685 case 2: /* VREV16 */
5686 if (size != 0)
5687 return 1;
5688 gen_rev16(tmp);
5689 break;
5690 case 8: /* CLS */
5691 switch (size) {
5692 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5693 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5694 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5695 default: return 1;
5696 }
5697 break;
5698 case 9: /* CLZ */
5699 switch (size) {
5700 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5701 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5702 case 2: gen_helper_clz(tmp, tmp); break;
5703 default: return 1;
5704 }
5705 break;
5706 case 10: /* CNT */
5707 if (size != 0)
5708 return 1;
5709 gen_helper_neon_cnt_u8(tmp, tmp);
5710 break;
5711 case 11: /* VNOT */
5712 if (size != 0)
5713 return 1;
5714 tcg_gen_not_i32(tmp, tmp);
5715 break;
5716 case 14: /* VQABS */
5717 switch (size) {
5718 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
5719 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
5720 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
5721 default: return 1;
5722 }
5723 break;
5724 case 15: /* VQNEG */
5725 switch (size) {
5726 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
5727 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
5728 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
5729 default: return 1;
5730 }
5731 break;
5732 case 16: case 19: /* VCGT #0, VCLE #0 */
5733 tmp2 = tcg_const_i32(0);
5734 switch(size) {
5735 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5736 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5737 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5738 default: return 1;
5739 }
5740 tcg_temp_free(tmp2);
5741 if (op == 19)
5742 tcg_gen_not_i32(tmp, tmp);
5743 break;
5744 case 17: case 20: /* VCGE #0, VCLT #0 */
5745 tmp2 = tcg_const_i32(0);
5746 switch(size) {
5747 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5748 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5749 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5750 default: return 1;
5751 }
5752 tcg_temp_free(tmp2);
5753 if (op == 20)
5754 tcg_gen_not_i32(tmp, tmp);
5755 break;
5756 case 18: /* VCEQ #0 */
5757 tmp2 = tcg_const_i32(0);
5758 switch(size) {
5759 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5760 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5761 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5762 default: return 1;
5763 }
5764 tcg_temp_free(tmp2);
5765 break;
5766 case 22: /* VABS */
5767 switch(size) {
5768 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5769 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5770 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5771 default: return 1;
5772 }
5773 break;
5774 case 23: /* VNEG */
5775 if (size == 3)
5776 return 1;
5777 tmp2 = tcg_const_i32(0);
5778 gen_neon_rsb(size, tmp, tmp2);
5779 tcg_temp_free(tmp2);
5780 break;
5781 case 24: /* Float VCGT #0 */
5782 tmp2 = tcg_const_i32(0);
5783 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5784 tcg_temp_free(tmp2);
5785 break;
5786 case 25: /* Float VCGE #0 */
5787 tmp2 = tcg_const_i32(0);
5788 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5789 tcg_temp_free(tmp2);
5790 break;
5791 case 26: /* Float VCEQ #0 */
5792 tmp2 = tcg_const_i32(0);
5793 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5794 tcg_temp_free(tmp2);
5795 break;
5796 case 27: /* Float VCLE #0 */
5797 tmp2 = tcg_const_i32(0);
5798 gen_helper_neon_cge_f32(tmp, tmp2, tmp);
5799 tcg_temp_free(tmp2);
5800 break;
5801 case 28: /* Float VCLT #0 */
5802 tmp2 = tcg_const_i32(0);
5803 gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
5804 tcg_temp_free(tmp2);
5805 break;
5806 case 30: /* Float VABS */
5807 gen_vfp_abs(0);
5808 break;
5809 case 31: /* Float VNEG */
5810 gen_vfp_neg(0);
5811 break;
5812 case 32: /* VSWP */
5813 tmp2 = neon_load_reg(rd, pass);
5814 neon_store_reg(rm, pass, tmp2);
5815 break;
5816 case 33: /* VTRN */
5817 tmp2 = neon_load_reg(rd, pass);
5818 switch (size) {
5819 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5820 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5821 case 2: abort();
5822 default: return 1;
5823 }
5824 neon_store_reg(rm, pass, tmp2);
5825 break;
5826 case 56: /* Integer VRECPE */
5827 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5828 break;
5829 case 57: /* Integer VRSQRTE */
5830 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5831 break;
5832 case 58: /* Float VRECPE */
5833 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5834 break;
5835 case 59: /* Float VRSQRTE */
5836 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5837 break;
5838 case 60: /* VCVT.F32.S32 */
5839 gen_vfp_sito(0);
5840 break;
5841 case 61: /* VCVT.F32.U32 */
5842 gen_vfp_uito(0);
5843 break;
5844 case 62: /* VCVT.S32.F32 */
5845 gen_vfp_tosiz(0);
5846 break;
5847 case 63: /* VCVT.U32.F32 */
5848 gen_vfp_touiz(0);
5849 break;
5850 default:
5851 /* Reserved: 21, 29, 39-56 */
5852 return 1;
5853 }
5854 if (op == 30 || op == 31 || op >= 58) {
5855 tcg_gen_st_f32(cpu_F0s, cpu_env,
5856 neon_reg_offset(rd, pass));
5857 } else {
5858 neon_store_reg(rd, pass, tmp);
5859 }
5860 }
5861 break;
5862 }
5863 } else if ((insn & (1 << 10)) == 0) {
5864 /* VTBL, VTBX. */
5865 int n = ((insn >> 5) & 0x18) + 8;
5866 if (insn & (1 << 6)) {
5867 tmp = neon_load_reg(rd, 0);
5868 } else {
5869 tmp = tcg_temp_new_i32();
5870 tcg_gen_movi_i32(tmp, 0);
5871 }
5872 tmp2 = neon_load_reg(rm, 0);
5873 tmp4 = tcg_const_i32(rn);
5874 tmp5 = tcg_const_i32(n);
5875 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5876 tcg_temp_free_i32(tmp);
5877 if (insn & (1 << 6)) {
5878 tmp = neon_load_reg(rd, 1);
5879 } else {
5880 tmp = tcg_temp_new_i32();
5881 tcg_gen_movi_i32(tmp, 0);
5882 }
5883 tmp3 = neon_load_reg(rm, 1);
5884 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5885 tcg_temp_free_i32(tmp5);
5886 tcg_temp_free_i32(tmp4);
5887 neon_store_reg(rd, 0, tmp2);
5888 neon_store_reg(rd, 1, tmp3);
5889 tcg_temp_free_i32(tmp);
5890 } else if ((insn & 0x380) == 0) {
5891 /* VDUP */
5892 if (insn & (1 << 19)) {
5893 tmp = neon_load_reg(rm, 1);
5894 } else {
5895 tmp = neon_load_reg(rm, 0);
5896 }
5897 if (insn & (1 << 16)) {
5898 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5899 } else if (insn & (1 << 17)) {
5900 if ((insn >> 18) & 1)
5901 gen_neon_dup_high16(tmp);
5902 else
5903 gen_neon_dup_low16(tmp);
5904 }
5905 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5906 tmp2 = tcg_temp_new_i32();
5907 tcg_gen_mov_i32(tmp2, tmp);
5908 neon_store_reg(rd, pass, tmp2);
5909 }
5910 tcg_temp_free_i32(tmp);
5911 } else {
5912 return 1;
5913 }
5914 }
5915 }
5916 return 0;
5917 }
5918
5919 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5920 {
5921 int crn = (insn >> 16) & 0xf;
5922 int crm = insn & 0xf;
5923 int op1 = (insn >> 21) & 7;
5924 int op2 = (insn >> 5) & 7;
5925 int rt = (insn >> 12) & 0xf;
5926 TCGv tmp;
5927
5928 /* Minimal set of debug registers, since we don't support debug */
5929 if (op1 == 0 && crn == 0 && op2 == 0) {
5930 switch (crm) {
5931 case 0:
5932 /* DBGDIDR: just RAZ. In particular this means the
5933 * "debug architecture version" bits will read as
5934 * a reserved value, which should cause Linux to
5935 * not try to use the debug hardware.
5936 */
5937 tmp = tcg_const_i32(0);
5938 store_reg(s, rt, tmp);
5939 return 0;
5940 case 1:
5941 case 2:
5942 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
5943 * don't implement memory mapped debug components
5944 */
5945 if (ENABLE_ARCH_7) {
5946 tmp = tcg_const_i32(0);
5947 store_reg(s, rt, tmp);
5948 return 0;
5949 }
5950 break;
5951 default:
5952 break;
5953 }
5954 }
5955
5956 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5957 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5958 /* TEECR */
5959 if (IS_USER(s))
5960 return 1;
5961 tmp = load_cpu_field(teecr);
5962 store_reg(s, rt, tmp);
5963 return 0;
5964 }
5965 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5966 /* TEEHBR */
5967 if (IS_USER(s) && (env->teecr & 1))
5968 return 1;
5969 tmp = load_cpu_field(teehbr);
5970 store_reg(s, rt, tmp);
5971 return 0;
5972 }
5973 }
5974 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5975 op1, crn, crm, op2);
5976 return 1;
5977 }
5978
5979 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5980 {
5981 int crn = (insn >> 16) & 0xf;
5982 int crm = insn & 0xf;
5983 int op1 = (insn >> 21) & 7;
5984 int op2 = (insn >> 5) & 7;
5985 int rt = (insn >> 12) & 0xf;
5986 TCGv tmp;
5987
5988 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5989 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5990 /* TEECR */
5991 if (IS_USER(s))
5992 return 1;
5993 tmp = load_reg(s, rt);
5994 gen_helper_set_teecr(cpu_env, tmp);
5995 tcg_temp_free_i32(tmp);
5996 return 0;
5997 }
5998 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5999 /* TEEHBR */
6000 if (IS_USER(s) && (env->teecr & 1))
6001 return 1;
6002 tmp = load_reg(s, rt);
6003 store_cpu_field(tmp, teehbr);
6004 return 0;
6005 }
6006 }
6007 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6008 op1, crn, crm, op2);
6009 return 1;
6010 }
6011
6012 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6013 {
6014 int cpnum;
6015
6016 cpnum = (insn >> 8) & 0xf;
6017 if (arm_feature(env, ARM_FEATURE_XSCALE)
6018 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6019 return 1;
6020
6021 switch (cpnum) {
6022 case 0:
6023 case 1:
6024 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6025 return disas_iwmmxt_insn(env, s, insn);
6026 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6027 return disas_dsp_insn(env, s, insn);
6028 }
6029 return 1;
6030 case 10:
6031 case 11:
6032 return disas_vfp_insn (env, s, insn);
6033 case 14:
6034 /* Coprocessors 7-15 are architecturally reserved by ARM.
6035 Unfortunately Intel decided to ignore this. */
6036 if (arm_feature(env, ARM_FEATURE_XSCALE))
6037 goto board;
6038 if (insn & (1 << 20))
6039 return disas_cp14_read(env, s, insn);
6040 else
6041 return disas_cp14_write(env, s, insn);
6042 case 15:
6043 return disas_cp15_insn (env, s, insn);
6044 default:
6045 board:
6046 /* Unknown coprocessor. See if the board has hooked it. */
6047 return disas_cp_insn (env, s, insn);
6048 }
6049 }
6050
6051
6052 /* Store a 64-bit value to a register pair. Clobbers val. */
6053 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6054 {
6055 TCGv tmp;
6056 tmp = tcg_temp_new_i32();
6057 tcg_gen_trunc_i64_i32(tmp, val);
6058 store_reg(s, rlow, tmp);
6059 tmp = tcg_temp_new_i32();
6060 tcg_gen_shri_i64(val, val, 32);
6061 tcg_gen_trunc_i64_i32(tmp, val);
6062 store_reg(s, rhigh, tmp);
6063 }
6064
6065 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6066 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6067 {
6068 TCGv_i64 tmp;
6069 TCGv tmp2;
6070
6071 /* Load value and extend to 64 bits. */
6072 tmp = tcg_temp_new_i64();
6073 tmp2 = load_reg(s, rlow);
6074 tcg_gen_extu_i32_i64(tmp, tmp2);
6075 tcg_temp_free_i32(tmp2);
6076 tcg_gen_add_i64(val, val, tmp);
6077 tcg_temp_free_i64(tmp);
6078 }
6079
6080 /* load and add a 64-bit value from a register pair. */
6081 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6082 {
6083 TCGv_i64 tmp;
6084 TCGv tmpl;
6085 TCGv tmph;
6086
6087 /* Load 64-bit value rd:rn. */
6088 tmpl = load_reg(s, rlow);
6089 tmph = load_reg(s, rhigh);
6090 tmp = tcg_temp_new_i64();
6091 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6092 tcg_temp_free_i32(tmpl);
6093 tcg_temp_free_i32(tmph);
6094 tcg_gen_add_i64(val, val, tmp);
6095 tcg_temp_free_i64(tmp);
6096 }
6097
6098 /* Set N and Z flags from a 64-bit value. */
6099 static void gen_logicq_cc(TCGv_i64 val)
6100 {
6101 TCGv tmp = tcg_temp_new_i32();
6102 gen_helper_logicq_cc(tmp, val);
6103 gen_logic_CC(tmp);
6104 tcg_temp_free_i32(tmp);
6105 }
6106
6107 /* Load/Store exclusive instructions are implemented by remembering
6108 the value/address loaded, and seeing if these are the same
6109 when the store is performed. This should be sufficient to implement
6110 the architecturally mandated semantics, and avoids having to monitor
6111 regular stores.
6112
6113 In system emulation mode only one CPU will be running at once, so
6114 this sequence is effectively atomic. In user emulation mode we
6115 throw an exception and handle the atomic operation elsewhere. */
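/* Concretely, gen_load_exclusive records the address in cpu_exclusive_addr
 * and the loaded data in cpu_exclusive_val (plus cpu_exclusive_high for the
 * doubleword case); gen_store_exclusive only performs the store when both
 * still match, and gen_clrex invalidates the monitor by setting the address
 * to -1.
 */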
6116 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6117 TCGv addr, int size)
6118 {
6119 TCGv tmp;
6120
6121 switch (size) {
6122 case 0:
6123 tmp = gen_ld8u(addr, IS_USER(s));
6124 break;
6125 case 1:
6126 tmp = gen_ld16u(addr, IS_USER(s));
6127 break;
6128 case 2:
6129 case 3:
6130 tmp = gen_ld32(addr, IS_USER(s));
6131 break;
6132 default:
6133 abort();
6134 }
6135 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6136 store_reg(s, rt, tmp);
6137 if (size == 3) {
6138 TCGv tmp2 = tcg_temp_new_i32();
6139 tcg_gen_addi_i32(tmp2, addr, 4);
6140 tmp = gen_ld32(tmp2, IS_USER(s));
6141 tcg_temp_free_i32(tmp2);
6142 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6143 store_reg(s, rt2, tmp);
6144 }
6145 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6146 }
6147
6148 static void gen_clrex(DisasContext *s)
6149 {
6150 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6151 }
6152
6153 #ifdef CONFIG_USER_ONLY
6154 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6155 TCGv addr, int size)
6156 {
6157 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6158 tcg_gen_movi_i32(cpu_exclusive_info,
6159 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6160 gen_exception_insn(s, 4, EXCP_STREX);
6161 }
6162 #else
6163 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6164 TCGv addr, int size)
6165 {
6166 TCGv tmp;
6167 int done_label;
6168 int fail_label;
6169
6170 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6171 [addr] = {Rt};
6172 {Rd} = 0;
6173 } else {
6174 {Rd} = 1;
6175 } */
6176 fail_label = gen_new_label();
6177 done_label = gen_new_label();
6178 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6179 switch (size) {
6180 case 0:
6181 tmp = gen_ld8u(addr, IS_USER(s));
6182 break;
6183 case 1:
6184 tmp = gen_ld16u(addr, IS_USER(s));
6185 break;
6186 case 2:
6187 case 3:
6188 tmp = gen_ld32(addr, IS_USER(s));
6189 break;
6190 default:
6191 abort();
6192 }
6193 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6194 tcg_temp_free_i32(tmp);
6195 if (size == 3) {
6196 TCGv tmp2 = tcg_temp_new_i32();
6197 tcg_gen_addi_i32(tmp2, addr, 4);
6198 tmp = gen_ld32(tmp2, IS_USER(s));
6199 tcg_temp_free_i32(tmp2);
6200 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6201 tcg_temp_free_i32(tmp);
6202 }
6203 tmp = load_reg(s, rt);
6204 switch (size) {
6205 case 0:
6206 gen_st8(tmp, addr, IS_USER(s));
6207 break;
6208 case 1:
6209 gen_st16(tmp, addr, IS_USER(s));
6210 break;
6211 case 2:
6212 case 3:
6213 gen_st32(tmp, addr, IS_USER(s));
6214 break;
6215 default:
6216 abort();
6217 }
6218 if (size == 3) {
6219 tcg_gen_addi_i32(addr, addr, 4);
6220 tmp = load_reg(s, rt2);
6221 gen_st32(tmp, addr, IS_USER(s));
6222 }
6223 tcg_gen_movi_i32(cpu_R[rd], 0);
6224 tcg_gen_br(done_label);
6225 gen_set_label(fail_label);
6226 tcg_gen_movi_i32(cpu_R[rd], 1);
6227 gen_set_label(done_label);
6228 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6229 }
6230 #endif
6231
6232 static void disas_arm_insn(CPUState * env, DisasContext *s)
6233 {
6234 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6235 TCGv tmp;
6236 TCGv tmp2;
6237 TCGv tmp3;
6238 TCGv addr;
6239 TCGv_i64 tmp64;
6240
6241 insn = ldl_code(s->pc);
6242 s->pc += 4;
6243
6244 /* M variants do not implement ARM mode. */
6245 if (IS_M(env))
6246 goto illegal_op;
6247 cond = insn >> 28;
6248 if (cond == 0xf){
6249 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6250 * choose to UNDEF. In ARMv5 and above the space is used
6251 * for miscellaneous unconditional instructions.
6252 */
6253 ARCH(5);
6254
6255 /* Unconditional instructions. */
6256 if (((insn >> 25) & 7) == 1) {
6257 /* NEON Data processing. */
6258 if (!arm_feature(env, ARM_FEATURE_NEON))
6259 goto illegal_op;
6260
6261 if (disas_neon_data_insn(env, s, insn))
6262 goto illegal_op;
6263 return;
6264 }
6265 if ((insn & 0x0f100000) == 0x04000000) {
6266 /* NEON load/store. */
6267 if (!arm_feature(env, ARM_FEATURE_NEON))
6268 goto illegal_op;
6269
6270 if (disas_neon_ls_insn(env, s, insn))
6271 goto illegal_op;
6272 return;
6273 }
6274 if (((insn & 0x0f30f000) == 0x0510f000) ||
6275 ((insn & 0x0f30f010) == 0x0710f000)) {
6276 if ((insn & (1 << 22)) == 0) {
6277 /* PLDW; v7MP */
6278 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6279 goto illegal_op;
6280 }
6281 }
6282 /* Otherwise PLD; v5TE+ */
6283 ARCH(5TE);
6284 return;
6285 }
6286 if (((insn & 0x0f70f000) == 0x0450f000) ||
6287 ((insn & 0x0f70f010) == 0x0650f000)) {
6288 ARCH(7);
6289 return; /* PLI; V7 */
6290 }
6291 if (((insn & 0x0f700000) == 0x04100000) ||
6292 ((insn & 0x0f700010) == 0x06100000)) {
6293 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6294 goto illegal_op;
6295 }
6296 return; /* v7MP: Unallocated memory hint: must NOP */
6297 }
6298
6299 if ((insn & 0x0ffffdff) == 0x01010000) {
6300 ARCH(6);
6301 /* setend */
6302 if (insn & (1 << 9)) {
6303 /* BE8 mode not implemented. */
6304 goto illegal_op;
6305 }
6306 return;
6307 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6308 switch ((insn >> 4) & 0xf) {
6309 case 1: /* clrex */
6310 ARCH(6K);
6311 gen_clrex(s);
6312 return;
6313 case 4: /* dsb */
6314 case 5: /* dmb */
6315 case 6: /* isb */
6316 ARCH(7);
6317 /* We don't emulate caches, so these are a no-op. */
6318 return;
6319 default:
6320 goto illegal_op;
6321 }
6322 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6323 /* srs */
6324 int32_t offset;
6325 if (IS_USER(s))
6326 goto illegal_op;
6327 ARCH(6);
6328 op1 = (insn & 0x1f);
6329 addr = tcg_temp_new_i32();
6330 tmp = tcg_const_i32(op1);
6331 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6332 tcg_temp_free_i32(tmp);
6333 i = (insn >> 23) & 3;
6334 switch (i) {
6335 case 0: offset = -4; break; /* DA */
6336 case 1: offset = 0; break; /* IA */
6337 case 2: offset = -8; break; /* DB */
6338 case 3: offset = 4; break; /* IB */
6339 default: abort();
6340 }
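/* Illustrative example: SRSDB (i == 2) stores LR at banked_SP - 8 and the
   SPSR at banked_SP - 4; with the W bit set, the writeback switch below
   leaves the banked SP at banked_SP - 8. */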
6341 if (offset)
6342 tcg_gen_addi_i32(addr, addr, offset);
6343 tmp = load_reg(s, 14);
6344 gen_st32(tmp, addr, 0);
6345 tmp = load_cpu_field(spsr);
6346 tcg_gen_addi_i32(addr, addr, 4);
6347 gen_st32(tmp, addr, 0);
6348 if (insn & (1 << 21)) {
6349 /* Base writeback. */
6350 switch (i) {
6351 case 0: offset = -8; break;
6352 case 1: offset = 4; break;
6353 case 2: offset = -4; break;
6354 case 3: offset = 0; break;
6355 default: abort();
6356 }
6357 if (offset)
6358 tcg_gen_addi_i32(addr, addr, offset);
6359 tmp = tcg_const_i32(op1);
6360 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6361 tcg_temp_free_i32(tmp);
6362 tcg_temp_free_i32(addr);
6363 } else {
6364 tcg_temp_free_i32(addr);
6365 }
6366 return;
6367 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6368 /* rfe */
6369 int32_t offset;
6370 if (IS_USER(s))
6371 goto illegal_op;
6372 ARCH(6);
6373 rn = (insn >> 16) & 0xf;
6374 addr = load_reg(s, rn);
6375 i = (insn >> 23) & 3;
6376 switch (i) {
6377 case 0: offset = -4; break; /* DA */
6378 case 1: offset = 0; break; /* IA */
6379 case 2: offset = -8; break; /* DB */
6380 case 3: offset = 4; break; /* IB */
6381 default: abort();
6382 }
6383 if (offset)
6384 tcg_gen_addi_i32(addr, addr, offset);
6385 /* Load PC into tmp and CPSR into tmp2. */
6386 tmp = gen_ld32(addr, 0);
6387 tcg_gen_addi_i32(addr, addr, 4);
6388 tmp2 = gen_ld32(addr, 0);
6389 if (insn & (1 << 21)) {
6390 /* Base writeback. */
6391 switch (i) {
6392 case 0: offset = -8; break;
6393 case 1: offset = 4; break;
6394 case 2: offset = -4; break;
6395 case 3: offset = 0; break;
6396 default: abort();
6397 }
6398 if (offset)
6399 tcg_gen_addi_i32(addr, addr, offset);
6400 store_reg(s, rn, addr);
6401 } else {
6402 tcg_temp_free_i32(addr);
6403 }
6404 gen_rfe(s, tmp, tmp2);
6405 return;
6406 } else if ((insn & 0x0e000000) == 0x0a000000) {
6407 /* branch link and change to thumb (blx <offset>) */
6408 int32_t offset;
6409
6410 val = (uint32_t)s->pc;
6411 tmp = tcg_temp_new_i32();
6412 tcg_gen_movi_i32(tmp, val);
6413 store_reg(s, 14, tmp);
6414 /* Sign-extend the 24-bit offset */
6415 offset = (((int32_t)insn) << 8) >> 8;
6416 /* offset * 4 + bit24 * 2 + (thumb bit) */
6417 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6418 /* pipeline offset */
6419 val += 4;
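/* Worked example (illustrative): for a BLX <imm> at address A, s->pc is
   already A + 4, so val becomes A + 8 + (imm24 << 2) + (H << 1) with bit 0
   set, and gen_bx_im below switches to Thumb state; LR holds A + 4. */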
6420 /* protected by the ARCH(5) check above, near the start of the uncond block */
6421 gen_bx_im(s, val);
6422 return;
6423 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6424 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6425 /* iWMMXt register transfer. */
6426 if (env->cp15.c15_cpar & (1 << 1))
6427 if (!disas_iwmmxt_insn(env, s, insn))
6428 return;
6429 }
6430 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6431 /* Coprocessor double register transfer. */
6432 ARCH(5TE);
6433 } else if ((insn & 0x0f000010) == 0x0e000010) {
6434 /* Additional coprocessor register transfer. */
6435 } else if ((insn & 0x0ff10020) == 0x01000000) {
6436 uint32_t mask;
6437 uint32_t val;
6438 /* cps (privileged) */
6439 if (IS_USER(s))
6440 return;
6441 mask = val = 0;
6442 if (insn & (1 << 19)) {
6443 if (insn & (1 << 8))
6444 mask |= CPSR_A;
6445 if (insn & (1 << 7))
6446 mask |= CPSR_I;
6447 if (insn & (1 << 6))
6448 mask |= CPSR_F;
6449 if (insn & (1 << 18))
6450 val |= mask;
6451 }
6452 if (insn & (1 << 17)) {
6453 mask |= CPSR_M;
6454 val |= (insn & 0x1f);
6455 }
6456 if (mask) {
6457 gen_set_psr_im(s, mask, 0, val);
6458 }
6459 return;
6460 }
6461 goto illegal_op;
6462 }
6463 if (cond != 0xe) {
6464 /* if the condition is not "always", we generate a conditional jump to
6465 the next instruction */
6466 s->condlabel = gen_new_label();
6467 gen_test_cc(cond ^ 1, s->condlabel);
6468 s->condjmp = 1;
6469 }
6470 if ((insn & 0x0f900000) == 0x03000000) {
6471 if ((insn & (1 << 21)) == 0) {
6472 ARCH(6T2);
6473 rd = (insn >> 12) & 0xf;
6474 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6475 if ((insn & (1 << 22)) == 0) {
6476 /* MOVW */
6477 tmp = tcg_temp_new_i32();
6478 tcg_gen_movi_i32(tmp, val);
6479 } else {
6480 /* MOVT */
6481 tmp = load_reg(s, rd);
6482 tcg_gen_ext16u_i32(tmp, tmp);
6483 tcg_gen_ori_i32(tmp, tmp, val << 16);
6484 }
6485 store_reg(s, rd, tmp);
6486 } else {
6487 if (((insn >> 12) & 0xf) != 0xf)
6488 goto illegal_op;
6489 if (((insn >> 16) & 0xf) == 0) {
6490 gen_nop_hint(s, insn & 0xff);
6491 } else {
6492 /* CPSR = immediate */
6493 val = insn & 0xff;
6494 shift = ((insn >> 8) & 0xf) * 2;
6495 if (shift)
6496 val = (val >> shift) | (val << (32 - shift));
6497 i = ((insn & (1 << 22)) != 0);
6498 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6499 goto illegal_op;
6500 }
6501 }
6502 } else if ((insn & 0x0f900000) == 0x01000000
6503 && (insn & 0x00000090) != 0x00000090) {
6504 /* miscellaneous instructions */
6505 op1 = (insn >> 21) & 3;
6506 sh = (insn >> 4) & 0xf;
6507 rm = insn & 0xf;
6508 switch (sh) {
6509 case 0x0: /* move program status register */
6510 if (op1 & 1) {
6511 /* PSR = reg */
6512 tmp = load_reg(s, rm);
6513 i = ((op1 & 2) != 0);
6514 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6515 goto illegal_op;
6516 } else {
6517 /* reg = PSR */
6518 rd = (insn >> 12) & 0xf;
6519 if (op1 & 2) {
6520 if (IS_USER(s))
6521 goto illegal_op;
6522 tmp = load_cpu_field(spsr);
6523 } else {
6524 tmp = tcg_temp_new_i32();
6525 gen_helper_cpsr_read(tmp);
6526 }
6527 store_reg(s, rd, tmp);
6528 }
6529 break;
6530 case 0x1:
6531 if (op1 == 1) {
6532 /* branch/exchange thumb (bx). */
6533 ARCH(4T);
6534 tmp = load_reg(s, rm);
6535 gen_bx(s, tmp);
6536 } else if (op1 == 3) {
6537 /* clz */
6538 ARCH(5);
6539 rd = (insn >> 12) & 0xf;
6540 tmp = load_reg(s, rm);
6541 gen_helper_clz(tmp, tmp);
6542 store_reg(s, rd, tmp);
6543 } else {
6544 goto illegal_op;
6545 }
6546 break;
6547 case 0x2:
6548 if (op1 == 1) {
6549 ARCH(5J); /* bxj */
6550 /* Trivial implementation equivalent to bx. */
6551 tmp = load_reg(s, rm);
6552 gen_bx(s, tmp);
6553 } else {
6554 goto illegal_op;
6555 }
6556 break;
6557 case 0x3:
6558 if (op1 != 1)
6559 goto illegal_op;
6560
6561 ARCH(5);
6562 /* branch link/exchange thumb (blx) */
6563 tmp = load_reg(s, rm);
6564 tmp2 = tcg_temp_new_i32();
6565 tcg_gen_movi_i32(tmp2, s->pc);
6566 store_reg(s, 14, tmp2);
6567 gen_bx(s, tmp);
6568 break;
6569 case 0x5: /* saturating add/subtract */
6570 ARCH(5TE);
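/* op1 selects QADD (0), QSUB (1), QDADD (2) or QDSUB (3); the two bit
   tests below implement the "double" and "subtract" halves of that
   mapping (informal summary). */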
6571 rd = (insn >> 12) & 0xf;
6572 rn = (insn >> 16) & 0xf;
6573 tmp = load_reg(s, rm);
6574 tmp2 = load_reg(s, rn);
6575 if (op1 & 2)
6576 gen_helper_double_saturate(tmp2, tmp2);
6577 if (op1 & 1)
6578 gen_helper_sub_saturate(tmp, tmp, tmp2);
6579 else
6580 gen_helper_add_saturate(tmp, tmp, tmp2);
6581 tcg_temp_free_i32(tmp2);
6582 store_reg(s, rd, tmp);
6583 break;
6584 case 7:
6585 /* SMC instruction (op1 == 3)
6586 and undefined instructions (op1 == 0 || op1 == 2)
6587 will trap */
6588 if (op1 != 1) {
6589 goto illegal_op;
6590 }
6591 /* bkpt */
6592 ARCH(5);
6593 gen_exception_insn(s, 4, EXCP_BKPT);
6594 break;
6595 case 0x8: /* signed multiply */
6596 case 0xa:
6597 case 0xc:
6598 case 0xe:
6599 ARCH(5TE);
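/* Informal summary of the decode below: op1 = 0 is SMLA<x><y>, 1 is
   SMLAW<y>/SMULW<y>, 2 is SMLAL<x><y>, 3 is SMUL<x><y>; sh selects the
   top/bottom halfwords and whether to accumulate. */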
6600 rs = (insn >> 8) & 0xf;
6601 rn = (insn >> 12) & 0xf;
6602 rd = (insn >> 16) & 0xf;
6603 if (op1 == 1) {
6604 /* (32 * 16) >> 16 */
6605 tmp = load_reg(s, rm);
6606 tmp2 = load_reg(s, rs);
6607 if (sh & 4)
6608 tcg_gen_sari_i32(tmp2, tmp2, 16);
6609 else
6610 gen_sxth(tmp2);
6611 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6612 tcg_gen_shri_i64(tmp64, tmp64, 16);
6613 tmp = tcg_temp_new_i32();
6614 tcg_gen_trunc_i64_i32(tmp, tmp64);
6615 tcg_temp_free_i64(tmp64);
6616 if ((sh & 2) == 0) {
6617 tmp2 = load_reg(s, rn);
6618 gen_helper_add_setq(tmp, tmp, tmp2);
6619 tcg_temp_free_i32(tmp2);
6620 }
6621 store_reg(s, rd, tmp);
6622 } else {
6623 /* 16 * 16 */
6624 tmp = load_reg(s, rm);
6625 tmp2 = load_reg(s, rs);
6626 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6627 tcg_temp_free_i32(tmp2);
6628 if (op1 == 2) {
6629 tmp64 = tcg_temp_new_i64();
6630 tcg_gen_ext_i32_i64(tmp64, tmp);
6631 tcg_temp_free_i32(tmp);
6632 gen_addq(s, tmp64, rn, rd);
6633 gen_storeq_reg(s, rn, rd, tmp64);
6634 tcg_temp_free_i64(tmp64);
6635 } else {
6636 if (op1 == 0) {
6637 tmp2 = load_reg(s, rn);
6638 gen_helper_add_setq(tmp, tmp, tmp2);
6639 tcg_temp_free_i32(tmp2);
6640 }
6641 store_reg(s, rd, tmp);
6642 }
6643 }
6644 break;
6645 default:
6646 goto illegal_op;
6647 }
6648 } else if (((insn & 0x0e000000) == 0 &&
6649 (insn & 0x00000090) != 0x90) ||
6650 ((insn & 0x0e000000) == (1 << 25))) {
6651 int set_cc, logic_cc, shiftop;
6652
6653 op1 = (insn >> 21) & 0xf;
6654 set_cc = (insn >> 20) & 1;
6655 logic_cc = table_logic_cc[op1] & set_cc;
6656
6657 /* data processing instruction */
6658 if (insn & (1 << 25)) {
6659 /* immediate operand */
6660 val = insn & 0xff;
6661 shift = ((insn >> 8) & 0xf) * 2;
6662 if (shift) {
6663 val = (val >> shift) | (val << (32 - shift));
6664 }
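/* e.g. imm8 = 0xff with rotate field 4 gives shift = 8 and
   val = 0xff000000; for flag-setting logical ops with a non-zero rotation
   the shifter carry-out is bit 31 of val (gen_set_CF_bit31 below). */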
6665 tmp2 = tcg_temp_new_i32();
6666 tcg_gen_movi_i32(tmp2, val);
6667 if (logic_cc && shift) {
6668 gen_set_CF_bit31(tmp2);
6669 }
6670 } else {
6671 /* register */
6672 rm = (insn) & 0xf;
6673 tmp2 = load_reg(s, rm);
6674 shiftop = (insn >> 5) & 3;
6675 if (!(insn & (1 << 4))) {
6676 shift = (insn >> 7) & 0x1f;
6677 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6678 } else {
6679 rs = (insn >> 8) & 0xf;
6680 tmp = load_reg(s, rs);
6681 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6682 }
6683 }
6684 if (op1 != 0x0f && op1 != 0x0d) {
6685 rn = (insn >> 16) & 0xf;
6686 tmp = load_reg(s, rn);
6687 } else {
6688 TCGV_UNUSED(tmp);
6689 }
6690 rd = (insn >> 12) & 0xf;
6691 switch(op1) {
6692 case 0x00:
6693 tcg_gen_and_i32(tmp, tmp, tmp2);
6694 if (logic_cc) {
6695 gen_logic_CC(tmp);
6696 }
6697 store_reg_bx(env, s, rd, tmp);
6698 break;
6699 case 0x01:
6700 tcg_gen_xor_i32(tmp, tmp, tmp2);
6701 if (logic_cc) {
6702 gen_logic_CC(tmp);
6703 }
6704 store_reg_bx(env, s, rd, tmp);
6705 break;
6706 case 0x02:
6707 if (set_cc && rd == 15) {
6708 /* SUBS r15, ... is used for exception return. */
6709 if (IS_USER(s)) {
6710 goto illegal_op;
6711 }
6712 gen_helper_sub_cc(tmp, tmp, tmp2);
6713 gen_exception_return(s, tmp);
6714 } else {
6715 if (set_cc) {
6716 gen_helper_sub_cc(tmp, tmp, tmp2);
6717 } else {
6718 tcg_gen_sub_i32(tmp, tmp, tmp2);
6719 }
6720 store_reg_bx(env, s, rd, tmp);
6721 }
6722 break;
6723 case 0x03:
6724 if (set_cc) {
6725 gen_helper_sub_cc(tmp, tmp2, tmp);
6726 } else {
6727 tcg_gen_sub_i32(tmp, tmp2, tmp);
6728 }
6729 store_reg_bx(env, s, rd, tmp);
6730 break;
6731 case 0x04:
6732 if (set_cc) {
6733 gen_helper_add_cc(tmp, tmp, tmp2);
6734 } else {
6735 tcg_gen_add_i32(tmp, tmp, tmp2);
6736 }
6737 store_reg_bx(env, s, rd, tmp);
6738 break;
6739 case 0x05:
6740 if (set_cc) {
6741 gen_helper_adc_cc(tmp, tmp, tmp2);
6742 } else {
6743 gen_add_carry(tmp, tmp, tmp2);
6744 }
6745 store_reg_bx(env, s, rd, tmp);
6746 break;
6747 case 0x06:
6748 if (set_cc) {
6749 gen_helper_sbc_cc(tmp, tmp, tmp2);
6750 } else {
6751 gen_sub_carry(tmp, tmp, tmp2);
6752 }
6753 store_reg_bx(env, s, rd, tmp);
6754 break;
6755 case 0x07:
6756 if (set_cc) {
6757 gen_helper_sbc_cc(tmp, tmp2, tmp);
6758 } else {
6759 gen_sub_carry(tmp, tmp2, tmp);
6760 }
6761 store_reg_bx(env, s, rd, tmp);
6762 break;
6763 case 0x08:
6764 if (set_cc) {
6765 tcg_gen_and_i32(tmp, tmp, tmp2);
6766 gen_logic_CC(tmp);
6767 }
6768 tcg_temp_free_i32(tmp);
6769 break;
6770 case 0x09:
6771 if (set_cc) {
6772 tcg_gen_xor_i32(tmp, tmp, tmp2);
6773 gen_logic_CC(tmp);
6774 }
6775 tcg_temp_free_i32(tmp);
6776 break;
6777 case 0x0a:
6778 if (set_cc) {
6779 gen_helper_sub_cc(tmp, tmp, tmp2);
6780 }
6781 tcg_temp_free_i32(tmp);
6782 break;
6783 case 0x0b:
6784 if (set_cc) {
6785 gen_helper_add_cc(tmp, tmp, tmp2);
6786 }
6787 tcg_temp_free_i32(tmp);
6788 break;
6789 case 0x0c:
6790 tcg_gen_or_i32(tmp, tmp, tmp2);
6791 if (logic_cc) {
6792 gen_logic_CC(tmp);
6793 }
6794 store_reg_bx(env, s, rd, tmp);
6795 break;
6796 case 0x0d:
6797 if (logic_cc && rd == 15) {
6798 /* MOVS r15, ... is used for exception return. */
6799 if (IS_USER(s)) {
6800 goto illegal_op;
6801 }
6802 gen_exception_return(s, tmp2);
6803 } else {
6804 if (logic_cc) {
6805 gen_logic_CC(tmp2);
6806 }
6807 store_reg_bx(env, s, rd, tmp2);
6808 }
6809 break;
6810 case 0x0e:
6811 tcg_gen_andc_i32(tmp, tmp, tmp2);
6812 if (logic_cc) {
6813 gen_logic_CC(tmp);
6814 }
6815 store_reg_bx(env, s, rd, tmp);
6816 break;
6817 default:
6818 case 0x0f:
6819 tcg_gen_not_i32(tmp2, tmp2);
6820 if (logic_cc) {
6821 gen_logic_CC(tmp2);
6822 }
6823 store_reg_bx(env, s, rd, tmp2);
6824 break;
6825 }
6826 if (op1 != 0x0f && op1 != 0x0d) {
6827 tcg_temp_free_i32(tmp2);
6828 }
6829 } else {
6830 /* other instructions */
6831 op1 = (insn >> 24) & 0xf;
6832 switch(op1) {
6833 case 0x0:
6834 case 0x1:
6835 /* multiplies, extra load/stores */
6836 sh = (insn >> 5) & 3;
6837 if (sh == 0) {
6838 if (op1 == 0x0) {
6839 rd = (insn >> 16) & 0xf;
6840 rn = (insn >> 12) & 0xf;
6841 rs = (insn >> 8) & 0xf;
6842 rm = (insn) & 0xf;
6843 op1 = (insn >> 20) & 0xf;
6844 switch (op1) {
6845 case 0: case 1: case 2: case 3: case 6:
6846 /* 32 bit mul */
6847 tmp = load_reg(s, rs);
6848 tmp2 = load_reg(s, rm);
6849 tcg_gen_mul_i32(tmp, tmp, tmp2);
6850 tcg_temp_free_i32(tmp2);
6851 if (insn & (1 << 22)) {
6852 /* Subtract (mls) */
6853 ARCH(6T2);
6854 tmp2 = load_reg(s, rn);
6855 tcg_gen_sub_i32(tmp, tmp2, tmp);
6856 tcg_temp_free_i32(tmp2);
6857 } else if (insn & (1 << 21)) {
6858 /* Add */
6859 tmp2 = load_reg(s, rn);
6860 tcg_gen_add_i32(tmp, tmp, tmp2);
6861 tcg_temp_free_i32(tmp2);
6862 }
6863 if (insn & (1 << 20))
6864 gen_logic_CC(tmp);
6865 store_reg(s, rd, tmp);
6866 break;
6867 case 4:
6868 /* 64 bit mul double accumulate (UMAAL) */
6869 ARCH(6);
6870 tmp = load_reg(s, rs);
6871 tmp2 = load_reg(s, rm);
6872 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6873 gen_addq_lo(s, tmp64, rn);
6874 gen_addq_lo(s, tmp64, rd);
6875 gen_storeq_reg(s, rn, rd, tmp64);
6876 tcg_temp_free_i64(tmp64);
6877 break;
6878 case 8: case 9: case 10: case 11:
6879 case 12: case 13: case 14: case 15:
6880 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6881 tmp = load_reg(s, rs);
6882 tmp2 = load_reg(s, rm);
6883 if (insn & (1 << 22)) {
6884 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6885 } else {
6886 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6887 }
6888 if (insn & (1 << 21)) { /* mult accumulate */
6889 gen_addq(s, tmp64, rn, rd);
6890 }
6891 if (insn & (1 << 20)) {
6892 gen_logicq_cc(tmp64);
6893 }
6894 gen_storeq_reg(s, rn, rd, tmp64);
6895 tcg_temp_free_i64(tmp64);
6896 break;
6897 default:
6898 goto illegal_op;
6899 }
6900 } else {
6901 rn = (insn >> 16) & 0xf;
6902 rd = (insn >> 12) & 0xf;
6903 if (insn & (1 << 23)) {
6904 /* load/store exclusive */
6905 op1 = (insn >> 21) & 0x3;
6906 if (op1)
6907 ARCH(6K);
6908 else
6909 ARCH(6);
6910 addr = tcg_temp_local_new_i32();
6911 load_reg_var(s, addr, rn);
6912 if (insn & (1 << 20)) {
6913 switch (op1) {
6914 case 0: /* ldrex */
6915 gen_load_exclusive(s, rd, 15, addr, 2);
6916 break;
6917 case 1: /* ldrexd */
6918 gen_load_exclusive(s, rd, rd + 1, addr, 3);
6919 break;
6920 case 2: /* ldrexb */
6921 gen_load_exclusive(s, rd, 15, addr, 0);
6922 break;
6923 case 3: /* ldrexh */
6924 gen_load_exclusive(s, rd, 15, addr, 1);
6925 break;
6926 default:
6927 abort();
6928 }
6929 } else {
6930 rm = insn & 0xf;
6931 switch (op1) {
6932 case 0: /* strex */
6933 gen_store_exclusive(s, rd, rm, 15, addr, 2);
6934 break;
6935 case 1: /* strexd */
6936 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
6937 break;
6938 case 2: /* strexb */
6939 gen_store_exclusive(s, rd, rm, 15, addr, 0);
6940 break;
6941 case 3: /* strexh */
6942 gen_store_exclusive(s, rd, rm, 15, addr, 1);
6943 break;
6944 default:
6945 abort();
6946 }
6947 }
6948 tcg_temp_free(addr);
6949 } else {
6950 /* SWP instruction */
6951 rm = (insn) & 0xf;
6952
6953 /* ??? This is not really atomic. However, we know
6954 we never have multiple CPUs running in parallel,
6955 so it is good enough. */
6956 addr = load_reg(s, rn);
6957 tmp = load_reg(s, rm);
6958 if (insn & (1 << 22)) {
6959 tmp2 = gen_ld8u(addr, IS_USER(s));
6960 gen_st8(tmp, addr, IS_USER(s));
6961 } else {
6962 tmp2 = gen_ld32(addr, IS_USER(s));
6963 gen_st32(tmp, addr, IS_USER(s));
6964 }
6965 tcg_temp_free_i32(addr);
6966 store_reg(s, rd, tmp2);
6967 }
6968 }
6969 } else {
6970 int address_offset;
6971 int load;
6972 /* Misc load/store */
6973 rn = (insn >> 16) & 0xf;
6974 rd = (insn >> 12) & 0xf;
6975 addr = load_reg(s, rn);
6976 if (insn & (1 << 24))
6977 gen_add_datah_offset(s, insn, 0, addr);
6978 address_offset = 0;
6979 if (insn & (1 << 20)) {
6980 /* load */
6981 switch(sh) {
6982 case 1:
6983 tmp = gen_ld16u(addr, IS_USER(s));
6984 break;
6985 case 2:
6986 tmp = gen_ld8s(addr, IS_USER(s));
6987 break;
6988 default:
6989 case 3:
6990 tmp = gen_ld16s(addr, IS_USER(s));
6991 break;
6992 }
6993 load = 1;
6994 } else if (sh & 2) {
6995 ARCH(5TE);
6996 /* doubleword */
6997 if (sh & 1) {
6998 /* store */
6999 tmp = load_reg(s, rd);
7000 gen_st32(tmp, addr, IS_USER(s));
7001 tcg_gen_addi_i32(addr, addr, 4);
7002 tmp = load_reg(s, rd + 1);
7003 gen_st32(tmp, addr, IS_USER(s));
7004 load = 0;
7005 } else {
7006 /* load */
7007 tmp = gen_ld32(addr, IS_USER(s));
7008 store_reg(s, rd, tmp);
7009 tcg_gen_addi_i32(addr, addr, 4);
7010 tmp = gen_ld32(addr, IS_USER(s));
7011 rd++;
7012 load = 1;
7013 }
7014 address_offset = -4;
7015 } else {
7016 /* store */
7017 tmp = load_reg(s, rd);
7018 gen_st16(tmp, addr, IS_USER(s));
7019 load = 0;
7020 }
7021 /* Perform base writeback before storing the loaded value, to
7022 ensure correct behavior with overlapping index registers.
7023 ldrd with base writeback is undefined if the
7024 destination and index registers overlap. */
7025 if (!(insn & (1 << 24))) {
7026 gen_add_datah_offset(s, insn, address_offset, addr);
7027 store_reg(s, rn, addr);
7028 } else if (insn & (1 << 21)) {
7029 if (address_offset)
7030 tcg_gen_addi_i32(addr, addr, address_offset);
7031 store_reg(s, rn, addr);
7032 } else {
7033 tcg_temp_free_i32(addr);
7034 }
7035 if (load) {
7036 /* Complete the load. */
7037 store_reg(s, rd, tmp);
7038 }
7039 }
7040 break;
7041 case 0x4:
7042 case 0x5:
7043 goto do_ldst;
7044 case 0x6:
7045 case 0x7:
7046 if (insn & (1 << 4)) {
7047 ARCH(6);
7048 /* ARMv6 Media instructions. */
7049 rm = insn & 0xf;
7050 rn = (insn >> 16) & 0xf;
7051 rd = (insn >> 12) & 0xf;
7052 rs = (insn >> 8) & 0xf;
7053 switch ((insn >> 23) & 3) {
7054 case 0: /* Parallel add/subtract. */
7055 op1 = (insn >> 20) & 7;
7056 tmp = load_reg(s, rn);
7057 tmp2 = load_reg(s, rm);
7058 sh = (insn >> 5) & 7;
7059 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7060 goto illegal_op;
7061 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7062 tcg_temp_free_i32(tmp2);
7063 store_reg(s, rd, tmp);
7064 break;
7065 case 1:
7066 if ((insn & 0x00700020) == 0) {
7067 /* Halfword pack. */
7068 tmp = load_reg(s, rn);
7069 tmp2 = load_reg(s, rm);
7070 shift = (insn >> 7) & 0x1f;
7071 if (insn & (1 << 6)) {
7072 /* pkhtb */
7073 if (shift == 0)
7074 shift = 31;
7075 tcg_gen_sari_i32(tmp2, tmp2, shift);
7076 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7077 tcg_gen_ext16u_i32(tmp2, tmp2);
7078 } else {
7079 /* pkhbt */
7080 if (shift)
7081 tcg_gen_shli_i32(tmp2, tmp2, shift);
7082 tcg_gen_ext16u_i32(tmp, tmp);
7083 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7084 }
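/* The OR below forms PKHTB: Rn[31:16] : (Rm ASR shift)[15:0], or
   PKHBT: (Rm LSL shift)[31:16] : Rn[15:0] (informal summary). */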
7085 tcg_gen_or_i32(tmp, tmp, tmp2);
7086 tcg_temp_free_i32(tmp2);
7087 store_reg(s, rd, tmp);
7088 } else if ((insn & 0x00200020) == 0x00200000) {
7089 /* [us]sat */
7090 tmp = load_reg(s, rm);
7091 shift = (insn >> 7) & 0x1f;
7092 if (insn & (1 << 6)) {
7093 if (shift == 0)
7094 shift = 31;
7095 tcg_gen_sari_i32(tmp, tmp, shift);
7096 } else {
7097 tcg_gen_shli_i32(tmp, tmp, shift);
7098 }
7099 sh = (insn >> 16) & 0x1f;
7100 tmp2 = tcg_const_i32(sh);
7101 if (insn & (1 << 22))
7102 gen_helper_usat(tmp, tmp, tmp2);
7103 else
7104 gen_helper_ssat(tmp, tmp, tmp2);
7105 tcg_temp_free_i32(tmp2);
7106 store_reg(s, rd, tmp);
7107 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7108 /* [us]sat16 */
7109 tmp = load_reg(s, rm);
7110 sh = (insn >> 16) & 0x1f;
7111 tmp2 = tcg_const_i32(sh);
7112 if (insn & (1 << 22))
7113 gen_helper_usat16(tmp, tmp, tmp2);
7114 else
7115 gen_helper_ssat16(tmp, tmp, tmp2);
7116 tcg_temp_free_i32(tmp2);
7117 store_reg(s, rd, tmp);
7118 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7119 /* Select bytes. */
7120 tmp = load_reg(s, rn);
7121 tmp2 = load_reg(s, rm);
7122 tmp3 = tcg_temp_new_i32();
7123 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7124 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7125 tcg_temp_free_i32(tmp3);
7126 tcg_temp_free_i32(tmp2);
7127 store_reg(s, rd, tmp);
7128 } else if ((insn & 0x000003e0) == 0x00000060) {
7129 tmp = load_reg(s, rm);
7130 shift = (insn >> 10) & 3;
7131 /* ??? In many cases it's not necessary to do a
7132 rotate; a shift is sufficient. */
7133 if (shift != 0)
7134 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7135 op1 = (insn >> 20) & 7;
7136 switch (op1) {
7137 case 0: gen_sxtb16(tmp); break;
7138 case 2: gen_sxtb(tmp); break;
7139 case 3: gen_sxth(tmp); break;
7140 case 4: gen_uxtb16(tmp); break;
7141 case 6: gen_uxtb(tmp); break;
7142 case 7: gen_uxth(tmp); break;
7143 default: goto illegal_op;
7144 }
7145 if (rn != 15) {
7146 tmp2 = load_reg(s, rn);
7147 if ((op1 & 3) == 0) {
7148 gen_add16(tmp, tmp2);
7149 } else {
7150 tcg_gen_add_i32(tmp, tmp, tmp2);
7151 tcg_temp_free_i32(tmp2);
7152 }
7153 }
7154 store_reg(s, rd, tmp);
7155 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7156 /* rev */
7157 tmp = load_reg(s, rm);
7158 if (insn & (1 << 22)) {
7159 if (insn & (1 << 7)) {
7160 gen_revsh(tmp);
7161 } else {
7162 ARCH(6T2);
7163 gen_helper_rbit(tmp, tmp);
7164 }
7165 } else {
7166 if (insn & (1 << 7))
7167 gen_rev16(tmp);
7168 else
7169 tcg_gen_bswap32_i32(tmp, tmp);
7170 }
7171 store_reg(s, rd, tmp);
7172 } else {
7173 goto illegal_op;
7174 }
7175 break;
7176 case 2: /* Multiplies (Type 3). */
7177 tmp = load_reg(s, rm);
7178 tmp2 = load_reg(s, rs);
7179 if (insn & (1 << 20)) {
7180 /* Signed multiply most significant [accumulate].
7181 (SMMUL, SMMLA, SMMLS) */
7182 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7183
7184 if (rd != 15) {
7185 tmp = load_reg(s, rd);
7186 if (insn & (1 << 6)) {
7187 tmp64 = gen_subq_msw(tmp64, tmp);
7188 } else {
7189 tmp64 = gen_addq_msw(tmp64, tmp);
7190 }
7191 }
7192 if (insn & (1 << 5)) {
7193 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7194 }
7195 tcg_gen_shri_i64(tmp64, tmp64, 32);
7196 tmp = tcg_temp_new_i32();
7197 tcg_gen_trunc_i64_i32(tmp, tmp64);
7198 tcg_temp_free_i64(tmp64);
7199 store_reg(s, rn, tmp);
7200 } else {
7201 if (insn & (1 << 5))
7202 gen_swap_half(tmp2);
7203 gen_smul_dual(tmp, tmp2);
7204 if (insn & (1 << 6)) {
7205 /* This subtraction cannot overflow. */
7206 tcg_gen_sub_i32(tmp, tmp, tmp2);
7207 } else {
7208 /* This addition cannot overflow 32 bits;
7209 * however, it may overflow when considered as a signed
7210 * operation, in which case we must set the Q flag.
7211 */
7212 gen_helper_add_setq(tmp, tmp, tmp2);
7213 }
7214 tcg_temp_free_i32(tmp2);
7215 if (insn & (1 << 22)) {
7216 /* smlald, smlsld */
7217 tmp64 = tcg_temp_new_i64();
7218 tcg_gen_ext_i32_i64(tmp64, tmp);
7219 tcg_temp_free_i32(tmp);
7220 gen_addq(s, tmp64, rd, rn);
7221 gen_storeq_reg(s, rd, rn, tmp64);
7222 tcg_temp_free_i64(tmp64);
7223 } else {
7224 /* smuad, smusd, smlad, smlsd */
7225 if (rd != 15)
7226 {
7227 tmp2 = load_reg(s, rd);
7228 gen_helper_add_setq(tmp, tmp, tmp2);
7229 tcg_temp_free_i32(tmp2);
7230 }
7231 store_reg(s, rn, tmp);
7232 }
7233 }
7234 break;
7235 case 3:
7236 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7237 switch (op1) {
7238 case 0: /* Unsigned sum of absolute differences. */
7239 ARCH(6);
7240 tmp = load_reg(s, rm);
7241 tmp2 = load_reg(s, rs);
7242 gen_helper_usad8(tmp, tmp, tmp2);
7243 tcg_temp_free_i32(tmp2);
7244 if (rd != 15) {
7245 tmp2 = load_reg(s, rd);
7246 tcg_gen_add_i32(tmp, tmp, tmp2);
7247 tcg_temp_free_i32(tmp2);
7248 }
7249 store_reg(s, rn, tmp);
7250 break;
7251 case 0x20: case 0x24: case 0x28: case 0x2c:
7252 /* Bitfield insert/clear. */
7253 ARCH(6T2);
7254 shift = (insn >> 7) & 0x1f;
7255 i = (insn >> 16) & 0x1f;
7256 i = i + 1 - shift;
7257 if (rm == 15) {
7258 tmp = tcg_temp_new_i32();
7259 tcg_gen_movi_i32(tmp, 0);
7260 } else {
7261 tmp = load_reg(s, rm);
7262 }
7263 if (i != 32) {
7264 tmp2 = load_reg(s, rd);
7265 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7266 tcg_temp_free_i32(tmp2);
7267 }
7268 store_reg(s, rd, tmp);
7269 break;
7270 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7271 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7272 ARCH(6T2);
7273 tmp = load_reg(s, rm);
7274 shift = (insn >> 7) & 0x1f;
7275 i = ((insn >> 16) & 0x1f) + 1;
7276 if (shift + i > 32)
7277 goto illegal_op;
7278 if (i < 32) {
7279 if (op1 & 0x20) {
7280 gen_ubfx(tmp, shift, (1u << i) - 1);
7281 } else {
7282 gen_sbfx(tmp, shift, i);
7283 }
7284 }
7285 store_reg(s, rd, tmp);
7286 break;
7287 default:
7288 goto illegal_op;
7289 }
7290 break;
7291 }
7292 break;
7293 }
7294 do_ldst:
7295 /* Check for undefined extension instructions
7296 * per the ARM Bible, i.e.:
7297 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7298 */
7299 sh = (0xf << 20) | (0xf << 4);
7300 if (op1 == 0x7 && ((insn & sh) == sh))
7301 {
7302 goto illegal_op;
7303 }
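/* Equivalently: (insn & 0x0ff000f0) == 0x07f000f0, restating the op1
   and mask test above. */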
7304 /* load/store byte/word */
7305 rn = (insn >> 16) & 0xf;
7306 rd = (insn >> 12) & 0xf;
7307 tmp2 = load_reg(s, rn);
7308 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7309 if (insn & (1 << 24))
7310 gen_add_data_offset(s, insn, tmp2);
7311 if (insn & (1 << 20)) {
7312 /* load */
7313 if (insn & (1 << 22)) {
7314 tmp = gen_ld8u(tmp2, i);
7315 } else {
7316 tmp = gen_ld32(tmp2, i);
7317 }
7318 } else {
7319 /* store */
7320 tmp = load_reg(s, rd);
7321 if (insn & (1 << 22))
7322 gen_st8(tmp, tmp2, i);
7323 else
7324 gen_st32(tmp, tmp2, i);
7325 }
7326 if (!(insn & (1 << 24))) {
7327 gen_add_data_offset(s, insn, tmp2);
7328 store_reg(s, rn, tmp2);
7329 } else if (insn & (1 << 21)) {
7330 store_reg(s, rn, tmp2);
7331 } else {
7332 tcg_temp_free_i32(tmp2);
7333 }
7334 if (insn & (1 << 20)) {
7335 /* Complete the load. */
7336 store_reg_from_load(env, s, rd, tmp);
7337 }
7338 break;
7339 case 0x08:
7340 case 0x09:
7341 {
7342 int j, n, user, loaded_base;
7343 TCGv loaded_var;
7344 /* load/store multiple words */
7345 /* XXX: store correct base if write back */
7346 user = 0;
7347 if (insn & (1 << 22)) {
7348 if (IS_USER(s))
7349 goto illegal_op; /* only usable in supervisor mode */
7350
7351 if ((insn & (1 << 15)) == 0)
7352 user = 1;
7353 }
7354 rn = (insn >> 16) & 0xf;
7355 addr = load_reg(s, rn);
7356
7357 /* compute total size */
7358 loaded_base = 0;
7359 TCGV_UNUSED(loaded_var);
7360 n = 0;
7361 for(i=0;i<16;i++) {
7362 if (insn & (1 << i))
7363 n++;
7364 }
7365 /* XXX: test invalid n == 0 case ? */
7366 if (insn & (1 << 23)) {
7367 if (insn & (1 << 24)) {
7368 /* pre increment */
7369 tcg_gen_addi_i32(addr, addr, 4);
7370 } else {
7371 /* post increment */
7372 }
7373 } else {
7374 if (insn & (1 << 24)) {
7375 /* pre decrement */
7376 tcg_gen_addi_i32(addr, addr, -(n * 4));
7377 } else {
7378 /* post decrement */
7379 if (n != 1)
7380 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7381 }
7382 }
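/* Worked example (illustrative): STMDB r13!, {r0-r2} has n = 3, so the
   stores below go to r13-12, r13-8 and r13-4, and the writeback case
   further down leaves r13 decremented by 12. */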
7383 j = 0;
7384 for(i=0;i<16;i++) {
7385 if (insn & (1 << i)) {
7386 if (insn & (1 << 20)) {
7387 /* load */
7388 tmp = gen_ld32(addr, IS_USER(s));
7389 if (user) {
7390 tmp2 = tcg_const_i32(i);
7391 gen_helper_set_user_reg(tmp2, tmp);
7392 tcg_temp_free_i32(tmp2);
7393 tcg_temp_free_i32(tmp);
7394 } else if (i == rn) {
7395 loaded_var = tmp;
7396 loaded_base = 1;
7397 } else {
7398 store_reg_from_load(env, s, i, tmp);
7399 }
7400 } else {
7401 /* store */
7402 if (i == 15) {
7403 /* special case: r15 = PC + 8 */
7404 val = (long)s->pc + 4;
7405 tmp = tcg_temp_new_i32();
7406 tcg_gen_movi_i32(tmp, val);
7407 } else if (user) {
7408 tmp = tcg_temp_new_i32();
7409 tmp2 = tcg_const_i32(i);
7410 gen_helper_get_user_reg(tmp, tmp2);
7411 tcg_temp_free_i32(tmp2);
7412 } else {
7413 tmp = load_reg(s, i);
7414 }
7415 gen_st32(tmp, addr, IS_USER(s));
7416 }
7417 j++;
7418 /* no need to add after the last transfer */
7419 if (j != n)
7420 tcg_gen_addi_i32(addr, addr, 4);
7421 }
7422 }
7423 if (insn & (1 << 21)) {
7424 /* write back */
7425 if (insn & (1 << 23)) {
7426 if (insn & (1 << 24)) {
7427 /* pre increment */
7428 } else {
7429 /* post increment */
7430 tcg_gen_addi_i32(addr, addr, 4);
7431 }
7432 } else {
7433 if (insn & (1 << 24)) {
7434 /* pre decrement */
7435 if (n != 1)
7436 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7437 } else {
7438 /* post decrement */
7439 tcg_gen_addi_i32(addr, addr, -(n * 4));
7440 }
7441 }
7442 store_reg(s, rn, addr);
7443 } else {
7444 tcg_temp_free_i32(addr);
7445 }
7446 if (loaded_base) {
7447 store_reg(s, rn, loaded_var);
7448 }
7449 if ((insn & (1 << 22)) && !user) {
7450 /* Restore CPSR from SPSR. */
7451 tmp = load_cpu_field(spsr);
7452 gen_set_cpsr(tmp, 0xffffffff);
7453 tcg_temp_free_i32(tmp);
7454 s->is_jmp = DISAS_UPDATE;
7455 }
7456 }
7457 break;
7458 case 0xa:
7459 case 0xb:
7460 {
7461 int32_t offset;
7462
7463 /* branch (and link) */
7464 val = (int32_t)s->pc;
7465 if (insn & (1 << 24)) {
7466 tmp = tcg_temp_new_i32();
7467 tcg_gen_movi_i32(tmp, val);
7468 store_reg(s, 14, tmp);
7469 }
7470 offset = (((int32_t)insn << 8) >> 8);
7471 val += (offset << 2) + 4;
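/* Illustrative: s->pc is already the insn address + 4 here, so the target
   is insn address + 8 + (imm24 << 2); e.g. imm24 = -2 encodes a branch to
   the instruction itself. */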
7472 gen_jmp(s, val);
7473 }
7474 break;
7475 case 0xc:
7476 case 0xd:
7477 case 0xe:
7478 /* Coprocessor. */
7479 if (disas_coproc_insn(env, s, insn))
7480 goto illegal_op;
7481 break;
7482 case 0xf:
7483 /* swi */
7484 gen_set_pc_im(s->pc);
7485 s->is_jmp = DISAS_SWI;
7486 break;
7487 default:
7488 illegal_op:
7489 gen_exception_insn(s, 4, EXCP_UDEF);
7490 break;
7491 }
7492 }
7493 }
7494
7495 /* Return true if this is a Thumb-2 logical op. */
7496 static int
7497 thumb2_logic_op(int op)
7498 {
7499 return (op < 8);
7500 }
7501
7502 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7503 then set condition code flags based on the result of the operation.
7504 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7505 to the high bit of T1.
7506 Returns zero if the opcode is valid. */
7507
7508 static int
7509 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7510 {
7511 int logic_cc;
7512
7513 logic_cc = 0;
7514 switch (op) {
7515 case 0: /* and */
7516 tcg_gen_and_i32(t0, t0, t1);
7517 logic_cc = conds;
7518 break;
7519 case 1: /* bic */
7520 tcg_gen_andc_i32(t0, t0, t1);
7521 logic_cc = conds;
7522 break;
7523 case 2: /* orr */
7524 tcg_gen_or_i32(t0, t0, t1);
7525 logic_cc = conds;
7526 break;
7527 case 3: /* orn */
7528 tcg_gen_orc_i32(t0, t0, t1);
7529 logic_cc = conds;
7530 break;
7531 case 4: /* eor */
7532 tcg_gen_xor_i32(t0, t0, t1);
7533 logic_cc = conds;
7534 break;
7535 case 8: /* add */
7536 if (conds)
7537 gen_helper_add_cc(t0, t0, t1);
7538 else
7539 tcg_gen_add_i32(t0, t0, t1);
7540 break;
7541 case 10: /* adc */
7542 if (conds)
7543 gen_helper_adc_cc(t0, t0, t1);
7544 else
7545 gen_adc(t0, t1);
7546 break;
7547 case 11: /* sbc */
7548 if (conds)
7549 gen_helper_sbc_cc(t0, t0, t1);
7550 else
7551 gen_sub_carry(t0, t0, t1);
7552 break;
7553 case 13: /* sub */
7554 if (conds)
7555 gen_helper_sub_cc(t0, t0, t1);
7556 else
7557 tcg_gen_sub_i32(t0, t0, t1);
7558 break;
7559 case 14: /* rsb */
7560 if (conds)
7561 gen_helper_sub_cc(t0, t1, t0);
7562 else
7563 tcg_gen_sub_i32(t0, t1, t0);
7564 break;
7565 default: /* 5, 6, 7, 9, 12, 15. */
7566 return 1;
7567 }
7568 if (logic_cc) {
7569 gen_logic_CC(t0);
7570 if (shifter_out)
7571 gen_set_CF_bit31(t1);
7572 }
7573 return 0;
7574 }
7575
7576 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7577 is not legal. */
7578 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7579 {
7580 uint32_t insn, imm, shift, offset;
7581 uint32_t rd, rn, rm, rs;
7582 TCGv tmp;
7583 TCGv tmp2;
7584 TCGv tmp3;
7585 TCGv addr;
7586 TCGv_i64 tmp64;
7587 int op;
7588 int shiftop;
7589 int conds;
7590 int logic_cc;
7591
7592 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7593 || arm_feature (env, ARM_FEATURE_M))) {
7594 /* Thumb-1 cores may need to treat bl and blx as a pair of
7595 16-bit instructions to get correct prefetch abort behavior. */
7596 insn = insn_hw1;
7597 if ((insn & (1 << 12)) == 0) {
7598 ARCH(5);
7599 /* Second half of blx. */
7600 offset = ((insn & 0x7ff) << 1);
7601 tmp = load_reg(s, 14);
7602 tcg_gen_addi_i32(tmp, tmp, offset);
7603 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7604
7605 tmp2 = tcg_temp_new_i32();
7606 tcg_gen_movi_i32(tmp2, s->pc | 1);
7607 store_reg(s, 14, tmp2);
7608 gen_bx(s, tmp);
7609 return 0;
7610 }
7611 if (insn & (1 << 11)) {
7612 /* Second half of bl. */
7613 offset = ((insn & 0x7ff) << 1) | 1;
7614 tmp = load_reg(s, 14);
7615 tcg_gen_addi_i32(tmp, tmp, offset);
7616
7617 tmp2 = tcg_temp_new_i32();
7618 tcg_gen_movi_i32(tmp2, s->pc | 1);
7619 store_reg(s, 14, tmp2);
7620 gen_bx(s, tmp);
7621 return 0;
7622 }
7623 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7624 /* Instruction spans a page boundary. Implement it as two
7625 16-bit instructions in case the second half causes a
7626 prefetch abort. */
7627 offset = ((int32_t)insn << 21) >> 9;
7628 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7629 return 0;
7630 }
7631 /* Fall through to 32-bit decode. */
7632 }
7633
7634 insn = lduw_code(s->pc);
7635 s->pc += 2;
7636 insn |= (uint32_t)insn_hw1 << 16;
7637
7638 if ((insn & 0xf800e800) != 0xf000e800) {
7639 ARCH(6T2);
7640 }
7641
7642 rn = (insn >> 16) & 0xf;
7643 rs = (insn >> 12) & 0xf;
7644 rd = (insn >> 8) & 0xf;
7645 rm = insn & 0xf;
7646 switch ((insn >> 25) & 0xf) {
7647 case 0: case 1: case 2: case 3:
7648 /* 16-bit instructions. Should never happen. */
7649 abort();
7650 case 4:
7651 if (insn & (1 << 22)) {
7652 /* Other load/store, table branch. */
7653 if (insn & 0x01200000) {
7654 /* Load/store doubleword. */
7655 if (rn == 15) {
7656 addr = tcg_temp_new_i32();
7657 tcg_gen_movi_i32(addr, s->pc & ~3);
7658 } else {
7659 addr = load_reg(s, rn);
7660 }
7661 offset = (insn & 0xff) * 4;
7662 if ((insn & (1 << 23)) == 0)
7663 offset = -offset;
7664 if (insn & (1 << 24)) {
7665 tcg_gen_addi_i32(addr, addr, offset);
7666 offset = 0;
7667 }
7668 if (insn & (1 << 20)) {
7669 /* ldrd */
7670 tmp = gen_ld32(addr, IS_USER(s));
7671 store_reg(s, rs, tmp);
7672 tcg_gen_addi_i32(addr, addr, 4);
7673 tmp = gen_ld32(addr, IS_USER(s));
7674 store_reg(s, rd, tmp);
7675 } else {
7676 /* strd */
7677 tmp = load_reg(s, rs);
7678 gen_st32(tmp, addr, IS_USER(s));
7679 tcg_gen_addi_i32(addr, addr, 4);
7680 tmp = load_reg(s, rd);
7681 gen_st32(tmp, addr, IS_USER(s));
7682 }
7683 if (insn & (1 << 21)) {
7684 /* Base writeback. */
7685 if (rn == 15)
7686 goto illegal_op;
7687 tcg_gen_addi_i32(addr, addr, offset - 4);
7688 store_reg(s, rn, addr);
7689 } else {
7690 tcg_temp_free_i32(addr);
7691 }
7692 } else if ((insn & (1 << 23)) == 0) {
7693 /* Load/store exclusive word. */
7694 addr = tcg_temp_local_new();
7695 load_reg_var(s, addr, rn);
7696 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7697 if (insn & (1 << 20)) {
7698 gen_load_exclusive(s, rs, 15, addr, 2);
7699 } else {
7700 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7701 }
7702 tcg_temp_free(addr);
7703 } else if ((insn & (1 << 6)) == 0) {
7704 /* Table Branch. */
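/* Informal summary: the target computed below is (this insn's address + 4)
   + 2 * entry, where entry is the byte at [Rn + Rm] for TBB or the
   halfword at [Rn + 2*Rm] for TBH. */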
7705 if (rn == 15) {
7706 addr = tcg_temp_new_i32();
7707 tcg_gen_movi_i32(addr, s->pc);
7708 } else {
7709 addr = load_reg(s, rn);
7710 }
7711 tmp = load_reg(s, rm);
7712 tcg_gen_add_i32(addr, addr, tmp);
7713 if (insn & (1 << 4)) {
7714 /* tbh */
7715 tcg_gen_add_i32(addr, addr, tmp);
7716 tcg_temp_free_i32(tmp);
7717 tmp = gen_ld16u(addr, IS_USER(s));
7718 } else { /* tbb */
7719 tcg_temp_free_i32(tmp);
7720 tmp = gen_ld8u(addr, IS_USER(s));
7721 }
7722 tcg_temp_free_i32(addr);
7723 tcg_gen_shli_i32(tmp, tmp, 1);
7724 tcg_gen_addi_i32(tmp, tmp, s->pc);
7725 store_reg(s, 15, tmp);
7726 } else {
7727 /* Load/store exclusive byte/halfword/doubleword. */
7728 ARCH(7);
7729 op = (insn >> 4) & 0x3;
7730 if (op == 2) {
7731 goto illegal_op;
7732 }
7733 addr = tcg_temp_local_new();
7734 load_reg_var(s, addr, rn);
7735 if (insn & (1 << 20)) {
7736 gen_load_exclusive(s, rs, rd, addr, op);
7737 } else {
7738 gen_store_exclusive(s, rm, rs, rd, addr, op);
7739 }
7740 tcg_temp_free(addr);
7741 }
7742 } else {
7743 /* Load/store multiple, RFE, SRS. */
7744 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7745 /* Not available in user mode. */
7746 if (IS_USER(s))
7747 goto illegal_op;
7748 if (insn & (1 << 20)) {
7749 /* rfe */
7750 addr = load_reg(s, rn);
7751 if ((insn & (1 << 24)) == 0)
7752 tcg_gen_addi_i32(addr, addr, -8);
7753 /* Load PC into tmp and CPSR into tmp2. */
7754 tmp = gen_ld32(addr, 0);
7755 tcg_gen_addi_i32(addr, addr, 4);
7756 tmp2 = gen_ld32(addr, 0);
7757 if (insn & (1 << 21)) {
7758 /* Base writeback. */
7759 if (insn & (1 << 24)) {
7760 tcg_gen_addi_i32(addr, addr, 4);
7761 } else {
7762 tcg_gen_addi_i32(addr, addr, -4);
7763 }
7764 store_reg(s, rn, addr);
7765 } else {
7766 tcg_temp_free_i32(addr);
7767 }
7768 gen_rfe(s, tmp, tmp2);
7769 } else {
7770 /* srs */
7771 op = (insn & 0x1f);
7772 addr = tcg_temp_new_i32();
7773 tmp = tcg_const_i32(op);
7774 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7775 tcg_temp_free_i32(tmp);
7776 if ((insn & (1 << 24)) == 0) {
7777 tcg_gen_addi_i32(addr, addr, -8);
7778 }
7779 tmp = load_reg(s, 14);
7780 gen_st32(tmp, addr, 0);
7781 tcg_gen_addi_i32(addr, addr, 4);
7782 tmp = tcg_temp_new_i32();
7783 gen_helper_cpsr_read(tmp);
7784 gen_st32(tmp, addr, 0);
7785 if (insn & (1 << 21)) {
7786 if ((insn & (1 << 24)) == 0) {
7787 tcg_gen_addi_i32(addr, addr, -4);
7788 } else {
7789 tcg_gen_addi_i32(addr, addr, 4);
7790 }
7791 tmp = tcg_const_i32(op);
7792 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7793 tcg_temp_free_i32(tmp);
7794 } else {
7795 tcg_temp_free_i32(addr);
7796 }
7797 }
7798 } else {
7799 int i;
7800 /* Load/store multiple. */
7801 addr = load_reg(s, rn);
7802 offset = 0;
7803 for (i = 0; i < 16; i++) {
7804 if (insn & (1 << i))
7805 offset += 4;
7806 }
7807 if (insn & (1 << 24)) {
7808 tcg_gen_addi_i32(addr, addr, -offset);
7809 }
7810
7811 for (i = 0; i < 16; i++) {
7812 if ((insn & (1 << i)) == 0)
7813 continue;
7814 if (insn & (1 << 20)) {
7815 /* Load. */
7816 tmp = gen_ld32(addr, IS_USER(s));
7817 if (i == 15) {
7818 gen_bx(s, tmp);
7819 } else {
7820 store_reg(s, i, tmp);
7821 }
7822 } else {
7823 /* Store. */
7824 tmp = load_reg(s, i);
7825 gen_st32(tmp, addr, IS_USER(s));
7826 }
7827 tcg_gen_addi_i32(addr, addr, 4);
7828 }
7829 if (insn & (1 << 21)) {
7830 /* Base register writeback. */
7831 if (insn & (1 << 24)) {
7832 tcg_gen_addi_i32(addr, addr, -offset);
7833 }
7834 /* Fault if writeback register is in register list. */
7835 if (insn & (1 << rn))
7836 goto illegal_op;
7837 store_reg(s, rn, addr);
7838 } else {
7839 tcg_temp_free_i32(addr);
7840 }
7841 }
7842 }
7843 break;
7844 case 5:
7845
7846 op = (insn >> 21) & 0xf;
7847 if (op == 6) {
7848 /* Halfword pack. */
7849 tmp = load_reg(s, rn);
7850 tmp2 = load_reg(s, rm);
7851 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7852 if (insn & (1 << 5)) {
7853 /* pkhtb */
7854 if (shift == 0)
7855 shift = 31;
7856 tcg_gen_sari_i32(tmp2, tmp2, shift);
7857 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7858 tcg_gen_ext16u_i32(tmp2, tmp2);
7859 } else {
7860 /* pkhbt */
7861 if (shift)
7862 tcg_gen_shli_i32(tmp2, tmp2, shift);
7863 tcg_gen_ext16u_i32(tmp, tmp);
7864 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7865 }
7866 tcg_gen_or_i32(tmp, tmp, tmp2);
7867 tcg_temp_free_i32(tmp2);
7868 store_reg(s, rd, tmp);
7869 } else {
7870 /* Data processing register constant shift. */
7871 if (rn == 15) {
7872 tmp = tcg_temp_new_i32();
7873 tcg_gen_movi_i32(tmp, 0);
7874 } else {
7875 tmp = load_reg(s, rn);
7876 }
7877 tmp2 = load_reg(s, rm);
7878
7879 shiftop = (insn >> 4) & 3;
7880 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7881 conds = (insn & (1 << 20)) != 0;
7882 logic_cc = (conds && thumb2_logic_op(op));
7883 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7884 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7885 goto illegal_op;
7886 tcg_temp_free_i32(tmp2);
7887 if (rd != 15) {
7888 store_reg(s, rd, tmp);
7889 } else {
7890 tcg_temp_free_i32(tmp);
7891 }
7892 }
7893 break;
7894 case 13: /* Misc data processing. */
7895 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7896 if (op < 4 && (insn & 0xf000) != 0xf000)
7897 goto illegal_op;
7898 switch (op) {
7899 case 0: /* Register controlled shift. */
7900 tmp = load_reg(s, rn);
7901 tmp2 = load_reg(s, rm);
7902 if ((insn & 0x70) != 0)
7903 goto illegal_op;
7904 op = (insn >> 21) & 3;
7905 logic_cc = (insn & (1 << 20)) != 0;
7906 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7907 if (logic_cc)
7908 gen_logic_CC(tmp);
7909 store_reg_bx(env, s, rd, tmp);
7910 break;
7911 case 1: /* Sign/zero extend. */
7912 tmp = load_reg(s, rm);
7913 shift = (insn >> 4) & 3;
7914 /* ??? In many cases it's not necessary to do a
7915 rotate; a shift is sufficient. */
7916 if (shift != 0)
7917 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7918 op = (insn >> 20) & 7;
7919 switch (op) {
7920 case 0: gen_sxth(tmp); break;
7921 case 1: gen_uxth(tmp); break;
7922 case 2: gen_sxtb16(tmp); break;
7923 case 3: gen_uxtb16(tmp); break;
7924 case 4: gen_sxtb(tmp); break;
7925 case 5: gen_uxtb(tmp); break;
7926 default: goto illegal_op;
7927 }
7928 if (rn != 15) {
7929 tmp2 = load_reg(s, rn);
7930 if ((op >> 1) == 1) {
7931 gen_add16(tmp, tmp2);
7932 } else {
7933 tcg_gen_add_i32(tmp, tmp, tmp2);
7934 tcg_temp_free_i32(tmp2);
7935 }
7936 }
7937 store_reg(s, rd, tmp);
7938 break;
7939 case 2: /* SIMD add/subtract. */
7940 op = (insn >> 20) & 7;
7941 shift = (insn >> 4) & 7;
7942 if ((op & 3) == 3 || (shift & 3) == 3)
7943 goto illegal_op;
7944 tmp = load_reg(s, rn);
7945 tmp2 = load_reg(s, rm);
7946 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7947 tcg_temp_free_i32(tmp2);
7948 store_reg(s, rd, tmp);
7949 break;
7950 case 3: /* Other data processing. */
7951 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7952 if (op < 4) {
7953 /* Saturating add/subtract. */
7954 tmp = load_reg(s, rn);
7955 tmp2 = load_reg(s, rm);
7956 if (op & 1)
7957 gen_helper_double_saturate(tmp, tmp);
7958 if (op & 2)
7959 gen_helper_sub_saturate(tmp, tmp2, tmp);
7960 else
7961 gen_helper_add_saturate(tmp, tmp, tmp2);
7962 tcg_temp_free_i32(tmp2);
7963 } else {
7964 tmp = load_reg(s, rn);
7965 switch (op) {
7966 case 0x0a: /* rbit */
7967 gen_helper_rbit(tmp, tmp);
7968 break;
7969 case 0x08: /* rev */
7970 tcg_gen_bswap32_i32(tmp, tmp);
7971 break;
7972 case 0x09: /* rev16 */
7973 gen_rev16(tmp);
7974 break;
7975 case 0x0b: /* revsh */
7976 gen_revsh(tmp);
7977 break;
7978 case 0x10: /* sel */
7979 tmp2 = load_reg(s, rm);
7980 tmp3 = tcg_temp_new_i32();
7981 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7982 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7983 tcg_temp_free_i32(tmp3);
7984 tcg_temp_free_i32(tmp2);
7985 break;
7986 case 0x18: /* clz */
7987 gen_helper_clz(tmp, tmp);
7988 break;
7989 default:
7990 goto illegal_op;
7991 }
7992 }
7993 store_reg(s, rd, tmp);
7994 break;
7995 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7996 op = (insn >> 4) & 0xf;
7997 tmp = load_reg(s, rn);
7998 tmp2 = load_reg(s, rm);
7999 switch ((insn >> 20) & 7) {
8000 case 0: /* 32 x 32 -> 32 */
8001 tcg_gen_mul_i32(tmp, tmp, tmp2);
8002 tcg_temp_free_i32(tmp2);
8003 if (rs != 15) {
8004 tmp2 = load_reg(s, rs);
8005 if (op)
8006 tcg_gen_sub_i32(tmp, tmp2, tmp);
8007 else
8008 tcg_gen_add_i32(tmp, tmp, tmp2);
8009 tcg_temp_free_i32(tmp2);
8010 }
8011 break;
8012 case 1: /* 16 x 16 -> 32 */
8013 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8014 tcg_temp_free_i32(tmp2);
8015 if (rs != 15) {
8016 tmp2 = load_reg(s, rs);
8017 gen_helper_add_setq(tmp, tmp, tmp2);
8018 tcg_temp_free_i32(tmp2);
8019 }
8020 break;
8021 case 2: /* Dual multiply add. */
8022 case 4: /* Dual multiply subtract. */
8023 if (op)
8024 gen_swap_half(tmp2);
8025 gen_smul_dual(tmp, tmp2);
8026 if (insn & (1 << 22)) {
8027 /* This subtraction cannot overflow. */
8028 tcg_gen_sub_i32(tmp, tmp, tmp2);
8029 } else {
8030 /* This addition cannot overflow 32 bits;
8031 * however, it may overflow when considered as a signed
8032 * operation, in which case we must set the Q flag.
8033 */
8034 gen_helper_add_setq(tmp, tmp, tmp2);
8035 }
8036 tcg_temp_free_i32(tmp2);
8037 if (rs != 15)
8038 {
8039 tmp2 = load_reg(s, rs);
8040 gen_helper_add_setq(tmp, tmp, tmp2);
8041 tcg_temp_free_i32(tmp2);
8042 }
8043 break;
8044 case 3: /* 32 * 16 -> 32msb */
8045 if (op)
8046 tcg_gen_sari_i32(tmp2, tmp2, 16);
8047 else
8048 gen_sxth(tmp2);
8049 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8050 tcg_gen_shri_i64(tmp64, tmp64, 16);
8051 tmp = tcg_temp_new_i32();
8052 tcg_gen_trunc_i64_i32(tmp, tmp64);
8053 tcg_temp_free_i64(tmp64);
8054 if (rs != 15)
8055 {
8056 tmp2 = load_reg(s, rs);
8057 gen_helper_add_setq(tmp, tmp, tmp2);
8058 tcg_temp_free_i32(tmp2);
8059 }
8060 break;
8061 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8062 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8063 if (rs != 15) {
8064 tmp = load_reg(s, rs);
8065 if (insn & (1 << 20)) {
8066 tmp64 = gen_addq_msw(tmp64, tmp);
8067 } else {
8068 tmp64 = gen_subq_msw(tmp64, tmp);
8069 }
8070 }
8071 if (insn & (1 << 4)) {
8072 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8073 }
8074 tcg_gen_shri_i64(tmp64, tmp64, 32);
8075 tmp = tcg_temp_new_i32();
8076 tcg_gen_trunc_i64_i32(tmp, tmp64);
8077 tcg_temp_free_i64(tmp64);
8078 break;
8079 case 7: /* Unsigned sum of absolute differences. */
8080 gen_helper_usad8(tmp, tmp, tmp2);
8081 tcg_temp_free_i32(tmp2);
8082 if (rs != 15) {
8083 tmp2 = load_reg(s, rs);
8084 tcg_gen_add_i32(tmp, tmp, tmp2);
8085 tcg_temp_free_i32(tmp2);
8086 }
8087 break;
8088 }
8089 store_reg(s, rd, tmp);
8090 break;
8091 case 6: case 7: /* 64-bit multiply, Divide. */
8092 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8093 tmp = load_reg(s, rn);
8094 tmp2 = load_reg(s, rm);
8095 if ((op & 0x50) == 0x10) {
8096 /* sdiv, udiv */
8097 if (!arm_feature(env, ARM_FEATURE_DIV))
8098 goto illegal_op;
8099 if (op & 0x20)
8100 gen_helper_udiv(tmp, tmp, tmp2);
8101 else
8102 gen_helper_sdiv(tmp, tmp, tmp2);
8103 tcg_temp_free_i32(tmp2);
8104 store_reg(s, rd, tmp);
8105 } else if ((op & 0xe) == 0xc) {
8106 /* Dual multiply accumulate long. */
8107 if (op & 1)
8108 gen_swap_half(tmp2);
8109 gen_smul_dual(tmp, tmp2);
8110 if (op & 0x10) {
8111 tcg_gen_sub_i32(tmp, tmp, tmp2);
8112 } else {
8113 tcg_gen_add_i32(tmp, tmp, tmp2);
8114 }
8115 tcg_temp_free_i32(tmp2);
8116 /* BUGFIX */
8117 tmp64 = tcg_temp_new_i64();
8118 tcg_gen_ext_i32_i64(tmp64, tmp);
8119 tcg_temp_free_i32(tmp);
8120 gen_addq(s, tmp64, rs, rd);
8121 gen_storeq_reg(s, rs, rd, tmp64);
8122 tcg_temp_free_i64(tmp64);
8123 } else {
8124 if (op & 0x20) {
8125 /* Unsigned 64-bit multiply */
8126 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8127 } else {
8128 if (op & 8) {
8129 /* smlalxy */
8130 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8131 tcg_temp_free_i32(tmp2);
8132 tmp64 = tcg_temp_new_i64();
8133 tcg_gen_ext_i32_i64(tmp64, tmp);
8134 tcg_temp_free_i32(tmp);
8135 } else {
8136 /* Signed 64-bit multiply */
8137 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8138 }
8139 }
8140 if (op & 4) {
8141 /* umaal */
8142 gen_addq_lo(s, tmp64, rs);
8143 gen_addq_lo(s, tmp64, rd);
8144 } else if (op & 0x40) {
8145 /* 64-bit accumulate. */
8146 gen_addq(s, tmp64, rs, rd);
8147 }
8148 gen_storeq_reg(s, rs, rd, tmp64);
8149 tcg_temp_free_i64(tmp64);
8150 }
8151 break;
8152 }
8153 break;
8154 case 6: case 7: case 14: case 15:
8155 /* Coprocessor. */
8156 if (((insn >> 24) & 3) == 3) {
8157 /* Translate into the equivalent ARM encoding. */
8158 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8159 if (disas_neon_data_insn(env, s, insn))
8160 goto illegal_op;
8161 } else {
8162 if (insn & (1 << 28))
8163 goto illegal_op;
8164 if (disas_coproc_insn (env, s, insn))
8165 goto illegal_op;
8166 }
8167 break;
8168 case 8: case 9: case 10: case 11:
8169 if (insn & (1 << 15)) {
8170 /* Branches, misc control. */
8171 if (insn & 0x5000) {
8172 /* Unconditional branch. */
8173 /* signextend(hw1[10:0]) -> offset[:12]. */
8174 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8175 /* hw1[10:0] -> offset[11:1]. */
8176 offset |= (insn & 0x7ff) << 1;
8177 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8178 offset[24:22] already have the same value because of the
8179 sign extension above. */
8180 offset ^= ((~insn) & (1 << 13)) << 10;
8181 offset ^= ((~insn) & (1 << 11)) << 11;
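/* i.e. offset[23] = NOT(J1 EOR S) and offset[22] = NOT(J2 EOR S), the
   I1/I2 bits of the Thumb-2 BL/BLX offset encoding (restating the two
   XORs above). */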
8182
8183 if (insn & (1 << 14)) {
8184 /* Branch and link. */
8185 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8186 }
8187
8188 offset += s->pc;
8189 if (insn & (1 << 12)) {
8190 /* b/bl */
8191 gen_jmp(s, offset);
8192 } else {
8193 /* blx */
8194 offset &= ~(uint32_t)2;
8195 /* thumb2 bx, no need to check */
8196 gen_bx_im(s, offset);
8197 }
8198 } else if (((insn >> 23) & 7) == 7) {
8199 /* Misc control */
8200 if (insn & (1 << 13))
8201 goto illegal_op;
8202
8203 if (insn & (1 << 26)) {
8204 /* Secure monitor call (v6Z) */
8205 goto illegal_op; /* not implemented. */
8206 } else {
8207 op = (insn >> 20) & 7;
8208 switch (op) {
8209 case 0: /* msr cpsr. */
8210 if (IS_M(env)) {
8211 tmp = load_reg(s, rn);
8212 addr = tcg_const_i32(insn & 0xff);
8213 gen_helper_v7m_msr(cpu_env, addr, tmp);
8214 tcg_temp_free_i32(addr);
8215 tcg_temp_free_i32(tmp);
8216 gen_lookup_tb(s);
8217 break;
8218 }
8219 /* fall through */
8220 case 1: /* msr spsr. */
8221 if (IS_M(env))
8222 goto illegal_op;
8223 tmp = load_reg(s, rn);
8224 if (gen_set_psr(s,
8225 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8226 op == 1, tmp))
8227 goto illegal_op;
8228 break;
8229 case 2: /* cps, nop-hint. */
8230 if (((insn >> 8) & 7) == 0) {
8231 gen_nop_hint(s, insn & 0xff);
8232 }
8233 /* Implemented as NOP in user mode. */
8234 if (IS_USER(s))
8235 break;
8236 offset = 0;
8237 imm = 0;
8238 if (insn & (1 << 10)) {
8239 if (insn & (1 << 7))
8240 offset |= CPSR_A;
8241 if (insn & (1 << 6))
8242 offset |= CPSR_I;
8243 if (insn & (1 << 5))
8244 offset |= CPSR_F;
8245 if (insn & (1 << 9))
8246 imm = CPSR_A | CPSR_I | CPSR_F;
8247 }
8248 if (insn & (1 << 8)) {
8249 offset |= 0x1f;
8250 imm |= (insn & 0x1f);
8251 }
8252 if (offset) {
8253 gen_set_psr_im(s, offset, 0, imm);
8254 }
8255 break;
8256 case 3: /* Special control operations. */
8257 ARCH(7);
8258 op = (insn >> 4) & 0xf;
8259 switch (op) {
8260 case 2: /* clrex */
8261 gen_clrex(s);
8262 break;
8263 case 4: /* dsb */
8264 case 5: /* dmb */
8265 case 6: /* isb */
8266 /* These execute as NOPs. */
8267 break;
8268 default:
8269 goto illegal_op;
8270 }
8271 break;
8272 case 4: /* bxj */
8273 /* Trivial implementation equivalent to bx. */
8274 tmp = load_reg(s, rn);
8275 gen_bx(s, tmp);
8276 break;
8277 case 5: /* Exception return. */
8278 if (IS_USER(s)) {
8279 goto illegal_op;
8280 }
8281 if (rn != 14 || rd != 15) {
8282 goto illegal_op;
8283 }
8284 tmp = load_reg(s, rn);
8285 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8286 gen_exception_return(s, tmp);
8287 break;
8288 case 6: /* mrs cpsr. */
8289 tmp = tcg_temp_new_i32();
8290 if (IS_M(env)) {
8291 addr = tcg_const_i32(insn & 0xff);
8292 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8293 tcg_temp_free_i32(addr);
8294 } else {
8295 gen_helper_cpsr_read(tmp);
8296 }
8297 store_reg(s, rd, tmp);
8298 break;
8299 case 7: /* mrs spsr. */
8300 /* Not accessible in user mode. */
8301 if (IS_USER(s) || IS_M(env))
8302 goto illegal_op;
8303 tmp = load_cpu_field(spsr);
8304 store_reg(s, rd, tmp);
8305 break;
8306 }
8307 }
8308 } else {
8309 /* Conditional branch. */
8310 op = (insn >> 22) & 0xf;
8311 /* Generate a conditional jump to next instruction. */
8312 s->condlabel = gen_new_label();
8313 gen_test_cc(op ^ 1, s->condlabel);
8314 s->condjmp = 1;
8315
8316 /* offset[11:1] = insn[10:0] */
8317 offset = (insn & 0x7ff) << 1;
8318 /* offset[17:12] = insn[21:16]. */
8319 offset |= (insn & 0x003f0000) >> 4;
8320 /* offset[31:20] = insn[26]. */
8321 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8322 /* offset[18] = insn[13]. */
8323 offset |= (insn & (1 << 13)) << 5;
8324 /* offset[19] = insn[11]. */
8325 offset |= (insn & (1 << 11)) << 8;
8326
8327 /* jump to the offset */
8328 gen_jmp(s, s->pc + offset);
8329 }
8330 } else {
8331 /* Data processing immediate. */
8332 if (insn & (1 << 25)) {
8333 if (insn & (1 << 24)) {
8334 if (insn & (1 << 20))
8335 goto illegal_op;
8336 /* Bitfield/Saturate. */
8337 op = (insn >> 21) & 7;
8338 imm = insn & 0x1f;
8339 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8340 if (rn == 15) {
8341 tmp = tcg_temp_new_i32();
8342 tcg_gen_movi_i32(tmp, 0);
8343 } else {
8344 tmp = load_reg(s, rn);
8345 }
8346 switch (op) {
8347 case 2: /* Signed bitfield extract. */
8348 imm++;
8349 if (shift + imm > 32)
8350 goto illegal_op;
8351 if (imm < 32)
8352 gen_sbfx(tmp, shift, imm);
8353 break;
8354 case 6: /* Unsigned bitfield extract. */
8355 imm++;
8356 if (shift + imm > 32)
8357 goto illegal_op;
8358 if (imm < 32)
8359 gen_ubfx(tmp, shift, (1u << imm) - 1);
8360 break;
8361 case 3: /* Bitfield insert/clear. */
8362 if (imm < shift)
8363 goto illegal_op;
8364 imm = imm + 1 - shift;
8365 if (imm != 32) {
8366 tmp2 = load_reg(s, rd);
8367 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8368 tcg_temp_free_i32(tmp2);
8369 }
8370 break;
8371 case 7:
8372 goto illegal_op;
8373 default: /* Saturate. */
8374 if (shift) {
8375 if (op & 1)
8376 tcg_gen_sari_i32(tmp, tmp, shift);
8377 else
8378 tcg_gen_shli_i32(tmp, tmp, shift);
8379 }
8380 tmp2 = tcg_const_i32(imm);
8381 if (op & 4) {
8382 /* Unsigned. */
8383 if ((op & 1) && shift == 0)
8384 gen_helper_usat16(tmp, tmp, tmp2);
8385 else
8386 gen_helper_usat(tmp, tmp, tmp2);
8387 } else {
8388 /* Signed. */
8389 if ((op & 1) && shift == 0)
8390 gen_helper_ssat16(tmp, tmp, tmp2);
8391 else
8392 gen_helper_ssat(tmp, tmp, tmp2);
8393 }
8394 tcg_temp_free_i32(tmp2);
8395 break;
8396 }
8397 store_reg(s, rd, tmp);
8398 } else {
8399 imm = ((insn & 0x04000000) >> 15)
8400 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8401 if (insn & (1 << 22)) {
8402 /* 16-bit immediate. */
8403 imm |= (insn >> 4) & 0xf000;
8404 if (insn & (1 << 23)) {
8405 /* movt */
8406 tmp = load_reg(s, rd);
8407 tcg_gen_ext16u_i32(tmp, tmp);
8408 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8409 } else {
8410 /* movw */
8411 tmp = tcg_temp_new_i32();
8412 tcg_gen_movi_i32(tmp, imm);
8413 }
8414 } else {
8415 /* Add/sub 12-bit immediate. */
8416 if (rn == 15) {
8417 offset = s->pc & ~(uint32_t)3;
8418 if (insn & (1 << 23))
8419 offset -= imm;
8420 else
8421 offset += imm;
8422 tmp = tcg_temp_new_i32();
8423 tcg_gen_movi_i32(tmp, offset);
8424 } else {
8425 tmp = load_reg(s, rn);
8426 if (insn & (1 << 23))
8427 tcg_gen_subi_i32(tmp, tmp, imm);
8428 else
8429 tcg_gen_addi_i32(tmp, tmp, imm);
8430 }
8431 }
8432 store_reg(s, rd, tmp);
8433 }
8434 } else {
8435 int shifter_out = 0;
8436 /* modified 12-bit immediate. */
8437 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8438 imm = (insn & 0xff);
8439 switch (shift) {
8440 case 0: /* XY */
8441 /* Nothing to do. */
8442 break;
8443 case 1: /* 00XY00XY */
8444 imm |= imm << 16;
8445 break;
8446 case 2: /* XY00XY00 */
8447 imm |= imm << 16;
8448 imm <<= 8;
8449 break;
8450 case 3: /* XYXYXYXY */
8451 imm |= imm << 16;
8452 imm |= imm << 8;
8453 break;
8454 default: /* Rotated constant. */
8455 shift = (shift << 1) | (imm >> 7);
8456 imm |= 0x80;
8457 imm = imm << (32 - shift);
8458 shifter_out = 1;
8459 break;
8460 }
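/* Worked example (illustrative): rotation 8 with low seven bits 0x55 gives
   imm = 0xd5 << 24 = 0xd5000000, i.e. (0x80 | imm7) rotated right by 8; the
   plain left shift is equivalent to a rotate here because shift is always
   >= 8 in this case. */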
8461 tmp2 = tcg_temp_new_i32();
8462 tcg_gen_movi_i32(tmp2, imm);
8463 rn = (insn >> 16) & 0xf;
8464 if (rn == 15) {
8465 tmp = tcg_temp_new_i32();
8466 tcg_gen_movi_i32(tmp, 0);
8467 } else {
8468 tmp = load_reg(s, rn);
8469 }
8470 op = (insn >> 21) & 0xf;
8471 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8472 shifter_out, tmp, tmp2))
8473 goto illegal_op;
8474 tcg_temp_free_i32(tmp2);
8475 rd = (insn >> 8) & 0xf;
8476 if (rd != 15) {
8477 store_reg(s, rd, tmp);
8478 } else {
8479 tcg_temp_free_i32(tmp);
8480 }
8481 }
8482 }
8483 break;
8484 case 12: /* Load/store single data item. */
8485 {
8486 int postinc = 0;
8487 int writeback = 0;
8488 int user;
8489 if ((insn & 0x01100000) == 0x01000000) {
8490 if (disas_neon_ls_insn(env, s, insn))
8491 goto illegal_op;
8492 break;
8493 }
8494 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
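/* op[1:0] is the access size from bits 22:21 (0 byte, 1 halfword,
 * 2 word) and op[2] is insn bit 24, the sign-extension flag used by
 * the load switch further down.
 */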
8495 if (rs == 15) {
8496 if (!(insn & (1 << 20))) {
8497 goto illegal_op;
8498 }
8499 if (op != 2) {
8500 /* Byte or halfword load space with dest == r15: memory hints.
8501 * Catch them early so we don't emit pointless addressing code.
8502 * This space is a mix of:
8503 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8504 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8505 * cores)
8506 * unallocated hints, which must be treated as NOPs
8507 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8508 * which is easiest for the decoding logic
8509 * Some space which must UNDEF
8510 */
8511 int op1 = (insn >> 23) & 3;
8512 int op2 = (insn >> 6) & 0x3f;
8513 if (op & 2) {
8514 goto illegal_op;
8515 }
8516 if (rn == 15) {
8517 /* UNPREDICTABLE or unallocated hint */
8518 return 0;
8519 }
8520 if (op1 & 1) {
8521 return 0; /* PLD* or unallocated hint */
8522 }
8523 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8524 return 0; /* PLD* or unallocated hint */
8525 }
8526 /* UNDEF space, or an UNPREDICTABLE */
8527 return 1;
8528 }
8529 }
8530 user = IS_USER(s);
8531 if (rn == 15) {
8532 addr = tcg_temp_new_i32();
8533 /* PC relative. */
8534 /* s->pc has already been incremented by 4. */
8535 imm = s->pc & 0xfffffffc;
8536 if (insn & (1 << 23))
8537 imm += insn & 0xfff;
8538 else
8539 imm -= insn & 0xfff;
8540 tcg_gen_movi_i32(addr, imm);
8541 } else {
8542 addr = load_reg(s, rn);
8543 if (insn & (1 << 23)) {
8544 /* Positive offset. */
8545 imm = insn & 0xfff;
8546 tcg_gen_addi_i32(addr, addr, imm);
8547 } else {
8548 imm = insn & 0xff;
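/* With insn bit 23 clear, bits 11:8 select the addressing sub-mode
 * (register offset, negative immediate, unprivileged access, or
 * pre/post-indexed with writeback), each using this 8-bit immediate.
 */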
8549 switch ((insn >> 8) & 0xf) {
8550 case 0x0: /* Shifted Register. */
8551 shift = (insn >> 4) & 0xf;
8552 if (shift > 3) {
8553 tcg_temp_free_i32(addr);
8554 goto illegal_op;
8555 }
8556 tmp = load_reg(s, rm);
8557 if (shift)
8558 tcg_gen_shli_i32(tmp, tmp, shift);
8559 tcg_gen_add_i32(addr, addr, tmp);
8560 tcg_temp_free_i32(tmp);
8561 break;
8562 case 0xc: /* Negative offset. */
8563 tcg_gen_addi_i32(addr, addr, -imm);
8564 break;
8565 case 0xe: /* User privilege. */
8566 tcg_gen_addi_i32(addr, addr, imm);
8567 user = 1;
8568 break;
8569 case 0x9: /* Post-decrement. */
8570 imm = -imm;
8571 /* Fall through. */
8572 case 0xb: /* Post-increment. */
8573 postinc = 1;
8574 writeback = 1;
8575 break;
8576 case 0xd: /* Pre-decrement. */
8577 imm = -imm;
8578 /* Fall through. */
8579 case 0xf: /* Pre-increment. */
8580 tcg_gen_addi_i32(addr, addr, imm);
8581 writeback = 1;
8582 break;
8583 default:
8584 tcg_temp_free_i32(addr);
8585 goto illegal_op;
8586 }
8587 }
8588 }
8589 if (insn & (1 << 20)) {
8590 /* Load. */
8591 switch (op) {
8592 case 0: tmp = gen_ld8u(addr, user); break;
8593 case 4: tmp = gen_ld8s(addr, user); break;
8594 case 1: tmp = gen_ld16u(addr, user); break;
8595 case 5: tmp = gen_ld16s(addr, user); break;
8596 case 2: tmp = gen_ld32(addr, user); break;
8597 default:
8598 tcg_temp_free_i32(addr);
8599 goto illegal_op;
8600 }
8601 if (rs == 15) {
8602 gen_bx(s, tmp);
8603 } else {
8604 store_reg(s, rs, tmp);
8605 }
8606 } else {
8607 /* Store. */
8608 tmp = load_reg(s, rs);
8609 switch (op) {
8610 case 0: gen_st8(tmp, addr, user); break;
8611 case 1: gen_st16(tmp, addr, user); break;
8612 case 2: gen_st32(tmp, addr, user); break;
8613 default:
8614 tcg_temp_free_i32(addr);
8615 goto illegal_op;
8616 }
8617 }
8618 if (postinc)
8619 tcg_gen_addi_i32(addr, addr, imm);
8620 if (writeback) {
8621 store_reg(s, rn, addr);
8622 } else {
8623 tcg_temp_free_i32(addr);
8624 }
8625 }
8626 break;
8627 default:
8628 goto illegal_op;
8629 }
8630 return 0;
8631 illegal_op:
8632 return 1;
8633 }
8634
8635 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8636 {
8637 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8638 int32_t offset;
8639 int i;
8640 TCGv tmp;
8641 TCGv tmp2;
8642 TCGv addr;
8643
8644 if (s->condexec_mask) {
8645 cond = s->condexec_cond;
8646 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8647 s->condlabel = gen_new_label();
8648 gen_test_cc(cond ^ 1, s->condlabel);
8649 s->condjmp = 1;
8650 }
8651 }
8652
8653 insn = lduw_code(s->pc);
8654 s->pc += 2;
8655
8656 switch (insn >> 12) {
8657 case 0: case 1:
8658
8659 rd = insn & 7;
8660 op = (insn >> 11) & 3;
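/* op 0-2 select a shift by immediate (lsl/lsr/asr); op 3 is the
 * add/subtract form (register or 3-bit immediate) handled just below.
 */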
8661 if (op == 3) {
8662 /* add/subtract */
8663 rn = (insn >> 3) & 7;
8664 tmp = load_reg(s, rn);
8665 if (insn & (1 << 10)) {
8666 /* immediate */
8667 tmp2 = tcg_temp_new_i32();
8668 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8669 } else {
8670 /* reg */
8671 rm = (insn >> 6) & 7;
8672 tmp2 = load_reg(s, rm);
8673 }
8674 if (insn & (1 << 9)) {
8675 if (s->condexec_mask)
8676 tcg_gen_sub_i32(tmp, tmp, tmp2);
8677 else
8678 gen_helper_sub_cc(tmp, tmp, tmp2);
8679 } else {
8680 if (s->condexec_mask)
8681 tcg_gen_add_i32(tmp, tmp, tmp2);
8682 else
8683 gen_helper_add_cc(tmp, tmp, tmp2);
8684 }
8685 tcg_temp_free_i32(tmp2);
8686 store_reg(s, rd, tmp);
8687 } else {
8688 /* shift immediate */
8689 rm = (insn >> 3) & 7;
8690 shift = (insn >> 6) & 0x1f;
8691 tmp = load_reg(s, rm);
8692 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8693 if (!s->condexec_mask)
8694 gen_logic_CC(tmp);
8695 store_reg(s, rd, tmp);
8696 }
8697 break;
8698 case 2: case 3:
8699 /* arithmetic large immediate */
8700 op = (insn >> 11) & 3;
8701 rd = (insn >> 8) & 0x7;
8702 if (op == 0) { /* mov */
8703 tmp = tcg_temp_new_i32();
8704 tcg_gen_movi_i32(tmp, insn & 0xff);
8705 if (!s->condexec_mask)
8706 gen_logic_CC(tmp);
8707 store_reg(s, rd, tmp);
8708 } else {
8709 tmp = load_reg(s, rd);
8710 tmp2 = tcg_temp_new_i32();
8711 tcg_gen_movi_i32(tmp2, insn & 0xff);
8712 switch (op) {
8713 case 1: /* cmp */
8714 gen_helper_sub_cc(tmp, tmp, tmp2);
8715 tcg_temp_free_i32(tmp);
8716 tcg_temp_free_i32(tmp2);
8717 break;
8718 case 2: /* add */
8719 if (s->condexec_mask)
8720 tcg_gen_add_i32(tmp, tmp, tmp2);
8721 else
8722 gen_helper_add_cc(tmp, tmp, tmp2);
8723 tcg_temp_free_i32(tmp2);
8724 store_reg(s, rd, tmp);
8725 break;
8726 case 3: /* sub */
8727 if (s->condexec_mask)
8728 tcg_gen_sub_i32(tmp, tmp, tmp2);
8729 else
8730 gen_helper_sub_cc(tmp, tmp, tmp2);
8731 tcg_temp_free_i32(tmp2);
8732 store_reg(s, rd, tmp);
8733 break;
8734 }
8735 }
8736 break;
8737 case 4:
8738 if (insn & (1 << 11)) {
8739 rd = (insn >> 8) & 7;
8740 /* load pc-relative. Bit 1 of PC is ignored. */
8741 val = s->pc + 2 + ((insn & 0xff) * 4);
8742 val &= ~(uint32_t)2;
8743 addr = tcg_temp_new_i32();
8744 tcg_gen_movi_i32(addr, val);
8745 tmp = gen_ld32(addr, IS_USER(s));
8746 tcg_temp_free_i32(addr);
8747 store_reg(s, rd, tmp);
8748 break;
8749 }
8750 if (insn & (1 << 10)) {
8751 /* data processing extended or blx */
8752 rd = (insn & 7) | ((insn >> 4) & 8);
8753 rm = (insn >> 3) & 0xf;
8754 op = (insn >> 8) & 3;
8755 switch (op) {
8756 case 0: /* add */
8757 tmp = load_reg(s, rd);
8758 tmp2 = load_reg(s, rm);
8759 tcg_gen_add_i32(tmp, tmp, tmp2);
8760 tcg_temp_free_i32(tmp2);
8761 store_reg(s, rd, tmp);
8762 break;
8763 case 1: /* cmp */
8764 tmp = load_reg(s, rd);
8765 tmp2 = load_reg(s, rm);
8766 gen_helper_sub_cc(tmp, tmp, tmp2);
8767 tcg_temp_free_i32(tmp2);
8768 tcg_temp_free_i32(tmp);
8769 break;
8770 case 2: /* mov/cpy */
8771 tmp = load_reg(s, rm);
8772 store_reg(s, rd, tmp);
8773 break;
8774 case 3:/* branch [and link] exchange thumb register */
8775 tmp = load_reg(s, rm);
8776 if (insn & (1 << 7)) {
8777 ARCH(5);
8778 val = (uint32_t)s->pc | 1;
8779 tmp2 = tcg_temp_new_i32();
8780 tcg_gen_movi_i32(tmp2, val);
8781 store_reg(s, 14, tmp2);
8782 }
8783 /* already thumb, no need to check */
8784 gen_bx(s, tmp);
8785 break;
8786 }
8787 break;
8788 }
8789
8790 /* data processing register */
8791 rd = insn & 7;
8792 rm = (insn >> 3) & 7;
8793 op = (insn >> 6) & 0xf;
8794 if (op == 2 || op == 3 || op == 4 || op == 7) {
8795 /* the shift/rotate ops want the operands backwards */
8796 val = rm;
8797 rm = rd;
8798 rd = val;
8799 val = 1;
8800 } else {
8801 val = 0;
8802 }
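/* val == 1 means the result will be produced in tmp2 and must be
 * written back to rm (which now holds the destination register
 * number); see the store_reg() choice at the end of this case.
 */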
8803
8804 if (op == 9) { /* neg */
8805 tmp = tcg_temp_new_i32();
8806 tcg_gen_movi_i32(tmp, 0);
8807 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8808 tmp = load_reg(s, rd);
8809 } else {
8810 TCGV_UNUSED(tmp);
8811 }
8812
8813 tmp2 = load_reg(s, rm);
8814 switch (op) {
8815 case 0x0: /* and */
8816 tcg_gen_and_i32(tmp, tmp, tmp2);
8817 if (!s->condexec_mask)
8818 gen_logic_CC(tmp);
8819 break;
8820 case 0x1: /* eor */
8821 tcg_gen_xor_i32(tmp, tmp, tmp2);
8822 if (!s->condexec_mask)
8823 gen_logic_CC(tmp);
8824 break;
8825 case 0x2: /* lsl */
8826 if (s->condexec_mask) {
8827 gen_helper_shl(tmp2, tmp2, tmp);
8828 } else {
8829 gen_helper_shl_cc(tmp2, tmp2, tmp);
8830 gen_logic_CC(tmp2);
8831 }
8832 break;
8833 case 0x3: /* lsr */
8834 if (s->condexec_mask) {
8835 gen_helper_shr(tmp2, tmp2, tmp);
8836 } else {
8837 gen_helper_shr_cc(tmp2, tmp2, tmp);
8838 gen_logic_CC(tmp2);
8839 }
8840 break;
8841 case 0x4: /* asr */
8842 if (s->condexec_mask) {
8843 gen_helper_sar(tmp2, tmp2, tmp);
8844 } else {
8845 gen_helper_sar_cc(tmp2, tmp2, tmp);
8846 gen_logic_CC(tmp2);
8847 }
8848 break;
8849 case 0x5: /* adc */
8850 if (s->condexec_mask)
8851 gen_adc(tmp, tmp2);
8852 else
8853 gen_helper_adc_cc(tmp, tmp, tmp2);
8854 break;
8855 case 0x6: /* sbc */
8856 if (s->condexec_mask)
8857 gen_sub_carry(tmp, tmp, tmp2);
8858 else
8859 gen_helper_sbc_cc(tmp, tmp, tmp2);
8860 break;
8861 case 0x7: /* ror */
8862 if (s->condexec_mask) {
8863 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8864 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
8865 } else {
8866 gen_helper_ror_cc(tmp2, tmp2, tmp);
8867 gen_logic_CC(tmp2);
8868 }
8869 break;
8870 case 0x8: /* tst */
8871 tcg_gen_and_i32(tmp, tmp, tmp2);
8872 gen_logic_CC(tmp);
8873 rd = 16;
8874 break;
8875 case 0x9: /* neg */
8876 if (s->condexec_mask)
8877 tcg_gen_neg_i32(tmp, tmp2);
8878 else
8879 gen_helper_sub_cc(tmp, tmp, tmp2);
8880 break;
8881 case 0xa: /* cmp */
8882 gen_helper_sub_cc(tmp, tmp, tmp2);
8883 rd = 16;
8884 break;
8885 case 0xb: /* cmn */
8886 gen_helper_add_cc(tmp, tmp, tmp2);
8887 rd = 16;
8888 break;
8889 case 0xc: /* orr */
8890 tcg_gen_or_i32(tmp, tmp, tmp2);
8891 if (!s->condexec_mask)
8892 gen_logic_CC(tmp);
8893 break;
8894 case 0xd: /* mul */
8895 tcg_gen_mul_i32(tmp, tmp, tmp2);
8896 if (!s->condexec_mask)
8897 gen_logic_CC(tmp);
8898 break;
8899 case 0xe: /* bic */
8900 tcg_gen_andc_i32(tmp, tmp, tmp2);
8901 if (!s->condexec_mask)
8902 gen_logic_CC(tmp);
8903 break;
8904 case 0xf: /* mvn */
8905 tcg_gen_not_i32(tmp2, tmp2);
8906 if (!s->condexec_mask)
8907 gen_logic_CC(tmp2);
8908 val = 1;
8909 rm = rd;
8910 break;
8911 }
8912 if (rd != 16) {
8913 if (val) {
8914 store_reg(s, rm, tmp2);
8915 if (op != 0xf)
8916 tcg_temp_free_i32(tmp);
8917 } else {
8918 store_reg(s, rd, tmp);
8919 tcg_temp_free_i32(tmp2);
8920 }
8921 } else {
8922 tcg_temp_free_i32(tmp);
8923 tcg_temp_free_i32(tmp2);
8924 }
8925 break;
8926
8927 case 5:
8928 /* load/store register offset. */
8929 rd = insn & 7;
8930 rn = (insn >> 3) & 7;
8931 rm = (insn >> 6) & 7;
8932 op = (insn >> 9) & 7;
8933 addr = load_reg(s, rn);
8934 tmp = load_reg(s, rm);
8935 tcg_gen_add_i32(addr, addr, tmp);
8936 tcg_temp_free_i32(tmp);
8937
8938 if (op < 3) /* store */
8939 tmp = load_reg(s, rd);
8940
8941 switch (op) {
8942 case 0: /* str */
8943 gen_st32(tmp, addr, IS_USER(s));
8944 break;
8945 case 1: /* strh */
8946 gen_st16(tmp, addr, IS_USER(s));
8947 break;
8948 case 2: /* strb */
8949 gen_st8(tmp, addr, IS_USER(s));
8950 break;
8951 case 3: /* ldrsb */
8952 tmp = gen_ld8s(addr, IS_USER(s));
8953 break;
8954 case 4: /* ldr */
8955 tmp = gen_ld32(addr, IS_USER(s));
8956 break;
8957 case 5: /* ldrh */
8958 tmp = gen_ld16u(addr, IS_USER(s));
8959 break;
8960 case 6: /* ldrb */
8961 tmp = gen_ld8u(addr, IS_USER(s));
8962 break;
8963 case 7: /* ldrsh */
8964 tmp = gen_ld16s(addr, IS_USER(s));
8965 break;
8966 }
8967 if (op >= 3) /* load */
8968 store_reg(s, rd, tmp);
8969 tcg_temp_free_i32(addr);
8970 break;
8971
8972 case 6:
8973 /* load/store word immediate offset */
8974 rd = insn & 7;
8975 rn = (insn >> 3) & 7;
8976 addr = load_reg(s, rn);
8977 val = (insn >> 4) & 0x7c;
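/* (insn >> 4) & 0x7c is the 5-bit immediate (bits 10:6) already
 * scaled by the word size, i.e. ((insn >> 6) & 0x1f) * 4.
 */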
8978 tcg_gen_addi_i32(addr, addr, val);
8979
8980 if (insn & (1 << 11)) {
8981 /* load */
8982 tmp = gen_ld32(addr, IS_USER(s));
8983 store_reg(s, rd, tmp);
8984 } else {
8985 /* store */
8986 tmp = load_reg(s, rd);
8987 gen_st32(tmp, addr, IS_USER(s));
8988 }
8989 tcg_temp_free_i32(addr);
8990 break;
8991
8992 case 7:
8993 /* load/store byte immediate offset */
8994 rd = insn & 7;
8995 rn = (insn >> 3) & 7;
8996 addr = load_reg(s, rn);
8997 val = (insn >> 6) & 0x1f;
8998 tcg_gen_addi_i32(addr, addr, val);
8999
9000 if (insn & (1 << 11)) {
9001 /* load */
9002 tmp = gen_ld8u(addr, IS_USER(s));
9003 store_reg(s, rd, tmp);
9004 } else {
9005 /* store */
9006 tmp = load_reg(s, rd);
9007 gen_st8(tmp, addr, IS_USER(s));
9008 }
9009 tcg_temp_free_i32(addr);
9010 break;
9011
9012 case 8:
9013 /* load/store halfword immediate offset */
9014 rd = insn & 7;
9015 rn = (insn >> 3) & 7;
9016 addr = load_reg(s, rn);
9017 val = (insn >> 5) & 0x3e;
9018 tcg_gen_addi_i32(addr, addr, val);
9019
9020 if (insn & (1 << 11)) {
9021 /* load */
9022 tmp = gen_ld16u(addr, IS_USER(s));
9023 store_reg(s, rd, tmp);
9024 } else {
9025 /* store */
9026 tmp = load_reg(s, rd);
9027 gen_st16(tmp, addr, IS_USER(s));
9028 }
9029 tcg_temp_free_i32(addr);
9030 break;
9031
9032 case 9:
9033 /* load/store from stack */
9034 rd = (insn >> 8) & 7;
9035 addr = load_reg(s, 13);
9036 val = (insn & 0xff) * 4;
9037 tcg_gen_addi_i32(addr, addr, val);
9038
9039 if (insn & (1 << 11)) {
9040 /* load */
9041 tmp = gen_ld32(addr, IS_USER(s));
9042 store_reg(s, rd, tmp);
9043 } else {
9044 /* store */
9045 tmp = load_reg(s, rd);
9046 gen_st32(tmp, addr, IS_USER(s));
9047 }
9048 tcg_temp_free_i32(addr);
9049 break;
9050
9051 case 10:
9052 /* add to high reg */
9053 rd = (insn >> 8) & 7;
9054 if (insn & (1 << 11)) {
9055 /* SP */
9056 tmp = load_reg(s, 13);
9057 } else {
9058 /* PC. bit 1 is ignored. */
9059 tmp = tcg_temp_new_i32();
9060 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9061 }
9062 val = (insn & 0xff) * 4;
9063 tcg_gen_addi_i32(tmp, tmp, val);
9064 store_reg(s, rd, tmp);
9065 break;
9066
9067 case 11:
9068 /* misc */
9069 op = (insn >> 8) & 0xf;
9070 switch (op) {
9071 case 0:
9072 /* adjust stack pointer */
9073 tmp = load_reg(s, 13);
9074 val = (insn & 0x7f) * 4;
9075 if (insn & (1 << 7))
9076 val = -(int32_t)val;
9077 tcg_gen_addi_i32(tmp, tmp, val);
9078 store_reg(s, 13, tmp);
9079 break;
9080
9081 case 2: /* sign/zero extend. */
9082 ARCH(6);
9083 rd = insn & 7;
9084 rm = (insn >> 3) & 7;
9085 tmp = load_reg(s, rm);
9086 switch ((insn >> 6) & 3) {
9087 case 0: gen_sxth(tmp); break;
9088 case 1: gen_sxtb(tmp); break;
9089 case 2: gen_uxth(tmp); break;
9090 case 3: gen_uxtb(tmp); break;
9091 }
9092 store_reg(s, rd, tmp);
9093 break;
9094 case 4: case 5: case 0xc: case 0xd:
9095 /* push/pop */
9096 addr = load_reg(s, 13);
9097 if (insn & (1 << 8))
9098 offset = 4;
9099 else
9100 offset = 0;
9101 for (i = 0; i < 8; i++) {
9102 if (insn & (1 << i))
9103 offset += 4;
9104 }
9105 if ((insn & (1 << 11)) == 0) {
9106 tcg_gen_addi_i32(addr, addr, -offset);
9107 }
9108 for (i = 0; i < 8; i++) {
9109 if (insn & (1 << i)) {
9110 if (insn & (1 << 11)) {
9111 /* pop */
9112 tmp = gen_ld32(addr, IS_USER(s));
9113 store_reg(s, i, tmp);
9114 } else {
9115 /* push */
9116 tmp = load_reg(s, i);
9117 gen_st32(tmp, addr, IS_USER(s));
9118 }
9119 /* advance to the next address. */
9120 tcg_gen_addi_i32(addr, addr, 4);
9121 }
9122 }
9123 TCGV_UNUSED(tmp);
9124 if (insn & (1 << 8)) {
9125 if (insn & (1 << 11)) {
9126 /* pop pc */
9127 tmp = gen_ld32(addr, IS_USER(s));
9128 /* don't set the pc until the rest of the instruction
9129 has completed */
9130 } else {
9131 /* push lr */
9132 tmp = load_reg(s, 14);
9133 gen_st32(tmp, addr, IS_USER(s));
9134 }
9135 tcg_gen_addi_i32(addr, addr, 4);
9136 }
9137 if ((insn & (1 << 11)) == 0) {
9138 tcg_gen_addi_i32(addr, addr, -offset);
9139 }
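/* For push the stores above advanced addr back up to the original SP,
 * so subtracting the total transfer size again (just above) leaves the
 * new, lower SP; for pop addr has simply advanced to the new SP.
 */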
9140 /* write back the new stack pointer */
9141 store_reg(s, 13, addr);
9142 /* set the new PC value */
9143 if ((insn & 0x0900) == 0x0900) {
9144 store_reg_from_load(env, s, 15, tmp);
9145 }
9146 break;
9147
9148 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9149 rm = insn & 7;
9150 tmp = load_reg(s, rm);
9151 s->condlabel = gen_new_label();
9152 s->condjmp = 1;
9153 if (insn & (1 << 11))
9154 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9155 else
9156 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9157 tcg_temp_free_i32(tmp);
9158 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
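/* offset is i:imm5:'0': insn bit 9 supplies bit 6 and bits 7:3 supply
 * bits 5:1, so the branch target computed below is the insn address
 * + 4 + offset (s->pc already points past this halfword).
 */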
9159 val = (uint32_t)s->pc + 2;
9160 val += offset;
9161 gen_jmp(s, val);
9162 break;
9163
9164 case 15: /* IT, nop-hint. */
9165 if ((insn & 0xf) == 0) {
9166 gen_nop_hint(s, (insn >> 4) & 0xf);
9167 break;
9168 }
9169 /* If Then. */
9170 s->condexec_cond = (insn >> 4) & 0xe;
9171 s->condexec_mask = insn & 0x1f;
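/* Note the internal mask is 5 bits: bit 4 is the low bit of firstcond,
 * so advancing the IT state (see gen_intermediate_code_internal) can
 * recover the then/else condition for each following instruction.
 */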
9172 /* No actual code generated for this insn, just setup state. */
9173 break;
9174
9175 case 0xe: /* bkpt */
9176 ARCH(5);
9177 gen_exception_insn(s, 2, EXCP_BKPT);
9178 break;
9179
9180 case 0xa: /* rev */
9181 ARCH(6);
9182 rn = (insn >> 3) & 0x7;
9183 rd = insn & 0x7;
9184 tmp = load_reg(s, rn);
9185 switch ((insn >> 6) & 3) {
9186 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9187 case 1: gen_rev16(tmp); break;
9188 case 3: gen_revsh(tmp); break;
9189 default: goto illegal_op;
9190 }
9191 store_reg(s, rd, tmp);
9192 break;
9193
9194 case 6: /* cps */
9195 ARCH(6);
9196 if (IS_USER(s))
9197 break;
9198 if (IS_M(env)) {
9199 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9200 /* PRIMASK */
9201 if (insn & 1) {
9202 addr = tcg_const_i32(16);
9203 gen_helper_v7m_msr(cpu_env, addr, tmp);
9204 tcg_temp_free_i32(addr);
9205 }
9206 /* FAULTMASK */
9207 if (insn & 2) {
9208 addr = tcg_const_i32(17);
9209 gen_helper_v7m_msr(cpu_env, addr, tmp);
9210 tcg_temp_free_i32(addr);
9211 }
9212 tcg_temp_free_i32(tmp);
9213 gen_lookup_tb(s);
9214 } else {
9215 if (insn & (1 << 4))
9216 shift = CPSR_A | CPSR_I | CPSR_F;
9217 else
9218 shift = 0;
9219 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9220 }
9221 break;
9222
9223 default:
9224 goto undef;
9225 }
9226 break;
9227
9228 case 12:
9229 /* load/store multiple */
9230 rn = (insn >> 8) & 0x7;
9231 addr = load_reg(s, rn);
9232 for (i = 0; i < 8; i++) {
9233 if (insn & (1 << i)) {
9234 if (insn & (1 << 11)) {
9235 /* load */
9236 tmp = gen_ld32(addr, IS_USER(s));
9237 store_reg(s, i, tmp);
9238 } else {
9239 /* store */
9240 tmp = load_reg(s, i);
9241 gen_st32(tmp, addr, IS_USER(s));
9242 }
9243 /* advance to the next address */
9244 tcg_gen_addi_i32(addr, addr, 4);
9245 }
9246 }
9247 /* Base register writeback. */
9248 if ((insn & (1 << rn)) == 0) {
9249 store_reg(s, rn, addr);
9250 } else {
9251 tcg_temp_free_i32(addr);
9252 }
9253 break;
9254
9255 case 13:
9256 /* conditional branch or swi */
9257 cond = (insn >> 8) & 0xf;
9258 if (cond == 0xe)
9259 goto undef;
9260
9261 if (cond == 0xf) {
9262 /* swi */
9263 gen_set_pc_im(s->pc);
9264 s->is_jmp = DISAS_SWI;
9265 break;
9266 }
9267 /* generate a conditional jump to next instruction */
9268 s->condlabel = gen_new_label();
9269 gen_test_cc(cond ^ 1, s->condlabel);
9270 s->condjmp = 1;
9271
9272 /* jump to the offset */
9273 val = (uint32_t)s->pc + 2;
9274 offset = ((int32_t)insn << 24) >> 24;
9275 val += offset << 1;
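/* Branch target: insn address + 4 + signed imm8 * 2 (s->pc already
 * points to the next halfword, hence the extra +2 above).
 */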
9276 gen_jmp(s, val);
9277 break;
9278
9279 case 14:
9280 if (insn & (1 << 11)) {
9281 if (disas_thumb2_insn(env, s, insn))
9282 goto undef32;
9283 break;
9284 }
9285 /* unconditional branch */
9286 val = (uint32_t)s->pc;
9287 offset = ((int32_t)insn << 21) >> 21;
9288 val += (offset << 1) + 2;
9289 gen_jmp(s, val);
9290 break;
9291
9292 case 15:
9293 if (disas_thumb2_insn(env, s, insn))
9294 goto undef32;
9295 break;
9296 }
9297 return;
9298 undef32:
9299 gen_exception_insn(s, 4, EXCP_UDEF);
9300 return;
9301 illegal_op:
9302 undef:
9303 gen_exception_insn(s, 2, EXCP_UDEF);
9304 }
9305
9306 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9307 basic block 'tb'. If search_pc is TRUE, also generate PC
9308 information for each intermediate instruction. */
9309 static inline void gen_intermediate_code_internal(CPUState *env,
9310 TranslationBlock *tb,
9311 int search_pc)
9312 {
9313 DisasContext dc1, *dc = &dc1;
9314 CPUBreakpoint *bp;
9315 uint16_t *gen_opc_end;
9316 int j, lj;
9317 target_ulong pc_start;
9318 uint32_t next_page_start;
9319 int num_insns;
9320 int max_insns;
9321
9322 /* generate intermediate code */
9323 pc_start = tb->pc;
9324
9325 dc->tb = tb;
9326
9327 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9328
9329 dc->is_jmp = DISAS_NEXT;
9330 dc->pc = pc_start;
9331 dc->singlestep_enabled = env->singlestep_enabled;
9332 dc->condjmp = 0;
9333 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9334 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9335 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9336 #if !defined(CONFIG_USER_ONLY)
9337 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9338 #endif
9339 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9340 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9341 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9342 cpu_F0s = tcg_temp_new_i32();
9343 cpu_F1s = tcg_temp_new_i32();
9344 cpu_F0d = tcg_temp_new_i64();
9345 cpu_F1d = tcg_temp_new_i64();
9346 cpu_V0 = cpu_F0d;
9347 cpu_V1 = cpu_F1d;
9348 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9349 cpu_M0 = tcg_temp_new_i64();
9350 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9351 lj = -1;
9352 num_insns = 0;
9353 max_insns = tb->cflags & CF_COUNT_MASK;
9354 if (max_insns == 0)
9355 max_insns = CF_COUNT_MASK;
9356
9357 gen_icount_start();
9358
9359 tcg_clear_temp_count();
9360
9361 /* A note on handling of the condexec (IT) bits:
9362 *
9363 * We want to avoid the overhead of having to write the updated condexec
9364 * bits back to the CPUState for every instruction in an IT block. So:
9365 * (1) if the condexec bits are not already zero then we write
9366 * zero back into the CPUState now. This avoids complications trying
9367 * to do it at the end of the block. (For example if we don't do this
9368 * it's hard to identify whether we can safely skip writing condexec
9369 * at the end of the TB, which we definitely want to do for the case
9370 * where a TB doesn't do anything with the IT state at all.)
9371 * (2) if we are going to leave the TB then we call gen_set_condexec()
9372 * which will write the correct value into CPUState if zero is wrong.
9373 * This is done both for leaving the TB at the end, and for leaving
9374 * it because of an exception we know will happen, which is done in
9375 * gen_exception_insn(). The latter is necessary because we need to
9376 * leave the TB with the PC/IT state just prior to execution of the
9377 * instruction which caused the exception.
9378 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9379 * then the CPUState will be wrong and we need to reset it.
9380 * This is handled in the same way as restoration of the
9381 * PC in these situations: we will be called again with search_pc=1
9382 * and generate a mapping of the condexec bits for each PC in
9383 * gen_opc_condexec_bits[]. gen_pc_load[] then uses this to restore
9384 * the condexec bits.
9385 *
9386 * Note that there are no instructions which can read the condexec
9387 * bits, and none which can write non-static values to them, so
9388 * we don't need to care about whether CPUState is correct in the
9389 * middle of a TB.
9390 */
9391
9392 /* Reset the conditional execution bits immediately. This avoids
9393 complications trying to do it at the end of the block. */
9394 if (dc->condexec_mask || dc->condexec_cond)
9395 {
9396 TCGv tmp = tcg_temp_new_i32();
9397 tcg_gen_movi_i32(tmp, 0);
9398 store_cpu_field(tmp, condexec_bits);
9399 }
9400 do {
9401 #ifdef CONFIG_USER_ONLY
9402 /* Intercept jump to the magic kernel page. */
9403 if (dc->pc >= 0xffff0000) {
9404 /* We always get here via a jump, so we know we are not in a
9405 conditional execution block. */
9406 gen_exception(EXCP_KERNEL_TRAP);
9407 dc->is_jmp = DISAS_UPDATE;
9408 break;
9409 }
9410 #else
9411 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9412 /* We always get here via a jump, so we know we are not in a
9413 conditional execution block. */
9414 gen_exception(EXCP_EXCEPTION_EXIT);
9415 dc->is_jmp = DISAS_UPDATE;
9416 break;
9417 }
9418 #endif
9419
9420 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9421 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9422 if (bp->pc == dc->pc) {
9423 gen_exception_insn(dc, 0, EXCP_DEBUG);
9424 /* Advance PC so that clearing the breakpoint will
9425 invalidate this TB. */
9426 dc->pc += 2;
9427 goto done_generating;
9428 break;
9429 }
9430 }
9431 }
9432 if (search_pc) {
9433 j = gen_opc_ptr - gen_opc_buf;
9434 if (lj < j) {
9435 lj++;
9436 while (lj < j)
9437 gen_opc_instr_start[lj++] = 0;
9438 }
9439 gen_opc_pc[lj] = dc->pc;
9440 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
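/* This packs the IT state back into the cond:mask layout used by
 * ARM_TBFLAG_CONDEXEC above, which gen_pc_load() later copies into
 * env->condexec_bits when restoring state for a faulting insn.
 */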
9441 gen_opc_instr_start[lj] = 1;
9442 gen_opc_icount[lj] = num_insns;
9443 }
9444
9445 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9446 gen_io_start();
9447
9448 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9449 tcg_gen_debug_insn_start(dc->pc);
9450 }
9451
9452 if (dc->thumb) {
9453 disas_thumb_insn(env, dc);
9454 if (dc->condexec_mask) {
9455 dc->condexec_cond = (dc->condexec_cond & 0xe)
9456 | ((dc->condexec_mask >> 4) & 1);
9457 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
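/* Advance the IT state: the next condition's low bit is taken from
 * the top of the 5-bit mask and the mask shifts left; when it
 * reaches zero the IT block has ended.
 */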
9458 if (dc->condexec_mask == 0) {
9459 dc->condexec_cond = 0;
9460 }
9461 }
9462 } else {
9463 disas_arm_insn(env, dc);
9464 }
9465
9466 if (dc->condjmp && !dc->is_jmp) {
9467 gen_set_label(dc->condlabel);
9468 dc->condjmp = 0;
9469 }
9470
9471 if (tcg_check_temp_count()) {
9472 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9473 }
9474
9475 /* Translation stops when a conditional branch is encountered.
9476 * Otherwise the subsequent code could get translated several times.
9477 * Also stop translation when a page boundary is reached. This
9478 * ensures prefetch aborts occur at the right place. */
9479 num_insns ++;
9480 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9481 !env->singlestep_enabled &&
9482 !singlestep &&
9483 dc->pc < next_page_start &&
9484 num_insns < max_insns);
9485
9486 if (tb->cflags & CF_LAST_IO) {
9487 if (dc->condjmp) {
9488 /* FIXME: This can theoretically happen with self-modifying
9489 code. */
9490 cpu_abort(env, "IO on conditional branch instruction");
9491 }
9492 gen_io_end();
9493 }
9494
9495 /* At this stage dc->condjmp will only be set when the skipped
9496 instruction was a conditional branch or trap, and the PC has
9497 already been written. */
9498 if (unlikely(env->singlestep_enabled)) {
9499 /* Make sure the pc is updated, and raise a debug exception. */
9500 if (dc->condjmp) {
9501 gen_set_condexec(dc);
9502 if (dc->is_jmp == DISAS_SWI) {
9503 gen_exception(EXCP_SWI);
9504 } else {
9505 gen_exception(EXCP_DEBUG);
9506 }
9507 gen_set_label(dc->condlabel);
9508 }
9509 if (dc->condjmp || !dc->is_jmp) {
9510 gen_set_pc_im(dc->pc);
9511 dc->condjmp = 0;
9512 }
9513 gen_set_condexec(dc);
9514 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9515 gen_exception(EXCP_SWI);
9516 } else {
9517 /* FIXME: Single stepping a WFI insn will not halt
9518 the CPU. */
9519 gen_exception(EXCP_DEBUG);
9520 }
9521 } else {
9522 /* While branches must always occur at the end of an IT block,
9523 there are a few other things that can cause us to terminate
9524 the TB in the middle of an IT block:
9525 - Exception generating instructions (bkpt, swi, undefined).
9526 - Page boundaries.
9527 - Hardware watchpoints.
9528 Hardware breakpoints have already been handled and skip this code.
9529 */
9530 gen_set_condexec(dc);
9531 switch(dc->is_jmp) {
9532 case DISAS_NEXT:
9533 gen_goto_tb(dc, 1, dc->pc);
9534 break;
9535 default:
9536 case DISAS_JUMP:
9537 case DISAS_UPDATE:
9538 /* indicate that the hash table must be used to find the next TB */
9539 tcg_gen_exit_tb(0);
9540 break;
9541 case DISAS_TB_JUMP:
9542 /* nothing more to generate */
9543 break;
9544 case DISAS_WFI:
9545 gen_helper_wfi();
9546 break;
9547 case DISAS_SWI:
9548 gen_exception(EXCP_SWI);
9549 break;
9550 }
9551 if (dc->condjmp) {
9552 gen_set_label(dc->condlabel);
9553 gen_set_condexec(dc);
9554 gen_goto_tb(dc, 1, dc->pc);
9555 dc->condjmp = 0;
9556 }
9557 }
9558
9559 done_generating:
9560 gen_icount_end(tb, num_insns);
9561 *gen_opc_ptr = INDEX_op_end;
9562
9563 #ifdef DEBUG_DISAS
9564 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9565 qemu_log("----------------\n");
9566 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9567 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
9568 qemu_log("\n");
9569 }
9570 #endif
9571 if (search_pc) {
9572 j = gen_opc_ptr - gen_opc_buf;
9573 lj++;
9574 while (lj <= j)
9575 gen_opc_instr_start[lj++] = 0;
9576 } else {
9577 tb->size = dc->pc - pc_start;
9578 tb->icount = num_insns;
9579 }
9580 }
9581
9582 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
9583 {
9584 gen_intermediate_code_internal(env, tb, 0);
9585 }
9586
9587 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
9588 {
9589 gen_intermediate_code_internal(env, tb, 1);
9590 }
9591
9592 static const char *cpu_mode_names[16] = {
9593 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9594 "???", "???", "???", "und", "???", "???", "???", "sys"
9595 };
9596
9597 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
9598 int flags)
9599 {
9600 int i;
9601 #if 0
9602 union {
9603 uint32_t i;
9604 float s;
9605 } s0, s1;
9606 CPU_DoubleU d;
9607 /* ??? This assumes float64 and double have the same layout.
9608 Oh well, it's only debug dumps. */
9609 union {
9610 float64 f64;
9611 double d;
9612 } d0;
9613 #endif
9614 uint32_t psr;
9615
9616 for(i=0;i<16;i++) {
9617 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9618 if ((i % 4) == 3)
9619 cpu_fprintf(f, "\n");
9620 else
9621 cpu_fprintf(f, " ");
9622 }
9623 psr = cpsr_read(env);
9624 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9625 psr,
9626 psr & (1 << 31) ? 'N' : '-',
9627 psr & (1 << 30) ? 'Z' : '-',
9628 psr & (1 << 29) ? 'C' : '-',
9629 psr & (1 << 28) ? 'V' : '-',
9630 psr & CPSR_T ? 'T' : 'A',
9631 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
9632
9633 #if 0
9634 for (i = 0; i < 16; i++) {
9635 d.d = env->vfp.regs[i];
9636 s0.i = d.l.lower;
9637 s1.i = d.l.upper;
9638 d0.f64 = d.d;
9639 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9640 i * 2, (int)s0.i, s0.s,
9641 i * 2 + 1, (int)s1.i, s1.s,
9642 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
9643 d0.d);
9644 }
9645 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
9646 #endif
9647 }
9648
9649 void gen_pc_load(CPUState *env, TranslationBlock *tb,
9650 unsigned long searched_pc, int pc_pos, void *puc)
9651 {
9652 env->regs[15] = gen_opc_pc[pc_pos];
9653 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
9654 }