/*
 * ARM translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
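
/* Usage sketch (not from this excerpt): a decoder arm can guard a
 * feature-gated encoding with e.g. "ARCH(6T2);", which falls through
 * to the decoder's local illegal_op label when the current CPU model
 * lacks the feature bit. */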

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    int condlabel;
    /* Thumb-2 conditional execution bits. */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
    int bswap_code;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated. */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME: These should be removed. */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helper.h"
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register. The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
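
/* Worked example for gen_smul_dual(): with a = 0x00020003 and
 * b = 0x00040005, a ends up holding the low-halfword product
 * 3 * 5 = 15 and b the high-halfword product 2 * 4 = 8; callers
 * then combine the two products for SMUAD/SMUSD-style dual
 * multiplies. */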

/* Byteswap each halfword. */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract. */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract. */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
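
/* The xor/sub pair above is the usual branch-free sign extension:
 * once the field is masked to 'width' bits, xor-ing with the sign
 * bit and then subtracting it propagates that bit upwards. For
 * width = 8, the field 0xff becomes 0xff ^ 0x80 = 0x7f, and
 * 0x7f - 0x80 = -1 as expected. */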

/* Bitfield insertion. Insert val into base. Clobbers base and val. */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
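
/* Example: gen_bfi(dest, base, val, 8, 0xff) copies the low byte of
 * val into bits [15:8] of base and leaves all other bits intact --
 * the core operation of the ARM BFI instruction. */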

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply. */
/* 32x32->64 multiply. Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords. */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF))

/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF));
}

/* T0 += T1 + CF. */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}

/* FIXME: Implement this natively. */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
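
/* shifter_out_im() computes the shifter carry-out for an immediate
 * shift: bit 'shift' of the original value (the last bit shifted
 * out) becomes the new C flag. The shift == 31 case needs no mask
 * because the right shift already leaves a single bit. */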

/* Shift by immediate. Includes special handling for shift == 0. */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
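
/* The shift == 0 special cases mirror the ARM immediate shift
 * encodings: LSR #0 and ASR #0 actually mean a shift by 32, and
 * ROR #0 encodes RRX (rotate right by one through the carry flag),
 * which is why the ROR path folds the old C flag into bit 31. */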

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
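
/* To summarize the two tables: ARM selects the saturating/halving
 * flavour with op1 (1 = signed, 5 = unsigned, 2 = Q, 3 = SH,
 * 6 = UQ, 7 = UH) and the operation with op2, while Thumb-2 swaps
 * the roles of the two fields and renumbers the operations --
 * hence the duplicated PAS_OP tables above. */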

static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}
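
/* gen_test_cc() relies on how this file stores the flags: CF holds
 * 0 or 1, NF and VF hold a value whose bit 31 is the flag, and ZF
 * holds a value that is zero exactly when Z is set. That is why,
 * for example, "mi" tests NF < 0 while "eq" tests ZF == 0. */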

static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var. var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
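
/* Both gen_bx_im() and gen_bx() implement the interworking rule:
 * bit 0 of the branch target selects Thumb state, and the PC itself
 * is always written with that bit cleared. */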

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
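
/* gen_add_data_offset() decodes ARM addressing mode 2: bit 25
 * selects a shifted register vs. a 12-bit immediate offset, bit 23
 * gives the direction (add/subtract), and in the register form
 * bits [11:7] and [6:5] supply the shift amount and shift type
 * applied to Rm. */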

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
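
/* Neon arithmetic always uses the "standard FP" status block
 * (flush-to-zero, default NaN), whereas VFP honours the rounding
 * and exception bits the guest set in the FPSCR; get_fpstatus_ptr()
 * picks the float_status structure accordingly. */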

#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
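
/* Single-precision registers live inside the doubles they alias,
 * matching the VFP register banking rules: e.g. s5 (reg = 5, odd)
 * resolves to the upper word of d2, while s4 resolves to its lower
 * word. */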

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
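
/* Addressing for iwMMXt loads/stores: bit 24 selects the
 * pre-indexed form, bit 21 post-indexed with writeback, and bit 23
 * the offset direction. The offset itself is an 8-bit immediate,
 * scaled by 4 when bit 8 of the instruction is set (hence the
 * "<< ((insn >> 7) & 2)" above). */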

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
   (i.e. an undefined instruction). */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

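    /* The remaining iwMMXt data-processing instructions are decoded
     * through a 12-bit key: instruction bits [23:20] form the top
     * nibble and bits [11:4] the low byte, giving the case labels
     * below (e.g. 0x000 is WOR and 0x100 is WXOR). */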
    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through. */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2173 else
2174 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2175 break;
2176 case 3:
2177 return 1;
2178 }
2179 gen_op_iwmmxt_movq_wRn_M0(wrd);
2180 gen_op_iwmmxt_set_mup();
2181 break;
2182 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2183 case 0x402: case 0x502: case 0x602: case 0x702:
2184 wrd = (insn >> 12) & 0xf;
2185 rd0 = (insn >> 16) & 0xf;
2186 rd1 = (insn >> 0) & 0xf;
2187 gen_op_iwmmxt_movq_M0_wRn(rd0);
2188 tmp = tcg_const_i32((insn >> 20) & 3);
2189 iwmmxt_load_reg(cpu_V1, rd1);
2190 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2191 tcg_temp_free(tmp);
2192 gen_op_iwmmxt_movq_wRn_M0(wrd);
2193 gen_op_iwmmxt_set_mup();
2194 break;
2195 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2196 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2197 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2198 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2199 wrd = (insn >> 12) & 0xf;
2200 rd0 = (insn >> 16) & 0xf;
2201 rd1 = (insn >> 0) & 0xf;
2202 gen_op_iwmmxt_movq_M0_wRn(rd0);
2203 switch ((insn >> 20) & 0xf) {
2204 case 0x0:
2205 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2206 break;
2207 case 0x1:
2208 gen_op_iwmmxt_subub_M0_wRn(rd1);
2209 break;
2210 case 0x3:
2211 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2212 break;
2213 case 0x4:
2214 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2215 break;
2216 case 0x5:
2217 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2218 break;
2219 case 0x7:
2220 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2221 break;
2222 case 0x8:
2223 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2224 break;
2225 case 0x9:
2226 gen_op_iwmmxt_subul_M0_wRn(rd1);
2227 break;
2228 case 0xb:
2229 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2230 break;
2231 default:
2232 return 1;
2233 }
2234 gen_op_iwmmxt_movq_wRn_M0(wrd);
2235 gen_op_iwmmxt_set_mup();
2236 gen_op_iwmmxt_set_cup();
2237 break;
2238 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2239 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2240 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2241 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2242 wrd = (insn >> 12) & 0xf;
2243 rd0 = (insn >> 16) & 0xf;
2244 gen_op_iwmmxt_movq_M0_wRn(rd0);
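/* The 8-bit selector built below packs four 2-bit source-lane indices,
 * one per destination halfword (analogous to MMX's PSHUFW immediate). */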
2245 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2246 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2247 tcg_temp_free(tmp);
2248 gen_op_iwmmxt_movq_wRn_M0(wrd);
2249 gen_op_iwmmxt_set_mup();
2250 gen_op_iwmmxt_set_cup();
2251 break;
2252 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2253 case 0x418: case 0x518: case 0x618: case 0x718:
2254 case 0x818: case 0x918: case 0xa18: case 0xb18:
2255 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 rd1 = (insn >> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0);
2260 switch ((insn >> 20) & 0xf) {
2261 case 0x0:
2262 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2263 break;
2264 case 0x1:
2265 gen_op_iwmmxt_addub_M0_wRn(rd1);
2266 break;
2267 case 0x3:
2268 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2269 break;
2270 case 0x4:
2271 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2272 break;
2273 case 0x5:
2274 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2275 break;
2276 case 0x7:
2277 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2278 break;
2279 case 0x8:
2280 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2281 break;
2282 case 0x9:
2283 gen_op_iwmmxt_addul_M0_wRn(rd1);
2284 break;
2285 case 0xb:
2286 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2287 break;
2288 default:
2289 return 1;
2290 }
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2296 case 0x408: case 0x508: case 0x608: case 0x708:
2297 case 0x808: case 0x908: case 0xa08: case 0xb08:
2298 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2299 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2300 return 1;
2301 wrd = (insn >> 12) & 0xf;
2302 rd0 = (insn >> 16) & 0xf;
2303 rd1 = (insn >> 0) & 0xf;
2304 gen_op_iwmmxt_movq_M0_wRn(rd0);
2305 switch ((insn >> 22) & 3) {
2306 case 1:
2307 if (insn & (1 << 21))
2308 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2309 else
2310 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2311 break;
2312 case 2:
2313 if (insn & (1 << 21))
2314 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2315 else
2316 gen_op_iwmmxt_packul_M0_wRn(rd1);
2317 break;
2318 case 3:
2319 if (insn & (1 << 21))
2320 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2321 else
2322 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2323 break;
2324 }
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 gen_op_iwmmxt_set_cup();
2328 break;
2329 case 0x201: case 0x203: case 0x205: case 0x207:
2330 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2331 case 0x211: case 0x213: case 0x215: case 0x217:
2332 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2333 wrd = (insn >> 5) & 0xf;
2334 rd0 = (insn >> 12) & 0xf;
2335 rd1 = (insn >> 0) & 0xf;
2336 if (rd0 == 0xf || rd1 == 0xf)
2337 return 1;
2338 gen_op_iwmmxt_movq_M0_wRn(wrd);
2339 tmp = load_reg(s, rd0);
2340 tmp2 = load_reg(s, rd1);
2341 switch ((insn >> 16) & 0xf) {
2342 case 0x0: /* TMIA */
2343 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2344 break;
2345 case 0x8: /* TMIAPH */
2346 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2347 break;
2348 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2349 if (insn & (1 << 16))
2350 tcg_gen_shri_i32(tmp, tmp, 16);
2351 if (insn & (1 << 17))
2352 tcg_gen_shri_i32(tmp2, tmp2, 16);
2353 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2354 break;
2355 default:
2356 tcg_temp_free_i32(tmp2);
2357 tcg_temp_free_i32(tmp);
2358 return 1;
2359 }
2360 tcg_temp_free_i32(tmp2);
2361 tcg_temp_free_i32(tmp);
2362 gen_op_iwmmxt_movq_wRn_M0(wrd);
2363 gen_op_iwmmxt_set_mup();
2364 break;
2365 default:
2366 return 1;
2367 }
2368
2369 return 0;
2370 }
2371
2372 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2373 (i.e. an undefined instruction). */
2374 static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2375 {
2376 int acc, rd0, rd1, rdhi, rdlo;
2377 TCGv tmp, tmp2;
2378
2379 if ((insn & 0x0ff00f10) == 0x0e200010) {
2380 /* Multiply with Internal Accumulate Format */
2381 rd0 = (insn >> 12) & 0xf;
2382 rd1 = insn & 0xf;
2383 acc = (insn >> 5) & 7;
2384
2385 if (acc != 0)
2386 return 1;
2387
2388 tmp = load_reg(s, rd0);
2389 tmp2 = load_reg(s, rd1);
2390 switch ((insn >> 16) & 0xf) {
2391 case 0x0: /* MIA */
2392 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2393 break;
2394 case 0x8: /* MIAPH */
2395 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2396 break;
2397 case 0xc: /* MIABB */
2398 case 0xd: /* MIABT */
2399 case 0xe: /* MIATB */
2400 case 0xf: /* MIATT */
2401 if (insn & (1 << 16))
2402 tcg_gen_shri_i32(tmp, tmp, 16);
2403 if (insn & (1 << 17))
2404 tcg_gen_shri_i32(tmp2, tmp2, 16);
2405 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2406 break;
2407 default:
2408 return 1;
2409 }
2410 tcg_temp_free_i32(tmp2);
2411 tcg_temp_free_i32(tmp);
2412
2413 gen_op_iwmmxt_movq_wRn_M0(acc);
2414 return 0;
2415 }
2416
2417 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2418 /* Internal Accumulator Access Format */
2419 rdhi = (insn >> 16) & 0xf;
2420 rdlo = (insn >> 12) & 0xf;
2421 acc = insn & 7;
2422
2423 if (acc != 0)
2424 return 1;
2425
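/* The XScale accumulator acc0 is 40 bits wide: MRA returns the low
 * 32 bits in RdLo and bits 39..32 in RdHi, hence the
 * (1 << (40 - 32)) - 1 mask applied to RdHi below. */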
2426 if (insn & ARM_CP_RW_BIT) { /* MRA */
2427 iwmmxt_load_reg(cpu_V0, acc);
2428 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2429 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2430 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2431 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2432 } else { /* MAR */
2433 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2434 iwmmxt_store_reg(cpu_V0, acc);
2435 }
2436 return 0;
2437 }
2438
2439 return 1;
2440 }
2441
2442 /* Disassemble system coprocessor instruction. Return nonzero if
2443 instruction is not defined. */
2444 static int disas_cp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2445 {
2446 TCGv tmp, tmp2;
2447 uint32_t rd = (insn >> 12) & 0xf;
2448 uint32_t cp = (insn >> 8) & 0xf;
2449 if (IS_USER(s)) {
2450 return 1;
2451 }
2452
2453 if (insn & ARM_CP_RW_BIT) {
2454 if (!env->cp[cp].cp_read)
2455 return 1;
2456 gen_set_pc_im(s->pc);
2457 tmp = tcg_temp_new_i32();
2458 tmp2 = tcg_const_i32(insn);
2459 gen_helper_get_cp(tmp, cpu_env, tmp2);
2460 tcg_temp_free(tmp2);
2461 store_reg(s, rd, tmp);
2462 } else {
2463 if (!env->cp[cp].cp_write)
2464 return 1;
2465 gen_set_pc_im(s->pc);
2466 tmp = load_reg(s, rd);
2467 tmp2 = tcg_const_i32(insn);
2468 gen_helper_set_cp(cpu_env, tmp2, tmp);
2469 tcg_temp_free(tmp2);
2470 tcg_temp_free_i32(tmp);
2471 }
2472 return 0;
2473 }
2474
2475 static int cp15_user_ok(CPUARMState *env, uint32_t insn)
2476 {
2477 int cpn = (insn >> 16) & 0xf;
2478 int cpm = insn & 0xf;
2479 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2480
2481 if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
2482 /* Performance monitor registers fall into three categories:
2483 * (a) always UNDEF in usermode
2484 * (b) UNDEF only if PMUSERENR.EN is 0
2485 * (c) always read OK and UNDEF on write (PMUSERENR only)
2486 */
2487 if ((cpm == 12 && (op < 6)) ||
2488 (cpm == 13 && (op < 3))) {
2489 return env->cp15.c9_pmuserenr;
2490 } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
2491 /* PMUSERENR, read only */
2492 return 1;
2493 }
2494 return 0;
2495 }
2496
2497 if (cpn == 13 && cpm == 0) {
2498 /* TLS register. */
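/* c13,0,2 is TPIDRURW (user read/write) and c13,0,3 is TPIDRURO
 * (user read-only), so for op 3 only a read is permitted here. */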
2499 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2500 return 1;
2501 }
2502 return 0;
2503 }
2504
2505 static int cp15_tls_load_store(CPUARMState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2506 {
2507 TCGv tmp;
2508 int cpn = (insn >> 16) & 0xf;
2509 int cpm = insn & 0xf;
2510 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2511
2512 if (!arm_feature(env, ARM_FEATURE_V6K))
2513 return 0;
2514
2515 if (!(cpn == 13 && cpm == 0))
2516 return 0;
2517
2518 if (insn & ARM_CP_RW_BIT) {
2519 switch (op) {
2520 case 2:
2521 tmp = load_cpu_field(cp15.c13_tls1);
2522 break;
2523 case 3:
2524 tmp = load_cpu_field(cp15.c13_tls2);
2525 break;
2526 case 4:
2527 tmp = load_cpu_field(cp15.c13_tls3);
2528 break;
2529 default:
2530 return 0;
2531 }
2532 store_reg(s, rd, tmp);
2533
2534 } else {
2535 tmp = load_reg(s, rd);
2536 switch (op) {
2537 case 2:
2538 store_cpu_field(tmp, cp15.c13_tls1);
2539 break;
2540 case 3:
2541 store_cpu_field(tmp, cp15.c13_tls2);
2542 break;
2543 case 4:
2544 store_cpu_field(tmp, cp15.c13_tls3);
2545 break;
2546 default:
2547 tcg_temp_free_i32(tmp);
2548 return 0;
2549 }
2550 }
2551 return 1;
2552 }
2553
2554 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2555 instruction is not defined. */
2556 static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2557 {
2558 uint32_t rd;
2559 TCGv tmp, tmp2;
2560
2561 /* M profile cores use memory mapped registers instead of cp15. */
2562 if (arm_feature(env, ARM_FEATURE_M))
2563 return 1;
2564
2565 if ((insn & (1 << 25)) == 0) {
2566 if (insn & (1 << 20)) {
2567 /* mrrc */
2568 return 1;
2569 }
2570 /* mcrr. Used for block cache operations, so implement as no-op. */
2571 return 0;
2572 }
2573 if ((insn & (1 << 4)) == 0) {
2574 /* cdp */
2575 return 1;
2576 }
2577 /* We special case a number of cp15 instructions which were used
2578 * for things which are real instructions in ARMv7. This allows
2579 * them to work in linux-user mode which doesn't provide functional
2580 * get_cp15/set_cp15 helpers, and is more efficient anyway.
2581 */
2582 switch ((insn & 0x0fff0fff)) {
2583 case 0x0e070f90:
2584 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2585 * In v7, this must NOP.
2586 */
2587 if (IS_USER(s)) {
2588 return 1;
2589 }
2590 if (!arm_feature(env, ARM_FEATURE_V7)) {
2591 /* Wait for interrupt. */
2592 gen_set_pc_im(s->pc);
2593 s->is_jmp = DISAS_WFI;
2594 }
2595 return 0;
2596 case 0x0e070f58:
2597 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2598 * so this is slightly over-broad.
2599 */
2600 if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) {
2601 /* Wait for interrupt. */
2602 gen_set_pc_im(s->pc);
2603 s->is_jmp = DISAS_WFI;
2604 return 0;
2605 }
2606 /* Otherwise continue to handle via helper function.
2607 * In particular, on v7 and some v6 cores this is one of
2608 * the VA-PA registers.
2609 */
2610 break;
2611 case 0x0e070f3d:
2612 /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */
2613 if (arm_feature(env, ARM_FEATURE_V6)) {
2614 return IS_USER(s) ? 1 : 0;
2615 }
2616 break;
2617 case 0x0e070f95: /* 0,c7,c5,4 : ISB */
2618 case 0x0e070f9a: /* 0,c7,c10,4: DSB */
2619 case 0x0e070fba: /* 0,c7,c10,5: DMB */
2620 /* Barriers in both v6 and v7 */
2621 if (arm_feature(env, ARM_FEATURE_V6)) {
2622 return 0;
2623 }
2624 break;
2625 default:
2626 break;
2627 }
2628
2629 if (IS_USER(s) && !cp15_user_ok(env, insn)) {
2630 return 1;
2631 }
2632
2633 rd = (insn >> 12) & 0xf;
2634
2635 if (cp15_tls_load_store(env, s, insn, rd))
2636 return 0;
2637
2638 tmp2 = tcg_const_i32(insn);
2639 if (insn & ARM_CP_RW_BIT) {
2640 tmp = tcg_temp_new_i32();
2641 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2642 /* If the destination register is r15 then the condition codes are set. */
2643 if (rd != 15)
2644 store_reg(s, rd, tmp);
2645 else
2646 tcg_temp_free_i32(tmp);
2647 } else {
2648 tmp = load_reg(s, rd);
2649 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2650 tcg_temp_free_i32(tmp);
2651 /* Normally we would always end the TB here, but Linux
2652 * arch/arm/mach-pxa/sleep.S expects two instructions following
2653 * an MMU enable to execute from cache. Imitate this behaviour. */
2654 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2655 (insn & 0x0fff0fff) != 0x0e010f10)
2656 gen_lookup_tb(s);
2657 }
2658 tcg_temp_free_i32(tmp2);
2659 return 0;
2660 }
2661
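/* The macros below extract VFP register numbers, which are split across
 * the instruction: a 4-bit "bigbit" field holds the upper bits and a
 * single "smallbit" the remaining bit. For single precision the small
 * bit is the LSB of the register number, so e.g. S17 encodes as bigbit
 * field 8 with small bit 1; for VFP3 doubles it is the MSB, so D17
 * encodes as bigbit field 1 with small bit 1. Pre-VFP3 cores only have
 * D0-D15, hence the UNDEF when the small bit is set there. */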
2662 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2663 #define VFP_SREG(insn, bigbit, smallbit) \
2664 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2665 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2666 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2667 reg = (((insn) >> (bigbit)) & 0x0f) \
2668 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2669 } else { \
2670 if (insn & (1 << (smallbit))) \
2671 return 1; \
2672 reg = ((insn) >> (bigbit)) & 0x0f; \
2673 }} while (0)
2674
2675 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2676 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2677 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2678 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2679 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2680 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2681
2682 /* Move between integer and VFP cores. */
2683 static TCGv gen_vfp_mrs(void)
2684 {
2685 TCGv tmp = tcg_temp_new_i32();
2686 tcg_gen_mov_i32(tmp, cpu_F0s);
2687 return tmp;
2688 }
2689
2690 static void gen_vfp_msr(TCGv tmp)
2691 {
2692 tcg_gen_mov_i32(cpu_F0s, tmp);
2693 tcg_temp_free_i32(tmp);
2694 }
2695
2696 static void gen_neon_dup_u8(TCGv var, int shift)
2697 {
2698 TCGv tmp = tcg_temp_new_i32();
2699 if (shift)
2700 tcg_gen_shri_i32(var, var, shift);
2701 tcg_gen_ext8u_i32(var, var);
2702 tcg_gen_shli_i32(tmp, var, 8);
2703 tcg_gen_or_i32(var, var, tmp);
2704 tcg_gen_shli_i32(tmp, var, 16);
2705 tcg_gen_or_i32(var, var, tmp);
2706 tcg_temp_free_i32(tmp);
2707 }
2708
2709 static void gen_neon_dup_low16(TCGv var)
2710 {
2711 TCGv tmp = tcg_temp_new_i32();
2712 tcg_gen_ext16u_i32(var, var);
2713 tcg_gen_shli_i32(tmp, var, 16);
2714 tcg_gen_or_i32(var, var, tmp);
2715 tcg_temp_free_i32(tmp);
2716 }
2717
2718 static void gen_neon_dup_high16(TCGv var)
2719 {
2720 TCGv tmp = tcg_temp_new_i32();
2721 tcg_gen_andi_i32(var, var, 0xffff0000);
2722 tcg_gen_shri_i32(tmp, var, 16);
2723 tcg_gen_or_i32(var, var, tmp);
2724 tcg_temp_free_i32(tmp);
2725 }
2726
2727 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2728 {
2729 /* Load a single Neon element and replicate into a 32-bit TCG reg */
2730 TCGv tmp;
2731 switch (size) {
2732 case 0:
2733 tmp = gen_ld8u(addr, IS_USER(s));
2734 gen_neon_dup_u8(tmp, 0);
2735 break;
2736 case 1:
2737 tmp = gen_ld16u(addr, IS_USER(s));
2738 gen_neon_dup_low16(tmp);
2739 break;
2740 case 2:
2741 tmp = gen_ld32(addr, IS_USER(s));
2742 break;
2743 default: /* Avoid compiler warnings. */
2744 abort();
2745 }
2746 return tmp;
2747 }
2748
2749 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2750 (i.e. an undefined instruction). */
2751 static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
2752 {
2753 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2754 int dp, veclen;
2755 TCGv addr;
2756 TCGv tmp;
2757 TCGv tmp2;
2758
2759 if (!arm_feature(env, ARM_FEATURE_VFP))
2760 return 1;
2761
2762 if (!s->vfp_enabled) {
2763 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2764 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2765 return 1;
2766 rn = (insn >> 16) & 0xf;
2767 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2768 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2769 return 1;
2770 }
2771 dp = ((insn & 0xf00) == 0xb00);
2772 switch ((insn >> 24) & 0xf) {
2773 case 0xe:
2774 if (insn & (1 << 4)) {
2775 /* single register transfer */
2776 rd = (insn >> 12) & 0xf;
2777 if (dp) {
2778 int size;
2779 int pass;
2780
2781 VFP_DREG_N(rn, insn);
2782 if (insn & 0xf)
2783 return 1;
2784 if (insn & 0x00c00060
2785 && !arm_feature(env, ARM_FEATURE_NEON))
2786 return 1;
2787
2788 pass = (insn >> 21) & 1;
2789 if (insn & (1 << 22)) {
2790 size = 0;
2791 offset = ((insn >> 5) & 3) * 8;
2792 } else if (insn & (1 << 5)) {
2793 size = 1;
2794 offset = (insn & (1 << 6)) ? 16 : 0;
2795 } else {
2796 size = 2;
2797 offset = 0;
2798 }
2799 if (insn & ARM_CP_RW_BIT) {
2800 /* vfp->arm */
2801 tmp = neon_load_reg(rn, pass);
2802 switch (size) {
2803 case 0:
2804 if (offset)
2805 tcg_gen_shri_i32(tmp, tmp, offset);
2806 if (insn & (1 << 23))
2807 gen_uxtb(tmp);
2808 else
2809 gen_sxtb(tmp);
2810 break;
2811 case 1:
2812 if (insn & (1 << 23)) {
2813 if (offset) {
2814 tcg_gen_shri_i32(tmp, tmp, 16);
2815 } else {
2816 gen_uxth(tmp);
2817 }
2818 } else {
2819 if (offset) {
2820 tcg_gen_sari_i32(tmp, tmp, 16);
2821 } else {
2822 gen_sxth(tmp);
2823 }
2824 }
2825 break;
2826 case 2:
2827 break;
2828 }
2829 store_reg(s, rd, tmp);
2830 } else {
2831 /* arm->vfp */
2832 tmp = load_reg(s, rd);
2833 if (insn & (1 << 23)) {
2834 /* VDUP */
2835 if (size == 0) {
2836 gen_neon_dup_u8(tmp, 0);
2837 } else if (size == 1) {
2838 gen_neon_dup_low16(tmp);
2839 }
2840 for (n = 0; n <= pass * 2; n++) {
2841 tmp2 = tcg_temp_new_i32();
2842 tcg_gen_mov_i32(tmp2, tmp);
2843 neon_store_reg(rn, n, tmp2);
2844 }
2845 neon_store_reg(rn, n, tmp);
2846 } else {
2847 /* VMOV */
2848 switch (size) {
2849 case 0:
2850 tmp2 = neon_load_reg(rn, pass);
2851 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2852 tcg_temp_free_i32(tmp2);
2853 break;
2854 case 1:
2855 tmp2 = neon_load_reg(rn, pass);
2856 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2857 tcg_temp_free_i32(tmp2);
2858 break;
2859 case 2:
2860 break;
2861 }
2862 neon_store_reg(rn, pass, tmp);
2863 }
2864 }
2865 } else { /* !dp */
2866 if ((insn & 0x6f) != 0x00)
2867 return 1;
2868 rn = VFP_SREG_N(insn);
2869 if (insn & ARM_CP_RW_BIT) {
2870 /* vfp->arm */
2871 if (insn & (1 << 21)) {
2872 /* system register */
2873 rn >>= 1;
2874
2875 switch (rn) {
2876 case ARM_VFP_FPSID:
2877 /* VFP2 allows access to FPSID from userspace.
2878 VFP3 restricts all id registers to privileged
2879 accesses. */
2880 if (IS_USER(s)
2881 && arm_feature(env, ARM_FEATURE_VFP3))
2882 return 1;
2883 tmp = load_cpu_field(vfp.xregs[rn]);
2884 break;
2885 case ARM_VFP_FPEXC:
2886 if (IS_USER(s))
2887 return 1;
2888 tmp = load_cpu_field(vfp.xregs[rn]);
2889 break;
2890 case ARM_VFP_FPINST:
2891 case ARM_VFP_FPINST2:
2892 /* Not present in VFP3. */
2893 if (IS_USER(s)
2894 || arm_feature(env, ARM_FEATURE_VFP3))
2895 return 1;
2896 tmp = load_cpu_field(vfp.xregs[rn]);
2897 break;
2898 case ARM_VFP_FPSCR:
2899 if (rd == 15) {
2900 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2901 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2902 } else {
2903 tmp = tcg_temp_new_i32();
2904 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2905 }
2906 break;
2907 case ARM_VFP_MVFR0:
2908 case ARM_VFP_MVFR1:
2909 if (IS_USER(s)
2910 || !arm_feature(env, ARM_FEATURE_MVFR))
2911 return 1;
2912 tmp = load_cpu_field(vfp.xregs[rn]);
2913 break;
2914 default:
2915 return 1;
2916 }
2917 } else {
2918 gen_mov_F0_vreg(0, rn);
2919 tmp = gen_vfp_mrs();
2920 }
2921 if (rd == 15) {
2922 /* Set the 4 flag bits in the CPSR. */
2923 gen_set_nzcv(tmp);
2924 tcg_temp_free_i32(tmp);
2925 } else {
2926 store_reg(s, rd, tmp);
2927 }
2928 } else {
2929 /* arm->vfp */
2930 tmp = load_reg(s, rd);
2931 if (insn & (1 << 21)) {
2932 rn >>= 1;
2933 /* system register */
2934 switch (rn) {
2935 case ARM_VFP_FPSID:
2936 case ARM_VFP_MVFR0:
2937 case ARM_VFP_MVFR1:
2938 /* Writes are ignored. */
2939 break;
2940 case ARM_VFP_FPSCR:
2941 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2942 tcg_temp_free_i32(tmp);
2943 gen_lookup_tb(s);
2944 break;
2945 case ARM_VFP_FPEXC:
2946 if (IS_USER(s))
2947 return 1;
2948 /* TODO: VFP subarchitecture support.
2949 * For now, keep only the EN bit. */
2950 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2951 store_cpu_field(tmp, vfp.xregs[rn]);
2952 gen_lookup_tb(s);
2953 break;
2954 case ARM_VFP_FPINST:
2955 case ARM_VFP_FPINST2:
2956 store_cpu_field(tmp, vfp.xregs[rn]);
2957 break;
2958 default:
2959 return 1;
2960 }
2961 } else {
2962 gen_vfp_msr(tmp);
2963 gen_mov_vreg_F0(0, rn);
2964 }
2965 }
2966 }
2967 } else {
2968 /* data processing */
2969 /* The opcode is in bits 23, 21, 20 and 6. */
2970 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
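/* e.g. VSUB has bit 23 = 0, bits 21:20 = 0b11 and bit 6 = 1, giving
 * op = 7 (the "sub" case below); op == 15 selects the extension space,
 * which is decoded further via rn. */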
2971 if (dp) {
2972 if (op == 15) {
2973 /* rn is opcode */
2974 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2975 } else {
2976 /* rn is register number */
2977 VFP_DREG_N(rn, insn);
2978 }
2979
2980 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2981 /* Integer or single precision destination. */
2982 rd = VFP_SREG_D(insn);
2983 } else {
2984 VFP_DREG_D(rd, insn);
2985 }
2986 if (op == 15 &&
2987 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2988 /* VCVT from int is always from S reg regardless of dp bit.
2989 * VCVT with immediate frac_bits has same format as SREG_M
2990 */
2991 rm = VFP_SREG_M(insn);
2992 } else {
2993 VFP_DREG_M(rm, insn);
2994 }
2995 } else {
2996 rn = VFP_SREG_N(insn);
2997 if (op == 15 && rn == 15) {
2998 /* Double precision destination. */
2999 VFP_DREG_D(rd, insn);
3000 } else {
3001 rd = VFP_SREG_D(insn);
3002 }
3003 /* NB that we implicitly rely on the encoding for the frac_bits
3004 * in VCVT of fixed to float being the same as that of an SREG_M
3005 */
3006 rm = VFP_SREG_M(insn);
3007 }
3008
3009 veclen = s->vec_len;
3010 if (op == 15 && rn > 3)
3011 veclen = 0;
3012
3013 /* Shut up compiler warnings. */
3014 delta_m = 0;
3015 delta_d = 0;
3016 bank_mask = 0;
3017
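/* VFP short vectors: a nonzero FPSCR.LEN turns most operations into
 * vector ops over circular register banks (8 single-precision or 4
 * double-precision registers each). A destination in bank 0 makes the
 * operation scalar, and a second operand in bank 0 is a scalar
 * broadcast against a vector; delta_d/delta_m are the per-iteration
 * register strides used by the main loop below. */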
3018 if (veclen > 0) {
3019 if (dp)
3020 bank_mask = 0xc;
3021 else
3022 bank_mask = 0x18;
3023
3024 /* Figure out what type of vector operation this is. */
3025 if ((rd & bank_mask) == 0) {
3026 /* scalar */
3027 veclen = 0;
3028 } else {
3029 if (dp)
3030 delta_d = (s->vec_stride >> 1) + 1;
3031 else
3032 delta_d = s->vec_stride + 1;
3033
3034 if ((rm & bank_mask) == 0) {
3035 /* mixed scalar/vector */
3036 delta_m = 0;
3037 } else {
3038 /* vector */
3039 delta_m = delta_d;
3040 }
3041 }
3042 }
3043
3044 /* Load the initial operands. */
3045 if (op == 15) {
3046 switch (rn) {
3047 case 16:
3048 case 17:
3049 /* Integer source */
3050 gen_mov_F0_vreg(0, rm);
3051 break;
3052 case 8:
3053 case 9:
3054 /* Compare */
3055 gen_mov_F0_vreg(dp, rd);
3056 gen_mov_F1_vreg(dp, rm);
3057 break;
3058 case 10:
3059 case 11:
3060 /* Compare with zero */
3061 gen_mov_F0_vreg(dp, rd);
3062 gen_vfp_F1_ld0(dp);
3063 break;
3064 case 20:
3065 case 21:
3066 case 22:
3067 case 23:
3068 case 28:
3069 case 29:
3070 case 30:
3071 case 31:
3072 /* Source and destination the same. */
3073 gen_mov_F0_vreg(dp, rd);
3074 break;
3075 case 4:
3076 case 5:
3077 case 6:
3078 case 7:
3079 /* VCVTB, VCVTT: only present with the halfprec extension,
3080 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
3081 */
3082 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
3083 return 1;
3084 }
3085 /* Otherwise fall through */
3086 default:
3087 /* One source operand. */
3088 gen_mov_F0_vreg(dp, rm);
3089 break;
3090 }
3091 } else {
3092 /* Two source operands. */
3093 gen_mov_F0_vreg(dp, rn);
3094 gen_mov_F1_vreg(dp, rm);
3095 }
3096
3097 for (;;) {
3098 /* Perform the calculation. */
3099 switch (op) {
3100 case 0: /* VMLA: fd + (fn * fm) */
3101 /* Note that the order of inputs to the add matters for NaNs */
3102 gen_vfp_F1_mul(dp);
3103 gen_mov_F0_vreg(dp, rd);
3104 gen_vfp_add(dp);
3105 break;
3106 case 1: /* VMLS: fd + -(fn * fm) */
3107 gen_vfp_mul(dp);
3108 gen_vfp_F1_neg(dp);
3109 gen_mov_F0_vreg(dp, rd);
3110 gen_vfp_add(dp);
3111 break;
3112 case 2: /* VNMLS: -fd + (fn * fm) */
3113 /* Note that it isn't valid to replace (-A + B) with (B - A)
3114 * or similar plausible looking simplifications
3115 * because this will give wrong results for NaNs.
3116 */
3117 gen_vfp_F1_mul(dp);
3118 gen_mov_F0_vreg(dp, rd);
3119 gen_vfp_neg(dp);
3120 gen_vfp_add(dp);
3121 break;
3122 case 3: /* VNMLA: -fd + -(fn * fm) */
3123 gen_vfp_mul(dp);
3124 gen_vfp_F1_neg(dp);
3125 gen_mov_F0_vreg(dp, rd);
3126 gen_vfp_neg(dp);
3127 gen_vfp_add(dp);
3128 break;
3129 case 4: /* mul: fn * fm */
3130 gen_vfp_mul(dp);
3131 break;
3132 case 5: /* nmul: -(fn * fm) */
3133 gen_vfp_mul(dp);
3134 gen_vfp_neg(dp);
3135 break;
3136 case 6: /* add: fn + fm */
3137 gen_vfp_add(dp);
3138 break;
3139 case 7: /* sub: fn - fm */
3140 gen_vfp_sub(dp);
3141 break;
3142 case 8: /* div: fn / fm */
3143 gen_vfp_div(dp);
3144 break;
3145 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3146 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3147 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3148 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3149 /* These are fused multiply-add, and must be done as one
3150 * floating point operation with no rounding between the
3151 * multiplication and addition steps.
3152 * NB that doing the negations here as separate steps is
3153 * correct: an input NaN should come out with its sign bit
3154 * flipped if it is a negated input.
3155 */
3156 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3157 return 1;
3158 }
3159 if (dp) {
3160 TCGv_ptr fpst;
3161 TCGv_i64 frd;
3162 if (op & 1) {
3163 /* VFNMS, VFMS */
3164 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3165 }
3166 frd = tcg_temp_new_i64();
3167 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3168 if (op & 2) {
3169 /* VFNMA, VFNMS */
3170 gen_helper_vfp_negd(frd, frd);
3171 }
3172 fpst = get_fpstatus_ptr(0);
3173 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3174 cpu_F1d, frd, fpst);
3175 tcg_temp_free_ptr(fpst);
3176 tcg_temp_free_i64(frd);
3177 } else {
3178 TCGv_ptr fpst;
3179 TCGv_i32 frd;
3180 if (op & 1) {
3181 /* VFNMS, VFMS */
3182 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3183 }
3184 frd = tcg_temp_new_i32();
3185 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3186 if (op & 2) {
3187 gen_helper_vfp_negs(frd, frd);
3188 }
3189 fpst = get_fpstatus_ptr(0);
3190 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3191 cpu_F1s, frd, fpst);
3192 tcg_temp_free_ptr(fpst);
3193 tcg_temp_free_i32(frd);
3194 }
3195 break;
3196 case 14: /* fconst */
3197 if (!arm_feature(env, ARM_FEATURE_VFP3))
3198 return 1;
3199
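/* VFP3 VMOV immediate: bits 19:16 and 3:0 of the insn form an 8-bit
 * immediate which is expanded to a full-width constant (the
 * VFPExpandImm rule); e.g. imm8 = 0x70 expands to 0x3f800000, i.e.
 * 1.0f, in the single-precision path below. */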
3200 n = (insn << 12) & 0x80000000;
3201 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3202 if (dp) {
3203 if (i & 0x40)
3204 i |= 0x3f80;
3205 else
3206 i |= 0x4000;
3207 n |= i << 16;
3208 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3209 } else {
3210 if (i & 0x40)
3211 i |= 0x780;
3212 else
3213 i |= 0x800;
3214 n |= i << 19;
3215 tcg_gen_movi_i32(cpu_F0s, n);
3216 }
3217 break;
3218 case 15: /* extension space */
3219 switch (rn) {
3220 case 0: /* cpy */
3221 /* no-op */
3222 break;
3223 case 1: /* abs */
3224 gen_vfp_abs(dp);
3225 break;
3226 case 2: /* neg */
3227 gen_vfp_neg(dp);
3228 break;
3229 case 3: /* sqrt */
3230 gen_vfp_sqrt(dp);
3231 break;
3232 case 4: /* vcvtb.f32.f16 */
3233 tmp = gen_vfp_mrs();
3234 tcg_gen_ext16u_i32(tmp, tmp);
3235 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3236 tcg_temp_free_i32(tmp);
3237 break;
3238 case 5: /* vcvtt.f32.f16 */
3239 tmp = gen_vfp_mrs();
3240 tcg_gen_shri_i32(tmp, tmp, 16);
3241 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3242 tcg_temp_free_i32(tmp);
3243 break;
3244 case 6: /* vcvtb.f16.f32 */
3245 tmp = tcg_temp_new_i32();
3246 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3247 gen_mov_F0_vreg(0, rd);
3248 tmp2 = gen_vfp_mrs();
3249 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3250 tcg_gen_or_i32(tmp, tmp, tmp2);
3251 tcg_temp_free_i32(tmp2);
3252 gen_vfp_msr(tmp);
3253 break;
3254 case 7: /* vcvtt.f16.f32 */
3255 tmp = tcg_temp_new_i32();
3256 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3257 tcg_gen_shli_i32(tmp, tmp, 16);
3258 gen_mov_F0_vreg(0, rd);
3259 tmp2 = gen_vfp_mrs();
3260 tcg_gen_ext16u_i32(tmp2, tmp2);
3261 tcg_gen_or_i32(tmp, tmp, tmp2);
3262 tcg_temp_free_i32(tmp2);
3263 gen_vfp_msr(tmp);
3264 break;
3265 case 8: /* cmp */
3266 gen_vfp_cmp(dp);
3267 break;
3268 case 9: /* cmpe */
3269 gen_vfp_cmpe(dp);
3270 break;
3271 case 10: /* cmpz */
3272 gen_vfp_cmp(dp);
3273 break;
3274 case 11: /* cmpez */
3275 gen_vfp_F1_ld0(dp);
3276 gen_vfp_cmpe(dp);
3277 break;
3278 case 15: /* single<->double conversion */
3279 if (dp)
3280 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3281 else
3282 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3283 break;
3284 case 16: /* fuito */
3285 gen_vfp_uito(dp, 0);
3286 break;
3287 case 17: /* fsito */
3288 gen_vfp_sito(dp, 0);
3289 break;
3290 case 20: /* fshto */
3291 if (!arm_feature(env, ARM_FEATURE_VFP3))
3292 return 1;
3293 gen_vfp_shto(dp, 16 - rm, 0);
3294 break;
3295 case 21: /* fslto */
3296 if (!arm_feature(env, ARM_FEATURE_VFP3))
3297 return 1;
3298 gen_vfp_slto(dp, 32 - rm, 0);
3299 break;
3300 case 22: /* fuhto */
3301 if (!arm_feature(env, ARM_FEATURE_VFP3))
3302 return 1;
3303 gen_vfp_uhto(dp, 16 - rm, 0);
3304 break;
3305 case 23: /* fulto */
3306 if (!arm_feature(env, ARM_FEATURE_VFP3))
3307 return 1;
3308 gen_vfp_ulto(dp, 32 - rm, 0);
3309 break;
3310 case 24: /* ftoui */
3311 gen_vfp_toui(dp, 0);
3312 break;
3313 case 25: /* ftouiz */
3314 gen_vfp_touiz(dp, 0);
3315 break;
3316 case 26: /* ftosi */
3317 gen_vfp_tosi(dp, 0);
3318 break;
3319 case 27: /* ftosiz */
3320 gen_vfp_tosiz(dp, 0);
3321 break;
3322 case 28: /* ftosh */
3323 if (!arm_feature(env, ARM_FEATURE_VFP3))
3324 return 1;
3325 gen_vfp_tosh(dp, 16 - rm, 0);
3326 break;
3327 case 29: /* ftosl */
3328 if (!arm_feature(env, ARM_FEATURE_VFP3))
3329 return 1;
3330 gen_vfp_tosl(dp, 32 - rm, 0);
3331 break;
3332 case 30: /* ftouh */
3333 if (!arm_feature(env, ARM_FEATURE_VFP3))
3334 return 1;
3335 gen_vfp_touh(dp, 16 - rm, 0);
3336 break;
3337 case 31: /* ftoul */
3338 if (!arm_feature(env, ARM_FEATURE_VFP3))
3339 return 1;
3340 gen_vfp_toul(dp, 32 - rm, 0);
3341 break;
3342 default: /* undefined */
3343 return 1;
3344 }
3345 break;
3346 default: /* undefined */
3347 return 1;
3348 }
3349
3350 /* Write back the result. */
3351 if (op == 15 && (rn >= 8 && rn <= 11))
3352 ; /* Comparison, do nothing. */
3353 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3354 /* VCVT double to int: always integer result. */
3355 gen_mov_vreg_F0(0, rd);
3356 else if (op == 15 && rn == 15)
3357 /* conversion */
3358 gen_mov_vreg_F0(!dp, rd);
3359 else
3360 gen_mov_vreg_F0(dp, rd);
3361
3362 /* break out of the loop if we have finished */
3363 if (veclen == 0)
3364 break;
3365
3366 if (op == 15 && delta_m == 0) {
3367 /* single source one-many */
3368 while (veclen--) {
3369 rd = ((rd + delta_d) & (bank_mask - 1))
3370 | (rd & bank_mask);
3371 gen_mov_vreg_F0(dp, rd);
3372 }
3373 break;
3374 }
3375 /* Set up the next operands. */
3376 veclen--;
3377 rd = ((rd + delta_d) & (bank_mask - 1))
3378 | (rd & bank_mask);
3379
3380 if (op == 15) {
3381 /* One source operand. */
3382 rm = ((rm + delta_m) & (bank_mask - 1))
3383 | (rm & bank_mask);
3384 gen_mov_F0_vreg(dp, rm);
3385 } else {
3386 /* Two source operands. */
3387 rn = ((rn + delta_d) & (bank_mask - 1))
3388 | (rn & bank_mask);
3389 gen_mov_F0_vreg(dp, rn);
3390 if (delta_m) {
3391 rm = ((rm + delta_m) & (bank_mask - 1))
3392 | (rm & bank_mask);
3393 gen_mov_F1_vreg(dp, rm);
3394 }
3395 }
3396 }
3397 }
3398 break;
3399 case 0xc:
3400 case 0xd:
3401 if ((insn & 0x03e00000) == 0x00400000) {
3402 /* two-register transfer */
3403 rn = (insn >> 16) & 0xf;
3404 rd = (insn >> 12) & 0xf;
3405 if (dp) {
3406 VFP_DREG_M(rm, insn);
3407 } else {
3408 rm = VFP_SREG_M(insn);
3409 }
3410
3411 if (insn & ARM_CP_RW_BIT) {
3412 /* vfp->arm */
3413 if (dp) {
3414 gen_mov_F0_vreg(0, rm * 2);
3415 tmp = gen_vfp_mrs();
3416 store_reg(s, rd, tmp);
3417 gen_mov_F0_vreg(0, rm * 2 + 1);
3418 tmp = gen_vfp_mrs();
3419 store_reg(s, rn, tmp);
3420 } else {
3421 gen_mov_F0_vreg(0, rm);
3422 tmp = gen_vfp_mrs();
3423 store_reg(s, rd, tmp);
3424 gen_mov_F0_vreg(0, rm + 1);
3425 tmp = gen_vfp_mrs();
3426 store_reg(s, rn, tmp);
3427 }
3428 } else {
3429 /* arm->vfp */
3430 if (dp) {
3431 tmp = load_reg(s, rd);
3432 gen_vfp_msr(tmp);
3433 gen_mov_vreg_F0(0, rm * 2);
3434 tmp = load_reg(s, rn);
3435 gen_vfp_msr(tmp);
3436 gen_mov_vreg_F0(0, rm * 2 + 1);
3437 } else {
3438 tmp = load_reg(s, rd);
3439 gen_vfp_msr(tmp);
3440 gen_mov_vreg_F0(0, rm);
3441 tmp = load_reg(s, rn);
3442 gen_vfp_msr(tmp);
3443 gen_mov_vreg_F0(0, rm + 1);
3444 }
3445 }
3446 } else {
3447 /* Load/store */
3448 rn = (insn >> 16) & 0xf;
3449 if (dp)
3450 VFP_DREG_D(rd, insn);
3451 else
3452 rd = VFP_SREG_D(insn);
3453 if ((insn & 0x01200000) == 0x01000000) {
3454 /* Single load/store */
3455 offset = (insn & 0xff) << 2;
3456 if ((insn & (1 << 23)) == 0)
3457 offset = -offset;
3458 if (s->thumb && rn == 15) {
3459 /* This is actually UNPREDICTABLE */
3460 addr = tcg_temp_new_i32();
3461 tcg_gen_movi_i32(addr, s->pc & ~2);
3462 } else {
3463 addr = load_reg(s, rn);
3464 }
3465 tcg_gen_addi_i32(addr, addr, offset);
3466 if (insn & (1 << 20)) {
3467 gen_vfp_ld(s, dp, addr);
3468 gen_mov_vreg_F0(dp, rd);
3469 } else {
3470 gen_mov_F0_vreg(dp, rd);
3471 gen_vfp_st(s, dp, addr);
3472 }
3473 tcg_temp_free_i32(addr);
3474 } else {
3475 /* load/store multiple */
3476 int w = insn & (1 << 21);
3477 if (dp)
3478 n = (insn >> 1) & 0x7f;
3479 else
3480 n = insn & 0xff;
3481
3482 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3483 /* P == U, W == 1 => UNDEF */
3484 return 1;
3485 }
3486 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3487 /* UNPREDICTABLE cases for bad immediates: we choose to
3488 * UNDEF to avoid generating huge numbers of TCG ops
3489 */
3490 return 1;
3491 }
3492 if (rn == 15 && w) {
3493 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3494 return 1;
3495 }
3496
3497 if (s->thumb && rn == 15) {
3498 /* This is actually UNPREDICTABLE */
3499 addr = tcg_temp_new_i32();
3500 tcg_gen_movi_i32(addr, s->pc & ~2);
3501 } else {
3502 addr = load_reg(s, rn);
3503 }
3504 if (insn & (1 << 24)) /* pre-decrement */
3505 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3506
3507 if (dp)
3508 offset = 8;
3509 else
3510 offset = 4;
3511 for (i = 0; i < n; i++) {
3512 if (insn & ARM_CP_RW_BIT) {
3513 /* load */
3514 gen_vfp_ld(s, dp, addr);
3515 gen_mov_vreg_F0(dp, rd + i);
3516 } else {
3517 /* store */
3518 gen_mov_F0_vreg(dp, rd + i);
3519 gen_vfp_st(s, dp, addr);
3520 }
3521 tcg_gen_addi_i32(addr, addr, offset);
3522 }
3523 if (w) {
3524 /* writeback */
3525 if (insn & (1 << 24))
3526 offset = -offset * n;
3527 else if (dp && (insn & 1))
3528 offset = 4;
3529 else
3530 offset = 0;
3531
3532 if (offset != 0)
3533 tcg_gen_addi_i32(addr, addr, offset);
3534 store_reg(s, rn, addr);
3535 } else {
3536 tcg_temp_free_i32(addr);
3537 }
3538 }
3539 }
3540 break;
3541 default:
3542 /* Should never happen. */
3543 return 1;
3544 }
3545 return 0;
3546 }
3547
3548 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3549 {
3550 TranslationBlock *tb;
3551
3552 tb = s->tb;
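/* Direct block chaining with goto_tb is only used when the destination
 * lies on the same guest page as this TB, so the link cannot outlive a
 * remapping of the target page; otherwise exit to the main loop with
 * the PC updated and let the target be looked up afresh. */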
3553 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3554 tcg_gen_goto_tb(n);
3555 gen_set_pc_im(dest);
3556 tcg_gen_exit_tb((tcg_target_long)tb + n);
3557 } else {
3558 gen_set_pc_im(dest);
3559 tcg_gen_exit_tb(0);
3560 }
3561 }
3562
3563 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3564 {
3565 if (unlikely(s->singlestep_enabled)) {
3566 /* An indirect jump so that we still trigger the debug exception. */
3567 if (s->thumb)
3568 dest |= 1;
3569 gen_bx_im(s, dest);
3570 } else {
3571 gen_goto_tb(s, 0, dest);
3572 s->is_jmp = DISAS_TB_JUMP;
3573 }
3574 }
3575
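/* 16x16->32 signed multiply step shared by the DSP multiplies
 * (SMULxy/SMLAxy and friends): x and y select the top (1) or bottom
 * (0) halfword of t0 and t1 respectively. */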
3576 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3577 {
3578 if (x)
3579 tcg_gen_sari_i32(t0, t0, 16);
3580 else
3581 gen_sxth(t0);
3582 if (y)
3583 tcg_gen_sari_i32(t1, t1, 16);
3584 else
3585 gen_sxth(t1);
3586 tcg_gen_mul_i32(t0, t0, t1);
3587 }
3588
3589 /* Return the mask of PSR bits set by a MSR instruction. */
3590 static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
3591 uint32_t mask;
3592
3593 mask = 0;
3594 if (flags & (1 << 0))
3595 mask |= 0xff;
3596 if (flags & (1 << 1))
3597 mask |= 0xff00;
3598 if (flags & (1 << 2))
3599 mask |= 0xff0000;
3600 if (flags & (1 << 3))
3601 mask |= 0xff000000;
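/* The four flag bits are the MSR field mask: bit 0 = c (PSR[7:0]),
 * bit 1 = x (PSR[15:8]), bit 2 = s (PSR[23:16]) and bit 3 = f
 * (PSR[31:24]); e.g. MSR CPSR_fc, Rm passes flags = 0b1001 and selects
 * 0xff0000ff before the feature-based trimming below. */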
3602
3603 /* Mask out undefined bits. */
3604 mask &= ~CPSR_RESERVED;
3605 if (!arm_feature(env, ARM_FEATURE_V4T))
3606 mask &= ~CPSR_T;
3607 if (!arm_feature(env, ARM_FEATURE_V5))
3608 mask &= ~CPSR_Q; /* V5TE in reality */
3609 if (!arm_feature(env, ARM_FEATURE_V6))
3610 mask &= ~(CPSR_E | CPSR_GE);
3611 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3612 mask &= ~CPSR_IT;
3613 /* Mask out execution state bits. */
3614 if (!spsr)
3615 mask &= ~CPSR_EXEC;
3616 /* Mask out privileged bits. */
3617 if (IS_USER(s))
3618 mask &= CPSR_USER;
3619 return mask;
3620 }
3621
3622 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3623 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3624 {
3625 TCGv tmp;
3626 if (spsr) {
3627 /* ??? This is also undefined in system mode. */
3628 if (IS_USER(s))
3629 return 1;
3630
3631 tmp = load_cpu_field(spsr);
3632 tcg_gen_andi_i32(tmp, tmp, ~mask);
3633 tcg_gen_andi_i32(t0, t0, mask);
3634 tcg_gen_or_i32(tmp, tmp, t0);
3635 store_cpu_field(tmp, spsr);
3636 } else {
3637 gen_set_cpsr(t0, mask);
3638 }
3639 tcg_temp_free_i32(t0);
3640 gen_lookup_tb(s);
3641 return 0;
3642 }
3643
3644 /* Returns nonzero if access to the PSR is not permitted. */
3645 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3646 {
3647 TCGv tmp;
3648 tmp = tcg_temp_new_i32();
3649 tcg_gen_movi_i32(tmp, val);
3650 return gen_set_psr(s, mask, spsr, tmp);
3651 }
3652
3653 /* Generate an old-style exception return. Marks pc as dead. */
3654 static void gen_exception_return(DisasContext *s, TCGv pc)
3655 {
3656 TCGv tmp;
3657 store_reg(s, 15, pc);
3658 tmp = load_cpu_field(spsr);
3659 gen_set_cpsr(tmp, 0xffffffff);
3660 tcg_temp_free_i32(tmp);
3661 s->is_jmp = DISAS_UPDATE;
3662 }
3663
3664 /* Generate a v6 exception return. Marks both values as dead. */
3665 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3666 {
3667 gen_set_cpsr(cpsr, 0xffffffff);
3668 tcg_temp_free_i32(cpsr);
3669 store_reg(s, 15, pc);
3670 s->is_jmp = DISAS_UPDATE;
3671 }
3672
3673 static inline void
3674 gen_set_condexec (DisasContext *s)
3675 {
3676 if (s->condexec_mask) {
3677 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3678 TCGv tmp = tcg_temp_new_i32();
3679 tcg_gen_movi_i32(tmp, val);
3680 store_cpu_field(tmp, condexec_bits);
3681 }
3682 }
3683
3684 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3685 {
3686 gen_set_condexec(s);
3687 gen_set_pc_im(s->pc - offset);
3688 gen_exception(excp);
3689 s->is_jmp = DISAS_JUMP;
3690 }
3691
3692 static void gen_nop_hint(DisasContext *s, int val)
3693 {
3694 switch (val) {
3695 case 3: /* wfi */
3696 gen_set_pc_im(s->pc);
3697 s->is_jmp = DISAS_WFI;
3698 break;
3699 case 2: /* wfe */
3700 case 4: /* sev */
3701 /* TODO: Implement SEV and WFE. May help SMP performance. */
3702 default: /* nop */
3703 break;
3704 }
3705 }
3706
3707 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3708
3709 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3710 {
3711 switch (size) {
3712 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3713 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3714 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3715 default: abort();
3716 }
3717 }
3718
3719 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3720 {
3721 switch (size) {
3722 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3723 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3724 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3725 default: return;
3726 }
3727 }
3728
3729 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3730 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3731 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3732 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3733 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3734
3735 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3736 switch ((size << 1) | u) { \
3737 case 0: \
3738 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3739 break; \
3740 case 1: \
3741 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3742 break; \
3743 case 2: \
3744 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3745 break; \
3746 case 3: \
3747 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3748 break; \
3749 case 4: \
3750 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3751 break; \
3752 case 5: \
3753 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3754 break; \
3755 default: return 1; \
3756 }} while (0)
3757
3758 #define GEN_NEON_INTEGER_OP(name) do { \
3759 switch ((size << 1) | u) { \
3760 case 0: \
3761 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3762 break; \
3763 case 1: \
3764 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3765 break; \
3766 case 2: \
3767 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3768 break; \
3769 case 3: \
3770 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3771 break; \
3772 case 4: \
3773 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3774 break; \
3775 case 5: \
3776 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3777 break; \
3778 default: return 1; \
3779 }} while (0)
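/* In both macros the helper is selected by (size << 1) | u: cases 0/1
 * are signed/unsigned 8-bit elements, 2/3 are 16-bit and 4/5 are
 * 32-bit; 64-bit element sizes fall through to the default and cause
 * the caller to UNDEF. */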
3780
3781 static TCGv neon_load_scratch(int scratch)
3782 {
3783 TCGv tmp = tcg_temp_new_i32();
3784 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3785 return tmp;
3786 }
3787
3788 static void neon_store_scratch(int scratch, TCGv var)
3789 {
3790 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3791 tcg_temp_free_i32(var);
3792 }
3793
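/* Fetch a Neon scalar operand. For a 16-bit scalar, bits 2:0 of reg
 * select the D register, bit 4 the 32-bit half of it and bit 3 the
 * halfword within that half, which is then replicated across the
 * returned 32-bit value; a 32-bit scalar is returned as-is. */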
3794 static inline TCGv neon_get_scalar(int size, int reg)
3795 {
3796 TCGv tmp;
3797 if (size == 1) {
3798 tmp = neon_load_reg(reg & 7, reg >> 4);
3799 if (reg & 8) {
3800 gen_neon_dup_high16(tmp);
3801 } else {
3802 gen_neon_dup_low16(tmp);
3803 }
3804 } else {
3805 tmp = neon_load_reg(reg & 15, reg >> 4);
3806 }
3807 return tmp;
3808 }
3809
3810 static int gen_neon_unzip(int rd, int rm, int size, int q)
3811 {
3812 TCGv tmp, tmp2;
3813 if (!q && size == 2) {
3814 return 1;
3815 }
3816 tmp = tcg_const_i32(rd);
3817 tmp2 = tcg_const_i32(rm);
3818 if (q) {
3819 switch (size) {
3820 case 0:
3821 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3822 break;
3823 case 1:
3824 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3825 break;
3826 case 2:
3827 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3828 break;
3829 default:
3830 abort();
3831 }
3832 } else {
3833 switch (size) {
3834 case 0:
3835 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3836 break;
3837 case 1:
3838 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3839 break;
3840 default:
3841 abort();
3842 }
3843 }
3844 tcg_temp_free_i32(tmp);
3845 tcg_temp_free_i32(tmp2);
3846 return 0;
3847 }
3848
3849 static int gen_neon_zip(int rd, int rm, int size, int q)
3850 {
3851 TCGv tmp, tmp2;
3852 if (!q && size == 2) {
3853 return 1;
3854 }
3855 tmp = tcg_const_i32(rd);
3856 tmp2 = tcg_const_i32(rm);
3857 if (q) {
3858 switch (size) {
3859 case 0:
3860 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3861 break;
3862 case 1:
3863 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3864 break;
3865 case 2:
3866 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3867 break;
3868 default:
3869 abort();
3870 }
3871 } else {
3872 switch (size) {
3873 case 0:
3874 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3875 break;
3876 case 1:
3877 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3878 break;
3879 default:
3880 abort();
3881 }
3882 }
3883 tcg_temp_free_i32(tmp);
3884 tcg_temp_free_i32(tmp2);
3885 return 0;
3886 }
3887
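/* One 32-bit step of VTRN.8: the even-numbered bytes of the two inputs
 * are interleaved into t0 and the odd-numbered bytes into t1.
 * gen_neon_trn_u16 below does the same at halfword granularity. */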
3888 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3889 {
3890 TCGv rd, tmp;
3891
3892 rd = tcg_temp_new_i32();
3893 tmp = tcg_temp_new_i32();
3894
3895 tcg_gen_shli_i32(rd, t0, 8);
3896 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3897 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3898 tcg_gen_or_i32(rd, rd, tmp);
3899
3900 tcg_gen_shri_i32(t1, t1, 8);
3901 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3902 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3903 tcg_gen_or_i32(t1, t1, tmp);
3904 tcg_gen_mov_i32(t0, rd);
3905
3906 tcg_temp_free_i32(tmp);
3907 tcg_temp_free_i32(rd);
3908 }
3909
3910 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3911 {
3912 TCGv rd, tmp;
3913
3914 rd = tcg_temp_new_i32();
3915 tmp = tcg_temp_new_i32();
3916
3917 tcg_gen_shli_i32(rd, t0, 16);
3918 tcg_gen_andi_i32(tmp, t1, 0xffff);
3919 tcg_gen_or_i32(rd, rd, tmp);
3920 tcg_gen_shri_i32(t1, t1, 16);
3921 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3922 tcg_gen_or_i32(t1, t1, tmp);
3923 tcg_gen_mov_i32(t0, rd);
3924
3925 tcg_temp_free_i32(tmp);
3926 tcg_temp_free_i32(rd);
3927 }
3928
3929
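/* Per-op layout of the VLDn/VSTn "multiple structures" forms, indexed
 * by the type field (insn bits 11:8): e.g. entry 7 ({1, 1, 1}) is
 * VLD1/VST1 with a single register, while entry 0 ({4, 4, 1}) is
 * VLD4/VST4 with four fully interleaved registers. */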
3930 static struct {
3931 int nregs;
3932 int interleave;
3933 int spacing;
3934 } neon_ls_element_type[11] = {
3935 {4, 4, 1},
3936 {4, 4, 2},
3937 {4, 1, 1},
3938 {4, 2, 1},
3939 {3, 3, 1},
3940 {3, 3, 2},
3941 {3, 1, 1},
3942 {1, 1, 1},
3943 {2, 2, 1},
3944 {2, 2, 2},
3945 {2, 1, 1}
3946 };
3947
3948 /* Translate a NEON load/store element instruction. Return nonzero if the
3949 instruction is invalid. */
3950 static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
3951 {
3952 int rd, rn, rm;
3953 int op;
3954 int nregs;
3955 int interleave;
3956 int spacing;
3957 int stride;
3958 int size;
3959 int reg;
3960 int pass;
3961 int load;
3962 int shift;
3963 int n;
3964 TCGv addr;
3965 TCGv tmp;
3966 TCGv tmp2;
3967 TCGv_i64 tmp64;
3968
3969 if (!s->vfp_enabled)
3970 return 1;
3971 VFP_DREG_D(rd, insn);
3972 rn = (insn >> 16) & 0xf;
3973 rm = insn & 0xf;
3974 load = (insn & (1 << 21)) != 0;
3975 if ((insn & (1 << 23)) == 0) {
3976 /* Load store all elements. */
3977 op = (insn >> 8) & 0xf;
3978 size = (insn >> 6) & 3;
3979 if (op > 10)
3980 return 1;
3981 /* Catch UNDEF cases for bad values of align field */
3982 switch (op & 0xc) {
3983 case 4:
3984 if (((insn >> 5) & 1) == 1) {
3985 return 1;
3986 }
3987 break;
3988 case 8:
3989 if (((insn >> 4) & 3) == 3) {
3990 return 1;
3991 }
3992 break;
3993 default:
3994 break;
3995 }
3996 nregs = neon_ls_element_type[op].nregs;
3997 interleave = neon_ls_element_type[op].interleave;
3998 spacing = neon_ls_element_type[op].spacing;
3999 if (size == 3 && (interleave | spacing) != 1)
4000 return 1;
4001 addr = tcg_temp_new_i32();
4002 load_reg_var(s, addr, rn);
4003 stride = (1 << size) * interleave;
4004 for (reg = 0; reg < nregs; reg++) {
4005 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4006 load_reg_var(s, addr, rn);
4007 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4008 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4009 load_reg_var(s, addr, rn);
4010 tcg_gen_addi_i32(addr, addr, 1 << size);
4011 }
4012 if (size == 3) {
4013 if (load) {
4014 tmp64 = gen_ld64(addr, IS_USER(s));
4015 neon_store_reg64(tmp64, rd);
4016 tcg_temp_free_i64(tmp64);
4017 } else {
4018 tmp64 = tcg_temp_new_i64();
4019 neon_load_reg64(tmp64, rd);
4020 gen_st64(tmp64, addr, IS_USER(s));
4021 }
4022 tcg_gen_addi_i32(addr, addr, stride);
4023 } else {
4024 for (pass = 0; pass < 2; pass++) {
4025 if (size == 2) {
4026 if (load) {
4027 tmp = gen_ld32(addr, IS_USER(s));
4028 neon_store_reg(rd, pass, tmp);
4029 } else {
4030 tmp = neon_load_reg(rd, pass);
4031 gen_st32(tmp, addr, IS_USER(s));
4032 }
4033 tcg_gen_addi_i32(addr, addr, stride);
4034 } else if (size == 1) {
4035 if (load) {
4036 tmp = gen_ld16u(addr, IS_USER(s));
4037 tcg_gen_addi_i32(addr, addr, stride);
4038 tmp2 = gen_ld16u(addr, IS_USER(s));
4039 tcg_gen_addi_i32(addr, addr, stride);
4040 tcg_gen_shli_i32(tmp2, tmp2, 16);
4041 tcg_gen_or_i32(tmp, tmp, tmp2);
4042 tcg_temp_free_i32(tmp2);
4043 neon_store_reg(rd, pass, tmp);
4044 } else {
4045 tmp = neon_load_reg(rd, pass);
4046 tmp2 = tcg_temp_new_i32();
4047 tcg_gen_shri_i32(tmp2, tmp, 16);
4048 gen_st16(tmp, addr, IS_USER(s));
4049 tcg_gen_addi_i32(addr, addr, stride);
4050 gen_st16(tmp2, addr, IS_USER(s));
4051 tcg_gen_addi_i32(addr, addr, stride);
4052 }
4053 } else /* size == 0 */ {
4054 if (load) {
4055 TCGV_UNUSED(tmp2);
4056 for (n = 0; n < 4; n++) {
4057 tmp = gen_ld8u(addr, IS_USER(s));
4058 tcg_gen_addi_i32(addr, addr, stride);
4059 if (n == 0) {
4060 tmp2 = tmp;
4061 } else {
4062 tcg_gen_shli_i32(tmp, tmp, n * 8);
4063 tcg_gen_or_i32(tmp2, tmp2, tmp);
4064 tcg_temp_free_i32(tmp);
4065 }
4066 }
4067 neon_store_reg(rd, pass, tmp2);
4068 } else {
4069 tmp2 = neon_load_reg(rd, pass);
4070 for (n = 0; n < 4; n++) {
4071 tmp = tcg_temp_new_i32();
4072 if (n == 0) {
4073 tcg_gen_mov_i32(tmp, tmp2);
4074 } else {
4075 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4076 }
4077 gen_st8(tmp, addr, IS_USER(s));
4078 tcg_gen_addi_i32(addr, addr, stride);
4079 }
4080 tcg_temp_free_i32(tmp2);
4081 }
4082 }
4083 }
4084 }
4085 rd += spacing;
4086 }
4087 tcg_temp_free_i32(addr);
4088 stride = nregs * 8;
4089 } else {
4090 size = (insn >> 10) & 3;
4091 if (size == 3) {
4092 /* Load single element to all lanes. */
4093 int a = (insn >> 4) & 1;
4094 if (!load) {
4095 return 1;
4096 }
4097 size = (insn >> 6) & 3;
4098 nregs = ((insn >> 8) & 3) + 1;
4099
4100 if (size == 3) {
4101 if (nregs != 4 || a == 0) {
4102 return 1;
4103 }
4104 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4105 size = 2;
4106 }
4107 if (nregs == 1 && a == 1 && size == 0) {
4108 return 1;
4109 }
4110 if (nregs == 3 && a == 1) {
4111 return 1;
4112 }
4113 addr = tcg_temp_new_i32();
4114 load_reg_var(s, addr, rn);
4115 if (nregs == 1) {
4116 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4117 tmp = gen_load_and_replicate(s, addr, size);
4118 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4119 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4120 if (insn & (1 << 5)) {
4121 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4122 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4123 }
4124 tcg_temp_free_i32(tmp);
4125 } else {
4126 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4127 stride = (insn & (1 << 5)) ? 2 : 1;
4128 for (reg = 0; reg < nregs; reg++) {
4129 tmp = gen_load_and_replicate(s, addr, size);
4130 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4131 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4132 tcg_temp_free_i32(tmp);
4133 tcg_gen_addi_i32(addr, addr, 1 << size);
4134 rd += stride;
4135 }
4136 }
4137 tcg_temp_free_i32(addr);
4138 stride = (1 << size) * nregs;
4139 } else {
4140 /* Single element. */
4141 int idx = (insn >> 4) & 0xf;
4142 pass = (insn >> 7) & 1;
4143 switch (size) {
4144 case 0:
4145 shift = ((insn >> 5) & 3) * 8;
4146 stride = 1;
4147 break;
4148 case 1:
4149 shift = ((insn >> 6) & 1) * 16;
4150 stride = (insn & (1 << 5)) ? 2 : 1;
4151 break;
4152 case 2:
4153 shift = 0;
4154 stride = (insn & (1 << 6)) ? 2 : 1;
4155 break;
4156 default:
4157 abort();
4158 }
4159 nregs = ((insn >> 8) & 3) + 1;
4160 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4161 switch (nregs) {
4162 case 1:
4163 if (((idx & (1 << size)) != 0) ||
4164 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4165 return 1;
4166 }
4167 break;
4168 case 3:
4169 if ((idx & 1) != 0) {
4170 return 1;
4171 }
4172 /* fall through */
4173 case 2:
4174 if (size == 2 && (idx & 2) != 0) {
4175 return 1;
4176 }
4177 break;
4178 case 4:
4179 if ((size == 2) && ((idx & 3) == 3)) {
4180 return 1;
4181 }
4182 break;
4183 default:
4184 abort();
4185 }
4186 if ((rd + stride * (nregs - 1)) > 31) {
4187 /* Attempts to write off the end of the register file
4188 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4189 * the neon_load_reg() would write off the end of the array.
4190 */
4191 return 1;
4192 }
4193 addr = tcg_temp_new_i32();
4194 load_reg_var(s, addr, rn);
4195 for (reg = 0; reg < nregs; reg++) {
4196 if (load) {
4197 switch (size) {
4198 case 0:
4199 tmp = gen_ld8u(addr, IS_USER(s));
4200 break;
4201 case 1:
4202 tmp = gen_ld16u(addr, IS_USER(s));
4203 break;
4204 case 2:
4205 tmp = gen_ld32(addr, IS_USER(s));
4206 break;
4207 default: /* Avoid compiler warnings. */
4208 abort();
4209 }
4210 if (size != 2) {
4211 tmp2 = neon_load_reg(rd, pass);
4212 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
4213 tcg_temp_free_i32(tmp2);
4214 }
4215 neon_store_reg(rd, pass, tmp);
4216 } else { /* Store */
4217 tmp = neon_load_reg(rd, pass);
4218 if (shift)
4219 tcg_gen_shri_i32(tmp, tmp, shift);
4220 switch (size) {
4221 case 0:
4222 gen_st8(tmp, addr, IS_USER(s));
4223 break;
4224 case 1:
4225 gen_st16(tmp, addr, IS_USER(s));
4226 break;
4227 case 2:
4228 gen_st32(tmp, addr, IS_USER(s));
4229 break;
4230 }
4231 }
4232 rd += stride;
4233 tcg_gen_addi_i32(addr, addr, 1 << size);
4234 }
4235 tcg_temp_free_i32(addr);
4236 stride = nregs * (1 << size);
4237 }
4238 }
4239 if (rm != 15) {
4240 TCGv base;
4241
4242 base = load_reg(s, rn);
4243 if (rm == 13) {
4244 tcg_gen_addi_i32(base, base, stride);
4245 } else {
4246 TCGv index;
4247 index = load_reg(s, rm);
4248 tcg_gen_add_i32(base, base, index);
4249 tcg_temp_free_i32(index);
4250 }
4251 store_reg(s, rn, base);
4252 }
4253 return 0;
4254 }
4255
4256 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
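/* Note: VBSL, VBIT and VBIF all funnel into this one helper; the three
 * encodings differ only in which operand supplies the select mask and
 * which is clobbered (see the NEON_3R_LOGIC cases below, where the
 * operand order varies per encoding).
 */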
4257 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4258 {
4259 tcg_gen_and_i32(t, t, c);
4260 tcg_gen_andc_i32(f, f, c);
4261 tcg_gen_or_i32(dest, t, f);
4262 }
4263
4264 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4265 {
4266 switch (size) {
4267 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4268 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4269 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4270 default: abort();
4271 }
4272 }
4273
4274 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4275 {
4276 switch (size) {
4277 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4278 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4279 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4280 default: abort();
4281 }
4282 }
4283
4284 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4285 {
4286 switch (size) {
4287 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4288 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4289 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4290 default: abort();
4291 }
4292 }
4293
4294 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4295 {
4296 switch (size) {
4297 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4298 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4299 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4300 default: abort();
4301 }
4302 }
4303
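/* Shift-and-narrow helper. 'size' is the element size of the input
   (pre-narrow) value, so only the 16- and 32-bit cases exist: callers
   have already incremented size, so byte-sized inputs cannot occur
   (see the VSHRN/VQSHRN handling below). q selects a rounding shift,
   u an unsigned one. */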
4304 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4305 int q, int u)
4306 {
4307 if (q) {
4308 if (u) {
4309 switch (size) {
4310 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4311 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4312 default: abort();
4313 }
4314 } else {
4315 switch (size) {
4316 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4317 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4318 default: abort();
4319 }
4320 }
4321 } else {
4322 if (u) {
4323 switch (size) {
4324 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4325 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4326 default: abort();
4327 }
4328 } else {
4329 switch (size) {
4330 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4331 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4332 default: abort();
4333 }
4334 }
4335 }
4336 }
4337
4338 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4339 {
4340 if (u) {
4341 switch (size) {
4342 case 0: gen_helper_neon_widen_u8(dest, src); break;
4343 case 1: gen_helper_neon_widen_u16(dest, src); break;
4344 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4345 default: abort();
4346 }
4347 } else {
4348 switch (size) {
4349 case 0: gen_helper_neon_widen_s8(dest, src); break;
4350 case 1: gen_helper_neon_widen_s16(dest, src); break;
4351 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4352 default: abort();
4353 }
4354 }
4355 tcg_temp_free_i32(src);
4356 }
4357
4358 static inline void gen_neon_addl(int size)
4359 {
4360 switch (size) {
4361 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4362 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4363 case 2: tcg_gen_add_i64(CPU_V001); break;
4364 default: abort();
4365 }
4366 }
4367
4368 static inline void gen_neon_subl(int size)
4369 {
4370 switch (size) {
4371 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4372 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4373 case 2: tcg_gen_sub_i64(CPU_V001); break;
4374 default: abort();
4375 }
4376 }
4377
4378 static inline void gen_neon_negl(TCGv_i64 var, int size)
4379 {
4380 switch (size) {
4381 case 0: gen_helper_neon_negl_u16(var, var); break;
4382 case 1: gen_helper_neon_negl_u32(var, var); break;
4383 case 2: gen_helper_neon_negl_u64(var, var); break;
4384 default: abort();
4385 }
4386 }
4387
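/* Saturating add of widened values, for VQDMLAL/VQDMLSL/VQDMULL.
   'size' is the pre-widening element size: case 1 operates on the
   widened 32-bit lanes, case 2 on 64-bit lanes. There is no case 0
   because those encodings UNDEF before reaching this helper. */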
4388 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4389 {
4390 switch (size) {
4391 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4392 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4393 default: abort();
4394 }
4395 }
4396
4397 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4398 {
4399 TCGv_i64 tmp;
4400
4401 switch ((size << 1) | u) {
4402 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4403 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4404 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4405 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4406 case 4:
4407 tmp = gen_muls_i64_i32(a, b);
4408 tcg_gen_mov_i64(dest, tmp);
4409 tcg_temp_free_i64(tmp);
4410 break;
4411 case 5:
4412 tmp = gen_mulu_i64_i32(a, b);
4413 tcg_gen_mov_i64(dest, tmp);
4414 tcg_temp_free_i64(tmp);
4415 break;
4416 default: abort();
4417 }
4418
4419     /* gen_helper_neon_mull_[su]{8|16} do not free their parameters,
4420        so they must be freed here. */
4421 if (size < 2) {
4422 tcg_temp_free_i32(a);
4423 tcg_temp_free_i32(b);
4424 }
4425 }
4426
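/* Pick one of the four narrowing flavours from (op, u):
 *   op=1 u=1: saturating signed-to-unsigned (VQMOVUN/VQSHRUN style)
 *   op=1 u=0: plain truncating narrow (VMOVN/VSHRN style)
 *   op=0 u=1: unsigned saturating (VQMOVN.U/VQSHRN.U style)
 *   op=0 u=0: signed saturating (VQMOVN.S/VQSHRN.S style)
 */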
4427 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4428 {
4429 if (op) {
4430 if (u) {
4431 gen_neon_unarrow_sats(size, dest, src);
4432 } else {
4433 gen_neon_narrow(size, dest, src);
4434 }
4435 } else {
4436 if (u) {
4437 gen_neon_narrow_satu(size, dest, src);
4438 } else {
4439 gen_neon_narrow_sats(size, dest, src);
4440 }
4441 }
4442 }
4443
4444 /* Symbolic constants for op fields for Neon 3-register same-length.
4445 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4446 * table A7-9.
4447 */
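/* For example, an insn with bits [11:8] = 0b1000 and bit [4] = 1
 * decodes as op = (0x8 << 1) | 1 = 17 = NEON_3R_VTST_VCEQ.
 */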
4448 #define NEON_3R_VHADD 0
4449 #define NEON_3R_VQADD 1
4450 #define NEON_3R_VRHADD 2
4451 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4452 #define NEON_3R_VHSUB 4
4453 #define NEON_3R_VQSUB 5
4454 #define NEON_3R_VCGT 6
4455 #define NEON_3R_VCGE 7
4456 #define NEON_3R_VSHL 8
4457 #define NEON_3R_VQSHL 9
4458 #define NEON_3R_VRSHL 10
4459 #define NEON_3R_VQRSHL 11
4460 #define NEON_3R_VMAX 12
4461 #define NEON_3R_VMIN 13
4462 #define NEON_3R_VABD 14
4463 #define NEON_3R_VABA 15
4464 #define NEON_3R_VADD_VSUB 16
4465 #define NEON_3R_VTST_VCEQ 17
4466 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4467 #define NEON_3R_VMUL 19
4468 #define NEON_3R_VPMAX 20
4469 #define NEON_3R_VPMIN 21
4470 #define NEON_3R_VQDMULH_VQRDMULH 22
4471 #define NEON_3R_VPADD 23
4472 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4473 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4474 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4475 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4476 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4477 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4478 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4479
4480 static const uint8_t neon_3r_sizes[] = {
4481 [NEON_3R_VHADD] = 0x7,
4482 [NEON_3R_VQADD] = 0xf,
4483 [NEON_3R_VRHADD] = 0x7,
4484 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4485 [NEON_3R_VHSUB] = 0x7,
4486 [NEON_3R_VQSUB] = 0xf,
4487 [NEON_3R_VCGT] = 0x7,
4488 [NEON_3R_VCGE] = 0x7,
4489 [NEON_3R_VSHL] = 0xf,
4490 [NEON_3R_VQSHL] = 0xf,
4491 [NEON_3R_VRSHL] = 0xf,
4492 [NEON_3R_VQRSHL] = 0xf,
4493 [NEON_3R_VMAX] = 0x7,
4494 [NEON_3R_VMIN] = 0x7,
4495 [NEON_3R_VABD] = 0x7,
4496 [NEON_3R_VABA] = 0x7,
4497 [NEON_3R_VADD_VSUB] = 0xf,
4498 [NEON_3R_VTST_VCEQ] = 0x7,
4499 [NEON_3R_VML] = 0x7,
4500 [NEON_3R_VMUL] = 0x7,
4501 [NEON_3R_VPMAX] = 0x7,
4502 [NEON_3R_VPMIN] = 0x7,
4503 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4504 [NEON_3R_VPADD] = 0x7,
4505 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4506 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4507 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4508 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4509 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4510 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4511 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4512 };
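/* As with neon_2rm_sizes below, bit n set in an entry means element
 * size n is allowed: 0x7 permits sizes 0-2, 0xf additionally permits
 * the 64-bit case, 0x6 (VQDMULH) permits only 16- and 32-bit elements,
 * and in the 0x5 entries size bit 1 encodes the operation instead.
 */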
4513
4514 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4515 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4516 * table A7-13.
4517 */
4518 #define NEON_2RM_VREV64 0
4519 #define NEON_2RM_VREV32 1
4520 #define NEON_2RM_VREV16 2
4521 #define NEON_2RM_VPADDL 4
4522 #define NEON_2RM_VPADDL_U 5
4523 #define NEON_2RM_VCLS 8
4524 #define NEON_2RM_VCLZ 9
4525 #define NEON_2RM_VCNT 10
4526 #define NEON_2RM_VMVN 11
4527 #define NEON_2RM_VPADAL 12
4528 #define NEON_2RM_VPADAL_U 13
4529 #define NEON_2RM_VQABS 14
4530 #define NEON_2RM_VQNEG 15
4531 #define NEON_2RM_VCGT0 16
4532 #define NEON_2RM_VCGE0 17
4533 #define NEON_2RM_VCEQ0 18
4534 #define NEON_2RM_VCLE0 19
4535 #define NEON_2RM_VCLT0 20
4536 #define NEON_2RM_VABS 22
4537 #define NEON_2RM_VNEG 23
4538 #define NEON_2RM_VCGT0_F 24
4539 #define NEON_2RM_VCGE0_F 25
4540 #define NEON_2RM_VCEQ0_F 26
4541 #define NEON_2RM_VCLE0_F 27
4542 #define NEON_2RM_VCLT0_F 28
4543 #define NEON_2RM_VABS_F 30
4544 #define NEON_2RM_VNEG_F 31
4545 #define NEON_2RM_VSWP 32
4546 #define NEON_2RM_VTRN 33
4547 #define NEON_2RM_VUZP 34
4548 #define NEON_2RM_VZIP 35
4549 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4550 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4551 #define NEON_2RM_VSHLL 38
4552 #define NEON_2RM_VCVT_F16_F32 44
4553 #define NEON_2RM_VCVT_F32_F16 46
4554 #define NEON_2RM_VRECPE 56
4555 #define NEON_2RM_VRSQRTE 57
4556 #define NEON_2RM_VRECPE_F 58
4557 #define NEON_2RM_VRSQRTE_F 59
4558 #define NEON_2RM_VCVT_FS 60
4559 #define NEON_2RM_VCVT_FU 61
4560 #define NEON_2RM_VCVT_SF 62
4561 #define NEON_2RM_VCVT_UF 63
4562
4563 static int neon_2rm_is_float_op(int op)
4564 {
4565 /* Return true if this neon 2reg-misc op is float-to-float */
4566 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4567 op >= NEON_2RM_VRECPE_F);
4568 }
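/* Note the float comparisons against zero (NEON_2RM_VC*0_F) are not
 * included: they produce an all-ones/all-zeroes mask, which travels
 * through the ordinary integer register path.
 */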
4569
4570 /* Each entry in this array has bit n set if the insn allows
4571 * size value n (otherwise it will UNDEF). Since unallocated
4572 * op values will have no bits set they always UNDEF.
4573 */
4574 static const uint8_t neon_2rm_sizes[] = {
4575 [NEON_2RM_VREV64] = 0x7,
4576 [NEON_2RM_VREV32] = 0x3,
4577 [NEON_2RM_VREV16] = 0x1,
4578 [NEON_2RM_VPADDL] = 0x7,
4579 [NEON_2RM_VPADDL_U] = 0x7,
4580 [NEON_2RM_VCLS] = 0x7,
4581 [NEON_2RM_VCLZ] = 0x7,
4582 [NEON_2RM_VCNT] = 0x1,
4583 [NEON_2RM_VMVN] = 0x1,
4584 [NEON_2RM_VPADAL] = 0x7,
4585 [NEON_2RM_VPADAL_U] = 0x7,
4586 [NEON_2RM_VQABS] = 0x7,
4587 [NEON_2RM_VQNEG] = 0x7,
4588 [NEON_2RM_VCGT0] = 0x7,
4589 [NEON_2RM_VCGE0] = 0x7,
4590 [NEON_2RM_VCEQ0] = 0x7,
4591 [NEON_2RM_VCLE0] = 0x7,
4592 [NEON_2RM_VCLT0] = 0x7,
4593 [NEON_2RM_VABS] = 0x7,
4594 [NEON_2RM_VNEG] = 0x7,
4595 [NEON_2RM_VCGT0_F] = 0x4,
4596 [NEON_2RM_VCGE0_F] = 0x4,
4597 [NEON_2RM_VCEQ0_F] = 0x4,
4598 [NEON_2RM_VCLE0_F] = 0x4,
4599 [NEON_2RM_VCLT0_F] = 0x4,
4600 [NEON_2RM_VABS_F] = 0x4,
4601 [NEON_2RM_VNEG_F] = 0x4,
4602 [NEON_2RM_VSWP] = 0x1,
4603 [NEON_2RM_VTRN] = 0x7,
4604 [NEON_2RM_VUZP] = 0x7,
4605 [NEON_2RM_VZIP] = 0x7,
4606 [NEON_2RM_VMOVN] = 0x7,
4607 [NEON_2RM_VQMOVN] = 0x7,
4608 [NEON_2RM_VSHLL] = 0x7,
4609 [NEON_2RM_VCVT_F16_F32] = 0x2,
4610 [NEON_2RM_VCVT_F32_F16] = 0x2,
4611 [NEON_2RM_VRECPE] = 0x4,
4612 [NEON_2RM_VRSQRTE] = 0x4,
4613 [NEON_2RM_VRECPE_F] = 0x4,
4614 [NEON_2RM_VRSQRTE_F] = 0x4,
4615 [NEON_2RM_VCVT_FS] = 0x4,
4616 [NEON_2RM_VCVT_FU] = 0x4,
4617 [NEON_2RM_VCVT_SF] = 0x4,
4618 [NEON_2RM_VCVT_UF] = 0x4,
4619 };
4620
4621 /* Translate a NEON data processing instruction. Return nonzero if the
4622 instruction is invalid.
4623 We process data in a mixture of 32-bit and 64-bit chunks.
4624 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4625
4626 static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
4627 {
4628 int op;
4629 int q;
4630 int rd, rn, rm;
4631 int size;
4632 int shift;
4633 int pass;
4634 int count;
4635 int pairwise;
4636 int u;
4637 uint32_t imm, mask;
4638 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4639 TCGv_i64 tmp64;
4640
4641 if (!s->vfp_enabled)
4642 return 1;
4643 q = (insn & (1 << 6)) != 0;
4644 u = (insn >> 24) & 1;
4645 VFP_DREG_D(rd, insn);
4646 VFP_DREG_N(rn, insn);
4647 VFP_DREG_M(rm, insn);
4648 size = (insn >> 20) & 3;
4649 if ((insn & (1 << 23)) == 0) {
4650 /* Three register same length. */
4651 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4652 /* Catch invalid op and bad size combinations: UNDEF */
4653 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4654 return 1;
4655 }
4656 /* All insns of this form UNDEF for either this condition or the
4657 * superset of cases "Q==1"; we catch the latter later.
4658 */
4659 if (q && ((rd | rn | rm) & 1)) {
4660 return 1;
4661 }
4662 if (size == 3 && op != NEON_3R_LOGIC) {
4663 /* 64-bit element instructions. */
4664 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4665 neon_load_reg64(cpu_V0, rn + pass);
4666 neon_load_reg64(cpu_V1, rm + pass);
4667 switch (op) {
4668 case NEON_3R_VQADD:
4669 if (u) {
4670 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4671 cpu_V0, cpu_V1);
4672 } else {
4673 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4674 cpu_V0, cpu_V1);
4675 }
4676 break;
4677 case NEON_3R_VQSUB:
4678 if (u) {
4679 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4680 cpu_V0, cpu_V1);
4681 } else {
4682 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4683 cpu_V0, cpu_V1);
4684 }
4685 break;
4686 case NEON_3R_VSHL:
4687 if (u) {
4688 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4689 } else {
4690 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4691 }
4692 break;
4693 case NEON_3R_VQSHL:
4694 if (u) {
4695 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4696 cpu_V1, cpu_V0);
4697 } else {
4698 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4699 cpu_V1, cpu_V0);
4700 }
4701 break;
4702 case NEON_3R_VRSHL:
4703 if (u) {
4704 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4705 } else {
4706 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4707 }
4708 break;
4709 case NEON_3R_VQRSHL:
4710 if (u) {
4711 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4712 cpu_V1, cpu_V0);
4713 } else {
4714 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4715 cpu_V1, cpu_V0);
4716 }
4717 break;
4718 case NEON_3R_VADD_VSUB:
4719 if (u) {
4720 tcg_gen_sub_i64(CPU_V001);
4721 } else {
4722 tcg_gen_add_i64(CPU_V001);
4723 }
4724 break;
4725 default:
4726 abort();
4727 }
4728 neon_store_reg64(cpu_V0, rd + pass);
4729 }
4730 return 0;
4731 }
4732 pairwise = 0;
4733 switch (op) {
4734 case NEON_3R_VSHL:
4735 case NEON_3R_VQSHL:
4736 case NEON_3R_VRSHL:
4737 case NEON_3R_VQRSHL:
4738 {
4739 int rtmp;
4740 /* Shift instruction operands are reversed. */
4741 rtmp = rn;
4742 rn = rm;
4743 rm = rtmp;
4744 }
4745 break;
4746 case NEON_3R_VPADD:
4747 if (u) {
4748 return 1;
4749 }
4750 /* Fall through */
4751 case NEON_3R_VPMAX:
4752 case NEON_3R_VPMIN:
4753 pairwise = 1;
4754 break;
4755 case NEON_3R_FLOAT_ARITH:
4756 pairwise = (u && size < 2); /* if VPADD (float) */
4757 break;
4758 case NEON_3R_FLOAT_MINMAX:
4759 pairwise = u; /* if VPMIN/VPMAX (float) */
4760 break;
4761 case NEON_3R_FLOAT_CMP:
4762 if (!u && size) {
4763 /* no encoding for U=0 C=1x */
4764 return 1;
4765 }
4766 break;
4767 case NEON_3R_FLOAT_ACMP:
4768 if (!u) {
4769 return 1;
4770 }
4771 break;
4772 case NEON_3R_VRECPS_VRSQRTS:
4773 if (u) {
4774 return 1;
4775 }
4776 break;
4777 case NEON_3R_VMUL:
4778 if (u && (size != 0)) {
4779 /* UNDEF on invalid size for polynomial subcase */
4780 return 1;
4781 }
4782 break;
4783 case NEON_3R_VFM:
4784 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4785 return 1;
4786 }
4787 break;
4788 default:
4789 break;
4790 }
4791
4792 if (pairwise && q) {
4793 /* All the pairwise insns UNDEF if Q is set */
4794 return 1;
4795 }
4796
4797 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4798
4799 if (pairwise) {
4800 /* Pairwise. */
4801 if (pass < 1) {
4802 tmp = neon_load_reg(rn, 0);
4803 tmp2 = neon_load_reg(rn, 1);
4804 } else {
4805 tmp = neon_load_reg(rm, 0);
4806 tmp2 = neon_load_reg(rm, 1);
4807 }
4808 } else {
4809 /* Elementwise. */
4810 tmp = neon_load_reg(rn, pass);
4811 tmp2 = neon_load_reg(rm, pass);
4812 }
4813 switch (op) {
4814 case NEON_3R_VHADD:
4815 GEN_NEON_INTEGER_OP(hadd);
4816 break;
4817 case NEON_3R_VQADD:
4818 GEN_NEON_INTEGER_OP_ENV(qadd);
4819 break;
4820 case NEON_3R_VRHADD:
4821 GEN_NEON_INTEGER_OP(rhadd);
4822 break;
4823 case NEON_3R_LOGIC: /* Logic ops. */
4824 switch ((u << 2) | size) {
4825 case 0: /* VAND */
4826 tcg_gen_and_i32(tmp, tmp, tmp2);
4827 break;
4828 case 1: /* BIC */
4829 tcg_gen_andc_i32(tmp, tmp, tmp2);
4830 break;
4831 case 2: /* VORR */
4832 tcg_gen_or_i32(tmp, tmp, tmp2);
4833 break;
4834 case 3: /* VORN */
4835 tcg_gen_orc_i32(tmp, tmp, tmp2);
4836 break;
4837 case 4: /* VEOR */
4838 tcg_gen_xor_i32(tmp, tmp, tmp2);
4839 break;
4840 case 5: /* VBSL */
4841 tmp3 = neon_load_reg(rd, pass);
4842 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4843 tcg_temp_free_i32(tmp3);
4844 break;
4845 case 6: /* VBIT */
4846 tmp3 = neon_load_reg(rd, pass);
4847 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4848 tcg_temp_free_i32(tmp3);
4849 break;
4850 case 7: /* VBIF */
4851 tmp3 = neon_load_reg(rd, pass);
4852 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4853 tcg_temp_free_i32(tmp3);
4854 break;
4855 }
4856 break;
4857 case NEON_3R_VHSUB:
4858 GEN_NEON_INTEGER_OP(hsub);
4859 break;
4860 case NEON_3R_VQSUB:
4861 GEN_NEON_INTEGER_OP_ENV(qsub);
4862 break;
4863 case NEON_3R_VCGT:
4864 GEN_NEON_INTEGER_OP(cgt);
4865 break;
4866 case NEON_3R_VCGE:
4867 GEN_NEON_INTEGER_OP(cge);
4868 break;
4869 case NEON_3R_VSHL:
4870 GEN_NEON_INTEGER_OP(shl);
4871 break;
4872 case NEON_3R_VQSHL:
4873 GEN_NEON_INTEGER_OP_ENV(qshl);
4874 break;
4875 case NEON_3R_VRSHL:
4876 GEN_NEON_INTEGER_OP(rshl);
4877 break;
4878 case NEON_3R_VQRSHL:
4879 GEN_NEON_INTEGER_OP_ENV(qrshl);
4880 break;
4881 case NEON_3R_VMAX:
4882 GEN_NEON_INTEGER_OP(max);
4883 break;
4884 case NEON_3R_VMIN:
4885 GEN_NEON_INTEGER_OP(min);
4886 break;
4887 case NEON_3R_VABD:
4888 GEN_NEON_INTEGER_OP(abd);
4889 break;
4890 case NEON_3R_VABA:
4891 GEN_NEON_INTEGER_OP(abd);
4892 tcg_temp_free_i32(tmp2);
4893 tmp2 = neon_load_reg(rd, pass);
4894 gen_neon_add(size, tmp, tmp2);
4895 break;
4896 case NEON_3R_VADD_VSUB:
4897 if (!u) { /* VADD */
4898 gen_neon_add(size, tmp, tmp2);
4899 } else { /* VSUB */
4900 switch (size) {
4901 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4902 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4903 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4904 default: abort();
4905 }
4906 }
4907 break;
4908 case NEON_3R_VTST_VCEQ:
4909 if (!u) { /* VTST */
4910 switch (size) {
4911 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4912 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4913 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4914 default: abort();
4915 }
4916 } else { /* VCEQ */
4917 switch (size) {
4918 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4919 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4920 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4921 default: abort();
4922 }
4923 }
4924 break;
4925         case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
4926 switch (size) {
4927 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4928 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4929 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4930 default: abort();
4931 }
4932 tcg_temp_free_i32(tmp2);
4933 tmp2 = neon_load_reg(rd, pass);
4934 if (u) { /* VMLS */
4935 gen_neon_rsb(size, tmp, tmp2);
4936 } else { /* VMLA */
4937 gen_neon_add(size, tmp, tmp2);
4938 }
4939 break;
4940 case NEON_3R_VMUL:
4941 if (u) { /* polynomial */
4942 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4943 } else { /* Integer */
4944 switch (size) {
4945 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4946 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4947 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4948 default: abort();
4949 }
4950 }
4951 break;
4952 case NEON_3R_VPMAX:
4953 GEN_NEON_INTEGER_OP(pmax);
4954 break;
4955 case NEON_3R_VPMIN:
4956 GEN_NEON_INTEGER_OP(pmin);
4957 break;
4958 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4959 if (!u) { /* VQDMULH */
4960 switch (size) {
4961 case 1:
4962 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4963 break;
4964 case 2:
4965 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4966 break;
4967 default: abort();
4968 }
4969 } else { /* VQRDMULH */
4970 switch (size) {
4971 case 1:
4972 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4973 break;
4974 case 2:
4975 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4976 break;
4977 default: abort();
4978 }
4979 }
4980 break;
4981 case NEON_3R_VPADD:
4982 switch (size) {
4983 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4984 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4985 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4986 default: abort();
4987 }
4988 break;
4989 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4990 {
4991 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4992 switch ((u << 2) | size) {
4993 case 0: /* VADD */
4994 case 4: /* VPADD */
4995 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4996 break;
4997 case 2: /* VSUB */
4998 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
4999 break;
5000 case 6: /* VABD */
5001 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5002 break;
5003 default:
5004 abort();
5005 }
5006 tcg_temp_free_ptr(fpstatus);
5007 break;
5008 }
5009 case NEON_3R_FLOAT_MULTIPLY:
5010 {
5011 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5012 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5013 if (!u) {
5014 tcg_temp_free_i32(tmp2);
5015 tmp2 = neon_load_reg(rd, pass);
5016 if (size == 0) {
5017 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5018 } else {
5019 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5020 }
5021 }
5022 tcg_temp_free_ptr(fpstatus);
5023 break;
5024 }
5025 case NEON_3R_FLOAT_CMP:
5026 {
5027 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5028 if (!u) {
5029 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5030 } else {
5031 if (size == 0) {
5032 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5033 } else {
5034 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5035 }
5036 }
5037 tcg_temp_free_ptr(fpstatus);
5038 break;
5039 }
5040 case NEON_3R_FLOAT_ACMP:
5041 {
5042 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5043 if (size == 0) {
5044 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5045 } else {
5046 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5047 }
5048 tcg_temp_free_ptr(fpstatus);
5049 break;
5050 }
5051 case NEON_3R_FLOAT_MINMAX:
5052 {
5053 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5054 if (size == 0) {
5055 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
5056 } else {
5057 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
5058 }
5059 tcg_temp_free_ptr(fpstatus);
5060 break;
5061 }
5062 case NEON_3R_VRECPS_VRSQRTS:
5063 if (size == 0)
5064 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5065 else
5066 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5067 break;
5068 case NEON_3R_VFM:
5069 {
5070 /* VFMA, VFMS: fused multiply-add */
5071 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5072 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5073 if (size) {
5074 /* VFMS */
5075 gen_helper_vfp_negs(tmp, tmp);
5076 }
5077 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5078 tcg_temp_free_i32(tmp3);
5079 tcg_temp_free_ptr(fpstatus);
5080 break;
5081 }
5082 default:
5083 abort();
5084 }
5085 tcg_temp_free_i32(tmp2);
5086
5087 /* Save the result. For elementwise operations we can put it
5088 straight into the destination register. For pairwise operations
5089 we have to be careful to avoid clobbering the source operands. */
5090 if (pairwise && rd == rm) {
5091 neon_store_scratch(pass, tmp);
5092 } else {
5093 neon_store_reg(rd, pass, tmp);
5094 }
5095
5096 } /* for pass */
5097 if (pairwise && rd == rm) {
5098 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5099 tmp = neon_load_scratch(pass);
5100 neon_store_reg(rd, pass, tmp);
5101 }
5102 }
5103 /* End of 3 register same size operations. */
5104 } else if (insn & (1 << 4)) {
5105 if ((insn & 0x00380080) != 0) {
5106 /* Two registers and shift. */
5107 op = (insn >> 8) & 0xf;
5108 if (insn & (1 << 7)) {
5109 /* 64-bit shift. */
5110 if (op > 7) {
5111 return 1;
5112 }
5113 size = 3;
5114 } else {
5115 size = 2;
5116 while ((insn & (1 << (size + 19))) == 0)
5117 size--;
5118 }
5119 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5120             /* To avoid excessive duplication of ops we implement shift
5121 by immediate using the variable shift operations. */
5122 if (op < 8) {
5123 /* Shift by immediate:
5124 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5125 if (q && ((rd | rm) & 1)) {
5126 return 1;
5127 }
5128 if (!u && (op == 4 || op == 6)) {
5129 return 1;
5130 }
5131 /* Right shifts are encoded as N - shift, where N is the
5132 element size in bits. */
5133 if (op <= 4)
5134 shift = shift - (1 << (size + 3));
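                /* e.g. with size == 0, an encoded shift field of 1 means
                   VSHR #7: shift becomes 1 - 8 = -7 here, and the negative
                   count makes the variable-shift helpers below shift right. */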
5135 if (size == 3) {
5136 count = q + 1;
5137 } else {
5138                     count = q ? 4 : 2;
5139 }
5140 switch (size) {
5141 case 0:
5142 imm = (uint8_t) shift;
5143 imm |= imm << 8;
5144 imm |= imm << 16;
5145 break;
5146 case 1:
5147 imm = (uint16_t) shift;
5148 imm |= imm << 16;
5149 break;
5150 case 2:
5151 case 3:
5152 imm = shift;
5153 break;
5154 default:
5155 abort();
5156 }
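                /* imm now holds the (possibly negative) per-lane shift count
                   replicated across a 32-bit word: e.g. -7 at size 0 becomes
                   0xf9f9f9f9, letting one 32-bit helper shift four byte lanes. */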
5157
5158 for (pass = 0; pass < count; pass++) {
5159 if (size == 3) {
5160 neon_load_reg64(cpu_V0, rm + pass);
5161 tcg_gen_movi_i64(cpu_V1, imm);
5162 switch (op) {
5163 case 0: /* VSHR */
5164 case 1: /* VSRA */
5165 if (u)
5166 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5167 else
5168 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5169 break;
5170 case 2: /* VRSHR */
5171 case 3: /* VRSRA */
5172 if (u)
5173 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5174 else
5175 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5176 break;
5177 case 4: /* VSRI */
5178 case 5: /* VSHL, VSLI */
5179 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5180 break;
5181 case 6: /* VQSHLU */
5182 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5183 cpu_V0, cpu_V1);
5184 break;
5185 case 7: /* VQSHL */
5186 if (u) {
5187 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5188 cpu_V0, cpu_V1);
5189 } else {
5190 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5191 cpu_V0, cpu_V1);
5192 }
5193 break;
5194 }
5195 if (op == 1 || op == 3) {
5196 /* Accumulate. */
5197 neon_load_reg64(cpu_V1, rd + pass);
5198 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5199 } else if (op == 4 || (op == 5 && u)) {
5200                         /* Insert */
5201                         uint64_t mask;
5202                         neon_load_reg64(cpu_V1, rd + pass);
5203 if (shift < -63 || shift > 63) {
5204 mask = 0;
5205 } else {
5206 if (op == 4) {
5207 mask = 0xffffffffffffffffull >> -shift;
5208 } else {
5209 mask = 0xffffffffffffffffull << shift;
5210 }
5211 }
5212 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5213 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5214 }
5215 neon_store_reg64(cpu_V0, rd + pass);
5216 } else { /* size < 3 */
5217                     /* Operands in tmp and tmp2. */
5218 tmp = neon_load_reg(rm, pass);
5219 tmp2 = tcg_temp_new_i32();
5220 tcg_gen_movi_i32(tmp2, imm);
5221 switch (op) {
5222 case 0: /* VSHR */
5223 case 1: /* VSRA */
5224 GEN_NEON_INTEGER_OP(shl);
5225 break;
5226 case 2: /* VRSHR */
5227 case 3: /* VRSRA */
5228 GEN_NEON_INTEGER_OP(rshl);
5229 break;
5230 case 4: /* VSRI */
5231 case 5: /* VSHL, VSLI */
5232 switch (size) {
5233 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5234 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5235 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5236 default: abort();
5237 }
5238 break;
5239 case 6: /* VQSHLU */
5240 switch (size) {
5241 case 0:
5242 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5243 tmp, tmp2);
5244 break;
5245 case 1:
5246 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5247 tmp, tmp2);
5248 break;
5249 case 2:
5250 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5251 tmp, tmp2);
5252 break;
5253 default:
5254 abort();
5255 }
5256 break;
5257 case 7: /* VQSHL */
5258 GEN_NEON_INTEGER_OP_ENV(qshl);
5259 break;
5260 }
5261 tcg_temp_free_i32(tmp2);
5262
5263 if (op == 1 || op == 3) {
5264 /* Accumulate. */
5265 tmp2 = neon_load_reg(rd, pass);
5266 gen_neon_add(size, tmp, tmp2);
5267 tcg_temp_free_i32(tmp2);
5268 } else if (op == 4 || (op == 5 && u)) {
5269 /* Insert */
5270 switch (size) {
5271 case 0:
5272 if (op == 4)
5273 mask = 0xff >> -shift;
5274 else
5275 mask = (uint8_t)(0xff << shift);
5276 mask |= mask << 8;
5277 mask |= mask << 16;
5278 break;
5279 case 1:
5280 if (op == 4)
5281 mask = 0xffff >> -shift;
5282 else
5283 mask = (uint16_t)(0xffff << shift);
5284 mask |= mask << 16;
5285 break;
5286 case 2:
5287 if (shift < -31 || shift > 31) {
5288 mask = 0;
5289 } else {
5290 if (op == 4)
5291 mask = 0xffffffffu >> -shift;
5292 else
5293 mask = 0xffffffffu << shift;
5294 }
5295 break;
5296 default:
5297 abort();
5298 }
5299 tmp2 = neon_load_reg(rd, pass);
5300 tcg_gen_andi_i32(tmp, tmp, mask);
5301 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5302 tcg_gen_or_i32(tmp, tmp, tmp2);
5303 tcg_temp_free_i32(tmp2);
5304 }
5305 neon_store_reg(rd, pass, tmp);
5306 }
5307 } /* for pass */
5308 } else if (op < 10) {
5309 /* Shift by immediate and narrow:
5310 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5311 int input_unsigned = (op == 8) ? !u : u;
5312 if (rm & 1) {
5313 return 1;
5314 }
5315 shift = shift - (1 << (size + 3));
5316 size++;
5317 if (size == 3) {
5318 tmp64 = tcg_const_i64(shift);
5319 neon_load_reg64(cpu_V0, rm);
5320 neon_load_reg64(cpu_V1, rm + 1);
5321 for (pass = 0; pass < 2; pass++) {
5322 TCGv_i64 in;
5323 if (pass == 0) {
5324 in = cpu_V0;
5325 } else {
5326 in = cpu_V1;
5327 }
5328 if (q) {
5329 if (input_unsigned) {
5330 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5331 } else {
5332 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5333 }
5334 } else {
5335 if (input_unsigned) {
5336 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5337 } else {
5338 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5339 }
5340 }
5341 tmp = tcg_temp_new_i32();
5342 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5343 neon_store_reg(rd, pass, tmp);
5344 } /* for pass */
5345 tcg_temp_free_i64(tmp64);
5346 } else {
5347 if (size == 1) {
5348 imm = (uint16_t)shift;
5349 imm |= imm << 16;
5350 } else {
5351 /* size == 2 */
5352 imm = (uint32_t)shift;
5353 }
5354 tmp2 = tcg_const_i32(imm);
5355 tmp4 = neon_load_reg(rm + 1, 0);
5356 tmp5 = neon_load_reg(rm + 1, 1);
5357 for (pass = 0; pass < 2; pass++) {
5358 if (pass == 0) {
5359 tmp = neon_load_reg(rm, 0);
5360 } else {
5361 tmp = tmp4;
5362 }
5363 gen_neon_shift_narrow(size, tmp, tmp2, q,
5364 input_unsigned);
5365 if (pass == 0) {
5366 tmp3 = neon_load_reg(rm, 1);
5367 } else {
5368 tmp3 = tmp5;
5369 }
5370 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5371 input_unsigned);
5372 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5373 tcg_temp_free_i32(tmp);
5374 tcg_temp_free_i32(tmp3);
5375 tmp = tcg_temp_new_i32();
5376 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5377 neon_store_reg(rd, pass, tmp);
5378 } /* for pass */
5379 tcg_temp_free_i32(tmp2);
5380 }
5381 } else if (op == 10) {
5382 /* VSHLL, VMOVL */
5383 if (q || (rd & 1)) {
5384 return 1;
5385 }
5386 tmp = neon_load_reg(rm, 0);
5387 tmp2 = neon_load_reg(rm, 1);
5388 for (pass = 0; pass < 2; pass++) {
5389 if (pass == 1)
5390 tmp = tmp2;
5391
5392 gen_neon_widen(cpu_V0, tmp, size, u);
5393
5394 if (shift != 0) {
5395 /* The shift is less than the width of the source
5396 type, so we can just shift the whole register. */
5397 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5398 /* Widen the result of shift: we need to clear
5399 * the potential overflow bits resulting from
5400 * left bits of the narrow input appearing as
5401                          * right bits of the left neighbour narrow
5402 * input. */
5403 if (size < 2 || !u) {
5404 uint64_t imm64;
5405 if (size == 0) {
5406 imm = (0xffu >> (8 - shift));
5407 imm |= imm << 16;
5408 } else if (size == 1) {
5409 imm = 0xffff >> (16 - shift);
5410 } else {
5411 /* size == 2 */
5412 imm = 0xffffffff >> (32 - shift);
5413 }
5414 if (size < 2) {
5415 imm64 = imm | (((uint64_t)imm) << 32);
5416 } else {
5417 imm64 = imm;
5418 }
5419 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5420 }
5421 }
5422 neon_store_reg64(cpu_V0, rd + pass);
5423 }
5424 } else if (op >= 14) {
5425 /* VCVT fixed-point. */
5426 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5427 return 1;
5428 }
5429 /* We have already masked out the must-be-1 top bit of imm6,
5430 * hence this 32-shift where the ARM ARM has 64-imm6.
5431 */
5432 shift = 32 - shift;
5433 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5434 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5435 if (!(op & 1)) {
5436 if (u)
5437 gen_vfp_ulto(0, shift, 1);
5438 else
5439 gen_vfp_slto(0, shift, 1);
5440 } else {
5441 if (u)
5442 gen_vfp_toul(0, shift, 1);
5443 else
5444 gen_vfp_tosl(0, shift, 1);
5445 }
5446 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5447 }
5448 } else {
5449 return 1;
5450 }
5451 } else { /* (insn & 0x00380080) == 0 */
5452 int invert;
5453 if (q && (rd & 1)) {
5454 return 1;
5455 }
5456
5457 op = (insn >> 8) & 0xf;
5458 /* One register and immediate. */
5459 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5460 invert = (insn & (1 << 5)) != 0;
5461             /* Note that for op = 2,3,4,5,6,7,10,11,12,13, imm == 0 is UNPREDICTABLE.
5462 * We choose to not special-case this and will behave as if a
5463 * valid constant encoding of 0 had been given.
5464 */
5465 switch (op) {
5466 case 0: case 1:
5467 /* no-op */
5468 break;
5469 case 2: case 3:
5470 imm <<= 8;
5471 break;
5472 case 4: case 5:
5473 imm <<= 16;
5474 break;
5475 case 6: case 7:
5476 imm <<= 24;
5477 break;
5478 case 8: case 9:
5479 imm |= imm << 16;
5480 break;
5481 case 10: case 11:
5482 imm = (imm << 8) | (imm << 24);
5483 break;
5484 case 12:
5485 imm = (imm << 8) | 0xff;
5486 break;
5487 case 13:
5488 imm = (imm << 16) | 0xffff;
5489 break;
5490 case 14:
5491 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5492 if (invert)
5493 imm = ~imm;
5494 break;
5495 case 15:
5496 if (invert) {
5497 return 1;
5498 }
5499 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5500 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5501 break;
5502 }
5503 if (invert)
5504 imm = ~imm;
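            /* imm is now the fully expanded 32-bit constant, used for every
               pass. For example the op == 12 case turns an 8-bit field of
               0x12 into (0x12 << 8) | 0xff = 0x12ff before any inversion. */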
5505
5506 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5507 if (op & 1 && op < 12) {
5508 tmp = neon_load_reg(rd, pass);
5509 if (invert) {
5510 /* The immediate value has already been inverted, so
5511 BIC becomes AND. */
5512 tcg_gen_andi_i32(tmp, tmp, imm);
5513 } else {
5514 tcg_gen_ori_i32(tmp, tmp, imm);
5515 }
5516 } else {
5517 /* VMOV, VMVN. */
5518 tmp = tcg_temp_new_i32();
5519 if (op == 14 && invert) {
5520 int n;
5521 uint32_t val;
5522 val = 0;
5523 for (n = 0; n < 4; n++) {
5524 if (imm & (1 << (n + (pass & 1) * 4)))
5525 val |= 0xff << (n * 8);
5526 }
5527 tcg_gen_movi_i32(tmp, val);
5528 } else {
5529 tcg_gen_movi_i32(tmp, imm);
5530 }
5531 }
5532 neon_store_reg(rd, pass, tmp);
5533 }
5534 }
5535     } else { /* ((insn & 0x00800010) == 0x00800000) */
5536 if (size != 3) {
5537 op = (insn >> 8) & 0xf;
5538 if ((insn & (1 << 6)) == 0) {
5539 /* Three registers of different lengths. */
5540 int src1_wide;
5541 int src2_wide;
5542 int prewiden;
5543 /* undefreq: bit 0 : UNDEF if size != 0
5544 * bit 1 : UNDEF if size == 0
5545 * bit 2 : UNDEF if U == 1
5546 * Note that [1:0] set implies 'always UNDEF'
5547 */
5548 int undefreq;
5549 /* prewiden, src1_wide, src2_wide, undefreq */
5550 static const int neon_3reg_wide[16][4] = {
5551 {1, 0, 0, 0}, /* VADDL */
5552 {1, 1, 0, 0}, /* VADDW */
5553 {1, 0, 0, 0}, /* VSUBL */
5554 {1, 1, 0, 0}, /* VSUBW */
5555 {0, 1, 1, 0}, /* VADDHN */
5556 {0, 0, 0, 0}, /* VABAL */
5557 {0, 1, 1, 0}, /* VSUBHN */
5558 {0, 0, 0, 0}, /* VABDL */
5559 {0, 0, 0, 0}, /* VMLAL */
5560 {0, 0, 0, 6}, /* VQDMLAL */
5561 {0, 0, 0, 0}, /* VMLSL */
5562 {0, 0, 0, 6}, /* VQDMLSL */
5563 {0, 0, 0, 0}, /* Integer VMULL */
5564 {0, 0, 0, 2}, /* VQDMULL */
5565 {0, 0, 0, 5}, /* Polynomial VMULL */
5566 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5567 };
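            /* For example VADDW (op == 1): src1 is already wide, so it is
               loaded as a whole 64-bit lane, while the narrow src2 is widened
               (prewiden) before the 64-bit add. */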
5568
5569 prewiden = neon_3reg_wide[op][0];
5570 src1_wide = neon_3reg_wide[op][1];
5571 src2_wide = neon_3reg_wide[op][2];
5572 undefreq = neon_3reg_wide[op][3];
5573
5574 if (((undefreq & 1) && (size != 0)) ||
5575 ((undefreq & 2) && (size == 0)) ||
5576 ((undefreq & 4) && u)) {
5577 return 1;
5578 }
5579 if ((src1_wide && (rn & 1)) ||
5580 (src2_wide && (rm & 1)) ||
5581 (!src2_wide && (rd & 1))) {
5582 return 1;
5583 }
5584
5585 /* Avoid overlapping operands. Wide source operands are
5586 always aligned so will never overlap with wide
5587 destinations in problematic ways. */
5588 if (rd == rm && !src2_wide) {
5589 tmp = neon_load_reg(rm, 1);
5590 neon_store_scratch(2, tmp);
5591 } else if (rd == rn && !src1_wide) {
5592 tmp = neon_load_reg(rn, 1);
5593 neon_store_scratch(2, tmp);
5594 }
5595 TCGV_UNUSED(tmp3);
5596 for (pass = 0; pass < 2; pass++) {
5597 if (src1_wide) {
5598 neon_load_reg64(cpu_V0, rn + pass);
5599 TCGV_UNUSED(tmp);
5600 } else {
5601 if (pass == 1 && rd == rn) {
5602 tmp = neon_load_scratch(2);
5603 } else {
5604 tmp = neon_load_reg(rn, pass);
5605 }
5606 if (prewiden) {
5607 gen_neon_widen(cpu_V0, tmp, size, u);
5608 }
5609 }
5610 if (src2_wide) {
5611 neon_load_reg64(cpu_V1, rm + pass);
5612 TCGV_UNUSED(tmp2);
5613 } else {
5614 if (pass == 1 && rd == rm) {
5615 tmp2 = neon_load_scratch(2);
5616 } else {
5617 tmp2 = neon_load_reg(rm, pass);
5618 }
5619 if (prewiden) {
5620 gen_neon_widen(cpu_V1, tmp2, size, u);
5621 }
5622 }
5623 switch (op) {
5624 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5625 gen_neon_addl(size);
5626 break;
5627 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5628 gen_neon_subl(size);
5629 break;
5630 case 5: case 7: /* VABAL, VABDL */
5631 switch ((size << 1) | u) {
5632 case 0:
5633 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5634 break;
5635 case 1:
5636 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5637 break;
5638 case 2:
5639 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5640 break;
5641 case 3:
5642 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5643 break;
5644 case 4:
5645 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5646 break;
5647 case 5:
5648 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5649 break;
5650 default: abort();
5651 }
5652 tcg_temp_free_i32(tmp2);
5653 tcg_temp_free_i32(tmp);
5654 break;
5655 case 8: case 9: case 10: case 11: case 12: case 13:
5656 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5657 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5658 break;
5659 case 14: /* Polynomial VMULL */
5660 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5661 tcg_temp_free_i32(tmp2);
5662 tcg_temp_free_i32(tmp);
5663 break;
5664 default: /* 15 is RESERVED: caught earlier */
5665 abort();
5666 }
5667 if (op == 13) {
5668 /* VQDMULL */
5669 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5670 neon_store_reg64(cpu_V0, rd + pass);
5671 } else if (op == 5 || (op >= 8 && op <= 11)) {
5672 /* Accumulate. */
5673 neon_load_reg64(cpu_V1, rd + pass);
5674 switch (op) {
5675 case 10: /* VMLSL */
5676 gen_neon_negl(cpu_V0, size);
5677 /* Fall through */
5678 case 5: case 8: /* VABAL, VMLAL */
5679 gen_neon_addl(size);
5680 break;
5681 case 9: case 11: /* VQDMLAL, VQDMLSL */
5682 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5683 if (op == 11) {
5684 gen_neon_negl(cpu_V0, size);
5685 }
5686 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5687 break;
5688 default:
5689 abort();
5690 }
5691 neon_store_reg64(cpu_V0, rd + pass);
5692 } else if (op == 4 || op == 6) {
5693 /* Narrowing operation. */
5694 tmp = tcg_temp_new_i32();
5695 if (!u) {
5696 switch (size) {
5697 case 0:
5698 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5699 break;
5700 case 1:
5701 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5702 break;
5703 case 2:
5704 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5705 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5706 break;
5707 default: abort();
5708 }
5709 } else {
5710 switch (size) {
5711 case 0:
5712 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5713 break;
5714 case 1:
5715 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5716 break;
5717 case 2:
5718 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5719 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5720 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5721 break;
5722 default: abort();
5723 }
5724 }
5725 if (pass == 0) {
5726 tmp3 = tmp;
5727 } else {
5728 neon_store_reg(rd, 0, tmp3);
5729 neon_store_reg(rd, 1, tmp);
5730 }
5731 } else {
5732 /* Write back the result. */
5733 neon_store_reg64(cpu_V0, rd + pass);
5734 }
5735 }
5736 } else {
5737 /* Two registers and a scalar. NB that for ops of this form
5738 * the ARM ARM labels bit 24 as Q, but it is in our variable
5739 * 'u', not 'q'.
5740 */
5741 if (size == 0) {
5742 return 1;
5743 }
5744 switch (op) {
5745             case 1: /* Floating point VMLA scalar */
5746 case 5: /* Floating point VMLS scalar */
5747 case 9: /* Floating point VMUL scalar */
5748 if (size == 1) {
5749 return 1;
5750 }
5751 /* fall through */
5752 case 0: /* Integer VMLA scalar */
5753 case 4: /* Integer VMLS scalar */
5754 case 8: /* Integer VMUL scalar */
5755 case 12: /* VQDMULH scalar */
5756 case 13: /* VQRDMULH scalar */
5757 if (u && ((rd | rn) & 1)) {
5758 return 1;
5759 }
5760 tmp = neon_get_scalar(size, rm);
5761 neon_store_scratch(0, tmp);
5762 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5763 tmp = neon_load_scratch(0);
5764 tmp2 = neon_load_reg(rn, pass);
5765 if (op == 12) {
5766 if (size == 1) {
5767 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5768 } else {
5769 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5770 }
5771 } else if (op == 13) {
5772 if (size == 1) {
5773 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5774 } else {
5775 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5776 }
5777 } else if (op & 1) {
5778 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5779 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5780 tcg_temp_free_ptr(fpstatus);
5781 } else {
5782 switch (size) {
5783 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5784 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5785 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5786 default: abort();
5787 }
5788 }
5789 tcg_temp_free_i32(tmp2);
5790 if (op < 8) {
5791 /* Accumulate. */
5792 tmp2 = neon_load_reg(rd, pass);
5793 switch (op) {
5794 case 0:
5795 gen_neon_add(size, tmp, tmp2);
5796 break;
5797 case 1:
5798 {
5799 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5800 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5801 tcg_temp_free_ptr(fpstatus);
5802 break;
5803 }
5804 case 4:
5805 gen_neon_rsb(size, tmp, tmp2);
5806 break;
5807 case 5:
5808 {
5809 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5810 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5811 tcg_temp_free_ptr(fpstatus);
5812 break;
5813 }
5814 default:
5815 abort();
5816 }
5817 tcg_temp_free_i32(tmp2);
5818 }
5819 neon_store_reg(rd, pass, tmp);
5820 }
5821 break;
5822 case 3: /* VQDMLAL scalar */
5823 case 7: /* VQDMLSL scalar */
5824 case 11: /* VQDMULL scalar */
5825 if (u == 1) {
5826 return 1;
5827 }
5828 /* fall through */
5829             case 2: /* VMLAL scalar */
5830 case 6: /* VMLSL scalar */
5831 case 10: /* VMULL scalar */
5832 if (rd & 1) {
5833 return 1;
5834 }
5835 tmp2 = neon_get_scalar(size, rm);
5836 /* We need a copy of tmp2 because gen_neon_mull
5837                  * frees it during pass 0. */
5838 tmp4 = tcg_temp_new_i32();
5839 tcg_gen_mov_i32(tmp4, tmp2);
5840 tmp3 = neon_load_reg(rn, 1);
5841
5842 for (pass = 0; pass < 2; pass++) {
5843 if (pass == 0) {
5844 tmp = neon_load_reg(rn, 0);
5845 } else {
5846 tmp = tmp3;
5847 tmp2 = tmp4;
5848 }
5849 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5850 if (op != 11) {
5851 neon_load_reg64(cpu_V1, rd + pass);
5852 }
5853 switch (op) {
5854 case 6:
5855 gen_neon_negl(cpu_V0, size);
5856 /* Fall through */
5857 case 2:
5858 gen_neon_addl(size);
5859 break;
5860 case 3: case 7:
5861 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5862 if (op == 7) {
5863 gen_neon_negl(cpu_V0, size);
5864 }
5865 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5866 break;
5867 case 10:
5868 /* no-op */
5869 break;
5870 case 11:
5871 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5872 break;
5873 default:
5874 abort();
5875 }
5876 neon_store_reg64(cpu_V0, rd + pass);
5877 }
5878
5880 break;
5881 default: /* 14 and 15 are RESERVED */
5882 return 1;
5883 }
5884 }
5885 } else { /* size == 3 */
5886 if (!u) {
5887 /* Extract. */
5888 imm = (insn >> 8) & 0xf;
5889
5890 if (imm > 7 && !q)
5891 return 1;
5892
5893 if (q && ((rd | rn | rm) & 1)) {
5894 return 1;
5895 }
5896
5897 if (imm == 0) {
5898 neon_load_reg64(cpu_V0, rn);
5899 if (q) {
5900 neon_load_reg64(cpu_V1, rn + 1);
5901 }
5902 } else if (imm == 8) {
5903 neon_load_reg64(cpu_V0, rn + 1);
5904 if (q) {
5905 neon_load_reg64(cpu_V1, rm);
5906 }
5907 } else if (q) {
5908 tmp64 = tcg_temp_new_i64();
5909 if (imm < 8) {
5910 neon_load_reg64(cpu_V0, rn);
5911 neon_load_reg64(tmp64, rn + 1);
5912 } else {
5913 neon_load_reg64(cpu_V0, rn + 1);
5914 neon_load_reg64(tmp64, rm);
5915 }
5916 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5917 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5918 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5919 if (imm < 8) {
5920 neon_load_reg64(cpu_V1, rm);
5921 } else {
5922 neon_load_reg64(cpu_V1, rm + 1);
5923 imm -= 8;
5924 }
5925 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5926 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5927 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5928 tcg_temp_free_i64(tmp64);
5929 } else {
5930                 /* BUGFIX: non-quad VEXT; imm is guaranteed to be 1..7 here. */
5931 neon_load_reg64(cpu_V0, rn);
5932 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5933 neon_load_reg64(cpu_V1, rm);
5934 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5935 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5936 }
5937 neon_store_reg64(cpu_V0, rd);
5938 if (q) {
5939 neon_store_reg64(cpu_V1, rd + 1);
5940 }
5941 } else if ((insn & (1 << 11)) == 0) {
5942 /* Two register misc. */
5943 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5944 size = (insn >> 18) & 3;
5945 /* UNDEF for unknown op values and bad op-size combinations */
5946 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5947 return 1;
5948 }
5949 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5950 q && ((rm | rd) & 1)) {
5951 return 1;
5952 }
5953 switch (op) {
5954 case NEON_2RM_VREV64:
5955 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5956 tmp = neon_load_reg(rm, pass * 2);
5957 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5958 switch (size) {
5959 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5960 case 1: gen_swap_half(tmp); break;
5961 case 2: /* no-op */ break;
5962 default: abort();
5963 }
5964 neon_store_reg(rd, pass * 2 + 1, tmp);
5965 if (size == 2) {
5966 neon_store_reg(rd, pass * 2, tmp2);
5967 } else {
5968 switch (size) {
5969 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5970 case 1: gen_swap_half(tmp2); break;
5971 default: abort();
5972 }
5973 neon_store_reg(rd, pass * 2, tmp2);
5974 }
5975 }
5976 break;
5977 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5978 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
5979 for (pass = 0; pass < q + 1; pass++) {
5980 tmp = neon_load_reg(rm, pass * 2);
5981 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5982 tmp = neon_load_reg(rm, pass * 2 + 1);
5983 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5984 switch (size) {
5985 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5986 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5987 case 2: tcg_gen_add_i64(CPU_V001); break;
5988 default: abort();
5989 }
5990 if (op >= NEON_2RM_VPADAL) {
5991 /* Accumulate. */
5992 neon_load_reg64(cpu_V1, rd + pass);
5993 gen_neon_addl(size);
5994 }
5995 neon_store_reg64(cpu_V0, rd + pass);
5996 }
5997 break;
5998 case NEON_2RM_VTRN:
5999 if (size == 2) {
6000 int n;
6001 for (n = 0; n < (q ? 4 : 2); n += 2) {
6002 tmp = neon_load_reg(rm, n);
6003 tmp2 = neon_load_reg(rd, n + 1);
6004 neon_store_reg(rm, n, tmp2);
6005 neon_store_reg(rd, n + 1, tmp);
6006 }
6007 } else {
6008 goto elementwise;
6009 }
6010 break;
6011 case NEON_2RM_VUZP:
6012 if (gen_neon_unzip(rd, rm, size, q)) {
6013 return 1;
6014 }
6015 break;
6016 case NEON_2RM_VZIP:
6017 if (gen_neon_zip(rd, rm, size, q)) {
6018 return 1;
6019 }
6020 break;
6021 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6022 /* also VQMOVUN; op field and mnemonics don't line up */
6023 if (rm & 1) {
6024 return 1;
6025 }
6026 TCGV_UNUSED(tmp2);
6027 for (pass = 0; pass < 2; pass++) {
6028 neon_load_reg64(cpu_V0, rm + pass);
6029 tmp = tcg_temp_new_i32();
6030 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6031 tmp, cpu_V0);
6032 if (pass == 0) {
6033 tmp2 = tmp;
6034 } else {
6035 neon_store_reg(rd, 0, tmp2);
6036 neon_store_reg(rd, 1, tmp);
6037 }
6038 }
6039 break;
6040 case NEON_2RM_VSHLL:
6041 if (q || (rd & 1)) {
6042 return 1;
6043 }
6044 tmp = neon_load_reg(rm, 0);
6045 tmp2 = neon_load_reg(rm, 1);
6046 for (pass = 0; pass < 2; pass++) {
6047 if (pass == 1)
6048 tmp = tmp2;
6049 gen_neon_widen(cpu_V0, tmp, size, 1);
6050 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6051 neon_store_reg64(cpu_V0, rd + pass);
6052 }
6053 break;
6054 case NEON_2RM_VCVT_F16_F32:
6055 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6056 q || (rm & 1)) {
6057 return 1;
6058 }
6059 tmp = tcg_temp_new_i32();
6060 tmp2 = tcg_temp_new_i32();
6061 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
6062 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6063 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
6064 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6065 tcg_gen_shli_i32(tmp2, tmp2, 16);
6066 tcg_gen_or_i32(tmp2, tmp2, tmp);
6067 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
6068 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6069 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6070 neon_store_reg(rd, 0, tmp2);
6071 tmp2 = tcg_temp_new_i32();
6072 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6073 tcg_gen_shli_i32(tmp2, tmp2, 16);
6074 tcg_gen_or_i32(tmp2, tmp2, tmp);
6075 neon_store_reg(rd, 1, tmp2);
6076 tcg_temp_free_i32(tmp);
6077 break;
6078 case NEON_2RM_VCVT_F32_F16:
6079 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6080 q || (rd & 1)) {
6081 return 1;
6082 }
6083 tmp3 = tcg_temp_new_i32();
6084 tmp = neon_load_reg(rm, 0);
6085 tmp2 = neon_load_reg(rm, 1);
6086 tcg_gen_ext16u_i32(tmp3, tmp);
6087 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6088 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6089 tcg_gen_shri_i32(tmp3, tmp, 16);
6090 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6091 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
6092 tcg_temp_free_i32(tmp);
6093 tcg_gen_ext16u_i32(tmp3, tmp2);
6094 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6095 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6096 tcg_gen_shri_i32(tmp3, tmp2, 16);
6097 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6098 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
6099 tcg_temp_free_i32(tmp2);
6100 tcg_temp_free_i32(tmp3);
6101 break;
6102 default:
6103 elementwise:
6104 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6105 if (neon_2rm_is_float_op(op)) {
6106 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6107 neon_reg_offset(rm, pass));
6108 TCGV_UNUSED(tmp);
6109 } else {
6110 tmp = neon_load_reg(rm, pass);
6111 }
6112 switch (op) {
6113 case NEON_2RM_VREV32:
6114 switch (size) {
6115 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6116 case 1: gen_swap_half(tmp); break;
6117 default: abort();
6118 }
6119 break;
6120 case NEON_2RM_VREV16:
6121 gen_rev16(tmp);
6122 break;
6123 case NEON_2RM_VCLS:
6124 switch (size) {
6125 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6126 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6127 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6128 default: abort();
6129 }
6130 break;
6131 case NEON_2RM_VCLZ:
6132 switch (size) {
6133 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6134 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6135 case 2: gen_helper_clz(tmp, tmp); break;
6136 default: abort();
6137 }
6138 break;
6139 case NEON_2RM_VCNT:
6140 gen_helper_neon_cnt_u8(tmp, tmp);
6141 break;
6142 case NEON_2RM_VMVN:
6143 tcg_gen_not_i32(tmp, tmp);
6144 break;
6145 case NEON_2RM_VQABS:
6146 switch (size) {
6147 case 0:
6148 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6149 break;
6150 case 1:
6151 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6152 break;
6153 case 2:
6154 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6155 break;
6156 default: abort();
6157 }
6158 break;
6159 case NEON_2RM_VQNEG:
6160 switch (size) {
6161 case 0:
6162 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6163 break;
6164 case 1:
6165 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6166 break;
6167 case 2:
6168 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6169 break;
6170 default: abort();
6171 }
6172 break;
6173 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6174 tmp2 = tcg_const_i32(0);
6175                     switch (size) {
6176 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6177 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6178 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6179 default: abort();
6180 }
6181 tcg_temp_free(tmp2);
6182 if (op == NEON_2RM_VCLE0) {
6183 tcg_gen_not_i32(tmp, tmp);
6184 }
6185 break;
6186 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6187 tmp2 = tcg_const_i32(0);
6188                     switch (size) {
6189 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6190 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6191 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6192 default: abort();
6193 }
6194 tcg_temp_free(tmp2);
6195 if (op == NEON_2RM_VCLT0) {
6196 tcg_gen_not_i32(tmp, tmp);
6197 }
6198 break;
6199 case NEON_2RM_VCEQ0:
6200 tmp2 = tcg_const_i32(0);
6201                     switch (size) {
6202 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6203 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6204 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6205 default: abort();
6206 }
6207 tcg_temp_free(tmp2);
6208 break;
6209 case NEON_2RM_VABS:
6210                     switch (size) {
6211 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6212 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6213 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6214 default: abort();
6215 }
6216 break;
6217 case NEON_2RM_VNEG:
6218 tmp2 = tcg_const_i32(0);
6219 gen_neon_rsb(size, tmp, tmp2);
6220 tcg_temp_free(tmp2);
6221 break;
6222 case NEON_2RM_VCGT0_F:
6223 {
6224 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6225 tmp2 = tcg_const_i32(0);
6226 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6227 tcg_temp_free(tmp2);
6228 tcg_temp_free_ptr(fpstatus);
6229 break;
6230 }
6231 case NEON_2RM_VCGE0_F:
6232 {
6233 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6234 tmp2 = tcg_const_i32(0);
6235 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6236 tcg_temp_free(tmp2);
6237 tcg_temp_free_ptr(fpstatus);
6238 break;
6239 }
6240 case NEON_2RM_VCEQ0_F:
6241 {
6242 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6243 tmp2 = tcg_const_i32(0);
6244 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6245 tcg_temp_free(tmp2);
6246 tcg_temp_free_ptr(fpstatus);
6247 break;
6248 }
6249 case NEON_2RM_VCLE0_F:
6250 {
6251 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6252 tmp2 = tcg_const_i32(0);
6253 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6254 tcg_temp_free(tmp2);
6255 tcg_temp_free_ptr(fpstatus);
6256 break;
6257 }
6258 case NEON_2RM_VCLT0_F:
6259 {
6260 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6261 tmp2 = tcg_const_i32(0);
6262 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6263 tcg_temp_free(tmp2);
6264 tcg_temp_free_ptr(fpstatus);
6265 break;
6266 }
6267 case NEON_2RM_VABS_F:
6268 gen_vfp_abs(0);
6269 break;
6270 case NEON_2RM_VNEG_F:
6271 gen_vfp_neg(0);
6272 break;
6273 case NEON_2RM_VSWP:
6274 tmp2 = neon_load_reg(rd, pass);
6275 neon_store_reg(rm, pass, tmp2);
6276 break;
6277 case NEON_2RM_VTRN:
6278 tmp2 = neon_load_reg(rd, pass);
6279 switch (size) {
6280 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6281 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6282 default: abort();
6283 }
6284 neon_store_reg(rm, pass, tmp2);
6285 break;
6286 case NEON_2RM_VRECPE:
6287 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6288 break;
6289 case NEON_2RM_VRSQRTE:
6290 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6291 break;
6292 case NEON_2RM_VRECPE_F:
6293 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6294 break;
6295 case NEON_2RM_VRSQRTE_F:
6296 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6297 break;
6298 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6299 gen_vfp_sito(0, 1);
6300 break;
6301 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6302 gen_vfp_uito(0, 1);
6303 break;
6304 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6305 gen_vfp_tosiz(0, 1);
6306 break;
6307 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6308 gen_vfp_touiz(0, 1);
6309 break;
6310 default:
6311 /* Reserved op values were caught by the
6312 * neon_2rm_sizes[] check earlier.
6313 */
6314 abort();
6315 }
6316 if (neon_2rm_is_float_op(op)) {
6317 tcg_gen_st_f32(cpu_F0s, cpu_env,
6318 neon_reg_offset(rd, pass));
6319 } else {
6320 neon_store_reg(rd, pass, tmp);
6321 }
6322 }
6323 break;
6324 }
6325 } else if ((insn & (1 << 10)) == 0) {
6326 /* VTBL, VTBX. */
6327 int n = ((insn >> 8) & 3) + 1;
6328 if ((rn + n) > 32) {
6329 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6330 * helper function running off the end of the register file.
6331 */
6332 return 1;
6333 }
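/* n is the number of list registers (1..4); each doubleword register
   supplies 8 table bytes, so scale it to a byte count for the helper. */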
6334 n <<= 3;
6335 if (insn & (1 << 6)) {
6336 tmp = neon_load_reg(rd, 0);
6337 } else {
6338 tmp = tcg_temp_new_i32();
6339 tcg_gen_movi_i32(tmp, 0);
6340 }
6341 tmp2 = neon_load_reg(rm, 0);
6342 tmp4 = tcg_const_i32(rn);
6343 tmp5 = tcg_const_i32(n);
6344 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
6345 tcg_temp_free_i32(tmp);
6346 if (insn & (1 << 6)) {
6347 tmp = neon_load_reg(rd, 1);
6348 } else {
6349 tmp = tcg_temp_new_i32();
6350 tcg_gen_movi_i32(tmp, 0);
6351 }
6352 tmp3 = neon_load_reg(rm, 1);
6353 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
6354 tcg_temp_free_i32(tmp5);
6355 tcg_temp_free_i32(tmp4);
6356 neon_store_reg(rd, 0, tmp2);
6357 neon_store_reg(rd, 1, tmp3);
6358 tcg_temp_free_i32(tmp);
6359 } else if ((insn & 0x380) == 0) {
6360 /* VDUP */
6361 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6362 return 1;
6363 }
6364 if (insn & (1 << 19)) {
6365 tmp = neon_load_reg(rm, 1);
6366 } else {
6367 tmp = neon_load_reg(rm, 0);
6368 }
6369 if (insn & (1 << 16)) {
6370 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6371 } else if (insn & (1 << 17)) {
6372 if ((insn >> 18) & 1)
6373 gen_neon_dup_high16(tmp);
6374 else
6375 gen_neon_dup_low16(tmp);
6376 }
6377 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6378 tmp2 = tcg_temp_new_i32();
6379 tcg_gen_mov_i32(tmp2, tmp);
6380 neon_store_reg(rd, pass, tmp2);
6381 }
6382 tcg_temp_free_i32(tmp);
6383 } else {
6384 return 1;
6385 }
6386 }
6387 }
6388 return 0;
6389 }
6390
6391 static int disas_cp14_read(CPUARMState *env, DisasContext *s, uint32_t insn)
6392 {
6393 int crn = (insn >> 16) & 0xf;
6394 int crm = insn & 0xf;
6395 int op1 = (insn >> 21) & 7;
6396 int op2 = (insn >> 5) & 7;
6397 int rt = (insn >> 12) & 0xf;
6398 TCGv tmp;
6399
6400 /* Minimal set of debug registers, since we don't support debug */
6401 if (op1 == 0 && crn == 0 && op2 == 0) {
6402 switch (crm) {
6403 case 0:
6404 /* DBGDIDR: just RAZ. In particular this means the
6405 * "debug architecture version" bits will read as
6406 * a reserved value, which should cause Linux to
6407 * not try to use the debug hardware.
6408 */
6409 tmp = tcg_const_i32(0);
6410 store_reg(s, rt, tmp);
6411 return 0;
6412 case 1:
6413 case 2:
6414 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6415 * don't implement memory mapped debug components
6416 */
6417 if (ENABLE_ARCH_7) {
6418 tmp = tcg_const_i32(0);
6419 store_reg(s, rt, tmp);
6420 return 0;
6421 }
6422 break;
6423 default:
6424 break;
6425 }
6426 }
6427
6428 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6429 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6430 /* TEECR */
6431 if (IS_USER(s))
6432 return 1;
6433 tmp = load_cpu_field(teecr);
6434 store_reg(s, rt, tmp);
6435 return 0;
6436 }
6437 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6438 /* TEEHBR */
6439 if (IS_USER(s) && (env->teecr & 1))
6440 return 1;
6441 tmp = load_cpu_field(teehbr);
6442 store_reg(s, rt, tmp);
6443 return 0;
6444 }
6445 }
6446 return 1;
6447 }
6448
6449 static int disas_cp14_write(CPUARMState *env, DisasContext *s, uint32_t insn)
6450 {
6451 int crn = (insn >> 16) & 0xf;
6452 int crm = insn & 0xf;
6453 int op1 = (insn >> 21) & 7;
6454 int op2 = (insn >> 5) & 7;
6455 int rt = (insn >> 12) & 0xf;
6456 TCGv tmp;
6457
6458 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6459 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6460 /* TEECR */
6461 if (IS_USER(s))
6462 return 1;
6463 tmp = load_reg(s, rt);
6464 gen_helper_set_teecr(cpu_env, tmp);
6465 tcg_temp_free_i32(tmp);
6466 return 0;
6467 }
6468 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6469 /* TEEHBR */
6470 if (IS_USER(s) && (env->teecr & 1))
6471 return 1;
6472 tmp = load_reg(s, rt);
6473 store_cpu_field(tmp, teehbr);
6474 return 0;
6475 }
6476 }
6477 return 1;
6478 }
6479
6480 static int disas_coproc_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
6481 {
6482 int cpnum;
6483
6484 cpnum = (insn >> 8) & 0xf;
6485 if (arm_feature(env, ARM_FEATURE_XSCALE)
6486 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6487 return 1;
6488
6489 switch (cpnum) {
6490 case 0:
6491 case 1:
6492 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6493 return disas_iwmmxt_insn(env, s, insn);
6494 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6495 return disas_dsp_insn(env, s, insn);
6496 }
6497 return 1;
6498 case 10:
6499 case 11:
6500 return disas_vfp_insn(env, s, insn);
6501 case 14:
6502 /* Coprocessors 7-15 are architecturally reserved by ARM.
6503 Unfortunately Intel decided to ignore this. */
6504 if (arm_feature(env, ARM_FEATURE_XSCALE))
6505 goto board;
6506 if (insn & (1 << 20))
6507 return disas_cp14_read(env, s, insn);
6508 else
6509 return disas_cp14_write(env, s, insn);
6510 case 15:
6511 return disas_cp15_insn(env, s, insn);
6512 default:
6513 board:
6514 /* Unknown coprocessor. See if the board has hooked it. */
6515 return disas_cp_insn(env, s, insn);
6516 }
6517 }
6518
6519
6520 /* Store a 64-bit value to a register pair. Clobbers val. */
6521 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6522 {
6523 TCGv tmp;
6524 tmp = tcg_temp_new_i32();
6525 tcg_gen_trunc_i64_i32(tmp, val);
6526 store_reg(s, rlow, tmp);
6527 tmp = tcg_temp_new_i32();
6528 tcg_gen_shri_i64(val, val, 32);
6529 tcg_gen_trunc_i64_i32(tmp, val);
6530 store_reg(s, rhigh, tmp);
6531 }
6532
6533 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6534 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6535 {
6536 TCGv_i64 tmp;
6537 TCGv tmp2;
6538
6539 /* Load value and extend to 64 bits. */
6540 tmp = tcg_temp_new_i64();
6541 tmp2 = load_reg(s, rlow);
6542 tcg_gen_extu_i32_i64(tmp, tmp2);
6543 tcg_temp_free_i32(tmp2);
6544 tcg_gen_add_i64(val, val, tmp);
6545 tcg_temp_free_i64(tmp);
6546 }
6547
6548 /* load and add a 64-bit value from a register pair. */
6549 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6550 {
6551 TCGv_i64 tmp;
6552 TCGv tmpl;
6553 TCGv tmph;
6554
6555 /* Load 64-bit value rd:rn. */
6556 tmpl = load_reg(s, rlow);
6557 tmph = load_reg(s, rhigh);
6558 tmp = tcg_temp_new_i64();
6559 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6560 tcg_temp_free_i32(tmpl);
6561 tcg_temp_free_i32(tmph);
6562 tcg_gen_add_i64(val, val, tmp);
6563 tcg_temp_free_i64(tmp);
6564 }
6565
6566 /* Set N and Z flags from a 64-bit value. */
6567 static void gen_logicq_cc(TCGv_i64 val)
6568 {
6569 TCGv tmp = tcg_temp_new_i32();
6570 gen_helper_logicq_cc(tmp, val);
6571 gen_logic_CC(tmp);
6572 tcg_temp_free_i32(tmp);
6573 }
6574
6575 /* Load/Store exclusive instructions are implemented by remembering
6576 the value/address loaded, and seeing if these are the same
6577 when the store is performed. This should be sufficient to implement
6578 the architecturally mandated semantics, and avoids having to monitor
6579 regular stores.
6580
6581 In system emulation mode only one CPU will be running at once, so
6582 this sequence is effectively atomic. In user emulation mode we
6583 throw an exception and handle the atomic operation elsewhere. */
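/* As an illustrative sketch (not itself emitted code), a guest
   spinlock acquire exercises this scheme as follows:

   loop: LDREX r1, [r0]      @ gen_load_exclusive records addr and value
         CMP   r1, #0
         BNE   loop
         MOV   r1, #1
         STREX r2, r1, [r0]  @ gen_store_exclusive re-reads [r0] and
         CMP   r2, #0        @ stores only if the recorded addr and
         BNE   loop          @ value still match, else sets r2 to 1
 */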
6584 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6585 TCGv addr, int size)
6586 {
6587 TCGv tmp;
6588
6589 switch (size) {
6590 case 0:
6591 tmp = gen_ld8u(addr, IS_USER(s));
6592 break;
6593 case 1:
6594 tmp = gen_ld16u(addr, IS_USER(s));
6595 break;
6596 case 2:
6597 case 3:
6598 tmp = gen_ld32(addr, IS_USER(s));
6599 break;
6600 default:
6601 abort();
6602 }
6603 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6604 store_reg(s, rt, tmp);
6605 if (size == 3) {
6606 TCGv tmp2 = tcg_temp_new_i32();
6607 tcg_gen_addi_i32(tmp2, addr, 4);
6608 tmp = gen_ld32(tmp2, IS_USER(s));
6609 tcg_temp_free_i32(tmp2);
6610 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6611 store_reg(s, rt2, tmp);
6612 }
6613 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6614 }
6615
6616 static void gen_clrex(DisasContext *s)
6617 {
6618 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6619 }
6620
6621 #ifdef CONFIG_USER_ONLY
6622 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6623 TCGv addr, int size)
6624 {
6625 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6626 tcg_gen_movi_i32(cpu_exclusive_info,
6627 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6628 gen_exception_insn(s, 4, EXCP_STREX);
6629 }
6630 #else
6631 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6632 TCGv addr, int size)
6633 {
6634 TCGv tmp;
6635 int done_label;
6636 int fail_label;
6637
6638 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6639 [addr] = {Rt};
6640 {Rd} = 0;
6641 } else {
6642 {Rd} = 1;
6643 } */
6644 fail_label = gen_new_label();
6645 done_label = gen_new_label();
6646 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6647 switch (size) {
6648 case 0:
6649 tmp = gen_ld8u(addr, IS_USER(s));
6650 break;
6651 case 1:
6652 tmp = gen_ld16u(addr, IS_USER(s));
6653 break;
6654 case 2:
6655 case 3:
6656 tmp = gen_ld32(addr, IS_USER(s));
6657 break;
6658 default:
6659 abort();
6660 }
6661 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6662 tcg_temp_free_i32(tmp);
6663 if (size == 3) {
6664 TCGv tmp2 = tcg_temp_new_i32();
6665 tcg_gen_addi_i32(tmp2, addr, 4);
6666 tmp = gen_ld32(tmp2, IS_USER(s));
6667 tcg_temp_free_i32(tmp2);
6668 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6669 tcg_temp_free_i32(tmp);
6670 }
6671 tmp = load_reg(s, rt);
6672 switch (size) {
6673 case 0:
6674 gen_st8(tmp, addr, IS_USER(s));
6675 break;
6676 case 1:
6677 gen_st16(tmp, addr, IS_USER(s));
6678 break;
6679 case 2:
6680 case 3:
6681 gen_st32(tmp, addr, IS_USER(s));
6682 break;
6683 default:
6684 abort();
6685 }
6686 if (size == 3) {
6687 tcg_gen_addi_i32(addr, addr, 4);
6688 tmp = load_reg(s, rt2);
6689 gen_st32(tmp, addr, IS_USER(s));
6690 }
6691 tcg_gen_movi_i32(cpu_R[rd], 0);
6692 tcg_gen_br(done_label);
6693 gen_set_label(fail_label);
6694 tcg_gen_movi_i32(cpu_R[rd], 1);
6695 gen_set_label(done_label);
6696 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6697 }
6698 #endif
6699
6700 static void disas_arm_insn(CPUARMState *env, DisasContext *s)
6701 {
6702 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6703 TCGv tmp;
6704 TCGv tmp2;
6705 TCGv tmp3;
6706 TCGv addr;
6707 TCGv_i64 tmp64;
6708
6709 insn = arm_ldl_code(s->pc, s->bswap_code);
6710 s->pc += 4;
6711
6712 /* M variants do not implement ARM mode. */
6713 if (IS_M(env))
6714 goto illegal_op;
6715 cond = insn >> 28;
6716 if (cond == 0xf) {
6717 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6718 * choose to UNDEF. In ARMv5 and above the space is used
6719 * for miscellaneous unconditional instructions.
6720 */
6721 ARCH(5);
6722
6723 /* Unconditional instructions. */
6724 if (((insn >> 25) & 7) == 1) {
6725 /* NEON Data processing. */
6726 if (!arm_feature(env, ARM_FEATURE_NEON))
6727 goto illegal_op;
6728
6729 if (disas_neon_data_insn(env, s, insn))
6730 goto illegal_op;
6731 return;
6732 }
6733 if ((insn & 0x0f100000) == 0x04000000) {
6734 /* NEON load/store. */
6735 if (!arm_feature(env, ARM_FEATURE_NEON))
6736 goto illegal_op;
6737
6738 if (disas_neon_ls_insn(env, s, insn))
6739 goto illegal_op;
6740 return;
6741 }
6742 if (((insn & 0x0f30f000) == 0x0510f000) ||
6743 ((insn & 0x0f30f010) == 0x0710f000)) {
6744 if ((insn & (1 << 22)) == 0) {
6745 /* PLDW; v7MP */
6746 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6747 goto illegal_op;
6748 }
6749 }
6750 /* Otherwise PLD; v5TE+ */
6751 ARCH(5TE);
6752 return;
6753 }
6754 if (((insn & 0x0f70f000) == 0x0450f000) ||
6755 ((insn & 0x0f70f010) == 0x0650f000)) {
6756 ARCH(7);
6757 return; /* PLI; V7 */
6758 }
6759 if (((insn & 0x0f700000) == 0x04100000) ||
6760 ((insn & 0x0f700010) == 0x06100000)) {
6761 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6762 goto illegal_op;
6763 }
6764 return; /* v7MP: Unallocated memory hint: must NOP */
6765 }
6766
6767 if ((insn & 0x0ffffdff) == 0x01010000) {
6768 ARCH(6);
6769 /* setend */
6770 if (insn & (1 << 9)) {
6771 /* BE8 mode not implemented. */
6772 goto illegal_op;
6773 }
6774 return;
6775 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6776 switch ((insn >> 4) & 0xf) {
6777 case 1: /* clrex */
6778 ARCH(6K);
6779 gen_clrex(s);
6780 return;
6781 case 4: /* dsb */
6782 case 5: /* dmb */
6783 case 6: /* isb */
6784 ARCH(7);
6785 /* We don't emulate caches, so these are no-ops. */
6786 return;
6787 default:
6788 goto illegal_op;
6789 }
6790 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6791 /* srs */
6792 int32_t offset;
6793 if (IS_USER(s))
6794 goto illegal_op;
6795 ARCH(6);
6796 op1 = (insn & 0x1f);
6797 addr = tcg_temp_new_i32();
6798 tmp = tcg_const_i32(op1);
6799 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6800 tcg_temp_free_i32(tmp);
6801 i = (insn >> 23) & 3;
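/* Bits [24:23] are the P (pre-index) and U (increment) bits; the four
   combinations give the DA/IA/DB/IB addressing modes, expressed below
   as an initial offset from the banked r13. */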
6802 switch (i) {
6803 case 0: offset = -4; break; /* DA */
6804 case 1: offset = 0; break; /* IA */
6805 case 2: offset = -8; break; /* DB */
6806 case 3: offset = 4; break; /* IB */
6807 default: abort();
6808 }
6809 if (offset)
6810 tcg_gen_addi_i32(addr, addr, offset);
6811 tmp = load_reg(s, 14);
6812 gen_st32(tmp, addr, 0);
6813 tmp = load_cpu_field(spsr);
6814 tcg_gen_addi_i32(addr, addr, 4);
6815 gen_st32(tmp, addr, 0);
6816 if (insn & (1 << 21)) {
6817 /* Base writeback. */
6818 switch (i) {
6819 case 0: offset = -8; break;
6820 case 1: offset = 4; break;
6821 case 2: offset = -4; break;
6822 case 3: offset = 0; break;
6823 default: abort();
6824 }
6825 if (offset)
6826 tcg_gen_addi_i32(addr, addr, offset);
6827 tmp = tcg_const_i32(op1);
6828 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6829 tcg_temp_free_i32(tmp);
6830 tcg_temp_free_i32(addr);
6831 } else {
6832 tcg_temp_free_i32(addr);
6833 }
6834 return;
6835 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6836 /* rfe */
6837 int32_t offset;
6838 if (IS_USER(s))
6839 goto illegal_op;
6840 ARCH(6);
6841 rn = (insn >> 16) & 0xf;
6842 addr = load_reg(s, rn);
6843 i = (insn >> 23) & 3;
6844 switch (i) {
6845 case 0: offset = -4; break; /* DA */
6846 case 1: offset = 0; break; /* IA */
6847 case 2: offset = -8; break; /* DB */
6848 case 3: offset = 4; break; /* IB */
6849 default: abort();
6850 }
6851 if (offset)
6852 tcg_gen_addi_i32(addr, addr, offset);
6853 /* Load PC into tmp and CPSR into tmp2. */
6854 tmp = gen_ld32(addr, 0);
6855 tcg_gen_addi_i32(addr, addr, 4);
6856 tmp2 = gen_ld32(addr, 0);
6857 if (insn & (1 << 21)) {
6858 /* Base writeback. */
6859 switch (i) {
6860 case 0: offset = -8; break;
6861 case 1: offset = 4; break;
6862 case 2: offset = -4; break;
6863 case 3: offset = 0; break;
6864 default: abort();
6865 }
6866 if (offset)
6867 tcg_gen_addi_i32(addr, addr, offset);
6868 store_reg(s, rn, addr);
6869 } else {
6870 tcg_temp_free_i32(addr);
6871 }
6872 gen_rfe(s, tmp, tmp2);
6873 return;
6874 } else if ((insn & 0x0e000000) == 0x0a000000) {
6875 /* branch link and change to thumb (blx <offset>) */
6876 int32_t offset;
6877
6878 val = (uint32_t)s->pc;
6879 tmp = tcg_temp_new_i32();
6880 tcg_gen_movi_i32(tmp, val);
6881 store_reg(s, 14, tmp);
6882 /* Sign-extend the 24-bit offset */
6883 offset = (((int32_t)insn) << 8) >> 8;
6884 /* offset * 4 + bit24 * 2 + (thumb bit) */
6885 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6886 /* pipeline offset */
6887 val += 4;
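/* Illustrative example: for a BLX <offset> at address A with imm24 == 0
   and the H bit (bit 24) clear, s->pc is already A + 4, so val ends up
   as (A + 8) | 1, i.e. the ARM-visible PC with the Thumb bit set. */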
6888 /* protected by ARCH(5); above, near the start of uncond block */
6889 gen_bx_im(s, val);
6890 return;
6891 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6892 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6893 /* iWMMXt register transfer. */
6894 if (env->cp15.c15_cpar & (1 << 1))
6895 if (!disas_iwmmxt_insn(env, s, insn))
6896 return;
6897 }
6898 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6899 /* Coprocessor double register transfer. */
6900 ARCH(5TE);
6901 } else if ((insn & 0x0f000010) == 0x0e000010) {
6902 /* Additional coprocessor register transfer. */
6903 } else if ((insn & 0x0ff10020) == 0x01000000) {
6904 uint32_t mask;
6905 uint32_t val;
6906 /* cps (privileged) */
6907 if (IS_USER(s))
6908 return;
6909 mask = val = 0;
6910 if (insn & (1 << 19)) {
6911 if (insn & (1 << 8))
6912 mask |= CPSR_A;
6913 if (insn & (1 << 7))
6914 mask |= CPSR_I;
6915 if (insn & (1 << 6))
6916 mask |= CPSR_F;
6917 if (insn & (1 << 18))
6918 val |= mask;
6919 }
6920 if (insn & (1 << 17)) {
6921 mask |= CPSR_M;
6922 val |= (insn & 0x1f);
6923 }
6924 if (mask) {
6925 gen_set_psr_im(s, mask, 0, val);
6926 }
6927 return;
6928 }
6929 goto illegal_op;
6930 }
6931 if (cond != 0xe) {
6932 /* if not always executed, we generate a conditional jump to
6933 the next instruction */
6934 s->condlabel = gen_new_label();
6935 gen_test_cc(cond ^ 1, s->condlabel);
6936 s->condjmp = 1;
6937 }
6938 if ((insn & 0x0f900000) == 0x03000000) {
6939 if ((insn & (1 << 21)) == 0) {
6940 ARCH(6T2);
6941 rd = (insn >> 12) & 0xf;
6942 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6943 if ((insn & (1 << 22)) == 0) {
6944 /* MOVW */
6945 tmp = tcg_temp_new_i32();
6946 tcg_gen_movi_i32(tmp, val);
6947 } else {
6948 /* MOVT */
6949 tmp = load_reg(s, rd);
6950 tcg_gen_ext16u_i32(tmp, tmp);
6951 tcg_gen_ori_i32(tmp, tmp, val << 16);
6952 }
6953 store_reg(s, rd, tmp);
6954 } else {
6955 if (((insn >> 12) & 0xf) != 0xf)
6956 goto illegal_op;
6957 if (((insn >> 16) & 0xf) == 0) {
6958 gen_nop_hint(s, insn & 0xff);
6959 } else {
6960 /* CPSR = immediate */
6961 val = insn & 0xff;
6962 shift = ((insn >> 8) & 0xf) * 2;
6963 if (shift)
6964 val = (val >> shift) | (val << (32 - shift));
6965 i = ((insn & (1 << 22)) != 0);
6966 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6967 goto illegal_op;
6968 }
6969 }
6970 } else if ((insn & 0x0f900000) == 0x01000000
6971 && (insn & 0x00000090) != 0x00000090) {
6972 /* miscellaneous instructions */
6973 op1 = (insn >> 21) & 3;
6974 sh = (insn >> 4) & 0xf;
6975 rm = insn & 0xf;
6976 switch (sh) {
6977 case 0x0: /* move program status register */
6978 if (op1 & 1) {
6979 /* PSR = reg */
6980 tmp = load_reg(s, rm);
6981 i = ((op1 & 2) != 0);
6982 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6983 goto illegal_op;
6984 } else {
6985 /* reg = PSR */
6986 rd = (insn >> 12) & 0xf;
6987 if (op1 & 2) {
6988 if (IS_USER(s))
6989 goto illegal_op;
6990 tmp = load_cpu_field(spsr);
6991 } else {
6992 tmp = tcg_temp_new_i32();
6993 gen_helper_cpsr_read(tmp);
6994 }
6995 store_reg(s, rd, tmp);
6996 }
6997 break;
6998 case 0x1:
6999 if (op1 == 1) {
7000 /* branch/exchange thumb (bx). */
7001 ARCH(4T);
7002 tmp = load_reg(s, rm);
7003 gen_bx(s, tmp);
7004 } else if (op1 == 3) {
7005 /* clz */
7006 ARCH(5);
7007 rd = (insn >> 12) & 0xf;
7008 tmp = load_reg(s, rm);
7009 gen_helper_clz(tmp, tmp);
7010 store_reg(s, rd, tmp);
7011 } else {
7012 goto illegal_op;
7013 }
7014 break;
7015 case 0x2:
7016 if (op1 == 1) {
7017 ARCH(5J); /* bxj */
7018 /* Trivial implementation equivalent to bx. */
7019 tmp = load_reg(s, rm);
7020 gen_bx(s, tmp);
7021 } else {
7022 goto illegal_op;
7023 }
7024 break;
7025 case 0x3:
7026 if (op1 != 1)
7027 goto illegal_op;
7028
7029 ARCH(5);
7030 /* branch link/exchange thumb (blx) */
7031 tmp = load_reg(s, rm);
7032 tmp2 = tcg_temp_new_i32();
7033 tcg_gen_movi_i32(tmp2, s->pc);
7034 store_reg(s, 14, tmp2);
7035 gen_bx(s, tmp);
7036 break;
7037 case 0x5: /* saturating add/subtract */
7038 ARCH(5TE);
7039 rd = (insn >> 12) & 0xf;
7040 rn = (insn >> 16) & 0xf;
7041 tmp = load_reg(s, rm);
7042 tmp2 = load_reg(s, rn);
7043 if (op1 & 2)
7044 gen_helper_double_saturate(tmp2, tmp2);
7045 if (op1 & 1)
7046 gen_helper_sub_saturate(tmp, tmp, tmp2);
7047 else
7048 gen_helper_add_saturate(tmp, tmp, tmp2);
7049 tcg_temp_free_i32(tmp2);
7050 store_reg(s, rd, tmp);
7051 break;
7052 case 7:
7053 /* SMC instruction (op1 == 3)
7054 and undefined instructions (op1 == 0 || op1 == 2)
7055 will trap */
7056 if (op1 != 1) {
7057 goto illegal_op;
7058 }
7059 /* bkpt */
7060 ARCH(5);
7061 gen_exception_insn(s, 4, EXCP_BKPT);
7062 break;
7063 case 0x8: /* signed multiply */
7064 case 0xa:
7065 case 0xc:
7066 case 0xe:
7067 ARCH(5TE);
7068 rs = (insn >> 8) & 0xf;
7069 rn = (insn >> 12) & 0xf;
7070 rd = (insn >> 16) & 0xf;
7071 if (op1 == 1) {
7072 /* (32 * 16) >> 16 */
7073 tmp = load_reg(s, rm);
7074 tmp2 = load_reg(s, rs);
7075 if (sh & 4)
7076 tcg_gen_sari_i32(tmp2, tmp2, 16);
7077 else
7078 gen_sxth(tmp2);
7079 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7080 tcg_gen_shri_i64(tmp64, tmp64, 16);
7081 tmp = tcg_temp_new_i32();
7082 tcg_gen_trunc_i64_i32(tmp, tmp64);
7083 tcg_temp_free_i64(tmp64);
7084 if ((sh & 2) == 0) {
7085 tmp2 = load_reg(s, rn);
7086 gen_helper_add_setq(tmp, tmp, tmp2);
7087 tcg_temp_free_i32(tmp2);
7088 }
7089 store_reg(s, rd, tmp);
7090 } else {
7091 /* 16 * 16 */
7092 tmp = load_reg(s, rm);
7093 tmp2 = load_reg(s, rs);
7094 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7095 tcg_temp_free_i32(tmp2);
7096 if (op1 == 2) {
7097 tmp64 = tcg_temp_new_i64();
7098 tcg_gen_ext_i32_i64(tmp64, tmp);
7099 tcg_temp_free_i32(tmp);
7100 gen_addq(s, tmp64, rn, rd);
7101 gen_storeq_reg(s, rn, rd, tmp64);
7102 tcg_temp_free_i64(tmp64);
7103 } else {
7104 if (op1 == 0) {
7105 tmp2 = load_reg(s, rn);
7106 gen_helper_add_setq(tmp, tmp, tmp2);
7107 tcg_temp_free_i32(tmp2);
7108 }
7109 store_reg(s, rd, tmp);
7110 }
7111 }
7112 break;
7113 default:
7114 goto illegal_op;
7115 }
7116 } else if (((insn & 0x0e000000) == 0 &&
7117 (insn & 0x00000090) != 0x90) ||
7118 ((insn & 0x0e000000) == (1 << 25))) {
7119 int set_cc, logic_cc, shiftop;
7120
7121 op1 = (insn >> 21) & 0xf;
7122 set_cc = (insn >> 20) & 1;
7123 logic_cc = table_logic_cc[op1] & set_cc;
7124
7125 /* data processing instruction */
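/* op1 holds the standard ARM data-processing opcode:
   0=AND 1=EOR 2=SUB 3=RSB 4=ADD 5=ADC 6=SBC 7=RSC
   8=TST 9=TEQ a=CMP b=CMN c=ORR d=MOV e=BIC f=MVN */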
7126 if (insn & (1 << 25)) {
7127 /* immediate operand */
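/* An ARM immediate is an 8-bit value rotated right by twice the 4-bit
   rotate field; e.g. 0xff with a rotate field of 4 encodes 0xff000000. */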
7128 val = insn & 0xff;
7129 shift = ((insn >> 8) & 0xf) * 2;
7130 if (shift) {
7131 val = (val >> shift) | (val << (32 - shift));
7132 }
7133 tmp2 = tcg_temp_new_i32();
7134 tcg_gen_movi_i32(tmp2, val);
7135 if (logic_cc && shift) {
7136 gen_set_CF_bit31(tmp2);
7137 }
7138 } else {
7139 /* register */
7140 rm = (insn) & 0xf;
7141 tmp2 = load_reg(s, rm);
7142 shiftop = (insn >> 5) & 3;
7143 if (!(insn & (1 << 4))) {
7144 shift = (insn >> 7) & 0x1f;
7145 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7146 } else {
7147 rs = (insn >> 8) & 0xf;
7148 tmp = load_reg(s, rs);
7149 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
7150 }
7151 }
7152 if (op1 != 0x0f && op1 != 0x0d) {
7153 rn = (insn >> 16) & 0xf;
7154 tmp = load_reg(s, rn);
7155 } else {
7156 TCGV_UNUSED(tmp);
7157 }
7158 rd = (insn >> 12) & 0xf;
7159 switch (op1) {
7160 case 0x00:
7161 tcg_gen_and_i32(tmp, tmp, tmp2);
7162 if (logic_cc) {
7163 gen_logic_CC(tmp);
7164 }
7165 store_reg_bx(env, s, rd, tmp);
7166 break;
7167 case 0x01:
7168 tcg_gen_xor_i32(tmp, tmp, tmp2);
7169 if (logic_cc) {
7170 gen_logic_CC(tmp);
7171 }
7172 store_reg_bx(env, s, rd, tmp);
7173 break;
7174 case 0x02:
7175 if (set_cc && rd == 15) {
7176 /* SUBS r15, ... is used for exception return. */
7177 if (IS_USER(s)) {
7178 goto illegal_op;
7179 }
7180 gen_helper_sub_cc(tmp, tmp, tmp2);
7181 gen_exception_return(s, tmp);
7182 } else {
7183 if (set_cc) {
7184 gen_helper_sub_cc(tmp, tmp, tmp2);
7185 } else {
7186 tcg_gen_sub_i32(tmp, tmp, tmp2);
7187 }
7188 store_reg_bx(env, s, rd, tmp);
7189 }
7190 break;
7191 case 0x03:
7192 if (set_cc) {
7193 gen_helper_sub_cc(tmp, tmp2, tmp);
7194 } else {
7195 tcg_gen_sub_i32(tmp, tmp2, tmp);
7196 }
7197 store_reg_bx(env, s, rd, tmp);
7198 break;
7199 case 0x04:
7200 if (set_cc) {
7201 gen_helper_add_cc(tmp, tmp, tmp2);
7202 } else {
7203 tcg_gen_add_i32(tmp, tmp, tmp2);
7204 }
7205 store_reg_bx(env, s, rd, tmp);
7206 break;
7207 case 0x05:
7208 if (set_cc) {
7209 gen_helper_adc_cc(tmp, tmp, tmp2);
7210 } else {
7211 gen_add_carry(tmp, tmp, tmp2);
7212 }
7213 store_reg_bx(env, s, rd, tmp);
7214 break;
7215 case 0x06:
7216 if (set_cc) {
7217 gen_helper_sbc_cc(tmp, tmp, tmp2);
7218 } else {
7219 gen_sub_carry(tmp, tmp, tmp2);
7220 }
7221 store_reg_bx(env, s, rd, tmp);
7222 break;
7223 case 0x07:
7224 if (set_cc) {
7225 gen_helper_sbc_cc(tmp, tmp2, tmp);
7226 } else {
7227 gen_sub_carry(tmp, tmp2, tmp);
7228 }
7229 store_reg_bx(env, s, rd, tmp);
7230 break;
7231 case 0x08:
7232 if (set_cc) {
7233 tcg_gen_and_i32(tmp, tmp, tmp2);
7234 gen_logic_CC(tmp);
7235 }
7236 tcg_temp_free_i32(tmp);
7237 break;
7238 case 0x09:
7239 if (set_cc) {
7240 tcg_gen_xor_i32(tmp, tmp, tmp2);
7241 gen_logic_CC(tmp);
7242 }
7243 tcg_temp_free_i32(tmp);
7244 break;
7245 case 0x0a:
7246 if (set_cc) {
7247 gen_helper_sub_cc(tmp, tmp, tmp2);
7248 }
7249 tcg_temp_free_i32(tmp);
7250 break;
7251 case 0x0b:
7252 if (set_cc) {
7253 gen_helper_add_cc(tmp, tmp, tmp2);
7254 }
7255 tcg_temp_free_i32(tmp);
7256 break;
7257 case 0x0c:
7258 tcg_gen_or_i32(tmp, tmp, tmp2);
7259 if (logic_cc) {
7260 gen_logic_CC(tmp);
7261 }
7262 store_reg_bx(env, s, rd, tmp);
7263 break;
7264 case 0x0d:
7265 if (logic_cc && rd == 15) {
7266 /* MOVS r15, ... is used for exception return. */
7267 if (IS_USER(s)) {
7268 goto illegal_op;
7269 }
7270 gen_exception_return(s, tmp2);
7271 } else {
7272 if (logic_cc) {
7273 gen_logic_CC(tmp2);
7274 }
7275 store_reg_bx(env, s, rd, tmp2);
7276 }
7277 break;
7278 case 0x0e:
7279 tcg_gen_andc_i32(tmp, tmp, tmp2);
7280 if (logic_cc) {
7281 gen_logic_CC(tmp);
7282 }
7283 store_reg_bx(env, s, rd, tmp);
7284 break;
7285 default:
7286 case 0x0f:
7287 tcg_gen_not_i32(tmp2, tmp2);
7288 if (logic_cc) {
7289 gen_logic_CC(tmp2);
7290 }
7291 store_reg_bx(env, s, rd, tmp2);
7292 break;
7293 }
7294 if (op1 != 0x0f && op1 != 0x0d) {
7295 tcg_temp_free_i32(tmp2);
7296 }
7297 } else {
7298 /* other instructions */
7299 op1 = (insn >> 24) & 0xf;
7300 switch (op1) {
7301 case 0x0:
7302 case 0x1:
7303 /* multiplies, extra load/stores */
7304 sh = (insn >> 5) & 3;
7305 if (sh == 0) {
7306 if (op1 == 0x0) {
7307 rd = (insn >> 16) & 0xf;
7308 rn = (insn >> 12) & 0xf;
7309 rs = (insn >> 8) & 0xf;
7310 rm = (insn) & 0xf;
7311 op1 = (insn >> 20) & 0xf;
7312 switch (op1) {
7313 case 0: case 1: case 2: case 3: case 6:
7314 /* 32 bit mul */
7315 tmp = load_reg(s, rs);
7316 tmp2 = load_reg(s, rm);
7317 tcg_gen_mul_i32(tmp, tmp, tmp2);
7318 tcg_temp_free_i32(tmp2);
7319 if (insn & (1 << 22)) {
7320 /* Subtract (mls) */
7321 ARCH(6T2);
7322 tmp2 = load_reg(s, rn);
7323 tcg_gen_sub_i32(tmp, tmp2, tmp);
7324 tcg_temp_free_i32(tmp2);
7325 } else if (insn & (1 << 21)) {
7326 /* Add */
7327 tmp2 = load_reg(s, rn);
7328 tcg_gen_add_i32(tmp, tmp, tmp2);
7329 tcg_temp_free_i32(tmp2);
7330 }
7331 if (insn & (1 << 20))
7332 gen_logic_CC(tmp);
7333 store_reg(s, rd, tmp);
7334 break;
7335 case 4:
7336 /* 64 bit mul double accumulate (UMAAL) */
7337 ARCH(6);
7338 tmp = load_reg(s, rs);
7339 tmp2 = load_reg(s, rm);
7340 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7341 gen_addq_lo(s, tmp64, rn);
7342 gen_addq_lo(s, tmp64, rd);
7343 gen_storeq_reg(s, rn, rd, tmp64);
7344 tcg_temp_free_i64(tmp64);
7345 break;
7346 case 8: case 9: case 10: case 11:
7347 case 12: case 13: case 14: case 15:
7348 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7349 tmp = load_reg(s, rs);
7350 tmp2 = load_reg(s, rm);
7351 if (insn & (1 << 22)) {
7352 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7353 } else {
7354 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7355 }
7356 if (insn & (1 << 21)) { /* mult accumulate */
7357 gen_addq(s, tmp64, rn, rd);
7358 }
7359 if (insn & (1 << 20)) {
7360 gen_logicq_cc(tmp64);
7361 }
7362 gen_storeq_reg(s, rn, rd, tmp64);
7363 tcg_temp_free_i64(tmp64);
7364 break;
7365 default:
7366 goto illegal_op;
7367 }
7368 } else {
7369 rn = (insn >> 16) & 0xf;
7370 rd = (insn >> 12) & 0xf;
7371 if (insn & (1 << 23)) {
7372 /* load/store exclusive */
7373 op1 = (insn >> 21) & 0x3;
7374 if (op1)
7375 ARCH(6K);
7376 else
7377 ARCH(6);
7378 addr = tcg_temp_local_new_i32();
7379 load_reg_var(s, addr, rn);
7380 if (insn & (1 << 20)) {
7381 switch (op1) {
7382 case 0: /* ldrex */
7383 gen_load_exclusive(s, rd, 15, addr, 2);
7384 break;
7385 case 1: /* ldrexd */
7386 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7387 break;
7388 case 2: /* ldrexb */
7389 gen_load_exclusive(s, rd, 15, addr, 0);
7390 break;
7391 case 3: /* ldrexh */
7392 gen_load_exclusive(s, rd, 15, addr, 1);
7393 break;
7394 default:
7395 abort();
7396 }
7397 } else {
7398 rm = insn & 0xf;
7399 switch (op1) {
7400 case 0: /* strex */
7401 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7402 break;
7403 case 1: /* strexd */
7404 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7405 break;
7406 case 2: /* strexb */
7407 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7408 break;
7409 case 3: /* strexh */
7410 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7411 break;
7412 default:
7413 abort();
7414 }
7415 }
7416 tcg_temp_free(addr);
7417 } else {
7418 /* SWP instruction */
7419 rm = (insn) & 0xf;
7420
7421 /* ??? This is not really atomic. However, we know
7422 we never have multiple CPUs running in parallel,
7423 so it is good enough. */
7424 addr = load_reg(s, rn);
7425 tmp = load_reg(s, rm);
7426 if (insn & (1 << 22)) {
7427 tmp2 = gen_ld8u(addr, IS_USER(s));
7428 gen_st8(tmp, addr, IS_USER(s));
7429 } else {
7430 tmp2 = gen_ld32(addr, IS_USER(s));
7431 gen_st32(tmp, addr, IS_USER(s));
7432 }
7433 tcg_temp_free_i32(addr);
7434 store_reg(s, rd, tmp2);
7435 }
7436 }
7437 } else {
7438 int address_offset;
7439 int load;
7440 /* Misc load/store */
7441 rn = (insn >> 16) & 0xf;
7442 rd = (insn >> 12) & 0xf;
7443 addr = load_reg(s, rn);
7444 if (insn & (1 << 24))
7445 gen_add_datah_offset(s, insn, 0, addr);
7446 address_offset = 0;
7447 if (insn & (1 << 20)) {
7448 /* load */
7449 switch (sh) {
7450 case 1:
7451 tmp = gen_ld16u(addr, IS_USER(s));
7452 break;
7453 case 2:
7454 tmp = gen_ld8s(addr, IS_USER(s));
7455 break;
7456 default:
7457 case 3:
7458 tmp = gen_ld16s(addr, IS_USER(s));
7459 break;
7460 }
7461 load = 1;
7462 } else if (sh & 2) {
7463 ARCH(5TE);
7464 /* doubleword */
7465 if (sh & 1) {
7466 /* store */
7467 tmp = load_reg(s, rd);
7468 gen_st32(tmp, addr, IS_USER(s));
7469 tcg_gen_addi_i32(addr, addr, 4);
7470 tmp = load_reg(s, rd + 1);
7471 gen_st32(tmp, addr, IS_USER(s));
7472 load = 0;
7473 } else {
7474 /* load */
7475 tmp = gen_ld32(addr, IS_USER(s));
7476 store_reg(s, rd, tmp);
7477 tcg_gen_addi_i32(addr, addr, 4);
7478 tmp = gen_ld32(addr, IS_USER(s));
7479 rd++;
7480 load = 1;
7481 }
7482 address_offset = -4;
7483 } else {
7484 /* store */
7485 tmp = load_reg(s, rd);
7486 gen_st16(tmp, addr, IS_USER(s));
7487 load = 0;
7488 }
7489 /* Perform base writeback before storing the loaded value to
7490 ensure correct behavior with overlapping index registers.
7491 ldrd with base writeback is undefined if the
7492 destination and index registers overlap. */
7493 if (!(insn & (1 << 24))) {
7494 gen_add_datah_offset(s, insn, address_offset, addr);
7495 store_reg(s, rn, addr);
7496 } else if (insn & (1 << 21)) {
7497 if (address_offset)
7498 tcg_gen_addi_i32(addr, addr, address_offset);
7499 store_reg(s, rn, addr);
7500 } else {
7501 tcg_temp_free_i32(addr);
7502 }
7503 if (load) {
7504 /* Complete the load. */
7505 store_reg(s, rd, tmp);
7506 }
7507 }
7508 break;
7509 case 0x4:
7510 case 0x5:
7511 goto do_ldst;
7512 case 0x6:
7513 case 0x7:
7514 if (insn & (1 << 4)) {
7515 ARCH(6);
7516 /* ARMv6 Media instructions. */
7517 rm = insn & 0xf;
7518 rn = (insn >> 16) & 0xf;
7519 rd = (insn >> 12) & 0xf;
7520 rs = (insn >> 8) & 0xf;
7521 switch ((insn >> 23) & 3) {
7522 case 0: /* Parallel add/subtract. */
7523 op1 = (insn >> 20) & 7;
7524 tmp = load_reg(s, rn);
7525 tmp2 = load_reg(s, rm);
7526 sh = (insn >> 5) & 7;
7527 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7528 goto illegal_op;
7529 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7530 tcg_temp_free_i32(tmp2);
7531 store_reg(s, rd, tmp);
7532 break;
7533 case 1:
7534 if ((insn & 0x00700020) == 0) {
7535 /* Halfword pack. */
7536 tmp = load_reg(s, rn);
7537 tmp2 = load_reg(s, rm);
7538 shift = (insn >> 7) & 0x1f;
7539 if (insn & (1 << 6)) {
7540 /* pkhtb */
7541 if (shift == 0)
7542 shift = 31;
7543 tcg_gen_sari_i32(tmp2, tmp2, shift);
7544 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7545 tcg_gen_ext16u_i32(tmp2, tmp2);
7546 } else {
7547 /* pkhbt */
7548 if (shift)
7549 tcg_gen_shli_i32(tmp2, tmp2, shift);
7550 tcg_gen_ext16u_i32(tmp, tmp);
7551 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7552 }
7553 tcg_gen_or_i32(tmp, tmp, tmp2);
7554 tcg_temp_free_i32(tmp2);
7555 store_reg(s, rd, tmp);
7556 } else if ((insn & 0x00200020) == 0x00200000) {
7557 /* [us]sat */
7558 tmp = load_reg(s, rm);
7559 shift = (insn >> 7) & 0x1f;
7560 if (insn & (1 << 6)) {
7561 if (shift == 0)
7562 shift = 31;
7563 tcg_gen_sari_i32(tmp, tmp, shift);
7564 } else {
7565 tcg_gen_shli_i32(tmp, tmp, shift);
7566 }
7567 sh = (insn >> 16) & 0x1f;
7568 tmp2 = tcg_const_i32(sh);
7569 if (insn & (1 << 22))
7570 gen_helper_usat(tmp, tmp, tmp2);
7571 else
7572 gen_helper_ssat(tmp, tmp, tmp2);
7573 tcg_temp_free_i32(tmp2);
7574 store_reg(s, rd, tmp);
7575 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7576 /* [us]sat16 */
7577 tmp = load_reg(s, rm);
7578 sh = (insn >> 16) & 0x1f;
7579 tmp2 = tcg_const_i32(sh);
7580 if (insn & (1 << 22))
7581 gen_helper_usat16(tmp, tmp, tmp2);
7582 else
7583 gen_helper_ssat16(tmp, tmp, tmp2);
7584 tcg_temp_free_i32(tmp2);
7585 store_reg(s, rd, tmp);
7586 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7587 /* Select bytes. */
7588 tmp = load_reg(s, rn);
7589 tmp2 = load_reg(s, rm);
7590 tmp3 = tcg_temp_new_i32();
7591 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
7592 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7593 tcg_temp_free_i32(tmp3);
7594 tcg_temp_free_i32(tmp2);
7595 store_reg(s, rd, tmp);
7596 } else if ((insn & 0x000003e0) == 0x00000060) {
7597 tmp = load_reg(s, rm);
7598 shift = (insn >> 10) & 3;
7599 /* ??? In many cases it's not necessary to do a
7600 rotate; a shift is sufficient. */
7601 if (shift != 0)
7602 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7603 op1 = (insn >> 20) & 7;
7604 switch (op1) {
7605 case 0: gen_sxtb16(tmp); break;
7606 case 2: gen_sxtb(tmp); break;
7607 case 3: gen_sxth(tmp); break;
7608 case 4: gen_uxtb16(tmp); break;
7609 case 6: gen_uxtb(tmp); break;
7610 case 7: gen_uxth(tmp); break;
7611 default: goto illegal_op;
7612 }
7613 if (rn != 15) {
7614 tmp2 = load_reg(s, rn);
7615 if ((op1 & 3) == 0) {
7616 gen_add16(tmp, tmp2);
7617 } else {
7618 tcg_gen_add_i32(tmp, tmp, tmp2);
7619 tcg_temp_free_i32(tmp2);
7620 }
7621 }
7622 store_reg(s, rd, tmp);
7623 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7624 /* rev */
7625 tmp = load_reg(s, rm);
7626 if (insn & (1 << 22)) {
7627 if (insn & (1 << 7)) {
7628 gen_revsh(tmp);
7629 } else {
7630 ARCH(6T2);
7631 gen_helper_rbit(tmp, tmp);
7632 }
7633 } else {
7634 if (insn & (1 << 7))
7635 gen_rev16(tmp);
7636 else
7637 tcg_gen_bswap32_i32(tmp, tmp);
7638 }
7639 store_reg(s, rd, tmp);
7640 } else {
7641 goto illegal_op;
7642 }
7643 break;
7644 case 2: /* Multiplies (Type 3). */
7645 switch ((insn >> 20) & 0x7) {
7646 case 5:
7647 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7648 /* op2 not 00x or 11x : UNDEF */
7649 goto illegal_op;
7650 }
7651 /* Signed multiply most significant [accumulate].
7652 (SMMUL, SMMLA, SMMLS) */
7653 tmp = load_reg(s, rm);
7654 tmp2 = load_reg(s, rs);
7655 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7656
7657 if (rd != 15) {
7658 tmp = load_reg(s, rd);
7659 if (insn & (1 << 6)) {
7660 tmp64 = gen_subq_msw(tmp64, tmp);
7661 } else {
7662 tmp64 = gen_addq_msw(tmp64, tmp);
7663 }
7664 }
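/* The rounding (R) variant sets bit 5: adding 0x80000000 here rounds
   the 64-bit product to nearest before the high word is taken below. */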
7665 if (insn & (1 << 5)) {
7666 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7667 }
7668 tcg_gen_shri_i64(tmp64, tmp64, 32);
7669 tmp = tcg_temp_new_i32();
7670 tcg_gen_trunc_i64_i32(tmp, tmp64);
7671 tcg_temp_free_i64(tmp64);
7672 store_reg(s, rn, tmp);
7673 break;
7674 case 0:
7675 case 4:
7676 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7677 if (insn & (1 << 7)) {
7678 goto illegal_op;
7679 }
7680 tmp = load_reg(s, rm);
7681 tmp2 = load_reg(s, rs);
7682 if (insn & (1 << 5))
7683 gen_swap_half(tmp2);
7684 gen_smul_dual(tmp, tmp2);
7685 if (insn & (1 << 6)) {
7686 /* This subtraction cannot overflow. */
7687 tcg_gen_sub_i32(tmp, tmp, tmp2);
7688 } else {
7689 /* This addition cannot overflow 32 bits;
7690 * however it may overflow when considered as a signed
7691 * operation, in which case we must set the Q flag.
7692 */
7693 gen_helper_add_setq(tmp, tmp, tmp2);
7694 }
7695 tcg_temp_free_i32(tmp2);
7696 if (insn & (1 << 22)) {
7697 /* smlald, smlsld */
7698 tmp64 = tcg_temp_new_i64();
7699 tcg_gen_ext_i32_i64(tmp64, tmp);
7700 tcg_temp_free_i32(tmp);
7701 gen_addq(s, tmp64, rd, rn);
7702 gen_storeq_reg(s, rd, rn, tmp64);
7703 tcg_temp_free_i64(tmp64);
7704 } else {
7705 /* smuad, smusd, smlad, smlsd */
7706 if (rd != 15)
7707 {
7708 tmp2 = load_reg(s, rd);
7709 gen_helper_add_setq(tmp, tmp, tmp2);
7710 tcg_temp_free_i32(tmp2);
7711 }
7712 store_reg(s, rn, tmp);
7713 }
7714 break;
7715 case 1:
7716 case 3:
7717 /* SDIV, UDIV */
7718 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7719 goto illegal_op;
7720 }
7721 if (((insn >> 5) & 7) || (rd != 15)) {
7722 goto illegal_op;
7723 }
7724 tmp = load_reg(s, rm);
7725 tmp2 = load_reg(s, rs);
7726 if (insn & (1 << 21)) {
7727 gen_helper_udiv(tmp, tmp, tmp2);
7728 } else {
7729 gen_helper_sdiv(tmp, tmp, tmp2);
7730 }
7731 tcg_temp_free_i32(tmp2);
7732 store_reg(s, rn, tmp);
7733 break;
7734 default:
7735 goto illegal_op;
7736 }
7737 break;
7738 case 3:
7739 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7740 switch (op1) {
7741 case 0: /* Unsigned sum of absolute differences. */
7742 ARCH(6);
7743 tmp = load_reg(s, rm);
7744 tmp2 = load_reg(s, rs);
7745 gen_helper_usad8(tmp, tmp, tmp2);
7746 tcg_temp_free_i32(tmp2);
7747 if (rd != 15) {
7748 tmp2 = load_reg(s, rd);
7749 tcg_gen_add_i32(tmp, tmp, tmp2);
7750 tcg_temp_free_i32(tmp2);
7751 }
7752 store_reg(s, rn, tmp);
7753 break;
7754 case 0x20: case 0x24: case 0x28: case 0x2c:
7755 /* Bitfield insert/clear. */
7756 ARCH(6T2);
7757 shift = (insn >> 7) & 0x1f;
7758 i = (insn >> 16) & 0x1f;
7759 i = i + 1 - shift;
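/* i is now the field width (msb - lsb + 1); a width of 32 covers the
   whole register, so the insert degenerates to a plain move and the
   masking below is skipped. */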
7760 if (rm == 15) {
7761 tmp = tcg_temp_new_i32();
7762 tcg_gen_movi_i32(tmp, 0);
7763 } else {
7764 tmp = load_reg(s, rm);
7765 }
7766 if (i != 32) {
7767 tmp2 = load_reg(s, rd);
7768 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7769 tcg_temp_free_i32(tmp2);
7770 }
7771 store_reg(s, rd, tmp);
7772 break;
7773 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7774 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7775 ARCH(6T2);
7776 tmp = load_reg(s, rm);
7777 shift = (insn >> 7) & 0x1f;
7778 i = ((insn >> 16) & 0x1f) + 1;
7779 if (shift + i > 32)
7780 goto illegal_op;
7781 if (i < 32) {
7782 if (op1 & 0x20) {
7783 gen_ubfx(tmp, shift, (1u << i) - 1);
7784 } else {
7785 gen_sbfx(tmp, shift, i);
7786 }
7787 }
7788 store_reg(s, rd, tmp);
7789 break;
7790 default:
7791 goto illegal_op;
7792 }
7793 break;
7794 }
7795 break;
7796 }
7797 do_ldst:
7798 /* Check for undefined extension instructions
7799 * per the ARM Bible, i.e.:
7800 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7801 */
7802 sh = (0xf << 20) | (0xf << 4);
7803 if (op1 == 0x7 && ((insn & sh) == sh))
7804 {
7805 goto illegal_op;
7806 }
7807 /* load/store byte/word */
7808 rn = (insn >> 16) & 0xf;
7809 rd = (insn >> 12) & 0xf;
7810 tmp2 = load_reg(s, rn);
7811 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7812 if (insn & (1 << 24))
7813 gen_add_data_offset(s, insn, tmp2);
7814 if (insn & (1 << 20)) {
7815 /* load */
7816 if (insn & (1 << 22)) {
7817 tmp = gen_ld8u(tmp2, i);
7818 } else {
7819 tmp = gen_ld32(tmp2, i);
7820 }
7821 } else {
7822 /* store */
7823 tmp = load_reg(s, rd);
7824 if (insn & (1 << 22))
7825 gen_st8(tmp, tmp2, i);
7826 else
7827 gen_st32(tmp, tmp2, i);
7828 }
7829 if (!(insn & (1 << 24))) {
7830 gen_add_data_offset(s, insn, tmp2);
7831 store_reg(s, rn, tmp2);
7832 } else if (insn & (1 << 21)) {
7833 store_reg(s, rn, tmp2);
7834 } else {
7835 tcg_temp_free_i32(tmp2);
7836 }
7837 if (insn & (1 << 20)) {
7838 /* Complete the load. */
7839 store_reg_from_load(env, s, rd, tmp);
7840 }
7841 break;
7842 case 0x08:
7843 case 0x09:
7844 {
7845 int j, n, user, loaded_base;
7846 TCGv loaded_var;
7847 /* load/store multiple words */
7848 /* XXX: store correct base if write back */
7849 user = 0;
7850 if (insn & (1 << 22)) {
7851 if (IS_USER(s))
7852 goto illegal_op; /* only usable in supervisor mode */
7853
7854 if ((insn & (1 << 15)) == 0)
7855 user = 1;
7856 }
7857 rn = (insn >> 16) & 0xf;
7858 addr = load_reg(s, rn);
7859
7860 /* compute total size */
7861 loaded_base = 0;
7862 TCGV_UNUSED(loaded_var);
7863 n = 0;
7864 for (i = 0; i < 16; i++) {
7865 if (insn & (1 << i))
7866 n++;
7867 }
7868 /* XXX: test invalid n == 0 case ? */
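/* All four modes transfer registers to ascending addresses; the
   adjustments below just pick the correct start address, e.g.
   base - n * 4 for DB and base - (n - 1) * 4 for DA. */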
7869 if (insn & (1 << 23)) {
7870 if (insn & (1 << 24)) {
7871 /* pre increment */
7872 tcg_gen_addi_i32(addr, addr, 4);
7873 } else {
7874 /* post increment */
7875 }
7876 } else {
7877 if (insn & (1 << 24)) {
7878 /* pre decrement */
7879 tcg_gen_addi_i32(addr, addr, -(n * 4));
7880 } else {
7881 /* post decrement */
7882 if (n != 1)
7883 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7884 }
7885 }
7886 j = 0;
7887 for (i = 0; i < 16; i++) {
7888 if (insn & (1 << i)) {
7889 if (insn & (1 << 20)) {
7890 /* load */
7891 tmp = gen_ld32(addr, IS_USER(s));
7892 if (user) {
7893 tmp2 = tcg_const_i32(i);
7894 gen_helper_set_user_reg(tmp2, tmp);
7895 tcg_temp_free_i32(tmp2);
7896 tcg_temp_free_i32(tmp);
7897 } else if (i == rn) {
7898 loaded_var = tmp;
7899 loaded_base = 1;
7900 } else {
7901 store_reg_from_load(env, s, i, tmp);
7902 }
7903 } else {
7904 /* store */
7905 if (i == 15) {
7906 /* special case: r15 = PC + 8 */
7907 val = (long)s->pc + 4;
7908 tmp = tcg_temp_new_i32();
7909 tcg_gen_movi_i32(tmp, val);
7910 } else if (user) {
7911 tmp = tcg_temp_new_i32();
7912 tmp2 = tcg_const_i32(i);
7913 gen_helper_get_user_reg(tmp, tmp2);
7914 tcg_temp_free_i32(tmp2);
7915 } else {
7916 tmp = load_reg(s, i);
7917 }
7918 gen_st32(tmp, addr, IS_USER(s));
7919 }
7920 j++;
7921 /* no need to add after the last transfer */
7922 if (j != n)
7923 tcg_gen_addi_i32(addr, addr, 4);
7924 }
7925 }
7926 if (insn & (1 << 21)) {
7927 /* write back */
7928 if (insn & (1 << 23)) {
7929 if (insn & (1 << 24)) {
7930 /* pre increment */
7931 } else {
7932 /* post increment */
7933 tcg_gen_addi_i32(addr, addr, 4);
7934 }
7935 } else {
7936 if (insn & (1 << 24)) {
7937 /* pre decrement */
7938 if (n != 1)
7939 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7940 } else {
7941 /* post decrement */
7942 tcg_gen_addi_i32(addr, addr, -(n * 4));
7943 }
7944 }
7945 store_reg(s, rn, addr);
7946 } else {
7947 tcg_temp_free_i32(addr);
7948 }
7949 if (loaded_base) {
7950 store_reg(s, rn, loaded_var);
7951 }
7952 if ((insn & (1 << 22)) && !user) {
7953 /* Restore CPSR from SPSR. */
7954 tmp = load_cpu_field(spsr);
7955 gen_set_cpsr(tmp, 0xffffffff);
7956 tcg_temp_free_i32(tmp);
7957 s->is_jmp = DISAS_UPDATE;
7958 }
7959 }
7960 break;
7961 case 0xa:
7962 case 0xb:
7963 {
7964 int32_t offset;
7965
7966 /* branch (and link) */
7967 val = (int32_t)s->pc;
7968 if (insn & (1 << 24)) {
7969 tmp = tcg_temp_new_i32();
7970 tcg_gen_movi_i32(tmp, val);
7971 store_reg(s, 14, tmp);
7972 }
7973 offset = (((int32_t)insn << 8) >> 8);
7974 val += (offset << 2) + 4;
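/* s->pc already points 4 bytes past the insn, so the extra +4 yields
   the architectural PC (insn address + 8); e.g. a branch-to-self at
   address A has imm24 == -2: target = (A + 4) - 8 + 4 = A. */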
7975 gen_jmp(s, val);
7976 }
7977 break;
7978 case 0xc:
7979 case 0xd:
7980 case 0xe:
7981 /* Coprocessor. */
7982 if (disas_coproc_insn(env, s, insn))
7983 goto illegal_op;
7984 break;
7985 case 0xf:
7986 /* swi */
7987 gen_set_pc_im(s->pc);
7988 s->is_jmp = DISAS_SWI;
7989 break;
7990 default:
7991 illegal_op:
7992 gen_exception_insn(s, 4, EXCP_UDEF);
7993 break;
7994 }
7995 }
7996 }
7997
7998 /* Return true if this is a Thumb-2 logical op. */
7999 static int
8000 thumb2_logic_op(int op)
8001 {
8002 return (op < 8);
8003 }
8004
8005 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8006 then set condition code flags based on the result of the operation.
8007 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8008 to the high bit of T1.
8009 Returns zero if the opcode is valid. */
8010
8011 static int
8012 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
8013 {
8014 int logic_cc;
8015
8016 logic_cc = 0;
8017 switch (op) {
8018 case 0: /* and */
8019 tcg_gen_and_i32(t0, t0, t1);
8020 logic_cc = conds;
8021 break;
8022 case 1: /* bic */
8023 tcg_gen_andc_i32(t0, t0, t1);
8024 logic_cc = conds;
8025 break;
8026 case 2: /* orr */
8027 tcg_gen_or_i32(t0, t0, t1);
8028 logic_cc = conds;
8029 break;
8030 case 3: /* orn */
8031 tcg_gen_orc_i32(t0, t0, t1);
8032 logic_cc = conds;
8033 break;
8034 case 4: /* eor */
8035 tcg_gen_xor_i32(t0, t0, t1);
8036 logic_cc = conds;
8037 break;
8038 case 8: /* add */
8039 if (conds)
8040 gen_helper_add_cc(t0, t0, t1);
8041 else
8042 tcg_gen_add_i32(t0, t0, t1);
8043 break;
8044 case 10: /* adc */
8045 if (conds)
8046 gen_helper_adc_cc(t0, t0, t1);
8047 else
8048 gen_adc(t0, t1);
8049 break;
8050 case 11: /* sbc */
8051 if (conds)
8052 gen_helper_sbc_cc(t0, t0, t1);
8053 else
8054 gen_sub_carry(t0, t0, t1);
8055 break;
8056 case 13: /* sub */
8057 if (conds)
8058 gen_helper_sub_cc(t0, t0, t1);
8059 else
8060 tcg_gen_sub_i32(t0, t0, t1);
8061 break;
8062 case 14: /* rsb */
8063 if (conds)
8064 gen_helper_sub_cc(t0, t1, t0);
8065 else
8066 tcg_gen_sub_i32(t0, t1, t0);
8067 break;
8068 default: /* 5, 6, 7, 9, 12, 15. */
8069 return 1;
8070 }
8071 if (logic_cc) {
8072 gen_logic_CC(t0);
8073 if (shifter_out)
8074 gen_set_CF_bit31(t1);
8075 }
8076 return 0;
8077 }
8078
8079 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8080 is not legal. */
8081 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
8082 {
8083 uint32_t insn, imm, shift, offset;
8084 uint32_t rd, rn, rm, rs;
8085 TCGv tmp;
8086 TCGv tmp2;
8087 TCGv tmp3;
8088 TCGv addr;
8089 TCGv_i64 tmp64;
8090 int op;
8091 int shiftop;
8092 int conds;
8093 int logic_cc;
8094
8095 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8096 || arm_feature(env, ARM_FEATURE_M))) {
8097 /* Thumb-1 cores may need to treat bl and blx as a pair of
8098 16-bit instructions to get correct prefetch abort behavior. */
8099 insn = insn_hw1;
8100 if ((insn & (1 << 12)) == 0) {
8101 ARCH(5);
8102 /* Second half of blx. */
8103 offset = ((insn & 0x7ff) << 1);
8104 tmp = load_reg(s, 14);
8105 tcg_gen_addi_i32(tmp, tmp, offset);
8106 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
8107
8108 tmp2 = tcg_temp_new_i32();
8109 tcg_gen_movi_i32(tmp2, s->pc | 1);
8110 store_reg(s, 14, tmp2);
8111 gen_bx(s, tmp);
8112 return 0;
8113 }
8114 if (insn & (1 << 11)) {
8115 /* Second half of bl. */
8116 offset = ((insn & 0x7ff) << 1) | 1;
8117 tmp = load_reg(s, 14);
8118 tcg_gen_addi_i32(tmp, tmp, offset);
8119
8120 tmp2 = tcg_temp_new_i32();
8121 tcg_gen_movi_i32(tmp2, s->pc | 1);
8122 store_reg(s, 14, tmp2);
8123 gen_bx(s, tmp);
8124 return 0;
8125 }
8126 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8127 /* Instruction spans a page boundary. Implement it as two
8128 16-bit instructions in case the second half causes a
8129 prefetch abort. */
8130 offset = ((int32_t)insn << 21) >> 9;
8131 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
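/* The first half only deposits PC + sign_extend(imm11 << 12) into r14;
   when the second half is later decoded as its own 16-bit insn, the
   bl/blx cases above add its (imm11 << 1) offset and branch. */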
8132 return 0;
8133 }
8134 /* Fall through to 32-bit decode. */
8135 }
8136
8137 insn = arm_lduw_code(s->pc, s->bswap_code);
8138 s->pc += 2;
8139 insn |= (uint32_t)insn_hw1 << 16;
8140
8141 if ((insn & 0xf800e800) != 0xf000e800) {
8142 ARCH(6T2);
8143 }
8144
8145 rn = (insn >> 16) & 0xf;
8146 rs = (insn >> 12) & 0xf;
8147 rd = (insn >> 8) & 0xf;
8148 rm = insn & 0xf;
8149 switch ((insn >> 25) & 0xf) {
8150 case 0: case 1: case 2: case 3:
8151 /* 16-bit instructions. Should never happen. */
8152 abort();
8153 case 4:
8154 if (insn & (1 << 22)) {
8155 /* Other load/store, table branch. */
8156 if (insn & 0x01200000) {
8157 /* Load/store doubleword. */
8158 if (rn == 15) {
8159 addr = tcg_temp_new_i32();
8160 tcg_gen_movi_i32(addr, s->pc & ~3);
8161 } else {
8162 addr = load_reg(s, rn);
8163 }
8164 offset = (insn & 0xff) * 4;
8165 if ((insn & (1 << 23)) == 0)
8166 offset = -offset;
8167 if (insn & (1 << 24)) {
8168 tcg_gen_addi_i32(addr, addr, offset);
8169 offset = 0;
8170 }
8171 if (insn & (1 << 20)) {
8172 /* ldrd */
8173 tmp = gen_ld32(addr, IS_USER(s));
8174 store_reg(s, rs, tmp);
8175 tcg_gen_addi_i32(addr, addr, 4);
8176 tmp = gen_ld32(addr, IS_USER(s));
8177 store_reg(s, rd, tmp);
8178 } else {
8179 /* strd */
8180 tmp = load_reg(s, rs);
8181 gen_st32(tmp, addr, IS_USER(s));
8182 tcg_gen_addi_i32(addr, addr, 4);
8183 tmp = load_reg(s, rd);
8184 gen_st32(tmp, addr, IS_USER(s));
8185 }
8186 if (insn & (1 << 21)) {
8187 /* Base writeback. */
8188 if (rn == 15)
8189 goto illegal_op;
8190 tcg_gen_addi_i32(addr, addr, offset - 4);
8191 store_reg(s, rn, addr);
8192 } else {
8193 tcg_temp_free_i32(addr);
8194 }
8195 } else if ((insn & (1 << 23)) == 0) {
8196 /* Load/store exclusive word. */
8197 addr = tcg_temp_local_new();
8198 load_reg_var(s, addr, rn);
8199 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8200 if (insn & (1 << 20)) {
8201 gen_load_exclusive(s, rs, 15, addr, 2);
8202 } else {
8203 gen_store_exclusive(s, rd, rs, 15, addr, 2);
8204 }
8205 tcg_temp_free(addr);
8206 } else if ((insn & (1 << 6)) == 0) {
8207 /* Table Branch. */
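/* TBB/TBH: branch forward by twice the table entry, i.e. to
   PC + 2 * table[Rm]; for tbh the index register is added twice to
   scale it to halfword entries. */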
8208 if (rn == 15) {
8209 addr = tcg_temp_new_i32();
8210 tcg_gen_movi_i32(addr, s->pc);
8211 } else {
8212 addr = load_reg(s, rn);
8213 }
8214 tmp = load_reg(s, rm);
8215 tcg_gen_add_i32(addr, addr, tmp);
8216 if (insn & (1 << 4)) {
8217 /* tbh */
8218 tcg_gen_add_i32(addr, addr, tmp);
8219 tcg_temp_free_i32(tmp);
8220 tmp = gen_ld16u(addr, IS_USER(s));
8221 } else { /* tbb */
8222 tcg_temp_free_i32(tmp);
8223 tmp = gen_ld8u(addr, IS_USER(s));
8224 }
8225 tcg_temp_free_i32(addr);
8226 tcg_gen_shli_i32(tmp, tmp, 1);
8227 tcg_gen_addi_i32(tmp, tmp, s->pc);
8228 store_reg(s, 15, tmp);
8229 } else {
8230 /* Load/store exclusive byte/halfword/doubleword. */
8231 ARCH(7);
8232 op = (insn >> 4) & 0x3;
8233 if (op == 2) {
8234 goto illegal_op;
8235 }
8236 addr = tcg_temp_local_new();
8237 load_reg_var(s, addr, rn);
8238 if (insn & (1 << 20)) {
8239 gen_load_exclusive(s, rs, rd, addr, op);
8240 } else {
8241 gen_store_exclusive(s, rm, rs, rd, addr, op);
8242 }
8243 tcg_temp_free(addr);
8244 }
8245 } else {
8246 /* Load/store multiple, RFE, SRS. */
8247 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8248 /* Not available in user mode. */
8249 if (IS_USER(s))
8250 goto illegal_op;
8251 if (insn & (1 << 20)) {
8252 /* rfe */
8253 addr = load_reg(s, rn);
8254 if ((insn & (1 << 24)) == 0)
8255 tcg_gen_addi_i32(addr, addr, -8);
8256 /* Load PC into tmp and CPSR into tmp2. */
8257 tmp = gen_ld32(addr, 0);
8258 tcg_gen_addi_i32(addr, addr, 4);
8259 tmp2 = gen_ld32(addr, 0);
8260 if (insn & (1 << 21)) {
8261 /* Base writeback. */
8262 if (insn & (1 << 24)) {
8263 tcg_gen_addi_i32(addr, addr, 4);
8264 } else {
8265 tcg_gen_addi_i32(addr, addr, -4);
8266 }
8267 store_reg(s, rn, addr);
8268 } else {
8269 tcg_temp_free_i32(addr);
8270 }
8271 gen_rfe(s, tmp, tmp2);
8272 } else {
8273 /* srs */
8274 op = (insn & 0x1f);
8275 addr = tcg_temp_new_i32();
8276 tmp = tcg_const_i32(op);
8277 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8278 tcg_temp_free_i32(tmp);
8279 if ((insn & (1 << 24)) == 0) {
8280 tcg_gen_addi_i32(addr, addr, -8);
8281 }
8282 tmp = load_reg(s, 14);
8283 gen_st32(tmp, addr, 0);
8284 tcg_gen_addi_i32(addr, addr, 4);
8285 tmp = tcg_temp_new_i32();
8286 gen_helper_cpsr_read(tmp);
8287 gen_st32(tmp, addr, 0);
8288 if (insn & (1 << 21)) {
8289 if ((insn & (1 << 24)) == 0) {
8290 tcg_gen_addi_i32(addr, addr, -4);
8291 } else {
8292 tcg_gen_addi_i32(addr, addr, 4);
8293 }
8294 tmp = tcg_const_i32(op);
8295 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8296 tcg_temp_free_i32(tmp);
8297 } else {
8298 tcg_temp_free_i32(addr);
8299 }
8300 }
8301 } else {
8302 int i, loaded_base = 0;
8303 TCGv loaded_var;
8304 /* Load/store multiple. */
8305 addr = load_reg(s, rn);
8306 offset = 0;
8307 for (i = 0; i < 16; i++) {
8308 if (insn & (1 << i))
8309 offset += 4;
8310 }
8311 if (insn & (1 << 24)) {
8312 tcg_gen_addi_i32(addr, addr, -offset);
8313 }
8314
8315 TCGV_UNUSED(loaded_var);
8316 for (i = 0; i < 16; i++) {
8317 if ((insn & (1 << i)) == 0)
8318 continue;
8319 if (insn & (1 << 20)) {
8320 /* Load. */
8321 tmp = gen_ld32(addr, IS_USER(s));
8322 if (i == 15) {
8323 gen_bx(s, tmp);
8324 } else if (i == rn) {
8325 loaded_var = tmp;
8326 loaded_base = 1;
8327 } else {
8328 store_reg(s, i, tmp);
8329 }
8330 } else {
8331 /* Store. */
8332 tmp = load_reg(s, i);
8333 gen_st32(tmp, addr, IS_USER(s));
8334 }
8335 tcg_gen_addi_i32(addr, addr, 4);
8336 }
8337 if (loaded_base) {
8338 store_reg(s, rn, loaded_var);
8339 }
8340 if (insn & (1 << 21)) {
8341 /* Base register writeback. */
8342 if (insn & (1 << 24)) {
8343 tcg_gen_addi_i32(addr, addr, -offset);
8344 }
8345 /* Fault if writeback register is in register list. */
8346 if (insn & (1 << rn))
8347 goto illegal_op;
8348 store_reg(s, rn, addr);
8349 } else {
8350 tcg_temp_free_i32(addr);
8351 }
8352 }
8353 }
8354 break;
8355 case 5:
8356
8357 op = (insn >> 21) & 0xf;
8358 if (op == 6) {
8359 /* Halfword pack. */
8360 tmp = load_reg(s, rn);
8361 tmp2 = load_reg(s, rm);
8362 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8363 if (insn & (1 << 5)) {
8364 /* pkhtb */
8365 if (shift == 0)
8366 shift = 31;
8367 tcg_gen_sari_i32(tmp2, tmp2, shift);
8368 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8369 tcg_gen_ext16u_i32(tmp2, tmp2);
8370 } else {
8371 /* pkhbt */
8372 if (shift)
8373 tcg_gen_shli_i32(tmp2, tmp2, shift);
8374 tcg_gen_ext16u_i32(tmp, tmp);
8375 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8376 }
8377 tcg_gen_or_i32(tmp, tmp, tmp2);
8378 tcg_temp_free_i32(tmp2);
8379 store_reg(s, rd, tmp);
8380 } else {
8381 /* Data processing register constant shift. */
8382 if (rn == 15) {
8383 tmp = tcg_temp_new_i32();
8384 tcg_gen_movi_i32(tmp, 0);
8385 } else {
8386 tmp = load_reg(s, rn);
8387 }
8388 tmp2 = load_reg(s, rm);
8389
8390 shiftop = (insn >> 4) & 3;
8391 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8392 conds = (insn & (1 << 20)) != 0;
8393 logic_cc = (conds && thumb2_logic_op(op));
8394 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8395 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8396 goto illegal_op;
8397 tcg_temp_free_i32(tmp2);
8398 if (rd != 15) {
8399 store_reg(s, rd, tmp);
8400 } else {
8401 tcg_temp_free_i32(tmp);
8402 }
8403 }
8404 break;
8405 case 13: /* Misc data processing. */
8406 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8407 if (op < 4 && (insn & 0xf000) != 0xf000)
8408 goto illegal_op;
8409 switch (op) {
8410 case 0: /* Register controlled shift. */
8411 tmp = load_reg(s, rn);
8412 tmp2 = load_reg(s, rm);
8413 if ((insn & 0x70) != 0)
8414 goto illegal_op;
8415 op = (insn >> 21) & 3;
8416 logic_cc = (insn & (1 << 20)) != 0;
8417 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8418 if (logic_cc)
8419 gen_logic_CC(tmp);
8420 store_reg_bx(env, s, rd, tmp);
8421 break;
8422 case 1: /* Sign/zero extend. */
8423 tmp = load_reg(s, rm);
8424 shift = (insn >> 4) & 3;
8425 /* ??? In many cases it's not necessary to do a
8426 rotate; a shift is sufficient. */
8427 if (shift != 0)
8428 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8429 op = (insn >> 20) & 7;
8430 switch (op) {
8431 case 0: gen_sxth(tmp); break;
8432 case 1: gen_uxth(tmp); break;
8433 case 2: gen_sxtb16(tmp); break;
8434 case 3: gen_uxtb16(tmp); break;
8435 case 4: gen_sxtb(tmp); break;
8436 case 5: gen_uxtb(tmp); break;
8437 default: goto illegal_op;
8438 }
8439 if (rn != 15) {
8440 tmp2 = load_reg(s, rn);
8441 if ((op >> 1) == 1) {
8442 gen_add16(tmp, tmp2);
8443 } else {
8444 tcg_gen_add_i32(tmp, tmp, tmp2);
8445 tcg_temp_free_i32(tmp2);
8446 }
8447 }
8448 store_reg(s, rd, tmp);
8449 break;
8450 case 2: /* SIMD add/subtract. */
8451 op = (insn >> 20) & 7;
8452 shift = (insn >> 4) & 7;
8453 if ((op & 3) == 3 || (shift & 3) == 3)
8454 goto illegal_op;
8455 tmp = load_reg(s, rn);
8456 tmp2 = load_reg(s, rm);
8457 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8458 tcg_temp_free_i32(tmp2);
8459 store_reg(s, rd, tmp);
8460 break;
8461 case 3: /* Other data processing. */
8462 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8463 if (op < 4) {
8464 /* Saturating add/subtract. */
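/* op[0] saturate-doubles the first operand (QDADD/QDSUB);
 * op[1] selects the subtracting forms (QSUB/QDSUB). */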
8465 tmp = load_reg(s, rn);
8466 tmp2 = load_reg(s, rm);
8467 if (op & 1)
8468 gen_helper_double_saturate(tmp, tmp);
8469 if (op & 2)
8470 gen_helper_sub_saturate(tmp, tmp2, tmp);
8471 else
8472 gen_helper_add_saturate(tmp, tmp, tmp2);
8473 tcg_temp_free_i32(tmp2);
8474 } else {
8475 tmp = load_reg(s, rn);
8476 switch (op) {
8477 case 0x0a: /* rbit */
8478 gen_helper_rbit(tmp, tmp);
8479 break;
8480 case 0x08: /* rev */
8481 tcg_gen_bswap32_i32(tmp, tmp);
8482 break;
8483 case 0x09: /* rev16 */
8484 gen_rev16(tmp);
8485 break;
8486 case 0x0b: /* revsh */
8487 gen_revsh(tmp);
8488 break;
8489 case 0x10: /* sel */
8490 tmp2 = load_reg(s, rm);
8491 tmp3 = tcg_temp_new_i32();
8492 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8493 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8494 tcg_temp_free_i32(tmp3);
8495 tcg_temp_free_i32(tmp2);
8496 break;
8497 case 0x18: /* clz */
8498 gen_helper_clz(tmp, tmp);
8499 break;
8500 default:
8501 goto illegal_op;
8502 }
8503 }
8504 store_reg(s, rd, tmp);
8505 break;
8506 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8507 op = (insn >> 4) & 0xf;
8508 tmp = load_reg(s, rn);
8509 tmp2 = load_reg(s, rm);
8510 switch ((insn >> 20) & 7) {
8511 case 0: /* 32 x 32 -> 32 */
8512 tcg_gen_mul_i32(tmp, tmp, tmp2);
8513 tcg_temp_free_i32(tmp2);
8514 if (rs != 15) {
8515 tmp2 = load_reg(s, rs);
8516 if (op)
8517 tcg_gen_sub_i32(tmp, tmp2, tmp);
8518 else
8519 tcg_gen_add_i32(tmp, tmp, tmp2);
8520 tcg_temp_free_i32(tmp2);
8521 }
8522 break;
8523 case 1: /* 16 x 16 -> 32 */
8524 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8525 tcg_temp_free_i32(tmp2);
8526 if (rs != 15) {
8527 tmp2 = load_reg(s, rs);
8528 gen_helper_add_setq(tmp, tmp, tmp2);
8529 tcg_temp_free_i32(tmp2);
8530 }
8531 break;
8532 case 2: /* Dual multiply add. */
8533 case 4: /* Dual multiply subtract. */
8534 if (op)
8535 gen_swap_half(tmp2);
8536 gen_smul_dual(tmp, tmp2);
8537 if (insn & (1 << 22)) {
8538 /* This subtraction cannot overflow. */
8539 tcg_gen_sub_i32(tmp, tmp, tmp2);
8540 } else {
8541 /* This addition cannot overflow 32 bits;
8542 * however it may overflow considered as a signed
8543 * operation, in which case we must set the Q flag.
8544 */
8545 gen_helper_add_setq(tmp, tmp, tmp2);
8546 }
8547 tcg_temp_free_i32(tmp2);
8548 if (rs != 15)
8549 {
8550 tmp2 = load_reg(s, rs);
8551 gen_helper_add_setq(tmp, tmp, tmp2);
8552 tcg_temp_free_i32(tmp2);
8553 }
8554 break;
8555 case 3: /* 32 * 16 -> 32msb */
8556 if (op)
8557 tcg_gen_sari_i32(tmp2, tmp2, 16);
8558 else
8559 gen_sxth(tmp2);
8560 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8561 tcg_gen_shri_i64(tmp64, tmp64, 16);
8562 tmp = tcg_temp_new_i32();
8563 tcg_gen_trunc_i64_i32(tmp, tmp64);
8564 tcg_temp_free_i64(tmp64);
8565 if (rs != 15)
8566 {
8567 tmp2 = load_reg(s, rs);
8568 gen_helper_add_setq(tmp, tmp, tmp2);
8569 tcg_temp_free_i32(tmp2);
8570 }
8571 break;
8572 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8573 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8574 if (rs != 15) {
8575 tmp = load_reg(s, rs);
8576 if (insn & (1 << 20)) {
8577 tmp64 = gen_addq_msw(tmp64, tmp);
8578 } else {
8579 tmp64 = gen_subq_msw(tmp64, tmp);
8580 }
8581 }
8582 if (insn & (1 << 4)) {
8583 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8584 }
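/* Adding 0x80000000 implements the R (round) bit: the
 * subsequent >> 32 then rounds the product to nearest,
 * giving SMMULR/SMMLAR/SMMLSR behaviour. */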
8585 tcg_gen_shri_i64(tmp64, tmp64, 32);
8586 tmp = tcg_temp_new_i32();
8587 tcg_gen_trunc_i64_i32(tmp, tmp64);
8588 tcg_temp_free_i64(tmp64);
8589 break;
8590 case 7: /* Unsigned sum of absolute differences. */
8591 gen_helper_usad8(tmp, tmp, tmp2);
8592 tcg_temp_free_i32(tmp2);
8593 if (rs != 15) {
8594 tmp2 = load_reg(s, rs);
8595 tcg_gen_add_i32(tmp, tmp, tmp2);
8596 tcg_temp_free_i32(tmp2);
8597 }
8598 break;
8599 }
8600 store_reg(s, rd, tmp);
8601 break;
8602 case 6: case 7: /* 64-bit multiply, Divide. */
8603 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8604 tmp = load_reg(s, rn);
8605 tmp2 = load_reg(s, rm);
8606 if ((op & 0x50) == 0x10) {
8607 /* sdiv, udiv */
8608 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
8609 goto illegal_op;
8610 }
8611 if (op & 0x20)
8612 gen_helper_udiv(tmp, tmp, tmp2);
8613 else
8614 gen_helper_sdiv(tmp, tmp, tmp2);
8615 tcg_temp_free_i32(tmp2);
8616 store_reg(s, rd, tmp);
8617 } else if ((op & 0xe) == 0xc) {
8618 /* Dual multiply accumulate long. */
8619 if (op & 1)
8620 gen_swap_half(tmp2);
8621 gen_smul_dual(tmp, tmp2);
8622 if (op & 0x10) {
8623 tcg_gen_sub_i32(tmp, tmp, tmp2);
8624 } else {
8625 tcg_gen_add_i32(tmp, tmp, tmp2);
8626 }
8627 tcg_temp_free_i32(tmp2);
8628 /* BUGFIX: widen to 64 bits before the accumulate. */
8629 tmp64 = tcg_temp_new_i64();
8630 tcg_gen_ext_i32_i64(tmp64, tmp);
8631 tcg_temp_free_i32(tmp);
8632 gen_addq(s, tmp64, rs, rd);
8633 gen_storeq_reg(s, rs, rd, tmp64);
8634 tcg_temp_free_i64(tmp64);
8635 } else {
8636 if (op & 0x20) {
8637 /* Unsigned 64-bit multiply */
8638 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8639 } else {
8640 if (op & 8) {
8641 /* smlalxy */
8642 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8643 tcg_temp_free_i32(tmp2);
8644 tmp64 = tcg_temp_new_i64();
8645 tcg_gen_ext_i32_i64(tmp64, tmp);
8646 tcg_temp_free_i32(tmp);
8647 } else {
8648 /* Signed 64-bit multiply */
8649 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8650 }
8651 }
8652 if (op & 4) {
8653 /* umaal */
8654 gen_addq_lo(s, tmp64, rs);
8655 gen_addq_lo(s, tmp64, rd);
8656 } else if (op & 0x40) {
8657 /* 64-bit accumulate. */
8658 gen_addq(s, tmp64, rs, rd);
8659 }
8660 gen_storeq_reg(s, rs, rd, tmp64);
8661 tcg_temp_free_i64(tmp64);
8662 }
8663 break;
8664 }
8665 break;
8666 case 6: case 7: case 14: case 15:
8667 /* Coprocessor. */
8668 if (((insn >> 24) & 3) == 3) {
8669 /* Translate into the equivalent ARM encoding. */
8670 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8671 if (disas_neon_data_insn(env, s, insn))
8672 goto illegal_op;
8673 } else {
8674 if (insn & (1 << 28))
8675 goto illegal_op;
8676 if (disas_coproc_insn (env, s, insn))
8677 goto illegal_op;
8678 }
8679 break;
8680 case 8: case 9: case 10: case 11:
8681 if (insn & (1 << 15)) {
8682 /* Branches, misc control. */
8683 if (insn & 0x5000) {
8684 /* Unconditional branch. */
8685 /* signextend(hw1[10:0]) -> offset[31:12]. */
8686 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8687 /* hw1[10:0] -> offset[11:1]. */
8688 offset |= (insn & 0x7ff) << 1;
8689 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8690 offset[24:22] already have the same value because of the
8691 sign extension above. */
8692 offset ^= ((~insn) & (1 << 13)) << 10;
8693 offset ^= ((~insn) & (1 << 11)) << 11;
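/* Illustration: with S = 0 the sign extension leaves
 * offset[23:22] = 00; if J1 = J2 = 0 as well, the two XORs
 * with the inverted J bits flip them to 11, matching
 * I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S) from the
 * architecture pseudocode. */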
8694
8695 if (insn & (1 << 14)) {
8696 /* Branch and link. */
8697 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8698 }
8699
8700 offset += s->pc;
8701 if (insn & (1 << 12)) {
8702 /* b/bl */
8703 gen_jmp(s, offset);
8704 } else {
8705 /* blx */
8706 offset &= ~(uint32_t)2;
8707 /* thumb2 bx, no need to check */
8708 gen_bx_im(s, offset);
8709 }
8710 } else if (((insn >> 23) & 7) == 7) {
8711 /* Misc control */
8712 if (insn & (1 << 13))
8713 goto illegal_op;
8714
8715 if (insn & (1 << 26)) {
8716 /* Secure monitor call (v6Z) */
8717 goto illegal_op; /* not implemented. */
8718 } else {
8719 op = (insn >> 20) & 7;
8720 switch (op) {
8721 case 0: /* msr cpsr. */
8722 if (IS_M(env)) {
8723 tmp = load_reg(s, rn);
8724 addr = tcg_const_i32(insn & 0xff);
8725 gen_helper_v7m_msr(cpu_env, addr, tmp);
8726 tcg_temp_free_i32(addr);
8727 tcg_temp_free_i32(tmp);
8728 gen_lookup_tb(s);
8729 break;
8730 }
8731 /* fall through */
8732 case 1: /* msr spsr. */
8733 if (IS_M(env))
8734 goto illegal_op;
8735 tmp = load_reg(s, rn);
8736 if (gen_set_psr(s,
8737 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8738 op == 1, tmp))
8739 goto illegal_op;
8740 break;
8741 case 2: /* cps, nop-hint. */
8742 if (((insn >> 8) & 7) == 0) {
8743 gen_nop_hint(s, insn & 0xff);
8744 }
8745 /* Implemented as NOP in user mode. */
8746 if (IS_USER(s))
8747 break;
8748 offset = 0;
8749 imm = 0;
8750 if (insn & (1 << 10)) {
8751 if (insn & (1 << 7))
8752 offset |= CPSR_A;
8753 if (insn & (1 << 6))
8754 offset |= CPSR_I;
8755 if (insn & (1 << 5))
8756 offset |= CPSR_F;
8757 if (insn & (1 << 9))
8758 imm = CPSR_A | CPSR_I | CPSR_F;
8759 }
8760 if (insn & (1 << 8)) {
8761 offset |= 0x1f;
8762 imm |= (insn & 0x1f);
8763 }
8764 if (offset) {
8765 gen_set_psr_im(s, offset, 0, imm);
8766 }
8767 break;
8768 case 3: /* Special control operations. */
8769 ARCH(7);
8770 op = (insn >> 4) & 0xf;
8771 switch (op) {
8772 case 2: /* clrex */
8773 gen_clrex(s);
8774 break;
8775 case 4: /* dsb */
8776 case 5: /* dmb */
8777 case 6: /* isb */
8778 /* These execute as NOPs. */
8779 break;
8780 default:
8781 goto illegal_op;
8782 }
8783 break;
8784 case 4: /* bxj */
8785 /* Trivial implementation equivalent to bx. */
8786 tmp = load_reg(s, rn);
8787 gen_bx(s, tmp);
8788 break;
8789 case 5: /* Exception return. */
8790 if (IS_USER(s)) {
8791 goto illegal_op;
8792 }
8793 if (rn != 14 || rd != 15) {
8794 goto illegal_op;
8795 }
8796 tmp = load_reg(s, rn);
8797 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8798 gen_exception_return(s, tmp);
8799 break;
8800 case 6: /* mrs cpsr. */
8801 tmp = tcg_temp_new_i32();
8802 if (IS_M(env)) {
8803 addr = tcg_const_i32(insn & 0xff);
8804 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8805 tcg_temp_free_i32(addr);
8806 } else {
8807 gen_helper_cpsr_read(tmp);
8808 }
8809 store_reg(s, rd, tmp);
8810 break;
8811 case 7: /* mrs spsr. */
8812 /* Not accessible in user mode. */
8813 if (IS_USER(s) || IS_M(env))
8814 goto illegal_op;
8815 tmp = load_cpu_field(spsr);
8816 store_reg(s, rd, tmp);
8817 break;
8818 }
8819 }
8820 } else {
8821 /* Conditional branch. */
8822 op = (insn >> 22) & 0xf;
8823 /* Generate a conditional jump to next instruction. */
8824 s->condlabel = gen_new_label();
8825 gen_test_cc(op ^ 1, s->condlabel);
8826 s->condjmp = 1;
8827
8828 /* offset[11:1] = insn[10:0] */
8829 offset = (insn & 0x7ff) << 1;
8830 /* offset[17:12] = insn[21:16]. */
8831 offset |= (insn & 0x003f0000) >> 4;
8832 /* offset[31:20] = insn[26]. */
8833 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8834 /* offset[18] = insn[13]. */
8835 offset |= (insn & (1 << 13)) << 5;
8836 /* offset[19] = insn[11]. */
8837 offset |= (insn & (1 << 11)) << 8;
8838
8839 /* jump to the offset */
8840 gen_jmp(s, s->pc + offset);
8841 }
8842 } else {
8843 /* Data processing immediate. */
8844 if (insn & (1 << 25)) {
8845 if (insn & (1 << 24)) {
8846 if (insn & (1 << 20))
8847 goto illegal_op;
8848 /* Bitfield/Saturate. */
8849 op = (insn >> 21) & 7;
8850 imm = insn & 0x1f;
8851 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8852 if (rn == 15) {
8853 tmp = tcg_temp_new_i32();
8854 tcg_gen_movi_i32(tmp, 0);
8855 } else {
8856 tmp = load_reg(s, rn);
8857 }
8858 switch (op) {
8859 case 2: /* Signed bitfield extract. */
8860 imm++;
8861 if (shift + imm > 32)
8862 goto illegal_op;
8863 if (imm < 32)
8864 gen_sbfx(tmp, shift, imm);
8865 break;
8866 case 6: /* Unsigned bitfield extract. */
8867 imm++;
8868 if (shift + imm > 32)
8869 goto illegal_op;
8870 if (imm < 32)
8871 gen_ubfx(tmp, shift, (1u << imm) - 1);
8872 break;
8873 case 3: /* Bitfield insert/clear. */
8874 if (imm < shift)
8875 goto illegal_op;
8876 imm = imm + 1 - shift;
8877 if (imm != 32) {
8878 tmp2 = load_reg(s, rd);
8879 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8880 tcg_temp_free_i32(tmp2);
8881 }
8882 break;
8883 case 7:
8884 goto illegal_op;
8885 default: /* Saturate. */
8886 if (shift) {
8887 if (op & 1)
8888 tcg_gen_sari_i32(tmp, tmp, shift);
8889 else
8890 tcg_gen_shli_i32(tmp, tmp, shift);
8891 }
8892 tmp2 = tcg_const_i32(imm);
8893 if (op & 4) {
8894 /* Unsigned. */
8895 if ((op & 1) && shift == 0)
8896 gen_helper_usat16(tmp, tmp, tmp2);
8897 else
8898 gen_helper_usat(tmp, tmp, tmp2);
8899 } else {
8900 /* Signed. */
8901 if ((op & 1) && shift == 0)
8902 gen_helper_ssat16(tmp, tmp, tmp2);
8903 else
8904 gen_helper_ssat(tmp, tmp, tmp2);
8905 }
8906 tcg_temp_free_i32(tmp2);
8907 break;
8908 }
8909 store_reg(s, rd, tmp);
8910 } else {
8911 imm = ((insn & 0x04000000) >> 15)
8912 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8913 if (insn & (1 << 22)) {
8914 /* 16-bit immediate. */
8915 imm |= (insn >> 4) & 0xf000;
8916 if (insn & (1 << 23)) {
8917 /* movt */
8918 tmp = load_reg(s, rd);
8919 tcg_gen_ext16u_i32(tmp, tmp);
8920 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8921 } else {
8922 /* movw */
8923 tmp = tcg_temp_new_i32();
8924 tcg_gen_movi_i32(tmp, imm);
8925 }
8926 } else {
8927 /* Add/sub 12-bit immediate. */
8928 if (rn == 15) {
8929 offset = s->pc & ~(uint32_t)3;
8930 if (insn & (1 << 23))
8931 offset -= imm;
8932 else
8933 offset += imm;
8934 tmp = tcg_temp_new_i32();
8935 tcg_gen_movi_i32(tmp, offset);
8936 } else {
8937 tmp = load_reg(s, rn);
8938 if (insn & (1 << 23))
8939 tcg_gen_subi_i32(tmp, tmp, imm);
8940 else
8941 tcg_gen_addi_i32(tmp, tmp, imm);
8942 }
8943 }
8944 store_reg(s, rd, tmp);
8945 }
8946 } else {
8947 int shifter_out = 0;
8948 /* modified 12-bit immediate. */
8949 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8950 imm = (insn & 0xff);
8951 switch (shift) {
8952 case 0: /* XY */
8953 /* Nothing to do. */
8954 break;
8955 case 1: /* 00XY00XY */
8956 imm |= imm << 16;
8957 break;
8958 case 2: /* XY00XY00 */
8959 imm |= imm << 16;
8960 imm <<= 8;
8961 break;
8962 case 3: /* XYXYXYXY */
8963 imm |= imm << 16;
8964 imm |= imm << 8;
8965 break;
8966 default: /* Rotated constant. */
8967 shift = (shift << 1) | (imm >> 7);
8968 imm |= 0x80;
8969 imm = imm << (32 - shift);
8970 shifter_out = 1;
8971 break;
8972 }
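/* Example of the rotated-constant case: i:imm3 = 0b0100 and
 * imm8 = 0x25 give a rotation of 0b01000 = 8 and
 * imm = 0x80 | 0x25 = 0xa5, so the expanded constant is
 * 0xa5 << 24 = 0xa5000000 (ThumbExpandImm). */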
8973 tmp2 = tcg_temp_new_i32();
8974 tcg_gen_movi_i32(tmp2, imm);
8975 rn = (insn >> 16) & 0xf;
8976 if (rn == 15) {
8977 tmp = tcg_temp_new_i32();
8978 tcg_gen_movi_i32(tmp, 0);
8979 } else {
8980 tmp = load_reg(s, rn);
8981 }
8982 op = (insn >> 21) & 0xf;
8983 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8984 shifter_out, tmp, tmp2))
8985 goto illegal_op;
8986 tcg_temp_free_i32(tmp2);
8987 rd = (insn >> 8) & 0xf;
8988 if (rd != 15) {
8989 store_reg(s, rd, tmp);
8990 } else {
8991 tcg_temp_free_i32(tmp);
8992 }
8993 }
8994 }
8995 break;
8996 case 12: /* Load/store single data item. */
8997 {
8998 int postinc = 0;
8999 int writeback = 0;
9000 int user;
9001 if ((insn & 0x01100000) == 0x01000000) {
9002 if (disas_neon_ls_insn(env, s, insn))
9003 goto illegal_op;
9004 break;
9005 }
9006 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
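/* op[1:0] is the access size (0 = byte, 1 = halfword,
 * 2 = word); op[2] selects a sign-extending load. */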
9007 if (rs == 15) {
9008 if (!(insn & (1 << 20))) {
9009 goto illegal_op;
9010 }
9011 if (op != 2) {
9012 /* Byte or halfword load space with dest == r15: memory hints.
9013 * Catch them early so we don't emit pointless addressing code.
9014 * This space is a mix of:
9015 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9016 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9017 * cores)
9018 * unallocated hints, which must be treated as NOPs
9019 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9020 * which is easiest for the decoding logic
9021 * Some space which must UNDEF
9022 */
9023 int op1 = (insn >> 23) & 3;
9024 int op2 = (insn >> 6) & 0x3f;
9025 if (op & 2) {
9026 goto illegal_op;
9027 }
9028 if (rn == 15) {
9029 /* UNPREDICTABLE, unallocated hint or
9030 * PLD/PLDW/PLI (literal)
9031 */
9032 return 0;
9033 }
9034 if (op1 & 1) {
9035 return 0; /* PLD/PLDW/PLI or unallocated hint */
9036 }
9037 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
9038 return 0; /* PLD/PLDW/PLI or unallocated hint */
9039 }
9040 /* UNDEF space, or an UNPREDICTABLE */
9041 return 1;
9042 }
9043 }
9044 user = IS_USER(s);
9045 if (rn == 15) {
9046 addr = tcg_temp_new_i32();
9047 /* PC relative. */
9048 /* s->pc has already been incremented by 4. */
9049 imm = s->pc & 0xfffffffc;
9050 if (insn & (1 << 23))
9051 imm += insn & 0xfff;
9052 else
9053 imm -= insn & 0xfff;
9054 tcg_gen_movi_i32(addr, imm);
9055 } else {
9056 addr = load_reg(s, rn);
9057 if (insn & (1 << 23)) {
9058 /* Positive offset. */
9059 imm = insn & 0xfff;
9060 tcg_gen_addi_i32(addr, addr, imm);
9061 } else {
9062 imm = insn & 0xff;
9063 switch ((insn >> 8) & 0xf) {
9064 case 0x0: /* Shifted Register. */
9065 shift = (insn >> 4) & 0xf;
9066 if (shift > 3) {
9067 tcg_temp_free_i32(addr);
9068 goto illegal_op;
9069 }
9070 tmp = load_reg(s, rm);
9071 if (shift)
9072 tcg_gen_shli_i32(tmp, tmp, shift);
9073 tcg_gen_add_i32(addr, addr, tmp);
9074 tcg_temp_free_i32(tmp);
9075 break;
9076 case 0xc: /* Negative offset. */
9077 tcg_gen_addi_i32(addr, addr, -imm);
9078 break;
9079 case 0xe: /* User privilege. */
9080 tcg_gen_addi_i32(addr, addr, imm);
9081 user = 1;
9082 break;
9083 case 0x9: /* Post-decrement. */
9084 imm = -imm;
9085 /* Fall through. */
9086 case 0xb: /* Post-increment. */
9087 postinc = 1;
9088 writeback = 1;
9089 break;
9090 case 0xd: /* Pre-decrement. */
9091 imm = -imm;
9092 /* Fall through. */
9093 case 0xf: /* Pre-increment. */
9094 tcg_gen_addi_i32(addr, addr, imm);
9095 writeback = 1;
9096 break;
9097 default:
9098 tcg_temp_free_i32(addr);
9099 goto illegal_op;
9100 }
9101 }
9102 }
9103 if (insn & (1 << 20)) {
9104 /* Load. */
9105 switch (op) {
9106 case 0: tmp = gen_ld8u(addr, user); break;
9107 case 4: tmp = gen_ld8s(addr, user); break;
9108 case 1: tmp = gen_ld16u(addr, user); break;
9109 case 5: tmp = gen_ld16s(addr, user); break;
9110 case 2: tmp = gen_ld32(addr, user); break;
9111 default:
9112 tcg_temp_free_i32(addr);
9113 goto illegal_op;
9114 }
9115 if (rs == 15) {
9116 gen_bx(s, tmp);
9117 } else {
9118 store_reg(s, rs, tmp);
9119 }
9120 } else {
9121 /* Store. */
9122 tmp = load_reg(s, rs);
9123 switch (op) {
9124 case 0: gen_st8(tmp, addr, user); break;
9125 case 1: gen_st16(tmp, addr, user); break;
9126 case 2: gen_st32(tmp, addr, user); break;
9127 default:
9128 tcg_temp_free_i32(addr);
9129 goto illegal_op;
9130 }
9131 }
9132 if (postinc)
9133 tcg_gen_addi_i32(addr, addr, imm);
9134 if (writeback) {
9135 store_reg(s, rn, addr);
9136 } else {
9137 tcg_temp_free_i32(addr);
9138 }
9139 }
9140 break;
9141 default:
9142 goto illegal_op;
9143 }
9144 return 0;
9145 illegal_op:
9146 return 1;
9147 }
9148
9149 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
9150 {
9151 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9152 int32_t offset;
9153 int i;
9154 TCGv tmp;
9155 TCGv tmp2;
9156 TCGv addr;
9157
9158 if (s->condexec_mask) {
9159 cond = s->condexec_cond;
9160 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9161 s->condlabel = gen_new_label();
9162 gen_test_cc(cond ^ 1, s->condlabel);
9163 s->condjmp = 1;
9164 }
9165 }
9166
9167 insn = arm_lduw_code(s->pc, s->bswap_code);
9168 s->pc += 2;
9169
9170 switch (insn >> 12) {
9171 case 0: case 1: /* Shift by immediate; add/subtract. */
9172
9173 rd = insn & 7;
9174 op = (insn >> 11) & 3;
9175 if (op == 3) {
9176 /* add/subtract */
9177 rn = (insn >> 3) & 7;
9178 tmp = load_reg(s, rn);
9179 if (insn & (1 << 10)) {
9180 /* immediate */
9181 tmp2 = tcg_temp_new_i32();
9182 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
9183 } else {
9184 /* reg */
9185 rm = (insn >> 6) & 7;
9186 tmp2 = load_reg(s, rm);
9187 }
9188 if (insn & (1 << 9)) {
9189 if (s->condexec_mask)
9190 tcg_gen_sub_i32(tmp, tmp, tmp2);
9191 else
9192 gen_helper_sub_cc(tmp, tmp, tmp2);
9193 } else {
9194 if (s->condexec_mask)
9195 tcg_gen_add_i32(tmp, tmp, tmp2);
9196 else
9197 gen_helper_add_cc(tmp, tmp, tmp2);
9198 }
9199 tcg_temp_free_i32(tmp2);
9200 store_reg(s, rd, tmp);
9201 } else {
9202 /* shift immediate */
9203 rm = (insn >> 3) & 7;
9204 shift = (insn >> 6) & 0x1f;
9205 tmp = load_reg(s, rm);
9206 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9207 if (!s->condexec_mask)
9208 gen_logic_CC(tmp);
9209 store_reg(s, rd, tmp);
9210 }
9211 break;
9212 case 2: case 3:
9213 /* mov/cmp/add/sub with an 8-bit immediate */
9214 op = (insn >> 11) & 3;
9215 rd = (insn >> 8) & 0x7;
9216 if (op == 0) { /* mov */
9217 tmp = tcg_temp_new_i32();
9218 tcg_gen_movi_i32(tmp, insn & 0xff);
9219 if (!s->condexec_mask)
9220 gen_logic_CC(tmp);
9221 store_reg(s, rd, tmp);
9222 } else {
9223 tmp = load_reg(s, rd);
9224 tmp2 = tcg_temp_new_i32();
9225 tcg_gen_movi_i32(tmp2, insn & 0xff);
9226 switch (op) {
9227 case 1: /* cmp */
9228 gen_helper_sub_cc(tmp, tmp, tmp2);
9229 tcg_temp_free_i32(tmp);
9230 tcg_temp_free_i32(tmp2);
9231 break;
9232 case 2: /* add */
9233 if (s->condexec_mask)
9234 tcg_gen_add_i32(tmp, tmp, tmp2);
9235 else
9236 gen_helper_add_cc(tmp, tmp, tmp2);
9237 tcg_temp_free_i32(tmp2);
9238 store_reg(s, rd, tmp);
9239 break;
9240 case 3: /* sub */
9241 if (s->condexec_mask)
9242 tcg_gen_sub_i32(tmp, tmp, tmp2);
9243 else
9244 gen_helper_sub_cc(tmp, tmp, tmp2);
9245 tcg_temp_free_i32(tmp2);
9246 store_reg(s, rd, tmp);
9247 break;
9248 }
9249 }
9250 break;
9251 case 4:
9252 if (insn & (1 << 11)) {
9253 rd = (insn >> 8) & 7;
9254 /* load pc-relative. Bit 1 of PC is ignored. */
9255 val = s->pc + 2 + ((insn & 0xff) * 4);
9256 val &= ~(uint32_t)2;
9257 addr = tcg_temp_new_i32();
9258 tcg_gen_movi_i32(addr, val);
9259 tmp = gen_ld32(addr, IS_USER(s));
9260 tcg_temp_free_i32(addr);
9261 store_reg(s, rd, tmp);
9262 break;
9263 }
9264 if (insn & (1 << 10)) {
9265 /* data processing extended or blx */
9266 rd = (insn & 7) | ((insn >> 4) & 8);
9267 rm = (insn >> 3) & 0xf;
9268 op = (insn >> 8) & 3;
9269 switch (op) {
9270 case 0: /* add */
9271 tmp = load_reg(s, rd);
9272 tmp2 = load_reg(s, rm);
9273 tcg_gen_add_i32(tmp, tmp, tmp2);
9274 tcg_temp_free_i32(tmp2);
9275 store_reg(s, rd, tmp);
9276 break;
9277 case 1: /* cmp */
9278 tmp = load_reg(s, rd);
9279 tmp2 = load_reg(s, rm);
9280 gen_helper_sub_cc(tmp, tmp, tmp2);
9281 tcg_temp_free_i32(tmp2);
9282 tcg_temp_free_i32(tmp);
9283 break;
9284 case 2: /* mov/cpy */
9285 tmp = load_reg(s, rm);
9286 store_reg(s, rd, tmp);
9287 break;
9288 case 3:/* branch [and link] exchange thumb register */
9289 tmp = load_reg(s, rm);
9290 if (insn & (1 << 7)) {
9291 ARCH(5);
9292 val = (uint32_t)s->pc | 1;
9293 tmp2 = tcg_temp_new_i32();
9294 tcg_gen_movi_i32(tmp2, val);
9295 store_reg(s, 14, tmp2);
9296 }
9297 /* already thumb, no need to check */
9298 gen_bx(s, tmp);
9299 break;
9300 }
9301 break;
9302 }
9303
9304 /* data processing register */
9305 rd = insn & 7;
9306 rm = (insn >> 3) & 7;
9307 op = (insn >> 6) & 0xf;
9308 if (op == 2 || op == 3 || op == 4 || op == 7) {
9309 /* the shift/rotate ops want the operands backwards */
9310 val = rm;
9311 rm = rd;
9312 rd = val;
9313 val = 1;
9314 } else {
9315 val = 0;
9316 }
9317
9318 if (op == 9) { /* neg */
9319 tmp = tcg_temp_new_i32();
9320 tcg_gen_movi_i32(tmp, 0);
9321 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9322 tmp = load_reg(s, rd);
9323 } else {
9324 TCGV_UNUSED(tmp);
9325 }
9326
9327 tmp2 = load_reg(s, rm);
9328 switch (op) {
9329 case 0x0: /* and */
9330 tcg_gen_and_i32(tmp, tmp, tmp2);
9331 if (!s->condexec_mask)
9332 gen_logic_CC(tmp);
9333 break;
9334 case 0x1: /* eor */
9335 tcg_gen_xor_i32(tmp, tmp, tmp2);
9336 if (!s->condexec_mask)
9337 gen_logic_CC(tmp);
9338 break;
9339 case 0x2: /* lsl */
9340 if (s->condexec_mask) {
9341 gen_helper_shl(tmp2, tmp2, tmp);
9342 } else {
9343 gen_helper_shl_cc(tmp2, tmp2, tmp);
9344 gen_logic_CC(tmp2);
9345 }
9346 break;
9347 case 0x3: /* lsr */
9348 if (s->condexec_mask) {
9349 gen_helper_shr(tmp2, tmp2, tmp);
9350 } else {
9351 gen_helper_shr_cc(tmp2, tmp2, tmp);
9352 gen_logic_CC(tmp2);
9353 }
9354 break;
9355 case 0x4: /* asr */
9356 if (s->condexec_mask) {
9357 gen_helper_sar(tmp2, tmp2, tmp);
9358 } else {
9359 gen_helper_sar_cc(tmp2, tmp2, tmp);
9360 gen_logic_CC(tmp2);
9361 }
9362 break;
9363 case 0x5: /* adc */
9364 if (s->condexec_mask)
9365 gen_adc(tmp, tmp2);
9366 else
9367 gen_helper_adc_cc(tmp, tmp, tmp2);
9368 break;
9369 case 0x6: /* sbc */
9370 if (s->condexec_mask)
9371 gen_sub_carry(tmp, tmp, tmp2);
9372 else
9373 gen_helper_sbc_cc(tmp, tmp, tmp2);
9374 break;
9375 case 0x7: /* ror */
9376 if (s->condexec_mask) {
9377 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9378 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9379 } else {
9380 gen_helper_ror_cc(tmp2, tmp2, tmp);
9381 gen_logic_CC(tmp2);
9382 }
9383 break;
9384 case 0x8: /* tst */
9385 tcg_gen_and_i32(tmp, tmp, tmp2);
9386 gen_logic_CC(tmp);
9387 rd = 16;
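/* rd = 16 marks "no destination": the writeback code
 * below only stores a result when rd != 16. */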
9388 break;
9389 case 0x9: /* neg */
9390 if (s->condexec_mask)
9391 tcg_gen_neg_i32(tmp, tmp2);
9392 else
9393 gen_helper_sub_cc(tmp, tmp, tmp2);
9394 break;
9395 case 0xa: /* cmp */
9396 gen_helper_sub_cc(tmp, tmp, tmp2);
9397 rd = 16;
9398 break;
9399 case 0xb: /* cmn */
9400 gen_helper_add_cc(tmp, tmp, tmp2);
9401 rd = 16;
9402 break;
9403 case 0xc: /* orr */
9404 tcg_gen_or_i32(tmp, tmp, tmp2);
9405 if (!s->condexec_mask)
9406 gen_logic_CC(tmp);
9407 break;
9408 case 0xd: /* mul */
9409 tcg_gen_mul_i32(tmp, tmp, tmp2);
9410 if (!s->condexec_mask)
9411 gen_logic_CC(tmp);
9412 break;
9413 case 0xe: /* bic */
9414 tcg_gen_andc_i32(tmp, tmp, tmp2);
9415 if (!s->condexec_mask)
9416 gen_logic_CC(tmp);
9417 break;
9418 case 0xf: /* mvn */
9419 tcg_gen_not_i32(tmp2, tmp2);
9420 if (!s->condexec_mask)
9421 gen_logic_CC(tmp2);
9422 val = 1;
9423 rm = rd;
9424 break;
9425 }
9426 if (rd != 16) {
9427 if (val) {
9428 store_reg(s, rm, tmp2);
9429 if (op != 0xf)
9430 tcg_temp_free_i32(tmp);
9431 } else {
9432 store_reg(s, rd, tmp);
9433 tcg_temp_free_i32(tmp2);
9434 }
9435 } else {
9436 tcg_temp_free_i32(tmp);
9437 tcg_temp_free_i32(tmp2);
9438 }
9439 break;
9440
9441 case 5:
9442 /* load/store register offset. */
9443 rd = insn & 7;
9444 rn = (insn >> 3) & 7;
9445 rm = (insn >> 6) & 7;
9446 op = (insn >> 9) & 7;
9447 addr = load_reg(s, rn);
9448 tmp = load_reg(s, rm);
9449 tcg_gen_add_i32(addr, addr, tmp);
9450 tcg_temp_free_i32(tmp);
9451
9452 if (op < 3) /* store */
9453 tmp = load_reg(s, rd);
9454
9455 switch (op) {
9456 case 0: /* str */
9457 gen_st32(tmp, addr, IS_USER(s));
9458 break;
9459 case 1: /* strh */
9460 gen_st16(tmp, addr, IS_USER(s));
9461 break;
9462 case 2: /* strb */
9463 gen_st8(tmp, addr, IS_USER(s));
9464 break;
9465 case 3: /* ldrsb */
9466 tmp = gen_ld8s(addr, IS_USER(s));
9467 break;
9468 case 4: /* ldr */
9469 tmp = gen_ld32(addr, IS_USER(s));
9470 break;
9471 case 5: /* ldrh */
9472 tmp = gen_ld16u(addr, IS_USER(s));
9473 break;
9474 case 6: /* ldrb */
9475 tmp = gen_ld8u(addr, IS_USER(s));
9476 break;
9477 case 7: /* ldrsh */
9478 tmp = gen_ld16s(addr, IS_USER(s));
9479 break;
9480 }
9481 if (op >= 3) /* load */
9482 store_reg(s, rd, tmp);
9483 tcg_temp_free_i32(addr);
9484 break;
9485
9486 case 6:
9487 /* load/store word immediate offset */
9488 rd = insn & 7;
9489 rn = (insn >> 3) & 7;
9490 addr = load_reg(s, rn);
9491 val = (insn >> 4) & 0x7c;
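/* imm5 from insn[10:6], scaled by 4 for word access. */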
9492 tcg_gen_addi_i32(addr, addr, val);
9493
9494 if (insn & (1 << 11)) {
9495 /* load */
9496 tmp = gen_ld32(addr, IS_USER(s));
9497 store_reg(s, rd, tmp);
9498 } else {
9499 /* store */
9500 tmp = load_reg(s, rd);
9501 gen_st32(tmp, addr, IS_USER(s));
9502 }
9503 tcg_temp_free_i32(addr);
9504 break;
9505
9506 case 7:
9507 /* load/store byte immediate offset */
9508 rd = insn & 7;
9509 rn = (insn >> 3) & 7;
9510 addr = load_reg(s, rn);
9511 val = (insn >> 6) & 0x1f;
9512 tcg_gen_addi_i32(addr, addr, val);
9513
9514 if (insn & (1 << 11)) {
9515 /* load */
9516 tmp = gen_ld8u(addr, IS_USER(s));
9517 store_reg(s, rd, tmp);
9518 } else {
9519 /* store */
9520 tmp = load_reg(s, rd);
9521 gen_st8(tmp, addr, IS_USER(s));
9522 }
9523 tcg_temp_free_i32(addr);
9524 break;
9525
9526 case 8:
9527 /* load/store halfword immediate offset */
9528 rd = insn & 7;
9529 rn = (insn >> 3) & 7;
9530 addr = load_reg(s, rn);
9531 val = (insn >> 5) & 0x3e;
9532 tcg_gen_addi_i32(addr, addr, val);
9533
9534 if (insn & (1 << 11)) {
9535 /* load */
9536 tmp = gen_ld16u(addr, IS_USER(s));
9537 store_reg(s, rd, tmp);
9538 } else {
9539 /* store */
9540 tmp = load_reg(s, rd);
9541 gen_st16(tmp, addr, IS_USER(s));
9542 }
9543 tcg_temp_free_i32(addr);
9544 break;
9545
9546 case 9:
9547 /* load/store from stack */
9548 rd = (insn >> 8) & 7;
9549 addr = load_reg(s, 13);
9550 val = (insn & 0xff) * 4;
9551 tcg_gen_addi_i32(addr, addr, val);
9552
9553 if (insn & (1 << 11)) {
9554 /* load */
9555 tmp = gen_ld32(addr, IS_USER(s));
9556 store_reg(s, rd, tmp);
9557 } else {
9558 /* store */
9559 tmp = load_reg(s, rd);
9560 gen_st32(tmp, addr, IS_USER(s));
9561 }
9562 tcg_temp_free_i32(addr);
9563 break;
9564
9565 case 10:
9566 /* add to high reg */
9567 rd = (insn >> 8) & 7;
9568 if (insn & (1 << 11)) {
9569 /* SP */
9570 tmp = load_reg(s, 13);
9571 } else {
9572 /* PC. bit 1 is ignored. */
9573 tmp = tcg_temp_new_i32();
9574 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9575 }
9576 val = (insn & 0xff) * 4;
9577 tcg_gen_addi_i32(tmp, tmp, val);
9578 store_reg(s, rd, tmp);
9579 break;
9580
9581 case 11:
9582 /* misc */
9583 op = (insn >> 8) & 0xf;
9584 switch (op) {
9585 case 0:
9586 /* adjust stack pointer */
9587 tmp = load_reg(s, 13);
9588 val = (insn & 0x7f) * 4;
9589 if (insn & (1 << 7))
9590 val = -(int32_t)val;
9591 tcg_gen_addi_i32(tmp, tmp, val);
9592 store_reg(s, 13, tmp);
9593 break;
9594
9595 case 2: /* sign/zero extend. */
9596 ARCH(6);
9597 rd = insn & 7;
9598 rm = (insn >> 3) & 7;
9599 tmp = load_reg(s, rm);
9600 switch ((insn >> 6) & 3) {
9601 case 0: gen_sxth(tmp); break;
9602 case 1: gen_sxtb(tmp); break;
9603 case 2: gen_uxth(tmp); break;
9604 case 3: gen_uxtb(tmp); break;
9605 }
9606 store_reg(s, rd, tmp);
9607 break;
9608 case 4: case 5: case 0xc: case 0xd:
9609 /* push/pop */
9610 addr = load_reg(s, 13);
9611 if (insn & (1 << 8))
9612 offset = 4;
9613 else
9614 offset = 0;
9615 for (i = 0; i < 8; i++) {
9616 if (insn & (1 << i))
9617 offset += 4;
9618 }
9619 if ((insn & (1 << 11)) == 0) {
9620 tcg_gen_addi_i32(addr, addr, -offset);
9621 }
9622 for (i = 0; i < 8; i++) {
9623 if (insn & (1 << i)) {
9624 if (insn & (1 << 11)) {
9625 /* pop */
9626 tmp = gen_ld32(addr, IS_USER(s));
9627 store_reg(s, i, tmp);
9628 } else {
9629 /* push */
9630 tmp = load_reg(s, i);
9631 gen_st32(tmp, addr, IS_USER(s));
9632 }
9633 /* advance to the next address. */
9634 tcg_gen_addi_i32(addr, addr, 4);
9635 }
9636 }
9637 TCGV_UNUSED(tmp);
9638 if (insn & (1 << 8)) {
9639 if (insn & (1 << 11)) {
9640 /* pop pc */
9641 tmp = gen_ld32(addr, IS_USER(s));
9642 /* don't set the pc until the rest of the instruction
9643 has completed */
9644 } else {
9645 /* push lr */
9646 tmp = load_reg(s, 14);
9647 gen_st32(tmp, addr, IS_USER(s));
9648 }
9649 tcg_gen_addi_i32(addr, addr, 4);
9650 }
9651 if ((insn & (1 << 11)) == 0) {
9652 tcg_gen_addi_i32(addr, addr, -offset);
9653 }
9654 /* write back the new stack pointer */
9655 store_reg(s, 13, addr);
9656 /* set the new PC value */
9657 if ((insn & 0x0900) == 0x0900) {
9658 store_reg_from_load(env, s, 15, tmp);
9659 }
9660 break;
9661
9662 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9663 rm = insn & 7;
9664 tmp = load_reg(s, rm);
9665 s->condlabel = gen_new_label();
9666 s->condjmp = 1;
9667 if (insn & (1 << 11))
9668 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9669 else
9670 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9671 tcg_temp_free_i32(tmp);
9672 offset = ((insn & 0xf8) >> 2) | ((insn & 0x200) >> 3);
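/* offset[6:1] = insn[9]:insn[7:3]; cbz/cbnz can only
 * branch forward, by 0..126 bytes. */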
9673 val = (uint32_t)s->pc + 2;
9674 val += offset;
9675 gen_jmp(s, val);
9676 break;
9677
9678 case 15: /* IT, nop-hint. */
9679 if ((insn & 0xf) == 0) {
9680 gen_nop_hint(s, (insn >> 4) & 0xf);
9681 break;
9682 }
9683 /* If Then. */
9684 s->condexec_cond = (insn >> 4) & 0xe;
9685 s->condexec_mask = insn & 0x1f;
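/* The 5-bit mask keeps firstcond[0] in its top bit; the
 * ITSTATE advance in gen_intermediate_code_internal shifts
 * one bit per instruction into the condition LSB. */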
9686 /* No actual code generated for this insn, just setup state. */
9687 break;
9688
9689 case 0xe: /* bkpt */
9690 ARCH(5);
9691 gen_exception_insn(s, 2, EXCP_BKPT);
9692 break;
9693
9694 case 0xa: /* rev */
9695 ARCH(6);
9696 rn = (insn >> 3) & 0x7;
9697 rd = insn & 0x7;
9698 tmp = load_reg(s, rn);
9699 switch ((insn >> 6) & 3) {
9700 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9701 case 1: gen_rev16(tmp); break;
9702 case 3: gen_revsh(tmp); break;
9703 default: goto illegal_op;
9704 }
9705 store_reg(s, rd, tmp);
9706 break;
9707
9708 case 6:
9709 switch ((insn >> 5) & 7) {
9710 case 2:
9711 /* setend */
9712 ARCH(6);
9713 if (insn & (1 << 3)) {
9714 /* BE8 mode not implemented. */
9715 goto illegal_op;
9716 }
9717 break;
9718 case 3:
9719 /* cps */
9720 ARCH(6);
9721 if (IS_USER(s)) {
9722 break;
9723 }
9724 if (IS_M(env)) {
9725 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9726 /* FAULTMASK */
9727 if (insn & 1) {
9728 addr = tcg_const_i32(19);
9729 gen_helper_v7m_msr(cpu_env, addr, tmp);
9730 tcg_temp_free_i32(addr);
9731 }
9732 /* PRIMASK */
9733 if (insn & 2) {
9734 addr = tcg_const_i32(16);
9735 gen_helper_v7m_msr(cpu_env, addr, tmp);
9736 tcg_temp_free_i32(addr);
9737 }
9738 tcg_temp_free_i32(tmp);
9739 gen_lookup_tb(s);
9740 } else {
9741 if (insn & (1 << 4)) {
9742 shift = CPSR_A | CPSR_I | CPSR_F;
9743 } else {
9744 shift = 0;
9745 }
9746 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9747 }
9748 break;
9749 default:
9750 goto undef;
9751 }
9752 break;
9753
9754 default:
9755 goto undef;
9756 }
9757 break;
9758
9759 case 12:
9760 {
9761 /* load/store multiple */
9762 TCGv loaded_var;
9763 TCGV_UNUSED(loaded_var);
9764 rn = (insn >> 8) & 0x7;
9765 addr = load_reg(s, rn);
9766 for (i = 0; i < 8; i++) {
9767 if (insn & (1 << i)) {
9768 if (insn & (1 << 11)) {
9769 /* load */
9770 tmp = gen_ld32(addr, IS_USER(s));
9771 if (i == rn) {
9772 loaded_var = tmp;
9773 } else {
9774 store_reg(s, i, tmp);
9775 }
9776 } else {
9777 /* store */
9778 tmp = load_reg(s, i);
9779 gen_st32(tmp, addr, IS_USER(s));
9780 }
9781 /* advance to the next address */
9782 tcg_gen_addi_i32(addr, addr, 4);
9783 }
9784 }
9785 if ((insn & (1 << rn)) == 0) {
9786 /* base reg not in list: base register writeback */
9787 store_reg(s, rn, addr);
9788 } else {
9789 /* base reg in list: if load, complete it now */
9790 if (insn & (1 << 11)) {
9791 store_reg(s, rn, loaded_var);
9792 }
9793 tcg_temp_free_i32(addr);
9794 }
9795 break;
9796 }
9797 case 13:
9798 /* conditional branch or swi */
9799 cond = (insn >> 8) & 0xf;
9800 if (cond == 0xe)
9801 goto undef;
9802
9803 if (cond == 0xf) {
9804 /* swi */
9805 gen_set_pc_im(s->pc);
9806 s->is_jmp = DISAS_SWI;
9807 break;
9808 }
9809 /* generate a conditional jump to next instruction */
9810 s->condlabel = gen_new_label();
9811 gen_test_cc(cond ^ 1, s->condlabel);
9812 s->condjmp = 1;
9813
9814 /* jump to the offset */
9815 val = (uint32_t)s->pc + 2;
9816 offset = ((int32_t)insn << 24) >> 24;
9817 val += offset << 1;
9818 gen_jmp(s, val);
9819 break;
9820
9821 case 14:
9822 if (insn & (1 << 11)) {
9823 if (disas_thumb2_insn(env, s, insn))
9824 goto undef32;
9825 break;
9826 }
9827 /* unconditional branch */
9828 val = (uint32_t)s->pc;
9829 offset = ((int32_t)insn << 21) >> 21;
9830 val += (offset << 1) + 2;
9831 gen_jmp(s, val);
9832 break;
9833
9834 case 15:
9835 if (disas_thumb2_insn(env, s, insn))
9836 goto undef32;
9837 break;
9838 }
9839 return;
9840 undef32:
9841 gen_exception_insn(s, 4, EXCP_UDEF);
9842 return;
9843 illegal_op:
9844 undef:
9845 gen_exception_insn(s, 2, EXCP_UDEF);
9846 }
9847
9848 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9849 basic block 'tb'. If search_pc is TRUE, also generate PC
9850 information for each intermediate instruction. */
9851 static inline void gen_intermediate_code_internal(CPUARMState *env,
9852 TranslationBlock *tb,
9853 int search_pc)
9854 {
9855 DisasContext dc1, *dc = &dc1;
9856 CPUBreakpoint *bp;
9857 uint16_t *gen_opc_end;
9858 int j, lj;
9859 target_ulong pc_start;
9860 uint32_t next_page_start;
9861 int num_insns;
9862 int max_insns;
9863
9864 /* generate intermediate code */
9865 pc_start = tb->pc;
9866
9867 dc->tb = tb;
9868
9869 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9870
9871 dc->is_jmp = DISAS_NEXT;
9872 dc->pc = pc_start;
9873 dc->singlestep_enabled = env->singlestep_enabled;
9874 dc->condjmp = 0;
9875 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9876 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
9877 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9878 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9879 #if !defined(CONFIG_USER_ONLY)
9880 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9881 #endif
9882 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9883 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9884 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9885 cpu_F0s = tcg_temp_new_i32();
9886 cpu_F1s = tcg_temp_new_i32();
9887 cpu_F0d = tcg_temp_new_i64();
9888 cpu_F1d = tcg_temp_new_i64();
9889 cpu_V0 = cpu_F0d;
9890 cpu_V1 = cpu_F1d;
9891 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9892 cpu_M0 = tcg_temp_new_i64();
9893 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9894 lj = -1;
9895 num_insns = 0;
9896 max_insns = tb->cflags & CF_COUNT_MASK;
9897 if (max_insns == 0)
9898 max_insns = CF_COUNT_MASK;
9899
9900 gen_icount_start();
9901
9902 tcg_clear_temp_count();
9903
9904 /* A note on handling of the condexec (IT) bits:
9905 *
9906 * We want to avoid the overhead of having to write the updated condexec
9907 * bits back to the CPUARMState for every instruction in an IT block. So:
9908 * (1) if the condexec bits are not already zero then we write
9909 * zero back into the CPUARMState now. This avoids complications trying
9910 * to do it at the end of the block. (For example if we don't do this
9911 * it's hard to identify whether we can safely skip writing condexec
9912 * at the end of the TB, which we definitely want to do for the case
9913 * where a TB doesn't do anything with the IT state at all.)
9914 * (2) if we are going to leave the TB then we call gen_set_condexec()
9915 * which will write the correct value into CPUARMState if zero is wrong.
9916 * This is done both for leaving the TB at the end, and for leaving
9917 * it because of an exception we know will happen, which is done in
9918 * gen_exception_insn(). The latter is necessary because we need to
9919 * leave the TB with the PC/IT state just prior to execution of the
9920 * instruction which caused the exception.
9921 * (3) if we leave the TB unexpectedly (e.g. a data abort on a load)
9922 * then the CPUARMState will be wrong and we need to reset it.
9923 * This is handled in the same way as restoration of the
9924 * PC in these situations: we will be called again with search_pc=1
9925 * and generate a mapping of the condexec bits for each PC in
9926 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9927 * this to restore the condexec bits.
9928 *
9929 * Note that there are no instructions which can read the condexec
9930 * bits, and none which can write non-static values to them, so
9931 * we don't need to care about whether CPUARMState is correct in the
9932 * middle of a TB.
9933 */
9934
9935 /* Reset the conditional execution bits immediately. This avoids
9936 complications trying to do it at the end of the block. */
9937 if (dc->condexec_mask || dc->condexec_cond)
9938 {
9939 TCGv tmp = tcg_temp_new_i32();
9940 tcg_gen_movi_i32(tmp, 0);
9941 store_cpu_field(tmp, condexec_bits);
9942 }
9943 do {
9944 #ifdef CONFIG_USER_ONLY
9945 /* Intercept jump to the magic kernel page. */
9946 if (dc->pc >= 0xffff0000) {
9947 /* We always get here via a jump, so we know we are not in a
9948 conditional execution block. */
9949 gen_exception(EXCP_KERNEL_TRAP);
9950 dc->is_jmp = DISAS_UPDATE;
9951 break;
9952 }
9953 #else
9954 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9955 /* We always get here via a jump, so we know we are not in a
9956 conditional execution block. */
9957 gen_exception(EXCP_EXCEPTION_EXIT);
9958 dc->is_jmp = DISAS_UPDATE;
9959 break;
9960 }
9961 #endif
9962
9963 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9964 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9965 if (bp->pc == dc->pc) {
9966 gen_exception_insn(dc, 0, EXCP_DEBUG);
9967 /* Advance PC so that clearing the breakpoint will
9968 invalidate this TB. */
9969 dc->pc += 2;
9970 goto done_generating;
9971 break;
9972 }
9973 }
9974 }
9975 if (search_pc) {
9976 j = gen_opc_ptr - gen_opc_buf;
9977 if (lj < j) {
9978 lj++;
9979 while (lj < j)
9980 gen_opc_instr_start[lj++] = 0;
9981 }
9982 gen_opc_pc[lj] = dc->pc;
9983 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9984 gen_opc_instr_start[lj] = 1;
9985 gen_opc_icount[lj] = num_insns;
9986 }
9987
9988 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9989 gen_io_start();
9990
9991 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9992 tcg_gen_debug_insn_start(dc->pc);
9993 }
9994
9995 if (dc->thumb) {
9996 disas_thumb_insn(env, dc);
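/* Advance the ITSTATE: shift the next mask bit into the
 * condition LSB; e.g. ITET EQ executes its three
 * instructions as EQ, NE, EQ before the mask empties. */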
9997 if (dc->condexec_mask) {
9998 dc->condexec_cond = (dc->condexec_cond & 0xe)
9999 | ((dc->condexec_mask >> 4) & 1);
10000 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10001 if (dc->condexec_mask == 0) {
10002 dc->condexec_cond = 0;
10003 }
10004 }
10005 } else {
10006 disas_arm_insn(env, dc);
10007 }
10008
10009 if (dc->condjmp && !dc->is_jmp) {
10010 gen_set_label(dc->condlabel);
10011 dc->condjmp = 0;
10012 }
10013
10014 if (tcg_check_temp_count()) {
10015 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
10016 }
10017
10018 /* Translation stops when a conditional branch is encountered.
10019 * Otherwise the subsequent code could get translated several times.
10020 * Also stop translation when a page boundary is reached. This
10021 * ensures prefetch aborts occur at the right place. */
10022 num_insns++;
10023 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
10024 !env->singlestep_enabled &&
10025 !singlestep &&
10026 dc->pc < next_page_start &&
10027 num_insns < max_insns);
10028
10029 if (tb->cflags & CF_LAST_IO) {
10030 if (dc->condjmp) {
10031 /* FIXME: This can theoretically happen with self-modifying
10032 code. */
10033 cpu_abort(env, "IO on conditional branch instruction");
10034 }
10035 gen_io_end();
10036 }
10037
10038 /* At this stage dc->condjmp will only be set when the skipped
10039 instruction was a conditional branch or trap, and the PC has
10040 already been written. */
10041 if (unlikely(env->singlestep_enabled)) {
10042 /* Make sure the pc is updated, and raise a debug exception. */
10043 if (dc->condjmp) {
10044 gen_set_condexec(dc);
10045 if (dc->is_jmp == DISAS_SWI) {
10046 gen_exception(EXCP_SWI);
10047 } else {
10048 gen_exception(EXCP_DEBUG);
10049 }
10050 gen_set_label(dc->condlabel);
10051 }
10052 if (dc->condjmp || !dc->is_jmp) {
10053 gen_set_pc_im(dc->pc);
10054 dc->condjmp = 0;
10055 }
10056 gen_set_condexec(dc);
10057 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
10058 gen_exception(EXCP_SWI);
10059 } else {
10060 /* FIXME: Single stepping a WFI insn will not halt
10061 the CPU. */
10062 gen_exception(EXCP_DEBUG);
10063 }
10064 } else {
10065 /* While branches must always occur at the end of an IT block,
10066 there are a few other things that can cause us to terminate
10067 the TB in the middle of an IT block:
10068 - Exception generating instructions (bkpt, swi, undefined).
10069 - Page boundaries.
10070 - Hardware watchpoints.
10071 Hardware breakpoints have already been handled and skip this code.
10072 */
10073 gen_set_condexec(dc);
10074 switch (dc->is_jmp) {
10075 case DISAS_NEXT:
10076 gen_goto_tb(dc, 1, dc->pc);
10077 break;
10078 default:
10079 case DISAS_JUMP:
10080 case DISAS_UPDATE:
10081 /* indicate that the hash table must be used to find the next TB */
10082 tcg_gen_exit_tb(0);
10083 break;
10084 case DISAS_TB_JUMP:
10085 /* nothing more to generate */
10086 break;
10087 case DISAS_WFI:
10088 gen_helper_wfi();
10089 break;
10090 case DISAS_SWI:
10091 gen_exception(EXCP_SWI);
10092 break;
10093 }
10094 if (dc->condjmp) {
10095 gen_set_label(dc->condlabel);
10096 gen_set_condexec(dc);
10097 gen_goto_tb(dc, 1, dc->pc);
10098 dc->condjmp = 0;
10099 }
10100 }
10101
10102 done_generating:
10103 gen_icount_end(tb, num_insns);
10104 *gen_opc_ptr = INDEX_op_end;
10105
10106 #ifdef DEBUG_DISAS
10107 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
10108 qemu_log("----------------\n");
10109 qemu_log("IN: %s\n", lookup_symbol(pc_start));
10110 log_target_disas(pc_start, dc->pc - pc_start,
10111 dc->thumb | (dc->bswap_code << 1));
10112 qemu_log("\n");
10113 }
10114 #endif
10115 if (search_pc) {
10116 j = gen_opc_ptr - gen_opc_buf;
10117 lj++;
10118 while (lj <= j)
10119 gen_opc_instr_start[lj++] = 0;
10120 } else {
10121 tb->size = dc->pc - pc_start;
10122 tb->icount = num_insns;
10123 }
10124 }
10125
10126 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
10127 {
10128 gen_intermediate_code_internal(env, tb, 0);
10129 }
10130
10131 void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
10132 {
10133 gen_intermediate_code_internal(env, tb, 1);
10134 }
10135
10136 static const char *cpu_mode_names[16] = {
10137 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10138 "???", "???", "???", "und", "???", "???", "???", "sys"
10139 };
10140
10141 void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
10142 int flags)
10143 {
10144 int i;
10145 #if 0
10146 union {
10147 uint32_t i;
10148 float s;
10149 } s0, s1;
10150 CPU_DoubleU d;
10151 /* ??? This assumes float64 and double have the same layout.
10152 Oh well, it's only debug dumps. */
10153 union {
10154 float64 f64;
10155 double d;
10156 } d0;
10157 #endif
10158 uint32_t psr;
10159
10160 for (i = 0; i < 16; i++) {
10161 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
10162 if ((i % 4) == 3)
10163 cpu_fprintf(f, "\n");
10164 else
10165 cpu_fprintf(f, " ");
10166 }
10167 psr = cpsr_read(env);
10168 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10169 psr,
10170 psr & (1 << 31) ? 'N' : '-',
10171 psr & (1 << 30) ? 'Z' : '-',
10172 psr & (1 << 29) ? 'C' : '-',
10173 psr & (1 << 28) ? 'V' : '-',
10174 psr & CPSR_T ? 'T' : 'A',
10175 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
10176
10177 #if 0
10178 for (i = 0; i < 16; i++) {
10179 d.d = env->vfp.regs[i];
10180 s0.i = d.l.lower;
10181 s1.i = d.l.upper;
10182 d0.f64 = d.d;
10183 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
10184 i * 2, (int)s0.i, s0.s,
10185 i * 2 + 1, (int)s1.i, s1.s,
10186 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
10187 d0.d);
10188 }
10189 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
10190 #endif
10191 }
10192
10193 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
10194 {
10195 env->regs[15] = gen_opc_pc[pc_pos];
10196 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
10197 }