1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "disas.h"
29 #include "tcg-op.h"
30 #include "qemu-log.h"
31
32 #include "helper.h"
33 #define GEN_HELPER 1
34 #include "helper.h"
35
36 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38 /* currently all emulated v5 cores are also v5TE, so don't bother */
39 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
40 #define ENABLE_ARCH_5J 0
41 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
45
46 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
47
48 /* internal defines */
49 typedef struct DisasContext {
50 target_ulong pc;
51 int is_jmp;
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
56 /* Thumb-2 conditional execution bits. */
57 int condexec_mask;
58 int condexec_cond;
59 struct TranslationBlock *tb;
60 int singlestep_enabled;
61 int thumb;
62 #if !defined(CONFIG_USER_ONLY)
63 int user;
64 #endif
65 int vfp_enabled;
66 int vec_len;
67 int vec_stride;
68 } DisasContext;
69
70 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
71
72 #if defined(CONFIG_USER_ONLY)
73 #define IS_USER(s) 1
74 #else
75 #define IS_USER(s) (s->user)
76 #endif
77
78 /* These instructions trap after executing, so defer them until after the
79 conditional execution state has been updated. */
80 #define DISAS_WFI 4
81 #define DISAS_SWI 5
82
83 static TCGv_ptr cpu_env;
84 /* We reuse the same 64-bit temporaries for efficiency. */
85 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
86 static TCGv_i32 cpu_R[16];
87 static TCGv_i32 cpu_exclusive_addr;
88 static TCGv_i32 cpu_exclusive_val;
89 static TCGv_i32 cpu_exclusive_high;
90 #ifdef CONFIG_USER_ONLY
91 static TCGv_i32 cpu_exclusive_test;
92 static TCGv_i32 cpu_exclusive_info;
93 #endif
94
95 /* FIXME: These should be removed. */
96 static TCGv cpu_F0s, cpu_F1s;
97 static TCGv_i64 cpu_F0d, cpu_F1d;
98
99 #include "gen-icount.h"
100
101 static const char *regnames[] =
102 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
103 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
104
105 /* initialize TCG globals. */
106 void arm_translate_init(void)
107 {
108 int i;
109
110 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
111
112 for (i = 0; i < 16; i++) {
113 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUState, regs[i]),
115 regnames[i]);
116 }
117 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
118 offsetof(CPUState, exclusive_addr), "exclusive_addr");
119 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
120 offsetof(CPUState, exclusive_val), "exclusive_val");
121 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_high), "exclusive_high");
123 #ifdef CONFIG_USER_ONLY
124 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUState, exclusive_test), "exclusive_test");
126 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUState, exclusive_info), "exclusive_info");
128 #endif
129
130 #define GEN_HELPER 2
131 #include "helper.h"
132 }
133
134 static inline TCGv load_cpu_offset(int offset)
135 {
136 TCGv tmp = tcg_temp_new_i32();
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
139 }
140
141 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
142
143 static inline void store_cpu_offset(TCGv var, int offset)
144 {
145 tcg_gen_st_i32(var, cpu_env, offset);
146 tcg_temp_free_i32(var);
147 }
148
149 #define store_cpu_field(var, name) \
150 store_cpu_offset(var, offsetof(CPUState, name))
151
152 /* Set a variable to the value of a CPU register. */
153 static void load_reg_var(DisasContext *s, TCGv var, int reg)
154 {
155 if (reg == 15) {
156 uint32_t addr;
157 /* normally, since we have already updated PC past this insn, we only need to add one more insn's length */
158 if (s->thumb)
159 addr = (long)s->pc + 2;
160 else
161 addr = (long)s->pc + 4;
162 tcg_gen_movi_i32(var, addr);
163 } else {
164 tcg_gen_mov_i32(var, cpu_R[reg]);
165 }
166 }
167
168 /* Create a new temporary and set it to the value of a CPU register. */
169 static inline TCGv load_reg(DisasContext *s, int reg)
170 {
171 TCGv tmp = tcg_temp_new_i32();
172 load_reg_var(s, tmp, reg);
173 return tmp;
174 }
175
176 /* Set a CPU register. The source must be a temporary and will be
177 marked as dead. */
178 static void store_reg(DisasContext *s, int reg, TCGv var)
179 {
180 if (reg == 15) {
181 tcg_gen_andi_i32(var, var, ~1);
182 s->is_jmp = DISAS_JUMP;
183 }
184 tcg_gen_mov_i32(cpu_R[reg], var);
185 tcg_temp_free_i32(var);
186 }
187
188 /* Value extensions. */
189 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
190 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
191 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
192 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
193
194 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
195 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
196
197
198 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
199 {
200 TCGv tmp_mask = tcg_const_i32(mask);
201 gen_helper_cpsr_write(var, tmp_mask);
202 tcg_temp_free_i32(tmp_mask);
203 }
204 /* Set NZCV flags from the high 4 bits of var. */
205 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
206
207 static void gen_exception(int excp)
208 {
209 TCGv tmp = tcg_temp_new_i32();
210 tcg_gen_movi_i32(tmp, excp);
211 gen_helper_exception(tmp);
212 tcg_temp_free_i32(tmp);
213 }
214
215 static void gen_smul_dual(TCGv a, TCGv b)
216 {
217 TCGv tmp1 = tcg_temp_new_i32();
218 TCGv tmp2 = tcg_temp_new_i32();
219 tcg_gen_ext16s_i32(tmp1, a);
220 tcg_gen_ext16s_i32(tmp2, b);
221 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
222 tcg_temp_free_i32(tmp2);
223 tcg_gen_sari_i32(a, a, 16);
224 tcg_gen_sari_i32(b, b, 16);
225 tcg_gen_mul_i32(b, b, a);
226 tcg_gen_mov_i32(a, tmp1);
227 tcg_temp_free_i32(tmp1);
228 }
229
230 /* Byteswap each halfword. */
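/* e.g. 0xAABBCCDD -> 0xBBAADDCC: the two byte lanes of each halfword are
   masked out, shifted past each other by 8 bits and recombined. */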
231 static void gen_rev16(TCGv var)
232 {
233 TCGv tmp = tcg_temp_new_i32();
234 tcg_gen_shri_i32(tmp, var, 8);
235 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
236 tcg_gen_shli_i32(var, var, 8);
237 tcg_gen_andi_i32(var, var, 0xff00ff00);
238 tcg_gen_or_i32(var, var, tmp);
239 tcg_temp_free_i32(tmp);
240 }
241
242 /* Byteswap low halfword and sign extend. */
243 static void gen_revsh(TCGv var)
244 {
245 tcg_gen_ext16u_i32(var, var);
246 tcg_gen_bswap16_i32(var, var);
247 tcg_gen_ext16s_i32(var, var);
248 }
249
250 /* Unsigned bitfield extract. */
251 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
252 {
253 if (shift)
254 tcg_gen_shri_i32(var, var, shift);
255 tcg_gen_andi_i32(var, var, mask);
256 }
257
258 /* Signed bitfield extract. */
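/* After shifting the field down, it is sign-extended from WIDTH bits using
   the usual mask/xor/subtract idiom: (x ^ signbit) - signbit. */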
259 static void gen_sbfx(TCGv var, int shift, int width)
260 {
261 uint32_t signbit;
262
263 if (shift)
264 tcg_gen_sari_i32(var, var, shift);
265 if (shift + width < 32) {
266 signbit = 1u << (width - 1);
267 tcg_gen_andi_i32(var, var, (1u << width) - 1);
268 tcg_gen_xori_i32(var, var, signbit);
269 tcg_gen_subi_i32(var, var, signbit);
270 }
271 }
272
273 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
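/* For example, with shift == 8 and mask == 0xff this replaces bits [15:8]
   of base with the low byte of val. */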
274 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
275 {
276 tcg_gen_andi_i32(val, val, mask);
277 tcg_gen_shli_i32(val, val, shift);
278 tcg_gen_andi_i32(base, base, ~(mask << shift));
279 tcg_gen_or_i32(dest, base, val);
280 }
281
282 /* Return (b << 32) + a. Mark inputs as dead. */
283 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
284 {
285 TCGv_i64 tmp64 = tcg_temp_new_i64();
286
287 tcg_gen_extu_i32_i64(tmp64, b);
288 tcg_temp_free_i32(b);
289 tcg_gen_shli_i64(tmp64, tmp64, 32);
290 tcg_gen_add_i64(a, tmp64, a);
291
292 tcg_temp_free_i64(tmp64);
293 return a;
294 }
295
296 /* Return (b << 32) - a. Mark inputs as dead. */
297 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
298 {
299 TCGv_i64 tmp64 = tcg_temp_new_i64();
300
301 tcg_gen_extu_i32_i64(tmp64, b);
302 tcg_temp_free_i32(b);
303 tcg_gen_shli_i64(tmp64, tmp64, 32);
304 tcg_gen_sub_i64(a, tmp64, a);
305
306 tcg_temp_free_i64(tmp64);
307 return a;
308 }
309
310 /* FIXME: Most targets have native widening multiplication.
311 It would be good to use that instead of a full wide multiply. */
312 /* 32x32->64 multiply. Marks inputs as dead. */
313 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
314 {
315 TCGv_i64 tmp1 = tcg_temp_new_i64();
316 TCGv_i64 tmp2 = tcg_temp_new_i64();
317
318 tcg_gen_extu_i32_i64(tmp1, a);
319 tcg_temp_free_i32(a);
320 tcg_gen_extu_i32_i64(tmp2, b);
321 tcg_temp_free_i32(b);
322 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
323 tcg_temp_free_i64(tmp2);
324 return tmp1;
325 }
326
327 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
328 {
329 TCGv_i64 tmp1 = tcg_temp_new_i64();
330 TCGv_i64 tmp2 = tcg_temp_new_i64();
331
332 tcg_gen_ext_i32_i64(tmp1, a);
333 tcg_temp_free_i32(a);
334 tcg_gen_ext_i32_i64(tmp2, b);
335 tcg_temp_free_i32(b);
336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
337 tcg_temp_free_i64(tmp2);
338 return tmp1;
339 }
340
341 /* Swap low and high halfwords. */
342 static void gen_swap_half(TCGv var)
343 {
344 TCGv tmp = tcg_temp_new_i32();
345 tcg_gen_shri_i32(tmp, var, 16);
346 tcg_gen_shli_i32(var, var, 16);
347 tcg_gen_or_i32(var, var, tmp);
348 tcg_temp_free_i32(tmp);
349 }
350
351 /* Dual 16-bit add. The result is placed in t0; t1 is marked as dead.
352 tmp = (t0 ^ t1) & 0x8000;
353 t0 &= ~0x8000;
354 t1 &= ~0x8000;
355 t0 = (t0 + t1) ^ tmp;
356 */
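/* Clearing bit 15 of both operands before the add stops any carry from
   propagating across the halfword boundary; the xor then restores the
   correct (modulo-2) sum for bit 15 itself. */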
357
358 static void gen_add16(TCGv t0, TCGv t1)
359 {
360 TCGv tmp = tcg_temp_new_i32();
361 tcg_gen_xor_i32(tmp, t0, t1);
362 tcg_gen_andi_i32(tmp, tmp, 0x8000);
363 tcg_gen_andi_i32(t0, t0, ~0x8000);
364 tcg_gen_andi_i32(t1, t1, ~0x8000);
365 tcg_gen_add_i32(t0, t0, t1);
366 tcg_gen_xor_i32(t0, t0, tmp);
367 tcg_temp_free_i32(tmp);
368 tcg_temp_free_i32(t1);
369 }
370
371 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
372
373 /* Set CF to the top bit of var. */
374 static void gen_set_CF_bit31(TCGv var)
375 {
376 TCGv tmp = tcg_temp_new_i32();
377 tcg_gen_shri_i32(tmp, var, 31);
378 gen_set_CF(tmp);
379 tcg_temp_free_i32(tmp);
380 }
381
382 /* Set N and Z flags from var. */
383 static inline void gen_logic_CC(TCGv var)
384 {
385 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
387 }
388
389 /* T0 += T1 + CF. */
390 static void gen_adc(TCGv t0, TCGv t1)
391 {
392 TCGv tmp;
393 tcg_gen_add_i32(t0, t0, t1);
394 tmp = load_cpu_field(CF);
395 tcg_gen_add_i32(t0, t0, tmp);
396 tcg_temp_free_i32(tmp);
397 }
398
399 /* dest = T0 + T1 + CF. */
400 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
401 {
402 TCGv tmp;
403 tcg_gen_add_i32(dest, t0, t1);
404 tmp = load_cpu_field(CF);
405 tcg_gen_add_i32(dest, dest, tmp);
406 tcg_temp_free_i32(tmp);
407 }
408
409 /* dest = T0 - T1 + CF - 1. */
410 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
411 {
412 TCGv tmp;
413 tcg_gen_sub_i32(dest, t0, t1);
414 tmp = load_cpu_field(CF);
415 tcg_gen_add_i32(dest, dest, tmp);
416 tcg_gen_subi_i32(dest, dest, 1);
417 tcg_temp_free_i32(tmp);
418 }
419
420 /* FIXME: Implement this natively. */
421 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
422
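/* Set the carry flag to bit SHIFT of var; callers use this to capture the
   last bit shifted out by an immediate shift. */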
423 static void shifter_out_im(TCGv var, int shift)
424 {
425 TCGv tmp = tcg_temp_new_i32();
426 if (shift == 0) {
427 tcg_gen_andi_i32(tmp, var, 1);
428 } else {
429 tcg_gen_shri_i32(tmp, var, shift);
430 if (shift != 31)
431 tcg_gen_andi_i32(tmp, tmp, 1);
432 }
433 gen_set_CF(tmp);
434 tcg_temp_free_i32(tmp);
435 }
436
437 /* Shift by immediate. Includes special handling for shift == 0. */
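/* In the ARM immediate shift encoding a shift amount of 0 is special:
   LSR #0 and ASR #0 mean a shift by 32, and ROR #0 means RRX (rotate
   right by one bit through the carry flag); LSL #0 is a plain move. */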
438 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
439 {
440 switch (shiftop) {
441 case 0: /* LSL */
442 if (shift != 0) {
443 if (flags)
444 shifter_out_im(var, 32 - shift);
445 tcg_gen_shli_i32(var, var, shift);
446 }
447 break;
448 case 1: /* LSR */
449 if (shift == 0) {
450 if (flags) {
451 tcg_gen_shri_i32(var, var, 31);
452 gen_set_CF(var);
453 }
454 tcg_gen_movi_i32(var, 0);
455 } else {
456 if (flags)
457 shifter_out_im(var, shift - 1);
458 tcg_gen_shri_i32(var, var, shift);
459 }
460 break;
461 case 2: /* ASR */
462 if (shift == 0)
463 shift = 32;
464 if (flags)
465 shifter_out_im(var, shift - 1);
466 if (shift == 32)
467 shift = 31;
468 tcg_gen_sari_i32(var, var, shift);
469 break;
470 case 3: /* ROR/RRX */
471 if (shift != 0) {
472 if (flags)
473 shifter_out_im(var, shift - 1);
474 tcg_gen_rotri_i32(var, var, shift); break;
475 } else {
476 TCGv tmp = load_cpu_field(CF);
477 if (flags)
478 shifter_out_im(var, 0);
479 tcg_gen_shri_i32(var, var, 1);
480 tcg_gen_shli_i32(tmp, tmp, 31);
481 tcg_gen_or_i32(var, var, tmp);
482 tcg_temp_free_i32(tmp);
483 }
484 }
485 }
486
487 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
488 TCGv shift, int flags)
489 {
490 if (flags) {
491 switch (shiftop) {
492 case 0: gen_helper_shl_cc(var, var, shift); break;
493 case 1: gen_helper_shr_cc(var, var, shift); break;
494 case 2: gen_helper_sar_cc(var, var, shift); break;
495 case 3: gen_helper_ror_cc(var, var, shift); break;
496 }
497 } else {
498 switch (shiftop) {
499 case 0: gen_helper_shl(var, var, shift); break;
500 case 1: gen_helper_shr(var, var, shift); break;
501 case 2: gen_helper_sar(var, var, shift); break;
502 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
503 tcg_gen_rotr_i32(var, var, shift); break;
504 }
505 }
506 tcg_temp_free_i32(shift);
507 }
508
509 #define PAS_OP(pfx) \
510 switch (op2) { \
511 case 0: gen_pas_helper(glue(pfx,add16)); break; \
512 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
513 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
514 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
515 case 4: gen_pas_helper(glue(pfx,add8)); break; \
516 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
517 }
518 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
519 {
520 TCGv_ptr tmp;
521
522 switch (op1) {
523 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
524 case 1:
525 tmp = tcg_temp_new_ptr();
526 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
527 PAS_OP(s)
528 tcg_temp_free_ptr(tmp);
529 break;
530 case 5:
531 tmp = tcg_temp_new_ptr();
532 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
533 PAS_OP(u)
534 tcg_temp_free_ptr(tmp);
535 break;
536 #undef gen_pas_helper
537 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
538 case 2:
539 PAS_OP(q);
540 break;
541 case 3:
542 PAS_OP(sh);
543 break;
544 case 6:
545 PAS_OP(uq);
546 break;
547 case 7:
548 PAS_OP(uh);
549 break;
550 #undef gen_pas_helper
551 }
552 }
553 #undef PAS_OP
554
555 /* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings. */
556 #define PAS_OP(pfx) \
557 switch (op1) { \
558 case 0: gen_pas_helper(glue(pfx,add8)); break; \
559 case 1: gen_pas_helper(glue(pfx,add16)); break; \
560 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
561 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
562 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
563 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
564 }
565 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
566 {
567 TCGv_ptr tmp;
568
569 switch (op2) {
570 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
571 case 0:
572 tmp = tcg_temp_new_ptr();
573 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
574 PAS_OP(s)
575 tcg_temp_free_ptr(tmp);
576 break;
577 case 4:
578 tmp = tcg_temp_new_ptr();
579 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
580 PAS_OP(u)
581 tcg_temp_free_ptr(tmp);
582 break;
583 #undef gen_pas_helper
584 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
585 case 1:
586 PAS_OP(q);
587 break;
588 case 2:
589 PAS_OP(sh);
590 break;
591 case 5:
592 PAS_OP(uq);
593 break;
594 case 6:
595 PAS_OP(uh);
596 break;
597 #undef gen_pas_helper
598 }
599 }
600 #undef PAS_OP
601
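/* Generate a branch to label if condition cc holds, based on the lazily
   evaluated flags: ZF holds a value that is zero iff the Z flag is set,
   bit 31 of NF and VF hold the N and V flags, and CF holds the carry as
   0 or 1. */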
602 static void gen_test_cc(int cc, int label)
603 {
604 TCGv tmp;
605 TCGv tmp2;
606 int inv;
607
608 switch (cc) {
609 case 0: /* eq: Z */
610 tmp = load_cpu_field(ZF);
611 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
612 break;
613 case 1: /* ne: !Z */
614 tmp = load_cpu_field(ZF);
615 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
616 break;
617 case 2: /* cs: C */
618 tmp = load_cpu_field(CF);
619 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
620 break;
621 case 3: /* cc: !C */
622 tmp = load_cpu_field(CF);
623 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
624 break;
625 case 4: /* mi: N */
626 tmp = load_cpu_field(NF);
627 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
628 break;
629 case 5: /* pl: !N */
630 tmp = load_cpu_field(NF);
631 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
632 break;
633 case 6: /* vs: V */
634 tmp = load_cpu_field(VF);
635 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
636 break;
637 case 7: /* vc: !V */
638 tmp = load_cpu_field(VF);
639 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
640 break;
641 case 8: /* hi: C && !Z */
642 inv = gen_new_label();
643 tmp = load_cpu_field(CF);
644 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
645 tcg_temp_free_i32(tmp);
646 tmp = load_cpu_field(ZF);
647 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
648 gen_set_label(inv);
649 break;
650 case 9: /* ls: !C || Z */
651 tmp = load_cpu_field(CF);
652 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
653 tcg_temp_free_i32(tmp);
654 tmp = load_cpu_field(ZF);
655 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
656 break;
657 case 10: /* ge: N == V -> N ^ V == 0 */
658 tmp = load_cpu_field(VF);
659 tmp2 = load_cpu_field(NF);
660 tcg_gen_xor_i32(tmp, tmp, tmp2);
661 tcg_temp_free_i32(tmp2);
662 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
663 break;
664 case 11: /* lt: N != V -> N ^ V != 0 */
665 tmp = load_cpu_field(VF);
666 tmp2 = load_cpu_field(NF);
667 tcg_gen_xor_i32(tmp, tmp, tmp2);
668 tcg_temp_free_i32(tmp2);
669 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
670 break;
671 case 12: /* gt: !Z && N == V */
672 inv = gen_new_label();
673 tmp = load_cpu_field(ZF);
674 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
675 tcg_temp_free_i32(tmp);
676 tmp = load_cpu_field(VF);
677 tmp2 = load_cpu_field(NF);
678 tcg_gen_xor_i32(tmp, tmp, tmp2);
679 tcg_temp_free_i32(tmp2);
680 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
681 gen_set_label(inv);
682 break;
683 case 13: /* le: Z || N != V */
684 tmp = load_cpu_field(ZF);
685 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
686 tcg_temp_free_i32(tmp);
687 tmp = load_cpu_field(VF);
688 tmp2 = load_cpu_field(NF);
689 tcg_gen_xor_i32(tmp, tmp, tmp2);
690 tcg_temp_free_i32(tmp2);
691 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
692 break;
693 default:
694 fprintf(stderr, "Bad condition code 0x%x\n", cc);
695 abort();
696 }
697 tcg_temp_free_i32(tmp);
698 }
699
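/* 1 marks the data-processing opcodes that are logical operations, whose
   condition codes (N and Z only) are set directly from the result. */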
700 static const uint8_t table_logic_cc[16] = {
701 1, /* and */
702 1, /* xor */
703 0, /* sub */
704 0, /* rsb */
705 0, /* add */
706 0, /* adc */
707 0, /* sbc */
708 0, /* rsc */
709 1, /* andl */
710 1, /* xorl */
711 0, /* cmp */
712 0, /* cmn */
713 1, /* orr */
714 1, /* mov */
715 1, /* bic */
716 1, /* mvn */
717 };
718
719 /* Set PC and Thumb state from an immediate address. */
720 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
721 {
722 TCGv tmp;
723
724 s->is_jmp = DISAS_UPDATE;
725 if (s->thumb != (addr & 1)) {
726 tmp = tcg_temp_new_i32();
727 tcg_gen_movi_i32(tmp, addr & 1);
728 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
729 tcg_temp_free_i32(tmp);
730 }
731 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
732 }
733
734 /* Set PC and Thumb state from var. var is marked as dead. */
735 static inline void gen_bx(DisasContext *s, TCGv var)
736 {
737 s->is_jmp = DISAS_UPDATE;
738 tcg_gen_andi_i32(cpu_R[15], var, ~1);
739 tcg_gen_andi_i32(var, var, 1);
740 store_cpu_field(var, thumb);
741 }
742
743 /* Variant of store_reg which uses branch&exchange logic when storing
744 to r15 in ARM architecture v7 and above. The source must be a temporary
745 and will be marked as dead. */
746 static inline void store_reg_bx(CPUState *env, DisasContext *s,
747 int reg, TCGv var)
748 {
749 if (reg == 15 && ENABLE_ARCH_7) {
750 gen_bx(s, var);
751 } else {
752 store_reg(s, reg, var);
753 }
754 }
755
756 /* Variant of store_reg which uses branch&exchange logic when storing
757 * to r15 in ARM architecture v5T and above. This is used for storing
758 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
759 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
760 static inline void store_reg_from_load(CPUState *env, DisasContext *s,
761 int reg, TCGv var)
762 {
763 if (reg == 15 && ENABLE_ARCH_5) {
764 gen_bx(s, var);
765 } else {
766 store_reg(s, reg, var);
767 }
768 }
769
770 static inline TCGv gen_ld8s(TCGv addr, int index)
771 {
772 TCGv tmp = tcg_temp_new_i32();
773 tcg_gen_qemu_ld8s(tmp, addr, index);
774 return tmp;
775 }
776 static inline TCGv gen_ld8u(TCGv addr, int index)
777 {
778 TCGv tmp = tcg_temp_new_i32();
779 tcg_gen_qemu_ld8u(tmp, addr, index);
780 return tmp;
781 }
782 static inline TCGv gen_ld16s(TCGv addr, int index)
783 {
784 TCGv tmp = tcg_temp_new_i32();
785 tcg_gen_qemu_ld16s(tmp, addr, index);
786 return tmp;
787 }
788 static inline TCGv gen_ld16u(TCGv addr, int index)
789 {
790 TCGv tmp = tcg_temp_new_i32();
791 tcg_gen_qemu_ld16u(tmp, addr, index);
792 return tmp;
793 }
794 static inline TCGv gen_ld32(TCGv addr, int index)
795 {
796 TCGv tmp = tcg_temp_new_i32();
797 tcg_gen_qemu_ld32u(tmp, addr, index);
798 return tmp;
799 }
800 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
801 {
802 TCGv_i64 tmp = tcg_temp_new_i64();
803 tcg_gen_qemu_ld64(tmp, addr, index);
804 return tmp;
805 }
806 static inline void gen_st8(TCGv val, TCGv addr, int index)
807 {
808 tcg_gen_qemu_st8(val, addr, index);
809 tcg_temp_free_i32(val);
810 }
811 static inline void gen_st16(TCGv val, TCGv addr, int index)
812 {
813 tcg_gen_qemu_st16(val, addr, index);
814 tcg_temp_free_i32(val);
815 }
816 static inline void gen_st32(TCGv val, TCGv addr, int index)
817 {
818 tcg_gen_qemu_st32(val, addr, index);
819 tcg_temp_free_i32(val);
820 }
821 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
822 {
823 tcg_gen_qemu_st64(val, addr, index);
824 tcg_temp_free_i64(val);
825 }
826
827 static inline void gen_set_pc_im(uint32_t val)
828 {
829 tcg_gen_movi_i32(cpu_R[15], val);
830 }
831
832 /* Force a TB lookup after an instruction that changes the CPU state. */
833 static inline void gen_lookup_tb(DisasContext *s)
834 {
835 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
836 s->is_jmp = DISAS_UPDATE;
837 }
838
839 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
840 TCGv var)
841 {
842 int val, rm, shift, shiftop;
843 TCGv offset;
844
845 if (!(insn & (1 << 25))) {
846 /* immediate */
847 val = insn & 0xfff;
848 if (!(insn & (1 << 23)))
849 val = -val;
850 if (val != 0)
851 tcg_gen_addi_i32(var, var, val);
852 } else {
853 /* shift/register */
854 rm = (insn) & 0xf;
855 shift = (insn >> 7) & 0x1f;
856 shiftop = (insn >> 5) & 3;
857 offset = load_reg(s, rm);
858 gen_arm_shift_im(offset, shiftop, shift, 0);
859 if (!(insn & (1 << 23)))
860 tcg_gen_sub_i32(var, var, offset);
861 else
862 tcg_gen_add_i32(var, var, offset);
863 tcg_temp_free_i32(offset);
864 }
865 }
866
867 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
868 int extra, TCGv var)
869 {
870 int val, rm;
871 TCGv offset;
872
873 if (insn & (1 << 22)) {
874 /* immediate */
875 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
876 if (!(insn & (1 << 23)))
877 val = -val;
878 val += extra;
879 if (val != 0)
880 tcg_gen_addi_i32(var, var, val);
881 } else {
882 /* register */
883 if (extra)
884 tcg_gen_addi_i32(var, var, extra);
885 rm = (insn) & 0xf;
886 offset = load_reg(s, rm);
887 if (!(insn & (1 << 23)))
888 tcg_gen_sub_i32(var, var, offset);
889 else
890 tcg_gen_add_i32(var, var, offset);
891 tcg_temp_free_i32(offset);
892 }
893 }
894
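/* Return a pointer to the float_status to use: Neon operations use the
   "standard FPSCR value" status word, while VFP operations use the one
   controlled by the FPSCR. */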
895 static TCGv_ptr get_fpstatus_ptr(int neon)
896 {
897 TCGv_ptr statusptr = tcg_temp_new_ptr();
898 int offset;
899 if (neon) {
900 offset = offsetof(CPUState, vfp.standard_fp_status);
901 } else {
902 offset = offsetof(CPUState, vfp.fp_status);
903 }
904 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
905 return statusptr;
906 }
907
908 #define VFP_OP2(name) \
909 static inline void gen_vfp_##name(int dp) \
910 { \
911 TCGv_ptr fpst = get_fpstatus_ptr(0); \
912 if (dp) { \
913 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
914 } else { \
915 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
916 } \
917 tcg_temp_free_ptr(fpst); \
918 }
919
920 VFP_OP2(add)
921 VFP_OP2(sub)
922 VFP_OP2(mul)
923 VFP_OP2(div)
924
925 #undef VFP_OP2
926
927 static inline void gen_vfp_F1_mul(int dp)
928 {
929 /* Like gen_vfp_mul() but put result in F1 */
930 TCGv_ptr fpst = get_fpstatus_ptr(0);
931 if (dp) {
932 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
933 } else {
934 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
935 }
936 tcg_temp_free_ptr(fpst);
937 }
938
939 static inline void gen_vfp_F1_neg(int dp)
940 {
941 /* Like gen_vfp_neg() but put result in F1 */
942 if (dp) {
943 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
944 } else {
945 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
946 }
947 }
948
949 static inline void gen_vfp_abs(int dp)
950 {
951 if (dp)
952 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
953 else
954 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
955 }
956
957 static inline void gen_vfp_neg(int dp)
958 {
959 if (dp)
960 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
961 else
962 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
963 }
964
965 static inline void gen_vfp_sqrt(int dp)
966 {
967 if (dp)
968 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
969 else
970 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
971 }
972
973 static inline void gen_vfp_cmp(int dp)
974 {
975 if (dp)
976 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
977 else
978 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
979 }
980
981 static inline void gen_vfp_cmpe(int dp)
982 {
983 if (dp)
984 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
985 else
986 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
987 }
988
989 static inline void gen_vfp_F1_ld0(int dp)
990 {
991 if (dp)
992 tcg_gen_movi_i64(cpu_F1d, 0);
993 else
994 tcg_gen_movi_i32(cpu_F1s, 0);
995 }
996
997 #define VFP_GEN_ITOF(name) \
998 static inline void gen_vfp_##name(int dp, int neon) \
999 { \
1000 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1001 if (dp) { \
1002 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1003 } else { \
1004 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1005 } \
1006 tcg_temp_free_ptr(statusptr); \
1007 }
1008
1009 VFP_GEN_ITOF(uito)
1010 VFP_GEN_ITOF(sito)
1011 #undef VFP_GEN_ITOF
1012
1013 #define VFP_GEN_FTOI(name) \
1014 static inline void gen_vfp_##name(int dp, int neon) \
1015 { \
1016 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1017 if (dp) { \
1018 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1019 } else { \
1020 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1021 } \
1022 tcg_temp_free_ptr(statusptr); \
1023 }
1024
1025 VFP_GEN_FTOI(toui)
1026 VFP_GEN_FTOI(touiz)
1027 VFP_GEN_FTOI(tosi)
1028 VFP_GEN_FTOI(tosiz)
1029 #undef VFP_GEN_FTOI
1030
1031 #define VFP_GEN_FIX(name) \
1032 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1033 { \
1034 TCGv tmp_shift = tcg_const_i32(shift); \
1035 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1036 if (dp) { \
1037 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1038 } else { \
1039 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1040 } \
1041 tcg_temp_free_i32(tmp_shift); \
1042 tcg_temp_free_ptr(statusptr); \
1043 }
1044 VFP_GEN_FIX(tosh)
1045 VFP_GEN_FIX(tosl)
1046 VFP_GEN_FIX(touh)
1047 VFP_GEN_FIX(toul)
1048 VFP_GEN_FIX(shto)
1049 VFP_GEN_FIX(slto)
1050 VFP_GEN_FIX(uhto)
1051 VFP_GEN_FIX(ulto)
1052 #undef VFP_GEN_FIX
1053
1054 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1055 {
1056 if (dp)
1057 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1058 else
1059 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1060 }
1061
1062 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1063 {
1064 if (dp)
1065 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1066 else
1067 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1068 }
1069
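/* Offset of VFP register <reg> within CPUARMState. Single-precision
   registers are stored as halves of the double-precision registers:
   s<2n> is the low word of d<n> and s<2n+1> the high word. */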
1070 static inline long
1071 vfp_reg_offset (int dp, int reg)
1072 {
1073 if (dp)
1074 return offsetof(CPUARMState, vfp.regs[reg]);
1075 else if (reg & 1) {
1076 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1077 + offsetof(CPU_DoubleU, l.upper);
1078 } else {
1079 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1080 + offsetof(CPU_DoubleU, l.lower);
1081 }
1082 }
1083
1084 /* Return the offset of a 32-bit piece of a NEON register.
1085 Piece 0 is the least significant end of the register. */
1086 static inline long
1087 neon_reg_offset (int reg, int n)
1088 {
1089 int sreg;
1090 sreg = reg * 2 + n;
1091 return vfp_reg_offset(0, sreg);
1092 }
1093
1094 static TCGv neon_load_reg(int reg, int pass)
1095 {
1096 TCGv tmp = tcg_temp_new_i32();
1097 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1098 return tmp;
1099 }
1100
1101 static void neon_store_reg(int reg, int pass, TCGv var)
1102 {
1103 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1104 tcg_temp_free_i32(var);
1105 }
1106
1107 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1108 {
1109 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1110 }
1111
1112 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1113 {
1114 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1115 }
1116
1117 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1118 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1119 #define tcg_gen_st_f32 tcg_gen_st_i32
1120 #define tcg_gen_st_f64 tcg_gen_st_i64
1121
1122 static inline void gen_mov_F0_vreg(int dp, int reg)
1123 {
1124 if (dp)
1125 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1126 else
1127 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1128 }
1129
1130 static inline void gen_mov_F1_vreg(int dp, int reg)
1131 {
1132 if (dp)
1133 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1134 else
1135 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1136 }
1137
1138 static inline void gen_mov_vreg_F0(int dp, int reg)
1139 {
1140 if (dp)
1141 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1142 else
1143 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1144 }
1145
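/* Bit 20 of a coprocessor instruction is the L/direction bit: set for
   transfers that read from the coprocessor (MRC, MRRC, loads). */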
1146 #define ARM_CP_RW_BIT (1 << 20)
1147
1148 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1149 {
1150 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1151 }
1152
1153 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1154 {
1155 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1156 }
1157
1158 static inline TCGv iwmmxt_load_creg(int reg)
1159 {
1160 TCGv var = tcg_temp_new_i32();
1161 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1162 return var;
1163 }
1164
1165 static inline void iwmmxt_store_creg(int reg, TCGv var)
1166 {
1167 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1168 tcg_temp_free_i32(var);
1169 }
1170
1171 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1172 {
1173 iwmmxt_store_reg(cpu_M0, rn);
1174 }
1175
1176 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1177 {
1178 iwmmxt_load_reg(cpu_M0, rn);
1179 }
1180
1181 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1182 {
1183 iwmmxt_load_reg(cpu_V1, rn);
1184 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1185 }
1186
1187 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1188 {
1189 iwmmxt_load_reg(cpu_V1, rn);
1190 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1191 }
1192
1193 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1194 {
1195 iwmmxt_load_reg(cpu_V1, rn);
1196 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1197 }
1198
1199 #define IWMMXT_OP(name) \
1200 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1201 { \
1202 iwmmxt_load_reg(cpu_V1, rn); \
1203 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1204 }
1205
1206 #define IWMMXT_OP_ENV(name) \
1207 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1208 { \
1209 iwmmxt_load_reg(cpu_V1, rn); \
1210 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1211 }
1212
1213 #define IWMMXT_OP_ENV_SIZE(name) \
1214 IWMMXT_OP_ENV(name##b) \
1215 IWMMXT_OP_ENV(name##w) \
1216 IWMMXT_OP_ENV(name##l)
1217
1218 #define IWMMXT_OP_ENV1(name) \
1219 static inline void gen_op_iwmmxt_##name##_M0(void) \
1220 { \
1221 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1222 }
1223
1224 IWMMXT_OP(maddsq)
1225 IWMMXT_OP(madduq)
1226 IWMMXT_OP(sadb)
1227 IWMMXT_OP(sadw)
1228 IWMMXT_OP(mulslw)
1229 IWMMXT_OP(mulshw)
1230 IWMMXT_OP(mululw)
1231 IWMMXT_OP(muluhw)
1232 IWMMXT_OP(macsw)
1233 IWMMXT_OP(macuw)
1234
1235 IWMMXT_OP_ENV_SIZE(unpackl)
1236 IWMMXT_OP_ENV_SIZE(unpackh)
1237
1238 IWMMXT_OP_ENV1(unpacklub)
1239 IWMMXT_OP_ENV1(unpackluw)
1240 IWMMXT_OP_ENV1(unpacklul)
1241 IWMMXT_OP_ENV1(unpackhub)
1242 IWMMXT_OP_ENV1(unpackhuw)
1243 IWMMXT_OP_ENV1(unpackhul)
1244 IWMMXT_OP_ENV1(unpacklsb)
1245 IWMMXT_OP_ENV1(unpacklsw)
1246 IWMMXT_OP_ENV1(unpacklsl)
1247 IWMMXT_OP_ENV1(unpackhsb)
1248 IWMMXT_OP_ENV1(unpackhsw)
1249 IWMMXT_OP_ENV1(unpackhsl)
1250
1251 IWMMXT_OP_ENV_SIZE(cmpeq)
1252 IWMMXT_OP_ENV_SIZE(cmpgtu)
1253 IWMMXT_OP_ENV_SIZE(cmpgts)
1254
1255 IWMMXT_OP_ENV_SIZE(mins)
1256 IWMMXT_OP_ENV_SIZE(minu)
1257 IWMMXT_OP_ENV_SIZE(maxs)
1258 IWMMXT_OP_ENV_SIZE(maxu)
1259
1260 IWMMXT_OP_ENV_SIZE(subn)
1261 IWMMXT_OP_ENV_SIZE(addn)
1262 IWMMXT_OP_ENV_SIZE(subu)
1263 IWMMXT_OP_ENV_SIZE(addu)
1264 IWMMXT_OP_ENV_SIZE(subs)
1265 IWMMXT_OP_ENV_SIZE(adds)
1266
1267 IWMMXT_OP_ENV(avgb0)
1268 IWMMXT_OP_ENV(avgb1)
1269 IWMMXT_OP_ENV(avgw0)
1270 IWMMXT_OP_ENV(avgw1)
1271
1272 IWMMXT_OP(msadb)
1273
1274 IWMMXT_OP_ENV(packuw)
1275 IWMMXT_OP_ENV(packul)
1276 IWMMXT_OP_ENV(packuq)
1277 IWMMXT_OP_ENV(packsw)
1278 IWMMXT_OP_ENV(packsl)
1279 IWMMXT_OP_ENV(packsq)
1280
1281 static void gen_op_iwmmxt_set_mup(void)
1282 {
1283 TCGv tmp;
1284 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1285 tcg_gen_ori_i32(tmp, tmp, 2);
1286 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1287 }
1288
1289 static void gen_op_iwmmxt_set_cup(void)
1290 {
1291 TCGv tmp;
1292 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1293 tcg_gen_ori_i32(tmp, tmp, 1);
1294 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1295 }
1296
1297 static void gen_op_iwmmxt_setpsr_nz(void)
1298 {
1299 TCGv tmp = tcg_temp_new_i32();
1300 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1301 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1302 }
1303
1304 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1305 {
1306 iwmmxt_load_reg(cpu_V1, rn);
1307 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1308 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1309 }
1310
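/* Decode the addressing mode of an iwMMXt load/store into dest, using the
   standard coprocessor P/U/W bits: bit 24 selects pre- vs post-indexing,
   bit 23 add vs subtract, and bit 21 base writeback. Returns nonzero for
   an invalid combination. */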
1311 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1312 {
1313 int rd;
1314 uint32_t offset;
1315 TCGv tmp;
1316
1317 rd = (insn >> 16) & 0xf;
1318 tmp = load_reg(s, rd);
1319
1320 offset = (insn & 0xff) << ((insn >> 7) & 2);
1321 if (insn & (1 << 24)) {
1322 /* Pre indexed */
1323 if (insn & (1 << 23))
1324 tcg_gen_addi_i32(tmp, tmp, offset);
1325 else
1326 tcg_gen_addi_i32(tmp, tmp, -offset);
1327 tcg_gen_mov_i32(dest, tmp);
1328 if (insn & (1 << 21))
1329 store_reg(s, rd, tmp);
1330 else
1331 tcg_temp_free_i32(tmp);
1332 } else if (insn & (1 << 21)) {
1333 /* Post indexed */
1334 tcg_gen_mov_i32(dest, tmp);
1335 if (insn & (1 << 23))
1336 tcg_gen_addi_i32(tmp, tmp, offset);
1337 else
1338 tcg_gen_addi_i32(tmp, tmp, -offset);
1339 store_reg(s, rd, tmp);
1340 } else if (!(insn & (1 << 23)))
1341 return 1;
1342 return 0;
1343 }
1344
1345 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1346 {
1347 int rd = (insn >> 0) & 0xf;
1348 TCGv tmp;
1349
1350 if (insn & (1 << 8)) {
1351 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1352 return 1;
1353 } else {
1354 tmp = iwmmxt_load_creg(rd);
1355 }
1356 } else {
1357 tmp = tcg_temp_new_i32();
1358 iwmmxt_load_reg(cpu_V0, rd);
1359 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1360 }
1361 tcg_gen_andi_i32(tmp, tmp, mask);
1362 tcg_gen_mov_i32(dest, tmp);
1363 tcg_temp_free_i32(tmp);
1364 return 0;
1365 }
1366
1367 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1368 (i.e. an undefined instruction). */
1369 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1370 {
1371 int rd, wrd;
1372 int rdhi, rdlo, rd0, rd1, i;
1373 TCGv addr;
1374 TCGv tmp, tmp2, tmp3;
1375
1376 if ((insn & 0x0e000e00) == 0x0c000000) {
1377 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1378 wrd = insn & 0xf;
1379 rdlo = (insn >> 12) & 0xf;
1380 rdhi = (insn >> 16) & 0xf;
1381 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1382 iwmmxt_load_reg(cpu_V0, wrd);
1383 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1384 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1385 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1386 } else { /* TMCRR */
1387 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1388 iwmmxt_store_reg(cpu_V0, wrd);
1389 gen_op_iwmmxt_set_mup();
1390 }
1391 return 0;
1392 }
1393
1394 wrd = (insn >> 12) & 0xf;
1395 addr = tcg_temp_new_i32();
1396 if (gen_iwmmxt_address(s, insn, addr)) {
1397 tcg_temp_free_i32(addr);
1398 return 1;
1399 }
1400 if (insn & ARM_CP_RW_BIT) {
1401 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1402 tmp = tcg_temp_new_i32();
1403 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1404 iwmmxt_store_creg(wrd, tmp);
1405 } else {
1406 i = 1;
1407 if (insn & (1 << 8)) {
1408 if (insn & (1 << 22)) { /* WLDRD */
1409 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1410 i = 0;
1411 } else { /* WLDRW wRd */
1412 tmp = gen_ld32(addr, IS_USER(s));
1413 }
1414 } else {
1415 if (insn & (1 << 22)) { /* WLDRH */
1416 tmp = gen_ld16u(addr, IS_USER(s));
1417 } else { /* WLDRB */
1418 tmp = gen_ld8u(addr, IS_USER(s));
1419 }
1420 }
1421 if (i) {
1422 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1423 tcg_temp_free_i32(tmp);
1424 }
1425 gen_op_iwmmxt_movq_wRn_M0(wrd);
1426 }
1427 } else {
1428 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1429 tmp = iwmmxt_load_creg(wrd);
1430 gen_st32(tmp, addr, IS_USER(s));
1431 } else {
1432 gen_op_iwmmxt_movq_M0_wRn(wrd);
1433 tmp = tcg_temp_new_i32();
1434 if (insn & (1 << 8)) {
1435 if (insn & (1 << 22)) { /* WSTRD */
1436 tcg_temp_free_i32(tmp);
1437 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1438 } else { /* WSTRW wRd */
1439 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1440 gen_st32(tmp, addr, IS_USER(s));
1441 }
1442 } else {
1443 if (insn & (1 << 22)) { /* WSTRH */
1444 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1445 gen_st16(tmp, addr, IS_USER(s));
1446 } else { /* WSTRB */
1447 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1448 gen_st8(tmp, addr, IS_USER(s));
1449 }
1450 }
1451 }
1452 }
1453 tcg_temp_free_i32(addr);
1454 return 0;
1455 }
1456
1457 if ((insn & 0x0f000000) != 0x0e000000)
1458 return 1;
1459
1460 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1461 case 0x000: /* WOR */
1462 wrd = (insn >> 12) & 0xf;
1463 rd0 = (insn >> 0) & 0xf;
1464 rd1 = (insn >> 16) & 0xf;
1465 gen_op_iwmmxt_movq_M0_wRn(rd0);
1466 gen_op_iwmmxt_orq_M0_wRn(rd1);
1467 gen_op_iwmmxt_setpsr_nz();
1468 gen_op_iwmmxt_movq_wRn_M0(wrd);
1469 gen_op_iwmmxt_set_mup();
1470 gen_op_iwmmxt_set_cup();
1471 break;
1472 case 0x011: /* TMCR */
1473 if (insn & 0xf)
1474 return 1;
1475 rd = (insn >> 12) & 0xf;
1476 wrd = (insn >> 16) & 0xf;
1477 switch (wrd) {
1478 case ARM_IWMMXT_wCID:
1479 case ARM_IWMMXT_wCASF:
1480 break;
1481 case ARM_IWMMXT_wCon:
1482 gen_op_iwmmxt_set_cup();
1483 /* Fall through. */
1484 case ARM_IWMMXT_wCSSF:
1485 tmp = iwmmxt_load_creg(wrd);
1486 tmp2 = load_reg(s, rd);
1487 tcg_gen_andc_i32(tmp, tmp, tmp2);
1488 tcg_temp_free_i32(tmp2);
1489 iwmmxt_store_creg(wrd, tmp);
1490 break;
1491 case ARM_IWMMXT_wCGR0:
1492 case ARM_IWMMXT_wCGR1:
1493 case ARM_IWMMXT_wCGR2:
1494 case ARM_IWMMXT_wCGR3:
1495 gen_op_iwmmxt_set_cup();
1496 tmp = load_reg(s, rd);
1497 iwmmxt_store_creg(wrd, tmp);
1498 break;
1499 default:
1500 return 1;
1501 }
1502 break;
1503 case 0x100: /* WXOR */
1504 wrd = (insn >> 12) & 0xf;
1505 rd0 = (insn >> 0) & 0xf;
1506 rd1 = (insn >> 16) & 0xf;
1507 gen_op_iwmmxt_movq_M0_wRn(rd0);
1508 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1509 gen_op_iwmmxt_setpsr_nz();
1510 gen_op_iwmmxt_movq_wRn_M0(wrd);
1511 gen_op_iwmmxt_set_mup();
1512 gen_op_iwmmxt_set_cup();
1513 break;
1514 case 0x111: /* TMRC */
1515 if (insn & 0xf)
1516 return 1;
1517 rd = (insn >> 12) & 0xf;
1518 wrd = (insn >> 16) & 0xf;
1519 tmp = iwmmxt_load_creg(wrd);
1520 store_reg(s, rd, tmp);
1521 break;
1522 case 0x300: /* WANDN */
1523 wrd = (insn >> 12) & 0xf;
1524 rd0 = (insn >> 0) & 0xf;
1525 rd1 = (insn >> 16) & 0xf;
1526 gen_op_iwmmxt_movq_M0_wRn(rd0);
1527 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1528 gen_op_iwmmxt_andq_M0_wRn(rd1);
1529 gen_op_iwmmxt_setpsr_nz();
1530 gen_op_iwmmxt_movq_wRn_M0(wrd);
1531 gen_op_iwmmxt_set_mup();
1532 gen_op_iwmmxt_set_cup();
1533 break;
1534 case 0x200: /* WAND */
1535 wrd = (insn >> 12) & 0xf;
1536 rd0 = (insn >> 0) & 0xf;
1537 rd1 = (insn >> 16) & 0xf;
1538 gen_op_iwmmxt_movq_M0_wRn(rd0);
1539 gen_op_iwmmxt_andq_M0_wRn(rd1);
1540 gen_op_iwmmxt_setpsr_nz();
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1544 break;
1545 case 0x810: case 0xa10: /* WMADD */
1546 wrd = (insn >> 12) & 0xf;
1547 rd0 = (insn >> 0) & 0xf;
1548 rd1 = (insn >> 16) & 0xf;
1549 gen_op_iwmmxt_movq_M0_wRn(rd0);
1550 if (insn & (1 << 21))
1551 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1552 else
1553 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1554 gen_op_iwmmxt_movq_wRn_M0(wrd);
1555 gen_op_iwmmxt_set_mup();
1556 break;
1557 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1558 wrd = (insn >> 12) & 0xf;
1559 rd0 = (insn >> 16) & 0xf;
1560 rd1 = (insn >> 0) & 0xf;
1561 gen_op_iwmmxt_movq_M0_wRn(rd0);
1562 switch ((insn >> 22) & 3) {
1563 case 0:
1564 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1565 break;
1566 case 1:
1567 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1568 break;
1569 case 2:
1570 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1571 break;
1572 case 3:
1573 return 1;
1574 }
1575 gen_op_iwmmxt_movq_wRn_M0(wrd);
1576 gen_op_iwmmxt_set_mup();
1577 gen_op_iwmmxt_set_cup();
1578 break;
1579 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1580 wrd = (insn >> 12) & 0xf;
1581 rd0 = (insn >> 16) & 0xf;
1582 rd1 = (insn >> 0) & 0xf;
1583 gen_op_iwmmxt_movq_M0_wRn(rd0);
1584 switch ((insn >> 22) & 3) {
1585 case 0:
1586 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1587 break;
1588 case 1:
1589 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1590 break;
1591 case 2:
1592 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1593 break;
1594 case 3:
1595 return 1;
1596 }
1597 gen_op_iwmmxt_movq_wRn_M0(wrd);
1598 gen_op_iwmmxt_set_mup();
1599 gen_op_iwmmxt_set_cup();
1600 break;
1601 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1602 wrd = (insn >> 12) & 0xf;
1603 rd0 = (insn >> 16) & 0xf;
1604 rd1 = (insn >> 0) & 0xf;
1605 gen_op_iwmmxt_movq_M0_wRn(rd0);
1606 if (insn & (1 << 22))
1607 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1608 else
1609 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1610 if (!(insn & (1 << 20)))
1611 gen_op_iwmmxt_addl_M0_wRn(wrd);
1612 gen_op_iwmmxt_movq_wRn_M0(wrd);
1613 gen_op_iwmmxt_set_mup();
1614 break;
1615 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1616 wrd = (insn >> 12) & 0xf;
1617 rd0 = (insn >> 16) & 0xf;
1618 rd1 = (insn >> 0) & 0xf;
1619 gen_op_iwmmxt_movq_M0_wRn(rd0);
1620 if (insn & (1 << 21)) {
1621 if (insn & (1 << 20))
1622 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1623 else
1624 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1625 } else {
1626 if (insn & (1 << 20))
1627 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1628 else
1629 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1630 }
1631 gen_op_iwmmxt_movq_wRn_M0(wrd);
1632 gen_op_iwmmxt_set_mup();
1633 break;
1634 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 16) & 0xf;
1637 rd1 = (insn >> 0) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
1639 if (insn & (1 << 21))
1640 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1641 else
1642 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1643 if (!(insn & (1 << 20))) {
1644 iwmmxt_load_reg(cpu_V1, wrd);
1645 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1646 }
1647 gen_op_iwmmxt_movq_wRn_M0(wrd);
1648 gen_op_iwmmxt_set_mup();
1649 break;
1650 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1651 wrd = (insn >> 12) & 0xf;
1652 rd0 = (insn >> 16) & 0xf;
1653 rd1 = (insn >> 0) & 0xf;
1654 gen_op_iwmmxt_movq_M0_wRn(rd0);
1655 switch ((insn >> 22) & 3) {
1656 case 0:
1657 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1658 break;
1659 case 1:
1660 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1661 break;
1662 case 2:
1663 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1664 break;
1665 case 3:
1666 return 1;
1667 }
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 gen_op_iwmmxt_set_cup();
1671 break;
1672 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1673 wrd = (insn >> 12) & 0xf;
1674 rd0 = (insn >> 16) & 0xf;
1675 rd1 = (insn >> 0) & 0xf;
1676 gen_op_iwmmxt_movq_M0_wRn(rd0);
1677 if (insn & (1 << 22)) {
1678 if (insn & (1 << 20))
1679 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1680 else
1681 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1682 } else {
1683 if (insn & (1 << 20))
1684 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1685 else
1686 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1687 }
1688 gen_op_iwmmxt_movq_wRn_M0(wrd);
1689 gen_op_iwmmxt_set_mup();
1690 gen_op_iwmmxt_set_cup();
1691 break;
1692 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1693 wrd = (insn >> 12) & 0xf;
1694 rd0 = (insn >> 16) & 0xf;
1695 rd1 = (insn >> 0) & 0xf;
1696 gen_op_iwmmxt_movq_M0_wRn(rd0);
1697 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1698 tcg_gen_andi_i32(tmp, tmp, 7);
1699 iwmmxt_load_reg(cpu_V1, rd1);
1700 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1701 tcg_temp_free_i32(tmp);
1702 gen_op_iwmmxt_movq_wRn_M0(wrd);
1703 gen_op_iwmmxt_set_mup();
1704 break;
1705 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1706 if (((insn >> 6) & 3) == 3)
1707 return 1;
1708 rd = (insn >> 12) & 0xf;
1709 wrd = (insn >> 16) & 0xf;
1710 tmp = load_reg(s, rd);
1711 gen_op_iwmmxt_movq_M0_wRn(wrd);
1712 switch ((insn >> 6) & 3) {
1713 case 0:
1714 tmp2 = tcg_const_i32(0xff);
1715 tmp3 = tcg_const_i32((insn & 7) << 3);
1716 break;
1717 case 1:
1718 tmp2 = tcg_const_i32(0xffff);
1719 tmp3 = tcg_const_i32((insn & 3) << 4);
1720 break;
1721 case 2:
1722 tmp2 = tcg_const_i32(0xffffffff);
1723 tmp3 = tcg_const_i32((insn & 1) << 5);
1724 break;
1725 default:
1726 TCGV_UNUSED(tmp2);
1727 TCGV_UNUSED(tmp3);
1728 }
1729 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1730 tcg_temp_free(tmp3);
1731 tcg_temp_free(tmp2);
1732 tcg_temp_free_i32(tmp);
1733 gen_op_iwmmxt_movq_wRn_M0(wrd);
1734 gen_op_iwmmxt_set_mup();
1735 break;
1736 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1737 rd = (insn >> 12) & 0xf;
1738 wrd = (insn >> 16) & 0xf;
1739 if (rd == 15 || ((insn >> 22) & 3) == 3)
1740 return 1;
1741 gen_op_iwmmxt_movq_M0_wRn(wrd);
1742 tmp = tcg_temp_new_i32();
1743 switch ((insn >> 22) & 3) {
1744 case 0:
1745 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1746 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1747 if (insn & 8) {
1748 tcg_gen_ext8s_i32(tmp, tmp);
1749 } else {
1750 tcg_gen_andi_i32(tmp, tmp, 0xff);
1751 }
1752 break;
1753 case 1:
1754 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1755 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1756 if (insn & 8) {
1757 tcg_gen_ext16s_i32(tmp, tmp);
1758 } else {
1759 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1760 }
1761 break;
1762 case 2:
1763 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1764 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1765 break;
1766 }
1767 store_reg(s, rd, tmp);
1768 break;
1769 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1770 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1771 return 1;
1772 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1773 switch ((insn >> 22) & 3) {
1774 case 0:
1775 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1776 break;
1777 case 1:
1778 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1779 break;
1780 case 2:
1781 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1782 break;
1783 }
1784 tcg_gen_shli_i32(tmp, tmp, 28);
1785 gen_set_nzcv(tmp);
1786 tcg_temp_free_i32(tmp);
1787 break;
1788 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1789 if (((insn >> 6) & 3) == 3)
1790 return 1;
1791 rd = (insn >> 12) & 0xf;
1792 wrd = (insn >> 16) & 0xf;
1793 tmp = load_reg(s, rd);
1794 switch ((insn >> 6) & 3) {
1795 case 0:
1796 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1797 break;
1798 case 1:
1799 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1800 break;
1801 case 2:
1802 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1803 break;
1804 }
1805 tcg_temp_free_i32(tmp);
1806 gen_op_iwmmxt_movq_wRn_M0(wrd);
1807 gen_op_iwmmxt_set_mup();
1808 break;
1809 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1810 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1811 return 1;
1812 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1813 tmp2 = tcg_temp_new_i32();
1814 tcg_gen_mov_i32(tmp2, tmp);
1815 switch ((insn >> 22) & 3) {
1816 case 0:
1817 for (i = 0; i < 7; i ++) {
1818 tcg_gen_shli_i32(tmp2, tmp2, 4);
1819 tcg_gen_and_i32(tmp, tmp, tmp2);
1820 }
1821 break;
1822 case 1:
1823 for (i = 0; i < 3; i ++) {
1824 tcg_gen_shli_i32(tmp2, tmp2, 8);
1825 tcg_gen_and_i32(tmp, tmp, tmp2);
1826 }
1827 break;
1828 case 2:
1829 tcg_gen_shli_i32(tmp2, tmp2, 16);
1830 tcg_gen_and_i32(tmp, tmp, tmp2);
1831 break;
1832 }
1833 gen_set_nzcv(tmp);
1834 tcg_temp_free_i32(tmp2);
1835 tcg_temp_free_i32(tmp);
1836 break;
1837 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1838 wrd = (insn >> 12) & 0xf;
1839 rd0 = (insn >> 16) & 0xf;
1840 gen_op_iwmmxt_movq_M0_wRn(rd0);
1841 switch ((insn >> 22) & 3) {
1842 case 0:
1843 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1844 break;
1845 case 1:
1846 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1847 break;
1848 case 2:
1849 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1850 break;
1851 case 3:
1852 return 1;
1853 }
1854 gen_op_iwmmxt_movq_wRn_M0(wrd);
1855 gen_op_iwmmxt_set_mup();
1856 break;
1857 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1858 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1859 return 1;
1860 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1861 tmp2 = tcg_temp_new_i32();
1862 tcg_gen_mov_i32(tmp2, tmp);
1863 switch ((insn >> 22) & 3) {
1864 case 0:
1865 for (i = 0; i < 7; i ++) {
1866 tcg_gen_shli_i32(tmp2, tmp2, 4);
1867 tcg_gen_or_i32(tmp, tmp, tmp2);
1868 }
1869 break;
1870 case 1:
1871 for (i = 0; i < 3; i ++) {
1872 tcg_gen_shli_i32(tmp2, tmp2, 8);
1873 tcg_gen_or_i32(tmp, tmp, tmp2);
1874 }
1875 break;
1876 case 2:
1877 tcg_gen_shli_i32(tmp2, tmp2, 16);
1878 tcg_gen_or_i32(tmp, tmp, tmp2);
1879 break;
1880 }
1881 gen_set_nzcv(tmp);
1882 tcg_temp_free_i32(tmp2);
1883 tcg_temp_free_i32(tmp);
1884 break;
1885 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1886 rd = (insn >> 12) & 0xf;
1887 rd0 = (insn >> 16) & 0xf;
1888 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1889 return 1;
1890 gen_op_iwmmxt_movq_M0_wRn(rd0);
1891 tmp = tcg_temp_new_i32();
1892 switch ((insn >> 22) & 3) {
1893 case 0:
1894 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1895 break;
1896 case 1:
1897 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1898 break;
1899 case 2:
1900 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1901 break;
1902 }
1903 store_reg(s, rd, tmp);
1904 break;
1905 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1906 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1907 wrd = (insn >> 12) & 0xf;
1908 rd0 = (insn >> 16) & 0xf;
1909 rd1 = (insn >> 0) & 0xf;
1910 gen_op_iwmmxt_movq_M0_wRn(rd0);
1911 switch ((insn >> 22) & 3) {
1912 case 0:
1913 if (insn & (1 << 21))
1914 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1915 else
1916 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1917 break;
1918 case 1:
1919 if (insn & (1 << 21))
1920 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1921 else
1922 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1923 break;
1924 case 2:
1925 if (insn & (1 << 21))
1926 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1927 else
1928 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1929 break;
1930 case 3:
1931 return 1;
1932 }
1933 gen_op_iwmmxt_movq_wRn_M0(wrd);
1934 gen_op_iwmmxt_set_mup();
1935 gen_op_iwmmxt_set_cup();
1936 break;
1937 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1938 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1939 wrd = (insn >> 12) & 0xf;
1940 rd0 = (insn >> 16) & 0xf;
1941 gen_op_iwmmxt_movq_M0_wRn(rd0);
1942 switch ((insn >> 22) & 3) {
1943 case 0:
1944 if (insn & (1 << 21))
1945 gen_op_iwmmxt_unpacklsb_M0();
1946 else
1947 gen_op_iwmmxt_unpacklub_M0();
1948 break;
1949 case 1:
1950 if (insn & (1 << 21))
1951 gen_op_iwmmxt_unpacklsw_M0();
1952 else
1953 gen_op_iwmmxt_unpackluw_M0();
1954 break;
1955 case 2:
1956 if (insn & (1 << 21))
1957 gen_op_iwmmxt_unpacklsl_M0();
1958 else
1959 gen_op_iwmmxt_unpacklul_M0();
1960 break;
1961 case 3:
1962 return 1;
1963 }
1964 gen_op_iwmmxt_movq_wRn_M0(wrd);
1965 gen_op_iwmmxt_set_mup();
1966 gen_op_iwmmxt_set_cup();
1967 break;
1968 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1969 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 switch ((insn >> 22) & 3) {
1974 case 0:
1975 if (insn & (1 << 21))
1976 gen_op_iwmmxt_unpackhsb_M0();
1977 else
1978 gen_op_iwmmxt_unpackhub_M0();
1979 break;
1980 case 1:
1981 if (insn & (1 << 21))
1982 gen_op_iwmmxt_unpackhsw_M0();
1983 else
1984 gen_op_iwmmxt_unpackhuw_M0();
1985 break;
1986 case 2:
1987 if (insn & (1 << 21))
1988 gen_op_iwmmxt_unpackhsl_M0();
1989 else
1990 gen_op_iwmmxt_unpackhul_M0();
1991 break;
1992 case 3:
1993 return 1;
1994 }
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2000 case 0x214: case 0x614: case 0xa14: case 0xe14:
2001 if (((insn >> 22) & 3) == 0)
2002 return 1;
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
2006 tmp = tcg_temp_new_i32();
2007 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2008 tcg_temp_free_i32(tmp);
2009 return 1;
2010 }
2011 switch ((insn >> 22) & 3) {
2012 case 1:
2013 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2014 break;
2015 case 2:
2016 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2017 break;
2018 case 3:
2019 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2020 break;
2021 }
2022 tcg_temp_free_i32(tmp);
2023 gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2026 break;
2027 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2028 case 0x014: case 0x414: case 0x814: case 0xc14:
2029 if (((insn >> 22) & 3) == 0)
2030 return 1;
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 16) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0);
2034 tmp = tcg_temp_new_i32();
2035 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2036 tcg_temp_free_i32(tmp);
2037 return 1;
2038 }
2039 switch ((insn >> 22) & 3) {
2040 case 1:
2041 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2042 break;
2043 case 2:
2044 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2045 break;
2046 case 3:
2047 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2048 break;
2049 }
2050 tcg_temp_free_i32(tmp);
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2054 break;
2055 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2056 case 0x114: case 0x514: case 0x914: case 0xd14:
2057 if (((insn >> 22) & 3) == 0)
2058 return 1;
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
2062 tmp = tcg_temp_new_i32();
2063 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2064 tcg_temp_free_i32(tmp);
2065 return 1;
2066 }
2067 switch ((insn >> 22) & 3) {
2068 case 1:
2069 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2070 break;
2071 case 2:
2072 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2073 break;
2074 case 3:
2075 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2076 break;
2077 }
2078 tcg_temp_free_i32(tmp);
2079 gen_op_iwmmxt_movq_wRn_M0(wrd);
2080 gen_op_iwmmxt_set_mup();
2081 gen_op_iwmmxt_set_cup();
2082 break;
2083 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2084 case 0x314: case 0x714: case 0xb14: case 0xf14:
2085 if (((insn >> 22) & 3) == 0)
2086 return 1;
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0);
2090 tmp = tcg_temp_new_i32();
2091 switch ((insn >> 22) & 3) {
2092 case 1:
2093 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2094 tcg_temp_free_i32(tmp);
2095 return 1;
2096 }
2097 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2098 break;
2099 case 2:
2100 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2101 tcg_temp_free_i32(tmp);
2102 return 1;
2103 }
2104 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2105 break;
2106 case 3:
2107 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2108 tcg_temp_free_i32(tmp);
2109 return 1;
2110 }
2111 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2112 break;
2113 }
2114 tcg_temp_free_i32(tmp);
2115 gen_op_iwmmxt_movq_wRn_M0(wrd);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2118 break;
2119 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2120 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2121 wrd = (insn >> 12) & 0xf;
2122 rd0 = (insn >> 16) & 0xf;
2123 rd1 = (insn >> 0) & 0xf;
2124 gen_op_iwmmxt_movq_M0_wRn(rd0);
2125 switch ((insn >> 22) & 3) {
2126 case 0:
2127 if (insn & (1 << 21))
2128 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2129 else
2130 gen_op_iwmmxt_minub_M0_wRn(rd1);
2131 break;
2132 case 1:
2133 if (insn & (1 << 21))
2134 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2135 else
2136 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2137 break;
2138 case 2:
2139 if (insn & (1 << 21))
2140 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2141 else
2142 gen_op_iwmmxt_minul_M0_wRn(rd1);
2143 break;
2144 case 3:
2145 return 1;
2146 }
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 break;
2150 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2151 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2152 wrd = (insn >> 12) & 0xf;
2153 rd0 = (insn >> 16) & 0xf;
2154 rd1 = (insn >> 0) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0);
2156 switch ((insn >> 22) & 3) {
2157 case 0:
2158 if (insn & (1 << 21))
2159 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2160 else
2161 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2162 break;
2163 case 1:
2164 if (insn & (1 << 21))
2165 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2166 else
2167 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2168 break;
2169 case 2:
2170 if (insn & (1 << 21))
2171 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2172 else
2173 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2174 break;
2175 case 3:
2176 return 1;
2177 }
2178 gen_op_iwmmxt_movq_wRn_M0(wrd);
2179 gen_op_iwmmxt_set_mup();
2180 break;
2181 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2182 case 0x402: case 0x502: case 0x602: case 0x702:
2183 wrd = (insn >> 12) & 0xf;
2184 rd0 = (insn >> 16) & 0xf;
2185 rd1 = (insn >> 0) & 0xf;
2186 gen_op_iwmmxt_movq_M0_wRn(rd0);
2187 tmp = tcg_const_i32((insn >> 20) & 3);
2188 iwmmxt_load_reg(cpu_V1, rd1);
2189 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2190 tcg_temp_free(tmp);
2191 gen_op_iwmmxt_movq_wRn_M0(wrd);
2192 gen_op_iwmmxt_set_mup();
2193 break;
2194 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2195 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2196 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2197 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2198 wrd = (insn >> 12) & 0xf;
2199 rd0 = (insn >> 16) & 0xf;
2200 rd1 = (insn >> 0) & 0xf;
2201 gen_op_iwmmxt_movq_M0_wRn(rd0);
2202 switch ((insn >> 20) & 0xf) {
2203 case 0x0:
2204 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2205 break;
2206 case 0x1:
2207 gen_op_iwmmxt_subub_M0_wRn(rd1);
2208 break;
2209 case 0x3:
2210 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2211 break;
2212 case 0x4:
2213 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2214 break;
2215 case 0x5:
2216 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2217 break;
2218 case 0x7:
2219 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2220 break;
2221 case 0x8:
2222 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2223 break;
2224 case 0x9:
2225 gen_op_iwmmxt_subul_M0_wRn(rd1);
2226 break;
2227 case 0xb:
2228 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2229 break;
2230 default:
2231 return 1;
2232 }
2233 gen_op_iwmmxt_movq_wRn_M0(wrd);
2234 gen_op_iwmmxt_set_mup();
2235 gen_op_iwmmxt_set_cup();
2236 break;
2237 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2238 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2239 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2240 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2241 wrd = (insn >> 12) & 0xf;
2242 rd0 = (insn >> 16) & 0xf;
2243 gen_op_iwmmxt_movq_M0_wRn(rd0);
2244 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2245 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2246 tcg_temp_free(tmp);
2247 gen_op_iwmmxt_movq_wRn_M0(wrd);
2248 gen_op_iwmmxt_set_mup();
2249 gen_op_iwmmxt_set_cup();
2250 break;
2251 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2252 case 0x418: case 0x518: case 0x618: case 0x718:
2253 case 0x818: case 0x918: case 0xa18: case 0xb18:
2254 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2255 wrd = (insn >> 12) & 0xf;
2256 rd0 = (insn >> 16) & 0xf;
2257 rd1 = (insn >> 0) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0);
2259 switch ((insn >> 20) & 0xf) {
2260 case 0x0:
2261 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2262 break;
2263 case 0x1:
2264 gen_op_iwmmxt_addub_M0_wRn(rd1);
2265 break;
2266 case 0x3:
2267 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2268 break;
2269 case 0x4:
2270 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2271 break;
2272 case 0x5:
2273 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2274 break;
2275 case 0x7:
2276 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2277 break;
2278 case 0x8:
2279 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2280 break;
2281 case 0x9:
2282 gen_op_iwmmxt_addul_M0_wRn(rd1);
2283 break;
2284 case 0xb:
2285 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2286 break;
2287 default:
2288 return 1;
2289 }
2290 gen_op_iwmmxt_movq_wRn_M0(wrd);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2293 break;
2294 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2295 case 0x408: case 0x508: case 0x608: case 0x708:
2296 case 0x808: case 0x908: case 0xa08: case 0xb08:
2297 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2298 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2299 return 1;
2300 wrd = (insn >> 12) & 0xf;
2301 rd0 = (insn >> 16) & 0xf;
2302 rd1 = (insn >> 0) & 0xf;
2303 gen_op_iwmmxt_movq_M0_wRn(rd0);
2304 switch ((insn >> 22) & 3) {
2305 case 1:
2306 if (insn & (1 << 21))
2307 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2308 else
2309 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2310 break;
2311 case 2:
2312 if (insn & (1 << 21))
2313 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2314 else
2315 gen_op_iwmmxt_packul_M0_wRn(rd1);
2316 break;
2317 case 3:
2318 if (insn & (1 << 21))
2319 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2320 else
2321 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2322 break;
2323 }
2324 gen_op_iwmmxt_movq_wRn_M0(wrd);
2325 gen_op_iwmmxt_set_mup();
2326 gen_op_iwmmxt_set_cup();
2327 break;
2328 case 0x201: case 0x203: case 0x205: case 0x207:
2329 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2330 case 0x211: case 0x213: case 0x215: case 0x217:
2331 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2332 wrd = (insn >> 5) & 0xf;
2333 rd0 = (insn >> 12) & 0xf;
2334 rd1 = (insn >> 0) & 0xf;
2335 if (rd0 == 0xf || rd1 == 0xf)
2336 return 1;
2337 gen_op_iwmmxt_movq_M0_wRn(wrd);
2338 tmp = load_reg(s, rd0);
2339 tmp2 = load_reg(s, rd1);
2340 switch ((insn >> 16) & 0xf) {
2341 case 0x0: /* TMIA */
2342 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2343 break;
2344 case 0x8: /* TMIAPH */
2345 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2346 break;
2347 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2348 if (insn & (1 << 16))
2349 tcg_gen_shri_i32(tmp, tmp, 16);
2350 if (insn & (1 << 17))
2351 tcg_gen_shri_i32(tmp2, tmp2, 16);
2352 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2353 break;
2354 default:
2355 tcg_temp_free_i32(tmp2);
2356 tcg_temp_free_i32(tmp);
2357 return 1;
2358 }
2359 tcg_temp_free_i32(tmp2);
2360 tcg_temp_free_i32(tmp);
2361 gen_op_iwmmxt_movq_wRn_M0(wrd);
2362 gen_op_iwmmxt_set_mup();
2363 break;
2364 default:
2365 return 1;
2366 }
2367
2368 return 0;
2369 }
2370
2371 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2372 (i.e. an undefined instruction). */
2373 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2374 {
2375 int acc, rd0, rd1, rdhi, rdlo;
2376 TCGv tmp, tmp2;
2377
2378 if ((insn & 0x0ff00f10) == 0x0e200010) {
2379 /* Multiply with Internal Accumulate Format */
2380 rd0 = (insn >> 12) & 0xf;
2381 rd1 = insn & 0xf;
2382 acc = (insn >> 5) & 7;
2383
2384 if (acc != 0)
2385 return 1;
2386
2387 tmp = load_reg(s, rd0);
2388 tmp2 = load_reg(s, rd1);
2389 switch ((insn >> 16) & 0xf) {
2390 case 0x0: /* MIA */
2391 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2392 break;
2393 case 0x8: /* MIAPH */
2394 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2395 break;
2396 case 0xc: /* MIABB */
2397 case 0xd: /* MIABT */
2398 case 0xe: /* MIATB */
2399 case 0xf: /* MIATT */
2400 if (insn & (1 << 16))
2401 tcg_gen_shri_i32(tmp, tmp, 16);
2402 if (insn & (1 << 17))
2403 tcg_gen_shri_i32(tmp2, tmp2, 16);
2404 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2405 break;
2406 default:
2407 return 1;
2408 }
2409 tcg_temp_free_i32(tmp2);
2410 tcg_temp_free_i32(tmp);
2411
2412 gen_op_iwmmxt_movq_wRn_M0(acc);
2413 return 0;
2414 }
2415
2416 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2417 /* Internal Accumulator Access Format */
2418 rdhi = (insn >> 16) & 0xf;
2419 rdlo = (insn >> 12) & 0xf;
2420 acc = insn & 7;
2421
2422 if (acc != 0)
2423 return 1;
2424
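/* MRA/MAR transfer the 40-bit internal accumulator to or from a pair of
 * ARM registers; for MRA the high word is masked down to the valid
 * 8 bits (bits [39:32]).
 */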
2425 if (insn & ARM_CP_RW_BIT) { /* MRA */
2426 iwmmxt_load_reg(cpu_V0, acc);
2427 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2428 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2429 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2430 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2431 } else { /* MAR */
2432 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2433 iwmmxt_store_reg(cpu_V0, acc);
2434 }
2435 return 0;
2436 }
2437
2438 return 1;
2439 }
2440
2441 /* Disassemble a system coprocessor instruction. Return nonzero if the
2442 instruction is not defined. */
2443 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2444 {
2445 TCGv tmp, tmp2;
2446 uint32_t rd = (insn >> 12) & 0xf;
2447 uint32_t cp = (insn >> 8) & 0xf;
2448 if (IS_USER(s)) {
2449 return 1;
2450 }
2451
2452 if (insn & ARM_CP_RW_BIT) {
2453 if (!env->cp[cp].cp_read)
2454 return 1;
2455 gen_set_pc_im(s->pc);
2456 tmp = tcg_temp_new_i32();
2457 tmp2 = tcg_const_i32(insn);
2458 gen_helper_get_cp(tmp, cpu_env, tmp2);
2459 tcg_temp_free(tmp2);
2460 store_reg(s, rd, tmp);
2461 } else {
2462 if (!env->cp[cp].cp_write)
2463 return 1;
2464 gen_set_pc_im(s->pc);
2465 tmp = load_reg(s, rd);
2466 tmp2 = tcg_const_i32(insn);
2467 gen_helper_set_cp(cpu_env, tmp2, tmp);
2468 tcg_temp_free(tmp2);
2469 tcg_temp_free_i32(tmp);
2470 }
2471 return 0;
2472 }
2473
2474 static int cp15_user_ok(CPUState *env, uint32_t insn)
2475 {
2476 int cpn = (insn >> 16) & 0xf;
2477 int cpm = insn & 0xf;
2478 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2479
2480 if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
2481 /* Performance monitor registers fall into three categories:
2482 * (a) always UNDEF in usermode
2483 * (b) UNDEF only if PMUSERENR.EN is 0
2484 * (c) always read OK and UNDEF on write (PMUSERENR only)
2485 */
2486 if ((cpm == 12 && (op < 6)) ||
2487 (cpm == 13 && (op < 3))) {
2488 return env->cp15.c9_pmuserenr;
2489 } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
2490 /* PMUSERENR, read only */
2491 return 1;
2492 }
2493 return 0;
2494 }
2495
2496 if (cpn == 13 && cpm == 0) {
2497 /* TLS register. */
2498 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2499 return 1;
2500 }
2501 if (cpn == 7) {
2502 /* ISB, DSB, DMB. */
2503 if ((cpm == 5 && op == 4)
2504 || (cpm == 10 && (op == 4 || op == 5)))
2505 return 1;
2506 }
2507 return 0;
2508 }
2509
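/* Handle MRC/MCR accesses to the cp15 c13 thread ID (TLS) registers directly
 * as plain loads/stores of CPU state. Returns nonzero if the access was
 * handled here.
 */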
2510 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2511 {
2512 TCGv tmp;
2513 int cpn = (insn >> 16) & 0xf;
2514 int cpm = insn & 0xf;
2515 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2516
2517 if (!arm_feature(env, ARM_FEATURE_V6K))
2518 return 0;
2519
2520 if (!(cpn == 13 && cpm == 0))
2521 return 0;
2522
2523 if (insn & ARM_CP_RW_BIT) {
2524 switch (op) {
2525 case 2:
2526 tmp = load_cpu_field(cp15.c13_tls1);
2527 break;
2528 case 3:
2529 tmp = load_cpu_field(cp15.c13_tls2);
2530 break;
2531 case 4:
2532 tmp = load_cpu_field(cp15.c13_tls3);
2533 break;
2534 default:
2535 return 0;
2536 }
2537 store_reg(s, rd, tmp);
2538
2539 } else {
2540 tmp = load_reg(s, rd);
2541 switch (op) {
2542 case 2:
2543 store_cpu_field(tmp, cp15.c13_tls1);
2544 break;
2545 case 3:
2546 store_cpu_field(tmp, cp15.c13_tls2);
2547 break;
2548 case 4:
2549 store_cpu_field(tmp, cp15.c13_tls3);
2550 break;
2551 default:
2552 tcg_temp_free_i32(tmp);
2553 return 0;
2554 }
2555 }
2556 return 1;
2557 }
2558
2559 /* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2560 instruction is not defined. */
2561 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2562 {
2563 uint32_t rd;
2564 TCGv tmp, tmp2;
2565
2566 /* M profile cores use memory mapped registers instead of cp15. */
2567 if (arm_feature(env, ARM_FEATURE_M))
2568 return 1;
2569
2570 if ((insn & (1 << 25)) == 0) {
2571 if (insn & (1 << 20)) {
2572 /* mrrc */
2573 return 1;
2574 }
2575 /* mcrr. Used for block cache operations, so implement as no-op. */
2576 return 0;
2577 }
2578 if ((insn & (1 << 4)) == 0) {
2579 /* cdp */
2580 return 1;
2581 }
2582 if (IS_USER(s) && !cp15_user_ok(env, insn)) {
2583 return 1;
2584 }
2585
2586 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2587 * instructions rather than a separate instruction.
2588 */
2589 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2590 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2591 * In v7, this must be a NOP.
2592 */
2593 if (!arm_feature(env, ARM_FEATURE_V7)) {
2594 /* Wait for interrupt. */
2595 gen_set_pc_im(s->pc);
2596 s->is_jmp = DISAS_WFI;
2597 }
2598 return 0;
2599 }
2600
2601 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2602 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2603 * so this is slightly over-broad.
2604 */
2605 if (!arm_feature(env, ARM_FEATURE_V6)) {
2606 /* Wait for interrupt. */
2607 gen_set_pc_im(s->pc);
2608 s->is_jmp = DISAS_WFI;
2609 return 0;
2610 }
2611 /* Otherwise fall through to handle via helper function.
2612 * In particular, on v7 and some v6 cores this is one of
2613 * the VA-PA registers.
2614 */
2615 }
2616
2617 rd = (insn >> 12) & 0xf;
2618
2619 if (cp15_tls_load_store(env, s, insn, rd))
2620 return 0;
2621
2622 tmp2 = tcg_const_i32(insn);
2623 if (insn & ARM_CP_RW_BIT) {
2624 tmp = tcg_temp_new_i32();
2625 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2626 /* If the destination register is r15 then this sets the condition codes. */
2627 if (rd != 15)
2628 store_reg(s, rd, tmp);
2629 else
2630 tcg_temp_free_i32(tmp);
2631 } else {
2632 tmp = load_reg(s, rd);
2633 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2634 tcg_temp_free_i32(tmp);
2635 /* Normally we would always end the TB here, but Linux
2636 * arch/arm/mach-pxa/sleep.S expects two instructions following
2637 * an MMU enable to execute from cache. Imitate this behaviour. */
2638 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2639 (insn & 0x0fff0fff) != 0x0e010f10)
2640 gen_lookup_tb(s);
2641 }
2642 tcg_temp_free_i32(tmp2);
2643 return 0;
2644 }
2645
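/* The VFP_SREG_* and VFP_DREG_* macros extract VFP register numbers from an
 * instruction. Single-precision registers use a 5-bit number split into a
 * 4-bit field plus an extra low bit; for double-precision registers the extra
 * (high) bit is only valid with VFP3 (32 D registers), otherwise the
 * instruction is treated as undefined if that bit is set.
 */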
2646 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2647 #define VFP_SREG(insn, bigbit, smallbit) \
2648 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2649 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2650 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2651 reg = (((insn) >> (bigbit)) & 0x0f) \
2652 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2653 } else { \
2654 if (insn & (1 << (smallbit))) \
2655 return 1; \
2656 reg = ((insn) >> (bigbit)) & 0x0f; \
2657 }} while (0)
2658
2659 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2660 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2661 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2662 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2663 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2664 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2665
2666 /* Move between integer and VFP cores. */
2667 static TCGv gen_vfp_mrs(void)
2668 {
2669 TCGv tmp = tcg_temp_new_i32();
2670 tcg_gen_mov_i32(tmp, cpu_F0s);
2671 return tmp;
2672 }
2673
2674 static void gen_vfp_msr(TCGv tmp)
2675 {
2676 tcg_gen_mov_i32(cpu_F0s, tmp);
2677 tcg_temp_free_i32(tmp);
2678 }
2679
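/* The gen_neon_dup_* helpers replicate a single 8-bit or 16-bit element
 * across all lanes of a 32-bit value, as needed by VDUP and the
 * load-and-replicate element forms.
 */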
2680 static void gen_neon_dup_u8(TCGv var, int shift)
2681 {
2682 TCGv tmp = tcg_temp_new_i32();
2683 if (shift)
2684 tcg_gen_shri_i32(var, var, shift);
2685 tcg_gen_ext8u_i32(var, var);
2686 tcg_gen_shli_i32(tmp, var, 8);
2687 tcg_gen_or_i32(var, var, tmp);
2688 tcg_gen_shli_i32(tmp, var, 16);
2689 tcg_gen_or_i32(var, var, tmp);
2690 tcg_temp_free_i32(tmp);
2691 }
2692
2693 static void gen_neon_dup_low16(TCGv var)
2694 {
2695 TCGv tmp = tcg_temp_new_i32();
2696 tcg_gen_ext16u_i32(var, var);
2697 tcg_gen_shli_i32(tmp, var, 16);
2698 tcg_gen_or_i32(var, var, tmp);
2699 tcg_temp_free_i32(tmp);
2700 }
2701
2702 static void gen_neon_dup_high16(TCGv var)
2703 {
2704 TCGv tmp = tcg_temp_new_i32();
2705 tcg_gen_andi_i32(var, var, 0xffff0000);
2706 tcg_gen_shri_i32(tmp, var, 16);
2707 tcg_gen_or_i32(var, var, tmp);
2708 tcg_temp_free_i32(tmp);
2709 }
2710
2711 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2712 {
2713 /* Load a single Neon element and replicate it into a 32-bit TCG reg */
2714 TCGv tmp;
2715 switch (size) {
2716 case 0:
2717 tmp = gen_ld8u(addr, IS_USER(s));
2718 gen_neon_dup_u8(tmp, 0);
2719 break;
2720 case 1:
2721 tmp = gen_ld16u(addr, IS_USER(s));
2722 gen_neon_dup_low16(tmp);
2723 break;
2724 case 2:
2725 tmp = gen_ld32(addr, IS_USER(s));
2726 break;
2727 default: /* Avoid compiler warnings. */
2728 abort();
2729 }
2730 return tmp;
2731 }
2732
2733 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2734 (i.e. an undefined instruction). */
2735 static int disas_vfp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2736 {
2737 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2738 int dp, veclen;
2739 TCGv addr;
2740 TCGv tmp;
2741 TCGv tmp2;
2742
2743 if (!arm_feature(env, ARM_FEATURE_VFP))
2744 return 1;
2745
2746 if (!s->vfp_enabled) {
2747 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2748 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2749 return 1;
2750 rn = (insn >> 16) & 0xf;
2751 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2752 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2753 return 1;
2754 }
2755 dp = ((insn & 0xf00) == 0xb00);
2756 switch ((insn >> 24) & 0xf) {
2757 case 0xe:
2758 if (insn & (1 << 4)) {
2759 /* single register transfer */
2760 rd = (insn >> 12) & 0xf;
2761 if (dp) {
2762 int size;
2763 int pass;
2764
2765 VFP_DREG_N(rn, insn);
2766 if (insn & 0xf)
2767 return 1;
2768 if (insn & 0x00c00060
2769 && !arm_feature(env, ARM_FEATURE_NEON))
2770 return 1;
2771
2772 pass = (insn >> 21) & 1;
2773 if (insn & (1 << 22)) {
2774 size = 0;
2775 offset = ((insn >> 5) & 3) * 8;
2776 } else if (insn & (1 << 5)) {
2777 size = 1;
2778 offset = (insn & (1 << 6)) ? 16 : 0;
2779 } else {
2780 size = 2;
2781 offset = 0;
2782 }
2783 if (insn & ARM_CP_RW_BIT) {
2784 /* vfp->arm */
2785 tmp = neon_load_reg(rn, pass);
2786 switch (size) {
2787 case 0:
2788 if (offset)
2789 tcg_gen_shri_i32(tmp, tmp, offset);
2790 if (insn & (1 << 23))
2791 gen_uxtb(tmp);
2792 else
2793 gen_sxtb(tmp);
2794 break;
2795 case 1:
2796 if (insn & (1 << 23)) {
2797 if (offset) {
2798 tcg_gen_shri_i32(tmp, tmp, 16);
2799 } else {
2800 gen_uxth(tmp);
2801 }
2802 } else {
2803 if (offset) {
2804 tcg_gen_sari_i32(tmp, tmp, 16);
2805 } else {
2806 gen_sxth(tmp);
2807 }
2808 }
2809 break;
2810 case 2:
2811 break;
2812 }
2813 store_reg(s, rd, tmp);
2814 } else {
2815 /* arm->vfp */
2816 tmp = load_reg(s, rd);
2817 if (insn & (1 << 23)) {
2818 /* VDUP */
2819 if (size == 0) {
2820 gen_neon_dup_u8(tmp, 0);
2821 } else if (size == 1) {
2822 gen_neon_dup_low16(tmp);
2823 }
2824 for (n = 0; n <= pass * 2; n++) {
2825 tmp2 = tcg_temp_new_i32();
2826 tcg_gen_mov_i32(tmp2, tmp);
2827 neon_store_reg(rn, n, tmp2);
2828 }
2829 neon_store_reg(rn, n, tmp);
2830 } else {
2831 /* VMOV */
2832 switch (size) {
2833 case 0:
2834 tmp2 = neon_load_reg(rn, pass);
2835 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2836 tcg_temp_free_i32(tmp2);
2837 break;
2838 case 1:
2839 tmp2 = neon_load_reg(rn, pass);
2840 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2841 tcg_temp_free_i32(tmp2);
2842 break;
2843 case 2:
2844 break;
2845 }
2846 neon_store_reg(rn, pass, tmp);
2847 }
2848 }
2849 } else { /* !dp */
2850 if ((insn & 0x6f) != 0x00)
2851 return 1;
2852 rn = VFP_SREG_N(insn);
2853 if (insn & ARM_CP_RW_BIT) {
2854 /* vfp->arm */
2855 if (insn & (1 << 21)) {
2856 /* system register */
2857 rn >>= 1;
2858
2859 switch (rn) {
2860 case ARM_VFP_FPSID:
2861 /* VFP2 allows access to FPSID from userspace.
2862 VFP3 restricts all ID registers to privileged
2863 accesses. */
2864 if (IS_USER(s)
2865 && arm_feature(env, ARM_FEATURE_VFP3))
2866 return 1;
2867 tmp = load_cpu_field(vfp.xregs[rn]);
2868 break;
2869 case ARM_VFP_FPEXC:
2870 if (IS_USER(s))
2871 return 1;
2872 tmp = load_cpu_field(vfp.xregs[rn]);
2873 break;
2874 case ARM_VFP_FPINST:
2875 case ARM_VFP_FPINST2:
2876 /* Not present in VFP3. */
2877 if (IS_USER(s)
2878 || arm_feature(env, ARM_FEATURE_VFP3))
2879 return 1;
2880 tmp = load_cpu_field(vfp.xregs[rn]);
2881 break;
2882 case ARM_VFP_FPSCR:
2883 if (rd == 15) {
2884 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2885 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2886 } else {
2887 tmp = tcg_temp_new_i32();
2888 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2889 }
2890 break;
2891 case ARM_VFP_MVFR0:
2892 case ARM_VFP_MVFR1:
2893 if (IS_USER(s)
2894 || !arm_feature(env, ARM_FEATURE_VFP3))
2895 return 1;
2896 tmp = load_cpu_field(vfp.xregs[rn]);
2897 break;
2898 default:
2899 return 1;
2900 }
2901 } else {
2902 gen_mov_F0_vreg(0, rn);
2903 tmp = gen_vfp_mrs();
2904 }
2905 if (rd == 15) {
2906 /* Set the 4 flag bits in the CPSR. */
2907 gen_set_nzcv(tmp);
2908 tcg_temp_free_i32(tmp);
2909 } else {
2910 store_reg(s, rd, tmp);
2911 }
2912 } else {
2913 /* arm->vfp */
2914 tmp = load_reg(s, rd);
2915 if (insn & (1 << 21)) {
2916 rn >>= 1;
2917 /* system register */
2918 switch (rn) {
2919 case ARM_VFP_FPSID:
2920 case ARM_VFP_MVFR0:
2921 case ARM_VFP_MVFR1:
2922 /* Writes are ignored. */
2923 break;
2924 case ARM_VFP_FPSCR:
2925 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2926 tcg_temp_free_i32(tmp);
2927 gen_lookup_tb(s);
2928 break;
2929 case ARM_VFP_FPEXC:
2930 if (IS_USER(s))
2931 return 1;
2932 /* TODO: VFP subarchitecture support.
2933 * For now, keep the EN bit only */
2934 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2935 store_cpu_field(tmp, vfp.xregs[rn]);
2936 gen_lookup_tb(s);
2937 break;
2938 case ARM_VFP_FPINST:
2939 case ARM_VFP_FPINST2:
2940 store_cpu_field(tmp, vfp.xregs[rn]);
2941 break;
2942 default:
2943 return 1;
2944 }
2945 } else {
2946 gen_vfp_msr(tmp);
2947 gen_mov_vreg_F0(0, rn);
2948 }
2949 }
2950 }
2951 } else {
2952 /* data processing */
2953 /* The opcode is in bits 23, 21, 20 and 6. */
2954 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2955 if (dp) {
2956 if (op == 15) {
2957 /* rn is opcode */
2958 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2959 } else {
2960 /* rn is register number */
2961 VFP_DREG_N(rn, insn);
2962 }
2963
2964 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2965 /* Integer or single precision destination. */
2966 rd = VFP_SREG_D(insn);
2967 } else {
2968 VFP_DREG_D(rd, insn);
2969 }
2970 if (op == 15 &&
2971 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2972 /* VCVT from int is always from an S reg regardless of the dp bit.
2973 * VCVT with an immediate frac_bits has the same format as SREG_M.
2974 */
2975 rm = VFP_SREG_M(insn);
2976 } else {
2977 VFP_DREG_M(rm, insn);
2978 }
2979 } else {
2980 rn = VFP_SREG_N(insn);
2981 if (op == 15 && rn == 15) {
2982 /* Double precision destination. */
2983 VFP_DREG_D(rd, insn);
2984 } else {
2985 rd = VFP_SREG_D(insn);
2986 }
2987 /* NB that we implicitly rely on the frac_bits encoding in
2988 * VCVT fixed-to-float being the same as that of an SREG_M.
2989 */
2990 rm = VFP_SREG_M(insn);
2991 }
2992
2993 veclen = s->vec_len;
2994 if (op == 15 && rn > 3)
2995 veclen = 0;
2996
2997 /* Shut up compiler warnings. */
2998 delta_m = 0;
2999 delta_d = 0;
3000 bank_mask = 0;
3001
3002 if (veclen > 0) {
3003 if (dp)
3004 bank_mask = 0xc;
3005 else
3006 bank_mask = 0x18;
3007
3008 /* Figure out what type of vector operation this is. */
3009 if ((rd & bank_mask) == 0) {
3010 /* scalar */
3011 veclen = 0;
3012 } else {
3013 if (dp)
3014 delta_d = (s->vec_stride >> 1) + 1;
3015 else
3016 delta_d = s->vec_stride + 1;
3017
3018 if ((rm & bank_mask) == 0) {
3019 /* mixed scalar/vector */
3020 delta_m = 0;
3021 } else {
3022 /* vector */
3023 delta_m = delta_d;
3024 }
3025 }
3026 }
3027
3028 /* Load the initial operands. */
3029 if (op == 15) {
3030 switch (rn) {
3031 case 16:
3032 case 17:
3033 /* Integer source */
3034 gen_mov_F0_vreg(0, rm);
3035 break;
3036 case 8:
3037 case 9:
3038 /* Compare */
3039 gen_mov_F0_vreg(dp, rd);
3040 gen_mov_F1_vreg(dp, rm);
3041 break;
3042 case 10:
3043 case 11:
3044 /* Compare with zero */
3045 gen_mov_F0_vreg(dp, rd);
3046 gen_vfp_F1_ld0(dp);
3047 break;
3048 case 20:
3049 case 21:
3050 case 22:
3051 case 23:
3052 case 28:
3053 case 29:
3054 case 30:
3055 case 31:
3056 /* Source and destination the same. */
3057 gen_mov_F0_vreg(dp, rd);
3058 break;
3059 default:
3060 /* One source operand. */
3061 gen_mov_F0_vreg(dp, rm);
3062 break;
3063 }
3064 } else {
3065 /* Two source operands. */
3066 gen_mov_F0_vreg(dp, rn);
3067 gen_mov_F1_vreg(dp, rm);
3068 }
3069
3070 for (;;) {
3071 /* Perform the calculation. */
3072 switch (op) {
3073 case 0: /* VMLA: fd + (fn * fm) */
3074 /* Note that the order of inputs to the add matters for NaNs */
3075 gen_vfp_F1_mul(dp);
3076 gen_mov_F0_vreg(dp, rd);
3077 gen_vfp_add(dp);
3078 break;
3079 case 1: /* VMLS: fd + -(fn * fm) */
3080 gen_vfp_mul(dp);
3081 gen_vfp_F1_neg(dp);
3082 gen_mov_F0_vreg(dp, rd);
3083 gen_vfp_add(dp);
3084 break;
3085 case 2: /* VNMLS: -fd + (fn * fm) */
3086 /* Note that it isn't valid to replace (-A + B) with (B - A)
3087 * or similar plausible-looking simplifications
3088 * because this will give wrong results for NaNs.
3089 */
3090 gen_vfp_F1_mul(dp);
3091 gen_mov_F0_vreg(dp, rd);
3092 gen_vfp_neg(dp);
3093 gen_vfp_add(dp);
3094 break;
3095 case 3: /* VNMLA: -fd + -(fn * fm) */
3096 gen_vfp_mul(dp);
3097 gen_vfp_F1_neg(dp);
3098 gen_mov_F0_vreg(dp, rd);
3099 gen_vfp_neg(dp);
3100 gen_vfp_add(dp);
3101 break;
3102 case 4: /* mul: fn * fm */
3103 gen_vfp_mul(dp);
3104 break;
3105 case 5: /* nmul: -(fn * fm) */
3106 gen_vfp_mul(dp);
3107 gen_vfp_neg(dp);
3108 break;
3109 case 6: /* add: fn + fm */
3110 gen_vfp_add(dp);
3111 break;
3112 case 7: /* sub: fn - fm */
3113 gen_vfp_sub(dp);
3114 break;
3115 case 8: /* div: fn / fm */
3116 gen_vfp_div(dp);
3117 break;
3118 case 14: /* fconst */
3119 if (!arm_feature(env, ARM_FEATURE_VFP3))
3120 return 1;
3121
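/* Expand the 8-bit floating-point immediate into a full single- or
 * double-precision constant: bit 7 gives the sign, bit 6 the exponent
 * pattern, and the low bits the exponent LSBs and fraction.
 */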
3122 n = (insn << 12) & 0x80000000;
3123 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3124 if (dp) {
3125 if (i & 0x40)
3126 i |= 0x3f80;
3127 else
3128 i |= 0x4000;
3129 n |= i << 16;
3130 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3131 } else {
3132 if (i & 0x40)
3133 i |= 0x780;
3134 else
3135 i |= 0x800;
3136 n |= i << 19;
3137 tcg_gen_movi_i32(cpu_F0s, n);
3138 }
3139 break;
3140 case 15: /* extension space */
3141 switch (rn) {
3142 case 0: /* cpy */
3143 /* no-op */
3144 break;
3145 case 1: /* abs */
3146 gen_vfp_abs(dp);
3147 break;
3148 case 2: /* neg */
3149 gen_vfp_neg(dp);
3150 break;
3151 case 3: /* sqrt */
3152 gen_vfp_sqrt(dp);
3153 break;
3154 case 4: /* vcvtb.f32.f16 */
3155 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3156 return 1;
3157 tmp = gen_vfp_mrs();
3158 tcg_gen_ext16u_i32(tmp, tmp);
3159 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3160 tcg_temp_free_i32(tmp);
3161 break;
3162 case 5: /* vcvtt.f32.f16 */
3163 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3164 return 1;
3165 tmp = gen_vfp_mrs();
3166 tcg_gen_shri_i32(tmp, tmp, 16);
3167 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3168 tcg_temp_free_i32(tmp);
3169 break;
3170 case 6: /* vcvtb.f16.f32 */
3171 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3172 return 1;
3173 tmp = tcg_temp_new_i32();
3174 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3175 gen_mov_F0_vreg(0, rd);
3176 tmp2 = gen_vfp_mrs();
3177 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3178 tcg_gen_or_i32(tmp, tmp, tmp2);
3179 tcg_temp_free_i32(tmp2);
3180 gen_vfp_msr(tmp);
3181 break;
3182 case 7: /* vcvtt.f16.f32 */
3183 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3184 return 1;
3185 tmp = tcg_temp_new_i32();
3186 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3187 tcg_gen_shli_i32(tmp, tmp, 16);
3188 gen_mov_F0_vreg(0, rd);
3189 tmp2 = gen_vfp_mrs();
3190 tcg_gen_ext16u_i32(tmp2, tmp2);
3191 tcg_gen_or_i32(tmp, tmp, tmp2);
3192 tcg_temp_free_i32(tmp2);
3193 gen_vfp_msr(tmp);
3194 break;
3195 case 8: /* cmp */
3196 gen_vfp_cmp(dp);
3197 break;
3198 case 9: /* cmpe */
3199 gen_vfp_cmpe(dp);
3200 break;
3201 case 10: /* cmpz */
3202 gen_vfp_cmp(dp);
3203 break;
3204 case 11: /* cmpez */
3205 gen_vfp_F1_ld0(dp);
3206 gen_vfp_cmpe(dp);
3207 break;
3208 case 15: /* single<->double conversion */
3209 if (dp)
3210 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3211 else
3212 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3213 break;
3214 case 16: /* fuito */
3215 gen_vfp_uito(dp, 0);
3216 break;
3217 case 17: /* fsito */
3218 gen_vfp_sito(dp, 0);
3219 break;
3220 case 20: /* fshto */
3221 if (!arm_feature(env, ARM_FEATURE_VFP3))
3222 return 1;
3223 gen_vfp_shto(dp, 16 - rm, 0);
3224 break;
3225 case 21: /* fslto */
3226 if (!arm_feature(env, ARM_FEATURE_VFP3))
3227 return 1;
3228 gen_vfp_slto(dp, 32 - rm, 0);
3229 break;
3230 case 22: /* fuhto */
3231 if (!arm_feature(env, ARM_FEATURE_VFP3))
3232 return 1;
3233 gen_vfp_uhto(dp, 16 - rm, 0);
3234 break;
3235 case 23: /* fulto */
3236 if (!arm_feature(env, ARM_FEATURE_VFP3))
3237 return 1;
3238 gen_vfp_ulto(dp, 32 - rm, 0);
3239 break;
3240 case 24: /* ftoui */
3241 gen_vfp_toui(dp, 0);
3242 break;
3243 case 25: /* ftouiz */
3244 gen_vfp_touiz(dp, 0);
3245 break;
3246 case 26: /* ftosi */
3247 gen_vfp_tosi(dp, 0);
3248 break;
3249 case 27: /* ftosiz */
3250 gen_vfp_tosiz(dp, 0);
3251 break;
3252 case 28: /* ftosh */
3253 if (!arm_feature(env, ARM_FEATURE_VFP3))
3254 return 1;
3255 gen_vfp_tosh(dp, 16 - rm, 0);
3256 break;
3257 case 29: /* ftosl */
3258 if (!arm_feature(env, ARM_FEATURE_VFP3))
3259 return 1;
3260 gen_vfp_tosl(dp, 32 - rm, 0);
3261 break;
3262 case 30: /* ftouh */
3263 if (!arm_feature(env, ARM_FEATURE_VFP3))
3264 return 1;
3265 gen_vfp_touh(dp, 16 - rm, 0);
3266 break;
3267 case 31: /* ftoul */
3268 if (!arm_feature(env, ARM_FEATURE_VFP3))
3269 return 1;
3270 gen_vfp_toul(dp, 32 - rm, 0);
3271 break;
3272 default: /* undefined */
3273 printf ("rn:%d\n", rn);
3274 return 1;
3275 }
3276 break;
3277 default: /* undefined */
3278 printf ("op:%d\n", op);
3279 return 1;
3280 }
3281
3282 /* Write back the result. */
3283 if (op == 15 && (rn >= 8 && rn <= 11))
3284 ; /* Comparison, do nothing. */
3285 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3286 /* VCVT double to int: always integer result. */
3287 gen_mov_vreg_F0(0, rd);
3288 else if (op == 15 && rn == 15)
3289 /* conversion */
3290 gen_mov_vreg_F0(!dp, rd);
3291 else
3292 gen_mov_vreg_F0(dp, rd);
3293
3294 /* break out of the loop if we have finished */
3295 if (veclen == 0)
3296 break;
3297
3298 if (op == 15 && delta_m == 0) {
3299 /* single source one-many */
3300 while (veclen--) {
3301 rd = ((rd + delta_d) & (bank_mask - 1))
3302 | (rd & bank_mask);
3303 gen_mov_vreg_F0(dp, rd);
3304 }
3305 break;
3306 }
3307 /* Set up the next operands. */
3308 veclen--;
3309 rd = ((rd + delta_d) & (bank_mask - 1))
3310 | (rd & bank_mask);
3311
3312 if (op == 15) {
3313 /* One source operand. */
3314 rm = ((rm + delta_m) & (bank_mask - 1))
3315 | (rm & bank_mask);
3316 gen_mov_F0_vreg(dp, rm);
3317 } else {
3318 /* Two source operands. */
3319 rn = ((rn + delta_d) & (bank_mask - 1))
3320 | (rn & bank_mask);
3321 gen_mov_F0_vreg(dp, rn);
3322 if (delta_m) {
3323 rm = ((rm + delta_m) & (bank_mask - 1))
3324 | (rm & bank_mask);
3325 gen_mov_F1_vreg(dp, rm);
3326 }
3327 }
3328 }
3329 }
3330 break;
3331 case 0xc:
3332 case 0xd:
3333 if ((insn & 0x03e00000) == 0x00400000) {
3334 /* two-register transfer */
3335 rn = (insn >> 16) & 0xf;
3336 rd = (insn >> 12) & 0xf;
3337 if (dp) {
3338 VFP_DREG_M(rm, insn);
3339 } else {
3340 rm = VFP_SREG_M(insn);
3341 }
3342
3343 if (insn & ARM_CP_RW_BIT) {
3344 /* vfp->arm */
3345 if (dp) {
3346 gen_mov_F0_vreg(0, rm * 2);
3347 tmp = gen_vfp_mrs();
3348 store_reg(s, rd, tmp);
3349 gen_mov_F0_vreg(0, rm * 2 + 1);
3350 tmp = gen_vfp_mrs();
3351 store_reg(s, rn, tmp);
3352 } else {
3353 gen_mov_F0_vreg(0, rm);
3354 tmp = gen_vfp_mrs();
3355 store_reg(s, rd, tmp);
3356 gen_mov_F0_vreg(0, rm + 1);
3357 tmp = gen_vfp_mrs();
3358 store_reg(s, rn, tmp);
3359 }
3360 } else {
3361 /* arm->vfp */
3362 if (dp) {
3363 tmp = load_reg(s, rd);
3364 gen_vfp_msr(tmp);
3365 gen_mov_vreg_F0(0, rm * 2);
3366 tmp = load_reg(s, rn);
3367 gen_vfp_msr(tmp);
3368 gen_mov_vreg_F0(0, rm * 2 + 1);
3369 } else {
3370 tmp = load_reg(s, rd);
3371 gen_vfp_msr(tmp);
3372 gen_mov_vreg_F0(0, rm);
3373 tmp = load_reg(s, rn);
3374 gen_vfp_msr(tmp);
3375 gen_mov_vreg_F0(0, rm + 1);
3376 }
3377 }
3378 } else {
3379 /* Load/store */
3380 rn = (insn >> 16) & 0xf;
3381 if (dp)
3382 VFP_DREG_D(rd, insn);
3383 else
3384 rd = VFP_SREG_D(insn);
3385 if (s->thumb && rn == 15) {
3386 addr = tcg_temp_new_i32();
3387 tcg_gen_movi_i32(addr, s->pc & ~2);
3388 } else {
3389 addr = load_reg(s, rn);
3390 }
3391 if ((insn & 0x01200000) == 0x01000000) {
3392 /* Single load/store */
3393 offset = (insn & 0xff) << 2;
3394 if ((insn & (1 << 23)) == 0)
3395 offset = -offset;
3396 tcg_gen_addi_i32(addr, addr, offset);
3397 if (insn & (1 << 20)) {
3398 gen_vfp_ld(s, dp, addr);
3399 gen_mov_vreg_F0(dp, rd);
3400 } else {
3401 gen_mov_F0_vreg(dp, rd);
3402 gen_vfp_st(s, dp, addr);
3403 }
3404 tcg_temp_free_i32(addr);
3405 } else {
3406 /* load/store multiple */
3407 if (dp)
3408 n = (insn >> 1) & 0x7f;
3409 else
3410 n = insn & 0xff;
3411
3412 if (insn & (1 << 24)) /* pre-decrement */
3413 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3414
3415 if (dp)
3416 offset = 8;
3417 else
3418 offset = 4;
3419 for (i = 0; i < n; i++) {
3420 if (insn & ARM_CP_RW_BIT) {
3421 /* load */
3422 gen_vfp_ld(s, dp, addr);
3423 gen_mov_vreg_F0(dp, rd + i);
3424 } else {
3425 /* store */
3426 gen_mov_F0_vreg(dp, rd + i);
3427 gen_vfp_st(s, dp, addr);
3428 }
3429 tcg_gen_addi_i32(addr, addr, offset);
3430 }
3431 if (insn & (1 << 21)) {
3432 /* writeback */
3433 if (insn & (1 << 24))
3434 offset = -offset * n;
3435 else if (dp && (insn & 1))
3436 offset = 4;
3437 else
3438 offset = 0;
3439
3440 if (offset != 0)
3441 tcg_gen_addi_i32(addr, addr, offset);
3442 store_reg(s, rn, addr);
3443 } else {
3444 tcg_temp_free_i32(addr);
3445 }
3446 }
3447 }
3448 break;
3449 default:
3450 /* Should never happen. */
3451 return 1;
3452 }
3453 return 0;
3454 }
3455
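/* Emit a jump to 'dest'. If the destination lies in the same guest page as
 * this TB we can use direct TB chaining (goto_tb) so the target TB can be
 * linked in later; otherwise just set the PC and return to the main loop.
 */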
3456 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3457 {
3458 TranslationBlock *tb;
3459
3460 tb = s->tb;
3461 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3462 tcg_gen_goto_tb(n);
3463 gen_set_pc_im(dest);
3464 tcg_gen_exit_tb((tcg_target_long)tb + n);
3465 } else {
3466 gen_set_pc_im(dest);
3467 tcg_gen_exit_tb(0);
3468 }
3469 }
3470
3471 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3472 {
3473 if (unlikely(s->singlestep_enabled)) {
3474 /* An indirect jump so that we still trigger the debug exception. */
3475 if (s->thumb)
3476 dest |= 1;
3477 gen_bx_im(s, dest);
3478 } else {
3479 gen_goto_tb(s, 0, dest);
3480 s->is_jmp = DISAS_TB_JUMP;
3481 }
3482 }
3483
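/* Signed 16x16->32 multiply of the selected halfwords of t0 and t1
 * (x and y pick the top or bottom half of each operand), as used by the
 * halfword multiply instructions such as SMULxy/SMLAxy.
 */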
3484 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3485 {
3486 if (x)
3487 tcg_gen_sari_i32(t0, t0, 16);
3488 else
3489 gen_sxth(t0);
3490 if (y)
3491 tcg_gen_sari_i32(t1, t1, 16);
3492 else
3493 gen_sxth(t1);
3494 tcg_gen_mul_i32(t0, t0, t1);
3495 }
3496
3497 /* Return the mask of PSR bits set by an MSR instruction. */
3498 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3499 uint32_t mask;
3500
3501 mask = 0;
3502 if (flags & (1 << 0))
3503 mask |= 0xff;
3504 if (flags & (1 << 1))
3505 mask |= 0xff00;
3506 if (flags & (1 << 2))
3507 mask |= 0xff0000;
3508 if (flags & (1 << 3))
3509 mask |= 0xff000000;
3510
3511 /* Mask out undefined bits. */
3512 mask &= ~CPSR_RESERVED;
3513 if (!arm_feature(env, ARM_FEATURE_V4T))
3514 mask &= ~CPSR_T;
3515 if (!arm_feature(env, ARM_FEATURE_V5))
3516 mask &= ~CPSR_Q; /* V5TE in reality */
3517 if (!arm_feature(env, ARM_FEATURE_V6))
3518 mask &= ~(CPSR_E | CPSR_GE);
3519 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3520 mask &= ~CPSR_IT;
3521 /* Mask out execution state bits. */
3522 if (!spsr)
3523 mask &= ~CPSR_EXEC;
3524 /* Mask out privileged bits. */
3525 if (IS_USER(s))
3526 mask &= CPSR_USER;
3527 return mask;
3528 }
3529
3530 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3531 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3532 {
3533 TCGv tmp;
3534 if (spsr) {
3535 /* ??? This is also undefined in system mode. */
3536 if (IS_USER(s))
3537 return 1;
3538
3539 tmp = load_cpu_field(spsr);
3540 tcg_gen_andi_i32(tmp, tmp, ~mask);
3541 tcg_gen_andi_i32(t0, t0, mask);
3542 tcg_gen_or_i32(tmp, tmp, t0);
3543 store_cpu_field(tmp, spsr);
3544 } else {
3545 gen_set_cpsr(t0, mask);
3546 }
3547 tcg_temp_free_i32(t0);
3548 gen_lookup_tb(s);
3549 return 0;
3550 }
3551
3552 /* Returns nonzero if access to the PSR is not permitted. */
3553 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3554 {
3555 TCGv tmp;
3556 tmp = tcg_temp_new_i32();
3557 tcg_gen_movi_i32(tmp, val);
3558 return gen_set_psr(s, mask, spsr, tmp);
3559 }
3560
3561 /* Generate an old-style exception return. Marks pc as dead. */
3562 static void gen_exception_return(DisasContext *s, TCGv pc)
3563 {
3564 TCGv tmp;
3565 store_reg(s, 15, pc);
3566 tmp = load_cpu_field(spsr);
3567 gen_set_cpsr(tmp, 0xffffffff);
3568 tcg_temp_free_i32(tmp);
3569 s->is_jmp = DISAS_UPDATE;
3570 }
3571
3572 /* Generate a v6 exception return. Marks both values as dead. */
3573 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3574 {
3575 gen_set_cpsr(cpsr, 0xffffffff);
3576 tcg_temp_free_i32(cpsr);
3577 store_reg(s, 15, pc);
3578 s->is_jmp = DISAS_UPDATE;
3579 }
3580
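/* If we are inside a Thumb IT block, sync the conditional execution state
 * to the condexec_bits CPU field, typically before raising an exception.
 */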
3581 static inline void
3582 gen_set_condexec (DisasContext *s)
3583 {
3584 if (s->condexec_mask) {
3585 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3586 TCGv tmp = tcg_temp_new_i32();
3587 tcg_gen_movi_i32(tmp, val);
3588 store_cpu_field(tmp, condexec_bits);
3589 }
3590 }
3591
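/* Raise exception 'excp' for the instruction that starts 'offset' bytes
 * before the current PC: sync the condexec state and PC first, then emit
 * the exception and end the TB.
 */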
3592 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3593 {
3594 gen_set_condexec(s);
3595 gen_set_pc_im(s->pc - offset);
3596 gen_exception(excp);
3597 s->is_jmp = DISAS_JUMP;
3598 }
3599
3600 static void gen_nop_hint(DisasContext *s, int val)
3601 {
3602 switch (val) {
3603 case 3: /* wfi */
3604 gen_set_pc_im(s->pc);
3605 s->is_jmp = DISAS_WFI;
3606 break;
3607 case 2: /* wfe */
3608 case 4: /* sev */
3609 /* TODO: Implement SEV and WFE. May help SMP performance. */
3610 default: /* nop */
3611 break;
3612 }
3613 }
3614
3615 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3616
3617 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3618 {
3619 switch (size) {
3620 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3621 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3622 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3623 default: abort();
3624 }
3625 }
3626
3627 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3628 {
3629 switch (size) {
3630 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3631 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3632 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3633 default: return;
3634 }
3635 }
3636
3637 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3638 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3639 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3640 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3641 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3642
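/* Expand a two-operand Neon integer helper call for the element size and
 * signedness selected by (size, u). The _ENV variant passes cpu_env for
 * helpers that need CPU state, e.g. the saturating operations.
 */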
3643 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3644 switch ((size << 1) | u) { \
3645 case 0: \
3646 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3647 break; \
3648 case 1: \
3649 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3650 break; \
3651 case 2: \
3652 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3653 break; \
3654 case 3: \
3655 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3656 break; \
3657 case 4: \
3658 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3659 break; \
3660 case 5: \
3661 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3662 break; \
3663 default: return 1; \
3664 }} while (0)
3665
3666 #define GEN_NEON_INTEGER_OP(name) do { \
3667 switch ((size << 1) | u) { \
3668 case 0: \
3669 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3670 break; \
3671 case 1: \
3672 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3673 break; \
3674 case 2: \
3675 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3676 break; \
3677 case 3: \
3678 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3679 break; \
3680 case 4: \
3681 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3682 break; \
3683 case 5: \
3684 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3685 break; \
3686 default: return 1; \
3687 }} while (0)
3688
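/* Scratch register helpers: the Neon code uses a few 32-bit slots in
 * vfp.scratch[] to carry values between passes of an instruction.
 */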
3689 static TCGv neon_load_scratch(int scratch)
3690 {
3691 TCGv tmp = tcg_temp_new_i32();
3692 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3693 return tmp;
3694 }
3695
3696 static void neon_store_scratch(int scratch, TCGv var)
3697 {
3698 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3699 tcg_temp_free_i32(var);
3700 }
3701
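/* Fetch the scalar operand for a by-scalar Neon operation. For 16-bit
 * elements the element index is folded into the register number, so pick
 * the right halfword and duplicate it into both halves of the result.
 */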
3702 static inline TCGv neon_get_scalar(int size, int reg)
3703 {
3704 TCGv tmp;
3705 if (size == 1) {
3706 tmp = neon_load_reg(reg & 7, reg >> 4);
3707 if (reg & 8) {
3708 gen_neon_dup_high16(tmp);
3709 } else {
3710 gen_neon_dup_low16(tmp);
3711 }
3712 } else {
3713 tmp = neon_load_reg(reg & 15, reg >> 4);
3714 }
3715 return tmp;
3716 }
3717
3718 static int gen_neon_unzip(int rd, int rm, int size, int q)
3719 {
3720 TCGv tmp, tmp2;
3721 if (!q && size == 2) {
3722 return 1;
3723 }
3724 tmp = tcg_const_i32(rd);
3725 tmp2 = tcg_const_i32(rm);
3726 if (q) {
3727 switch (size) {
3728 case 0:
3729 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3730 break;
3731 case 1:
3732 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3733 break;
3734 case 2:
3735 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3736 break;
3737 default:
3738 abort();
3739 }
3740 } else {
3741 switch (size) {
3742 case 0:
3743 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3744 break;
3745 case 1:
3746 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3747 break;
3748 default:
3749 abort();
3750 }
3751 }
3752 tcg_temp_free_i32(tmp);
3753 tcg_temp_free_i32(tmp2);
3754 return 0;
3755 }
3756
3757 static int gen_neon_zip(int rd, int rm, int size, int q)
3758 {
3759 TCGv tmp, tmp2;
3760 if (!q && size == 2) {
3761 return 1;
3762 }
3763 tmp = tcg_const_i32(rd);
3764 tmp2 = tcg_const_i32(rm);
3765 if (q) {
3766 switch (size) {
3767 case 0:
3768 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3769 break;
3770 case 1:
3771 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3772 break;
3773 case 2:
3774 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3775 break;
3776 default:
3777 abort();
3778 }
3779 } else {
3780 switch (size) {
3781 case 0:
3782 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3783 break;
3784 case 1:
3785 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3786 break;
3787 default:
3788 abort();
3789 }
3790 }
3791 tcg_temp_free_i32(tmp);
3792 tcg_temp_free_i32(tmp2);
3793 return 0;
3794 }
3795
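/* VTRN helpers: transpose pairs of 8-bit or 16-bit elements between
 * t0 and t1.
 */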
3796 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3797 {
3798 TCGv rd, tmp;
3799
3800 rd = tcg_temp_new_i32();
3801 tmp = tcg_temp_new_i32();
3802
3803 tcg_gen_shli_i32(rd, t0, 8);
3804 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3805 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3806 tcg_gen_or_i32(rd, rd, tmp);
3807
3808 tcg_gen_shri_i32(t1, t1, 8);
3809 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3810 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3811 tcg_gen_or_i32(t1, t1, tmp);
3812 tcg_gen_mov_i32(t0, rd);
3813
3814 tcg_temp_free_i32(tmp);
3815 tcg_temp_free_i32(rd);
3816 }
3817
3818 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3819 {
3820 TCGv rd, tmp;
3821
3822 rd = tcg_temp_new_i32();
3823 tmp = tcg_temp_new_i32();
3824
3825 tcg_gen_shli_i32(rd, t0, 16);
3826 tcg_gen_andi_i32(tmp, t1, 0xffff);
3827 tcg_gen_or_i32(rd, rd, tmp);
3828 tcg_gen_shri_i32(t1, t1, 16);
3829 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3830 tcg_gen_or_i32(t1, t1, tmp);
3831 tcg_gen_mov_i32(t0, rd);
3832
3833 tcg_temp_free_i32(tmp);
3834 tcg_temp_free_i32(rd);
3835 }
3836
3837
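/* Parameters for the VLD/VST multiple-structures forms, indexed by the op
 * field (bits [11:8]) of the instruction: register count, interleave factor
 * and register spacing.
 */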
3838 static struct {
3839 int nregs;
3840 int interleave;
3841 int spacing;
3842 } neon_ls_element_type[11] = {
3843 {4, 4, 1},
3844 {4, 4, 2},
3845 {4, 1, 1},
3846 {4, 2, 1},
3847 {3, 3, 1},
3848 {3, 3, 2},
3849 {3, 1, 1},
3850 {1, 1, 1},
3851 {2, 2, 1},
3852 {2, 2, 2},
3853 {2, 1, 1}
3854 };
3855
3856 /* Translate a NEON load/store element instruction. Return nonzero if the
3857 instruction is invalid. */
3858 static int disas_neon_ls_insn(CPUState *env, DisasContext *s, uint32_t insn)
3859 {
3860 int rd, rn, rm;
3861 int op;
3862 int nregs;
3863 int interleave;
3864 int spacing;
3865 int stride;
3866 int size;
3867 int reg;
3868 int pass;
3869 int load;
3870 int shift;
3871 int n;
3872 TCGv addr;
3873 TCGv tmp;
3874 TCGv tmp2;
3875 TCGv_i64 tmp64;
3876
3877 if (!s->vfp_enabled)
3878 return 1;
3879 VFP_DREG_D(rd, insn);
3880 rn = (insn >> 16) & 0xf;
3881 rm = insn & 0xf;
3882 load = (insn & (1 << 21)) != 0;
3883 if ((insn & (1 << 23)) == 0) {
3884 /* Load store all elements. */
3885 op = (insn >> 8) & 0xf;
3886 size = (insn >> 6) & 3;
3887 if (op > 10)
3888 return 1;
3889 /* Catch UNDEF cases for bad values of the align field */
3890 switch (op & 0xc) {
3891 case 4:
3892 if (((insn >> 5) & 1) == 1) {
3893 return 1;
3894 }
3895 break;
3896 case 8:
3897 if (((insn >> 4) & 3) == 3) {
3898 return 1;
3899 }
3900 break;
3901 default:
3902 break;
3903 }
3904 nregs = neon_ls_element_type[op].nregs;
3905 interleave = neon_ls_element_type[op].interleave;
3906 spacing = neon_ls_element_type[op].spacing;
3907 if (size == 3 && (interleave | spacing) != 1)
3908 return 1;
3909 addr = tcg_temp_new_i32();
3910 load_reg_var(s, addr, rn);
3911 stride = (1 << size) * interleave;
3912 for (reg = 0; reg < nregs; reg++) {
3913 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3914 load_reg_var(s, addr, rn);
3915 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3916 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3917 load_reg_var(s, addr, rn);
3918 tcg_gen_addi_i32(addr, addr, 1 << size);
3919 }
3920 if (size == 3) {
3921 if (load) {
3922 tmp64 = gen_ld64(addr, IS_USER(s));
3923 neon_store_reg64(tmp64, rd);
3924 tcg_temp_free_i64(tmp64);
3925 } else {
3926 tmp64 = tcg_temp_new_i64();
3927 neon_load_reg64(tmp64, rd);
3928 gen_st64(tmp64, addr, IS_USER(s));
3929 }
3930 tcg_gen_addi_i32(addr, addr, stride);
3931 } else {
3932 for (pass = 0; pass < 2; pass++) {
3933 if (size == 2) {
3934 if (load) {
3935 tmp = gen_ld32(addr, IS_USER(s));
3936 neon_store_reg(rd, pass, tmp);
3937 } else {
3938 tmp = neon_load_reg(rd, pass);
3939 gen_st32(tmp, addr, IS_USER(s));
3940 }
3941 tcg_gen_addi_i32(addr, addr, stride);
3942 } else if (size == 1) {
3943 if (load) {
3944 tmp = gen_ld16u(addr, IS_USER(s));
3945 tcg_gen_addi_i32(addr, addr, stride);
3946 tmp2 = gen_ld16u(addr, IS_USER(s));
3947 tcg_gen_addi_i32(addr, addr, stride);
3948 tcg_gen_shli_i32(tmp2, tmp2, 16);
3949 tcg_gen_or_i32(tmp, tmp, tmp2);
3950 tcg_temp_free_i32(tmp2);
3951 neon_store_reg(rd, pass, tmp);
3952 } else {
3953 tmp = neon_load_reg(rd, pass);
3954 tmp2 = tcg_temp_new_i32();
3955 tcg_gen_shri_i32(tmp2, tmp, 16);
3956 gen_st16(tmp, addr, IS_USER(s));
3957 tcg_gen_addi_i32(addr, addr, stride);
3958 gen_st16(tmp2, addr, IS_USER(s));
3959 tcg_gen_addi_i32(addr, addr, stride);
3960 }
3961 } else /* size == 0 */ {
3962 if (load) {
3963 TCGV_UNUSED(tmp2);
3964 for (n = 0; n < 4; n++) {
3965 tmp = gen_ld8u(addr, IS_USER(s));
3966 tcg_gen_addi_i32(addr, addr, stride);
3967 if (n == 0) {
3968 tmp2 = tmp;
3969 } else {
3970 tcg_gen_shli_i32(tmp, tmp, n * 8);
3971 tcg_gen_or_i32(tmp2, tmp2, tmp);
3972 tcg_temp_free_i32(tmp);
3973 }
3974 }
3975 neon_store_reg(rd, pass, tmp2);
3976 } else {
3977 tmp2 = neon_load_reg(rd, pass);
3978 for (n = 0; n < 4; n++) {
3979 tmp = tcg_temp_new_i32();
3980 if (n == 0) {
3981 tcg_gen_mov_i32(tmp, tmp2);
3982 } else {
3983 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3984 }
3985 gen_st8(tmp, addr, IS_USER(s));
3986 tcg_gen_addi_i32(addr, addr, stride);
3987 }
3988 tcg_temp_free_i32(tmp2);
3989 }
3990 }
3991 }
3992 }
3993 rd += spacing;
3994 }
3995 tcg_temp_free_i32(addr);
3996 stride = nregs * 8;
3997 } else {
3998 size = (insn >> 10) & 3;
3999 if (size == 3) {
4000 /* Load single element to all lanes. */
4001 int a = (insn >> 4) & 1;
4002 if (!load) {
4003 return 1;
4004 }
4005 size = (insn >> 6) & 3;
4006 nregs = ((insn >> 8) & 3) + 1;
4007
4008 if (size == 3) {
4009 if (nregs != 4 || a == 0) {
4010 return 1;
4011 }
4012 /* For VLD4, size == 3 with a == 1 means 32-bit elements at 16-byte alignment */
4013 size = 2;
4014 }
4015 if (nregs == 1 && a == 1 && size == 0) {
4016 return 1;
4017 }
4018 if (nregs == 3 && a == 1) {
4019 return 1;
4020 }
4021 addr = tcg_temp_new_i32();
4022 load_reg_var(s, addr, rn);
4023 if (nregs == 1) {
4024 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4025 tmp = gen_load_and_replicate(s, addr, size);
4026 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4027 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4028 if (insn & (1 << 5)) {
4029 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4030 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4031 }
4032 tcg_temp_free_i32(tmp);
4033 } else {
4034 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4035 stride = (insn & (1 << 5)) ? 2 : 1;
4036 for (reg = 0; reg < nregs; reg++) {
4037 tmp = gen_load_and_replicate(s, addr, size);
4038 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4039 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4040 tcg_temp_free_i32(tmp);
4041 tcg_gen_addi_i32(addr, addr, 1 << size);
4042 rd += stride;
4043 }
4044 }
4045 tcg_temp_free_i32(addr);
4046 stride = (1 << size) * nregs;
4047 } else {
4048 /* Single element. */
4049 int idx = (insn >> 4) & 0xf;
4050 pass = (insn >> 7) & 1;
4051 switch (size) {
4052 case 0:
4053 shift = ((insn >> 5) & 3) * 8;
4054 stride = 1;
4055 break;
4056 case 1:
4057 shift = ((insn >> 6) & 1) * 16;
4058 stride = (insn & (1 << 5)) ? 2 : 1;
4059 break;
4060 case 2:
4061 shift = 0;
4062 stride = (insn & (1 << 6)) ? 2 : 1;
4063 break;
4064 default:
4065 abort();
4066 }
4067 nregs = ((insn >> 8) & 3) + 1;
4068 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4069 switch (nregs) {
4070 case 1:
4071 if (((idx & (1 << size)) != 0) ||
4072 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4073 return 1;
4074 }
4075 break;
4076 case 3:
4077 if ((idx & 1) != 0) {
4078 return 1;
4079 }
4080 /* fall through */
4081 case 2:
4082 if (size == 2 && (idx & 2) != 0) {
4083 return 1;
4084 }
4085 break;
4086 case 4:
4087 if ((size == 2) && ((idx & 3) == 3)) {
4088 return 1;
4089 }
4090 break;
4091 default:
4092 abort();
4093 }
4094 if ((rd + stride * (nregs - 1)) > 31) {
4095 /* Attempts to write off the end of the register file
4096 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4097 * neon_load_reg() would write off the end of the array.
4098 */
4099 return 1;
4100 }
4101 addr = tcg_temp_new_i32();
4102 load_reg_var(s, addr, rn);
4103 for (reg = 0; reg < nregs; reg++) {
4104 if (load) {
4105 switch (size) {
4106 case 0:
4107 tmp = gen_ld8u(addr, IS_USER(s));
4108 break;
4109 case 1:
4110 tmp = gen_ld16u(addr, IS_USER(s));
4111 break;
4112 case 2:
4113 tmp = gen_ld32(addr, IS_USER(s));
4114 break;
4115 default: /* Avoid compiler warnings. */
4116 abort();
4117 }
4118 if (size != 2) {
4119 tmp2 = neon_load_reg(rd, pass);
4120 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
4121 tcg_temp_free_i32(tmp2);
4122 }
4123 neon_store_reg(rd, pass, tmp);
4124 } else { /* Store */
4125 tmp = neon_load_reg(rd, pass);
4126 if (shift)
4127 tcg_gen_shri_i32(tmp, tmp, shift);
4128 switch (size) {
4129 case 0:
4130 gen_st8(tmp, addr, IS_USER(s));
4131 break;
4132 case 1:
4133 gen_st16(tmp, addr, IS_USER(s));
4134 break;
4135 case 2:
4136 gen_st32(tmp, addr, IS_USER(s));
4137 break;
4138 }
4139 }
4140 rd += stride;
4141 tcg_gen_addi_i32(addr, addr, 1 << size);
4142 }
4143 tcg_temp_free_i32(addr);
4144 stride = nregs * (1 << size);
4145 }
4146 }
4147 if (rm != 15) {
4148 TCGv base;
4149
4150 base = load_reg(s, rn);
4151 if (rm == 13) {
4152 tcg_gen_addi_i32(base, base, stride);
4153 } else {
4154 TCGv index;
4155 index = load_reg(s, rm);
4156 tcg_gen_add_i32(base, base, index);
4157 tcg_temp_free_i32(index);
4158 }
4159 store_reg(s, rn, base);
4160 }
4161 return 0;
4162 }
4163
4164 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4165 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4166 {
4167 tcg_gen_and_i32(t, t, c);
4168 tcg_gen_andc_i32(f, f, c);
4169 tcg_gen_or_i32(dest, t, f);
4170 }
4171
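/* Narrowing and saturating-narrow helpers: convert a 64-bit Neon value into
 * a 32-bit result for the various element sizes, with the saturating
 * variants going via cpu_env so they can update the QC (saturation) flag.
 */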
4172 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4173 {
4174 switch (size) {
4175 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4176 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4177 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4178 default: abort();
4179 }
4180 }
4181
4182 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4183 {
4184 switch (size) {
4185 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4186 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4187 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4188 default: abort();
4189 }
4190 }
4191
4192 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4193 {
4194 switch (size) {
4195 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4196 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4197 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4198 default: abort();
4199 }
4200 }
4201
4202 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4203 {
4204 switch (size) {
4205 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4206 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4207 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4208 default: abort();
4209 }
4210 }
4211
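/* Shift helper used by the narrowing shift operations: 'q' selects the
 * rounding shift helpers rather than plain shifts, and 'u' selects the
 * unsigned rather than signed variants.
 */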
4212 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4213 int q, int u)
4214 {
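    /* Called from the shift-and-narrow path, where the element size is
     * always 16 or 32 bits, hence no case 0 below. q selects the rounding
     * variant and u the signedness of the shift.
     */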
4215 if (q) {
4216 if (u) {
4217 switch (size) {
4218 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4219 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4220 default: abort();
4221 }
4222 } else {
4223 switch (size) {
4224 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4225 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4226 default: abort();
4227 }
4228 }
4229 } else {
4230 if (u) {
4231 switch (size) {
4232 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4233 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4234 default: abort();
4235 }
4236 } else {
4237 switch (size) {
4238 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4239 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4240 default: abort();
4241 }
4242 }
4243 }
4244 }
4245
4246 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4247 {
4248 if (u) {
4249 switch (size) {
4250 case 0: gen_helper_neon_widen_u8(dest, src); break;
4251 case 1: gen_helper_neon_widen_u16(dest, src); break;
4252 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4253 default: abort();
4254 }
4255 } else {
4256 switch (size) {
4257 case 0: gen_helper_neon_widen_s8(dest, src); break;
4258 case 1: gen_helper_neon_widen_s16(dest, src); break;
4259 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4260 default: abort();
4261 }
4262 }
4263 tcg_temp_free_i32(src);
4264 }
4265
4266 static inline void gen_neon_addl(int size)
4267 {
4268 switch (size) {
4269 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4270 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4271 case 2: tcg_gen_add_i64(CPU_V001); break;
4272 default: abort();
4273 }
4274 }
4275
4276 static inline void gen_neon_subl(int size)
4277 {
4278 switch (size) {
4279 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4280 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4281 case 2: tcg_gen_sub_i64(CPU_V001); break;
4282 default: abort();
4283 }
4284 }
4285
4286 static inline void gen_neon_negl(TCGv_i64 var, int size)
4287 {
4288 switch (size) {
4289 case 0: gen_helper_neon_negl_u16(var, var); break;
4290 case 1: gen_helper_neon_negl_u32(var, var); break;
4291 case 2: gen_helper_neon_negl_u64(var, var); break;
4292 default: abort();
4293 }
4294 }
4295
4296 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4297 {
4298 switch (size) {
4299 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4300 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4301 default: abort();
4302 }
4303 }
4304
4305 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4306 {
4307 TCGv_i64 tmp;
4308
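    /* Dispatch on (size << 1) | u: even cases are signed, odd unsigned.
     * 32-bit sources (size 2) use the generic 32x32->64 multiplies rather
     * than Neon helpers.
     */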
4309 switch ((size << 1) | u) {
4310 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4311 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4312 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4313 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4314 case 4:
4315 tmp = gen_muls_i64_i32(a, b);
4316 tcg_gen_mov_i64(dest, tmp);
4317 tcg_temp_free_i64(tmp);
4318 break;
4319 case 5:
4320 tmp = gen_mulu_i64_i32(a, b);
4321 tcg_gen_mov_i64(dest, tmp);
4322 tcg_temp_free_i64(tmp);
4323 break;
4324 default: abort();
4325 }
4326
4327 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4328        Don't forget to free them here. */
4329 if (size < 2) {
4330 tcg_temp_free_i32(a);
4331 tcg_temp_free_i32(b);
4332 }
4333 }
4334
4335 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4336 {
4337 if (op) {
4338 if (u) {
4339 gen_neon_unarrow_sats(size, dest, src);
4340 } else {
4341 gen_neon_narrow(size, dest, src);
4342 }
4343 } else {
4344 if (u) {
4345 gen_neon_narrow_satu(size, dest, src);
4346 } else {
4347 gen_neon_narrow_sats(size, dest, src);
4348 }
4349 }
4350 }
4351
4352 /* Symbolic constants for op fields for Neon 3-register same-length.
4353 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4354 * table A7-9.
4355 */
4356 #define NEON_3R_VHADD 0
4357 #define NEON_3R_VQADD 1
4358 #define NEON_3R_VRHADD 2
4359 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4360 #define NEON_3R_VHSUB 4
4361 #define NEON_3R_VQSUB 5
4362 #define NEON_3R_VCGT 6
4363 #define NEON_3R_VCGE 7
4364 #define NEON_3R_VSHL 8
4365 #define NEON_3R_VQSHL 9
4366 #define NEON_3R_VRSHL 10
4367 #define NEON_3R_VQRSHL 11
4368 #define NEON_3R_VMAX 12
4369 #define NEON_3R_VMIN 13
4370 #define NEON_3R_VABD 14
4371 #define NEON_3R_VABA 15
4372 #define NEON_3R_VADD_VSUB 16
4373 #define NEON_3R_VTST_VCEQ 17
4374 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4375 #define NEON_3R_VMUL 19
4376 #define NEON_3R_VPMAX 20
4377 #define NEON_3R_VPMIN 21
4378 #define NEON_3R_VQDMULH_VQRDMULH 22
4379 #define NEON_3R_VPADD 23
4380 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4381 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4382 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4383 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4384 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4385 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
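/* op is assembled in disas_neon_data_insn() as
 *   ((insn >> 7) & 0x1e) | ((insn >> 4) & 1)
 * e.g. insn bits [11:8] = 0b1000 with bit 4 clear give op 16
 * (NEON_3R_VADD_VSUB); the U bit then distinguishes VADD from VSUB.
 */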
4386
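/* Each entry is a bitmap of the size values the op accepts: bit n set
 * means size n is allowed (see the check in disas_neon_data_insn).
 * 0x7 covers the 8/16/32-bit forms, 0xf also the size == 3 encodings
 * (handled as 64-bit elementwise ops), and 0x5 the float forms, where
 * bit 1 of the size field selects the operation and bit 0 must be zero.
 */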
4387 static const uint8_t neon_3r_sizes[] = {
4388 [NEON_3R_VHADD] = 0x7,
4389 [NEON_3R_VQADD] = 0xf,
4390 [NEON_3R_VRHADD] = 0x7,
4391 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4392 [NEON_3R_VHSUB] = 0x7,
4393 [NEON_3R_VQSUB] = 0xf,
4394 [NEON_3R_VCGT] = 0x7,
4395 [NEON_3R_VCGE] = 0x7,
4396 [NEON_3R_VSHL] = 0xf,
4397 [NEON_3R_VQSHL] = 0xf,
4398 [NEON_3R_VRSHL] = 0xf,
4399 [NEON_3R_VQRSHL] = 0xf,
4400 [NEON_3R_VMAX] = 0x7,
4401 [NEON_3R_VMIN] = 0x7,
4402 [NEON_3R_VABD] = 0x7,
4403 [NEON_3R_VABA] = 0x7,
4404 [NEON_3R_VADD_VSUB] = 0xf,
4405 [NEON_3R_VTST_VCEQ] = 0x7,
4406 [NEON_3R_VML] = 0x7,
4407 [NEON_3R_VMUL] = 0x7,
4408 [NEON_3R_VPMAX] = 0x7,
4409 [NEON_3R_VPMIN] = 0x7,
4410 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4411 [NEON_3R_VPADD] = 0x7,
4412 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4413 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4414 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4415 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4416 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4417 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4418 };
4419
4420 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4421 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4422 * table A7-13.
4423 */
4424 #define NEON_2RM_VREV64 0
4425 #define NEON_2RM_VREV32 1
4426 #define NEON_2RM_VREV16 2
4427 #define NEON_2RM_VPADDL 4
4428 #define NEON_2RM_VPADDL_U 5
4429 #define NEON_2RM_VCLS 8
4430 #define NEON_2RM_VCLZ 9
4431 #define NEON_2RM_VCNT 10
4432 #define NEON_2RM_VMVN 11
4433 #define NEON_2RM_VPADAL 12
4434 #define NEON_2RM_VPADAL_U 13
4435 #define NEON_2RM_VQABS 14
4436 #define NEON_2RM_VQNEG 15
4437 #define NEON_2RM_VCGT0 16
4438 #define NEON_2RM_VCGE0 17
4439 #define NEON_2RM_VCEQ0 18
4440 #define NEON_2RM_VCLE0 19
4441 #define NEON_2RM_VCLT0 20
4442 #define NEON_2RM_VABS 22
4443 #define NEON_2RM_VNEG 23
4444 #define NEON_2RM_VCGT0_F 24
4445 #define NEON_2RM_VCGE0_F 25
4446 #define NEON_2RM_VCEQ0_F 26
4447 #define NEON_2RM_VCLE0_F 27
4448 #define NEON_2RM_VCLT0_F 28
4449 #define NEON_2RM_VABS_F 30
4450 #define NEON_2RM_VNEG_F 31
4451 #define NEON_2RM_VSWP 32
4452 #define NEON_2RM_VTRN 33
4453 #define NEON_2RM_VUZP 34
4454 #define NEON_2RM_VZIP 35
4455 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4456 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4457 #define NEON_2RM_VSHLL 38
4458 #define NEON_2RM_VCVT_F16_F32 44
4459 #define NEON_2RM_VCVT_F32_F16 46
4460 #define NEON_2RM_VRECPE 56
4461 #define NEON_2RM_VRSQRTE 57
4462 #define NEON_2RM_VRECPE_F 58
4463 #define NEON_2RM_VRSQRTE_F 59
4464 #define NEON_2RM_VCVT_FS 60
4465 #define NEON_2RM_VCVT_FU 61
4466 #define NEON_2RM_VCVT_SF 62
4467 #define NEON_2RM_VCVT_UF 63
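/* op is assembled in disas_neon_data_insn() as
 *   ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf)
 * e.g. insn bits [17:16] = 0b11 and [10:7] = 0b1100 give op 60,
 * NEON_2RM_VCVT_FS.
 */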
4468
4469 static int neon_2rm_is_float_op(int op)
4470 {
4471 /* Return true if this neon 2reg-misc op is float-to-float */
4472 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4473 op >= NEON_2RM_VRECPE_F);
4474 }
4475
4476 /* Each entry in this array has bit n set if the insn allows
4477 * size value n (otherwise it will UNDEF). Since unallocated
4478 * op values will have no bits set they always UNDEF.
4479 */
4480 static const uint8_t neon_2rm_sizes[] = {
4481 [NEON_2RM_VREV64] = 0x7,
4482 [NEON_2RM_VREV32] = 0x3,
4483 [NEON_2RM_VREV16] = 0x1,
4484 [NEON_2RM_VPADDL] = 0x7,
4485 [NEON_2RM_VPADDL_U] = 0x7,
4486 [NEON_2RM_VCLS] = 0x7,
4487 [NEON_2RM_VCLZ] = 0x7,
4488 [NEON_2RM_VCNT] = 0x1,
4489 [NEON_2RM_VMVN] = 0x1,
4490 [NEON_2RM_VPADAL] = 0x7,
4491 [NEON_2RM_VPADAL_U] = 0x7,
4492 [NEON_2RM_VQABS] = 0x7,
4493 [NEON_2RM_VQNEG] = 0x7,
4494 [NEON_2RM_VCGT0] = 0x7,
4495 [NEON_2RM_VCGE0] = 0x7,
4496 [NEON_2RM_VCEQ0] = 0x7,
4497 [NEON_2RM_VCLE0] = 0x7,
4498 [NEON_2RM_VCLT0] = 0x7,
4499 [NEON_2RM_VABS] = 0x7,
4500 [NEON_2RM_VNEG] = 0x7,
4501 [NEON_2RM_VCGT0_F] = 0x4,
4502 [NEON_2RM_VCGE0_F] = 0x4,
4503 [NEON_2RM_VCEQ0_F] = 0x4,
4504 [NEON_2RM_VCLE0_F] = 0x4,
4505 [NEON_2RM_VCLT0_F] = 0x4,
4506 [NEON_2RM_VABS_F] = 0x4,
4507 [NEON_2RM_VNEG_F] = 0x4,
4508 [NEON_2RM_VSWP] = 0x1,
4509 [NEON_2RM_VTRN] = 0x7,
4510 [NEON_2RM_VUZP] = 0x7,
4511 [NEON_2RM_VZIP] = 0x7,
4512 [NEON_2RM_VMOVN] = 0x7,
4513 [NEON_2RM_VQMOVN] = 0x7,
4514 [NEON_2RM_VSHLL] = 0x7,
4515 [NEON_2RM_VCVT_F16_F32] = 0x2,
4516 [NEON_2RM_VCVT_F32_F16] = 0x2,
4517 [NEON_2RM_VRECPE] = 0x4,
4518 [NEON_2RM_VRSQRTE] = 0x4,
4519 [NEON_2RM_VRECPE_F] = 0x4,
4520 [NEON_2RM_VRSQRTE_F] = 0x4,
4521 [NEON_2RM_VCVT_FS] = 0x4,
4522 [NEON_2RM_VCVT_FU] = 0x4,
4523 [NEON_2RM_VCVT_SF] = 0x4,
4524 [NEON_2RM_VCVT_UF] = 0x4,
4525 };
4526
4527 /* Translate a NEON data processing instruction. Return nonzero if the
4528 instruction is invalid.
4529 We process data in a mixture of 32-bit and 64-bit chunks.
4530 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4531
4532 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4533 {
4534 int op;
4535 int q;
4536 int rd, rn, rm;
4537 int size;
4538 int shift;
4539 int pass;
4540 int count;
4541 int pairwise;
4542 int u;
4543 uint32_t imm, mask;
4544 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4545 TCGv_i64 tmp64;
4546
4547 if (!s->vfp_enabled)
4548 return 1;
4549 q = (insn & (1 << 6)) != 0;
4550 u = (insn >> 24) & 1;
4551 VFP_DREG_D(rd, insn);
4552 VFP_DREG_N(rn, insn);
4553 VFP_DREG_M(rm, insn);
4554 size = (insn >> 20) & 3;
4555 if ((insn & (1 << 23)) == 0) {
4556 /* Three register same length. */
4557 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4558 /* Catch invalid op and bad size combinations: UNDEF */
4559 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4560 return 1;
4561 }
4562 /* All insns of this form UNDEF for either this condition or the
4563 * superset of cases "Q==1"; we catch the latter later.
4564 */
4565 if (q && ((rd | rn | rm) & 1)) {
4566 return 1;
4567 }
4568 if (size == 3 && op != NEON_3R_LOGIC) {
4569 /* 64-bit element instructions. */
4570 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4571 neon_load_reg64(cpu_V0, rn + pass);
4572 neon_load_reg64(cpu_V1, rm + pass);
4573 switch (op) {
4574 case NEON_3R_VQADD:
4575 if (u) {
4576 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4577 cpu_V0, cpu_V1);
4578 } else {
4579 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4580 cpu_V0, cpu_V1);
4581 }
4582 break;
4583 case NEON_3R_VQSUB:
4584 if (u) {
4585 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4586 cpu_V0, cpu_V1);
4587 } else {
4588 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4589 cpu_V0, cpu_V1);
4590 }
4591 break;
4592 case NEON_3R_VSHL:
4593 if (u) {
4594 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4595 } else {
4596 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4597 }
4598 break;
4599 case NEON_3R_VQSHL:
4600 if (u) {
4601 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4602 cpu_V1, cpu_V0);
4603 } else {
4604 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4605 cpu_V1, cpu_V0);
4606 }
4607 break;
4608 case NEON_3R_VRSHL:
4609 if (u) {
4610 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4611 } else {
4612 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4613 }
4614 break;
4615 case NEON_3R_VQRSHL:
4616 if (u) {
4617 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4618 cpu_V1, cpu_V0);
4619 } else {
4620 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4621 cpu_V1, cpu_V0);
4622 }
4623 break;
4624 case NEON_3R_VADD_VSUB:
4625 if (u) {
4626 tcg_gen_sub_i64(CPU_V001);
4627 } else {
4628 tcg_gen_add_i64(CPU_V001);
4629 }
4630 break;
4631 default:
4632 abort();
4633 }
4634 neon_store_reg64(cpu_V0, rd + pass);
4635 }
4636 return 0;
4637 }
4638 pairwise = 0;
4639 switch (op) {
4640 case NEON_3R_VSHL:
4641 case NEON_3R_VQSHL:
4642 case NEON_3R_VRSHL:
4643 case NEON_3R_VQRSHL:
4644 {
4645 int rtmp;
4646 /* Shift instruction operands are reversed. */
4647 rtmp = rn;
4648 rn = rm;
4649 rm = rtmp;
4650 }
4651 break;
4652 case NEON_3R_VPADD:
4653 if (u) {
4654 return 1;
4655 }
4656 /* Fall through */
4657 case NEON_3R_VPMAX:
4658 case NEON_3R_VPMIN:
4659 pairwise = 1;
4660 break;
4661 case NEON_3R_FLOAT_ARITH:
4662 pairwise = (u && size < 2); /* if VPADD (float) */
4663 break;
4664 case NEON_3R_FLOAT_MINMAX:
4665 pairwise = u; /* if VPMIN/VPMAX (float) */
4666 break;
4667 case NEON_3R_FLOAT_CMP:
4668 if (!u && size) {
4669 /* no encoding for U=0 C=1x */
4670 return 1;
4671 }
4672 break;
4673 case NEON_3R_FLOAT_ACMP:
4674 if (!u) {
4675 return 1;
4676 }
4677 break;
4678 case NEON_3R_VRECPS_VRSQRTS:
4679 if (u) {
4680 return 1;
4681 }
4682 break;
4683 case NEON_3R_VMUL:
4684 if (u && (size != 0)) {
4685 /* UNDEF on invalid size for polynomial subcase */
4686 return 1;
4687 }
4688 break;
4689 default:
4690 break;
4691 }
4692
4693 if (pairwise && q) {
4694 /* All the pairwise insns UNDEF if Q is set */
4695 return 1;
4696 }
4697
4698 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4699
4700 if (pairwise) {
4701 /* Pairwise. */
4702 if (pass < 1) {
4703 tmp = neon_load_reg(rn, 0);
4704 tmp2 = neon_load_reg(rn, 1);
4705 } else {
4706 tmp = neon_load_reg(rm, 0);
4707 tmp2 = neon_load_reg(rm, 1);
4708 }
4709 } else {
4710 /* Elementwise. */
4711 tmp = neon_load_reg(rn, pass);
4712 tmp2 = neon_load_reg(rm, pass);
4713 }
4714 switch (op) {
4715 case NEON_3R_VHADD:
4716 GEN_NEON_INTEGER_OP(hadd);
4717 break;
4718 case NEON_3R_VQADD:
4719 GEN_NEON_INTEGER_OP_ENV(qadd);
4720 break;
4721 case NEON_3R_VRHADD:
4722 GEN_NEON_INTEGER_OP(rhadd);
4723 break;
4724 case NEON_3R_LOGIC: /* Logic ops. */
4725 switch ((u << 2) | size) {
4726 case 0: /* VAND */
4727 tcg_gen_and_i32(tmp, tmp, tmp2);
4728 break;
4729 case 1: /* BIC */
4730 tcg_gen_andc_i32(tmp, tmp, tmp2);
4731 break;
4732 case 2: /* VORR */
4733 tcg_gen_or_i32(tmp, tmp, tmp2);
4734 break;
4735 case 3: /* VORN */
4736 tcg_gen_orc_i32(tmp, tmp, tmp2);
4737 break;
4738 case 4: /* VEOR */
4739 tcg_gen_xor_i32(tmp, tmp, tmp2);
4740 break;
4741 case 5: /* VBSL */
4742 tmp3 = neon_load_reg(rd, pass);
4743 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4744 tcg_temp_free_i32(tmp3);
4745 break;
4746 case 6: /* VBIT */
4747 tmp3 = neon_load_reg(rd, pass);
4748 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4749 tcg_temp_free_i32(tmp3);
4750 break;
4751 case 7: /* VBIF */
4752 tmp3 = neon_load_reg(rd, pass);
4753 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4754 tcg_temp_free_i32(tmp3);
4755 break;
4756 }
4757 break;
4758 case NEON_3R_VHSUB:
4759 GEN_NEON_INTEGER_OP(hsub);
4760 break;
4761 case NEON_3R_VQSUB:
4762 GEN_NEON_INTEGER_OP_ENV(qsub);
4763 break;
4764 case NEON_3R_VCGT:
4765 GEN_NEON_INTEGER_OP(cgt);
4766 break;
4767 case NEON_3R_VCGE:
4768 GEN_NEON_INTEGER_OP(cge);
4769 break;
4770 case NEON_3R_VSHL:
4771 GEN_NEON_INTEGER_OP(shl);
4772 break;
4773 case NEON_3R_VQSHL:
4774 GEN_NEON_INTEGER_OP_ENV(qshl);
4775 break;
4776 case NEON_3R_VRSHL:
4777 GEN_NEON_INTEGER_OP(rshl);
4778 break;
4779 case NEON_3R_VQRSHL:
4780 GEN_NEON_INTEGER_OP_ENV(qrshl);
4781 break;
4782 case NEON_3R_VMAX:
4783 GEN_NEON_INTEGER_OP(max);
4784 break;
4785 case NEON_3R_VMIN:
4786 GEN_NEON_INTEGER_OP(min);
4787 break;
4788 case NEON_3R_VABD:
4789 GEN_NEON_INTEGER_OP(abd);
4790 break;
4791 case NEON_3R_VABA:
4792 GEN_NEON_INTEGER_OP(abd);
4793 tcg_temp_free_i32(tmp2);
4794 tmp2 = neon_load_reg(rd, pass);
4795 gen_neon_add(size, tmp, tmp2);
4796 break;
4797 case NEON_3R_VADD_VSUB:
4798 if (!u) { /* VADD */
4799 gen_neon_add(size, tmp, tmp2);
4800 } else { /* VSUB */
4801 switch (size) {
4802 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4803 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4804 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4805 default: abort();
4806 }
4807 }
4808 break;
4809 case NEON_3R_VTST_VCEQ:
4810 if (!u) { /* VTST */
4811 switch (size) {
4812 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4813 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4814 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4815 default: abort();
4816 }
4817 } else { /* VCEQ */
4818 switch (size) {
4819 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4820 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4821 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4822 default: abort();
4823 }
4824 }
4825 break;
4826             case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
4827 switch (size) {
4828 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4829 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4830 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4831 default: abort();
4832 }
4833 tcg_temp_free_i32(tmp2);
4834 tmp2 = neon_load_reg(rd, pass);
4835 if (u) { /* VMLS */
4836 gen_neon_rsb(size, tmp, tmp2);
4837 } else { /* VMLA */
4838 gen_neon_add(size, tmp, tmp2);
4839 }
4840 break;
4841 case NEON_3R_VMUL:
4842 if (u) { /* polynomial */
4843 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4844 } else { /* Integer */
4845 switch (size) {
4846 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4847 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4848 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4849 default: abort();
4850 }
4851 }
4852 break;
4853 case NEON_3R_VPMAX:
4854 GEN_NEON_INTEGER_OP(pmax);
4855 break;
4856 case NEON_3R_VPMIN:
4857 GEN_NEON_INTEGER_OP(pmin);
4858 break;
4859 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4860 if (!u) { /* VQDMULH */
4861 switch (size) {
4862 case 1:
4863 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4864 break;
4865 case 2:
4866 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4867 break;
4868 default: abort();
4869 }
4870 } else { /* VQRDMULH */
4871 switch (size) {
4872 case 1:
4873 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4874 break;
4875 case 2:
4876 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4877 break;
4878 default: abort();
4879 }
4880 }
4881 break;
4882 case NEON_3R_VPADD:
4883 switch (size) {
4884 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4885 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4886 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4887 default: abort();
4888 }
4889 break;
4890 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4891 {
4892 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4893 switch ((u << 2) | size) {
4894 case 0: /* VADD */
4895 case 4: /* VPADD */
4896 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4897 break;
4898 case 2: /* VSUB */
4899 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
4900 break;
4901 case 6: /* VABD */
4902 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
4903 break;
4904 default:
4905 abort();
4906 }
4907 tcg_temp_free_ptr(fpstatus);
4908 break;
4909 }
4910 case NEON_3R_FLOAT_MULTIPLY:
4911 {
4912 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4913 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
4914 if (!u) {
4915 tcg_temp_free_i32(tmp2);
4916 tmp2 = neon_load_reg(rd, pass);
4917 if (size == 0) {
4918 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4919 } else {
4920 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
4921 }
4922 }
4923 tcg_temp_free_ptr(fpstatus);
4924 break;
4925 }
4926 case NEON_3R_FLOAT_CMP:
4927 {
4928 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4929 if (!u) {
4930 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
4931 } else {
4932 if (size == 0) {
4933 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4934 } else {
4935 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4936 }
4937 }
4938 tcg_temp_free_ptr(fpstatus);
4939 break;
4940 }
4941 case NEON_3R_FLOAT_ACMP:
4942 {
4943 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4944 if (size == 0) {
4945 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4946 } else {
4947 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4948 }
4949 tcg_temp_free_ptr(fpstatus);
4950 break;
4951 }
4952 case NEON_3R_FLOAT_MINMAX:
4953 {
4954 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4955 if (size == 0) {
4956 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4957 } else {
4958 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4959 }
4960 tcg_temp_free_ptr(fpstatus);
4961 break;
4962 }
4963 case NEON_3R_VRECPS_VRSQRTS:
4964 if (size == 0)
4965 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4966 else
4967 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4968 break;
4969 default:
4970 abort();
4971 }
4972 tcg_temp_free_i32(tmp2);
4973
4974 /* Save the result. For elementwise operations we can put it
4975 straight into the destination register. For pairwise operations
4976 we have to be careful to avoid clobbering the source operands. */
4977 if (pairwise && rd == rm) {
4978 neon_store_scratch(pass, tmp);
4979 } else {
4980 neon_store_reg(rd, pass, tmp);
4981 }
4982
4983 } /* for pass */
4984 if (pairwise && rd == rm) {
4985 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4986 tmp = neon_load_scratch(pass);
4987 neon_store_reg(rd, pass, tmp);
4988 }
4989 }
4990 /* End of 3 register same size operations. */
4991 } else if (insn & (1 << 4)) {
4992 if ((insn & 0x00380080) != 0) {
4993 /* Two registers and shift. */
4994 op = (insn >> 8) & 0xf;
4995 if (insn & (1 << 7)) {
4996 /* 64-bit shift. */
4997 if (op > 7) {
4998 return 1;
4999 }
5000 size = 3;
5001 } else {
5002 size = 2;
5003 while ((insn & (1 << (size + 19))) == 0)
5004 size--;
5005 }
5006 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5007             /* To avoid excessive duplication of ops we implement shift
5008 by immediate using the variable shift operations. */
5009 if (op < 8) {
5010 /* Shift by immediate:
5011 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5012 if (q && ((rd | rm) & 1)) {
5013 return 1;
5014 }
5015 if (!u && (op == 4 || op == 6)) {
5016 return 1;
5017 }
5018 /* Right shifts are encoded as N - shift, where N is the
5019 element size in bits. */
5020 if (op <= 4)
5021 shift = shift - (1 << (size + 3));
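                /* e.g. for size 0 (8-bit lanes) a VSHR #3 is encoded with an
                 * immediate of 8 - 3 = 5, so shift becomes -3; the variable
                 * shift helpers used below treat a negative count as a right
                 * shift, which is what lets shift-by-immediate reuse them.
                 */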
5022 if (size == 3) {
5023 count = q + 1;
5024 } else {
5025 count = q ? 4: 2;
5026 }
5027 switch (size) {
5028 case 0:
5029 imm = (uint8_t) shift;
5030 imm |= imm << 8;
5031 imm |= imm << 16;
5032 break;
5033 case 1:
5034 imm = (uint16_t) shift;
5035 imm |= imm << 16;
5036 break;
5037 case 2:
5038 case 3:
5039 imm = shift;
5040 break;
5041 default:
5042 abort();
5043 }
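                /* imm now holds the (possibly negative) per-lane shift count
                 * replicated into every lane of a 32-bit word, e.g. size 1
                 * with shift -3 gives 0xfffdfffd, so one helper call shifts
                 * all the lanes of a pass by the same amount.
                 */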
5044
5045 for (pass = 0; pass < count; pass++) {
5046 if (size == 3) {
5047 neon_load_reg64(cpu_V0, rm + pass);
5048 tcg_gen_movi_i64(cpu_V1, imm);
5049 switch (op) {
5050 case 0: /* VSHR */
5051 case 1: /* VSRA */
5052 if (u)
5053 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5054 else
5055 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5056 break;
5057 case 2: /* VRSHR */
5058 case 3: /* VRSRA */
5059 if (u)
5060 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5061 else
5062 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5063 break;
5064 case 4: /* VSRI */
5065 case 5: /* VSHL, VSLI */
5066 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5067 break;
5068 case 6: /* VQSHLU */
5069 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5070 cpu_V0, cpu_V1);
5071 break;
5072 case 7: /* VQSHL */
5073 if (u) {
5074 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5075 cpu_V0, cpu_V1);
5076 } else {
5077 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5078 cpu_V0, cpu_V1);
5079 }
5080 break;
5081 }
5082 if (op == 1 || op == 3) {
5083 /* Accumulate. */
5084 neon_load_reg64(cpu_V1, rd + pass);
5085 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5086 } else if (op == 4 || (op == 5 && u)) {
5087 /* Insert */
5088 neon_load_reg64(cpu_V1, rd + pass);
5089 uint64_t mask;
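                        /* mask marks the bits produced by the shifted value;
                         * destination bits outside it are preserved.
                         * e.g. VSRI.64 #8: shift = -8, so mask is
                         * 0x00ffffffffffffff and the top 8 bits of the
                         * destination survive.
                         */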
5090 if (shift < -63 || shift > 63) {
5091 mask = 0;
5092 } else {
5093 if (op == 4) {
5094 mask = 0xffffffffffffffffull >> -shift;
5095 } else {
5096 mask = 0xffffffffffffffffull << shift;
5097 }
5098 }
5099 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5100 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5101 }
5102 neon_store_reg64(cpu_V0, rd + pass);
5103 } else { /* size < 3 */
5104                     /* Operands in tmp and tmp2. */
5105 tmp = neon_load_reg(rm, pass);
5106 tmp2 = tcg_temp_new_i32();
5107 tcg_gen_movi_i32(tmp2, imm);
5108 switch (op) {
5109 case 0: /* VSHR */
5110 case 1: /* VSRA */
5111 GEN_NEON_INTEGER_OP(shl);
5112 break;
5113 case 2: /* VRSHR */
5114 case 3: /* VRSRA */
5115 GEN_NEON_INTEGER_OP(rshl);
5116 break;
5117 case 4: /* VSRI */
5118 case 5: /* VSHL, VSLI */
5119 switch (size) {
5120 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5121 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5122 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5123 default: abort();
5124 }
5125 break;
5126 case 6: /* VQSHLU */
5127 switch (size) {
5128 case 0:
5129 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5130 tmp, tmp2);
5131 break;
5132 case 1:
5133 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5134 tmp, tmp2);
5135 break;
5136 case 2:
5137 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5138 tmp, tmp2);
5139 break;
5140 default:
5141 abort();
5142 }
5143 break;
5144 case 7: /* VQSHL */
5145 GEN_NEON_INTEGER_OP_ENV(qshl);
5146 break;
5147 }
5148 tcg_temp_free_i32(tmp2);
5149
5150 if (op == 1 || op == 3) {
5151 /* Accumulate. */
5152 tmp2 = neon_load_reg(rd, pass);
5153 gen_neon_add(size, tmp, tmp2);
5154 tcg_temp_free_i32(tmp2);
5155 } else if (op == 4 || (op == 5 && u)) {
5156 /* Insert */
5157 switch (size) {
5158 case 0:
5159 if (op == 4)
5160 mask = 0xff >> -shift;
5161 else
5162 mask = (uint8_t)(0xff << shift);
5163 mask |= mask << 8;
5164 mask |= mask << 16;
5165 break;
5166 case 1:
5167 if (op == 4)
5168 mask = 0xffff >> -shift;
5169 else
5170 mask = (uint16_t)(0xffff << shift);
5171 mask |= mask << 16;
5172 break;
5173 case 2:
5174 if (shift < -31 || shift > 31) {
5175 mask = 0;
5176 } else {
5177 if (op == 4)
5178 mask = 0xffffffffu >> -shift;
5179 else
5180 mask = 0xffffffffu << shift;
5181 }
5182 break;
5183 default:
5184 abort();
5185 }
5186 tmp2 = neon_load_reg(rd, pass);
5187 tcg_gen_andi_i32(tmp, tmp, mask);
5188 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5189 tcg_gen_or_i32(tmp, tmp, tmp2);
5190 tcg_temp_free_i32(tmp2);
5191 }
5192 neon_store_reg(rd, pass, tmp);
5193 }
5194 } /* for pass */
5195 } else if (op < 10) {
5196 /* Shift by immediate and narrow:
5197 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5198 int input_unsigned = (op == 8) ? !u : u;
5199 if (rm & 1) {
5200 return 1;
5201 }
5202 shift = shift - (1 << (size + 3));
5203 size++;
5204 if (size == 3) {
5205 tmp64 = tcg_const_i64(shift);
5206 neon_load_reg64(cpu_V0, rm);
5207 neon_load_reg64(cpu_V1, rm + 1);
5208 for (pass = 0; pass < 2; pass++) {
5209 TCGv_i64 in;
5210 if (pass == 0) {
5211 in = cpu_V0;
5212 } else {
5213 in = cpu_V1;
5214 }
5215 if (q) {
5216 if (input_unsigned) {
5217 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5218 } else {
5219 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5220 }
5221 } else {
5222 if (input_unsigned) {
5223 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5224 } else {
5225 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5226 }
5227 }
5228 tmp = tcg_temp_new_i32();
5229 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5230 neon_store_reg(rd, pass, tmp);
5231 } /* for pass */
5232 tcg_temp_free_i64(tmp64);
5233 } else {
5234 if (size == 1) {
5235 imm = (uint16_t)shift;
5236 imm |= imm << 16;
5237 } else {
5238 /* size == 2 */
5239 imm = (uint32_t)shift;
5240 }
5241 tmp2 = tcg_const_i32(imm);
5242 tmp4 = neon_load_reg(rm + 1, 0);
5243 tmp5 = neon_load_reg(rm + 1, 1);
5244 for (pass = 0; pass < 2; pass++) {
5245 if (pass == 0) {
5246 tmp = neon_load_reg(rm, 0);
5247 } else {
5248 tmp = tmp4;
5249 }
5250 gen_neon_shift_narrow(size, tmp, tmp2, q,
5251 input_unsigned);
5252 if (pass == 0) {
5253 tmp3 = neon_load_reg(rm, 1);
5254 } else {
5255 tmp3 = tmp5;
5256 }
5257 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5258 input_unsigned);
5259 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5260 tcg_temp_free_i32(tmp);
5261 tcg_temp_free_i32(tmp3);
5262 tmp = tcg_temp_new_i32();
5263 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5264 neon_store_reg(rd, pass, tmp);
5265 } /* for pass */
5266 tcg_temp_free_i32(tmp2);
5267 }
5268 } else if (op == 10) {
5269 /* VSHLL, VMOVL */
5270 if (q || (rd & 1)) {
5271 return 1;
5272 }
5273 tmp = neon_load_reg(rm, 0);
5274 tmp2 = neon_load_reg(rm, 1);
5275 for (pass = 0; pass < 2; pass++) {
5276 if (pass == 1)
5277 tmp = tmp2;
5278
5279 gen_neon_widen(cpu_V0, tmp, size, u);
5280
5281 if (shift != 0) {
5282 /* The shift is less than the width of the source
5283 type, so we can just shift the whole register. */
5284 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5285 /* Widen the result of shift: we need to clear
5286 * the potential overflow bits resulting from
5287 * left bits of the narrow input appearing as
5288                      * right bits of the left-hand neighbouring narrow
5289 * input. */
5290 if (size < 2 || !u) {
5291 uint64_t imm64;
5292 if (size == 0) {
5293 imm = (0xffu >> (8 - shift));
5294 imm |= imm << 16;
5295 } else if (size == 1) {
5296 imm = 0xffff >> (16 - shift);
5297 } else {
5298 /* size == 2 */
5299 imm = 0xffffffff >> (32 - shift);
5300 }
5301 if (size < 2) {
5302 imm64 = imm | (((uint64_t)imm) << 32);
5303 } else {
5304 imm64 = imm;
5305 }
5306 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5307 }
5308 }
5309 neon_store_reg64(cpu_V0, rd + pass);
5310 }
5311 } else if (op >= 14) {
5312 /* VCVT fixed-point. */
5313 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5314 return 1;
5315 }
5316 /* We have already masked out the must-be-1 top bit of imm6,
5317 * hence this 32-shift where the ARM ARM has 64-imm6.
5318 */
5319 shift = 32 - shift;
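                /* e.g. 16 fraction bits are encoded as imm6 = 48; with the
                 * top bit masked out the shift field read above is 16, and
                 * 32 - 16 gives the 16 fraction bits passed to the helpers.
                 */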
5320 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5321 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5322 if (!(op & 1)) {
5323 if (u)
5324 gen_vfp_ulto(0, shift, 1);
5325 else
5326 gen_vfp_slto(0, shift, 1);
5327 } else {
5328 if (u)
5329 gen_vfp_toul(0, shift, 1);
5330 else
5331 gen_vfp_tosl(0, shift, 1);
5332 }
5333 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5334 }
5335 } else {
5336 return 1;
5337 }
5338 } else { /* (insn & 0x00380080) == 0 */
5339 int invert;
5340 if (q && (rd & 1)) {
5341 return 1;
5342 }
5343
5344 op = (insn >> 8) & 0xf;
5345 /* One register and immediate. */
5346 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5347 invert = (insn & (1 << 5)) != 0;
5348 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5349              * We choose not to special-case this and will behave as if a
5350 * valid constant encoding of 0 had been given.
5351 */
5352 switch (op) {
5353 case 0: case 1:
5354 /* no-op */
5355 break;
5356 case 2: case 3:
5357 imm <<= 8;
5358 break;
5359 case 4: case 5:
5360 imm <<= 16;
5361 break;
5362 case 6: case 7:
5363 imm <<= 24;
5364 break;
5365 case 8: case 9:
5366 imm |= imm << 16;
5367 break;
5368 case 10: case 11:
5369 imm = (imm << 8) | (imm << 24);
5370 break;
5371 case 12:
5372 imm = (imm << 8) | 0xff;
5373 break;
5374 case 13:
5375 imm = (imm << 16) | 0xffff;
5376 break;
5377 case 14:
5378 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5379 if (invert)
5380 imm = ~imm;
5381 break;
5382 case 15:
5383 if (invert) {
5384 return 1;
5385 }
5386 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5387 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5388 break;
5389 }
5390 if (invert)
5391 imm = ~imm;
5392
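            /* Example expansions for an 8-bit immediate of 0xab: op 2/3
             * give 0x0000ab00, op 10/11 give 0xab00ab00 and op 12 gives
             * 0x0000abff; every 32-bit lane of the destination receives
             * this value. op 14 with invert is instead expanded per-byte
             * in the loop below.
             */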
5393 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5394 if (op & 1 && op < 12) {
5395 tmp = neon_load_reg(rd, pass);
5396 if (invert) {
5397 /* The immediate value has already been inverted, so
5398 BIC becomes AND. */
5399 tcg_gen_andi_i32(tmp, tmp, imm);
5400 } else {
5401 tcg_gen_ori_i32(tmp, tmp, imm);
5402 }
5403 } else {
5404 /* VMOV, VMVN. */
5405 tmp = tcg_temp_new_i32();
5406 if (op == 14 && invert) {
5407 int n;
5408 uint32_t val;
5409 val = 0;
5410 for (n = 0; n < 4; n++) {
5411 if (imm & (1 << (n + (pass & 1) * 4)))
5412 val |= 0xff << (n * 8);
5413 }
5414 tcg_gen_movi_i32(tmp, val);
5415 } else {
5416 tcg_gen_movi_i32(tmp, imm);
5417 }
5418 }
5419 neon_store_reg(rd, pass, tmp);
5420 }
5421 }
5422     } else { /* (insn & 0x00800010) == 0x00800000 */
5423 if (size != 3) {
5424 op = (insn >> 8) & 0xf;
5425 if ((insn & (1 << 6)) == 0) {
5426 /* Three registers of different lengths. */
5427 int src1_wide;
5428 int src2_wide;
5429 int prewiden;
5430 /* undefreq: bit 0 : UNDEF if size != 0
5431 * bit 1 : UNDEF if size == 0
5432 * bit 2 : UNDEF if U == 1
5433 * Note that [1:0] set implies 'always UNDEF'
5434 */
5435 int undefreq;
5436 /* prewiden, src1_wide, src2_wide, undefreq */
5437 static const int neon_3reg_wide[16][4] = {
5438 {1, 0, 0, 0}, /* VADDL */
5439 {1, 1, 0, 0}, /* VADDW */
5440 {1, 0, 0, 0}, /* VSUBL */
5441 {1, 1, 0, 0}, /* VSUBW */
5442 {0, 1, 1, 0}, /* VADDHN */
5443 {0, 0, 0, 0}, /* VABAL */
5444 {0, 1, 1, 0}, /* VSUBHN */
5445 {0, 0, 0, 0}, /* VABDL */
5446 {0, 0, 0, 0}, /* VMLAL */
5447 {0, 0, 0, 6}, /* VQDMLAL */
5448 {0, 0, 0, 0}, /* VMLSL */
5449 {0, 0, 0, 6}, /* VQDMLSL */
5450 {0, 0, 0, 0}, /* Integer VMULL */
5451 {0, 0, 0, 2}, /* VQDMULL */
5452 {0, 0, 0, 5}, /* Polynomial VMULL */
5453 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5454 };
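            /* e.g. VADDW is {1, 1, 0, 0}: the first source is already wide
             * and the narrow (rm) operand is widened before the add, while
             * VQDMLAL is {0, 0, 0, 6}: undefreq bits 1 and 2 are set, so it
             * UNDEFs for size == 0 or U == 1.
             */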
5455
5456 prewiden = neon_3reg_wide[op][0];
5457 src1_wide = neon_3reg_wide[op][1];
5458 src2_wide = neon_3reg_wide[op][2];
5459 undefreq = neon_3reg_wide[op][3];
5460
5461 if (((undefreq & 1) && (size != 0)) ||
5462 ((undefreq & 2) && (size == 0)) ||
5463 ((undefreq & 4) && u)) {
5464 return 1;
5465 }
5466 if ((src1_wide && (rn & 1)) ||
5467 (src2_wide && (rm & 1)) ||
5468 (!src2_wide && (rd & 1))) {
5469 return 1;
5470 }
5471
5472 /* Avoid overlapping operands. Wide source operands are
5473 always aligned so will never overlap with wide
5474 destinations in problematic ways. */
5475 if (rd == rm && !src2_wide) {
5476 tmp = neon_load_reg(rm, 1);
5477 neon_store_scratch(2, tmp);
5478 } else if (rd == rn && !src1_wide) {
5479 tmp = neon_load_reg(rn, 1);
5480 neon_store_scratch(2, tmp);
5481 }
5482 TCGV_UNUSED(tmp3);
5483 for (pass = 0; pass < 2; pass++) {
5484 if (src1_wide) {
5485 neon_load_reg64(cpu_V0, rn + pass);
5486 TCGV_UNUSED(tmp);
5487 } else {
5488 if (pass == 1 && rd == rn) {
5489 tmp = neon_load_scratch(2);
5490 } else {
5491 tmp = neon_load_reg(rn, pass);
5492 }
5493 if (prewiden) {
5494 gen_neon_widen(cpu_V0, tmp, size, u);
5495 }
5496 }
5497 if (src2_wide) {
5498 neon_load_reg64(cpu_V1, rm + pass);
5499 TCGV_UNUSED(tmp2);
5500 } else {
5501 if (pass == 1 && rd == rm) {
5502 tmp2 = neon_load_scratch(2);
5503 } else {
5504 tmp2 = neon_load_reg(rm, pass);
5505 }
5506 if (prewiden) {
5507 gen_neon_widen(cpu_V1, tmp2, size, u);
5508 }
5509 }
5510 switch (op) {
5511 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5512 gen_neon_addl(size);
5513 break;
5514 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5515 gen_neon_subl(size);
5516 break;
5517 case 5: case 7: /* VABAL, VABDL */
5518 switch ((size << 1) | u) {
5519 case 0:
5520 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5521 break;
5522 case 1:
5523 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5524 break;
5525 case 2:
5526 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5527 break;
5528 case 3:
5529 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5530 break;
5531 case 4:
5532 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5533 break;
5534 case 5:
5535 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5536 break;
5537 default: abort();
5538 }
5539 tcg_temp_free_i32(tmp2);
5540 tcg_temp_free_i32(tmp);
5541 break;
5542 case 8: case 9: case 10: case 11: case 12: case 13:
5543 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5544 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5545 break;
5546 case 14: /* Polynomial VMULL */
5547 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5548 tcg_temp_free_i32(tmp2);
5549 tcg_temp_free_i32(tmp);
5550 break;
5551 default: /* 15 is RESERVED: caught earlier */
5552 abort();
5553 }
5554 if (op == 13) {
5555 /* VQDMULL */
5556 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5557 neon_store_reg64(cpu_V0, rd + pass);
5558 } else if (op == 5 || (op >= 8 && op <= 11)) {
5559 /* Accumulate. */
5560 neon_load_reg64(cpu_V1, rd + pass);
5561 switch (op) {
5562 case 10: /* VMLSL */
5563 gen_neon_negl(cpu_V0, size);
5564 /* Fall through */
5565 case 5: case 8: /* VABAL, VMLAL */
5566 gen_neon_addl(size);
5567 break;
5568 case 9: case 11: /* VQDMLAL, VQDMLSL */
5569 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5570 if (op == 11) {
5571 gen_neon_negl(cpu_V0, size);
5572 }
5573 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5574 break;
5575 default:
5576 abort();
5577 }
5578 neon_store_reg64(cpu_V0, rd + pass);
5579 } else if (op == 4 || op == 6) {
5580 /* Narrowing operation. */
5581 tmp = tcg_temp_new_i32();
5582 if (!u) {
5583 switch (size) {
5584 case 0:
5585 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5586 break;
5587 case 1:
5588 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5589 break;
5590 case 2:
5591 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5592 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5593 break;
5594 default: abort();
5595 }
5596 } else {
5597 switch (size) {
5598 case 0:
5599 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5600 break;
5601 case 1:
5602 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5603 break;
5604 case 2:
5605 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5606 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5607 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5608 break;
5609 default: abort();
5610 }
5611 }
5612 if (pass == 0) {
5613 tmp3 = tmp;
5614 } else {
5615 neon_store_reg(rd, 0, tmp3);
5616 neon_store_reg(rd, 1, tmp);
5617 }
5618 } else {
5619 /* Write back the result. */
5620 neon_store_reg64(cpu_V0, rd + pass);
5621 }
5622 }
5623 } else {
5624 /* Two registers and a scalar. NB that for ops of this form
5625 * the ARM ARM labels bit 24 as Q, but it is in our variable
5626 * 'u', not 'q'.
5627 */
5628 if (size == 0) {
5629 return 1;
5630 }
5631 switch (op) {
5632 case 1: /* Float VMLA scalar */
5633             case 5: /* Float VMLS scalar */
5634             case 9: /* Float VMUL scalar */
5635 if (size == 1) {
5636 return 1;
5637 }
5638 /* fall through */
5639 case 0: /* Integer VMLA scalar */
5640 case 4: /* Integer VMLS scalar */
5641 case 8: /* Integer VMUL scalar */
5642 case 12: /* VQDMULH scalar */
5643 case 13: /* VQRDMULH scalar */
5644 if (u && ((rd | rn) & 1)) {
5645 return 1;
5646 }
5647 tmp = neon_get_scalar(size, rm);
5648 neon_store_scratch(0, tmp);
5649 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5650 tmp = neon_load_scratch(0);
5651 tmp2 = neon_load_reg(rn, pass);
5652 if (op == 12) {
5653 if (size == 1) {
5654 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5655 } else {
5656 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5657 }
5658 } else if (op == 13) {
5659 if (size == 1) {
5660 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5661 } else {
5662 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5663 }
5664 } else if (op & 1) {
5665 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5666 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5667 tcg_temp_free_ptr(fpstatus);
5668 } else {
5669 switch (size) {
5670 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5671 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5672 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5673 default: abort();
5674 }
5675 }
5676 tcg_temp_free_i32(tmp2);
5677 if (op < 8) {
5678 /* Accumulate. */
5679 tmp2 = neon_load_reg(rd, pass);
5680 switch (op) {
5681 case 0:
5682 gen_neon_add(size, tmp, tmp2);
5683 break;
5684 case 1:
5685 {
5686 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5687 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5688 tcg_temp_free_ptr(fpstatus);
5689 break;
5690 }
5691 case 4:
5692 gen_neon_rsb(size, tmp, tmp2);
5693 break;
5694 case 5:
5695 {
5696 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5697 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5698 tcg_temp_free_ptr(fpstatus);
5699 break;
5700 }
5701 default:
5702 abort();
5703 }
5704 tcg_temp_free_i32(tmp2);
5705 }
5706 neon_store_reg(rd, pass, tmp);
5707 }
5708 break;
5709 case 3: /* VQDMLAL scalar */
5710 case 7: /* VQDMLSL scalar */
5711 case 11: /* VQDMULL scalar */
5712 if (u == 1) {
5713 return 1;
5714 }
5715 /* fall through */
5716             case 2: /* VMLAL scalar */
5717 case 6: /* VMLSL scalar */
5718 case 10: /* VMULL scalar */
5719 if (rd & 1) {
5720 return 1;
5721 }
5722 tmp2 = neon_get_scalar(size, rm);
5723 /* We need a copy of tmp2 because gen_neon_mull
5724              * frees it during pass 0. */
5725 tmp4 = tcg_temp_new_i32();
5726 tcg_gen_mov_i32(tmp4, tmp2);
5727 tmp3 = neon_load_reg(rn, 1);
5728
5729 for (pass = 0; pass < 2; pass++) {
5730 if (pass == 0) {
5731 tmp = neon_load_reg(rn, 0);
5732 } else {
5733 tmp = tmp3;
5734 tmp2 = tmp4;
5735 }
5736 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5737 if (op != 11) {
5738 neon_load_reg64(cpu_V1, rd + pass);
5739 }
5740 switch (op) {
5741 case 6:
5742 gen_neon_negl(cpu_V0, size);
5743 /* Fall through */
5744 case 2:
5745 gen_neon_addl(size);
5746 break;
5747 case 3: case 7:
5748 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5749 if (op == 7) {
5750 gen_neon_negl(cpu_V0, size);
5751 }
5752 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5753 break;
5754 case 10:
5755 /* no-op */
5756 break;
5757 case 11:
5758 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5759 break;
5760 default:
5761 abort();
5762 }
5763 neon_store_reg64(cpu_V0, rd + pass);
5764 }
5765
5766
5767 break;
5768 default: /* 14 and 15 are RESERVED */
5769 return 1;
5770 }
5771 }
5772 } else { /* size == 3 */
5773 if (!u) {
5774 /* Extract. */
5775 imm = (insn >> 8) & 0xf;
5776
5777 if (imm > 7 && !q)
5778 return 1;
5779
5780 if (q && ((rd | rn | rm) & 1)) {
5781 return 1;
5782 }
5783
5784 if (imm == 0) {
5785 neon_load_reg64(cpu_V0, rn);
5786 if (q) {
5787 neon_load_reg64(cpu_V1, rn + 1);
5788 }
5789 } else if (imm == 8) {
5790 neon_load_reg64(cpu_V0, rn + 1);
5791 if (q) {
5792 neon_load_reg64(cpu_V1, rm);
5793 }
5794 } else if (q) {
5795 tmp64 = tcg_temp_new_i64();
5796 if (imm < 8) {
5797 neon_load_reg64(cpu_V0, rn);
5798 neon_load_reg64(tmp64, rn + 1);
5799 } else {
5800 neon_load_reg64(cpu_V0, rn + 1);
5801 neon_load_reg64(tmp64, rm);
5802 }
5803 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5804 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5805 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5806 if (imm < 8) {
5807 neon_load_reg64(cpu_V1, rm);
5808 } else {
5809 neon_load_reg64(cpu_V1, rm + 1);
5810 imm -= 8;
5811 }
5812 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5813 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5814 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5815 tcg_temp_free_i64(tmp64);
5816 } else {
5817 /* BUGFIX */
5818 neon_load_reg64(cpu_V0, rn);
5819 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5820 neon_load_reg64(cpu_V1, rm);
5821 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5822 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5823 }
5824 neon_store_reg64(cpu_V0, rd);
5825 if (q) {
5826 neon_store_reg64(cpu_V1, rd + 1);
5827 }
5828 } else if ((insn & (1 << 11)) == 0) {
5829 /* Two register misc. */
5830 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5831 size = (insn >> 18) & 3;
5832 /* UNDEF for unknown op values and bad op-size combinations */
5833 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5834 return 1;
5835 }
5836 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5837 q && ((rm | rd) & 1)) {
5838 return 1;
5839 }
5840 switch (op) {
5841 case NEON_2RM_VREV64:
5842 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5843 tmp = neon_load_reg(rm, pass * 2);
5844 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5845 switch (size) {
5846 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5847 case 1: gen_swap_half(tmp); break;
5848 case 2: /* no-op */ break;
5849 default: abort();
5850 }
5851 neon_store_reg(rd, pass * 2 + 1, tmp);
5852 if (size == 2) {
5853 neon_store_reg(rd, pass * 2, tmp2);
5854 } else {
5855 switch (size) {
5856 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5857 case 1: gen_swap_half(tmp2); break;
5858 default: abort();
5859 }
5860 neon_store_reg(rd, pass * 2, tmp2);
5861 }
5862 }
5863 break;
5864 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5865 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
5866 for (pass = 0; pass < q + 1; pass++) {
5867 tmp = neon_load_reg(rm, pass * 2);
5868 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5869 tmp = neon_load_reg(rm, pass * 2 + 1);
5870 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5871 switch (size) {
5872 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5873 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5874 case 2: tcg_gen_add_i64(CPU_V001); break;
5875 default: abort();
5876 }
5877 if (op >= NEON_2RM_VPADAL) {
5878 /* Accumulate. */
5879 neon_load_reg64(cpu_V1, rd + pass);
5880 gen_neon_addl(size);
5881 }
5882 neon_store_reg64(cpu_V0, rd + pass);
5883 }
5884 break;
5885 case NEON_2RM_VTRN:
5886 if (size == 2) {
5887 int n;
5888 for (n = 0; n < (q ? 4 : 2); n += 2) {
5889 tmp = neon_load_reg(rm, n);
5890 tmp2 = neon_load_reg(rd, n + 1);
5891 neon_store_reg(rm, n, tmp2);
5892 neon_store_reg(rd, n + 1, tmp);
5893 }
5894 } else {
5895 goto elementwise;
5896 }
5897 break;
5898 case NEON_2RM_VUZP:
5899 if (gen_neon_unzip(rd, rm, size, q)) {
5900 return 1;
5901 }
5902 break;
5903 case NEON_2RM_VZIP:
5904 if (gen_neon_zip(rd, rm, size, q)) {
5905 return 1;
5906 }
5907 break;
5908 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5909 /* also VQMOVUN; op field and mnemonics don't line up */
5910 if (rm & 1) {
5911 return 1;
5912 }
5913 TCGV_UNUSED(tmp2);
5914 for (pass = 0; pass < 2; pass++) {
5915 neon_load_reg64(cpu_V0, rm + pass);
5916 tmp = tcg_temp_new_i32();
5917 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5918 tmp, cpu_V0);
5919 if (pass == 0) {
5920 tmp2 = tmp;
5921 } else {
5922 neon_store_reg(rd, 0, tmp2);
5923 neon_store_reg(rd, 1, tmp);
5924 }
5925 }
5926 break;
5927 case NEON_2RM_VSHLL:
5928 if (q || (rd & 1)) {
5929 return 1;
5930 }
5931 tmp = neon_load_reg(rm, 0);
5932 tmp2 = neon_load_reg(rm, 1);
5933 for (pass = 0; pass < 2; pass++) {
5934 if (pass == 1)
5935 tmp = tmp2;
5936 gen_neon_widen(cpu_V0, tmp, size, 1);
5937 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5938 neon_store_reg64(cpu_V0, rd + pass);
5939 }
5940 break;
5941 case NEON_2RM_VCVT_F16_F32:
5942 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5943 q || (rm & 1)) {
5944 return 1;
5945 }
5946 tmp = tcg_temp_new_i32();
5947 tmp2 = tcg_temp_new_i32();
5948 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5949 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5950 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5951 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5952 tcg_gen_shli_i32(tmp2, tmp2, 16);
5953 tcg_gen_or_i32(tmp2, tmp2, tmp);
5954 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5955 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5956 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5957 neon_store_reg(rd, 0, tmp2);
5958 tmp2 = tcg_temp_new_i32();
5959 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5960 tcg_gen_shli_i32(tmp2, tmp2, 16);
5961 tcg_gen_or_i32(tmp2, tmp2, tmp);
5962 neon_store_reg(rd, 1, tmp2);
5963 tcg_temp_free_i32(tmp);
5964 break;
5965 case NEON_2RM_VCVT_F32_F16:
5966 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5967 q || (rd & 1)) {
5968 return 1;
5969 }
5970 tmp3 = tcg_temp_new_i32();
5971 tmp = neon_load_reg(rm, 0);
5972 tmp2 = neon_load_reg(rm, 1);
5973 tcg_gen_ext16u_i32(tmp3, tmp);
5974 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5975 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5976 tcg_gen_shri_i32(tmp3, tmp, 16);
5977 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5978 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5979 tcg_temp_free_i32(tmp);
5980 tcg_gen_ext16u_i32(tmp3, tmp2);
5981 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5982 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5983 tcg_gen_shri_i32(tmp3, tmp2, 16);
5984 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5985 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5986 tcg_temp_free_i32(tmp2);
5987 tcg_temp_free_i32(tmp3);
5988 break;
5989 default:
5990 elementwise:
5991 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5992 if (neon_2rm_is_float_op(op)) {
5993 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5994 neon_reg_offset(rm, pass));
5995 TCGV_UNUSED(tmp);
5996 } else {
5997 tmp = neon_load_reg(rm, pass);
5998 }
5999 switch (op) {
6000 case NEON_2RM_VREV32:
6001 switch (size) {
6002 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6003 case 1: gen_swap_half(tmp); break;
6004 default: abort();
6005 }
6006 break;
6007 case NEON_2RM_VREV16:
6008 gen_rev16(tmp);
6009 break;
6010 case NEON_2RM_VCLS:
6011 switch (size) {
6012 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6013 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6014 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6015 default: abort();
6016 }
6017 break;
6018 case NEON_2RM_VCLZ:
6019 switch (size) {
6020 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6021 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6022 case 2: gen_helper_clz(tmp, tmp); break;
6023 default: abort();
6024 }
6025 break;
6026 case NEON_2RM_VCNT:
6027 gen_helper_neon_cnt_u8(tmp, tmp);
6028 break;
6029 case NEON_2RM_VMVN:
6030 tcg_gen_not_i32(tmp, tmp);
6031 break;
6032 case NEON_2RM_VQABS:
6033 switch (size) {
6034 case 0:
6035 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6036 break;
6037 case 1:
6038 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6039 break;
6040 case 2:
6041 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6042 break;
6043 default: abort();
6044 }
6045 break;
6046 case NEON_2RM_VQNEG:
6047 switch (size) {
6048 case 0:
6049 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6050 break;
6051 case 1:
6052 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6053 break;
6054 case 2:
6055 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6056 break;
6057 default: abort();
6058 }
6059 break;
6060 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6061 tmp2 = tcg_const_i32(0);
6062 switch(size) {
6063 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6064 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6065 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6066 default: abort();
6067 }
6068 tcg_temp_free(tmp2);
6069 if (op == NEON_2RM_VCLE0) {
6070 tcg_gen_not_i32(tmp, tmp);
6071 }
6072 break;
6073 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6074 tmp2 = tcg_const_i32(0);
6075 switch(size) {
6076 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6077 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6078 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6079 default: abort();
6080 }
6081 tcg_temp_free(tmp2);
6082 if (op == NEON_2RM_VCLT0) {
6083 tcg_gen_not_i32(tmp, tmp);
6084 }
6085 break;
6086 case NEON_2RM_VCEQ0:
6087 tmp2 = tcg_const_i32(0);
6088 switch(size) {
6089 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6090 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6091 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6092 default: abort();
6093 }
6094 tcg_temp_free(tmp2);
6095 break;
6096 case NEON_2RM_VABS:
6097 switch(size) {
6098 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6099 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6100 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6101 default: abort();
6102 }
6103 break;
6104 case NEON_2RM_VNEG:
6105 tmp2 = tcg_const_i32(0);
6106 gen_neon_rsb(size, tmp, tmp2);
6107 tcg_temp_free(tmp2);
6108 break;
6109 case NEON_2RM_VCGT0_F:
6110 {
6111 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6112 tmp2 = tcg_const_i32(0);
6113 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6114 tcg_temp_free(tmp2);
6115 tcg_temp_free_ptr(fpstatus);
6116 break;
6117 }
6118 case NEON_2RM_VCGE0_F:
6119 {
6120 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6121 tmp2 = tcg_const_i32(0);
6122 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6123 tcg_temp_free(tmp2);
6124 tcg_temp_free_ptr(fpstatus);
6125 break;
6126 }
6127 case NEON_2RM_VCEQ0_F:
6128 {
6129 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6130 tmp2 = tcg_const_i32(0);
6131 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6132 tcg_temp_free(tmp2);
6133 tcg_temp_free_ptr(fpstatus);
6134 break;
6135 }
6136 case NEON_2RM_VCLE0_F:
6137 {
6138 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6139 tmp2 = tcg_const_i32(0);
6140 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6141 tcg_temp_free(tmp2);
6142 tcg_temp_free_ptr(fpstatus);
6143 break;
6144 }
6145 case NEON_2RM_VCLT0_F:
6146 {
6147 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6148 tmp2 = tcg_const_i32(0);
6149 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6150 tcg_temp_free(tmp2);
6151 tcg_temp_free_ptr(fpstatus);
6152 break;
6153 }
6154 case NEON_2RM_VABS_F:
6155 gen_vfp_abs(0);
6156 break;
6157 case NEON_2RM_VNEG_F:
6158 gen_vfp_neg(0);
6159 break;
6160 case NEON_2RM_VSWP:
6161 tmp2 = neon_load_reg(rd, pass);
6162 neon_store_reg(rm, pass, tmp2);
6163 break;
6164 case NEON_2RM_VTRN:
6165 tmp2 = neon_load_reg(rd, pass);
6166 switch (size) {
6167 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6168 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6169 default: abort();
6170 }
6171 neon_store_reg(rm, pass, tmp2);
6172 break;
6173 case NEON_2RM_VRECPE:
6174 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6175 break;
6176 case NEON_2RM_VRSQRTE:
6177 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6178 break;
6179 case NEON_2RM_VRECPE_F:
6180 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6181 break;
6182 case NEON_2RM_VRSQRTE_F:
6183 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6184 break;
6185 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6186 gen_vfp_sito(0, 1);
6187 break;
6188 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6189 gen_vfp_uito(0, 1);
6190 break;
6191 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6192 gen_vfp_tosiz(0, 1);
6193 break;
6194 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6195 gen_vfp_touiz(0, 1);
6196 break;
6197 default:
6198 /* Reserved op values were caught by the
6199 * neon_2rm_sizes[] check earlier.
6200 */
6201 abort();
6202 }
6203 if (neon_2rm_is_float_op(op)) {
6204 tcg_gen_st_f32(cpu_F0s, cpu_env,
6205 neon_reg_offset(rd, pass));
6206 } else {
6207 neon_store_reg(rd, pass, tmp);
6208 }
6209 }
6210 break;
6211 }
6212 } else if ((insn & (1 << 10)) == 0) {
6213 /* VTBL, VTBX. */
6214 int n = ((insn >> 8) & 3) + 1;
6215 if ((rn + n) > 32) {
6216 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6217 * helper function running off the end of the register file.
6218 */
6219 return 1;
6220 }
6221 n <<= 3;
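            /* n is now the table length in bytes (8 per D register); this is
             * the count passed to gen_helper_neon_tbl below.
             */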
6222 if (insn & (1 << 6)) {
6223 tmp = neon_load_reg(rd, 0);
6224 } else {
6225 tmp = tcg_temp_new_i32();
6226 tcg_gen_movi_i32(tmp, 0);
6227 }
6228 tmp2 = neon_load_reg(rm, 0);
6229 tmp4 = tcg_const_i32(rn);
6230 tmp5 = tcg_const_i32(n);
6231 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
6232 tcg_temp_free_i32(tmp);
6233 if (insn & (1 << 6)) {
6234 tmp = neon_load_reg(rd, 1);
6235 } else {
6236 tmp = tcg_temp_new_i32();
6237 tcg_gen_movi_i32(tmp, 0);
6238 }
6239 tmp3 = neon_load_reg(rm, 1);
6240 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
6241 tcg_temp_free_i32(tmp5);
6242 tcg_temp_free_i32(tmp4);
6243 neon_store_reg(rd, 0, tmp2);
6244 neon_store_reg(rd, 1, tmp3);
6245 tcg_temp_free_i32(tmp);
6246 } else if ((insn & 0x380) == 0) {
6247 /* VDUP */
6248 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6249 return 1;
6250 }
6251 if (insn & (1 << 19)) {
6252 tmp = neon_load_reg(rm, 1);
6253 } else {
6254 tmp = neon_load_reg(rm, 0);
6255 }
6256 if (insn & (1 << 16)) {
6257 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6258 } else if (insn & (1 << 17)) {
6259 if ((insn >> 18) & 1)
6260 gen_neon_dup_high16(tmp);
6261 else
6262 gen_neon_dup_low16(tmp);
6263 }
6264 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6265 tmp2 = tcg_temp_new_i32();
6266 tcg_gen_mov_i32(tmp2, tmp);
6267 neon_store_reg(rd, pass, tmp2);
6268 }
6269 tcg_temp_free_i32(tmp);
6270 } else {
6271 return 1;
6272 }
6273 }
6274 }
6275 return 0;
6276 }
6277
6278 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
6279 {
6280 int crn = (insn >> 16) & 0xf;
6281 int crm = insn & 0xf;
6282 int op1 = (insn >> 21) & 7;
6283 int op2 = (insn >> 5) & 7;
6284 int rt = (insn >> 12) & 0xf;
6285 TCGv tmp;
6286
6287 /* Minimal set of debug registers, since we don't support debug */
6288 if (op1 == 0 && crn == 0 && op2 == 0) {
6289 switch (crm) {
6290 case 0:
6291 /* DBGDIDR: just RAZ. In particular this means the
6292 * "debug architecture version" bits will read as
6293 * a reserved value, which should cause Linux to
6294 * not try to use the debug hardware.
6295 */
6296 tmp = tcg_const_i32(0);
6297 store_reg(s, rt, tmp);
6298 return 0;
6299 case 1:
6300 case 2:
6301 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6302 * don't implement memory mapped debug components
6303 */
6304 if (ENABLE_ARCH_7) {
6305 tmp = tcg_const_i32(0);
6306 store_reg(s, rt, tmp);
6307 return 0;
6308 }
6309 break;
6310 default:
6311 break;
6312 }
6313 }
6314
6315 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6316 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6317 /* TEECR */
6318 if (IS_USER(s))
6319 return 1;
6320 tmp = load_cpu_field(teecr);
6321 store_reg(s, rt, tmp);
6322 return 0;
6323 }
6324 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6325 /* TEEHBR */
6326 if (IS_USER(s) && (env->teecr & 1))
6327 return 1;
6328 tmp = load_cpu_field(teehbr);
6329 store_reg(s, rt, tmp);
6330 return 0;
6331 }
6332 }
6333 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
6334 op1, crn, crm, op2);
6335 return 1;
6336 }
6337
6338 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6339 {
6340 int crn = (insn >> 16) & 0xf;
6341 int crm = insn & 0xf;
6342 int op1 = (insn >> 21) & 7;
6343 int op2 = (insn >> 5) & 7;
6344 int rt = (insn >> 12) & 0xf;
6345 TCGv tmp;
6346
6347 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6348 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6349 /* TEECR */
6350 if (IS_USER(s))
6351 return 1;
6352 tmp = load_reg(s, rt);
6353 gen_helper_set_teecr(cpu_env, tmp);
6354 tcg_temp_free_i32(tmp);
6355 return 0;
6356 }
6357 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6358 /* TEEHBR */
6359 if (IS_USER(s) && (env->teecr & 1))
6360 return 1;
6361 tmp = load_reg(s, rt);
6362 store_cpu_field(tmp, teehbr);
6363 return 0;
6364 }
6365 }
6366 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6367 op1, crn, crm, op2);
6368 return 1;
6369 }
6370
6371 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6372 {
6373 int cpnum;
6374
6375 cpnum = (insn >> 8) & 0xf;
6376 if (arm_feature(env, ARM_FEATURE_XSCALE)
6377 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6378 return 1;
6379
6380 switch (cpnum) {
6381 case 0:
6382 case 1:
6383 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6384 return disas_iwmmxt_insn(env, s, insn);
6385 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6386 return disas_dsp_insn(env, s, insn);
6387 }
6388 return 1;
6389 case 10:
6390 case 11:
6391 return disas_vfp_insn (env, s, insn);
6392 case 14:
6393 /* Coprocessors 7-15 are architecturally reserved by ARM.
6394 Unfortunately Intel decided to ignore this. */
6395 if (arm_feature(env, ARM_FEATURE_XSCALE))
6396 goto board;
6397 if (insn & (1 << 20))
6398 return disas_cp14_read(env, s, insn);
6399 else
6400 return disas_cp14_write(env, s, insn);
6401 case 15:
6402 return disas_cp15_insn (env, s, insn);
6403 default:
6404 board:
6405 /* Unknown coprocessor. See if the board has hooked it. */
6406 return disas_cp_insn (env, s, insn);
6407 }
6408 }
6409
6410
6411 /* Store a 64-bit value to a register pair. Clobbers val. */
6412 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6413 {
6414 TCGv tmp;
6415 tmp = tcg_temp_new_i32();
6416 tcg_gen_trunc_i64_i32(tmp, val);
6417 store_reg(s, rlow, tmp);
6418 tmp = tcg_temp_new_i32();
6419 tcg_gen_shri_i64(val, val, 32);
6420 tcg_gen_trunc_i64_i32(tmp, val);
6421 store_reg(s, rhigh, tmp);
6422 }
6423
6424 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6425 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6426 {
6427 TCGv_i64 tmp;
6428 TCGv tmp2;
6429
6430 /* Load value and extend to 64 bits. */
6431 tmp = tcg_temp_new_i64();
6432 tmp2 = load_reg(s, rlow);
6433 tcg_gen_extu_i32_i64(tmp, tmp2);
6434 tcg_temp_free_i32(tmp2);
6435 tcg_gen_add_i64(val, val, tmp);
6436 tcg_temp_free_i64(tmp);
6437 }
6438
6439 /* load and add a 64-bit value from a register pair. */
6440 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6441 {
6442 TCGv_i64 tmp;
6443 TCGv tmpl;
6444 TCGv tmph;
6445
6446 /* Load 64-bit value rd:rn. */
6447 tmpl = load_reg(s, rlow);
6448 tmph = load_reg(s, rhigh);
6449 tmp = tcg_temp_new_i64();
6450 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6451 tcg_temp_free_i32(tmpl);
6452 tcg_temp_free_i32(tmph);
6453 tcg_gen_add_i64(val, val, tmp);
6454 tcg_temp_free_i64(tmp);
6455 }
6456
6457 /* Set N and Z flags from a 64-bit value. */
6458 static void gen_logicq_cc(TCGv_i64 val)
6459 {
6460 TCGv tmp = tcg_temp_new_i32();
6461 gen_helper_logicq_cc(tmp, val);
6462 gen_logic_CC(tmp);
6463 tcg_temp_free_i32(tmp);
6464 }
6465
6466 /* Load/Store exclusive instructions are implemented by remembering
6467 the value/address loaded, and seeing if these are the same
6468 when the store is performed. This should be sufficient to implement
6469 the architecturally mandated semantics, and avoids having to monitor
6470 regular stores.
6471
6472 In system emulation mode only one CPU will be running at once, so
6473 this sequence is effectively atomic. In user emulation mode we
6474 throw an exception and handle the atomic operation elsewhere. */
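 /* Illustrative guest-code pattern this models (sketch only, not from this file):
  *     ldrex   r1, [r0]        @ load [r0] and mark the address exclusive
  *     ...                     @ compute the new value in r2
  *     strex   r3, r2, [r0]    @ r3 = 0 if the store succeeded, 1 if it failed
  */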
6475 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6476 TCGv addr, int size)
6477 {
6478 TCGv tmp;
6479
6480 switch (size) {
6481 case 0:
6482 tmp = gen_ld8u(addr, IS_USER(s));
6483 break;
6484 case 1:
6485 tmp = gen_ld16u(addr, IS_USER(s));
6486 break;
6487 case 2:
6488 case 3:
6489 tmp = gen_ld32(addr, IS_USER(s));
6490 break;
6491 default:
6492 abort();
6493 }
6494 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6495 store_reg(s, rt, tmp);
6496 if (size == 3) {
6497 TCGv tmp2 = tcg_temp_new_i32();
6498 tcg_gen_addi_i32(tmp2, addr, 4);
6499 tmp = gen_ld32(tmp2, IS_USER(s));
6500 tcg_temp_free_i32(tmp2);
6501 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6502 store_reg(s, rt2, tmp);
6503 }
6504 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6505 }
6506
6507 static void gen_clrex(DisasContext *s)
6508 {
6509 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6510 }
6511
6512 #ifdef CONFIG_USER_ONLY
6513 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6514 TCGv addr, int size)
6515 {
6516 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6517 tcg_gen_movi_i32(cpu_exclusive_info,
6518 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6519 gen_exception_insn(s, 4, EXCP_STREX);
6520 }
6521 #else
6522 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6523 TCGv addr, int size)
6524 {
6525 TCGv tmp;
6526 int done_label;
6527 int fail_label;
6528
6529 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6530 [addr] = {Rt};
6531 {Rd} = 0;
6532 } else {
6533 {Rd} = 1;
6534 } */
6535 fail_label = gen_new_label();
6536 done_label = gen_new_label();
6537 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6538 switch (size) {
6539 case 0:
6540 tmp = gen_ld8u(addr, IS_USER(s));
6541 break;
6542 case 1:
6543 tmp = gen_ld16u(addr, IS_USER(s));
6544 break;
6545 case 2:
6546 case 3:
6547 tmp = gen_ld32(addr, IS_USER(s));
6548 break;
6549 default:
6550 abort();
6551 }
6552 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6553 tcg_temp_free_i32(tmp);
6554 if (size == 3) {
6555 TCGv tmp2 = tcg_temp_new_i32();
6556 tcg_gen_addi_i32(tmp2, addr, 4);
6557 tmp = gen_ld32(tmp2, IS_USER(s));
6558 tcg_temp_free_i32(tmp2);
6559 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6560 tcg_temp_free_i32(tmp);
6561 }
6562 tmp = load_reg(s, rt);
6563 switch (size) {
6564 case 0:
6565 gen_st8(tmp, addr, IS_USER(s));
6566 break;
6567 case 1:
6568 gen_st16(tmp, addr, IS_USER(s));
6569 break;
6570 case 2:
6571 case 3:
6572 gen_st32(tmp, addr, IS_USER(s));
6573 break;
6574 default:
6575 abort();
6576 }
6577 if (size == 3) {
6578 tcg_gen_addi_i32(addr, addr, 4);
6579 tmp = load_reg(s, rt2);
6580 gen_st32(tmp, addr, IS_USER(s));
6581 }
6582 tcg_gen_movi_i32(cpu_R[rd], 0);
6583 tcg_gen_br(done_label);
6584 gen_set_label(fail_label);
6585 tcg_gen_movi_i32(cpu_R[rd], 1);
6586 gen_set_label(done_label);
6587 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6588 }
6589 #endif
6590
6591 static void disas_arm_insn(CPUState * env, DisasContext *s)
6592 {
6593 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6594 TCGv tmp;
6595 TCGv tmp2;
6596 TCGv tmp3;
6597 TCGv addr;
6598 TCGv_i64 tmp64;
6599
6600 insn = ldl_code(s->pc);
6601 s->pc += 4;
6602
6603 /* M variants do not implement ARM mode. */
6604 if (IS_M(env))
6605 goto illegal_op;
6606 cond = insn >> 28;
6607 if (cond == 0xf){
6608 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6609 * choose to UNDEF. In ARMv5 and above the space is used
6610 * for miscellaneous unconditional instructions.
6611 */
6612 ARCH(5);
6613
6614 /* Unconditional instructions. */
6615 if (((insn >> 25) & 7) == 1) {
6616 /* NEON Data processing. */
6617 if (!arm_feature(env, ARM_FEATURE_NEON))
6618 goto illegal_op;
6619
6620 if (disas_neon_data_insn(env, s, insn))
6621 goto illegal_op;
6622 return;
6623 }
6624 if ((insn & 0x0f100000) == 0x04000000) {
6625 /* NEON load/store. */
6626 if (!arm_feature(env, ARM_FEATURE_NEON))
6627 goto illegal_op;
6628
6629 if (disas_neon_ls_insn(env, s, insn))
6630 goto illegal_op;
6631 return;
6632 }
6633 if (((insn & 0x0f30f000) == 0x0510f000) ||
6634 ((insn & 0x0f30f010) == 0x0710f000)) {
6635 if ((insn & (1 << 22)) == 0) {
6636 /* PLDW; v7MP */
6637 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6638 goto illegal_op;
6639 }
6640 }
6641 /* Otherwise PLD; v5TE+ */
6642 ARCH(5TE);
6643 return;
6644 }
6645 if (((insn & 0x0f70f000) == 0x0450f000) ||
6646 ((insn & 0x0f70f010) == 0x0650f000)) {
6647 ARCH(7);
6648 return; /* PLI; V7 */
6649 }
6650 if (((insn & 0x0f700000) == 0x04100000) ||
6651 ((insn & 0x0f700010) == 0x06100000)) {
6652 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6653 goto illegal_op;
6654 }
6655 return; /* v7MP: Unallocated memory hint: must NOP */
6656 }
6657
6658 if ((insn & 0x0ffffdff) == 0x01010000) {
6659 ARCH(6);
6660 /* setend */
6661 if (insn & (1 << 9)) {
6662 /* BE8 mode not implemented. */
6663 goto illegal_op;
6664 }
6665 return;
6666 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6667 switch ((insn >> 4) & 0xf) {
6668 case 1: /* clrex */
6669 ARCH(6K);
6670 gen_clrex(s);
6671 return;
6672 case 4: /* dsb */
6673 case 5: /* dmb */
6674 case 6: /* isb */
6675 ARCH(7);
6676 /* We don't emulate caches so these are a no-op. */
6677 return;
6678 default:
6679 goto illegal_op;
6680 }
6681 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6682 /* srs */
6683 int32_t offset;
6684 if (IS_USER(s))
6685 goto illegal_op;
6686 ARCH(6);
6687 op1 = (insn & 0x1f);
6688 addr = tcg_temp_new_i32();
6689 tmp = tcg_const_i32(op1);
6690 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6691 tcg_temp_free_i32(tmp);
6692 i = (insn >> 23) & 3;
6693 switch (i) {
6694 case 0: offset = -4; break; /* DA */
6695 case 1: offset = 0; break; /* IA */
6696 case 2: offset = -8; break; /* DB */
6697 case 3: offset = 4; break; /* IB */
6698 default: abort();
6699 }
6700 if (offset)
6701 tcg_gen_addi_i32(addr, addr, offset);
6702 tmp = load_reg(s, 14);
6703 gen_st32(tmp, addr, 0);
6704 tmp = load_cpu_field(spsr);
6705 tcg_gen_addi_i32(addr, addr, 4);
6706 gen_st32(tmp, addr, 0);
6707 if (insn & (1 << 21)) {
6708 /* Base writeback. */
6709 switch (i) {
6710 case 0: offset = -8; break;
6711 case 1: offset = 4; break;
6712 case 2: offset = -4; break;
6713 case 3: offset = 0; break;
6714 default: abort();
6715 }
6716 if (offset)
6717 tcg_gen_addi_i32(addr, addr, offset);
6718 tmp = tcg_const_i32(op1);
6719 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6720 tcg_temp_free_i32(tmp);
6721 tcg_temp_free_i32(addr);
6722 } else {
6723 tcg_temp_free_i32(addr);
6724 }
6725 return;
6726 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6727 /* rfe */
6728 int32_t offset;
6729 if (IS_USER(s))
6730 goto illegal_op;
6731 ARCH(6);
6732 rn = (insn >> 16) & 0xf;
6733 addr = load_reg(s, rn);
6734 i = (insn >> 23) & 3;
6735 switch (i) {
6736 case 0: offset = -4; break; /* DA */
6737 case 1: offset = 0; break; /* IA */
6738 case 2: offset = -8; break; /* DB */
6739 case 3: offset = 4; break; /* IB */
6740 default: abort();
6741 }
6742 if (offset)
6743 tcg_gen_addi_i32(addr, addr, offset);
6744 /* Load PC into tmp and CPSR into tmp2. */
6745 tmp = gen_ld32(addr, 0);
6746 tcg_gen_addi_i32(addr, addr, 4);
6747 tmp2 = gen_ld32(addr, 0);
6748 if (insn & (1 << 21)) {
6749 /* Base writeback. */
6750 switch (i) {
6751 case 0: offset = -8; break;
6752 case 1: offset = 4; break;
6753 case 2: offset = -4; break;
6754 case 3: offset = 0; break;
6755 default: abort();
6756 }
6757 if (offset)
6758 tcg_gen_addi_i32(addr, addr, offset);
6759 store_reg(s, rn, addr);
6760 } else {
6761 tcg_temp_free_i32(addr);
6762 }
6763 gen_rfe(s, tmp, tmp2);
6764 return;
6765 } else if ((insn & 0x0e000000) == 0x0a000000) {
6766 /* branch link and change to thumb (blx <offset>) */
6767 int32_t offset;
6768
6769 val = (uint32_t)s->pc;
6770 tmp = tcg_temp_new_i32();
6771 tcg_gen_movi_i32(tmp, val);
6772 store_reg(s, 14, tmp);
6773 /* Sign-extend the 24-bit offset */
6774 offset = (((int32_t)insn) << 8) >> 8;
6775 /* offset * 4 + bit24 * 2 + (thumb bit) */
6776 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6777 /* pipeline offset */
6778 val += 4;
6779 /* protected by ARCH(5); above, near the start of uncond block */
6780 gen_bx_im(s, val);
6781 return;
6782 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6783 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6784 /* iWMMXt register transfer. */
6785 if (env->cp15.c15_cpar & (1 << 1))
6786 if (!disas_iwmmxt_insn(env, s, insn))
6787 return;
6788 }
6789 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6790 /* Coprocessor double register transfer. */
6791 ARCH(5TE);
6792 } else if ((insn & 0x0f000010) == 0x0e000010) {
6793 /* Additional coprocessor register transfer. */
6794 } else if ((insn & 0x0ff10020) == 0x01000000) {
6795 uint32_t mask;
6796 uint32_t val;
6797 /* cps (privileged) */
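            /* Bit 19 (imod[1]) requests an interrupt-mask update: with bit 18
             * set this is CPSID (set the selected A/I/F bits), otherwise CPSIE
             * (clear them). Bit 17 requests a mode change to bits [4:0].
             */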
6798 if (IS_USER(s))
6799 return;
6800 mask = val = 0;
6801 if (insn & (1 << 19)) {
6802 if (insn & (1 << 8))
6803 mask |= CPSR_A;
6804 if (insn & (1 << 7))
6805 mask |= CPSR_I;
6806 if (insn & (1 << 6))
6807 mask |= CPSR_F;
6808 if (insn & (1 << 18))
6809 val |= mask;
6810 }
6811 if (insn & (1 << 17)) {
6812 mask |= CPSR_M;
6813 val |= (insn & 0x1f);
6814 }
6815 if (mask) {
6816 gen_set_psr_im(s, mask, 0, val);
6817 }
6818 return;
6819 }
6820 goto illegal_op;
6821 }
6822 if (cond != 0xe) {
6823 /* if not always execute, we generate a conditional jump to
6824 next instruction */
6825 s->condlabel = gen_new_label();
6826 gen_test_cc(cond ^ 1, s->condlabel);
6827 s->condjmp = 1;
6828 }
6829 if ((insn & 0x0f900000) == 0x03000000) {
6830 if ((insn & (1 << 21)) == 0) {
6831 ARCH(6T2);
6832 rd = (insn >> 12) & 0xf;
6833 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6834 if ((insn & (1 << 22)) == 0) {
6835 /* MOVW */
6836 tmp = tcg_temp_new_i32();
6837 tcg_gen_movi_i32(tmp, val);
6838 } else {
6839 /* MOVT */
6840 tmp = load_reg(s, rd);
6841 tcg_gen_ext16u_i32(tmp, tmp);
6842 tcg_gen_ori_i32(tmp, tmp, val << 16);
6843 }
6844 store_reg(s, rd, tmp);
6845 } else {
6846 if (((insn >> 12) & 0xf) != 0xf)
6847 goto illegal_op;
6848 if (((insn >> 16) & 0xf) == 0) {
6849 gen_nop_hint(s, insn & 0xff);
6850 } else {
6851 /* CPSR = immediate */
6852 val = insn & 0xff;
6853 shift = ((insn >> 8) & 0xf) * 2;
6854 if (shift)
6855 val = (val >> shift) | (val << (32 - shift));
6856 i = ((insn & (1 << 22)) != 0);
6857 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6858 goto illegal_op;
6859 }
6860 }
6861 } else if ((insn & 0x0f900000) == 0x01000000
6862 && (insn & 0x00000090) != 0x00000090) {
6863 /* miscellaneous instructions */
6864 op1 = (insn >> 21) & 3;
6865 sh = (insn >> 4) & 0xf;
6866 rm = insn & 0xf;
6867 switch (sh) {
6868 case 0x0: /* move program status register */
6869 if (op1 & 1) {
6870 /* PSR = reg */
6871 tmp = load_reg(s, rm);
6872 i = ((op1 & 2) != 0);
6873 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6874 goto illegal_op;
6875 } else {
6876 /* reg = PSR */
6877 rd = (insn >> 12) & 0xf;
6878 if (op1 & 2) {
6879 if (IS_USER(s))
6880 goto illegal_op;
6881 tmp = load_cpu_field(spsr);
6882 } else {
6883 tmp = tcg_temp_new_i32();
6884 gen_helper_cpsr_read(tmp);
6885 }
6886 store_reg(s, rd, tmp);
6887 }
6888 break;
6889 case 0x1:
6890 if (op1 == 1) {
6891 /* branch/exchange thumb (bx). */
6892 ARCH(4T);
6893 tmp = load_reg(s, rm);
6894 gen_bx(s, tmp);
6895 } else if (op1 == 3) {
6896 /* clz */
6897 ARCH(5);
6898 rd = (insn >> 12) & 0xf;
6899 tmp = load_reg(s, rm);
6900 gen_helper_clz(tmp, tmp);
6901 store_reg(s, rd, tmp);
6902 } else {
6903 goto illegal_op;
6904 }
6905 break;
6906 case 0x2:
6907 if (op1 == 1) {
6908 ARCH(5J); /* bxj */
6909 /* Trivial implementation equivalent to bx. */
6910 tmp = load_reg(s, rm);
6911 gen_bx(s, tmp);
6912 } else {
6913 goto illegal_op;
6914 }
6915 break;
6916 case 0x3:
6917 if (op1 != 1)
6918 goto illegal_op;
6919
6920 ARCH(5);
6921 /* branch link/exchange thumb (blx) */
6922 tmp = load_reg(s, rm);
6923 tmp2 = tcg_temp_new_i32();
6924 tcg_gen_movi_i32(tmp2, s->pc);
6925 store_reg(s, 14, tmp2);
6926 gen_bx(s, tmp);
6927 break;
6928 case 0x5: /* saturating add/subtract */
6929 ARCH(5TE);
6930 rd = (insn >> 12) & 0xf;
6931 rn = (insn >> 16) & 0xf;
6932 tmp = load_reg(s, rm);
6933 tmp2 = load_reg(s, rn);
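            /* op1 bit 1 selects the doubling forms (QDADD/QDSUB); bit 0 selects
             * subtract (QSUB/QDSUB) rather than add (QADD/QDADD).
             */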
6934 if (op1 & 2)
6935 gen_helper_double_saturate(tmp2, tmp2);
6936 if (op1 & 1)
6937 gen_helper_sub_saturate(tmp, tmp, tmp2);
6938 else
6939 gen_helper_add_saturate(tmp, tmp, tmp2);
6940 tcg_temp_free_i32(tmp2);
6941 store_reg(s, rd, tmp);
6942 break;
6943 case 7:
6944 /* SMC instruction (op1 == 3)
6945 and undefined instructions (op1 == 0 || op1 == 2)
6946 will trap */
6947 if (op1 != 1) {
6948 goto illegal_op;
6949 }
6950 /* bkpt */
6951 ARCH(5);
6952 gen_exception_insn(s, 4, EXCP_BKPT);
6953 break;
6954 case 0x8: /* signed multiply */
6955 case 0xa:
6956 case 0xc:
6957 case 0xe:
6958 ARCH(5TE);
6959 rs = (insn >> 8) & 0xf;
6960 rn = (insn >> 12) & 0xf;
6961 rd = (insn >> 16) & 0xf;
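            /* op1 selects the variant: 0 = SMLA<x><y>, 1 = SMLAW<y>/SMULW<y>,
             * 2 = SMLAL<x><y>, 3 = SMUL<x><y>.
             */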
6962 if (op1 == 1) {
6963 /* (32 * 16) >> 16 */
6964 tmp = load_reg(s, rm);
6965 tmp2 = load_reg(s, rs);
6966 if (sh & 4)
6967 tcg_gen_sari_i32(tmp2, tmp2, 16);
6968 else
6969 gen_sxth(tmp2);
6970 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6971 tcg_gen_shri_i64(tmp64, tmp64, 16);
6972 tmp = tcg_temp_new_i32();
6973 tcg_gen_trunc_i64_i32(tmp, tmp64);
6974 tcg_temp_free_i64(tmp64);
6975 if ((sh & 2) == 0) {
6976 tmp2 = load_reg(s, rn);
6977 gen_helper_add_setq(tmp, tmp, tmp2);
6978 tcg_temp_free_i32(tmp2);
6979 }
6980 store_reg(s, rd, tmp);
6981 } else {
6982 /* 16 * 16 */
6983 tmp = load_reg(s, rm);
6984 tmp2 = load_reg(s, rs);
6985 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6986 tcg_temp_free_i32(tmp2);
6987 if (op1 == 2) {
6988 tmp64 = tcg_temp_new_i64();
6989 tcg_gen_ext_i32_i64(tmp64, tmp);
6990 tcg_temp_free_i32(tmp);
6991 gen_addq(s, tmp64, rn, rd);
6992 gen_storeq_reg(s, rn, rd, tmp64);
6993 tcg_temp_free_i64(tmp64);
6994 } else {
6995 if (op1 == 0) {
6996 tmp2 = load_reg(s, rn);
6997 gen_helper_add_setq(tmp, tmp, tmp2);
6998 tcg_temp_free_i32(tmp2);
6999 }
7000 store_reg(s, rd, tmp);
7001 }
7002 }
7003 break;
7004 default:
7005 goto illegal_op;
7006 }
7007 } else if (((insn & 0x0e000000) == 0 &&
7008 (insn & 0x00000090) != 0x90) ||
7009 ((insn & 0x0e000000) == (1 << 25))) {
7010 int set_cc, logic_cc, shiftop;
7011
7012 op1 = (insn >> 21) & 0xf;
7013 set_cc = (insn >> 20) & 1;
7014 logic_cc = table_logic_cc[op1] & set_cc;
7015
7016 /* data processing instruction */
7017 if (insn & (1 << 25)) {
7018 /* immediate operand */
7019 val = insn & 0xff;
7020 shift = ((insn >> 8) & 0xf) * 2;
7021 if (shift) {
7022 val = (val >> shift) | (val << (32 - shift));
7023 }
7024 tmp2 = tcg_temp_new_i32();
7025 tcg_gen_movi_i32(tmp2, val);
7026 if (logic_cc && shift) {
7027 gen_set_CF_bit31(tmp2);
7028 }
7029 } else {
7030 /* register */
7031 rm = (insn) & 0xf;
7032 tmp2 = load_reg(s, rm);
7033 shiftop = (insn >> 5) & 3;
7034 if (!(insn & (1 << 4))) {
7035 shift = (insn >> 7) & 0x1f;
7036 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7037 } else {
7038 rs = (insn >> 8) & 0xf;
7039 tmp = load_reg(s, rs);
7040 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
7041 }
7042 }
7043 if (op1 != 0x0f && op1 != 0x0d) {
7044 rn = (insn >> 16) & 0xf;
7045 tmp = load_reg(s, rn);
7046 } else {
7047 TCGV_UNUSED(tmp);
7048 }
7049 rd = (insn >> 12) & 0xf;
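        /* op1 is the standard ARM data-processing opcode:
         * 0 AND, 1 EOR, 2 SUB, 3 RSB, 4 ADD, 5 ADC, 6 SBC, 7 RSC,
         * 8 TST, 9 TEQ, a CMP, b CMN, c ORR, d MOV, e BIC, f MVN.
         */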
7050 switch(op1) {
7051 case 0x00:
7052 tcg_gen_and_i32(tmp, tmp, tmp2);
7053 if (logic_cc) {
7054 gen_logic_CC(tmp);
7055 }
7056 store_reg_bx(env, s, rd, tmp);
7057 break;
7058 case 0x01:
7059 tcg_gen_xor_i32(tmp, tmp, tmp2);
7060 if (logic_cc) {
7061 gen_logic_CC(tmp);
7062 }
7063 store_reg_bx(env, s, rd, tmp);
7064 break;
7065 case 0x02:
7066 if (set_cc && rd == 15) {
7067 /* SUBS r15, ... is used for exception return. */
7068 if (IS_USER(s)) {
7069 goto illegal_op;
7070 }
7071 gen_helper_sub_cc(tmp, tmp, tmp2);
7072 gen_exception_return(s, tmp);
7073 } else {
7074 if (set_cc) {
7075 gen_helper_sub_cc(tmp, tmp, tmp2);
7076 } else {
7077 tcg_gen_sub_i32(tmp, tmp, tmp2);
7078 }
7079 store_reg_bx(env, s, rd, tmp);
7080 }
7081 break;
7082 case 0x03:
7083 if (set_cc) {
7084 gen_helper_sub_cc(tmp, tmp2, tmp);
7085 } else {
7086 tcg_gen_sub_i32(tmp, tmp2, tmp);
7087 }
7088 store_reg_bx(env, s, rd, tmp);
7089 break;
7090 case 0x04:
7091 if (set_cc) {
7092 gen_helper_add_cc(tmp, tmp, tmp2);
7093 } else {
7094 tcg_gen_add_i32(tmp, tmp, tmp2);
7095 }
7096 store_reg_bx(env, s, rd, tmp);
7097 break;
7098 case 0x05:
7099 if (set_cc) {
7100 gen_helper_adc_cc(tmp, tmp, tmp2);
7101 } else {
7102 gen_add_carry(tmp, tmp, tmp2);
7103 }
7104 store_reg_bx(env, s, rd, tmp);
7105 break;
7106 case 0x06:
7107 if (set_cc) {
7108 gen_helper_sbc_cc(tmp, tmp, tmp2);
7109 } else {
7110 gen_sub_carry(tmp, tmp, tmp2);
7111 }
7112 store_reg_bx(env, s, rd, tmp);
7113 break;
7114 case 0x07:
7115 if (set_cc) {
7116 gen_helper_sbc_cc(tmp, tmp2, tmp);
7117 } else {
7118 gen_sub_carry(tmp, tmp2, tmp);
7119 }
7120 store_reg_bx(env, s, rd, tmp);
7121 break;
7122 case 0x08:
7123 if (set_cc) {
7124 tcg_gen_and_i32(tmp, tmp, tmp2);
7125 gen_logic_CC(tmp);
7126 }
7127 tcg_temp_free_i32(tmp);
7128 break;
7129 case 0x09:
7130 if (set_cc) {
7131 tcg_gen_xor_i32(tmp, tmp, tmp2);
7132 gen_logic_CC(tmp);
7133 }
7134 tcg_temp_free_i32(tmp);
7135 break;
7136 case 0x0a:
7137 if (set_cc) {
7138 gen_helper_sub_cc(tmp, tmp, tmp2);
7139 }
7140 tcg_temp_free_i32(tmp);
7141 break;
7142 case 0x0b:
7143 if (set_cc) {
7144 gen_helper_add_cc(tmp, tmp, tmp2);
7145 }
7146 tcg_temp_free_i32(tmp);
7147 break;
7148 case 0x0c:
7149 tcg_gen_or_i32(tmp, tmp, tmp2);
7150 if (logic_cc) {
7151 gen_logic_CC(tmp);
7152 }
7153 store_reg_bx(env, s, rd, tmp);
7154 break;
7155 case 0x0d:
7156 if (logic_cc && rd == 15) {
7157 /* MOVS r15, ... is used for exception return. */
7158 if (IS_USER(s)) {
7159 goto illegal_op;
7160 }
7161 gen_exception_return(s, tmp2);
7162 } else {
7163 if (logic_cc) {
7164 gen_logic_CC(tmp2);
7165 }
7166 store_reg_bx(env, s, rd, tmp2);
7167 }
7168 break;
7169 case 0x0e:
7170 tcg_gen_andc_i32(tmp, tmp, tmp2);
7171 if (logic_cc) {
7172 gen_logic_CC(tmp);
7173 }
7174 store_reg_bx(env, s, rd, tmp);
7175 break;
7176 default:
7177 case 0x0f:
7178 tcg_gen_not_i32(tmp2, tmp2);
7179 if (logic_cc) {
7180 gen_logic_CC(tmp2);
7181 }
7182 store_reg_bx(env, s, rd, tmp2);
7183 break;
7184 }
7185 if (op1 != 0x0f && op1 != 0x0d) {
7186 tcg_temp_free_i32(tmp2);
7187 }
7188 } else {
7189 /* other instructions */
7190 op1 = (insn >> 24) & 0xf;
7191 switch(op1) {
7192 case 0x0:
7193 case 0x1:
7194 /* multiplies, extra load/stores */
7195 sh = (insn >> 5) & 3;
7196 if (sh == 0) {
7197 if (op1 == 0x0) {
7198 rd = (insn >> 16) & 0xf;
7199 rn = (insn >> 12) & 0xf;
7200 rs = (insn >> 8) & 0xf;
7201 rm = (insn) & 0xf;
7202 op1 = (insn >> 20) & 0xf;
7203 switch (op1) {
7204 case 0: case 1: case 2: case 3: case 6:
7205 /* 32 bit mul */
7206 tmp = load_reg(s, rs);
7207 tmp2 = load_reg(s, rm);
7208 tcg_gen_mul_i32(tmp, tmp, tmp2);
7209 tcg_temp_free_i32(tmp2);
7210 if (insn & (1 << 22)) {
7211 /* Subtract (mls) */
7212 ARCH(6T2);
7213 tmp2 = load_reg(s, rn);
7214 tcg_gen_sub_i32(tmp, tmp2, tmp);
7215 tcg_temp_free_i32(tmp2);
7216 } else if (insn & (1 << 21)) {
7217 /* Add */
7218 tmp2 = load_reg(s, rn);
7219 tcg_gen_add_i32(tmp, tmp, tmp2);
7220 tcg_temp_free_i32(tmp2);
7221 }
7222 if (insn & (1 << 20))
7223 gen_logic_CC(tmp);
7224 store_reg(s, rd, tmp);
7225 break;
7226 case 4:
7227 /* 64 bit mul double accumulate (UMAAL) */
7228 ARCH(6);
7229 tmp = load_reg(s, rs);
7230 tmp2 = load_reg(s, rm);
7231 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7232 gen_addq_lo(s, tmp64, rn);
7233 gen_addq_lo(s, tmp64, rd);
7234 gen_storeq_reg(s, rn, rd, tmp64);
7235 tcg_temp_free_i64(tmp64);
7236 break;
7237 case 8: case 9: case 10: case 11:
7238 case 12: case 13: case 14: case 15:
7239 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7240 tmp = load_reg(s, rs);
7241 tmp2 = load_reg(s, rm);
7242 if (insn & (1 << 22)) {
7243 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7244 } else {
7245 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7246 }
7247 if (insn & (1 << 21)) { /* mult accumulate */
7248 gen_addq(s, tmp64, rn, rd);
7249 }
7250 if (insn & (1 << 20)) {
7251 gen_logicq_cc(tmp64);
7252 }
7253 gen_storeq_reg(s, rn, rd, tmp64);
7254 tcg_temp_free_i64(tmp64);
7255 break;
7256 default:
7257 goto illegal_op;
7258 }
7259 } else {
7260 rn = (insn >> 16) & 0xf;
7261 rd = (insn >> 12) & 0xf;
7262 if (insn & (1 << 23)) {
7263 /* load/store exclusive */
7264 op1 = (insn >> 21) & 0x3;
7265 if (op1)
7266 ARCH(6K);
7267 else
7268 ARCH(6);
7269 addr = tcg_temp_local_new_i32();
7270 load_reg_var(s, addr, rn);
7271 if (insn & (1 << 20)) {
7272 switch (op1) {
7273 case 0: /* ldrex */
7274 gen_load_exclusive(s, rd, 15, addr, 2);
7275 break;
7276 case 1: /* ldrexd */
7277 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7278 break;
7279 case 2: /* ldrexb */
7280 gen_load_exclusive(s, rd, 15, addr, 0);
7281 break;
7282 case 3: /* ldrexh */
7283 gen_load_exclusive(s, rd, 15, addr, 1);
7284 break;
7285 default:
7286 abort();
7287 }
7288 } else {
7289 rm = insn & 0xf;
7290 switch (op1) {
7291 case 0: /* strex */
7292 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7293 break;
7294 case 1: /* strexd */
7295 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7296 break;
7297 case 2: /* strexb */
7298 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7299 break;
7300 case 3: /* strexh */
7301 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7302 break;
7303 default:
7304 abort();
7305 }
7306 }
7307 tcg_temp_free(addr);
7308 } else {
7309 /* SWP instruction */
7310 rm = (insn) & 0xf;
7311
7312 /* ??? This is not really atomic. However we know
7313 we never have multiple CPUs running in parallel,
7314 so it is good enough. */
7315 addr = load_reg(s, rn);
7316 tmp = load_reg(s, rm);
7317 if (insn & (1 << 22)) {
7318 tmp2 = gen_ld8u(addr, IS_USER(s));
7319 gen_st8(tmp, addr, IS_USER(s));
7320 } else {
7321 tmp2 = gen_ld32(addr, IS_USER(s));
7322 gen_st32(tmp, addr, IS_USER(s));
7323 }
7324 tcg_temp_free_i32(addr);
7325 store_reg(s, rd, tmp2);
7326 }
7327 }
7328 } else {
7329 int address_offset;
7330 int load;
7331 /* Misc load/store */
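                /* With the L bit set, sh selects LDRH (1), LDRSB (2) or LDRSH (3);
                 * with L clear, sh == 1 is STRH and sh == 2/3 are LDRD/STRD.
                 */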
7332 rn = (insn >> 16) & 0xf;
7333 rd = (insn >> 12) & 0xf;
7334 addr = load_reg(s, rn);
7335 if (insn & (1 << 24))
7336 gen_add_datah_offset(s, insn, 0, addr);
7337 address_offset = 0;
7338 if (insn & (1 << 20)) {
7339 /* load */
7340 switch(sh) {
7341 case 1:
7342 tmp = gen_ld16u(addr, IS_USER(s));
7343 break;
7344 case 2:
7345 tmp = gen_ld8s(addr, IS_USER(s));
7346 break;
7347 default:
7348 case 3:
7349 tmp = gen_ld16s(addr, IS_USER(s));
7350 break;
7351 }
7352 load = 1;
7353 } else if (sh & 2) {
7354 ARCH(5TE);
7355 /* doubleword */
7356 if (sh & 1) {
7357 /* store */
7358 tmp = load_reg(s, rd);
7359 gen_st32(tmp, addr, IS_USER(s));
7360 tcg_gen_addi_i32(addr, addr, 4);
7361 tmp = load_reg(s, rd + 1);
7362 gen_st32(tmp, addr, IS_USER(s));
7363 load = 0;
7364 } else {
7365 /* load */
7366 tmp = gen_ld32(addr, IS_USER(s));
7367 store_reg(s, rd, tmp);
7368 tcg_gen_addi_i32(addr, addr, 4);
7369 tmp = gen_ld32(addr, IS_USER(s));
7370 rd++;
7371 load = 1;
7372 }
7373 address_offset = -4;
7374 } else {
7375 /* store */
7376 tmp = load_reg(s, rd);
7377 gen_st16(tmp, addr, IS_USER(s));
7378 load = 0;
7379 }
7380 /* Perform base writeback before the loaded value to
7381 ensure correct behavior with overlapping index registers.
7382 ldrd with base writeback is undefined if the
7383 destination and index registers overlap. */
7384 if (!(insn & (1 << 24))) {
7385 gen_add_datah_offset(s, insn, address_offset, addr);
7386 store_reg(s, rn, addr);
7387 } else if (insn & (1 << 21)) {
7388 if (address_offset)
7389 tcg_gen_addi_i32(addr, addr, address_offset);
7390 store_reg(s, rn, addr);
7391 } else {
7392 tcg_temp_free_i32(addr);
7393 }
7394 if (load) {
7395 /* Complete the load. */
7396 store_reg(s, rd, tmp);
7397 }
7398 }
7399 break;
7400 case 0x4:
7401 case 0x5:
7402 goto do_ldst;
7403 case 0x6:
7404 case 0x7:
7405 if (insn & (1 << 4)) {
7406 ARCH(6);
7407 /* ARMv6 media instructions. */
7408 rm = insn & 0xf;
7409 rn = (insn >> 16) & 0xf;
7410 rd = (insn >> 12) & 0xf;
7411 rs = (insn >> 8) & 0xf;
7412 switch ((insn >> 23) & 3) {
7413 case 0: /* Parallel add/subtract. */
7414 op1 = (insn >> 20) & 7;
7415 tmp = load_reg(s, rn);
7416 tmp2 = load_reg(s, rm);
7417 sh = (insn >> 5) & 7;
7418 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7419 goto illegal_op;
7420 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7421 tcg_temp_free_i32(tmp2);
7422 store_reg(s, rd, tmp);
7423 break;
7424 case 1:
7425 if ((insn & 0x00700020) == 0) {
7426 /* Halfword pack. */
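                            /* PKHBT keeps Rn[15:0] and takes the top half from the
                             * left-shifted Rm; PKHTB keeps Rn[31:16] and takes the
                             * bottom half from the arithmetically right-shifted Rm.
                             */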
7427 tmp = load_reg(s, rn);
7428 tmp2 = load_reg(s, rm);
7429 shift = (insn >> 7) & 0x1f;
7430 if (insn & (1 << 6)) {
7431 /* pkhtb */
7432 if (shift == 0)
7433 shift = 31;
7434 tcg_gen_sari_i32(tmp2, tmp2, shift);
7435 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7436 tcg_gen_ext16u_i32(tmp2, tmp2);
7437 } else {
7438 /* pkhbt */
7439 if (shift)
7440 tcg_gen_shli_i32(tmp2, tmp2, shift);
7441 tcg_gen_ext16u_i32(tmp, tmp);
7442 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7443 }
7444 tcg_gen_or_i32(tmp, tmp, tmp2);
7445 tcg_temp_free_i32(tmp2);
7446 store_reg(s, rd, tmp);
7447 } else if ((insn & 0x00200020) == 0x00200000) {
7448 /* [us]sat */
7449 tmp = load_reg(s, rm);
7450 shift = (insn >> 7) & 0x1f;
7451 if (insn & (1 << 6)) {
7452 if (shift == 0)
7453 shift = 31;
7454 tcg_gen_sari_i32(tmp, tmp, shift);
7455 } else {
7456 tcg_gen_shli_i32(tmp, tmp, shift);
7457 }
7458 sh = (insn >> 16) & 0x1f;
7459 tmp2 = tcg_const_i32(sh);
7460 if (insn & (1 << 22))
7461 gen_helper_usat(tmp, tmp, tmp2);
7462 else
7463 gen_helper_ssat(tmp, tmp, tmp2);
7464 tcg_temp_free_i32(tmp2);
7465 store_reg(s, rd, tmp);
7466 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7467 /* [us]sat16 */
7468 tmp = load_reg(s, rm);
7469 sh = (insn >> 16) & 0x1f;
7470 tmp2 = tcg_const_i32(sh);
7471 if (insn & (1 << 22))
7472 gen_helper_usat16(tmp, tmp, tmp2);
7473 else
7474 gen_helper_ssat16(tmp, tmp, tmp2);
7475 tcg_temp_free_i32(tmp2);
7476 store_reg(s, rd, tmp);
7477 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7478 /* Select bytes. */
7479 tmp = load_reg(s, rn);
7480 tmp2 = load_reg(s, rm);
7481 tmp3 = tcg_temp_new_i32();
7482 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7483 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7484 tcg_temp_free_i32(tmp3);
7485 tcg_temp_free_i32(tmp2);
7486 store_reg(s, rd, tmp);
7487 } else if ((insn & 0x000003e0) == 0x00000060) {
7488 tmp = load_reg(s, rm);
7489 shift = (insn >> 10) & 3;
7490 /* ??? In many cases it's not necessary to do a
7491 rotate; a shift is sufficient. */
7492 if (shift != 0)
7493 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7494 op1 = (insn >> 20) & 7;
7495 switch (op1) {
7496 case 0: gen_sxtb16(tmp); break;
7497 case 2: gen_sxtb(tmp); break;
7498 case 3: gen_sxth(tmp); break;
7499 case 4: gen_uxtb16(tmp); break;
7500 case 6: gen_uxtb(tmp); break;
7501 case 7: gen_uxth(tmp); break;
7502 default: goto illegal_op;
7503 }
7504 if (rn != 15) {
7505 tmp2 = load_reg(s, rn);
7506 if ((op1 & 3) == 0) {
7507 gen_add16(tmp, tmp2);
7508 } else {
7509 tcg_gen_add_i32(tmp, tmp, tmp2);
7510 tcg_temp_free_i32(tmp2);
7511 }
7512 }
7513 store_reg(s, rd, tmp);
7514 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7515 /* rev */
7516 tmp = load_reg(s, rm);
7517 if (insn & (1 << 22)) {
7518 if (insn & (1 << 7)) {
7519 gen_revsh(tmp);
7520 } else {
7521 ARCH(6T2);
7522 gen_helper_rbit(tmp, tmp);
7523 }
7524 } else {
7525 if (insn & (1 << 7))
7526 gen_rev16(tmp);
7527 else
7528 tcg_gen_bswap32_i32(tmp, tmp);
7529 }
7530 store_reg(s, rd, tmp);
7531 } else {
7532 goto illegal_op;
7533 }
7534 break;
7535 case 2: /* Multiplies (Type 3). */
7536 tmp = load_reg(s, rm);
7537 tmp2 = load_reg(s, rs);
7538 if (insn & (1 << 20)) {
7539 /* Signed multiply most significant [accumulate].
7540 (SMMUL, SMMLA, SMMLS) */
7541 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7542
7543 if (rd != 15) {
7544 tmp = load_reg(s, rd);
7545 if (insn & (1 << 6)) {
7546 tmp64 = gen_subq_msw(tmp64, tmp);
7547 } else {
7548 tmp64 = gen_addq_msw(tmp64, tmp);
7549 }
7550 }
7551 if (insn & (1 << 5)) {
7552 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7553 }
7554 tcg_gen_shri_i64(tmp64, tmp64, 32);
7555 tmp = tcg_temp_new_i32();
7556 tcg_gen_trunc_i64_i32(tmp, tmp64);
7557 tcg_temp_free_i64(tmp64);
7558 store_reg(s, rn, tmp);
7559 } else {
7560 if (insn & (1 << 5))
7561 gen_swap_half(tmp2);
7562 gen_smul_dual(tmp, tmp2);
7563 if (insn & (1 << 6)) {
7564 /* This subtraction cannot overflow. */
7565 tcg_gen_sub_i32(tmp, tmp, tmp2);
7566 } else {
7567 /* This addition cannot overflow 32 bits;
7568 * however it may overflow when considered as a signed
7569 * operation, in which case we must set the Q flag.
7570 */
7571 gen_helper_add_setq(tmp, tmp, tmp2);
7572 }
7573 tcg_temp_free_i32(tmp2);
7574 if (insn & (1 << 22)) {
7575 /* smlald, smlsld */
7576 tmp64 = tcg_temp_new_i64();
7577 tcg_gen_ext_i32_i64(tmp64, tmp);
7578 tcg_temp_free_i32(tmp);
7579 gen_addq(s, tmp64, rd, rn);
7580 gen_storeq_reg(s, rd, rn, tmp64);
7581 tcg_temp_free_i64(tmp64);
7582 } else {
7583 /* smuad, smusd, smlad, smlsd */
7584 if (rd != 15)
7585 {
7586 tmp2 = load_reg(s, rd);
7587 gen_helper_add_setq(tmp, tmp, tmp2);
7588 tcg_temp_free_i32(tmp2);
7589 }
7590 store_reg(s, rn, tmp);
7591 }
7592 }
7593 break;
7594 case 3:
7595 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7596 switch (op1) {
7597 case 0: /* Unsigned sum of absolute differences. */
7598 ARCH(6);
7599 tmp = load_reg(s, rm);
7600 tmp2 = load_reg(s, rs);
7601 gen_helper_usad8(tmp, tmp, tmp2);
7602 tcg_temp_free_i32(tmp2);
7603 if (rd != 15) {
7604 tmp2 = load_reg(s, rd);
7605 tcg_gen_add_i32(tmp, tmp, tmp2);
7606 tcg_temp_free_i32(tmp2);
7607 }
7608 store_reg(s, rn, tmp);
7609 break;
7610 case 0x20: case 0x24: case 0x28: case 0x2c:
7611 /* Bitfield insert/clear. */
7612 ARCH(6T2);
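                        /* shift is the lsb and i becomes the field width
                         * (msb + 1 - lsb); Rm == 15 means BFC, i.e. insert zeroes.
                         */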
7613 shift = (insn >> 7) & 0x1f;
7614 i = (insn >> 16) & 0x1f;
7615 i = i + 1 - shift;
7616 if (rm == 15) {
7617 tmp = tcg_temp_new_i32();
7618 tcg_gen_movi_i32(tmp, 0);
7619 } else {
7620 tmp = load_reg(s, rm);
7621 }
7622 if (i != 32) {
7623 tmp2 = load_reg(s, rd);
7624 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7625 tcg_temp_free_i32(tmp2);
7626 }
7627 store_reg(s, rd, tmp);
7628 break;
7629 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7630 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7631 ARCH(6T2);
7632 tmp = load_reg(s, rm);
7633 shift = (insn >> 7) & 0x1f;
7634 i = ((insn >> 16) & 0x1f) + 1;
7635 if (shift + i > 32)
7636 goto illegal_op;
7637 if (i < 32) {
7638 if (op1 & 0x20) {
7639 gen_ubfx(tmp, shift, (1u << i) - 1);
7640 } else {
7641 gen_sbfx(tmp, shift, i);
7642 }
7643 }
7644 store_reg(s, rd, tmp);
7645 break;
7646 default:
7647 goto illegal_op;
7648 }
7649 break;
7650 }
7651 break;
7652 }
7653 do_ldst:
7654 /* Check for undefined extension instructions
7655 * per the ARM Bible, i.e.:
7656 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7657 */
7658 sh = (0xf << 20) | (0xf << 4);
7659 if (op1 == 0x7 && ((insn & sh) == sh))
7660 {
7661 goto illegal_op;
7662 }
7663 /* load/store byte/word */
7664 rn = (insn >> 16) & 0xf;
7665 rd = (insn >> 12) & 0xf;
7666 tmp2 = load_reg(s, rn);
7667 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7668 if (insn & (1 << 24))
7669 gen_add_data_offset(s, insn, tmp2);
7670 if (insn & (1 << 20)) {
7671 /* load */
7672 if (insn & (1 << 22)) {
7673 tmp = gen_ld8u(tmp2, i);
7674 } else {
7675 tmp = gen_ld32(tmp2, i);
7676 }
7677 } else {
7678 /* store */
7679 tmp = load_reg(s, rd);
7680 if (insn & (1 << 22))
7681 gen_st8(tmp, tmp2, i);
7682 else
7683 gen_st32(tmp, tmp2, i);
7684 }
7685 if (!(insn & (1 << 24))) {
7686 gen_add_data_offset(s, insn, tmp2);
7687 store_reg(s, rn, tmp2);
7688 } else if (insn & (1 << 21)) {
7689 store_reg(s, rn, tmp2);
7690 } else {
7691 tcg_temp_free_i32(tmp2);
7692 }
7693 if (insn & (1 << 20)) {
7694 /* Complete the load. */
7695 store_reg_from_load(env, s, rd, tmp);
7696 }
7697 break;
7698 case 0x08:
7699 case 0x09:
7700 {
7701 int j, n, user, loaded_base;
7702 TCGv loaded_var;
7703 /* load/store multiple words */
7704 /* XXX: store correct base if write back */
7705 user = 0;
7706 if (insn & (1 << 22)) {
7707 if (IS_USER(s))
7708 goto illegal_op; /* only usable in supervisor mode */
7709
7710 if ((insn & (1 << 15)) == 0)
7711 user = 1;
7712 }
7713 rn = (insn >> 16) & 0xf;
7714 addr = load_reg(s, rn);
7715
7716 /* compute total size */
7717 loaded_base = 0;
7718 TCGV_UNUSED(loaded_var);
7719 n = 0;
7720 for(i=0;i<16;i++) {
7721 if (insn & (1 << i))
7722 n++;
7723 }
7724 /* XXX: test invalid n == 0 case ? */
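                /* Adjust addr so it points at the lowest address that will be
                 * transferred; the transfer loop below then always steps upwards
                 * by 4, whatever the addressing mode.
                 */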
7725 if (insn & (1 << 23)) {
7726 if (insn & (1 << 24)) {
7727 /* pre increment */
7728 tcg_gen_addi_i32(addr, addr, 4);
7729 } else {
7730 /* post increment */
7731 }
7732 } else {
7733 if (insn & (1 << 24)) {
7734 /* pre decrement */
7735 tcg_gen_addi_i32(addr, addr, -(n * 4));
7736 } else {
7737 /* post decrement */
7738 if (n != 1)
7739 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7740 }
7741 }
7742 j = 0;
7743 for(i=0;i<16;i++) {
7744 if (insn & (1 << i)) {
7745 if (insn & (1 << 20)) {
7746 /* load */
7747 tmp = gen_ld32(addr, IS_USER(s));
7748 if (user) {
7749 tmp2 = tcg_const_i32(i);
7750 gen_helper_set_user_reg(tmp2, tmp);
7751 tcg_temp_free_i32(tmp2);
7752 tcg_temp_free_i32(tmp);
7753 } else if (i == rn) {
7754 loaded_var = tmp;
7755 loaded_base = 1;
7756 } else {
7757 store_reg_from_load(env, s, i, tmp);
7758 }
7759 } else {
7760 /* store */
7761 if (i == 15) {
7762 /* special case: r15 = PC + 8 */
7763 val = (long)s->pc + 4;
7764 tmp = tcg_temp_new_i32();
7765 tcg_gen_movi_i32(tmp, val);
7766 } else if (user) {
7767 tmp = tcg_temp_new_i32();
7768 tmp2 = tcg_const_i32(i);
7769 gen_helper_get_user_reg(tmp, tmp2);
7770 tcg_temp_free_i32(tmp2);
7771 } else {
7772 tmp = load_reg(s, i);
7773 }
7774 gen_st32(tmp, addr, IS_USER(s));
7775 }
7776 j++;
7777 /* no need to add after the last transfer */
7778 if (j != n)
7779 tcg_gen_addi_i32(addr, addr, 4);
7780 }
7781 }
7782 if (insn & (1 << 21)) {
7783 /* write back */
7784 if (insn & (1 << 23)) {
7785 if (insn & (1 << 24)) {
7786 /* pre increment */
7787 } else {
7788 /* post increment */
7789 tcg_gen_addi_i32(addr, addr, 4);
7790 }
7791 } else {
7792 if (insn & (1 << 24)) {
7793 /* pre decrement */
7794 if (n != 1)
7795 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7796 } else {
7797 /* post decrement */
7798 tcg_gen_addi_i32(addr, addr, -(n * 4));
7799 }
7800 }
7801 store_reg(s, rn, addr);
7802 } else {
7803 tcg_temp_free_i32(addr);
7804 }
7805 if (loaded_base) {
7806 store_reg(s, rn, loaded_var);
7807 }
7808 if ((insn & (1 << 22)) && !user) {
7809 /* Restore CPSR from SPSR. */
7810 tmp = load_cpu_field(spsr);
7811 gen_set_cpsr(tmp, 0xffffffff);
7812 tcg_temp_free_i32(tmp);
7813 s->is_jmp = DISAS_UPDATE;
7814 }
7815 }
7816 break;
7817 case 0xa:
7818 case 0xb:
7819 {
7820 int32_t offset;
7821
7822 /* branch (and link) */
7823 val = (int32_t)s->pc;
7824 if (insn & (1 << 24)) {
7825 tmp = tcg_temp_new_i32();
7826 tcg_gen_movi_i32(tmp, val);
7827 store_reg(s, 14, tmp);
7828 }
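            /* Branch target is this insn's address + 8 + signext(imm24) * 4;
             * s->pc already points 4 bytes past the insn, hence the extra + 4.
             */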
7829 offset = (((int32_t)insn << 8) >> 8);
7830 val += (offset << 2) + 4;
7831 gen_jmp(s, val);
7832 }
7833 break;
7834 case 0xc:
7835 case 0xd:
7836 case 0xe:
7837 /* Coprocessor. */
7838 if (disas_coproc_insn(env, s, insn))
7839 goto illegal_op;
7840 break;
7841 case 0xf:
7842 /* swi */
7843 gen_set_pc_im(s->pc);
7844 s->is_jmp = DISAS_SWI;
7845 break;
7846 default:
7847 illegal_op:
7848 gen_exception_insn(s, 4, EXCP_UDEF);
7849 break;
7850 }
7851 }
7852 }
7853
7854 /* Return true if this is a Thumb-2 logical op. */
7855 static int
7856 thumb2_logic_op(int op)
7857 {
7858 return (op < 8);
7859 }
7860
7861 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7862 then set condition code flags based on the result of the operation.
7863 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7864 to the high bit of T1.
7865 Returns zero if the opcode is valid. */
7866
7867 static int
7868 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7869 {
7870 int logic_cc;
7871
7872 logic_cc = 0;
7873 switch (op) {
7874 case 0: /* and */
7875 tcg_gen_and_i32(t0, t0, t1);
7876 logic_cc = conds;
7877 break;
7878 case 1: /* bic */
7879 tcg_gen_andc_i32(t0, t0, t1);
7880 logic_cc = conds;
7881 break;
7882 case 2: /* orr */
7883 tcg_gen_or_i32(t0, t0, t1);
7884 logic_cc = conds;
7885 break;
7886 case 3: /* orn */
7887 tcg_gen_orc_i32(t0, t0, t1);
7888 logic_cc = conds;
7889 break;
7890 case 4: /* eor */
7891 tcg_gen_xor_i32(t0, t0, t1);
7892 logic_cc = conds;
7893 break;
7894 case 8: /* add */
7895 if (conds)
7896 gen_helper_add_cc(t0, t0, t1);
7897 else
7898 tcg_gen_add_i32(t0, t0, t1);
7899 break;
7900 case 10: /* adc */
7901 if (conds)
7902 gen_helper_adc_cc(t0, t0, t1);
7903 else
7904 gen_adc(t0, t1);
7905 break;
7906 case 11: /* sbc */
7907 if (conds)
7908 gen_helper_sbc_cc(t0, t0, t1);
7909 else
7910 gen_sub_carry(t0, t0, t1);
7911 break;
7912 case 13: /* sub */
7913 if (conds)
7914 gen_helper_sub_cc(t0, t0, t1);
7915 else
7916 tcg_gen_sub_i32(t0, t0, t1);
7917 break;
7918 case 14: /* rsb */
7919 if (conds)
7920 gen_helper_sub_cc(t0, t1, t0);
7921 else
7922 tcg_gen_sub_i32(t0, t1, t0);
7923 break;
7924 default: /* 5, 6, 7, 9, 12, 15. */
7925 return 1;
7926 }
7927 if (logic_cc) {
7928 gen_logic_CC(t0);
7929 if (shifter_out)
7930 gen_set_CF_bit31(t1);
7931 }
7932 return 0;
7933 }
7934
7935 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7936 is not legal. */
7937 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7938 {
7939 uint32_t insn, imm, shift, offset;
7940 uint32_t rd, rn, rm, rs;
7941 TCGv tmp;
7942 TCGv tmp2;
7943 TCGv tmp3;
7944 TCGv addr;
7945 TCGv_i64 tmp64;
7946 int op;
7947 int shiftop;
7948 int conds;
7949 int logic_cc;
7950
7951 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7952 || arm_feature (env, ARM_FEATURE_M))) {
7953 /* Thumb-1 cores may need to treat bl and blx as a pair of
7954 16-bit instructions to get correct prefetch abort behavior. */
7955 insn = insn_hw1;
7956 if ((insn & (1 << 12)) == 0) {
7957 ARCH(5);
7958 /* Second half of blx. */
7959 offset = ((insn & 0x7ff) << 1);
7960 tmp = load_reg(s, 14);
7961 tcg_gen_addi_i32(tmp, tmp, offset);
7962 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7963
7964 tmp2 = tcg_temp_new_i32();
7965 tcg_gen_movi_i32(tmp2, s->pc | 1);
7966 store_reg(s, 14, tmp2);
7967 gen_bx(s, tmp);
7968 return 0;
7969 }
7970 if (insn & (1 << 11)) {
7971 /* Second half of bl. */
7972 offset = ((insn & 0x7ff) << 1) | 1;
7973 tmp = load_reg(s, 14);
7974 tcg_gen_addi_i32(tmp, tmp, offset);
7975
7976 tmp2 = tcg_temp_new_i32();
7977 tcg_gen_movi_i32(tmp2, s->pc | 1);
7978 store_reg(s, 14, tmp2);
7979 gen_bx(s, tmp);
7980 return 0;
7981 }
7982 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7983 /* Instruction spans a page boundary. Implement it as two
7984 16-bit instructions in case the second half causes a
7985 prefetch abort. */
7986 offset = ((int32_t)insn << 21) >> 9;
7987 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7988 return 0;
7989 }
7990 /* Fall through to 32-bit decode. */
7991 }
7992
7993 insn = lduw_code(s->pc);
7994 s->pc += 2;
7995 insn |= (uint32_t)insn_hw1 << 16;
7996
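 /* Only the BL/BLX pair (hw1 = 0b11110..., hw2 with bits 15, 14 and 11 set)
  * predates Thumb-2; any other 32-bit encoding requires the Thumb-2 feature.
  */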
7997 if ((insn & 0xf800e800) != 0xf000e800) {
7998 ARCH(6T2);
7999 }
8000
8001 rn = (insn >> 16) & 0xf;
8002 rs = (insn >> 12) & 0xf;
8003 rd = (insn >> 8) & 0xf;
8004 rm = insn & 0xf;
8005 switch ((insn >> 25) & 0xf) {
8006 case 0: case 1: case 2: case 3:
8007 /* 16-bit instructions. Should never happen. */
8008 abort();
8009 case 4:
8010 if (insn & (1 << 22)) {
8011 /* Other load/store, table branch. */
8012 if (insn & 0x01200000) {
8013 /* Load/store doubleword. */
8014 if (rn == 15) {
8015 addr = tcg_temp_new_i32();
8016 tcg_gen_movi_i32(addr, s->pc & ~3);
8017 } else {
8018 addr = load_reg(s, rn);
8019 }
8020 offset = (insn & 0xff) * 4;
8021 if ((insn & (1 << 23)) == 0)
8022 offset = -offset;
8023 if (insn & (1 << 24)) {
8024 tcg_gen_addi_i32(addr, addr, offset);
8025 offset = 0;
8026 }
8027 if (insn & (1 << 20)) {
8028 /* ldrd */
8029 tmp = gen_ld32(addr, IS_USER(s));
8030 store_reg(s, rs, tmp);
8031 tcg_gen_addi_i32(addr, addr, 4);
8032 tmp = gen_ld32(addr, IS_USER(s));
8033 store_reg(s, rd, tmp);
8034 } else {
8035 /* strd */
8036 tmp = load_reg(s, rs);
8037 gen_st32(tmp, addr, IS_USER(s));
8038 tcg_gen_addi_i32(addr, addr, 4);
8039 tmp = load_reg(s, rd);
8040 gen_st32(tmp, addr, IS_USER(s));
8041 }
8042 if (insn & (1 << 21)) {
8043 /* Base writeback. */
8044 if (rn == 15)
8045 goto illegal_op;
8046 tcg_gen_addi_i32(addr, addr, offset - 4);
8047 store_reg(s, rn, addr);
8048 } else {
8049 tcg_temp_free_i32(addr);
8050 }
8051 } else if ((insn & (1 << 23)) == 0) {
8052 /* Load/store exclusive word. */
8053 addr = tcg_temp_local_new();
8054 load_reg_var(s, addr, rn);
8055 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8056 if (insn & (1 << 20)) {
8057 gen_load_exclusive(s, rs, 15, addr, 2);
8058 } else {
8059 gen_store_exclusive(s, rd, rs, 15, addr, 2);
8060 }
8061 tcg_temp_free(addr);
8062 } else if ((insn & (1 << 6)) == 0) {
8063 /* Table Branch. */
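                /* TBB/TBH: load a byte (TBB) or halfword (TBH, index doubled)
                 * offset from the table at Rn + Rm, then branch to
                 * PC + 2 * entry.
                 */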
8064 if (rn == 15) {
8065 addr = tcg_temp_new_i32();
8066 tcg_gen_movi_i32(addr, s->pc);
8067 } else {
8068 addr = load_reg(s, rn);
8069 }
8070 tmp = load_reg(s, rm);
8071 tcg_gen_add_i32(addr, addr, tmp);
8072 if (insn & (1 << 4)) {
8073 /* tbh */
8074 tcg_gen_add_i32(addr, addr, tmp);
8075 tcg_temp_free_i32(tmp);
8076 tmp = gen_ld16u(addr, IS_USER(s));
8077 } else { /* tbb */
8078 tcg_temp_free_i32(tmp);
8079 tmp = gen_ld8u(addr, IS_USER(s));
8080 }
8081 tcg_temp_free_i32(addr);
8082 tcg_gen_shli_i32(tmp, tmp, 1);
8083 tcg_gen_addi_i32(tmp, tmp, s->pc);
8084 store_reg(s, 15, tmp);
8085 } else {
8086 /* Load/store exclusive byte/halfword/doubleword. */
8087 ARCH(7);
8088 op = (insn >> 4) & 0x3;
8089 if (op == 2) {
8090 goto illegal_op;
8091 }
8092 addr = tcg_temp_local_new();
8093 load_reg_var(s, addr, rn);
8094 if (insn & (1 << 20)) {
8095 gen_load_exclusive(s, rs, rd, addr, op);
8096 } else {
8097 gen_store_exclusive(s, rm, rs, rd, addr, op);
8098 }
8099 tcg_temp_free(addr);
8100 }
8101 } else {
8102 /* Load/store multiple, RFE, SRS. */
8103 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8104 /* Not available in user mode. */
8105 if (IS_USER(s))
8106 goto illegal_op;
8107 if (insn & (1 << 20)) {
8108 /* rfe */
8109 addr = load_reg(s, rn);
8110 if ((insn & (1 << 24)) == 0)
8111 tcg_gen_addi_i32(addr, addr, -8);
8112 /* Load PC into tmp and CPSR into tmp2. */
8113 tmp = gen_ld32(addr, 0);
8114 tcg_gen_addi_i32(addr, addr, 4);
8115 tmp2 = gen_ld32(addr, 0);
8116 if (insn & (1 << 21)) {
8117 /* Base writeback. */
8118 if (insn & (1 << 24)) {
8119 tcg_gen_addi_i32(addr, addr, 4);
8120 } else {
8121 tcg_gen_addi_i32(addr, addr, -4);
8122 }
8123 store_reg(s, rn, addr);
8124 } else {
8125 tcg_temp_free_i32(addr);
8126 }
8127 gen_rfe(s, tmp, tmp2);
8128 } else {
8129 /* srs */
8130 op = (insn & 0x1f);
8131 addr = tcg_temp_new_i32();
8132 tmp = tcg_const_i32(op);
8133 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8134 tcg_temp_free_i32(tmp);
8135 if ((insn & (1 << 24)) == 0) {
8136 tcg_gen_addi_i32(addr, addr, -8);
8137 }
8138 tmp = load_reg(s, 14);
8139 gen_st32(tmp, addr, 0);
8140 tcg_gen_addi_i32(addr, addr, 4);
8141 tmp = tcg_temp_new_i32();
8142 gen_helper_cpsr_read(tmp);
8143 gen_st32(tmp, addr, 0);
8144 if (insn & (1 << 21)) {
8145 if ((insn & (1 << 24)) == 0) {
8146 tcg_gen_addi_i32(addr, addr, -4);
8147 } else {
8148 tcg_gen_addi_i32(addr, addr, 4);
8149 }
8150 tmp = tcg_const_i32(op);
8151 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8152 tcg_temp_free_i32(tmp);
8153 } else {
8154 tcg_temp_free_i32(addr);
8155 }
8156 }
8157 } else {
8158 int i, loaded_base = 0;
8159 TCGv loaded_var;
8160 /* Load/store multiple. */
8161 addr = load_reg(s, rn);
8162 offset = 0;
8163 for (i = 0; i < 16; i++) {
8164 if (insn & (1 << i))
8165 offset += 4;
8166 }
8167 if (insn & (1 << 24)) {
8168 tcg_gen_addi_i32(addr, addr, -offset);
8169 }
8170
8171 TCGV_UNUSED(loaded_var);
8172 for (i = 0; i < 16; i++) {
8173 if ((insn & (1 << i)) == 0)
8174 continue;
8175 if (insn & (1 << 20)) {
8176 /* Load. */
8177 tmp = gen_ld32(addr, IS_USER(s));
8178 if (i == 15) {
8179 gen_bx(s, tmp);
8180 } else if (i == rn) {
8181 loaded_var = tmp;
8182 loaded_base = 1;
8183 } else {
8184 store_reg(s, i, tmp);
8185 }
8186 } else {
8187 /* Store. */
8188 tmp = load_reg(s, i);
8189 gen_st32(tmp, addr, IS_USER(s));
8190 }
8191 tcg_gen_addi_i32(addr, addr, 4);
8192 }
8193 if (loaded_base) {
8194 store_reg(s, rn, loaded_var);
8195 }
8196 if (insn & (1 << 21)) {
8197 /* Base register writeback. */
8198 if (insn & (1 << 24)) {
8199 tcg_gen_addi_i32(addr, addr, -offset);
8200 }
8201 /* Fault if writeback register is in register list. */
8202 if (insn & (1 << rn))
8203 goto illegal_op;
8204 store_reg(s, rn, addr);
8205 } else {
8206 tcg_temp_free_i32(addr);
8207 }
8208 }
8209 }
8210 break;
8211 case 5:
8212
8213 op = (insn >> 21) & 0xf;
8214 if (op == 6) {
8215 /* Halfword pack. */
8216 tmp = load_reg(s, rn);
8217 tmp2 = load_reg(s, rm);
8218 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8219 if (insn & (1 << 5)) {
8220 /* pkhtb */
8221 if (shift == 0)
8222 shift = 31;
8223 tcg_gen_sari_i32(tmp2, tmp2, shift);
8224 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8225 tcg_gen_ext16u_i32(tmp2, tmp2);
8226 } else {
8227 /* pkhbt */
8228 if (shift)
8229 tcg_gen_shli_i32(tmp2, tmp2, shift);
8230 tcg_gen_ext16u_i32(tmp, tmp);
8231 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8232 }
8233 tcg_gen_or_i32(tmp, tmp, tmp2);
8234 tcg_temp_free_i32(tmp2);
8235 store_reg(s, rd, tmp);
8236 } else {
8237 /* Data processing register constant shift. */
8238 if (rn == 15) {
8239 tmp = tcg_temp_new_i32();
8240 tcg_gen_movi_i32(tmp, 0);
8241 } else {
8242 tmp = load_reg(s, rn);
8243 }
8244 tmp2 = load_reg(s, rm);
8245
8246 shiftop = (insn >> 4) & 3;
8247 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8248 conds = (insn & (1 << 20)) != 0;
8249 logic_cc = (conds && thumb2_logic_op(op));
8250 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8251 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8252 goto illegal_op;
8253 tcg_temp_free_i32(tmp2);
8254 if (rd != 15) {
8255 store_reg(s, rd, tmp);
8256 } else {
8257 tcg_temp_free_i32(tmp);
8258 }
8259 }
8260 break;
8261 case 13: /* Misc data processing. */
8262 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8263 if (op < 4 && (insn & 0xf000) != 0xf000)
8264 goto illegal_op;
8265 switch (op) {
8266 case 0: /* Register controlled shift. */
8267 tmp = load_reg(s, rn);
8268 tmp2 = load_reg(s, rm);
8269 if ((insn & 0x70) != 0)
8270 goto illegal_op;
8271 op = (insn >> 21) & 3;
8272 logic_cc = (insn & (1 << 20)) != 0;
8273 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8274 if (logic_cc)
8275 gen_logic_CC(tmp);
8276 store_reg_bx(env, s, rd, tmp);
8277 break;
8278 case 1: /* Sign/zero extend. */
8279 tmp = load_reg(s, rm);
8280 shift = (insn >> 4) & 3;
8281 /* ??? In many cases it's not necessary to do a
8282 rotate; a shift is sufficient. */
8283 if (shift != 0)
8284 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8285 op = (insn >> 20) & 7;
8286 switch (op) {
8287 case 0: gen_sxth(tmp); break;
8288 case 1: gen_uxth(tmp); break;
8289 case 2: gen_sxtb16(tmp); break;
8290 case 3: gen_uxtb16(tmp); break;
8291 case 4: gen_sxtb(tmp); break;
8292 case 5: gen_uxtb(tmp); break;
8293 default: goto illegal_op;
8294 }
8295 if (rn != 15) {
8296 tmp2 = load_reg(s, rn);
8297 if ((op >> 1) == 1) {
8298 gen_add16(tmp, tmp2);
8299 } else {
8300 tcg_gen_add_i32(tmp, tmp, tmp2);
8301 tcg_temp_free_i32(tmp2);
8302 }
8303 }
8304 store_reg(s, rd, tmp);
8305 break;
8306 case 2: /* SIMD add/subtract. */
8307 op = (insn >> 20) & 7;
8308 shift = (insn >> 4) & 7;
8309 if ((op & 3) == 3 || (shift & 3) == 3)
8310 goto illegal_op;
8311 tmp = load_reg(s, rn);
8312 tmp2 = load_reg(s, rm);
8313 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8314 tcg_temp_free_i32(tmp2);
8315 store_reg(s, rd, tmp);
8316 break;
8317 case 3: /* Other data processing. */
8318 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8319 if (op < 4) {
8320 /* Saturating add/subtract. */
8321 tmp = load_reg(s, rn);
8322 tmp2 = load_reg(s, rm);
8323 if (op & 1)
8324 gen_helper_double_saturate(tmp, tmp);
8325 if (op & 2)
8326 gen_helper_sub_saturate(tmp, tmp2, tmp);
8327 else
8328 gen_helper_add_saturate(tmp, tmp, tmp2);
8329 tcg_temp_free_i32(tmp2);
8330 } else {
8331 tmp = load_reg(s, rn);
8332 switch (op) {
8333 case 0x0a: /* rbit */
8334 gen_helper_rbit(tmp, tmp);
8335 break;
8336 case 0x08: /* rev */
8337 tcg_gen_bswap32_i32(tmp, tmp);
8338 break;
8339 case 0x09: /* rev16 */
8340 gen_rev16(tmp);
8341 break;
8342 case 0x0b: /* revsh */
8343 gen_revsh(tmp);
8344 break;
8345 case 0x10: /* sel */
8346 tmp2 = load_reg(s, rm);
8347 tmp3 = tcg_temp_new_i32();
8348 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
8349 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8350 tcg_temp_free_i32(tmp3);
8351 tcg_temp_free_i32(tmp2);
8352 break;
8353 case 0x18: /* clz */
8354 gen_helper_clz(tmp, tmp);
8355 break;
8356 default:
8357 goto illegal_op;
8358 }
8359 }
8360 store_reg(s, rd, tmp);
8361 break;
8362 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8363 op = (insn >> 4) & 0xf;
8364 tmp = load_reg(s, rn);
8365 tmp2 = load_reg(s, rm);
8366 switch ((insn >> 20) & 7) {
8367 case 0: /* 32 x 32 -> 32 */
8368 tcg_gen_mul_i32(tmp, tmp, tmp2);
8369 tcg_temp_free_i32(tmp2);
8370 if (rs != 15) {
8371 tmp2 = load_reg(s, rs);
8372 if (op)
8373 tcg_gen_sub_i32(tmp, tmp2, tmp);
8374 else
8375 tcg_gen_add_i32(tmp, tmp, tmp2);
8376 tcg_temp_free_i32(tmp2);
8377 }
8378 break;
8379 case 1: /* 16 x 16 -> 32 */
8380 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8381 tcg_temp_free_i32(tmp2);
8382 if (rs != 15) {
8383 tmp2 = load_reg(s, rs);
8384 gen_helper_add_setq(tmp, tmp, tmp2);
8385 tcg_temp_free_i32(tmp2);
8386 }
8387 break;
8388 case 2: /* Dual multiply add. */
8389 case 4: /* Dual multiply subtract. */
8390 if (op)
8391 gen_swap_half(tmp2);
8392 gen_smul_dual(tmp, tmp2);
8393 if (insn & (1 << 22)) {
8394 /* This subtraction cannot overflow. */
8395 tcg_gen_sub_i32(tmp, tmp, tmp2);
8396 } else {
8397 /* This addition cannot overflow 32 bits;
8398 * however it may overflow when considered as a signed
8399 * operation, in which case we must set the Q flag.
8400 */
8401 gen_helper_add_setq(tmp, tmp, tmp2);
8402 }
8403 tcg_temp_free_i32(tmp2);
8404 if (rs != 15)
8405 {
8406 tmp2 = load_reg(s, rs);
8407 gen_helper_add_setq(tmp, tmp, tmp2);
8408 tcg_temp_free_i32(tmp2);
8409 }
8410 break;
8411 case 3: /* 32 * 16 -> 32msb */
8412 if (op)
8413 tcg_gen_sari_i32(tmp2, tmp2, 16);
8414 else
8415 gen_sxth(tmp2);
8416 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8417 tcg_gen_shri_i64(tmp64, tmp64, 16);
8418 tmp = tcg_temp_new_i32();
8419 tcg_gen_trunc_i64_i32(tmp, tmp64);
8420 tcg_temp_free_i64(tmp64);
8421 if (rs != 15)
8422 {
8423 tmp2 = load_reg(s, rs);
8424 gen_helper_add_setq(tmp, tmp, tmp2);
8425 tcg_temp_free_i32(tmp2);
8426 }
8427 break;
8428 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8429 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8430 if (rs != 15) {
8431 tmp = load_reg(s, rs);
8432 if (insn & (1 << 20)) {
8433 tmp64 = gen_addq_msw(tmp64, tmp);
8434 } else {
8435 tmp64 = gen_subq_msw(tmp64, tmp);
8436 }
8437 }
8438 if (insn & (1 << 4)) {
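                         /* Rounding variant (bit 4 set): bias by 0x80000000 so the
                            shift below yields the rounded high half. */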
8439 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8440 }
8441 tcg_gen_shri_i64(tmp64, tmp64, 32);
8442 tmp = tcg_temp_new_i32();
8443 tcg_gen_trunc_i64_i32(tmp, tmp64);
8444 tcg_temp_free_i64(tmp64);
8445 break;
8446 case 7: /* Unsigned sum of absolute differences. */
8447 gen_helper_usad8(tmp, tmp, tmp2);
8448 tcg_temp_free_i32(tmp2);
8449 if (rs != 15) {
8450 tmp2 = load_reg(s, rs);
8451 tcg_gen_add_i32(tmp, tmp, tmp2);
8452 tcg_temp_free_i32(tmp2);
8453 }
8454 break;
8455 }
8456 store_reg(s, rd, tmp);
8457 break;
8458 case 6: case 7: /* 64-bit multiply, Divide. */
8459 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8460 tmp = load_reg(s, rn);
8461 tmp2 = load_reg(s, rm);
8462 if ((op & 0x50) == 0x10) {
8463 /* sdiv, udiv */
8464 if (!arm_feature(env, ARM_FEATURE_DIV))
8465 goto illegal_op;
8466 if (op & 0x20)
8467 gen_helper_udiv(tmp, tmp, tmp2);
8468 else
8469 gen_helper_sdiv(tmp, tmp, tmp2);
8470 tcg_temp_free_i32(tmp2);
8471 store_reg(s, rd, tmp);
8472 } else if ((op & 0xe) == 0xc) {
8473 /* Dual multiply accumulate long. */
8474 if (op & 1)
8475 gen_swap_half(tmp2);
8476 gen_smul_dual(tmp, tmp2);
8477 if (op & 0x10) {
8478 tcg_gen_sub_i32(tmp, tmp, tmp2);
8479 } else {
8480 tcg_gen_add_i32(tmp, tmp, tmp2);
8481 }
8482 tcg_temp_free_i32(tmp2);
8483 /* BUGFIX */
8484 tmp64 = tcg_temp_new_i64();
8485 tcg_gen_ext_i32_i64(tmp64, tmp);
8486 tcg_temp_free_i32(tmp);
8487 gen_addq(s, tmp64, rs, rd);
8488 gen_storeq_reg(s, rs, rd, tmp64);
8489 tcg_temp_free_i64(tmp64);
8490 } else {
8491 if (op & 0x20) {
8492 /* Unsigned 64-bit multiply */
8493 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8494 } else {
8495 if (op & 8) {
8496 /* smlalxy */
8497 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8498 tcg_temp_free_i32(tmp2);
8499 tmp64 = tcg_temp_new_i64();
8500 tcg_gen_ext_i32_i64(tmp64, tmp);
8501 tcg_temp_free_i32(tmp);
8502 } else {
8503 /* Signed 64-bit multiply */
8504 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8505 }
8506 }
8507 if (op & 4) {
8508 /* umaal */
8509 gen_addq_lo(s, tmp64, rs);
8510 gen_addq_lo(s, tmp64, rd);
8511 } else if (op & 0x40) {
8512 /* 64-bit accumulate. */
8513 gen_addq(s, tmp64, rs, rd);
8514 }
8515 gen_storeq_reg(s, rs, rd, tmp64);
8516 tcg_temp_free_i64(tmp64);
8517 }
8518 break;
8519 }
8520 break;
8521 case 6: case 7: case 14: case 15:
8522 /* Coprocessor. */
8523 if (((insn >> 24) & 3) == 3) {
8524 /* Translate into the equivalent ARM encoding. */
8525 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8526 if (disas_neon_data_insn(env, s, insn))
8527 goto illegal_op;
8528 } else {
8529 if (insn & (1 << 28))
8530 goto illegal_op;
8531 if (disas_coproc_insn (env, s, insn))
8532 goto illegal_op;
8533 }
8534 break;
8535 case 8: case 9: case 10: case 11:
8536 if (insn & (1 << 15)) {
8537 /* Branches, misc control. */
8538 if (insn & 0x5000) {
8539 /* Unconditional branch. */
8540 /* signextend(hw1[10:0]) -> offset[:12]. */
8541 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8542 /* hw1[10:0] -> offset[11:1]. */
8543 offset |= (insn & 0x7ff) << 1;
8544 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8545 offset[24:22] already have the same value because of the
8546 sign extension above. */
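                 /* hw2[13] and hw2[11] are the J1/J2 bits of the BL/BLX encoding:
                    I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S). */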
8547 offset ^= ((~insn) & (1 << 13)) << 10;
8548 offset ^= ((~insn) & (1 << 11)) << 11;
8549
8550 if (insn & (1 << 14)) {
8551 /* Branch and link. */
8552 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8553 }
8554
8555 offset += s->pc;
8556 if (insn & (1 << 12)) {
8557 /* b/bl */
8558 gen_jmp(s, offset);
8559 } else {
8560 /* blx */
8561 offset &= ~(uint32_t)2;
8562 /* thumb2 bx, no need to check */
8563 gen_bx_im(s, offset);
8564 }
8565 } else if (((insn >> 23) & 7) == 7) {
8566 /* Misc control */
8567 if (insn & (1 << 13))
8568 goto illegal_op;
8569
8570 if (insn & (1 << 26)) {
8571 /* Secure monitor call (v6Z) */
8572 goto illegal_op; /* not implemented. */
8573 } else {
8574 op = (insn >> 20) & 7;
8575 switch (op) {
8576 case 0: /* msr cpsr. */
8577 if (IS_M(env)) {
8578 tmp = load_reg(s, rn);
8579 addr = tcg_const_i32(insn & 0xff);
8580 gen_helper_v7m_msr(cpu_env, addr, tmp);
8581 tcg_temp_free_i32(addr);
8582 tcg_temp_free_i32(tmp);
8583 gen_lookup_tb(s);
8584 break;
8585 }
8586 /* fall through */
8587 case 1: /* msr spsr. */
8588 if (IS_M(env))
8589 goto illegal_op;
8590 tmp = load_reg(s, rn);
8591 if (gen_set_psr(s,
8592 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8593 op == 1, tmp))
8594 goto illegal_op;
8595 break;
8596 case 2: /* cps, nop-hint. */
8597 if (((insn >> 8) & 7) == 0) {
8598 gen_nop_hint(s, insn & 0xff);
8599 }
8600 /* Implemented as NOP in user mode. */
8601 if (IS_USER(s))
8602 break;
8603 offset = 0;
8604 imm = 0;
8605 if (insn & (1 << 10)) {
8606 if (insn & (1 << 7))
8607 offset |= CPSR_A;
8608 if (insn & (1 << 6))
8609 offset |= CPSR_I;
8610 if (insn & (1 << 5))
8611 offset |= CPSR_F;
8612 if (insn & (1 << 9))
8613 imm = CPSR_A | CPSR_I | CPSR_F;
8614 }
8615 if (insn & (1 << 8)) {
8616 offset |= 0x1f;
8617 imm |= (insn & 0x1f);
8618 }
8619 if (offset) {
8620 gen_set_psr_im(s, offset, 0, imm);
8621 }
8622 break;
8623 case 3: /* Special control operations. */
8624 ARCH(7);
8625 op = (insn >> 4) & 0xf;
8626 switch (op) {
8627 case 2: /* clrex */
8628 gen_clrex(s);
8629 break;
8630 case 4: /* dsb */
8631 case 5: /* dmb */
8632 case 6: /* isb */
8633 /* These execute as NOPs. */
8634 break;
8635 default:
8636 goto illegal_op;
8637 }
8638 break;
8639 case 4: /* bxj */
8640 /* Trivial implementation equivalent to bx. */
8641 tmp = load_reg(s, rn);
8642 gen_bx(s, tmp);
8643 break;
8644 case 5: /* Exception return. */
8645 if (IS_USER(s)) {
8646 goto illegal_op;
8647 }
8648 if (rn != 14 || rd != 15) {
8649 goto illegal_op;
8650 }
8651 tmp = load_reg(s, rn);
8652 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8653 gen_exception_return(s, tmp);
8654 break;
8655 case 6: /* mrs cpsr. */
8656 tmp = tcg_temp_new_i32();
8657 if (IS_M(env)) {
8658 addr = tcg_const_i32(insn & 0xff);
8659 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8660 tcg_temp_free_i32(addr);
8661 } else {
8662 gen_helper_cpsr_read(tmp);
8663 }
8664 store_reg(s, rd, tmp);
8665 break;
8666 case 7: /* mrs spsr. */
8667 /* Not accessible in user mode. */
8668 if (IS_USER(s) || IS_M(env))
8669 goto illegal_op;
8670 tmp = load_cpu_field(spsr);
8671 store_reg(s, rd, tmp);
8672 break;
8673 }
8674 }
8675 } else {
8676 /* Conditional branch. */
8677 op = (insn >> 22) & 0xf;
8678 /* Generate a conditional jump to next instruction. */
8679 s->condlabel = gen_new_label();
8680 gen_test_cc(op ^ 1, s->condlabel);
8681 s->condjmp = 1;
8682
8683 /* offset[11:1] = insn[10:0] */
8684 offset = (insn & 0x7ff) << 1;
8685 /* offset[17:12] = insn[21:16]. */
8686 offset |= (insn & 0x003f0000) >> 4;
8687 /* offset[31:20] = insn[26]. */
8688 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8689 /* offset[18] = insn[13]. */
8690 offset |= (insn & (1 << 13)) << 5;
8691 /* offset[19] = insn[11]. */
8692 offset |= (insn & (1 << 11)) << 8;
8693
8694 /* jump to the offset */
8695 gen_jmp(s, s->pc + offset);
8696 }
8697 } else {
8698 /* Data processing immediate. */
8699 if (insn & (1 << 25)) {
8700 if (insn & (1 << 24)) {
8701 if (insn & (1 << 20))
8702 goto illegal_op;
8703 /* Bitfield/Saturate. */
8704 op = (insn >> 21) & 7;
8705 imm = insn & 0x1f;
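                 /* Bit position: shift = imm3:imm2, i.e. hw2[14:12]:hw2[7:6]. */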
8706 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8707 if (rn == 15) {
8708 tmp = tcg_temp_new_i32();
8709 tcg_gen_movi_i32(tmp, 0);
8710 } else {
8711 tmp = load_reg(s, rn);
8712 }
8713 switch (op) {
8714 case 2: /* Signed bitfield extract. */
8715 imm++;
8716 if (shift + imm > 32)
8717 goto illegal_op;
8718 if (imm < 32)
8719 gen_sbfx(tmp, shift, imm);
8720 break;
8721 case 6: /* Unsigned bitfield extract. */
8722 imm++;
8723 if (shift + imm > 32)
8724 goto illegal_op;
8725 if (imm < 32)
8726 gen_ubfx(tmp, shift, (1u << imm) - 1);
8727 break;
8728 case 3: /* Bitfield insert/clear. */
8729 if (imm < shift)
8730 goto illegal_op;
8731 imm = imm + 1 - shift;
8732 if (imm != 32) {
8733 tmp2 = load_reg(s, rd);
8734 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8735 tcg_temp_free_i32(tmp2);
8736 }
8737 break;
8738 case 7:
8739 goto illegal_op;
8740 default: /* Saturate. */
8741 if (shift) {
8742 if (op & 1)
8743 tcg_gen_sari_i32(tmp, tmp, shift);
8744 else
8745 tcg_gen_shli_i32(tmp, tmp, shift);
8746 }
8747 tmp2 = tcg_const_i32(imm);
8748 if (op & 4) {
8749 /* Unsigned. */
8750 if ((op & 1) && shift == 0)
8751 gen_helper_usat16(tmp, tmp, tmp2);
8752 else
8753 gen_helper_usat(tmp, tmp, tmp2);
8754 } else {
8755 /* Signed. */
8756 if ((op & 1) && shift == 0)
8757 gen_helper_ssat16(tmp, tmp, tmp2);
8758 else
8759 gen_helper_ssat(tmp, tmp, tmp2);
8760 }
8761 tcg_temp_free_i32(tmp2);
8762 break;
8763 }
8764 store_reg(s, rd, tmp);
8765 } else {
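                     /* Plain 12-bit immediate: i:imm3:imm8 = hw1[10]:hw2[14:12]:hw2[7:0]. */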
8766 imm = ((insn & 0x04000000) >> 15)
8767 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8768 if (insn & (1 << 22)) {
8769 /* 16-bit immediate. */
8770 imm |= (insn >> 4) & 0xf000;
8771 if (insn & (1 << 23)) {
8772 /* movt */
8773 tmp = load_reg(s, rd);
8774 tcg_gen_ext16u_i32(tmp, tmp);
8775 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8776 } else {
8777 /* movw */
8778 tmp = tcg_temp_new_i32();
8779 tcg_gen_movi_i32(tmp, imm);
8780 }
8781 } else {
8782 /* Add/sub 12-bit immediate. */
8783 if (rn == 15) {
8784 offset = s->pc & ~(uint32_t)3;
8785 if (insn & (1 << 23))
8786 offset -= imm;
8787 else
8788 offset += imm;
8789 tmp = tcg_temp_new_i32();
8790 tcg_gen_movi_i32(tmp, offset);
8791 } else {
8792 tmp = load_reg(s, rn);
8793 if (insn & (1 << 23))
8794 tcg_gen_subi_i32(tmp, tmp, imm);
8795 else
8796 tcg_gen_addi_i32(tmp, tmp, imm);
8797 }
8798 }
8799 store_reg(s, rd, tmp);
8800 }
8801 } else {
8802 int shifter_out = 0;
8803 /* modified 12-bit immediate. */
8804 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8805 imm = (insn & 0xff);
8806 switch (shift) {
8807 case 0: /* XY */
8808 /* Nothing to do. */
8809 break;
8810 case 1: /* 00XY00XY */
8811 imm |= imm << 16;
8812 break;
8813 case 2: /* XY00XY00 */
8814 imm |= imm << 16;
8815 imm <<= 8;
8816 break;
8817 case 3: /* XYXYXYXY */
8818 imm |= imm << 16;
8819 imm |= imm << 8;
8820 break;
8821 default: /* Rotated constant. */
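                         /* 8-bit value 1bcdefgh rotated right by 'shift' (8..31);
                            with only the low 8 bits set, a left shift by
                            (32 - shift) gives the same result. */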
8822 shift = (shift << 1) | (imm >> 7);
8823 imm |= 0x80;
8824 imm = imm << (32 - shift);
8825 shifter_out = 1;
8826 break;
8827 }
8828 tmp2 = tcg_temp_new_i32();
8829 tcg_gen_movi_i32(tmp2, imm);
8830 rn = (insn >> 16) & 0xf;
8831 if (rn == 15) {
8832 tmp = tcg_temp_new_i32();
8833 tcg_gen_movi_i32(tmp, 0);
8834 } else {
8835 tmp = load_reg(s, rn);
8836 }
8837 op = (insn >> 21) & 0xf;
8838 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8839 shifter_out, tmp, tmp2))
8840 goto illegal_op;
8841 tcg_temp_free_i32(tmp2);
8842 rd = (insn >> 8) & 0xf;
8843 if (rd != 15) {
8844 store_reg(s, rd, tmp);
8845 } else {
8846 tcg_temp_free_i32(tmp);
8847 }
8848 }
8849 }
8850 break;
8851 case 12: /* Load/store single data item. */
8852 {
8853 int postinc = 0;
8854 int writeback = 0;
8855 int user;
8856 if ((insn & 0x01100000) == 0x01000000) {
8857 if (disas_neon_ls_insn(env, s, insn))
8858 goto illegal_op;
8859 break;
8860 }
8861 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8862 if (rs == 15) {
8863 if (!(insn & (1 << 20))) {
8864 goto illegal_op;
8865 }
8866 if (op != 2) {
8867 /* Byte or halfword load space with dest == r15 : memory hints.
8868 * Catch them early so we don't emit pointless addressing code.
8869 * This space is a mix of:
8870 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8871 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8872 * cores)
8873 * unallocated hints, which must be treated as NOPs
8874 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8875 * which is easiest for the decoding logic
8876 * Some space which must UNDEF
8877 */
8878 int op1 = (insn >> 23) & 3;
8879 int op2 = (insn >> 6) & 0x3f;
8880 if (op & 2) {
8881 goto illegal_op;
8882 }
8883 if (rn == 15) {
8884 /* UNPREDICTABLE or unallocated hint */
8885 return 0;
8886 }
8887 if (op1 & 1) {
8888 return 0; /* PLD* or unallocated hint */
8889 }
8890 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8891 return 0; /* PLD* or unallocated hint */
8892 }
8893 /* UNDEF space, or an UNPREDICTABLE */
8894 return 1;
8895 }
8896 }
8897 user = IS_USER(s);
8898 if (rn == 15) {
8899 addr = tcg_temp_new_i32();
8900 /* PC relative. */
8901 /* s->pc has already been incremented by 4. */
8902 imm = s->pc & 0xfffffffc;
8903 if (insn & (1 << 23))
8904 imm += insn & 0xfff;
8905 else
8906 imm -= insn & 0xfff;
8907 tcg_gen_movi_i32(addr, imm);
8908 } else {
8909 addr = load_reg(s, rn);
8910 if (insn & (1 << 23)) {
8911 /* Positive offset. */
8912 imm = insn & 0xfff;
8913 tcg_gen_addi_i32(addr, addr, imm);
8914 } else {
8915 imm = insn & 0xff;
8916 switch ((insn >> 8) & 0xf) {
8917 case 0x0: /* Shifted Register. */
8918 shift = (insn >> 4) & 0xf;
8919 if (shift > 3) {
8920 tcg_temp_free_i32(addr);
8921 goto illegal_op;
8922 }
8923 tmp = load_reg(s, rm);
8924 if (shift)
8925 tcg_gen_shli_i32(tmp, tmp, shift);
8926 tcg_gen_add_i32(addr, addr, tmp);
8927 tcg_temp_free_i32(tmp);
8928 break;
8929 case 0xc: /* Negative offset. */
8930 tcg_gen_addi_i32(addr, addr, -imm);
8931 break;
8932 case 0xe: /* User privilege. */
8933 tcg_gen_addi_i32(addr, addr, imm);
8934 user = 1;
8935 break;
8936 case 0x9: /* Post-decrement. */
8937 imm = -imm;
8938 /* Fall through. */
8939 case 0xb: /* Post-increment. */
8940 postinc = 1;
8941 writeback = 1;
8942 break;
8943 case 0xd: /* Pre-decrement. */
8944 imm = -imm;
8945 /* Fall through. */
8946 case 0xf: /* Pre-increment. */
8947 tcg_gen_addi_i32(addr, addr, imm);
8948 writeback = 1;
8949 break;
8950 default:
8951 tcg_temp_free_i32(addr);
8952 goto illegal_op;
8953 }
8954 }
8955 }
8956 if (insn & (1 << 20)) {
8957 /* Load. */
8958 switch (op) {
8959 case 0: tmp = gen_ld8u(addr, user); break;
8960 case 4: tmp = gen_ld8s(addr, user); break;
8961 case 1: tmp = gen_ld16u(addr, user); break;
8962 case 5: tmp = gen_ld16s(addr, user); break;
8963 case 2: tmp = gen_ld32(addr, user); break;
8964 default:
8965 tcg_temp_free_i32(addr);
8966 goto illegal_op;
8967 }
8968 if (rs == 15) {
8969 gen_bx(s, tmp);
8970 } else {
8971 store_reg(s, rs, tmp);
8972 }
8973 } else {
8974 /* Store. */
8975 tmp = load_reg(s, rs);
8976 switch (op) {
8977 case 0: gen_st8(tmp, addr, user); break;
8978 case 1: gen_st16(tmp, addr, user); break;
8979 case 2: gen_st32(tmp, addr, user); break;
8980 default:
8981 tcg_temp_free_i32(addr);
8982 goto illegal_op;
8983 }
8984 }
8985 if (postinc)
8986 tcg_gen_addi_i32(addr, addr, imm);
8987 if (writeback) {
8988 store_reg(s, rn, addr);
8989 } else {
8990 tcg_temp_free_i32(addr);
8991 }
8992 }
8993 break;
8994 default:
8995 goto illegal_op;
8996 }
8997 return 0;
8998 illegal_op:
8999 return 1;
9000 }
9001
9002 static void disas_thumb_insn(CPUState *env, DisasContext *s)
9003 {
9004 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9005 int32_t offset;
9006 int i;
9007 TCGv tmp;
9008 TCGv tmp2;
9009 TCGv addr;
9010
9011 if (s->condexec_mask) {
9012 cond = s->condexec_cond;
9013 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9014 s->condlabel = gen_new_label();
9015 gen_test_cc(cond ^ 1, s->condlabel);
9016 s->condjmp = 1;
9017 }
9018 }
9019
9020 insn = lduw_code(s->pc);
9021 s->pc += 2;
9022
9023 switch (insn >> 12) {
9024 case 0: case 1:
9025
9026 rd = insn & 7;
9027 op = (insn >> 11) & 3;
9028 if (op == 3) {
9029 /* add/subtract */
9030 rn = (insn >> 3) & 7;
9031 tmp = load_reg(s, rn);
9032 if (insn & (1 << 10)) {
9033 /* immediate */
9034 tmp2 = tcg_temp_new_i32();
9035 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
9036 } else {
9037 /* reg */
9038 rm = (insn >> 6) & 7;
9039 tmp2 = load_reg(s, rm);
9040 }
9041 if (insn & (1 << 9)) {
9042 if (s->condexec_mask)
9043 tcg_gen_sub_i32(tmp, tmp, tmp2);
9044 else
9045 gen_helper_sub_cc(tmp, tmp, tmp2);
9046 } else {
9047 if (s->condexec_mask)
9048 tcg_gen_add_i32(tmp, tmp, tmp2);
9049 else
9050 gen_helper_add_cc(tmp, tmp, tmp2);
9051 }
9052 tcg_temp_free_i32(tmp2);
9053 store_reg(s, rd, tmp);
9054 } else {
9055 /* shift immediate */
9056 rm = (insn >> 3) & 7;
9057 shift = (insn >> 6) & 0x1f;
9058 tmp = load_reg(s, rm);
9059 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9060 if (!s->condexec_mask)
9061 gen_logic_CC(tmp);
9062 store_reg(s, rd, tmp);
9063 }
9064 break;
9065 case 2: case 3:
9066 /* arithmetic large immediate */
9067 op = (insn >> 11) & 3;
9068 rd = (insn >> 8) & 0x7;
9069 if (op == 0) { /* mov */
9070 tmp = tcg_temp_new_i32();
9071 tcg_gen_movi_i32(tmp, insn & 0xff);
9072 if (!s->condexec_mask)
9073 gen_logic_CC(tmp);
9074 store_reg(s, rd, tmp);
9075 } else {
9076 tmp = load_reg(s, rd);
9077 tmp2 = tcg_temp_new_i32();
9078 tcg_gen_movi_i32(tmp2, insn & 0xff);
9079 switch (op) {
9080 case 1: /* cmp */
9081 gen_helper_sub_cc(tmp, tmp, tmp2);
9082 tcg_temp_free_i32(tmp);
9083 tcg_temp_free_i32(tmp2);
9084 break;
9085 case 2: /* add */
9086 if (s->condexec_mask)
9087 tcg_gen_add_i32(tmp, tmp, tmp2);
9088 else
9089 gen_helper_add_cc(tmp, tmp, tmp2);
9090 tcg_temp_free_i32(tmp2);
9091 store_reg(s, rd, tmp);
9092 break;
9093 case 3: /* sub */
9094 if (s->condexec_mask)
9095 tcg_gen_sub_i32(tmp, tmp, tmp2);
9096 else
9097 gen_helper_sub_cc(tmp, tmp, tmp2);
9098 tcg_temp_free_i32(tmp2);
9099 store_reg(s, rd, tmp);
9100 break;
9101 }
9102 }
9103 break;
9104 case 4:
9105 if (insn & (1 << 11)) {
9106 rd = (insn >> 8) & 7;
9107 /* load pc-relative. Bit 1 of PC is ignored. */
9108 val = s->pc + 2 + ((insn & 0xff) * 4);
9109 val &= ~(uint32_t)2;
9110 addr = tcg_temp_new_i32();
9111 tcg_gen_movi_i32(addr, val);
9112 tmp = gen_ld32(addr, IS_USER(s));
9113 tcg_temp_free_i32(addr);
9114 store_reg(s, rd, tmp);
9115 break;
9116 }
9117 if (insn & (1 << 10)) {
9118 /* data processing extended or blx */
9119 rd = (insn & 7) | ((insn >> 4) & 8);
9120 rm = (insn >> 3) & 0xf;
9121 op = (insn >> 8) & 3;
9122 switch (op) {
9123 case 0: /* add */
9124 tmp = load_reg(s, rd);
9125 tmp2 = load_reg(s, rm);
9126 tcg_gen_add_i32(tmp, tmp, tmp2);
9127 tcg_temp_free_i32(tmp2);
9128 store_reg(s, rd, tmp);
9129 break;
9130 case 1: /* cmp */
9131 tmp = load_reg(s, rd);
9132 tmp2 = load_reg(s, rm);
9133 gen_helper_sub_cc(tmp, tmp, tmp2);
9134 tcg_temp_free_i32(tmp2);
9135 tcg_temp_free_i32(tmp);
9136 break;
9137 case 2: /* mov/cpy */
9138 tmp = load_reg(s, rm);
9139 store_reg(s, rd, tmp);
9140 break;
9141 case 3:/* branch [and link] exchange thumb register */
9142 tmp = load_reg(s, rm);
9143 if (insn & (1 << 7)) {
9144 ARCH(5);
9145 val = (uint32_t)s->pc | 1;
9146 tmp2 = tcg_temp_new_i32();
9147 tcg_gen_movi_i32(tmp2, val);
9148 store_reg(s, 14, tmp2);
9149 }
9150 /* already thumb, no need to check */
9151 gen_bx(s, tmp);
9152 break;
9153 }
9154 break;
9155 }
9156
9157 /* data processing register */
9158 rd = insn & 7;
9159 rm = (insn >> 3) & 7;
9160 op = (insn >> 6) & 0xf;
9161 if (op == 2 || op == 3 || op == 4 || op == 7) {
9162 /* the shift/rotate ops want the operands backwards */
9163 val = rm;
9164 rm = rd;
9165 rd = val;
9166 val = 1;
9167 } else {
9168 val = 0;
9169 }
9170
9171 if (op == 9) { /* neg */
9172 tmp = tcg_temp_new_i32();
9173 tcg_gen_movi_i32(tmp, 0);
9174 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9175 tmp = load_reg(s, rd);
9176 } else {
9177 TCGV_UNUSED(tmp);
9178 }
9179
9180 tmp2 = load_reg(s, rm);
9181 switch (op) {
9182 case 0x0: /* and */
9183 tcg_gen_and_i32(tmp, tmp, tmp2);
9184 if (!s->condexec_mask)
9185 gen_logic_CC(tmp);
9186 break;
9187 case 0x1: /* eor */
9188 tcg_gen_xor_i32(tmp, tmp, tmp2);
9189 if (!s->condexec_mask)
9190 gen_logic_CC(tmp);
9191 break;
9192 case 0x2: /* lsl */
9193 if (s->condexec_mask) {
9194 gen_helper_shl(tmp2, tmp2, tmp);
9195 } else {
9196 gen_helper_shl_cc(tmp2, tmp2, tmp);
9197 gen_logic_CC(tmp2);
9198 }
9199 break;
9200 case 0x3: /* lsr */
9201 if (s->condexec_mask) {
9202 gen_helper_shr(tmp2, tmp2, tmp);
9203 } else {
9204 gen_helper_shr_cc(tmp2, tmp2, tmp);
9205 gen_logic_CC(tmp2);
9206 }
9207 break;
9208 case 0x4: /* asr */
9209 if (s->condexec_mask) {
9210 gen_helper_sar(tmp2, tmp2, tmp);
9211 } else {
9212 gen_helper_sar_cc(tmp2, tmp2, tmp);
9213 gen_logic_CC(tmp2);
9214 }
9215 break;
9216 case 0x5: /* adc */
9217 if (s->condexec_mask)
9218 gen_adc(tmp, tmp2);
9219 else
9220 gen_helper_adc_cc(tmp, tmp, tmp2);
9221 break;
9222 case 0x6: /* sbc */
9223 if (s->condexec_mask)
9224 gen_sub_carry(tmp, tmp, tmp2);
9225 else
9226 gen_helper_sbc_cc(tmp, tmp, tmp2);
9227 break;
9228 case 0x7: /* ror */
9229 if (s->condexec_mask) {
9230 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9231 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9232 } else {
9233 gen_helper_ror_cc(tmp2, tmp2, tmp);
9234 gen_logic_CC(tmp2);
9235 }
9236 break;
9237 case 0x8: /* tst */
9238 tcg_gen_and_i32(tmp, tmp, tmp2);
9239 gen_logic_CC(tmp);
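              /* rd = 16 (not a real register) marks flag-setting ops with no
                 destination, so the writeback code below skips them. */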
9240 rd = 16;
9241 break;
9242 case 0x9: /* neg */
9243 if (s->condexec_mask)
9244 tcg_gen_neg_i32(tmp, tmp2);
9245 else
9246 gen_helper_sub_cc(tmp, tmp, tmp2);
9247 break;
9248 case 0xa: /* cmp */
9249 gen_helper_sub_cc(tmp, tmp, tmp2);
9250 rd = 16;
9251 break;
9252 case 0xb: /* cmn */
9253 gen_helper_add_cc(tmp, tmp, tmp2);
9254 rd = 16;
9255 break;
9256 case 0xc: /* orr */
9257 tcg_gen_or_i32(tmp, tmp, tmp2);
9258 if (!s->condexec_mask)
9259 gen_logic_CC(tmp);
9260 break;
9261 case 0xd: /* mul */
9262 tcg_gen_mul_i32(tmp, tmp, tmp2);
9263 if (!s->condexec_mask)
9264 gen_logic_CC(tmp);
9265 break;
9266 case 0xe: /* bic */
9267 tcg_gen_andc_i32(tmp, tmp, tmp2);
9268 if (!s->condexec_mask)
9269 gen_logic_CC(tmp);
9270 break;
9271 case 0xf: /* mvn */
9272 tcg_gen_not_i32(tmp2, tmp2);
9273 if (!s->condexec_mask)
9274 gen_logic_CC(tmp2);
9275 val = 1;
9276 rm = rd;
9277 break;
9278 }
9279 if (rd != 16) {
9280 if (val) {
9281 store_reg(s, rm, tmp2);
9282 if (op != 0xf)
9283 tcg_temp_free_i32(tmp);
9284 } else {
9285 store_reg(s, rd, tmp);
9286 tcg_temp_free_i32(tmp2);
9287 }
9288 } else {
9289 tcg_temp_free_i32(tmp);
9290 tcg_temp_free_i32(tmp2);
9291 }
9292 break;
9293
9294 case 5:
9295 /* load/store register offset. */
9296 rd = insn & 7;
9297 rn = (insn >> 3) & 7;
9298 rm = (insn >> 6) & 7;
9299 op = (insn >> 9) & 7;
9300 addr = load_reg(s, rn);
9301 tmp = load_reg(s, rm);
9302 tcg_gen_add_i32(addr, addr, tmp);
9303 tcg_temp_free_i32(tmp);
9304
9305 if (op < 3) /* store */
9306 tmp = load_reg(s, rd);
9307
9308 switch (op) {
9309 case 0: /* str */
9310 gen_st32(tmp, addr, IS_USER(s));
9311 break;
9312 case 1: /* strh */
9313 gen_st16(tmp, addr, IS_USER(s));
9314 break;
9315 case 2: /* strb */
9316 gen_st8(tmp, addr, IS_USER(s));
9317 break;
9318 case 3: /* ldrsb */
9319 tmp = gen_ld8s(addr, IS_USER(s));
9320 break;
9321 case 4: /* ldr */
9322 tmp = gen_ld32(addr, IS_USER(s));
9323 break;
9324 case 5: /* ldrh */
9325 tmp = gen_ld16u(addr, IS_USER(s));
9326 break;
9327 case 6: /* ldrb */
9328 tmp = gen_ld8u(addr, IS_USER(s));
9329 break;
9330 case 7: /* ldrsh */
9331 tmp = gen_ld16s(addr, IS_USER(s));
9332 break;
9333 }
9334 if (op >= 3) /* load */
9335 store_reg(s, rd, tmp);
9336 tcg_temp_free_i32(addr);
9337 break;
9338
9339 case 6:
9340 /* load/store word immediate offset */
9341 rd = insn & 7;
9342 rn = (insn >> 3) & 7;
9343 addr = load_reg(s, rn);
9344 val = (insn >> 4) & 0x7c;
9345 tcg_gen_addi_i32(addr, addr, val);
9346
9347 if (insn & (1 << 11)) {
9348 /* load */
9349 tmp = gen_ld32(addr, IS_USER(s));
9350 store_reg(s, rd, tmp);
9351 } else {
9352 /* store */
9353 tmp = load_reg(s, rd);
9354 gen_st32(tmp, addr, IS_USER(s));
9355 }
9356 tcg_temp_free_i32(addr);
9357 break;
9358
9359 case 7:
9360 /* load/store byte immediate offset */
9361 rd = insn & 7;
9362 rn = (insn >> 3) & 7;
9363 addr = load_reg(s, rn);
9364 val = (insn >> 6) & 0x1f;
9365 tcg_gen_addi_i32(addr, addr, val);
9366
9367 if (insn & (1 << 11)) {
9368 /* load */
9369 tmp = gen_ld8u(addr, IS_USER(s));
9370 store_reg(s, rd, tmp);
9371 } else {
9372 /* store */
9373 tmp = load_reg(s, rd);
9374 gen_st8(tmp, addr, IS_USER(s));
9375 }
9376 tcg_temp_free_i32(addr);
9377 break;
9378
9379 case 8:
9380 /* load/store halfword immediate offset */
9381 rd = insn & 7;
9382 rn = (insn >> 3) & 7;
9383 addr = load_reg(s, rn);
9384 val = (insn >> 5) & 0x3e;
9385 tcg_gen_addi_i32(addr, addr, val);
9386
9387 if (insn & (1 << 11)) {
9388 /* load */
9389 tmp = gen_ld16u(addr, IS_USER(s));
9390 store_reg(s, rd, tmp);
9391 } else {
9392 /* store */
9393 tmp = load_reg(s, rd);
9394 gen_st16(tmp, addr, IS_USER(s));
9395 }
9396 tcg_temp_free_i32(addr);
9397 break;
9398
9399 case 9:
9400 /* load/store from stack */
9401 rd = (insn >> 8) & 7;
9402 addr = load_reg(s, 13);
9403 val = (insn & 0xff) * 4;
9404 tcg_gen_addi_i32(addr, addr, val);
9405
9406 if (insn & (1 << 11)) {
9407 /* load */
9408 tmp = gen_ld32(addr, IS_USER(s));
9409 store_reg(s, rd, tmp);
9410 } else {
9411 /* store */
9412 tmp = load_reg(s, rd);
9413 gen_st32(tmp, addr, IS_USER(s));
9414 }
9415 tcg_temp_free_i32(addr);
9416 break;
9417
9418 case 10:
9419 /* add to high reg */
9420 rd = (insn >> 8) & 7;
9421 if (insn & (1 << 11)) {
9422 /* SP */
9423 tmp = load_reg(s, 13);
9424 } else {
9425 /* PC. bit 1 is ignored. */
9426 tmp = tcg_temp_new_i32();
9427 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9428 }
9429 val = (insn & 0xff) * 4;
9430 tcg_gen_addi_i32(tmp, tmp, val);
9431 store_reg(s, rd, tmp);
9432 break;
9433
9434 case 11:
9435 /* misc */
9436 op = (insn >> 8) & 0xf;
9437 switch (op) {
9438 case 0:
9439 /* adjust stack pointer */
9440 tmp = load_reg(s, 13);
9441 val = (insn & 0x7f) * 4;
9442 if (insn & (1 << 7))
9443 val = -(int32_t)val;
9444 tcg_gen_addi_i32(tmp, tmp, val);
9445 store_reg(s, 13, tmp);
9446 break;
9447
9448 case 2: /* sign/zero extend. */
9449 ARCH(6);
9450 rd = insn & 7;
9451 rm = (insn >> 3) & 7;
9452 tmp = load_reg(s, rm);
9453 switch ((insn >> 6) & 3) {
9454 case 0: gen_sxth(tmp); break;
9455 case 1: gen_sxtb(tmp); break;
9456 case 2: gen_uxth(tmp); break;
9457 case 3: gen_uxtb(tmp); break;
9458 }
9459 store_reg(s, rd, tmp);
9460 break;
9461 case 4: case 5: case 0xc: case 0xd:
9462 /* push/pop */
9463 addr = load_reg(s, 13);
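              /* Total transfer size: 4 bytes per register in the list, plus 4
                 for LR (push) or PC (pop) when bit 8 is set. */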
9464 if (insn & (1 << 8))
9465 offset = 4;
9466 else
9467 offset = 0;
9468 for (i = 0; i < 8; i++) {
9469 if (insn & (1 << i))
9470 offset += 4;
9471 }
9472 if ((insn & (1 << 11)) == 0) {
9473 tcg_gen_addi_i32(addr, addr, -offset);
9474 }
9475 for (i = 0; i < 8; i++) {
9476 if (insn & (1 << i)) {
9477 if (insn & (1 << 11)) {
9478 /* pop */
9479 tmp = gen_ld32(addr, IS_USER(s));
9480 store_reg(s, i, tmp);
9481 } else {
9482 /* push */
9483 tmp = load_reg(s, i);
9484 gen_st32(tmp, addr, IS_USER(s));
9485 }
9486 /* advance to the next address. */
9487 tcg_gen_addi_i32(addr, addr, 4);
9488 }
9489 }
9490 TCGV_UNUSED(tmp);
9491 if (insn & (1 << 8)) {
9492 if (insn & (1 << 11)) {
9493 /* pop pc */
9494 tmp = gen_ld32(addr, IS_USER(s));
9495 /* don't set the pc until the rest of the instruction
9496 has completed */
9497 } else {
9498 /* push lr */
9499 tmp = load_reg(s, 14);
9500 gen_st32(tmp, addr, IS_USER(s));
9501 }
9502 tcg_gen_addi_i32(addr, addr, 4);
9503 }
9504 if ((insn & (1 << 11)) == 0) {
9505 tcg_gen_addi_i32(addr, addr, -offset);
9506 }
9507 /* write back the new stack pointer */
9508 store_reg(s, 13, addr);
9509 /* set the new PC value */
9510 if ((insn & 0x0900) == 0x0900) {
9511 store_reg_from_load(env, s, 15, tmp);
9512 }
9513 break;
9514
9515         case 1: case 3: case 9: case 11: /* cbz/cbnz */
9516 rm = insn & 7;
9517 tmp = load_reg(s, rm);
9518 s->condlabel = gen_new_label();
9519 s->condjmp = 1;
9520 if (insn & (1 << 11))
9521 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9522 else
9523 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9524 tcg_temp_free_i32(tmp);
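              /* Branch offset is i:imm5:'0', taken from insn[9] and insn[7:3]. */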
9525 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9526 val = (uint32_t)s->pc + 2;
9527 val += offset;
9528 gen_jmp(s, val);
9529 break;
9530
9531 case 15: /* IT, nop-hint. */
9532 if ((insn & 0xf) == 0) {
9533 gen_nop_hint(s, (insn >> 4) & 0xf);
9534 break;
9535 }
9536 /* If Then. */
9537 s->condexec_cond = (insn >> 4) & 0xe;
9538 s->condexec_mask = insn & 0x1f;
9539             /* No actual code generated for this insn, just set up state. */
9540 break;
9541
9542 case 0xe: /* bkpt */
9543 ARCH(5);
9544 gen_exception_insn(s, 2, EXCP_BKPT);
9545 break;
9546
9547 case 0xa: /* rev */
9548 ARCH(6);
9549 rn = (insn >> 3) & 0x7;
9550 rd = insn & 0x7;
9551 tmp = load_reg(s, rn);
9552 switch ((insn >> 6) & 3) {
9553 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9554 case 1: gen_rev16(tmp); break;
9555 case 3: gen_revsh(tmp); break;
9556 default: goto illegal_op;
9557 }
9558 store_reg(s, rd, tmp);
9559 break;
9560
9561 case 6: /* cps */
9562 ARCH(6);
9563 if (IS_USER(s))
9564 break;
9565 if (IS_M(env)) {
9566 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9567 /* PRIMASK */
9568 if (insn & 1) {
9569 addr = tcg_const_i32(16);
9570 gen_helper_v7m_msr(cpu_env, addr, tmp);
9571 tcg_temp_free_i32(addr);
9572 }
9573 /* FAULTMASK */
9574 if (insn & 2) {
9575 addr = tcg_const_i32(17);
9576 gen_helper_v7m_msr(cpu_env, addr, tmp);
9577 tcg_temp_free_i32(addr);
9578 }
9579 tcg_temp_free_i32(tmp);
9580 gen_lookup_tb(s);
9581 } else {
9582 if (insn & (1 << 4))
9583 shift = CPSR_A | CPSR_I | CPSR_F;
9584 else
9585 shift = 0;
9586 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9587 }
9588 break;
9589
9590 default:
9591 goto undef;
9592 }
9593 break;
9594
9595 case 12:
9596 {
9597 /* load/store multiple */
9598 TCGv loaded_var;
9599 TCGV_UNUSED(loaded_var);
9600 rn = (insn >> 8) & 0x7;
9601 addr = load_reg(s, rn);
9602 for (i = 0; i < 8; i++) {
9603 if (insn & (1 << i)) {
9604 if (insn & (1 << 11)) {
9605 /* load */
9606 tmp = gen_ld32(addr, IS_USER(s));
9607 if (i == rn) {
9608 loaded_var = tmp;
9609 } else {
9610 store_reg(s, i, tmp);
9611 }
9612 } else {
9613 /* store */
9614 tmp = load_reg(s, i);
9615 gen_st32(tmp, addr, IS_USER(s));
9616 }
9617 /* advance to the next address */
9618 tcg_gen_addi_i32(addr, addr, 4);
9619 }
9620 }
9621 if ((insn & (1 << rn)) == 0) {
9622 /* base reg not in list: base register writeback */
9623 store_reg(s, rn, addr);
9624 } else {
9625 /* base reg in list: if load, complete it now */
9626 if (insn & (1 << 11)) {
9627 store_reg(s, rn, loaded_var);
9628 }
9629 tcg_temp_free_i32(addr);
9630 }
9631 break;
9632 }
9633 case 13:
9634 /* conditional branch or swi */
9635 cond = (insn >> 8) & 0xf;
9636 if (cond == 0xe)
9637 goto undef;
9638
9639 if (cond == 0xf) {
9640 /* swi */
9641 gen_set_pc_im(s->pc);
9642 s->is_jmp = DISAS_SWI;
9643 break;
9644 }
9645 /* generate a conditional jump to next instruction */
9646 s->condlabel = gen_new_label();
9647 gen_test_cc(cond ^ 1, s->condlabel);
9648 s->condjmp = 1;
9649
9650 /* jump to the offset */
9651 val = (uint32_t)s->pc + 2;
9652 offset = ((int32_t)insn << 24) >> 24;
9653 val += offset << 1;
9654 gen_jmp(s, val);
9655 break;
9656
9657 case 14:
9658 if (insn & (1 << 11)) {
9659 if (disas_thumb2_insn(env, s, insn))
9660 goto undef32;
9661 break;
9662 }
9663 /* unconditional branch */
9664 val = (uint32_t)s->pc;
9665 offset = ((int32_t)insn << 21) >> 21;
9666 val += (offset << 1) + 2;
9667 gen_jmp(s, val);
9668 break;
9669
9670 case 15:
9671 if (disas_thumb2_insn(env, s, insn))
9672 goto undef32;
9673 break;
9674 }
9675 return;
9676 undef32:
9677 gen_exception_insn(s, 4, EXCP_UDEF);
9678 return;
9679 illegal_op:
9680 undef:
9681 gen_exception_insn(s, 2, EXCP_UDEF);
9682 }
9683
9684 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9685 basic block 'tb'. If search_pc is TRUE, also generate PC
9686 information for each intermediate instruction. */
9687 static inline void gen_intermediate_code_internal(CPUState *env,
9688 TranslationBlock *tb,
9689 int search_pc)
9690 {
9691 DisasContext dc1, *dc = &dc1;
9692 CPUBreakpoint *bp;
9693 uint16_t *gen_opc_end;
9694 int j, lj;
9695 target_ulong pc_start;
9696 uint32_t next_page_start;
9697 int num_insns;
9698 int max_insns;
9699
9700 /* generate intermediate code */
9701 pc_start = tb->pc;
9702
9703 dc->tb = tb;
9704
9705 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9706
9707 dc->is_jmp = DISAS_NEXT;
9708 dc->pc = pc_start;
9709 dc->singlestep_enabled = env->singlestep_enabled;
9710 dc->condjmp = 0;
9711 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9712 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9713 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9714 #if !defined(CONFIG_USER_ONLY)
9715 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9716 #endif
9717 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9718 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9719 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9720 cpu_F0s = tcg_temp_new_i32();
9721 cpu_F1s = tcg_temp_new_i32();
9722 cpu_F0d = tcg_temp_new_i64();
9723 cpu_F1d = tcg_temp_new_i64();
9724 cpu_V0 = cpu_F0d;
9725 cpu_V1 = cpu_F1d;
9726 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9727 cpu_M0 = tcg_temp_new_i64();
9728 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9729 lj = -1;
9730 num_insns = 0;
9731 max_insns = tb->cflags & CF_COUNT_MASK;
9732 if (max_insns == 0)
9733 max_insns = CF_COUNT_MASK;
9734
9735 gen_icount_start();
9736
9737 tcg_clear_temp_count();
9738
9739 /* A note on handling of the condexec (IT) bits:
9740 *
9741 * We want to avoid the overhead of having to write the updated condexec
9742 * bits back to the CPUState for every instruction in an IT block. So:
9743 * (1) if the condexec bits are not already zero then we write
9744 * zero back into the CPUState now. This avoids complications trying
9745 * to do it at the end of the block. (For example if we don't do this
9746 * it's hard to identify whether we can safely skip writing condexec
9747 * at the end of the TB, which we definitely want to do for the case
9748 * where a TB doesn't do anything with the IT state at all.)
9749 * (2) if we are going to leave the TB then we call gen_set_condexec()
9750 * which will write the correct value into CPUState if zero is wrong.
9751 * This is done both for leaving the TB at the end, and for leaving
9752 * it because of an exception we know will happen, which is done in
9753 * gen_exception_insn(). The latter is necessary because we need to
9754 * leave the TB with the PC/IT state just prior to execution of the
9755 * instruction which caused the exception.
9756 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9757 * then the CPUState will be wrong and we need to reset it.
9758 * This is handled in the same way as restoration of the
9759 * PC in these situations: we will be called again with search_pc=1
9760 * and generate a mapping of the condexec bits for each PC in
9761 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9762 * this to restore the condexec bits.
9763 *
9764 * Note that there are no instructions which can read the condexec
9765 * bits, and none which can write non-static values to them, so
9766 * we don't need to care about whether CPUState is correct in the
9767 * middle of a TB.
9768 */
9769
9770 /* Reset the conditional execution bits immediately. This avoids
9771 complications trying to do it at the end of the block. */
9772 if (dc->condexec_mask || dc->condexec_cond)
9773 {
9774 TCGv tmp = tcg_temp_new_i32();
9775 tcg_gen_movi_i32(tmp, 0);
9776 store_cpu_field(tmp, condexec_bits);
9777 }
9778 do {
9779 #ifdef CONFIG_USER_ONLY
9780 /* Intercept jump to the magic kernel page. */
9781 if (dc->pc >= 0xffff0000) {
9782             /* We always get here via a jump, so we know we are not in a
9783 conditional execution block. */
9784 gen_exception(EXCP_KERNEL_TRAP);
9785 dc->is_jmp = DISAS_UPDATE;
9786 break;
9787 }
9788 #else
9789 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9790             /* We always get here via a jump, so we know we are not in a
9791 conditional execution block. */
9792 gen_exception(EXCP_EXCEPTION_EXIT);
9793 dc->is_jmp = DISAS_UPDATE;
9794 break;
9795 }
9796 #endif
9797
9798 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9799 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9800 if (bp->pc == dc->pc) {
9801 gen_exception_insn(dc, 0, EXCP_DEBUG);
9802 /* Advance PC so that clearing the breakpoint will
9803 invalidate this TB. */
9804 dc->pc += 2;
9805 goto done_generating;
9806 break;
9807 }
9808 }
9809 }
9810 if (search_pc) {
9811 j = gen_opc_ptr - gen_opc_buf;
9812 if (lj < j) {
9813 lj++;
9814 while (lj < j)
9815 gen_opc_instr_start[lj++] = 0;
9816 }
9817 gen_opc_pc[lj] = dc->pc;
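              /* Record the IT bits in the same layout as env->condexec_bits
                 (the inverse of the unpacking above), ready for
                 restore_state_to_opc() to write back. */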
9818 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9819 gen_opc_instr_start[lj] = 1;
9820 gen_opc_icount[lj] = num_insns;
9821 }
9822
9823 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9824 gen_io_start();
9825
9826 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9827 tcg_gen_debug_insn_start(dc->pc);
9828 }
9829
9830 if (dc->thumb) {
9831 disas_thumb_insn(env, dc);
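              /* Advance the IT state: the top bit of condexec_mask supplies the
                 condition LSB for the next insn in the block, and the mask is
                 shifted left one place per insn until it is exhausted. */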
9832 if (dc->condexec_mask) {
9833 dc->condexec_cond = (dc->condexec_cond & 0xe)
9834 | ((dc->condexec_mask >> 4) & 1);
9835 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9836 if (dc->condexec_mask == 0) {
9837 dc->condexec_cond = 0;
9838 }
9839 }
9840 } else {
9841 disas_arm_insn(env, dc);
9842 }
9843
9844 if (dc->condjmp && !dc->is_jmp) {
9845 gen_set_label(dc->condlabel);
9846 dc->condjmp = 0;
9847 }
9848
9849 if (tcg_check_temp_count()) {
9850 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9851 }
9852
9853 /* Translation stops when a conditional branch is encountered.
9854 * Otherwise the subsequent code could get translated several times.
9855 * Also stop translation when a page boundary is reached. This
9856 * ensures prefetch aborts occur at the right place. */
9857 num_insns ++;
9858 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9859 !env->singlestep_enabled &&
9860 !singlestep &&
9861 dc->pc < next_page_start &&
9862 num_insns < max_insns);
9863
9864 if (tb->cflags & CF_LAST_IO) {
9865 if (dc->condjmp) {
9866 /* FIXME: This can theoretically happen with self-modifying
9867 code. */
9868 cpu_abort(env, "IO on conditional branch instruction");
9869 }
9870 gen_io_end();
9871 }
9872
9873 /* At this stage dc->condjmp will only be set when the skipped
9874 instruction was a conditional branch or trap, and the PC has
9875 already been written. */
9876 if (unlikely(env->singlestep_enabled)) {
9877 /* Make sure the pc is updated, and raise a debug exception. */
9878 if (dc->condjmp) {
9879 gen_set_condexec(dc);
9880 if (dc->is_jmp == DISAS_SWI) {
9881 gen_exception(EXCP_SWI);
9882 } else {
9883 gen_exception(EXCP_DEBUG);
9884 }
9885 gen_set_label(dc->condlabel);
9886 }
9887 if (dc->condjmp || !dc->is_jmp) {
9888 gen_set_pc_im(dc->pc);
9889 dc->condjmp = 0;
9890 }
9891 gen_set_condexec(dc);
9892 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9893 gen_exception(EXCP_SWI);
9894 } else {
9895 /* FIXME: Single stepping a WFI insn will not halt
9896 the CPU. */
9897 gen_exception(EXCP_DEBUG);
9898 }
9899 } else {
9900 /* While branches must always occur at the end of an IT block,
9901 there are a few other things that can cause us to terminate
9902            the TB in the middle of an IT block:
9903 - Exception generating instructions (bkpt, swi, undefined).
9904 - Page boundaries.
9905 - Hardware watchpoints.
9906 Hardware breakpoints have already been handled and skip this code.
9907 */
9908 gen_set_condexec(dc);
9909 switch(dc->is_jmp) {
9910 case DISAS_NEXT:
9911 gen_goto_tb(dc, 1, dc->pc);
9912 break;
9913 default:
9914 case DISAS_JUMP:
9915 case DISAS_UPDATE:
9916 /* indicate that the hash table must be used to find the next TB */
9917 tcg_gen_exit_tb(0);
9918 break;
9919 case DISAS_TB_JUMP:
9920 /* nothing more to generate */
9921 break;
9922 case DISAS_WFI:
9923 gen_helper_wfi();
9924 break;
9925 case DISAS_SWI:
9926 gen_exception(EXCP_SWI);
9927 break;
9928 }
9929 if (dc->condjmp) {
9930 gen_set_label(dc->condlabel);
9931 gen_set_condexec(dc);
9932 gen_goto_tb(dc, 1, dc->pc);
9933 dc->condjmp = 0;
9934 }
9935 }
9936
9937 done_generating:
9938 gen_icount_end(tb, num_insns);
9939 *gen_opc_ptr = INDEX_op_end;
9940
9941 #ifdef DEBUG_DISAS
9942 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9943 qemu_log("----------------\n");
9944 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9945 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
9946 qemu_log("\n");
9947 }
9948 #endif
9949 if (search_pc) {
9950 j = gen_opc_ptr - gen_opc_buf;
9951 lj++;
9952 while (lj <= j)
9953 gen_opc_instr_start[lj++] = 0;
9954 } else {
9955 tb->size = dc->pc - pc_start;
9956 tb->icount = num_insns;
9957 }
9958 }
9959
9960 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
9961 {
9962 gen_intermediate_code_internal(env, tb, 0);
9963 }
9964
9965 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
9966 {
9967 gen_intermediate_code_internal(env, tb, 1);
9968 }
9969
9970 static const char *cpu_mode_names[16] = {
9971 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9972 "???", "???", "???", "und", "???", "???", "???", "sys"
9973 };
9974
9975 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
9976 int flags)
9977 {
9978 int i;
9979 #if 0
9980 union {
9981 uint32_t i;
9982 float s;
9983 } s0, s1;
9984 CPU_DoubleU d;
9985 /* ??? This assumes float64 and double have the same layout.
9986 Oh well, it's only debug dumps. */
9987 union {
9988 float64 f64;
9989 double d;
9990 } d0;
9991 #endif
9992 uint32_t psr;
9993
9994 for(i=0;i<16;i++) {
9995 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9996 if ((i % 4) == 3)
9997 cpu_fprintf(f, "\n");
9998 else
9999 cpu_fprintf(f, " ");
10000 }
10001 psr = cpsr_read(env);
10002 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10003 psr,
10004 psr & (1 << 31) ? 'N' : '-',
10005 psr & (1 << 30) ? 'Z' : '-',
10006 psr & (1 << 29) ? 'C' : '-',
10007 psr & (1 << 28) ? 'V' : '-',
10008 psr & CPSR_T ? 'T' : 'A',
10009 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
10010
10011 #if 0
10012 for (i = 0; i < 16; i++) {
10013 d.d = env->vfp.regs[i];
10014 s0.i = d.l.lower;
10015 s1.i = d.l.upper;
10016 d0.f64 = d.d;
10017 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
10018 i * 2, (int)s0.i, s0.s,
10019 i * 2 + 1, (int)s1.i, s1.s,
10020 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
10021 d0.d);
10022 }
10023 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
10024 #endif
10025 }
10026
10027 void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
10028 {
10029 env->regs[15] = gen_opc_pc[pc_pos];
10030 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
10031 }