1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "disas.h"
29 #include "tcg-op.h"
30 #include "qemu-log.h"
31
32 #include "helper.h"
33 #define GEN_HELPER 1
34 #include "helper.h"
35
36 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38 /* currently all emulated v5 cores are also v5TE, so don't bother */
39 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
40 #define ENABLE_ARCH_5J 0
41 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
45
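/* Require the architecture feature named by x for the current instruction;
   if the feature is absent, branch to the decoder's illegal_op label
   (e.g. ARCH(6T2) for Thumb-2-only encodings). */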
46 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
47
48 /* internal defines */
49 typedef struct DisasContext {
50 target_ulong pc;
51 int is_jmp;
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
56     /* Thumb-2 conditional execution bits.  */
57 int condexec_mask;
58 int condexec_cond;
59 struct TranslationBlock *tb;
60 int singlestep_enabled;
61 int thumb;
62 #if !defined(CONFIG_USER_ONLY)
63 int user;
64 #endif
65 int vfp_enabled;
66 int vec_len;
67 int vec_stride;
68 } DisasContext;
69
70 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
71
72 #if defined(CONFIG_USER_ONLY)
73 #define IS_USER(s) 1
74 #else
75 #define IS_USER(s) (s->user)
76 #endif
77
78 /* These instructions trap after executing, so defer them until after the
79    conditional execution state has been updated.  */
80 #define DISAS_WFI 4
81 #define DISAS_SWI 5
82
83 static TCGv_ptr cpu_env;
84 /* We reuse the same 64-bit temporaries for efficiency. */
85 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
86 static TCGv_i32 cpu_R[16];
87 static TCGv_i32 cpu_exclusive_addr;
88 static TCGv_i32 cpu_exclusive_val;
89 static TCGv_i32 cpu_exclusive_high;
90 #ifdef CONFIG_USER_ONLY
91 static TCGv_i32 cpu_exclusive_test;
92 static TCGv_i32 cpu_exclusive_info;
93 #endif
94
95 /* FIXME: These should be removed. */
96 static TCGv cpu_F0s, cpu_F1s;
97 static TCGv_i64 cpu_F0d, cpu_F1d;
98
99 #include "gen-icount.h"
100
101 static const char *regnames[] =
102 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
103 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
104
105 /* Initialize TCG globals.  */
106 void arm_translate_init(void)
107 {
108 int i;
109
110 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
111
112 for (i = 0; i < 16; i++) {
113 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUState, regs[i]),
115 regnames[i]);
116 }
117 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
118 offsetof(CPUState, exclusive_addr), "exclusive_addr");
119 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
120 offsetof(CPUState, exclusive_val), "exclusive_val");
121 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_high), "exclusive_high");
123 #ifdef CONFIG_USER_ONLY
124 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUState, exclusive_test), "exclusive_test");
126 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUState, exclusive_info), "exclusive_info");
128 #endif
129
130 #define GEN_HELPER 2
131 #include "helper.h"
132 }
133
134 static inline TCGv load_cpu_offset(int offset)
135 {
136 TCGv tmp = tcg_temp_new_i32();
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
139 }
140
141 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
142
143 static inline void store_cpu_offset(TCGv var, int offset)
144 {
145 tcg_gen_st_i32(var, cpu_env, offset);
146 tcg_temp_free_i32(var);
147 }
148
149 #define store_cpu_field(var, name) \
150 store_cpu_offset(var, offsetof(CPUState, name))
151
152 /* Set a variable to the value of a CPU register. */
153 static void load_reg_var(DisasContext *s, TCGv var, int reg)
154 {
155 if (reg == 15) {
156 uint32_t addr;
157         /* normally, since we already updated PC, we only need to add one insn */
158 if (s->thumb)
159 addr = (long)s->pc + 2;
160 else
161 addr = (long)s->pc + 4;
162 tcg_gen_movi_i32(var, addr);
163 } else {
164 tcg_gen_mov_i32(var, cpu_R[reg]);
165 }
166 }
167
168 /* Create a new temporary and set it to the value of a CPU register. */
169 static inline TCGv load_reg(DisasContext *s, int reg)
170 {
171 TCGv tmp = tcg_temp_new_i32();
172 load_reg_var(s, tmp, reg);
173 return tmp;
174 }
175
176 /* Set a CPU register. The source must be a temporary and will be
177 marked as dead. */
178 static void store_reg(DisasContext *s, int reg, TCGv var)
179 {
180 if (reg == 15) {
181 tcg_gen_andi_i32(var, var, ~1);
182 s->is_jmp = DISAS_JUMP;
183 }
184 tcg_gen_mov_i32(cpu_R[reg], var);
185 tcg_temp_free_i32(var);
186 }
187
188 /* Value extensions. */
189 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
190 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
191 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
192 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
193
194 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
195 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
196
197
198 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
199 {
200 TCGv tmp_mask = tcg_const_i32(mask);
201 gen_helper_cpsr_write(var, tmp_mask);
202 tcg_temp_free_i32(tmp_mask);
203 }
204 /* Set NZCV flags from the high 4 bits of var. */
205 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
206
207 static void gen_exception(int excp)
208 {
209 TCGv tmp = tcg_temp_new_i32();
210 tcg_gen_movi_i32(tmp, excp);
211 gen_helper_exception(tmp);
212 tcg_temp_free_i32(tmp);
213 }
214
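/* Dual signed 16x16 multiply: on return, a holds the product of the two
   low halfwords and b holds the product of the two high halfwords, both
   computed from the original operand values. */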
215 static void gen_smul_dual(TCGv a, TCGv b)
216 {
217 TCGv tmp1 = tcg_temp_new_i32();
218 TCGv tmp2 = tcg_temp_new_i32();
219 tcg_gen_ext16s_i32(tmp1, a);
220 tcg_gen_ext16s_i32(tmp2, b);
221 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
222 tcg_temp_free_i32(tmp2);
223 tcg_gen_sari_i32(a, a, 16);
224 tcg_gen_sari_i32(b, b, 16);
225 tcg_gen_mul_i32(b, b, a);
226 tcg_gen_mov_i32(a, tmp1);
227 tcg_temp_free_i32(tmp1);
228 }
229
230 /* Byteswap each halfword. */
231 static void gen_rev16(TCGv var)
232 {
233 TCGv tmp = tcg_temp_new_i32();
234 tcg_gen_shri_i32(tmp, var, 8);
235 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
236 tcg_gen_shli_i32(var, var, 8);
237 tcg_gen_andi_i32(var, var, 0xff00ff00);
238 tcg_gen_or_i32(var, var, tmp);
239 tcg_temp_free_i32(tmp);
240 }
241
242 /* Byteswap low halfword and sign extend. */
243 static void gen_revsh(TCGv var)
244 {
245 tcg_gen_ext16u_i32(var, var);
246 tcg_gen_bswap16_i32(var, var);
247 tcg_gen_ext16s_i32(var, var);
248 }
249
250 /* Unsigned bitfield extract. */
251 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
252 {
253 if (shift)
254 tcg_gen_shri_i32(var, var, shift);
255 tcg_gen_andi_i32(var, var, mask);
256 }
257
258 /* Signed bitfield extract. */
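/* The sign extension below uses the xor/sub trick: after masking to width
   bits, XOR-ing with the sign bit and then subtracting it propagates the
   sign (e.g. for width 8, 0x80 becomes 0xffffff80). */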
259 static void gen_sbfx(TCGv var, int shift, int width)
260 {
261 uint32_t signbit;
262
263 if (shift)
264 tcg_gen_sari_i32(var, var, shift);
265 if (shift + width < 32) {
266 signbit = 1u << (width - 1);
267 tcg_gen_andi_i32(var, var, (1u << width) - 1);
268 tcg_gen_xori_i32(var, var, signbit);
269 tcg_gen_subi_i32(var, var, signbit);
270 }
271 }
272
273 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
274 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
275 {
276 tcg_gen_andi_i32(val, val, mask);
277 tcg_gen_shli_i32(val, val, shift);
278 tcg_gen_andi_i32(base, base, ~(mask << shift));
279 tcg_gen_or_i32(dest, base, val);
280 }
281
282 /* Return (b << 32) + a. Mark inputs as dead.  */
283 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
284 {
285 TCGv_i64 tmp64 = tcg_temp_new_i64();
286
287 tcg_gen_extu_i32_i64(tmp64, b);
288 tcg_temp_free_i32(b);
289 tcg_gen_shli_i64(tmp64, tmp64, 32);
290 tcg_gen_add_i64(a, tmp64, a);
291
292 tcg_temp_free_i64(tmp64);
293 return a;
294 }
295
296 /* Return (b << 32) - a. Mark inputs as dead. */
297 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
298 {
299 TCGv_i64 tmp64 = tcg_temp_new_i64();
300
301 tcg_gen_extu_i32_i64(tmp64, b);
302 tcg_temp_free_i32(b);
303 tcg_gen_shli_i64(tmp64, tmp64, 32);
304 tcg_gen_sub_i64(a, tmp64, a);
305
306 tcg_temp_free_i64(tmp64);
307 return a;
308 }
309
310 /* FIXME: Most targets have native widening multiplication.
311 It would be good to use that instead of a full wide multiply. */
312 /* 32x32->64 multiply. Marks inputs as dead. */
313 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
314 {
315 TCGv_i64 tmp1 = tcg_temp_new_i64();
316 TCGv_i64 tmp2 = tcg_temp_new_i64();
317
318 tcg_gen_extu_i32_i64(tmp1, a);
319 tcg_temp_free_i32(a);
320 tcg_gen_extu_i32_i64(tmp2, b);
321 tcg_temp_free_i32(b);
322 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
323 tcg_temp_free_i64(tmp2);
324 return tmp1;
325 }
326
327 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
328 {
329 TCGv_i64 tmp1 = tcg_temp_new_i64();
330 TCGv_i64 tmp2 = tcg_temp_new_i64();
331
332 tcg_gen_ext_i32_i64(tmp1, a);
333 tcg_temp_free_i32(a);
334 tcg_gen_ext_i32_i64(tmp2, b);
335 tcg_temp_free_i32(b);
336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
337 tcg_temp_free_i64(tmp2);
338 return tmp1;
339 }
340
341 /* Swap low and high halfwords. */
342 static void gen_swap_half(TCGv var)
343 {
344 TCGv tmp = tcg_temp_new_i32();
345 tcg_gen_shri_i32(tmp, var, 16);
346 tcg_gen_shli_i32(var, var, 16);
347 tcg_gen_or_i32(var, var, tmp);
348 tcg_temp_free_i32(tmp);
349 }
350
351 /* Dual 16-bit add.  The result is placed in t0, and t1 is marked as dead.
352 tmp = (t0 ^ t1) & 0x8000;
353 t0 &= ~0x8000;
354 t1 &= ~0x8000;
355 t0 = (t0 + t1) ^ tmp;
356 */
357
358 static void gen_add16(TCGv t0, TCGv t1)
359 {
360 TCGv tmp = tcg_temp_new_i32();
361 tcg_gen_xor_i32(tmp, t0, t1);
362 tcg_gen_andi_i32(tmp, tmp, 0x8000);
363 tcg_gen_andi_i32(t0, t0, ~0x8000);
364 tcg_gen_andi_i32(t1, t1, ~0x8000);
365 tcg_gen_add_i32(t0, t0, t1);
366 tcg_gen_xor_i32(t0, t0, tmp);
367 tcg_temp_free_i32(tmp);
368 tcg_temp_free_i32(t1);
369 }
370
371 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
372
373 /* Set CF to the top bit of var. */
374 static void gen_set_CF_bit31(TCGv var)
375 {
376 TCGv tmp = tcg_temp_new_i32();
377 tcg_gen_shri_i32(tmp, var, 31);
378 gen_set_CF(tmp);
379 tcg_temp_free_i32(tmp);
380 }
381
382 /* Set N and Z flags from var. */
383 static inline void gen_logic_CC(TCGv var)
384 {
385 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
387 }
388
389 /* T0 += T1 + CF. */
390 static void gen_adc(TCGv t0, TCGv t1)
391 {
392 TCGv tmp;
393 tcg_gen_add_i32(t0, t0, t1);
394 tmp = load_cpu_field(CF);
395 tcg_gen_add_i32(t0, t0, tmp);
396 tcg_temp_free_i32(tmp);
397 }
398
399 /* dest = T0 + T1 + CF. */
400 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
401 {
402 TCGv tmp;
403 tcg_gen_add_i32(dest, t0, t1);
404 tmp = load_cpu_field(CF);
405 tcg_gen_add_i32(dest, dest, tmp);
406 tcg_temp_free_i32(tmp);
407 }
408
409 /* dest = T0 - T1 + CF - 1. */
410 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
411 {
412 TCGv tmp;
413 tcg_gen_sub_i32(dest, t0, t1);
414 tmp = load_cpu_field(CF);
415 tcg_gen_add_i32(dest, dest, tmp);
416 tcg_gen_subi_i32(dest, dest, 1);
417 tcg_temp_free_i32(tmp);
418 }
419
420 /* FIXME: Implement this natively. */
421 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
422
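/* Set the carry flag to bit `shift` of var, i.e. the last bit shifted out
   by the immediate shifts generated below. */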
423 static void shifter_out_im(TCGv var, int shift)
424 {
425 TCGv tmp = tcg_temp_new_i32();
426 if (shift == 0) {
427 tcg_gen_andi_i32(tmp, var, 1);
428 } else {
429 tcg_gen_shri_i32(tmp, var, shift);
430 if (shift != 31)
431 tcg_gen_andi_i32(tmp, tmp, 1);
432 }
433 gen_set_CF(tmp);
434 tcg_temp_free_i32(tmp);
435 }
436
437 /* Shift by immediate. Includes special handling for shift == 0. */
438 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
439 {
440 switch (shiftop) {
441 case 0: /* LSL */
442 if (shift != 0) {
443 if (flags)
444 shifter_out_im(var, 32 - shift);
445 tcg_gen_shli_i32(var, var, shift);
446 }
447 break;
448 case 1: /* LSR */
449 if (shift == 0) {
450 if (flags) {
451 tcg_gen_shri_i32(var, var, 31);
452 gen_set_CF(var);
453 }
454 tcg_gen_movi_i32(var, 0);
455 } else {
456 if (flags)
457 shifter_out_im(var, shift - 1);
458 tcg_gen_shri_i32(var, var, shift);
459 }
460 break;
461 case 2: /* ASR */
462 if (shift == 0)
463 shift = 32;
464 if (flags)
465 shifter_out_im(var, shift - 1);
466 if (shift == 32)
467 shift = 31;
468 tcg_gen_sari_i32(var, var, shift);
469 break;
470 case 3: /* ROR/RRX */
471 if (shift != 0) {
472 if (flags)
473 shifter_out_im(var, shift - 1);
474 tcg_gen_rotri_i32(var, var, shift); break;
475 } else {
476 TCGv tmp = load_cpu_field(CF);
477 if (flags)
478 shifter_out_im(var, 0);
479 tcg_gen_shri_i32(var, var, 1);
480 tcg_gen_shli_i32(tmp, tmp, 31);
481 tcg_gen_or_i32(var, var, tmp);
482 tcg_temp_free_i32(tmp);
483 }
484 }
485 }
486
487 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
488 TCGv shift, int flags)
489 {
490 if (flags) {
491 switch (shiftop) {
492 case 0: gen_helper_shl_cc(var, var, shift); break;
493 case 1: gen_helper_shr_cc(var, var, shift); break;
494 case 2: gen_helper_sar_cc(var, var, shift); break;
495 case 3: gen_helper_ror_cc(var, var, shift); break;
496 }
497 } else {
498 switch (shiftop) {
499 case 0: gen_helper_shl(var, var, shift); break;
500 case 1: gen_helper_shr(var, var, shift); break;
501 case 2: gen_helper_sar(var, var, shift); break;
502 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
503 tcg_gen_rotr_i32(var, var, shift); break;
504 }
505 }
506 tcg_temp_free_i32(shift);
507 }
508
509 #define PAS_OP(pfx) \
510 switch (op2) { \
511 case 0: gen_pas_helper(glue(pfx,add16)); break; \
512 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
513 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
514 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
515 case 4: gen_pas_helper(glue(pfx,add8)); break; \
516 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
517 }
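/* ARM-encoding parallel add/subtract.  op1 selects the variant: signed or
   unsigned with GE flag updates (s/u), saturating (q/uq) or halving (sh/uh);
   op2 selects the operation via PAS_OP above. */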
518 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
519 {
520 TCGv_ptr tmp;
521
522 switch (op1) {
523 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
524 case 1:
525 tmp = tcg_temp_new_ptr();
526 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
527 PAS_OP(s)
528 tcg_temp_free_ptr(tmp);
529 break;
530 case 5:
531 tmp = tcg_temp_new_ptr();
532 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
533 PAS_OP(u)
534 tcg_temp_free_ptr(tmp);
535 break;
536 #undef gen_pas_helper
537 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
538 case 2:
539 PAS_OP(q);
540 break;
541 case 3:
542 PAS_OP(sh);
543 break;
544 case 6:
545 PAS_OP(uq);
546 break;
547 case 7:
548 PAS_OP(uh);
549 break;
550 #undef gen_pas_helper
551 }
552 }
553 #undef PAS_OP
554
555 /* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings.  */
556 #define PAS_OP(pfx) \
557 switch (op1) { \
558 case 0: gen_pas_helper(glue(pfx,add8)); break; \
559 case 1: gen_pas_helper(glue(pfx,add16)); break; \
560 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
561 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
562 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
563 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
564 }
565 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
566 {
567 TCGv_ptr tmp;
568
569 switch (op2) {
570 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
571 case 0:
572 tmp = tcg_temp_new_ptr();
573 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
574 PAS_OP(s)
575 tcg_temp_free_ptr(tmp);
576 break;
577 case 4:
578 tmp = tcg_temp_new_ptr();
579 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
580 PAS_OP(u)
581 tcg_temp_free_ptr(tmp);
582 break;
583 #undef gen_pas_helper
584 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
585 case 1:
586 PAS_OP(q);
587 break;
588 case 2:
589 PAS_OP(sh);
590 break;
591 case 5:
592 PAS_OP(uq);
593 break;
594 case 6:
595 PAS_OP(uh);
596 break;
597 #undef gen_pas_helper
598 }
599 }
600 #undef PAS_OP
601
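/* Emit a branch to `label` that is taken when the ARM condition code cc
   holds, testing the cached NF/ZF/CF/VF flag fields in CPUState. */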
602 static void gen_test_cc(int cc, int label)
603 {
604 TCGv tmp;
605 TCGv tmp2;
606 int inv;
607
608 switch (cc) {
609 case 0: /* eq: Z */
610 tmp = load_cpu_field(ZF);
611 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
612 break;
613 case 1: /* ne: !Z */
614 tmp = load_cpu_field(ZF);
615 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
616 break;
617 case 2: /* cs: C */
618 tmp = load_cpu_field(CF);
619 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
620 break;
621 case 3: /* cc: !C */
622 tmp = load_cpu_field(CF);
623 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
624 break;
625 case 4: /* mi: N */
626 tmp = load_cpu_field(NF);
627 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
628 break;
629 case 5: /* pl: !N */
630 tmp = load_cpu_field(NF);
631 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
632 break;
633 case 6: /* vs: V */
634 tmp = load_cpu_field(VF);
635 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
636 break;
637 case 7: /* vc: !V */
638 tmp = load_cpu_field(VF);
639 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
640 break;
641 case 8: /* hi: C && !Z */
642 inv = gen_new_label();
643 tmp = load_cpu_field(CF);
644 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
645 tcg_temp_free_i32(tmp);
646 tmp = load_cpu_field(ZF);
647 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
648 gen_set_label(inv);
649 break;
650 case 9: /* ls: !C || Z */
651 tmp = load_cpu_field(CF);
652 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
653 tcg_temp_free_i32(tmp);
654 tmp = load_cpu_field(ZF);
655 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
656 break;
657 case 10: /* ge: N == V -> N ^ V == 0 */
658 tmp = load_cpu_field(VF);
659 tmp2 = load_cpu_field(NF);
660 tcg_gen_xor_i32(tmp, tmp, tmp2);
661 tcg_temp_free_i32(tmp2);
662 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
663 break;
664 case 11: /* lt: N != V -> N ^ V != 0 */
665 tmp = load_cpu_field(VF);
666 tmp2 = load_cpu_field(NF);
667 tcg_gen_xor_i32(tmp, tmp, tmp2);
668 tcg_temp_free_i32(tmp2);
669 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
670 break;
671 case 12: /* gt: !Z && N == V */
672 inv = gen_new_label();
673 tmp = load_cpu_field(ZF);
674 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
675 tcg_temp_free_i32(tmp);
676 tmp = load_cpu_field(VF);
677 tmp2 = load_cpu_field(NF);
678 tcg_gen_xor_i32(tmp, tmp, tmp2);
679 tcg_temp_free_i32(tmp2);
680 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
681 gen_set_label(inv);
682 break;
683 case 13: /* le: Z || N != V */
684 tmp = load_cpu_field(ZF);
685 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
686 tcg_temp_free_i32(tmp);
687 tmp = load_cpu_field(VF);
688 tmp2 = load_cpu_field(NF);
689 tcg_gen_xor_i32(tmp, tmp, tmp2);
690 tcg_temp_free_i32(tmp2);
691 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
692 break;
693 default:
694 fprintf(stderr, "Bad condition code 0x%x\n", cc);
695 abort();
696 }
697 tcg_temp_free_i32(tmp);
698 }
699
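/* Data-processing opcodes that are logical operations; their flag setting
   uses gen_logic_CC (N and Z from the result) rather than the arithmetic
   flag helpers. */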
700 static const uint8_t table_logic_cc[16] = {
701 1, /* and */
702 1, /* xor */
703 0, /* sub */
704 0, /* rsb */
705 0, /* add */
706 0, /* adc */
707 0, /* sbc */
708 0, /* rsc */
709 1, /* andl */
710 1, /* xorl */
711 0, /* cmp */
712 0, /* cmn */
713 1, /* orr */
714 1, /* mov */
715 1, /* bic */
716 1, /* mvn */
717 };
718
719 /* Set PC and Thumb state from an immediate address. */
720 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
721 {
722 TCGv tmp;
723
724 s->is_jmp = DISAS_UPDATE;
725 if (s->thumb != (addr & 1)) {
726 tmp = tcg_temp_new_i32();
727 tcg_gen_movi_i32(tmp, addr & 1);
728 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
729 tcg_temp_free_i32(tmp);
730 }
731 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
732 }
733
734 /* Set PC and Thumb state from var. var is marked as dead. */
735 static inline void gen_bx(DisasContext *s, TCGv var)
736 {
737 s->is_jmp = DISAS_UPDATE;
738 tcg_gen_andi_i32(cpu_R[15], var, ~1);
739 tcg_gen_andi_i32(var, var, 1);
740 store_cpu_field(var, thumb);
741 }
742
743 /* Variant of store_reg which uses branch&exchange logic when storing
744 to r15 in ARM architecture v7 and above. The source must be a temporary
745 and will be marked as dead. */
746 static inline void store_reg_bx(CPUState *env, DisasContext *s,
747 int reg, TCGv var)
748 {
749 if (reg == 15 && ENABLE_ARCH_7) {
750 gen_bx(s, var);
751 } else {
752 store_reg(s, reg, var);
753 }
754 }
755
756 /* Variant of store_reg which uses branch&exchange logic when storing
757 * to r15 in ARM architecture v5T and above. This is used for storing
758 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
759 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
760 static inline void store_reg_from_load(CPUState *env, DisasContext *s,
761 int reg, TCGv var)
762 {
763 if (reg == 15 && ENABLE_ARCH_5) {
764 gen_bx(s, var);
765 } else {
766 store_reg(s, reg, var);
767 }
768 }
769
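/* Thin wrappers around the tcg_gen_qemu load/store ops: loads return a fresh
   temporary, stores consume (free) the value temporary.  The index argument
   is the memory access index, typically IS_USER(s) at the call sites. */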
770 static inline TCGv gen_ld8s(TCGv addr, int index)
771 {
772 TCGv tmp = tcg_temp_new_i32();
773 tcg_gen_qemu_ld8s(tmp, addr, index);
774 return tmp;
775 }
776 static inline TCGv gen_ld8u(TCGv addr, int index)
777 {
778 TCGv tmp = tcg_temp_new_i32();
779 tcg_gen_qemu_ld8u(tmp, addr, index);
780 return tmp;
781 }
782 static inline TCGv gen_ld16s(TCGv addr, int index)
783 {
784 TCGv tmp = tcg_temp_new_i32();
785 tcg_gen_qemu_ld16s(tmp, addr, index);
786 return tmp;
787 }
788 static inline TCGv gen_ld16u(TCGv addr, int index)
789 {
790 TCGv tmp = tcg_temp_new_i32();
791 tcg_gen_qemu_ld16u(tmp, addr, index);
792 return tmp;
793 }
794 static inline TCGv gen_ld32(TCGv addr, int index)
795 {
796 TCGv tmp = tcg_temp_new_i32();
797 tcg_gen_qemu_ld32u(tmp, addr, index);
798 return tmp;
799 }
800 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
801 {
802 TCGv_i64 tmp = tcg_temp_new_i64();
803 tcg_gen_qemu_ld64(tmp, addr, index);
804 return tmp;
805 }
806 static inline void gen_st8(TCGv val, TCGv addr, int index)
807 {
808 tcg_gen_qemu_st8(val, addr, index);
809 tcg_temp_free_i32(val);
810 }
811 static inline void gen_st16(TCGv val, TCGv addr, int index)
812 {
813 tcg_gen_qemu_st16(val, addr, index);
814 tcg_temp_free_i32(val);
815 }
816 static inline void gen_st32(TCGv val, TCGv addr, int index)
817 {
818 tcg_gen_qemu_st32(val, addr, index);
819 tcg_temp_free_i32(val);
820 }
821 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
822 {
823 tcg_gen_qemu_st64(val, addr, index);
824 tcg_temp_free_i64(val);
825 }
826
827 static inline void gen_set_pc_im(uint32_t val)
828 {
829 tcg_gen_movi_i32(cpu_R[15], val);
830 }
831
832 /* Force a TB lookup after an instruction that changes the CPU state. */
833 static inline void gen_lookup_tb(DisasContext *s)
834 {
835 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
836 s->is_jmp = DISAS_UPDATE;
837 }
838
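/* Apply the addressing-mode offset of a word/byte load/store insn to var:
   either a 12-bit immediate or a shifted register, added or subtracted
   according to the U bit (bit 23). */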
839 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
840 TCGv var)
841 {
842 int val, rm, shift, shiftop;
843 TCGv offset;
844
845 if (!(insn & (1 << 25))) {
846 /* immediate */
847 val = insn & 0xfff;
848 if (!(insn & (1 << 23)))
849 val = -val;
850 if (val != 0)
851 tcg_gen_addi_i32(var, var, val);
852 } else {
853 /* shift/register */
854 rm = (insn) & 0xf;
855 shift = (insn >> 7) & 0x1f;
856 shiftop = (insn >> 5) & 3;
857 offset = load_reg(s, rm);
858 gen_arm_shift_im(offset, shiftop, shift, 0);
859 if (!(insn & (1 << 23)))
860 tcg_gen_sub_i32(var, var, offset);
861 else
862 tcg_gen_add_i32(var, var, offset);
863 tcg_temp_free_i32(offset);
864 }
865 }
866
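/* Same for the halfword/doubleword forms: a split 8-bit immediate or a plain
   register offset, plus an extra immediate adjustment supplied by the caller. */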
867 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
868 int extra, TCGv var)
869 {
870 int val, rm;
871 TCGv offset;
872
873 if (insn & (1 << 22)) {
874 /* immediate */
875 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
876 if (!(insn & (1 << 23)))
877 val = -val;
878 val += extra;
879 if (val != 0)
880 tcg_gen_addi_i32(var, var, val);
881 } else {
882 /* register */
883 if (extra)
884 tcg_gen_addi_i32(var, var, extra);
885 rm = (insn) & 0xf;
886 offset = load_reg(s, rm);
887 if (!(insn & (1 << 23)))
888 tcg_gen_sub_i32(var, var, offset);
889 else
890 tcg_gen_add_i32(var, var, offset);
891 tcg_temp_free_i32(offset);
892 }
893 }
894
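/* Two-operand VFP arithmetic on the F0/F1 working registers, dispatching to
   the double- or single-precision helper according to dp. */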
895 #define VFP_OP2(name) \
896 static inline void gen_vfp_##name(int dp) \
897 { \
898 if (dp) \
899 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
900 else \
901 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
902 }
903
904 VFP_OP2(add)
905 VFP_OP2(sub)
906 VFP_OP2(mul)
907 VFP_OP2(div)
908
909 #undef VFP_OP2
910
911 static inline void gen_vfp_F1_mul(int dp)
912 {
913 /* Like gen_vfp_mul() but put result in F1 */
914 if (dp) {
915 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, cpu_env);
916 } else {
917 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, cpu_env);
918 }
919 }
920
921 static inline void gen_vfp_F1_neg(int dp)
922 {
923 /* Like gen_vfp_neg() but put result in F1 */
924 if (dp) {
925 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
926 } else {
927 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
928 }
929 }
930
931 static inline void gen_vfp_abs(int dp)
932 {
933 if (dp)
934 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
935 else
936 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
937 }
938
939 static inline void gen_vfp_neg(int dp)
940 {
941 if (dp)
942 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
943 else
944 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
945 }
946
947 static inline void gen_vfp_sqrt(int dp)
948 {
949 if (dp)
950 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
951 else
952 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
953 }
954
955 static inline void gen_vfp_cmp(int dp)
956 {
957 if (dp)
958 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
959 else
960 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
961 }
962
963 static inline void gen_vfp_cmpe(int dp)
964 {
965 if (dp)
966 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
967 else
968 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
969 }
970
971 static inline void gen_vfp_F1_ld0(int dp)
972 {
973 if (dp)
974 tcg_gen_movi_i64(cpu_F1d, 0);
975 else
976 tcg_gen_movi_i32(cpu_F1s, 0);
977 }
978
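/* Integer-to-float conversions.  When used from Neon (neon != 0) the
   vfp.standard_fp_status flags are used instead of the normal vfp.fp_status. */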
979 #define VFP_GEN_ITOF(name) \
980 static inline void gen_vfp_##name(int dp, int neon) \
981 { \
982 TCGv_ptr statusptr = tcg_temp_new_ptr(); \
983 int offset; \
984 if (neon) { \
985 offset = offsetof(CPUState, vfp.standard_fp_status); \
986 } else { \
987 offset = offsetof(CPUState, vfp.fp_status); \
988 } \
989 tcg_gen_addi_ptr(statusptr, cpu_env, offset); \
990 if (dp) { \
991 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
992 } else { \
993 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
994 } \
995 tcg_temp_free_ptr(statusptr); \
996 }
997
998 VFP_GEN_ITOF(uito)
999 VFP_GEN_ITOF(sito)
1000 #undef VFP_GEN_ITOF
1001
1002 #define VFP_GEN_FTOI(name) \
1003 static inline void gen_vfp_##name(int dp, int neon) \
1004 { \
1005 TCGv_ptr statusptr = tcg_temp_new_ptr(); \
1006 int offset; \
1007 if (neon) { \
1008 offset = offsetof(CPUState, vfp.standard_fp_status); \
1009 } else { \
1010 offset = offsetof(CPUState, vfp.fp_status); \
1011 } \
1012 tcg_gen_addi_ptr(statusptr, cpu_env, offset); \
1013 if (dp) { \
1014 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1015 } else { \
1016 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1017 } \
1018 tcg_temp_free_ptr(statusptr); \
1019 }
1020
1021 VFP_GEN_FTOI(toui)
1022 VFP_GEN_FTOI(touiz)
1023 VFP_GEN_FTOI(tosi)
1024 VFP_GEN_FTOI(tosiz)
1025 #undef VFP_GEN_FTOI
1026
1027 #define VFP_GEN_FIX(name) \
1028 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1029 { \
1030 TCGv tmp_shift = tcg_const_i32(shift); \
1031 TCGv_ptr statusptr = tcg_temp_new_ptr(); \
1032 int offset; \
1033 if (neon) { \
1034 offset = offsetof(CPUState, vfp.standard_fp_status); \
1035 } else { \
1036 offset = offsetof(CPUState, vfp.fp_status); \
1037 } \
1038 tcg_gen_addi_ptr(statusptr, cpu_env, offset); \
1039 if (dp) { \
1040 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1041 } else { \
1042 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1043 } \
1044 tcg_temp_free_i32(tmp_shift); \
1045 tcg_temp_free_ptr(statusptr); \
1046 }
1047 VFP_GEN_FIX(tosh)
1048 VFP_GEN_FIX(tosl)
1049 VFP_GEN_FIX(touh)
1050 VFP_GEN_FIX(toul)
1051 VFP_GEN_FIX(shto)
1052 VFP_GEN_FIX(slto)
1053 VFP_GEN_FIX(uhto)
1054 VFP_GEN_FIX(ulto)
1055 #undef VFP_GEN_FIX
1056
1057 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1058 {
1059 if (dp)
1060 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1061 else
1062 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1063 }
1064
1065 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1066 {
1067 if (dp)
1068 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1069 else
1070 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1071 }
1072
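/* Byte offset of a VFP register within CPUARMState.  dp selects a
   double-precision register; single-precision registers map onto the upper
   or lower half of a double via CPU_DoubleU. */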
1073 static inline long
1074 vfp_reg_offset (int dp, int reg)
1075 {
1076 if (dp)
1077 return offsetof(CPUARMState, vfp.regs[reg]);
1078 else if (reg & 1) {
1079 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1080 + offsetof(CPU_DoubleU, l.upper);
1081 } else {
1082 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1083 + offsetof(CPU_DoubleU, l.lower);
1084 }
1085 }
1086
1087 /* Return the offset of a 32-bit piece of a NEON register.
1088    Zero is the least significant end of the register.  */
1089 static inline long
1090 neon_reg_offset (int reg, int n)
1091 {
1092 int sreg;
1093 sreg = reg * 2 + n;
1094 return vfp_reg_offset(0, sreg);
1095 }
1096
1097 static TCGv neon_load_reg(int reg, int pass)
1098 {
1099 TCGv tmp = tcg_temp_new_i32();
1100 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1101 return tmp;
1102 }
1103
1104 static void neon_store_reg(int reg, int pass, TCGv var)
1105 {
1106 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1107 tcg_temp_free_i32(var);
1108 }
1109
1110 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1111 {
1112 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1113 }
1114
1115 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1116 {
1117 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1118 }
1119
1120 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1121 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1122 #define tcg_gen_st_f32 tcg_gen_st_i32
1123 #define tcg_gen_st_f64 tcg_gen_st_i64
1124
1125 static inline void gen_mov_F0_vreg(int dp, int reg)
1126 {
1127 if (dp)
1128 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1129 else
1130 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1131 }
1132
1133 static inline void gen_mov_F1_vreg(int dp, int reg)
1134 {
1135 if (dp)
1136 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1137 else
1138 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1139 }
1140
1141 static inline void gen_mov_vreg_F0(int dp, int reg)
1142 {
1143 if (dp)
1144 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1145 else
1146 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1147 }
1148
1149 #define ARM_CP_RW_BIT (1 << 20)
1150
1151 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1152 {
1153 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1154 }
1155
1156 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1157 {
1158 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1159 }
1160
1161 static inline TCGv iwmmxt_load_creg(int reg)
1162 {
1163 TCGv var = tcg_temp_new_i32();
1164 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1165 return var;
1166 }
1167
1168 static inline void iwmmxt_store_creg(int reg, TCGv var)
1169 {
1170 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1171 tcg_temp_free_i32(var);
1172 }
1173
1174 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1175 {
1176 iwmmxt_store_reg(cpu_M0, rn);
1177 }
1178
1179 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1180 {
1181 iwmmxt_load_reg(cpu_M0, rn);
1182 }
1183
1184 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1185 {
1186 iwmmxt_load_reg(cpu_V1, rn);
1187 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1188 }
1189
1190 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1191 {
1192 iwmmxt_load_reg(cpu_V1, rn);
1193 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1194 }
1195
1196 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1197 {
1198 iwmmxt_load_reg(cpu_V1, rn);
1199 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1200 }
1201
1202 #define IWMMXT_OP(name) \
1203 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1204 { \
1205 iwmmxt_load_reg(cpu_V1, rn); \
1206 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1207 }
1208
1209 #define IWMMXT_OP_SIZE(name) \
1210 IWMMXT_OP(name##b) \
1211 IWMMXT_OP(name##w) \
1212 IWMMXT_OP(name##l)
1213
1214 #define IWMMXT_OP_1(name) \
1215 static inline void gen_op_iwmmxt_##name##_M0(void) \
1216 { \
1217 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
1218 }
1219
1220 IWMMXT_OP(maddsq)
1221 IWMMXT_OP(madduq)
1222 IWMMXT_OP(sadb)
1223 IWMMXT_OP(sadw)
1224 IWMMXT_OP(mulslw)
1225 IWMMXT_OP(mulshw)
1226 IWMMXT_OP(mululw)
1227 IWMMXT_OP(muluhw)
1228 IWMMXT_OP(macsw)
1229 IWMMXT_OP(macuw)
1230
1231 IWMMXT_OP_SIZE(unpackl)
1232 IWMMXT_OP_SIZE(unpackh)
1233
1234 IWMMXT_OP_1(unpacklub)
1235 IWMMXT_OP_1(unpackluw)
1236 IWMMXT_OP_1(unpacklul)
1237 IWMMXT_OP_1(unpackhub)
1238 IWMMXT_OP_1(unpackhuw)
1239 IWMMXT_OP_1(unpackhul)
1240 IWMMXT_OP_1(unpacklsb)
1241 IWMMXT_OP_1(unpacklsw)
1242 IWMMXT_OP_1(unpacklsl)
1243 IWMMXT_OP_1(unpackhsb)
1244 IWMMXT_OP_1(unpackhsw)
1245 IWMMXT_OP_1(unpackhsl)
1246
1247 IWMMXT_OP_SIZE(cmpeq)
1248 IWMMXT_OP_SIZE(cmpgtu)
1249 IWMMXT_OP_SIZE(cmpgts)
1250
1251 IWMMXT_OP_SIZE(mins)
1252 IWMMXT_OP_SIZE(minu)
1253 IWMMXT_OP_SIZE(maxs)
1254 IWMMXT_OP_SIZE(maxu)
1255
1256 IWMMXT_OP_SIZE(subn)
1257 IWMMXT_OP_SIZE(addn)
1258 IWMMXT_OP_SIZE(subu)
1259 IWMMXT_OP_SIZE(addu)
1260 IWMMXT_OP_SIZE(subs)
1261 IWMMXT_OP_SIZE(adds)
1262
1263 IWMMXT_OP(avgb0)
1264 IWMMXT_OP(avgb1)
1265 IWMMXT_OP(avgw0)
1266 IWMMXT_OP(avgw1)
1267
1268 IWMMXT_OP(msadb)
1269
1270 IWMMXT_OP(packuw)
1271 IWMMXT_OP(packul)
1272 IWMMXT_OP(packuq)
1273 IWMMXT_OP(packsw)
1274 IWMMXT_OP(packsl)
1275 IWMMXT_OP(packsq)
1276
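/* Record register-file updates in the iwMMXt wCon register: set_mup sets
   bit 1 (value 2) after a wRn data register update, set_cup sets bit 0
   (value 1) after a control register update. */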
1277 static void gen_op_iwmmxt_set_mup(void)
1278 {
1279 TCGv tmp;
1280 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1281 tcg_gen_ori_i32(tmp, tmp, 2);
1282 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1283 }
1284
1285 static void gen_op_iwmmxt_set_cup(void)
1286 {
1287 TCGv tmp;
1288 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1289 tcg_gen_ori_i32(tmp, tmp, 1);
1290 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1291 }
1292
1293 static void gen_op_iwmmxt_setpsr_nz(void)
1294 {
1295 TCGv tmp = tcg_temp_new_i32();
1296 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1297 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1298 }
1299
1300 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1301 {
1302 iwmmxt_load_reg(cpu_V1, rn);
1303 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1304 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1305 }
1306
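/* Compute the effective address of an iwMMXt load/store into dest, handling
   pre- and post-indexed modes with optional base writeback.  Returns nonzero
   for an invalid addressing mode. */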
1307 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1308 {
1309 int rd;
1310 uint32_t offset;
1311 TCGv tmp;
1312
1313 rd = (insn >> 16) & 0xf;
1314 tmp = load_reg(s, rd);
1315
1316 offset = (insn & 0xff) << ((insn >> 7) & 2);
1317 if (insn & (1 << 24)) {
1318 /* Pre indexed */
1319 if (insn & (1 << 23))
1320 tcg_gen_addi_i32(tmp, tmp, offset);
1321 else
1322 tcg_gen_addi_i32(tmp, tmp, -offset);
1323 tcg_gen_mov_i32(dest, tmp);
1324 if (insn & (1 << 21))
1325 store_reg(s, rd, tmp);
1326 else
1327 tcg_temp_free_i32(tmp);
1328 } else if (insn & (1 << 21)) {
1329 /* Post indexed */
1330 tcg_gen_mov_i32(dest, tmp);
1331 if (insn & (1 << 23))
1332 tcg_gen_addi_i32(tmp, tmp, offset);
1333 else
1334 tcg_gen_addi_i32(tmp, tmp, -offset);
1335 store_reg(s, rd, tmp);
1336 } else if (!(insn & (1 << 23)))
1337 return 1;
1338 return 0;
1339 }
1340
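/* Fetch an iwMMXt shift amount into dest: from a wCGR0..wCGR3 control
   register when bit 8 of the insn is set, otherwise from the low 32 bits of
   wRd, masked with mask.  Returns nonzero for a bad control register. */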
1341 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1342 {
1343 int rd = (insn >> 0) & 0xf;
1344 TCGv tmp;
1345
1346 if (insn & (1 << 8)) {
1347 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1348 return 1;
1349 } else {
1350 tmp = iwmmxt_load_creg(rd);
1351 }
1352 } else {
1353 tmp = tcg_temp_new_i32();
1354 iwmmxt_load_reg(cpu_V0, rd);
1355 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1356 }
1357 tcg_gen_andi_i32(tmp, tmp, mask);
1358 tcg_gen_mov_i32(dest, tmp);
1359 tcg_temp_free_i32(tmp);
1360 return 0;
1361 }
1362
1363 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1364    (i.e. an undefined instruction).  */
1365 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1366 {
1367 int rd, wrd;
1368 int rdhi, rdlo, rd0, rd1, i;
1369 TCGv addr;
1370 TCGv tmp, tmp2, tmp3;
1371
1372 if ((insn & 0x0e000e00) == 0x0c000000) {
1373 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1374 wrd = insn & 0xf;
1375 rdlo = (insn >> 12) & 0xf;
1376 rdhi = (insn >> 16) & 0xf;
1377 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1378 iwmmxt_load_reg(cpu_V0, wrd);
1379 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1380 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1381 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1382 } else { /* TMCRR */
1383 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1384 iwmmxt_store_reg(cpu_V0, wrd);
1385 gen_op_iwmmxt_set_mup();
1386 }
1387 return 0;
1388 }
1389
1390 wrd = (insn >> 12) & 0xf;
1391 addr = tcg_temp_new_i32();
1392 if (gen_iwmmxt_address(s, insn, addr)) {
1393 tcg_temp_free_i32(addr);
1394 return 1;
1395 }
1396 if (insn & ARM_CP_RW_BIT) {
1397 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1398 tmp = tcg_temp_new_i32();
1399 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1400 iwmmxt_store_creg(wrd, tmp);
1401 } else {
1402 i = 1;
1403 if (insn & (1 << 8)) {
1404 if (insn & (1 << 22)) { /* WLDRD */
1405 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1406 i = 0;
1407 } else { /* WLDRW wRd */
1408 tmp = gen_ld32(addr, IS_USER(s));
1409 }
1410 } else {
1411 if (insn & (1 << 22)) { /* WLDRH */
1412 tmp = gen_ld16u(addr, IS_USER(s));
1413 } else { /* WLDRB */
1414 tmp = gen_ld8u(addr, IS_USER(s));
1415 }
1416 }
1417 if (i) {
1418 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1419 tcg_temp_free_i32(tmp);
1420 }
1421 gen_op_iwmmxt_movq_wRn_M0(wrd);
1422 }
1423 } else {
1424 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1425 tmp = iwmmxt_load_creg(wrd);
1426 gen_st32(tmp, addr, IS_USER(s));
1427 } else {
1428 gen_op_iwmmxt_movq_M0_wRn(wrd);
1429 tmp = tcg_temp_new_i32();
1430 if (insn & (1 << 8)) {
1431 if (insn & (1 << 22)) { /* WSTRD */
1432 tcg_temp_free_i32(tmp);
1433 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1434 } else { /* WSTRW wRd */
1435 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1436 gen_st32(tmp, addr, IS_USER(s));
1437 }
1438 } else {
1439 if (insn & (1 << 22)) { /* WSTRH */
1440 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1441 gen_st16(tmp, addr, IS_USER(s));
1442 } else { /* WSTRB */
1443 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1444 gen_st8(tmp, addr, IS_USER(s));
1445 }
1446 }
1447 }
1448 }
1449 tcg_temp_free_i32(addr);
1450 return 0;
1451 }
1452
1453 if ((insn & 0x0f000000) != 0x0e000000)
1454 return 1;
1455
1456 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1457 case 0x000: /* WOR */
1458 wrd = (insn >> 12) & 0xf;
1459 rd0 = (insn >> 0) & 0xf;
1460 rd1 = (insn >> 16) & 0xf;
1461 gen_op_iwmmxt_movq_M0_wRn(rd0);
1462 gen_op_iwmmxt_orq_M0_wRn(rd1);
1463 gen_op_iwmmxt_setpsr_nz();
1464 gen_op_iwmmxt_movq_wRn_M0(wrd);
1465 gen_op_iwmmxt_set_mup();
1466 gen_op_iwmmxt_set_cup();
1467 break;
1468 case 0x011: /* TMCR */
1469 if (insn & 0xf)
1470 return 1;
1471 rd = (insn >> 12) & 0xf;
1472 wrd = (insn >> 16) & 0xf;
1473 switch (wrd) {
1474 case ARM_IWMMXT_wCID:
1475 case ARM_IWMMXT_wCASF:
1476 break;
1477 case ARM_IWMMXT_wCon:
1478 gen_op_iwmmxt_set_cup();
1479 /* Fall through. */
1480 case ARM_IWMMXT_wCSSF:
1481 tmp = iwmmxt_load_creg(wrd);
1482 tmp2 = load_reg(s, rd);
1483 tcg_gen_andc_i32(tmp, tmp, tmp2);
1484 tcg_temp_free_i32(tmp2);
1485 iwmmxt_store_creg(wrd, tmp);
1486 break;
1487 case ARM_IWMMXT_wCGR0:
1488 case ARM_IWMMXT_wCGR1:
1489 case ARM_IWMMXT_wCGR2:
1490 case ARM_IWMMXT_wCGR3:
1491 gen_op_iwmmxt_set_cup();
1492 tmp = load_reg(s, rd);
1493 iwmmxt_store_creg(wrd, tmp);
1494 break;
1495 default:
1496 return 1;
1497 }
1498 break;
1499 case 0x100: /* WXOR */
1500 wrd = (insn >> 12) & 0xf;
1501 rd0 = (insn >> 0) & 0xf;
1502 rd1 = (insn >> 16) & 0xf;
1503 gen_op_iwmmxt_movq_M0_wRn(rd0);
1504 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1505 gen_op_iwmmxt_setpsr_nz();
1506 gen_op_iwmmxt_movq_wRn_M0(wrd);
1507 gen_op_iwmmxt_set_mup();
1508 gen_op_iwmmxt_set_cup();
1509 break;
1510 case 0x111: /* TMRC */
1511 if (insn & 0xf)
1512 return 1;
1513 rd = (insn >> 12) & 0xf;
1514 wrd = (insn >> 16) & 0xf;
1515 tmp = iwmmxt_load_creg(wrd);
1516 store_reg(s, rd, tmp);
1517 break;
1518 case 0x300: /* WANDN */
1519 wrd = (insn >> 12) & 0xf;
1520 rd0 = (insn >> 0) & 0xf;
1521 rd1 = (insn >> 16) & 0xf;
1522 gen_op_iwmmxt_movq_M0_wRn(rd0);
1523 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1524 gen_op_iwmmxt_andq_M0_wRn(rd1);
1525 gen_op_iwmmxt_setpsr_nz();
1526 gen_op_iwmmxt_movq_wRn_M0(wrd);
1527 gen_op_iwmmxt_set_mup();
1528 gen_op_iwmmxt_set_cup();
1529 break;
1530 case 0x200: /* WAND */
1531 wrd = (insn >> 12) & 0xf;
1532 rd0 = (insn >> 0) & 0xf;
1533 rd1 = (insn >> 16) & 0xf;
1534 gen_op_iwmmxt_movq_M0_wRn(rd0);
1535 gen_op_iwmmxt_andq_M0_wRn(rd1);
1536 gen_op_iwmmxt_setpsr_nz();
1537 gen_op_iwmmxt_movq_wRn_M0(wrd);
1538 gen_op_iwmmxt_set_mup();
1539 gen_op_iwmmxt_set_cup();
1540 break;
1541 case 0x810: case 0xa10: /* WMADD */
1542 wrd = (insn >> 12) & 0xf;
1543 rd0 = (insn >> 0) & 0xf;
1544 rd1 = (insn >> 16) & 0xf;
1545 gen_op_iwmmxt_movq_M0_wRn(rd0);
1546 if (insn & (1 << 21))
1547 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1548 else
1549 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1550 gen_op_iwmmxt_movq_wRn_M0(wrd);
1551 gen_op_iwmmxt_set_mup();
1552 break;
1553 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1554 wrd = (insn >> 12) & 0xf;
1555 rd0 = (insn >> 16) & 0xf;
1556 rd1 = (insn >> 0) & 0xf;
1557 gen_op_iwmmxt_movq_M0_wRn(rd0);
1558 switch ((insn >> 22) & 3) {
1559 case 0:
1560 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1561 break;
1562 case 1:
1563 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1564 break;
1565 case 2:
1566 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1567 break;
1568 case 3:
1569 return 1;
1570 }
1571 gen_op_iwmmxt_movq_wRn_M0(wrd);
1572 gen_op_iwmmxt_set_mup();
1573 gen_op_iwmmxt_set_cup();
1574 break;
1575 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1576 wrd = (insn >> 12) & 0xf;
1577 rd0 = (insn >> 16) & 0xf;
1578 rd1 = (insn >> 0) & 0xf;
1579 gen_op_iwmmxt_movq_M0_wRn(rd0);
1580 switch ((insn >> 22) & 3) {
1581 case 0:
1582 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1583 break;
1584 case 1:
1585 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1586 break;
1587 case 2:
1588 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1589 break;
1590 case 3:
1591 return 1;
1592 }
1593 gen_op_iwmmxt_movq_wRn_M0(wrd);
1594 gen_op_iwmmxt_set_mup();
1595 gen_op_iwmmxt_set_cup();
1596 break;
1597 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1598 wrd = (insn >> 12) & 0xf;
1599 rd0 = (insn >> 16) & 0xf;
1600 rd1 = (insn >> 0) & 0xf;
1601 gen_op_iwmmxt_movq_M0_wRn(rd0);
1602 if (insn & (1 << 22))
1603 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1604 else
1605 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1606 if (!(insn & (1 << 20)))
1607 gen_op_iwmmxt_addl_M0_wRn(wrd);
1608 gen_op_iwmmxt_movq_wRn_M0(wrd);
1609 gen_op_iwmmxt_set_mup();
1610 break;
1611 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1612 wrd = (insn >> 12) & 0xf;
1613 rd0 = (insn >> 16) & 0xf;
1614 rd1 = (insn >> 0) & 0xf;
1615 gen_op_iwmmxt_movq_M0_wRn(rd0);
1616 if (insn & (1 << 21)) {
1617 if (insn & (1 << 20))
1618 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1619 else
1620 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1621 } else {
1622 if (insn & (1 << 20))
1623 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1624 else
1625 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1626 }
1627 gen_op_iwmmxt_movq_wRn_M0(wrd);
1628 gen_op_iwmmxt_set_mup();
1629 break;
1630 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1631 wrd = (insn >> 12) & 0xf;
1632 rd0 = (insn >> 16) & 0xf;
1633 rd1 = (insn >> 0) & 0xf;
1634 gen_op_iwmmxt_movq_M0_wRn(rd0);
1635 if (insn & (1 << 21))
1636 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1637 else
1638 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1639 if (!(insn & (1 << 20))) {
1640 iwmmxt_load_reg(cpu_V1, wrd);
1641 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1642 }
1643 gen_op_iwmmxt_movq_wRn_M0(wrd);
1644 gen_op_iwmmxt_set_mup();
1645 break;
1646 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1647 wrd = (insn >> 12) & 0xf;
1648 rd0 = (insn >> 16) & 0xf;
1649 rd1 = (insn >> 0) & 0xf;
1650 gen_op_iwmmxt_movq_M0_wRn(rd0);
1651 switch ((insn >> 22) & 3) {
1652 case 0:
1653 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1654 break;
1655 case 1:
1656 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1657 break;
1658 case 2:
1659 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1660 break;
1661 case 3:
1662 return 1;
1663 }
1664 gen_op_iwmmxt_movq_wRn_M0(wrd);
1665 gen_op_iwmmxt_set_mup();
1666 gen_op_iwmmxt_set_cup();
1667 break;
1668 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1669 wrd = (insn >> 12) & 0xf;
1670 rd0 = (insn >> 16) & 0xf;
1671 rd1 = (insn >> 0) & 0xf;
1672 gen_op_iwmmxt_movq_M0_wRn(rd0);
1673 if (insn & (1 << 22)) {
1674 if (insn & (1 << 20))
1675 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1676 else
1677 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1678 } else {
1679 if (insn & (1 << 20))
1680 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1681 else
1682 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1683 }
1684 gen_op_iwmmxt_movq_wRn_M0(wrd);
1685 gen_op_iwmmxt_set_mup();
1686 gen_op_iwmmxt_set_cup();
1687 break;
1688 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1689 wrd = (insn >> 12) & 0xf;
1690 rd0 = (insn >> 16) & 0xf;
1691 rd1 = (insn >> 0) & 0xf;
1692 gen_op_iwmmxt_movq_M0_wRn(rd0);
1693 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1694 tcg_gen_andi_i32(tmp, tmp, 7);
1695 iwmmxt_load_reg(cpu_V1, rd1);
1696 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1697 tcg_temp_free_i32(tmp);
1698 gen_op_iwmmxt_movq_wRn_M0(wrd);
1699 gen_op_iwmmxt_set_mup();
1700 break;
1701 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1702 if (((insn >> 6) & 3) == 3)
1703 return 1;
1704 rd = (insn >> 12) & 0xf;
1705 wrd = (insn >> 16) & 0xf;
1706 tmp = load_reg(s, rd);
1707 gen_op_iwmmxt_movq_M0_wRn(wrd);
1708 switch ((insn >> 6) & 3) {
1709 case 0:
1710 tmp2 = tcg_const_i32(0xff);
1711 tmp3 = tcg_const_i32((insn & 7) << 3);
1712 break;
1713 case 1:
1714 tmp2 = tcg_const_i32(0xffff);
1715 tmp3 = tcg_const_i32((insn & 3) << 4);
1716 break;
1717 case 2:
1718 tmp2 = tcg_const_i32(0xffffffff);
1719 tmp3 = tcg_const_i32((insn & 1) << 5);
1720 break;
1721 default:
1722 TCGV_UNUSED(tmp2);
1723 TCGV_UNUSED(tmp3);
1724 }
1725 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1726 tcg_temp_free(tmp3);
1727 tcg_temp_free(tmp2);
1728 tcg_temp_free_i32(tmp);
1729 gen_op_iwmmxt_movq_wRn_M0(wrd);
1730 gen_op_iwmmxt_set_mup();
1731 break;
1732 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1733 rd = (insn >> 12) & 0xf;
1734 wrd = (insn >> 16) & 0xf;
1735 if (rd == 15 || ((insn >> 22) & 3) == 3)
1736 return 1;
1737 gen_op_iwmmxt_movq_M0_wRn(wrd);
1738 tmp = tcg_temp_new_i32();
1739 switch ((insn >> 22) & 3) {
1740 case 0:
1741 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1742 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1743 if (insn & 8) {
1744 tcg_gen_ext8s_i32(tmp, tmp);
1745 } else {
1746 tcg_gen_andi_i32(tmp, tmp, 0xff);
1747 }
1748 break;
1749 case 1:
1750 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1751 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1752 if (insn & 8) {
1753 tcg_gen_ext16s_i32(tmp, tmp);
1754 } else {
1755 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1756 }
1757 break;
1758 case 2:
1759 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1760 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1761 break;
1762 }
1763 store_reg(s, rd, tmp);
1764 break;
1765 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1766 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1767 return 1;
1768 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1769 switch ((insn >> 22) & 3) {
1770 case 0:
1771 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1772 break;
1773 case 1:
1774 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1775 break;
1776 case 2:
1777 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1778 break;
1779 }
1780 tcg_gen_shli_i32(tmp, tmp, 28);
1781 gen_set_nzcv(tmp);
1782 tcg_temp_free_i32(tmp);
1783 break;
1784 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1785 if (((insn >> 6) & 3) == 3)
1786 return 1;
1787 rd = (insn >> 12) & 0xf;
1788 wrd = (insn >> 16) & 0xf;
1789 tmp = load_reg(s, rd);
1790 switch ((insn >> 6) & 3) {
1791 case 0:
1792 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1793 break;
1794 case 1:
1795 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1796 break;
1797 case 2:
1798 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1799 break;
1800 }
1801 tcg_temp_free_i32(tmp);
1802 gen_op_iwmmxt_movq_wRn_M0(wrd);
1803 gen_op_iwmmxt_set_mup();
1804 break;
1805 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1806 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1807 return 1;
1808 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1809 tmp2 = tcg_temp_new_i32();
1810 tcg_gen_mov_i32(tmp2, tmp);
1811 switch ((insn >> 22) & 3) {
1812 case 0:
1813 for (i = 0; i < 7; i ++) {
1814 tcg_gen_shli_i32(tmp2, tmp2, 4);
1815 tcg_gen_and_i32(tmp, tmp, tmp2);
1816 }
1817 break;
1818 case 1:
1819 for (i = 0; i < 3; i ++) {
1820 tcg_gen_shli_i32(tmp2, tmp2, 8);
1821 tcg_gen_and_i32(tmp, tmp, tmp2);
1822 }
1823 break;
1824 case 2:
1825 tcg_gen_shli_i32(tmp2, tmp2, 16);
1826 tcg_gen_and_i32(tmp, tmp, tmp2);
1827 break;
1828 }
1829 gen_set_nzcv(tmp);
1830 tcg_temp_free_i32(tmp2);
1831 tcg_temp_free_i32(tmp);
1832 break;
1833 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1834 wrd = (insn >> 12) & 0xf;
1835 rd0 = (insn >> 16) & 0xf;
1836 gen_op_iwmmxt_movq_M0_wRn(rd0);
1837 switch ((insn >> 22) & 3) {
1838 case 0:
1839 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1840 break;
1841 case 1:
1842 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1843 break;
1844 case 2:
1845 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1846 break;
1847 case 3:
1848 return 1;
1849 }
1850 gen_op_iwmmxt_movq_wRn_M0(wrd);
1851 gen_op_iwmmxt_set_mup();
1852 break;
1853 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1854 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1855 return 1;
1856 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1857 tmp2 = tcg_temp_new_i32();
1858 tcg_gen_mov_i32(tmp2, tmp);
1859 switch ((insn >> 22) & 3) {
1860 case 0:
1861 for (i = 0; i < 7; i ++) {
1862 tcg_gen_shli_i32(tmp2, tmp2, 4);
1863 tcg_gen_or_i32(tmp, tmp, tmp2);
1864 }
1865 break;
1866 case 1:
1867 for (i = 0; i < 3; i ++) {
1868 tcg_gen_shli_i32(tmp2, tmp2, 8);
1869 tcg_gen_or_i32(tmp, tmp, tmp2);
1870 }
1871 break;
1872 case 2:
1873 tcg_gen_shli_i32(tmp2, tmp2, 16);
1874 tcg_gen_or_i32(tmp, tmp, tmp2);
1875 break;
1876 }
1877 gen_set_nzcv(tmp);
1878 tcg_temp_free_i32(tmp2);
1879 tcg_temp_free_i32(tmp);
1880 break;
1881 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1882 rd = (insn >> 12) & 0xf;
1883 rd0 = (insn >> 16) & 0xf;
1884 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1885 return 1;
1886 gen_op_iwmmxt_movq_M0_wRn(rd0);
1887 tmp = tcg_temp_new_i32();
1888 switch ((insn >> 22) & 3) {
1889 case 0:
1890 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1891 break;
1892 case 1:
1893 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1894 break;
1895 case 2:
1896 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1897 break;
1898 }
1899 store_reg(s, rd, tmp);
1900 break;
1901 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1902 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1903 wrd = (insn >> 12) & 0xf;
1904 rd0 = (insn >> 16) & 0xf;
1905 rd1 = (insn >> 0) & 0xf;
1906 gen_op_iwmmxt_movq_M0_wRn(rd0);
1907 switch ((insn >> 22) & 3) {
1908 case 0:
1909 if (insn & (1 << 21))
1910 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1911 else
1912 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1913 break;
1914 case 1:
1915 if (insn & (1 << 21))
1916 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1917 else
1918 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1919 break;
1920 case 2:
1921 if (insn & (1 << 21))
1922 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1923 else
1924 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1925 break;
1926 case 3:
1927 return 1;
1928 }
1929 gen_op_iwmmxt_movq_wRn_M0(wrd);
1930 gen_op_iwmmxt_set_mup();
1931 gen_op_iwmmxt_set_cup();
1932 break;
1933 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1934 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1935 wrd = (insn >> 12) & 0xf;
1936 rd0 = (insn >> 16) & 0xf;
1937 gen_op_iwmmxt_movq_M0_wRn(rd0);
1938 switch ((insn >> 22) & 3) {
1939 case 0:
1940 if (insn & (1 << 21))
1941 gen_op_iwmmxt_unpacklsb_M0();
1942 else
1943 gen_op_iwmmxt_unpacklub_M0();
1944 break;
1945 case 1:
1946 if (insn & (1 << 21))
1947 gen_op_iwmmxt_unpacklsw_M0();
1948 else
1949 gen_op_iwmmxt_unpackluw_M0();
1950 break;
1951 case 2:
1952 if (insn & (1 << 21))
1953 gen_op_iwmmxt_unpacklsl_M0();
1954 else
1955 gen_op_iwmmxt_unpacklul_M0();
1956 break;
1957 case 3:
1958 return 1;
1959 }
1960 gen_op_iwmmxt_movq_wRn_M0(wrd);
1961 gen_op_iwmmxt_set_mup();
1962 gen_op_iwmmxt_set_cup();
1963 break;
1964 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1965 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1966 wrd = (insn >> 12) & 0xf;
1967 rd0 = (insn >> 16) & 0xf;
1968 gen_op_iwmmxt_movq_M0_wRn(rd0);
1969 switch ((insn >> 22) & 3) {
1970 case 0:
1971 if (insn & (1 << 21))
1972 gen_op_iwmmxt_unpackhsb_M0();
1973 else
1974 gen_op_iwmmxt_unpackhub_M0();
1975 break;
1976 case 1:
1977 if (insn & (1 << 21))
1978 gen_op_iwmmxt_unpackhsw_M0();
1979 else
1980 gen_op_iwmmxt_unpackhuw_M0();
1981 break;
1982 case 2:
1983 if (insn & (1 << 21))
1984 gen_op_iwmmxt_unpackhsl_M0();
1985 else
1986 gen_op_iwmmxt_unpackhul_M0();
1987 break;
1988 case 3:
1989 return 1;
1990 }
1991 gen_op_iwmmxt_movq_wRn_M0(wrd);
1992 gen_op_iwmmxt_set_mup();
1993 gen_op_iwmmxt_set_cup();
1994 break;
1995 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1996 case 0x214: case 0x614: case 0xa14: case 0xe14:
1997 if (((insn >> 22) & 3) == 0)
1998 return 1;
1999 wrd = (insn >> 12) & 0xf;
2000 rd0 = (insn >> 16) & 0xf;
2001 gen_op_iwmmxt_movq_M0_wRn(rd0);
2002 tmp = tcg_temp_new_i32();
2003 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2004 tcg_temp_free_i32(tmp);
2005 return 1;
2006 }
2007 switch ((insn >> 22) & 3) {
2008 case 1:
2009 gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
2010 break;
2011 case 2:
2012 gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
2013 break;
2014 case 3:
2015 gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
2016 break;
2017 }
2018 tcg_temp_free_i32(tmp);
2019 gen_op_iwmmxt_movq_wRn_M0(wrd);
2020 gen_op_iwmmxt_set_mup();
2021 gen_op_iwmmxt_set_cup();
2022 break;
2023 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2024 case 0x014: case 0x414: case 0x814: case 0xc14:
2025 if (((insn >> 22) & 3) == 0)
2026 return 1;
2027 wrd = (insn >> 12) & 0xf;
2028 rd0 = (insn >> 16) & 0xf;
2029 gen_op_iwmmxt_movq_M0_wRn(rd0);
2030 tmp = tcg_temp_new_i32();
2031 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2032 tcg_temp_free_i32(tmp);
2033 return 1;
2034 }
2035 switch ((insn >> 22) & 3) {
2036 case 1:
2037 gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
2038 break;
2039 case 2:
2040 gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
2041 break;
2042 case 3:
2043 gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
2044 break;
2045 }
2046 tcg_temp_free_i32(tmp);
2047 gen_op_iwmmxt_movq_wRn_M0(wrd);
2048 gen_op_iwmmxt_set_mup();
2049 gen_op_iwmmxt_set_cup();
2050 break;
2051 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2052 case 0x114: case 0x514: case 0x914: case 0xd14:
2053 if (((insn >> 22) & 3) == 0)
2054 return 1;
2055 wrd = (insn >> 12) & 0xf;
2056 rd0 = (insn >> 16) & 0xf;
2057 gen_op_iwmmxt_movq_M0_wRn(rd0);
2058 tmp = tcg_temp_new_i32();
2059 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2060 tcg_temp_free_i32(tmp);
2061 return 1;
2062 }
2063 switch ((insn >> 22) & 3) {
2064 case 1:
2065 gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
2066 break;
2067 case 2:
2068 gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
2069 break;
2070 case 3:
2071 gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
2072 break;
2073 }
2074 tcg_temp_free_i32(tmp);
2075 gen_op_iwmmxt_movq_wRn_M0(wrd);
2076 gen_op_iwmmxt_set_mup();
2077 gen_op_iwmmxt_set_cup();
2078 break;
2079 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2080 case 0x314: case 0x714: case 0xb14: case 0xf14:
2081 if (((insn >> 22) & 3) == 0)
2082 return 1;
2083 wrd = (insn >> 12) & 0xf;
2084 rd0 = (insn >> 16) & 0xf;
2085 gen_op_iwmmxt_movq_M0_wRn(rd0);
2086 tmp = tcg_temp_new_i32();
2087 switch ((insn >> 22) & 3) {
2088 case 1:
2089 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2090 tcg_temp_free_i32(tmp);
2091 return 1;
2092 }
2093 gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
2094 break;
2095 case 2:
2096 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2097 tcg_temp_free_i32(tmp);
2098 return 1;
2099 }
2100 gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
2101 break;
2102 case 3:
2103 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2104 tcg_temp_free_i32(tmp);
2105 return 1;
2106 }
2107 gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
2108 break;
2109 }
2110 tcg_temp_free_i32(tmp);
2111 gen_op_iwmmxt_movq_wRn_M0(wrd);
2112 gen_op_iwmmxt_set_mup();
2113 gen_op_iwmmxt_set_cup();
2114 break;
2115 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2116 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2117 wrd = (insn >> 12) & 0xf;
2118 rd0 = (insn >> 16) & 0xf;
2119 rd1 = (insn >> 0) & 0xf;
2120 gen_op_iwmmxt_movq_M0_wRn(rd0);
2121 switch ((insn >> 22) & 3) {
2122 case 0:
2123 if (insn & (1 << 21))
2124 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2125 else
2126 gen_op_iwmmxt_minub_M0_wRn(rd1);
2127 break;
2128 case 1:
2129 if (insn & (1 << 21))
2130 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2131 else
2132 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2133 break;
2134 case 2:
2135 if (insn & (1 << 21))
2136 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2137 else
2138 gen_op_iwmmxt_minul_M0_wRn(rd1);
2139 break;
2140 case 3:
2141 return 1;
2142 }
2143 gen_op_iwmmxt_movq_wRn_M0(wrd);
2144 gen_op_iwmmxt_set_mup();
2145 break;
2146 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2147 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2148 wrd = (insn >> 12) & 0xf;
2149 rd0 = (insn >> 16) & 0xf;
2150 rd1 = (insn >> 0) & 0xf;
2151 gen_op_iwmmxt_movq_M0_wRn(rd0);
2152 switch ((insn >> 22) & 3) {
2153 case 0:
2154 if (insn & (1 << 21))
2155 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2156 else
2157 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2158 break;
2159 case 1:
2160 if (insn & (1 << 21))
2161 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2162 else
2163 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2164 break;
2165 case 2:
2166 if (insn & (1 << 21))
2167 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2168 else
2169 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2170 break;
2171 case 3:
2172 return 1;
2173 }
2174 gen_op_iwmmxt_movq_wRn_M0(wrd);
2175 gen_op_iwmmxt_set_mup();
2176 break;
2177 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2178 case 0x402: case 0x502: case 0x602: case 0x702:
2179 wrd = (insn >> 12) & 0xf;
2180 rd0 = (insn >> 16) & 0xf;
2181 rd1 = (insn >> 0) & 0xf;
2182 gen_op_iwmmxt_movq_M0_wRn(rd0);
2183 tmp = tcg_const_i32((insn >> 20) & 3);
2184 iwmmxt_load_reg(cpu_V1, rd1);
2185 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2186 tcg_temp_free(tmp);
2187 gen_op_iwmmxt_movq_wRn_M0(wrd);
2188 gen_op_iwmmxt_set_mup();
2189 break;
2190 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2191 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2192 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2193 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2194 wrd = (insn >> 12) & 0xf;
2195 rd0 = (insn >> 16) & 0xf;
2196 rd1 = (insn >> 0) & 0xf;
2197 gen_op_iwmmxt_movq_M0_wRn(rd0);
2198 switch ((insn >> 20) & 0xf) {
2199 case 0x0:
2200 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2201 break;
2202 case 0x1:
2203 gen_op_iwmmxt_subub_M0_wRn(rd1);
2204 break;
2205 case 0x3:
2206 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2207 break;
2208 case 0x4:
2209 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2210 break;
2211 case 0x5:
2212 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2213 break;
2214 case 0x7:
2215 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2216 break;
2217 case 0x8:
2218 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2219 break;
2220 case 0x9:
2221 gen_op_iwmmxt_subul_M0_wRn(rd1);
2222 break;
2223 case 0xb:
2224 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2225 break;
2226 default:
2227 return 1;
2228 }
2229 gen_op_iwmmxt_movq_wRn_M0(wrd);
2230 gen_op_iwmmxt_set_mup();
2231 gen_op_iwmmxt_set_cup();
2232 break;
2233 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2234 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2235 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2236 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2237 wrd = (insn >> 12) & 0xf;
2238 rd0 = (insn >> 16) & 0xf;
2239 gen_op_iwmmxt_movq_M0_wRn(rd0);
2240 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2241 gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
2242 tcg_temp_free(tmp);
2243 gen_op_iwmmxt_movq_wRn_M0(wrd);
2244 gen_op_iwmmxt_set_mup();
2245 gen_op_iwmmxt_set_cup();
2246 break;
2247 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2248 case 0x418: case 0x518: case 0x618: case 0x718:
2249 case 0x818: case 0x918: case 0xa18: case 0xb18:
2250 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2251 wrd = (insn >> 12) & 0xf;
2252 rd0 = (insn >> 16) & 0xf;
2253 rd1 = (insn >> 0) & 0xf;
2254 gen_op_iwmmxt_movq_M0_wRn(rd0);
2255 switch ((insn >> 20) & 0xf) {
2256 case 0x0:
2257 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2258 break;
2259 case 0x1:
2260 gen_op_iwmmxt_addub_M0_wRn(rd1);
2261 break;
2262 case 0x3:
2263 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2264 break;
2265 case 0x4:
2266 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2267 break;
2268 case 0x5:
2269 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2270 break;
2271 case 0x7:
2272 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2273 break;
2274 case 0x8:
2275 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2276 break;
2277 case 0x9:
2278 gen_op_iwmmxt_addul_M0_wRn(rd1);
2279 break;
2280 case 0xb:
2281 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2282 break;
2283 default:
2284 return 1;
2285 }
2286 gen_op_iwmmxt_movq_wRn_M0(wrd);
2287 gen_op_iwmmxt_set_mup();
2288 gen_op_iwmmxt_set_cup();
2289 break;
2290 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2291 case 0x408: case 0x508: case 0x608: case 0x708:
2292 case 0x808: case 0x908: case 0xa08: case 0xb08:
2293 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2294 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2295 return 1;
2296 wrd = (insn >> 12) & 0xf;
2297 rd0 = (insn >> 16) & 0xf;
2298 rd1 = (insn >> 0) & 0xf;
2299 gen_op_iwmmxt_movq_M0_wRn(rd0);
2300 switch ((insn >> 22) & 3) {
2301 case 1:
2302 if (insn & (1 << 21))
2303 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2304 else
2305 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2306 break;
2307 case 2:
2308 if (insn & (1 << 21))
2309 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2310 else
2311 gen_op_iwmmxt_packul_M0_wRn(rd1);
2312 break;
2313 case 3:
2314 if (insn & (1 << 21))
2315 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2316 else
2317 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2318 break;
2319 }
2320 gen_op_iwmmxt_movq_wRn_M0(wrd);
2321 gen_op_iwmmxt_set_mup();
2322 gen_op_iwmmxt_set_cup();
2323 break;
2324 case 0x201: case 0x203: case 0x205: case 0x207:
2325 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2326 case 0x211: case 0x213: case 0x215: case 0x217:
2327 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2328 wrd = (insn >> 5) & 0xf;
2329 rd0 = (insn >> 12) & 0xf;
2330 rd1 = (insn >> 0) & 0xf;
2331 if (rd0 == 0xf || rd1 == 0xf)
2332 return 1;
2333 gen_op_iwmmxt_movq_M0_wRn(wrd);
2334 tmp = load_reg(s, rd0);
2335 tmp2 = load_reg(s, rd1);
2336 switch ((insn >> 16) & 0xf) {
2337 case 0x0: /* TMIA */
2338 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2339 break;
2340 case 0x8: /* TMIAPH */
2341 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2342 break;
2343 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
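/* TMIABB/BT/TB/TT: bit 16 selects the bottom (0) or top (1) halfword
 * of the first operand and bit 17 does the same for the second,
 * before the 16x16 multiply-accumulate. */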
2344 if (insn & (1 << 16))
2345 tcg_gen_shri_i32(tmp, tmp, 16);
2346 if (insn & (1 << 17))
2347 tcg_gen_shri_i32(tmp2, tmp2, 16);
2348 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2349 break;
2350 default:
2351 tcg_temp_free_i32(tmp2);
2352 tcg_temp_free_i32(tmp);
2353 return 1;
2354 }
2355 tcg_temp_free_i32(tmp2);
2356 tcg_temp_free_i32(tmp);
2357 gen_op_iwmmxt_movq_wRn_M0(wrd);
2358 gen_op_iwmmxt_set_mup();
2359 break;
2360 default:
2361 return 1;
2362 }
2363
2364 return 0;
2365 }
2366
2367 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2368 (i.e. an undefined instruction). */
2369 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2370 {
2371 int acc, rd0, rd1, rdhi, rdlo;
2372 TCGv tmp, tmp2;
2373
2374 if ((insn & 0x0ff00f10) == 0x0e200010) {
2375 /* Multiply with Internal Accumulate Format */
2376 rd0 = (insn >> 12) & 0xf;
2377 rd1 = insn & 0xf;
2378 acc = (insn >> 5) & 7;
2379
2380 if (acc != 0)
2381 return 1;
2382
2383 tmp = load_reg(s, rd0);
2384 tmp2 = load_reg(s, rd1);
2385 switch ((insn >> 16) & 0xf) {
2386 case 0x0: /* MIA */
2387 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2388 break;
2389 case 0x8: /* MIAPH */
2390 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2391 break;
2392 case 0xc: /* MIABB */
2393 case 0xd: /* MIABT */
2394 case 0xe: /* MIATB */
2395 case 0xf: /* MIATT */
2396 if (insn & (1 << 16))
2397 tcg_gen_shri_i32(tmp, tmp, 16);
2398 if (insn & (1 << 17))
2399 tcg_gen_shri_i32(tmp2, tmp2, 16);
2400 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2401 break;
2402 default:
2403 return 1;
2404 }
2405 tcg_temp_free_i32(tmp2);
2406 tcg_temp_free_i32(tmp);
2407
2408 gen_op_iwmmxt_movq_wRn_M0(acc);
2409 return 0;
2410 }
2411
2412 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2413 /* Internal Accumulator Access Format */
2414 rdhi = (insn >> 16) & 0xf;
2415 rdlo = (insn >> 12) & 0xf;
2416 acc = insn & 7;
2417
2418 if (acc != 0)
2419 return 1;
2420
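/* MAR/MRA move the 64-bit internal accumulator to or from an ARM
 * register pair. The accumulator is only 40 bits wide, so on MRA the
 * high word is masked down to bits [39:32]: (1 << (40 - 32)) - 1. */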
2421 if (insn & ARM_CP_RW_BIT) { /* MRA */
2422 iwmmxt_load_reg(cpu_V0, acc);
2423 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2424 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2425 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2426 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2427 } else { /* MAR */
2428 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2429 iwmmxt_store_reg(cpu_V0, acc);
2430 }
2431 return 0;
2432 }
2433
2434 return 1;
2435 }
2436
2437 /* Disassemble system coprocessor instruction. Return nonzero if
2438 instruction is not defined. */
2439 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2440 {
2441 TCGv tmp, tmp2;
2442 uint32_t rd = (insn >> 12) & 0xf;
2443 uint32_t cp = (insn >> 8) & 0xf;
2444 if (IS_USER(s)) {
2445 return 1;
2446 }
2447
2448 if (insn & ARM_CP_RW_BIT) {
2449 if (!env->cp[cp].cp_read)
2450 return 1;
2451 gen_set_pc_im(s->pc);
2452 tmp = tcg_temp_new_i32();
2453 tmp2 = tcg_const_i32(insn);
2454 gen_helper_get_cp(tmp, cpu_env, tmp2);
2455 tcg_temp_free(tmp2);
2456 store_reg(s, rd, tmp);
2457 } else {
2458 if (!env->cp[cp].cp_write)
2459 return 1;
2460 gen_set_pc_im(s->pc);
2461 tmp = load_reg(s, rd);
2462 tmp2 = tcg_const_i32(insn);
2463 gen_helper_set_cp(cpu_env, tmp2, tmp);
2464 tcg_temp_free(tmp2);
2465 tcg_temp_free_i32(tmp);
2466 }
2467 return 0;
2468 }
2469
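/* Check whether a cp15 access is permitted from user mode. 'op' packs
 * opc1 and opc2 as (opc1 << 3) | opc2 (opc1 is insn bits [23:21], opc2
 * bits [7:5]); only the user-accessible TLS registers in c13,c0 and the
 * c7 barrier encodings (ISB, DSB, DMB) are allowed. */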
2470 static int cp15_user_ok(uint32_t insn)
2471 {
2472 int cpn = (insn >> 16) & 0xf;
2473 int cpm = insn & 0xf;
2474 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2475
2476 if (cpn == 13 && cpm == 0) {
2477 /* TLS register. */
2478 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2479 return 1;
2480 }
2481 if (cpn == 7) {
2482 /* ISB, DSB, DMB. */
2483 if ((cpm == 5 && op == 4)
2484 || (cpm == 10 && (op == 4 || op == 5)))
2485 return 1;
2486 }
2487 return 0;
2488 }
2489
2490 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2491 {
2492 TCGv tmp;
2493 int cpn = (insn >> 16) & 0xf;
2494 int cpm = insn & 0xf;
2495 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2496
2497 if (!arm_feature(env, ARM_FEATURE_V6K))
2498 return 0;
2499
2500 if (!(cpn == 13 && cpm == 0))
2501 return 0;
2502
2503 if (insn & ARM_CP_RW_BIT) {
2504 switch (op) {
2505 case 2:
2506 tmp = load_cpu_field(cp15.c13_tls1);
2507 break;
2508 case 3:
2509 tmp = load_cpu_field(cp15.c13_tls2);
2510 break;
2511 case 4:
2512 tmp = load_cpu_field(cp15.c13_tls3);
2513 break;
2514 default:
2515 return 0;
2516 }
2517 store_reg(s, rd, tmp);
2518
2519 } else {
2520 tmp = load_reg(s, rd);
2521 switch (op) {
2522 case 2:
2523 store_cpu_field(tmp, cp15.c13_tls1);
2524 break;
2525 case 3:
2526 store_cpu_field(tmp, cp15.c13_tls2);
2527 break;
2528 case 4:
2529 store_cpu_field(tmp, cp15.c13_tls3);
2530 break;
2531 default:
2532 tcg_temp_free_i32(tmp);
2533 return 0;
2534 }
2535 }
2536 return 1;
2537 }
2538
2539 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2540 instruction is not defined. */
2541 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2542 {
2543 uint32_t rd;
2544 TCGv tmp, tmp2;
2545
2546 /* M profile cores use memory mapped registers instead of cp15. */
2547 if (arm_feature(env, ARM_FEATURE_M))
2548 return 1;
2549
2550 if ((insn & (1 << 25)) == 0) {
2551 if (insn & (1 << 20)) {
2552 /* mrrc */
2553 return 1;
2554 }
2555 /* mcrr. Used for block cache operations, so implement as no-op. */
2556 return 0;
2557 }
2558 if ((insn & (1 << 4)) == 0) {
2559 /* cdp */
2560 return 1;
2561 }
2562 if (IS_USER(s) && !cp15_user_ok(insn)) {
2563 return 1;
2564 }
2565
2566 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2567 * instructions rather than a separate instruction.
2568 */
2569 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2570 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2571 * In v7, this must NOP.
2572 */
2573 if (!arm_feature(env, ARM_FEATURE_V7)) {
2574 /* Wait for interrupt. */
2575 gen_set_pc_im(s->pc);
2576 s->is_jmp = DISAS_WFI;
2577 }
2578 return 0;
2579 }
2580
2581 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2582 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2583 * so this is slightly over-broad.
2584 */
2585 if (!arm_feature(env, ARM_FEATURE_V6)) {
2586 /* Wait for interrupt. */
2587 gen_set_pc_im(s->pc);
2588 s->is_jmp = DISAS_WFI;
2589 return 0;
2590 }
2591 /* Otherwise fall through to handle via helper function.
2592 * In particular, on v7 and some v6 cores this is one of
2593 * the VA-PA registers.
2594 */
2595 }
2596
2597 rd = (insn >> 12) & 0xf;
2598
2599 if (cp15_tls_load_store(env, s, insn, rd))
2600 return 0;
2601
2602 tmp2 = tcg_const_i32(insn);
2603 if (insn & ARM_CP_RW_BIT) {
2604 tmp = tcg_temp_new_i32();
2605 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2606 /* If the destination register is r15 then the condition codes are set. */
2607 if (rd != 15)
2608 store_reg(s, rd, tmp);
2609 else
2610 tcg_temp_free_i32(tmp);
2611 } else {
2612 tmp = load_reg(s, rd);
2613 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2614 tcg_temp_free_i32(tmp);
2615 /* Normally we would always end the TB here, but Linux
2616 * arch/arm/mach-pxa/sleep.S expects two instructions following
2617 * an MMU enable to execute from cache. Imitate this behaviour. */
2618 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2619 (insn & 0x0fff0fff) != 0x0e010f10)
2620 gen_lookup_tb(s);
2621 }
2622 tcg_temp_free_i32(tmp2);
2623 return 0;
2624 }
2625
2626 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2627 #define VFP_SREG(insn, bigbit, smallbit) \
2628 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2629 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2630 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2631 reg = (((insn) >> (bigbit)) & 0x0f) \
2632 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2633 } else { \
2634 if (insn & (1 << (smallbit))) \
2635 return 1; \
2636 reg = ((insn) >> (bigbit)) & 0x0f; \
2637 }} while (0)
2638
2639 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2640 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2641 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2642 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2643 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2644 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
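/* Example: VFP_SREG_D(insn) forms the 5-bit single-precision register
 * number as (bits [15:12] << 1) | bit 22, i.e. the D bit is the least
 * significant bit. For doubles (VFP_DREG_D) the extra bit is instead the
 * top bit of the register number; pre-VFP3 cores only have D0-D15, so a
 * set top bit makes the instruction UNDEF (the 'return 1' above). */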
2645
2646 /* Move between integer and VFP cores. */
2647 static TCGv gen_vfp_mrs(void)
2648 {
2649 TCGv tmp = tcg_temp_new_i32();
2650 tcg_gen_mov_i32(tmp, cpu_F0s);
2651 return tmp;
2652 }
2653
2654 static void gen_vfp_msr(TCGv tmp)
2655 {
2656 tcg_gen_mov_i32(cpu_F0s, tmp);
2657 tcg_temp_free_i32(tmp);
2658 }
2659
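/* The next three helpers replicate an 8- or 16-bit value across all lanes
 * of a 32-bit TCG value, e.g. gen_neon_dup_u8 turns 0x000000ab into
 * 0xabababab and gen_neon_dup_high16 turns 0x1234xxxx into 0x12341234. */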
2660 static void gen_neon_dup_u8(TCGv var, int shift)
2661 {
2662 TCGv tmp = tcg_temp_new_i32();
2663 if (shift)
2664 tcg_gen_shri_i32(var, var, shift);
2665 tcg_gen_ext8u_i32(var, var);
2666 tcg_gen_shli_i32(tmp, var, 8);
2667 tcg_gen_or_i32(var, var, tmp);
2668 tcg_gen_shli_i32(tmp, var, 16);
2669 tcg_gen_or_i32(var, var, tmp);
2670 tcg_temp_free_i32(tmp);
2671 }
2672
2673 static void gen_neon_dup_low16(TCGv var)
2674 {
2675 TCGv tmp = tcg_temp_new_i32();
2676 tcg_gen_ext16u_i32(var, var);
2677 tcg_gen_shli_i32(tmp, var, 16);
2678 tcg_gen_or_i32(var, var, tmp);
2679 tcg_temp_free_i32(tmp);
2680 }
2681
2682 static void gen_neon_dup_high16(TCGv var)
2683 {
2684 TCGv tmp = tcg_temp_new_i32();
2685 tcg_gen_andi_i32(var, var, 0xffff0000);
2686 tcg_gen_shri_i32(tmp, var, 16);
2687 tcg_gen_or_i32(var, var, tmp);
2688 tcg_temp_free_i32(tmp);
2689 }
2690
2691 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2692 {
2693 /* Load a single Neon element and replicate into a 32-bit TCG reg */
2694 TCGv tmp;
2695 switch (size) {
2696 case 0:
2697 tmp = gen_ld8u(addr, IS_USER(s));
2698 gen_neon_dup_u8(tmp, 0);
2699 break;
2700 case 1:
2701 tmp = gen_ld16u(addr, IS_USER(s));
2702 gen_neon_dup_low16(tmp);
2703 break;
2704 case 2:
2705 tmp = gen_ld32(addr, IS_USER(s));
2706 break;
2707 default: /* Avoid compiler warnings. */
2708 abort();
2709 }
2710 return tmp;
2711 }
2712
2713 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2714 (i.e. an undefined instruction). */
2715 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2716 {
2717 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2718 int dp, veclen;
2719 TCGv addr;
2720 TCGv tmp;
2721 TCGv tmp2;
2722
2723 if (!arm_feature(env, ARM_FEATURE_VFP))
2724 return 1;
2725
2726 if (!s->vfp_enabled) {
2727 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2728 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2729 return 1;
2730 rn = (insn >> 16) & 0xf;
2731 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2732 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2733 return 1;
2734 }
2735 dp = ((insn & 0xf00) == 0xb00);
2736 switch ((insn >> 24) & 0xf) {
2737 case 0xe:
2738 if (insn & (1 << 4)) {
2739 /* single register transfer */
2740 rd = (insn >> 12) & 0xf;
2741 if (dp) {
2742 int size;
2743 int pass;
2744
2745 VFP_DREG_N(rn, insn);
2746 if (insn & 0xf)
2747 return 1;
2748 if (insn & 0x00c00060
2749 && !arm_feature(env, ARM_FEATURE_NEON))
2750 return 1;
2751
2752 pass = (insn >> 21) & 1;
2753 if (insn & (1 << 22)) {
2754 size = 0;
2755 offset = ((insn >> 5) & 3) * 8;
2756 } else if (insn & (1 << 5)) {
2757 size = 1;
2758 offset = (insn & (1 << 6)) ? 16 : 0;
2759 } else {
2760 size = 2;
2761 offset = 0;
2762 }
2763 if (insn & ARM_CP_RW_BIT) {
2764 /* vfp->arm */
2765 tmp = neon_load_reg(rn, pass);
2766 switch (size) {
2767 case 0:
2768 if (offset)
2769 tcg_gen_shri_i32(tmp, tmp, offset);
2770 if (insn & (1 << 23))
2771 gen_uxtb(tmp);
2772 else
2773 gen_sxtb(tmp);
2774 break;
2775 case 1:
2776 if (insn & (1 << 23)) {
2777 if (offset) {
2778 tcg_gen_shri_i32(tmp, tmp, 16);
2779 } else {
2780 gen_uxth(tmp);
2781 }
2782 } else {
2783 if (offset) {
2784 tcg_gen_sari_i32(tmp, tmp, 16);
2785 } else {
2786 gen_sxth(tmp);
2787 }
2788 }
2789 break;
2790 case 2:
2791 break;
2792 }
2793 store_reg(s, rd, tmp);
2794 } else {
2795 /* arm->vfp */
2796 tmp = load_reg(s, rd);
2797 if (insn & (1 << 23)) {
2798 /* VDUP */
2799 if (size == 0) {
2800 gen_neon_dup_u8(tmp, 0);
2801 } else if (size == 1) {
2802 gen_neon_dup_low16(tmp);
2803 }
2804 for (n = 0; n <= pass * 2; n++) {
2805 tmp2 = tcg_temp_new_i32();
2806 tcg_gen_mov_i32(tmp2, tmp);
2807 neon_store_reg(rn, n, tmp2);
2808 }
2809 neon_store_reg(rn, n, tmp);
2810 } else {
2811 /* VMOV */
2812 switch (size) {
2813 case 0:
2814 tmp2 = neon_load_reg(rn, pass);
2815 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2816 tcg_temp_free_i32(tmp2);
2817 break;
2818 case 1:
2819 tmp2 = neon_load_reg(rn, pass);
2820 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2821 tcg_temp_free_i32(tmp2);
2822 break;
2823 case 2:
2824 break;
2825 }
2826 neon_store_reg(rn, pass, tmp);
2827 }
2828 }
2829 } else { /* !dp */
2830 if ((insn & 0x6f) != 0x00)
2831 return 1;
2832 rn = VFP_SREG_N(insn);
2833 if (insn & ARM_CP_RW_BIT) {
2834 /* vfp->arm */
2835 if (insn & (1 << 21)) {
2836 /* system register */
2837 rn >>= 1;
2838
2839 switch (rn) {
2840 case ARM_VFP_FPSID:
2841 /* VFP2 allows access to FPSID from userspace.
2842 VFP3 restricts all id registers to privileged
2843 accesses. */
2844 if (IS_USER(s)
2845 && arm_feature(env, ARM_FEATURE_VFP3))
2846 return 1;
2847 tmp = load_cpu_field(vfp.xregs[rn]);
2848 break;
2849 case ARM_VFP_FPEXC:
2850 if (IS_USER(s))
2851 return 1;
2852 tmp = load_cpu_field(vfp.xregs[rn]);
2853 break;
2854 case ARM_VFP_FPINST:
2855 case ARM_VFP_FPINST2:
2856 /* Not present in VFP3. */
2857 if (IS_USER(s)
2858 || arm_feature(env, ARM_FEATURE_VFP3))
2859 return 1;
2860 tmp = load_cpu_field(vfp.xregs[rn]);
2861 break;
2862 case ARM_VFP_FPSCR:
2863 if (rd == 15) {
2864 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2865 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2866 } else {
2867 tmp = tcg_temp_new_i32();
2868 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2869 }
2870 break;
2871 case ARM_VFP_MVFR0:
2872 case ARM_VFP_MVFR1:
2873 if (IS_USER(s)
2874 || !arm_feature(env, ARM_FEATURE_VFP3))
2875 return 1;
2876 tmp = load_cpu_field(vfp.xregs[rn]);
2877 break;
2878 default:
2879 return 1;
2880 }
2881 } else {
2882 gen_mov_F0_vreg(0, rn);
2883 tmp = gen_vfp_mrs();
2884 }
2885 if (rd == 15) {
2886 /* Set the 4 flag bits in the CPSR. */
2887 gen_set_nzcv(tmp);
2888 tcg_temp_free_i32(tmp);
2889 } else {
2890 store_reg(s, rd, tmp);
2891 }
2892 } else {
2893 /* arm->vfp */
2894 tmp = load_reg(s, rd);
2895 if (insn & (1 << 21)) {
2896 rn >>= 1;
2897 /* system register */
2898 switch (rn) {
2899 case ARM_VFP_FPSID:
2900 case ARM_VFP_MVFR0:
2901 case ARM_VFP_MVFR1:
2902 /* Writes are ignored. */
2903 break;
2904 case ARM_VFP_FPSCR:
2905 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2906 tcg_temp_free_i32(tmp);
2907 gen_lookup_tb(s);
2908 break;
2909 case ARM_VFP_FPEXC:
2910 if (IS_USER(s))
2911 return 1;
2912 /* TODO: VFP subarchitecture support.
2913 * For now, keep the EN bit only */
2914 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2915 store_cpu_field(tmp, vfp.xregs[rn]);
2916 gen_lookup_tb(s);
2917 break;
2918 case ARM_VFP_FPINST:
2919 case ARM_VFP_FPINST2:
2920 store_cpu_field(tmp, vfp.xregs[rn]);
2921 break;
2922 default:
2923 return 1;
2924 }
2925 } else {
2926 gen_vfp_msr(tmp);
2927 gen_mov_vreg_F0(0, rn);
2928 }
2929 }
2930 }
2931 } else {
2932 /* data processing */
2933 /* The opcode is in bits 23, 21, 20 and 6. */
2934 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2935 if (dp) {
2936 if (op == 15) {
2937 /* rn is opcode */
2938 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2939 } else {
2940 /* rn is register number */
2941 VFP_DREG_N(rn, insn);
2942 }
2943
2944 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2945 /* Integer or single precision destination. */
2946 rd = VFP_SREG_D(insn);
2947 } else {
2948 VFP_DREG_D(rd, insn);
2949 }
2950 if (op == 15 &&
2951 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2952 /* VCVT from int is always from S reg regardless of dp bit.
2953 * VCVT with immediate frac_bits has the same format as SREG_M
2954 */
2955 rm = VFP_SREG_M(insn);
2956 } else {
2957 VFP_DREG_M(rm, insn);
2958 }
2959 } else {
2960 rn = VFP_SREG_N(insn);
2961 if (op == 15 && rn == 15) {
2962 /* Double precision destination. */
2963 VFP_DREG_D(rd, insn);
2964 } else {
2965 rd = VFP_SREG_D(insn);
2966 }
2967 /* NB that we implicitly rely on the encoding for the frac_bits
2968 * in VCVT of fixed to float being the same as that of an SREG_M
2969 */
2970 rm = VFP_SREG_M(insn);
2971 }
2972
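/* FPSCR LEN/STRIDE (cached as s->vec_len / s->vec_stride) turn most VFP
 * data-processing ops into 'short vector' operations: the loop below
 * repeats the op, stepping rd/rn/rm by delta_d/delta_m within a register
 * bank (4 doubles or 8 singles, hence bank_mask 0xc or 0x18). A
 * destination in the first bank means a plain scalar op, and a source in
 * the first bank is a scalar broadcast (mixed scalar/vector). */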
2973 veclen = s->vec_len;
2974 if (op == 15 && rn > 3)
2975 veclen = 0;
2976
2977 /* Shut up compiler warnings. */
2978 delta_m = 0;
2979 delta_d = 0;
2980 bank_mask = 0;
2981
2982 if (veclen > 0) {
2983 if (dp)
2984 bank_mask = 0xc;
2985 else
2986 bank_mask = 0x18;
2987
2988 /* Figure out what type of vector operation this is. */
2989 if ((rd & bank_mask) == 0) {
2990 /* scalar */
2991 veclen = 0;
2992 } else {
2993 if (dp)
2994 delta_d = (s->vec_stride >> 1) + 1;
2995 else
2996 delta_d = s->vec_stride + 1;
2997
2998 if ((rm & bank_mask) == 0) {
2999 /* mixed scalar/vector */
3000 delta_m = 0;
3001 } else {
3002 /* vector */
3003 delta_m = delta_d;
3004 }
3005 }
3006 }
3007
3008 /* Load the initial operands. */
3009 if (op == 15) {
3010 switch (rn) {
3011 case 16:
3012 case 17:
3013 /* Integer source */
3014 gen_mov_F0_vreg(0, rm);
3015 break;
3016 case 8:
3017 case 9:
3018 /* Compare */
3019 gen_mov_F0_vreg(dp, rd);
3020 gen_mov_F1_vreg(dp, rm);
3021 break;
3022 case 10:
3023 case 11:
3024 /* Compare with zero */
3025 gen_mov_F0_vreg(dp, rd);
3026 gen_vfp_F1_ld0(dp);
3027 break;
3028 case 20:
3029 case 21:
3030 case 22:
3031 case 23:
3032 case 28:
3033 case 29:
3034 case 30:
3035 case 31:
3036 /* Source and destination the same. */
3037 gen_mov_F0_vreg(dp, rd);
3038 break;
3039 default:
3040 /* One source operand. */
3041 gen_mov_F0_vreg(dp, rm);
3042 break;
3043 }
3044 } else {
3045 /* Two source operands. */
3046 gen_mov_F0_vreg(dp, rn);
3047 gen_mov_F1_vreg(dp, rm);
3048 }
3049
3050 for (;;) {
3051 /* Perform the calculation. */
3052 switch (op) {
3053 case 0: /* VMLA: fd + (fn * fm) */
3054 /* Note that the order of inputs to the add matters for NaNs */
3055 gen_vfp_F1_mul(dp);
3056 gen_mov_F0_vreg(dp, rd);
3057 gen_vfp_add(dp);
3058 break;
3059 case 1: /* VMLS: fd + -(fn * fm) */
3060 gen_vfp_mul(dp);
3061 gen_vfp_F1_neg(dp);
3062 gen_mov_F0_vreg(dp, rd);
3063 gen_vfp_add(dp);
3064 break;
3065 case 2: /* VNMLS: -fd + (fn * fm) */
3066 /* Note that it isn't valid to replace (-A + B) with (B - A)
3067 * or similar plausible looking simplifications
3068 * because this will give wrong results for NaNs.
3069 */
3070 gen_vfp_F1_mul(dp);
3071 gen_mov_F0_vreg(dp, rd);
3072 gen_vfp_neg(dp);
3073 gen_vfp_add(dp);
3074 break;
3075 case 3: /* VNMLA: -fd + -(fn * fm) */
3076 gen_vfp_mul(dp);
3077 gen_vfp_F1_neg(dp);
3078 gen_mov_F0_vreg(dp, rd);
3079 gen_vfp_neg(dp);
3080 gen_vfp_add(dp);
3081 break;
3082 case 4: /* mul: fn * fm */
3083 gen_vfp_mul(dp);
3084 break;
3085 case 5: /* nmul: -(fn * fm) */
3086 gen_vfp_mul(dp);
3087 gen_vfp_neg(dp);
3088 break;
3089 case 6: /* add: fn + fm */
3090 gen_vfp_add(dp);
3091 break;
3092 case 7: /* sub: fn - fm */
3093 gen_vfp_sub(dp);
3094 break;
3095 case 8: /* div: fn / fm */
3096 gen_vfp_div(dp);
3097 break;
3098 case 14: /* fconst */
3099 if (!arm_feature(env, ARM_FEATURE_VFP3))
3100 return 1;
3101
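/* VFPv3 VMOV (immediate): the 8-bit immediate abcdefgh (insn bit 19 plus
 * bits [18:16] and [3:0]) expands to sign = a, exponent = NOT(b) followed
 * by copies of b and then cd, and fraction = efgh padded with zeros. The
 * code below builds that constant for either precision (for doubles only
 * the top 32 bits are nonzero). */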
3102 n = (insn << 12) & 0x80000000;
3103 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3104 if (dp) {
3105 if (i & 0x40)
3106 i |= 0x3f80;
3107 else
3108 i |= 0x4000;
3109 n |= i << 16;
3110 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3111 } else {
3112 if (i & 0x40)
3113 i |= 0x780;
3114 else
3115 i |= 0x800;
3116 n |= i << 19;
3117 tcg_gen_movi_i32(cpu_F0s, n);
3118 }
3119 break;
3120 case 15: /* extension space */
3121 switch (rn) {
3122 case 0: /* cpy */
3123 /* no-op */
3124 break;
3125 case 1: /* abs */
3126 gen_vfp_abs(dp);
3127 break;
3128 case 2: /* neg */
3129 gen_vfp_neg(dp);
3130 break;
3131 case 3: /* sqrt */
3132 gen_vfp_sqrt(dp);
3133 break;
3134 case 4: /* vcvtb.f32.f16 */
3135 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3136 return 1;
3137 tmp = gen_vfp_mrs();
3138 tcg_gen_ext16u_i32(tmp, tmp);
3139 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3140 tcg_temp_free_i32(tmp);
3141 break;
3142 case 5: /* vcvtt.f32.f16 */
3143 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3144 return 1;
3145 tmp = gen_vfp_mrs();
3146 tcg_gen_shri_i32(tmp, tmp, 16);
3147 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3148 tcg_temp_free_i32(tmp);
3149 break;
3150 case 6: /* vcvtb.f16.f32 */
3151 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3152 return 1;
3153 tmp = tcg_temp_new_i32();
3154 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3155 gen_mov_F0_vreg(0, rd);
3156 tmp2 = gen_vfp_mrs();
3157 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3158 tcg_gen_or_i32(tmp, tmp, tmp2);
3159 tcg_temp_free_i32(tmp2);
3160 gen_vfp_msr(tmp);
3161 break;
3162 case 7: /* vcvtt.f16.f32 */
3163 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3164 return 1;
3165 tmp = tcg_temp_new_i32();
3166 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3167 tcg_gen_shli_i32(tmp, tmp, 16);
3168 gen_mov_F0_vreg(0, rd);
3169 tmp2 = gen_vfp_mrs();
3170 tcg_gen_ext16u_i32(tmp2, tmp2);
3171 tcg_gen_or_i32(tmp, tmp, tmp2);
3172 tcg_temp_free_i32(tmp2);
3173 gen_vfp_msr(tmp);
3174 break;
3175 case 8: /* cmp */
3176 gen_vfp_cmp(dp);
3177 break;
3178 case 9: /* cmpe */
3179 gen_vfp_cmpe(dp);
3180 break;
3181 case 10: /* cmpz */
3182 gen_vfp_cmp(dp);
3183 break;
3184 case 11: /* cmpez */
3185 gen_vfp_F1_ld0(dp);
3186 gen_vfp_cmpe(dp);
3187 break;
3188 case 15: /* single<->double conversion */
3189 if (dp)
3190 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3191 else
3192 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3193 break;
3194 case 16: /* fuito */
3195 gen_vfp_uito(dp, 0);
3196 break;
3197 case 17: /* fsito */
3198 gen_vfp_sito(dp, 0);
3199 break;
3200 case 20: /* fshto */
3201 if (!arm_feature(env, ARM_FEATURE_VFP3))
3202 return 1;
3203 gen_vfp_shto(dp, 16 - rm, 0);
3204 break;
3205 case 21: /* fslto */
3206 if (!arm_feature(env, ARM_FEATURE_VFP3))
3207 return 1;
3208 gen_vfp_slto(dp, 32 - rm, 0);
3209 break;
3210 case 22: /* fuhto */
3211 if (!arm_feature(env, ARM_FEATURE_VFP3))
3212 return 1;
3213 gen_vfp_uhto(dp, 16 - rm, 0);
3214 break;
3215 case 23: /* fulto */
3216 if (!arm_feature(env, ARM_FEATURE_VFP3))
3217 return 1;
3218 gen_vfp_ulto(dp, 32 - rm, 0);
3219 break;
3220 case 24: /* ftoui */
3221 gen_vfp_toui(dp, 0);
3222 break;
3223 case 25: /* ftouiz */
3224 gen_vfp_touiz(dp, 0);
3225 break;
3226 case 26: /* ftosi */
3227 gen_vfp_tosi(dp, 0);
3228 break;
3229 case 27: /* ftosiz */
3230 gen_vfp_tosiz(dp, 0);
3231 break;
3232 case 28: /* ftosh */
3233 if (!arm_feature(env, ARM_FEATURE_VFP3))
3234 return 1;
3235 gen_vfp_tosh(dp, 16 - rm, 0);
3236 break;
3237 case 29: /* ftosl */
3238 if (!arm_feature(env, ARM_FEATURE_VFP3))
3239 return 1;
3240 gen_vfp_tosl(dp, 32 - rm, 0);
3241 break;
3242 case 30: /* ftouh */
3243 if (!arm_feature(env, ARM_FEATURE_VFP3))
3244 return 1;
3245 gen_vfp_touh(dp, 16 - rm, 0);
3246 break;
3247 case 31: /* ftoul */
3248 if (!arm_feature(env, ARM_FEATURE_VFP3))
3249 return 1;
3250 gen_vfp_toul(dp, 32 - rm, 0);
3251 break;
3252 default: /* undefined */
3253 printf ("rn:%d\n", rn);
3254 return 1;
3255 }
3256 break;
3257 default: /* undefined */
3258 printf ("op:%d\n", op);
3259 return 1;
3260 }
3261
3262 /* Write back the result. */
3263 if (op == 15 && (rn >= 8 && rn <= 11))
3264 ; /* Comparison, do nothing. */
3265 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3266 /* VCVT double to int: always integer result. */
3267 gen_mov_vreg_F0(0, rd);
3268 else if (op == 15 && rn == 15)
3269 /* conversion */
3270 gen_mov_vreg_F0(!dp, rd);
3271 else
3272 gen_mov_vreg_F0(dp, rd);
3273
3274 /* break out of the loop if we have finished */
3275 if (veclen == 0)
3276 break;
3277
3278 if (op == 15 && delta_m == 0) {
3279 /* single source one-many */
3280 while (veclen--) {
3281 rd = ((rd + delta_d) & (bank_mask - 1))
3282 | (rd & bank_mask);
3283 gen_mov_vreg_F0(dp, rd);
3284 }
3285 break;
3286 }
3287 /* Set up the next operands. */
3288 veclen--;
3289 rd = ((rd + delta_d) & (bank_mask - 1))
3290 | (rd & bank_mask);
3291
3292 if (op == 15) {
3293 /* One source operand. */
3294 rm = ((rm + delta_m) & (bank_mask - 1))
3295 | (rm & bank_mask);
3296 gen_mov_F0_vreg(dp, rm);
3297 } else {
3298 /* Two source operands. */
3299 rn = ((rn + delta_d) & (bank_mask - 1))
3300 | (rn & bank_mask);
3301 gen_mov_F0_vreg(dp, rn);
3302 if (delta_m) {
3303 rm = ((rm + delta_m) & (bank_mask - 1))
3304 | (rm & bank_mask);
3305 gen_mov_F1_vreg(dp, rm);
3306 }
3307 }
3308 }
3309 }
3310 break;
3311 case 0xc:
3312 case 0xd:
3313 if ((insn & 0x03e00000) == 0x00400000) {
3314 /* two-register transfer */
3315 rn = (insn >> 16) & 0xf;
3316 rd = (insn >> 12) & 0xf;
3317 if (dp) {
3318 VFP_DREG_M(rm, insn);
3319 } else {
3320 rm = VFP_SREG_M(insn);
3321 }
3322
3323 if (insn & ARM_CP_RW_BIT) {
3324 /* vfp->arm */
3325 if (dp) {
3326 gen_mov_F0_vreg(0, rm * 2);
3327 tmp = gen_vfp_mrs();
3328 store_reg(s, rd, tmp);
3329 gen_mov_F0_vreg(0, rm * 2 + 1);
3330 tmp = gen_vfp_mrs();
3331 store_reg(s, rn, tmp);
3332 } else {
3333 gen_mov_F0_vreg(0, rm);
3334 tmp = gen_vfp_mrs();
3335 store_reg(s, rd, tmp);
3336 gen_mov_F0_vreg(0, rm + 1);
3337 tmp = gen_vfp_mrs();
3338 store_reg(s, rn, tmp);
3339 }
3340 } else {
3341 /* arm->vfp */
3342 if (dp) {
3343 tmp = load_reg(s, rd);
3344 gen_vfp_msr(tmp);
3345 gen_mov_vreg_F0(0, rm * 2);
3346 tmp = load_reg(s, rn);
3347 gen_vfp_msr(tmp);
3348 gen_mov_vreg_F0(0, rm * 2 + 1);
3349 } else {
3350 tmp = load_reg(s, rd);
3351 gen_vfp_msr(tmp);
3352 gen_mov_vreg_F0(0, rm);
3353 tmp = load_reg(s, rn);
3354 gen_vfp_msr(tmp);
3355 gen_mov_vreg_F0(0, rm + 1);
3356 }
3357 }
3358 } else {
3359 /* Load/store */
3360 rn = (insn >> 16) & 0xf;
3361 if (dp)
3362 VFP_DREG_D(rd, insn);
3363 else
3364 rd = VFP_SREG_D(insn);
3365 if (s->thumb && rn == 15) {
3366 addr = tcg_temp_new_i32();
3367 tcg_gen_movi_i32(addr, s->pc & ~2);
3368 } else {
3369 addr = load_reg(s, rn);
3370 }
3371 if ((insn & 0x01200000) == 0x01000000) {
3372 /* Single load/store */
3373 offset = (insn & 0xff) << 2;
3374 if ((insn & (1 << 23)) == 0)
3375 offset = -offset;
3376 tcg_gen_addi_i32(addr, addr, offset);
3377 if (insn & (1 << 20)) {
3378 gen_vfp_ld(s, dp, addr);
3379 gen_mov_vreg_F0(dp, rd);
3380 } else {
3381 gen_mov_F0_vreg(dp, rd);
3382 gen_vfp_st(s, dp, addr);
3383 }
3384 tcg_temp_free_i32(addr);
3385 } else {
3386 /* load/store multiple */
3387 if (dp)
3388 n = (insn >> 1) & 0x7f;
3389 else
3390 n = insn & 0xff;
3391
3392 if (insn & (1 << 24)) /* pre-decrement */
3393 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3394
3395 if (dp)
3396 offset = 8;
3397 else
3398 offset = 4;
3399 for (i = 0; i < n; i++) {
3400 if (insn & ARM_CP_RW_BIT) {
3401 /* load */
3402 gen_vfp_ld(s, dp, addr);
3403 gen_mov_vreg_F0(dp, rd + i);
3404 } else {
3405 /* store */
3406 gen_mov_F0_vreg(dp, rd + i);
3407 gen_vfp_st(s, dp, addr);
3408 }
3409 tcg_gen_addi_i32(addr, addr, offset);
3410 }
3411 if (insn & (1 << 21)) {
3412 /* writeback */
3413 if (insn & (1 << 24))
3414 offset = -offset * n;
3415 else if (dp && (insn & 1))
3416 offset = 4;
3417 else
3418 offset = 0;
3419
3420 if (offset != 0)
3421 tcg_gen_addi_i32(addr, addr, offset);
3422 store_reg(s, rn, addr);
3423 } else {
3424 tcg_temp_free_i32(addr);
3425 }
3426 }
3427 }
3428 break;
3429 default:
3430 /* Should never happen. */
3431 return 1;
3432 }
3433 return 0;
3434 }
3435
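/* Branch to a known destination. If the target lies in the same guest
 * page as this TB we can use goto_tb, and exit_tb((tcg_target_long)tb + n)
 * lets the execution loop chain the two TBs directly through jump slot n;
 * otherwise just set the PC and return to the main loop so the target is
 * looked up afresh. */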
3436 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3437 {
3438 TranslationBlock *tb;
3439
3440 tb = s->tb;
3441 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3442 tcg_gen_goto_tb(n);
3443 gen_set_pc_im(dest);
3444 tcg_gen_exit_tb((tcg_target_long)tb + n);
3445 } else {
3446 gen_set_pc_im(dest);
3447 tcg_gen_exit_tb(0);
3448 }
3449 }
3450
3451 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3452 {
3453 if (unlikely(s->singlestep_enabled)) {
3454 /* An indirect jump so that we still trigger the debug exception. */
3455 if (s->thumb)
3456 dest |= 1;
3457 gen_bx_im(s, dest);
3458 } else {
3459 gen_goto_tb(s, 0, dest);
3460 s->is_jmp = DISAS_TB_JUMP;
3461 }
3462 }
3463
3464 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3465 {
3466 if (x)
3467 tcg_gen_sari_i32(t0, t0, 16);
3468 else
3469 gen_sxth(t0);
3470 if (y)
3471 tcg_gen_sari_i32(t1, t1, 16);
3472 else
3473 gen_sxth(t1);
3474 tcg_gen_mul_i32(t0, t0, t1);
3475 }
3476
3477 /* Return the mask of PSR bits set by an MSR instruction. */
3478 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3479 uint32_t mask;
3480
3481 mask = 0;
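/* 'flags' is the MSR field mask: bit 0 = c (PSR bits 7:0), bit 1 = x
 * (15:8), bit 2 = s (23:16), bit 3 = f (31:24). */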
3482 if (flags & (1 << 0))
3483 mask |= 0xff;
3484 if (flags & (1 << 1))
3485 mask |= 0xff00;
3486 if (flags & (1 << 2))
3487 mask |= 0xff0000;
3488 if (flags & (1 << 3))
3489 mask |= 0xff000000;
3490
3491 /* Mask out undefined bits. */
3492 mask &= ~CPSR_RESERVED;
3493 if (!arm_feature(env, ARM_FEATURE_V4T))
3494 mask &= ~CPSR_T;
3495 if (!arm_feature(env, ARM_FEATURE_V5))
3496 mask &= ~CPSR_Q; /* V5TE in reality */
3497 if (!arm_feature(env, ARM_FEATURE_V6))
3498 mask &= ~(CPSR_E | CPSR_GE);
3499 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3500 mask &= ~CPSR_IT;
3501 /* Mask out execution state bits. */
3502 if (!spsr)
3503 mask &= ~CPSR_EXEC;
3504 /* Mask out privileged bits. */
3505 if (IS_USER(s))
3506 mask &= CPSR_USER;
3507 return mask;
3508 }
3509
3510 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3511 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3512 {
3513 TCGv tmp;
3514 if (spsr) {
3515 /* ??? This is also undefined in system mode. */
3516 if (IS_USER(s))
3517 return 1;
3518
3519 tmp = load_cpu_field(spsr);
3520 tcg_gen_andi_i32(tmp, tmp, ~mask);
3521 tcg_gen_andi_i32(t0, t0, mask);
3522 tcg_gen_or_i32(tmp, tmp, t0);
3523 store_cpu_field(tmp, spsr);
3524 } else {
3525 gen_set_cpsr(t0, mask);
3526 }
3527 tcg_temp_free_i32(t0);
3528 gen_lookup_tb(s);
3529 return 0;
3530 }
3531
3532 /* Returns nonzero if access to the PSR is not permitted. */
3533 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3534 {
3535 TCGv tmp;
3536 tmp = tcg_temp_new_i32();
3537 tcg_gen_movi_i32(tmp, val);
3538 return gen_set_psr(s, mask, spsr, tmp);
3539 }
3540
3541 /* Generate an old-style exception return. Marks pc as dead. */
3542 static void gen_exception_return(DisasContext *s, TCGv pc)
3543 {
3544 TCGv tmp;
3545 store_reg(s, 15, pc);
3546 tmp = load_cpu_field(spsr);
3547 gen_set_cpsr(tmp, 0xffffffff);
3548 tcg_temp_free_i32(tmp);
3549 s->is_jmp = DISAS_UPDATE;
3550 }
3551
3552 /* Generate a v6 exception return. Marks both values as dead. */
3553 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3554 {
3555 gen_set_cpsr(cpsr, 0xffffffff);
3556 tcg_temp_free_i32(cpsr);
3557 store_reg(s, 15, pc);
3558 s->is_jmp = DISAS_UPDATE;
3559 }
3560
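/* Sync the Thumb-2 conditional execution (IT block) state back into the
 * CPU state; used e.g. by gen_exception_insn() below so the correct
 * condexec bits are live when the exception is taken. */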
3561 static inline void
3562 gen_set_condexec (DisasContext *s)
3563 {
3564 if (s->condexec_mask) {
3565 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3566 TCGv tmp = tcg_temp_new_i32();
3567 tcg_gen_movi_i32(tmp, val);
3568 store_cpu_field(tmp, condexec_bits);
3569 }
3570 }
3571
3572 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3573 {
3574 gen_set_condexec(s);
3575 gen_set_pc_im(s->pc - offset);
3576 gen_exception(excp);
3577 s->is_jmp = DISAS_JUMP;
3578 }
3579
3580 static void gen_nop_hint(DisasContext *s, int val)
3581 {
3582 switch (val) {
3583 case 3: /* wfi */
3584 gen_set_pc_im(s->pc);
3585 s->is_jmp = DISAS_WFI;
3586 break;
3587 case 2: /* wfe */
3588 case 4: /* sev */
3589 /* TODO: Implement SEV and WFE. May help SMP performance. */
3590 default: /* nop */
3591 break;
3592 }
3593 }
3594
3595 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3596
3597 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3598 {
3599 switch (size) {
3600 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3601 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3602 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3603 default: abort();
3604 }
3605 }
3606
3607 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3608 {
3609 switch (size) {
3610 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3611 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3612 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3613 default: return;
3614 }
3615 }
3616
3617 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3618 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3619 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3620 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3621 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3622
3623 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3624 switch ((size << 1) | u) { \
3625 case 0: \
3626 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3627 break; \
3628 case 1: \
3629 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3630 break; \
3631 case 2: \
3632 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3633 break; \
3634 case 3: \
3635 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3636 break; \
3637 case 4: \
3638 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3639 break; \
3640 case 5: \
3641 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3642 break; \
3643 default: return 1; \
3644 }} while (0)
3645
3646 #define GEN_NEON_INTEGER_OP(name) do { \
3647 switch ((size << 1) | u) { \
3648 case 0: \
3649 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3650 break; \
3651 case 1: \
3652 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3653 break; \
3654 case 2: \
3655 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3656 break; \
3657 case 3: \
3658 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3659 break; \
3660 case 4: \
3661 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3662 break; \
3663 case 5: \
3664 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3665 break; \
3666 default: return 1; \
3667 }} while (0)
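/* Both macros above dispatch on (size << 1) | u to select the signed or
 * unsigned 8/16/32-bit variant of a Neon helper, operating on tmp/tmp2 and
 * leaving the result in tmp; the _ENV variant is for helpers that also
 * take cpu_env (typically the saturating ones). */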
3668
3669 static TCGv neon_load_scratch(int scratch)
3670 {
3671 TCGv tmp = tcg_temp_new_i32();
3672 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3673 return tmp;
3674 }
3675
3676 static void neon_store_scratch(int scratch, TCGv var)
3677 {
3678 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3679 tcg_temp_free_i32(var);
3680 }
3681
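/* Load a Neon scalar operand, duplicated across a 32-bit value. 'reg'
 * packs the scalar index: bit 4 picks the 32-bit half of the D register,
 * bit 3 (16-bit scalars only) picks the halfword within it, and the low
 * bits give the D register number. */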
3682 static inline TCGv neon_get_scalar(int size, int reg)
3683 {
3684 TCGv tmp;
3685 if (size == 1) {
3686 tmp = neon_load_reg(reg & 7, reg >> 4);
3687 if (reg & 8) {
3688 gen_neon_dup_high16(tmp);
3689 } else {
3690 gen_neon_dup_low16(tmp);
3691 }
3692 } else {
3693 tmp = neon_load_reg(reg & 15, reg >> 4);
3694 }
3695 return tmp;
3696 }
3697
3698 static int gen_neon_unzip(int rd, int rm, int size, int q)
3699 {
3700 TCGv tmp, tmp2;
3701 if (!q && size == 2) {
3702 return 1;
3703 }
3704 tmp = tcg_const_i32(rd);
3705 tmp2 = tcg_const_i32(rm);
3706 if (q) {
3707 switch (size) {
3708 case 0:
3709 gen_helper_neon_qunzip8(tmp, tmp2);
3710 break;
3711 case 1:
3712 gen_helper_neon_qunzip16(tmp, tmp2);
3713 break;
3714 case 2:
3715 gen_helper_neon_qunzip32(tmp, tmp2);
3716 break;
3717 default:
3718 abort();
3719 }
3720 } else {
3721 switch (size) {
3722 case 0:
3723 gen_helper_neon_unzip8(tmp, tmp2);
3724 break;
3725 case 1:
3726 gen_helper_neon_unzip16(tmp, tmp2);
3727 break;
3728 default:
3729 abort();
3730 }
3731 }
3732 tcg_temp_free_i32(tmp);
3733 tcg_temp_free_i32(tmp2);
3734 return 0;
3735 }
3736
3737 static int gen_neon_zip(int rd, int rm, int size, int q)
3738 {
3739 TCGv tmp, tmp2;
3740 if (!q && size == 2) {
3741 return 1;
3742 }
3743 tmp = tcg_const_i32(rd);
3744 tmp2 = tcg_const_i32(rm);
3745 if (q) {
3746 switch (size) {
3747 case 0:
3748 gen_helper_neon_qzip8(tmp, tmp2);
3749 break;
3750 case 1:
3751 gen_helper_neon_qzip16(tmp, tmp2);
3752 break;
3753 case 2:
3754 gen_helper_neon_qzip32(tmp, tmp2);
3755 break;
3756 default:
3757 abort();
3758 }
3759 } else {
3760 switch (size) {
3761 case 0:
3762 gen_helper_neon_zip8(tmp, tmp2);
3763 break;
3764 case 1:
3765 gen_helper_neon_zip16(tmp, tmp2);
3766 break;
3767 default:
3768 abort();
3769 }
3770 }
3771 tcg_temp_free_i32(tmp);
3772 tcg_temp_free_i32(tmp2);
3773 return 0;
3774 }
3775
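/* Lane-transpose helpers used for VTRN: afterwards t0 holds the
 * even-numbered byte/halfword lanes of the two inputs interleaved and t1
 * holds the odd-numbered lanes. */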
3776 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3777 {
3778 TCGv rd, tmp;
3779
3780 rd = tcg_temp_new_i32();
3781 tmp = tcg_temp_new_i32();
3782
3783 tcg_gen_shli_i32(rd, t0, 8);
3784 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3785 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3786 tcg_gen_or_i32(rd, rd, tmp);
3787
3788 tcg_gen_shri_i32(t1, t1, 8);
3789 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3790 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3791 tcg_gen_or_i32(t1, t1, tmp);
3792 tcg_gen_mov_i32(t0, rd);
3793
3794 tcg_temp_free_i32(tmp);
3795 tcg_temp_free_i32(rd);
3796 }
3797
3798 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3799 {
3800 TCGv rd, tmp;
3801
3802 rd = tcg_temp_new_i32();
3803 tmp = tcg_temp_new_i32();
3804
3805 tcg_gen_shli_i32(rd, t0, 16);
3806 tcg_gen_andi_i32(tmp, t1, 0xffff);
3807 tcg_gen_or_i32(rd, rd, tmp);
3808 tcg_gen_shri_i32(t1, t1, 16);
3809 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3810 tcg_gen_or_i32(t1, t1, tmp);
3811 tcg_gen_mov_i32(t0, rd);
3812
3813 tcg_temp_free_i32(tmp);
3814 tcg_temp_free_i32(rd);
3815 }
3816
3817
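/* Per-type parameters for the Neon 'load/store multiple structures'
 * forms, indexed by bits [11:8] of the instruction: number of D registers
 * accessed, element interleave factor, and register number spacing. */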
3818 static struct {
3819 int nregs;
3820 int interleave;
3821 int spacing;
3822 } neon_ls_element_type[11] = {
3823 {4, 4, 1},
3824 {4, 4, 2},
3825 {4, 1, 1},
3826 {4, 2, 1},
3827 {3, 3, 1},
3828 {3, 3, 2},
3829 {3, 1, 1},
3830 {1, 1, 1},
3831 {2, 2, 1},
3832 {2, 2, 2},
3833 {2, 1, 1}
3834 };
3835
3836 /* Translate a NEON load/store element instruction. Return nonzero if the
3837 instruction is invalid. */
3838 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3839 {
3840 int rd, rn, rm;
3841 int op;
3842 int nregs;
3843 int interleave;
3844 int spacing;
3845 int stride;
3846 int size;
3847 int reg;
3848 int pass;
3849 int load;
3850 int shift;
3851 int n;
3852 TCGv addr;
3853 TCGv tmp;
3854 TCGv tmp2;
3855 TCGv_i64 tmp64;
3856
3857 if (!s->vfp_enabled)
3858 return 1;
3859 VFP_DREG_D(rd, insn);
3860 rn = (insn >> 16) & 0xf;
3861 rm = insn & 0xf;
3862 load = (insn & (1 << 21)) != 0;
3863 if ((insn & (1 << 23)) == 0) {
3864 /* Load store all elements. */
3865 op = (insn >> 8) & 0xf;
3866 size = (insn >> 6) & 3;
3867 if (op > 10)
3868 return 1;
3869 /* Catch UNDEF cases for bad values of align field */
3870 switch (op & 0xc) {
3871 case 4:
3872 if (((insn >> 5) & 1) == 1) {
3873 return 1;
3874 }
3875 break;
3876 case 8:
3877 if (((insn >> 4) & 3) == 3) {
3878 return 1;
3879 }
3880 break;
3881 default:
3882 break;
3883 }
3884 nregs = neon_ls_element_type[op].nregs;
3885 interleave = neon_ls_element_type[op].interleave;
3886 spacing = neon_ls_element_type[op].spacing;
3887 if (size == 3 && (interleave | spacing) != 1)
3888 return 1;
3889 addr = tcg_temp_new_i32();
3890 load_reg_var(s, addr, rn);
3891 stride = (1 << size) * interleave;
3892 for (reg = 0; reg < nregs; reg++) {
3893 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3894 load_reg_var(s, addr, rn);
3895 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3896 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3897 load_reg_var(s, addr, rn);
3898 tcg_gen_addi_i32(addr, addr, 1 << size);
3899 }
3900 if (size == 3) {
3901 if (load) {
3902 tmp64 = gen_ld64(addr, IS_USER(s));
3903 neon_store_reg64(tmp64, rd);
3904 tcg_temp_free_i64(tmp64);
3905 } else {
3906 tmp64 = tcg_temp_new_i64();
3907 neon_load_reg64(tmp64, rd);
3908 gen_st64(tmp64, addr, IS_USER(s));
3909 }
3910 tcg_gen_addi_i32(addr, addr, stride);
3911 } else {
3912 for (pass = 0; pass < 2; pass++) {
3913 if (size == 2) {
3914 if (load) {
3915 tmp = gen_ld32(addr, IS_USER(s));
3916 neon_store_reg(rd, pass, tmp);
3917 } else {
3918 tmp = neon_load_reg(rd, pass);
3919 gen_st32(tmp, addr, IS_USER(s));
3920 }
3921 tcg_gen_addi_i32(addr, addr, stride);
3922 } else if (size == 1) {
3923 if (load) {
3924 tmp = gen_ld16u(addr, IS_USER(s));
3925 tcg_gen_addi_i32(addr, addr, stride);
3926 tmp2 = gen_ld16u(addr, IS_USER(s));
3927 tcg_gen_addi_i32(addr, addr, stride);
3928 tcg_gen_shli_i32(tmp2, tmp2, 16);
3929 tcg_gen_or_i32(tmp, tmp, tmp2);
3930 tcg_temp_free_i32(tmp2);
3931 neon_store_reg(rd, pass, tmp);
3932 } else {
3933 tmp = neon_load_reg(rd, pass);
3934 tmp2 = tcg_temp_new_i32();
3935 tcg_gen_shri_i32(tmp2, tmp, 16);
3936 gen_st16(tmp, addr, IS_USER(s));
3937 tcg_gen_addi_i32(addr, addr, stride);
3938 gen_st16(tmp2, addr, IS_USER(s));
3939 tcg_gen_addi_i32(addr, addr, stride);
3940 }
3941 } else /* size == 0 */ {
3942 if (load) {
3943 TCGV_UNUSED(tmp2);
3944 for (n = 0; n < 4; n++) {
3945 tmp = gen_ld8u(addr, IS_USER(s));
3946 tcg_gen_addi_i32(addr, addr, stride);
3947 if (n == 0) {
3948 tmp2 = tmp;
3949 } else {
3950 tcg_gen_shli_i32(tmp, tmp, n * 8);
3951 tcg_gen_or_i32(tmp2, tmp2, tmp);
3952 tcg_temp_free_i32(tmp);
3953 }
3954 }
3955 neon_store_reg(rd, pass, tmp2);
3956 } else {
3957 tmp2 = neon_load_reg(rd, pass);
3958 for (n = 0; n < 4; n++) {
3959 tmp = tcg_temp_new_i32();
3960 if (n == 0) {
3961 tcg_gen_mov_i32(tmp, tmp2);
3962 } else {
3963 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3964 }
3965 gen_st8(tmp, addr, IS_USER(s));
3966 tcg_gen_addi_i32(addr, addr, stride);
3967 }
3968 tcg_temp_free_i32(tmp2);
3969 }
3970 }
3971 }
3972 }
3973 rd += spacing;
3974 }
3975 tcg_temp_free_i32(addr);
3976 stride = nregs * 8;
3977 } else {
3978 size = (insn >> 10) & 3;
3979 if (size == 3) {
3980 /* Load single element to all lanes. */
3981 int a = (insn >> 4) & 1;
3982 if (!load) {
3983 return 1;
3984 }
3985 size = (insn >> 6) & 3;
3986 nregs = ((insn >> 8) & 3) + 1;
3987
3988 if (size == 3) {
3989 if (nregs != 4 || a == 0) {
3990 return 1;
3991 }
3992 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3993 size = 2;
3994 }
3995 if (nregs == 1 && a == 1 && size == 0) {
3996 return 1;
3997 }
3998 if (nregs == 3 && a == 1) {
3999 return 1;
4000 }
4001 addr = tcg_temp_new_i32();
4002 load_reg_var(s, addr, rn);
4003 if (nregs == 1) {
4004 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4005 tmp = gen_load_and_replicate(s, addr, size);
4006 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4007 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4008 if (insn & (1 << 5)) {
4009 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4010 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4011 }
4012 tcg_temp_free_i32(tmp);
4013 } else {
4014 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4015 stride = (insn & (1 << 5)) ? 2 : 1;
4016 for (reg = 0; reg < nregs; reg++) {
4017 tmp = gen_load_and_replicate(s, addr, size);
4018 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4019 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4020 tcg_temp_free_i32(tmp);
4021 tcg_gen_addi_i32(addr, addr, 1 << size);
4022 rd += stride;
4023 }
4024 }
4025 tcg_temp_free_i32(addr);
4026 stride = (1 << size) * nregs;
4027 } else {
4028 /* Single element. */
4029 int idx = (insn >> 4) & 0xf;
4030 pass = (insn >> 7) & 1;
4031 switch (size) {
4032 case 0:
4033 shift = ((insn >> 5) & 3) * 8;
4034 stride = 1;
4035 break;
4036 case 1:
4037 shift = ((insn >> 6) & 1) * 16;
4038 stride = (insn & (1 << 5)) ? 2 : 1;
4039 break;
4040 case 2:
4041 shift = 0;
4042 stride = (insn & (1 << 6)) ? 2 : 1;
4043 break;
4044 default:
4045 abort();
4046 }
4047 nregs = ((insn >> 8) & 3) + 1;
4048 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4049 switch (nregs) {
4050 case 1:
4051 if (((idx & (1 << size)) != 0) ||
4052 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4053 return 1;
4054 }
4055 break;
4056 case 3:
4057 if ((idx & 1) != 0) {
4058 return 1;
4059 }
4060 /* fall through */
4061 case 2:
4062 if (size == 2 && (idx & 2) != 0) {
4063 return 1;
4064 }
4065 break;
4066 case 4:
4067 if ((size == 2) && ((idx & 3) == 3)) {
4068 return 1;
4069 }
4070 break;
4071 default:
4072 abort();
4073 }
4074 if ((rd + stride * (nregs - 1)) > 31) {
4075 /* Attempts to write off the end of the register file
4076 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4077 * the neon_load_reg() would write off the end of the array.
4078 */
4079 return 1;
4080 }
4081 addr = tcg_temp_new_i32();
4082 load_reg_var(s, addr, rn);
4083 for (reg = 0; reg < nregs; reg++) {
4084 if (load) {
4085 switch (size) {
4086 case 0:
4087 tmp = gen_ld8u(addr, IS_USER(s));
4088 break;
4089 case 1:
4090 tmp = gen_ld16u(addr, IS_USER(s));
4091 break;
4092 case 2:
4093 tmp = gen_ld32(addr, IS_USER(s));
4094 break;
4095 default: /* Avoid compiler warnings. */
4096 abort();
4097 }
4098 if (size != 2) {
4099 tmp2 = neon_load_reg(rd, pass);
4100 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
4101 tcg_temp_free_i32(tmp2);
4102 }
4103 neon_store_reg(rd, pass, tmp);
4104 } else { /* Store */
4105 tmp = neon_load_reg(rd, pass);
4106 if (shift)
4107 tcg_gen_shri_i32(tmp, tmp, shift);
4108 switch (size) {
4109 case 0:
4110 gen_st8(tmp, addr, IS_USER(s));
4111 break;
4112 case 1:
4113 gen_st16(tmp, addr, IS_USER(s));
4114 break;
4115 case 2:
4116 gen_st32(tmp, addr, IS_USER(s));
4117 break;
4118 }
4119 }
4120 rd += stride;
4121 tcg_gen_addi_i32(addr, addr, 1 << size);
4122 }
4123 tcg_temp_free_i32(addr);
4124 stride = nregs * (1 << size);
4125 }
4126 }
4127 if (rm != 15) {
4128 TCGv base;
4129
4130 base = load_reg(s, rn);
4131 if (rm == 13) {
4132 tcg_gen_addi_i32(base, base, stride);
4133 } else {
4134 TCGv index;
4135 index = load_reg(s, rm);
4136 tcg_gen_add_i32(base, base, index);
4137 tcg_temp_free_i32(index);
4138 }
4139 store_reg(s, rn, base);
4140 }
4141 return 0;
4142 }
4143
4144 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4145 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4146 {
4147 tcg_gen_and_i32(t, t, c);
4148 tcg_gen_andc_i32(f, f, c);
4149 tcg_gen_or_i32(dest, t, f);
4150 }
4151
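/* 64->32 bit narrowing helpers: gen_neon_narrow() plainly truncates each
 * element, the _sats/_satu forms saturate to the signed/unsigned range,
 * and gen_neon_unarrow_sats() saturates signed input to an unsigned result
 * (the VQMOVUN-style operations). */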
4152 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4153 {
4154 switch (size) {
4155 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4156 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4157 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4158 default: abort();
4159 }
4160 }
4161
4162 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4163 {
4164 switch (size) {
4165 case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
4166 case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
4167 case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
4168 default: abort();
4169 }
4170 }
4171
4172 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4173 {
4174 switch (size) {
4175 case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
4176 case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
4177 case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
4178 default: abort();
4179 }
4180 }
4181
4182 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4183 {
4184 switch (size) {
4185 case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
4186 case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
4187 case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
4188 default: abort();
4189 }
4190 }
4191
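/* Element shift used on the narrowing-shift paths: 'u' selects the
 * unsigned helpers and 'q' the rounding ones; only 16- and 32-bit element
 * sizes are handled here. */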
4192 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4193 int q, int u)
4194 {
4195 if (q) {
4196 if (u) {
4197 switch (size) {
4198 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4199 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4200 default: abort();
4201 }
4202 } else {
4203 switch (size) {
4204 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4205 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4206 default: abort();
4207 }
4208 }
4209 } else {
4210 if (u) {
4211 switch (size) {
4212 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4213 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4214 default: abort();
4215 }
4216 } else {
4217 switch (size) {
4218 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4219 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4220 default: abort();
4221 }
4222 }
4223 }
4224 }
4225
4226 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4227 {
4228 if (u) {
4229 switch (size) {
4230 case 0: gen_helper_neon_widen_u8(dest, src); break;
4231 case 1: gen_helper_neon_widen_u16(dest, src); break;
4232 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4233 default: abort();
4234 }
4235 } else {
4236 switch (size) {
4237 case 0: gen_helper_neon_widen_s8(dest, src); break;
4238 case 1: gen_helper_neon_widen_s16(dest, src); break;
4239 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4240 default: abort();
4241 }
4242 }
4243 tcg_temp_free_i32(src);
4244 }
4245
4246 static inline void gen_neon_addl(int size)
4247 {
4248 switch (size) {
4249 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4250 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4251 case 2: tcg_gen_add_i64(CPU_V001); break;
4252 default: abort();
4253 }
4254 }
4255
4256 static inline void gen_neon_subl(int size)
4257 {
4258 switch (size) {
4259 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4260 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4261 case 2: tcg_gen_sub_i64(CPU_V001); break;
4262 default: abort();
4263 }
4264 }
4265
4266 static inline void gen_neon_negl(TCGv_i64 var, int size)
4267 {
4268 switch (size) {
4269 case 0: gen_helper_neon_negl_u16(var, var); break;
4270 case 1: gen_helper_neon_negl_u32(var, var); break;
4271 case 2: gen_helper_neon_negl_u64(var, var); break;
4272 default: abort();
4273 }
4274 }
4275
4276 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4277 {
4278 switch (size) {
4279 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4280 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
4281 default: abort();
4282 }
4283 }
4284
4285 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4286 {
4287 TCGv_i64 tmp;
4288
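/* The cases below index (size << 1) | u: sizes 0 and 1 use the dedicated
 * 8/16-bit widening multiply helpers, while 32-bit elements (size 2) go
 * through the generic 32x32->64 signed/unsigned multiply. */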
4289 switch ((size << 1) | u) {
4290 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4291 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4292 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4293 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4294 case 4:
4295 tmp = gen_muls_i64_i32(a, b);
4296 tcg_gen_mov_i64(dest, tmp);
4297 tcg_temp_free_i64(tmp);
4298 break;
4299 case 5:
4300 tmp = gen_mulu_i64_i32(a, b);
4301 tcg_gen_mov_i64(dest, tmp);
4302 tcg_temp_free_i64(tmp);
4303 break;
4304 default: abort();
4305 }
4306
4307 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4308 Don't forget to clean them now. */
4309 if (size < 2) {
4310 tcg_temp_free_i32(a);
4311 tcg_temp_free_i32(b);
4312 }
4313 }
4314
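/* Pick one of the four narrowing flavours above: when 'op' is set the choice
 * is between a plain narrow and a signed-to-unsigned saturating narrow,
 * otherwise between the unsigned and signed saturating narrows, with 'u'
 * selecting within each pair. */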
4315 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4316 {
4317 if (op) {
4318 if (u) {
4319 gen_neon_unarrow_sats(size, dest, src);
4320 } else {
4321 gen_neon_narrow(size, dest, src);
4322 }
4323 } else {
4324 if (u) {
4325 gen_neon_narrow_satu(size, dest, src);
4326 } else {
4327 gen_neon_narrow_sats(size, dest, src);
4328 }
4329 }
4330 }
4331
4332 /* Symbolic constants for op fields for Neon 3-register same-length.
4333 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4334 * table A7-9.
4335 */
4336 #define NEON_3R_VHADD 0
4337 #define NEON_3R_VQADD 1
4338 #define NEON_3R_VRHADD 2
4339 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4340 #define NEON_3R_VHSUB 4
4341 #define NEON_3R_VQSUB 5
4342 #define NEON_3R_VCGT 6
4343 #define NEON_3R_VCGE 7
4344 #define NEON_3R_VSHL 8
4345 #define NEON_3R_VQSHL 9
4346 #define NEON_3R_VRSHL 10
4347 #define NEON_3R_VQRSHL 11
4348 #define NEON_3R_VMAX 12
4349 #define NEON_3R_VMIN 13
4350 #define NEON_3R_VABD 14
4351 #define NEON_3R_VABA 15
4352 #define NEON_3R_VADD_VSUB 16
4353 #define NEON_3R_VTST_VCEQ 17
4354 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4355 #define NEON_3R_VMUL 19
4356 #define NEON_3R_VPMAX 20
4357 #define NEON_3R_VPMIN 21
4358 #define NEON_3R_VQDMULH_VQRDMULH 22
4359 #define NEON_3R_VPADD 23
4360 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4361 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4362 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4363 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4364 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4365 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4366
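/* Each entry in this array has bit n set if the insn allows size value n
 * (otherwise it will UNDEF); unallocated op values have no bits set and
 * so always UNDEF, as for neon_2rm_sizes[] below. */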
4367 static const uint8_t neon_3r_sizes[] = {
4368 [NEON_3R_VHADD] = 0x7,
4369 [NEON_3R_VQADD] = 0xf,
4370 [NEON_3R_VRHADD] = 0x7,
4371 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4372 [NEON_3R_VHSUB] = 0x7,
4373 [NEON_3R_VQSUB] = 0xf,
4374 [NEON_3R_VCGT] = 0x7,
4375 [NEON_3R_VCGE] = 0x7,
4376 [NEON_3R_VSHL] = 0xf,
4377 [NEON_3R_VQSHL] = 0xf,
4378 [NEON_3R_VRSHL] = 0xf,
4379 [NEON_3R_VQRSHL] = 0xf,
4380 [NEON_3R_VMAX] = 0x7,
4381 [NEON_3R_VMIN] = 0x7,
4382 [NEON_3R_VABD] = 0x7,
4383 [NEON_3R_VABA] = 0x7,
4384 [NEON_3R_VADD_VSUB] = 0xf,
4385 [NEON_3R_VTST_VCEQ] = 0x7,
4386 [NEON_3R_VML] = 0x7,
4387 [NEON_3R_VMUL] = 0x7,
4388 [NEON_3R_VPMAX] = 0x7,
4389 [NEON_3R_VPMIN] = 0x7,
4390 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4391 [NEON_3R_VPADD] = 0x7,
4392 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4393 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4394 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4395 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4396 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4397 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4398 };
4399
4400 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4401 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4402 * table A7-13.
4403 */
4404 #define NEON_2RM_VREV64 0
4405 #define NEON_2RM_VREV32 1
4406 #define NEON_2RM_VREV16 2
4407 #define NEON_2RM_VPADDL 4
4408 #define NEON_2RM_VPADDL_U 5
4409 #define NEON_2RM_VCLS 8
4410 #define NEON_2RM_VCLZ 9
4411 #define NEON_2RM_VCNT 10
4412 #define NEON_2RM_VMVN 11
4413 #define NEON_2RM_VPADAL 12
4414 #define NEON_2RM_VPADAL_U 13
4415 #define NEON_2RM_VQABS 14
4416 #define NEON_2RM_VQNEG 15
4417 #define NEON_2RM_VCGT0 16
4418 #define NEON_2RM_VCGE0 17
4419 #define NEON_2RM_VCEQ0 18
4420 #define NEON_2RM_VCLE0 19
4421 #define NEON_2RM_VCLT0 20
4422 #define NEON_2RM_VABS 22
4423 #define NEON_2RM_VNEG 23
4424 #define NEON_2RM_VCGT0_F 24
4425 #define NEON_2RM_VCGE0_F 25
4426 #define NEON_2RM_VCEQ0_F 26
4427 #define NEON_2RM_VCLE0_F 27
4428 #define NEON_2RM_VCLT0_F 28
4429 #define NEON_2RM_VABS_F 30
4430 #define NEON_2RM_VNEG_F 31
4431 #define NEON_2RM_VSWP 32
4432 #define NEON_2RM_VTRN 33
4433 #define NEON_2RM_VUZP 34
4434 #define NEON_2RM_VZIP 35
4435 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4436 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4437 #define NEON_2RM_VSHLL 38
4438 #define NEON_2RM_VCVT_F16_F32 44
4439 #define NEON_2RM_VCVT_F32_F16 46
4440 #define NEON_2RM_VRECPE 56
4441 #define NEON_2RM_VRSQRTE 57
4442 #define NEON_2RM_VRECPE_F 58
4443 #define NEON_2RM_VRSQRTE_F 59
4444 #define NEON_2RM_VCVT_FS 60
4445 #define NEON_2RM_VCVT_FU 61
4446 #define NEON_2RM_VCVT_SF 62
4447 #define NEON_2RM_VCVT_UF 63
4448
4449 static int neon_2rm_is_float_op(int op)
4450 {
4451 /* Return true if this neon 2reg-misc op is float-to-float */
4452 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4453 op >= NEON_2RM_VRECPE_F);
4454 }
4455
4456 /* Each entry in this array has bit n set if the insn allows
4457 * size value n (otherwise it will UNDEF). Since unallocated
4458 * op values will have no bits set they always UNDEF.
4459 */
4460 static const uint8_t neon_2rm_sizes[] = {
4461 [NEON_2RM_VREV64] = 0x7,
4462 [NEON_2RM_VREV32] = 0x3,
4463 [NEON_2RM_VREV16] = 0x1,
4464 [NEON_2RM_VPADDL] = 0x7,
4465 [NEON_2RM_VPADDL_U] = 0x7,
4466 [NEON_2RM_VCLS] = 0x7,
4467 [NEON_2RM_VCLZ] = 0x7,
4468 [NEON_2RM_VCNT] = 0x1,
4469 [NEON_2RM_VMVN] = 0x1,
4470 [NEON_2RM_VPADAL] = 0x7,
4471 [NEON_2RM_VPADAL_U] = 0x7,
4472 [NEON_2RM_VQABS] = 0x7,
4473 [NEON_2RM_VQNEG] = 0x7,
4474 [NEON_2RM_VCGT0] = 0x7,
4475 [NEON_2RM_VCGE0] = 0x7,
4476 [NEON_2RM_VCEQ0] = 0x7,
4477 [NEON_2RM_VCLE0] = 0x7,
4478 [NEON_2RM_VCLT0] = 0x7,
4479 [NEON_2RM_VABS] = 0x7,
4480 [NEON_2RM_VNEG] = 0x7,
4481 [NEON_2RM_VCGT0_F] = 0x4,
4482 [NEON_2RM_VCGE0_F] = 0x4,
4483 [NEON_2RM_VCEQ0_F] = 0x4,
4484 [NEON_2RM_VCLE0_F] = 0x4,
4485 [NEON_2RM_VCLT0_F] = 0x4,
4486 [NEON_2RM_VABS_F] = 0x4,
4487 [NEON_2RM_VNEG_F] = 0x4,
4488 [NEON_2RM_VSWP] = 0x1,
4489 [NEON_2RM_VTRN] = 0x7,
4490 [NEON_2RM_VUZP] = 0x7,
4491 [NEON_2RM_VZIP] = 0x7,
4492 [NEON_2RM_VMOVN] = 0x7,
4493 [NEON_2RM_VQMOVN] = 0x7,
4494 [NEON_2RM_VSHLL] = 0x7,
4495 [NEON_2RM_VCVT_F16_F32] = 0x2,
4496 [NEON_2RM_VCVT_F32_F16] = 0x2,
4497 [NEON_2RM_VRECPE] = 0x4,
4498 [NEON_2RM_VRSQRTE] = 0x4,
4499 [NEON_2RM_VRECPE_F] = 0x4,
4500 [NEON_2RM_VRSQRTE_F] = 0x4,
4501 [NEON_2RM_VCVT_FS] = 0x4,
4502 [NEON_2RM_VCVT_FU] = 0x4,
4503 [NEON_2RM_VCVT_SF] = 0x4,
4504 [NEON_2RM_VCVT_UF] = 0x4,
4505 };
4506
4507 /* Translate a NEON data processing instruction. Return nonzero if the
4508 instruction is invalid.
4509 We process data in a mixture of 32-bit and 64-bit chunks.
4510 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4511
4512 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4513 {
4514 int op;
4515 int q;
4516 int rd, rn, rm;
4517 int size;
4518 int shift;
4519 int pass;
4520 int count;
4521 int pairwise;
4522 int u;
4523 uint32_t imm, mask;
4524 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4525 TCGv_i64 tmp64;
4526
4527 if (!s->vfp_enabled)
4528 return 1;
4529 q = (insn & (1 << 6)) != 0;
4530 u = (insn >> 24) & 1;
4531 VFP_DREG_D(rd, insn);
4532 VFP_DREG_N(rn, insn);
4533 VFP_DREG_M(rm, insn);
4534 size = (insn >> 20) & 3;
4535 if ((insn & (1 << 23)) == 0) {
4536 /* Three register same length. */
4537 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4538 /* Catch invalid op and bad size combinations: UNDEF */
4539 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4540 return 1;
4541 }
4542 /* All insns of this form UNDEF for either this condition or the
4543 * superset of cases "Q==1"; we catch the latter later.
4544 */
4545 if (q && ((rd | rn | rm) & 1)) {
4546 return 1;
4547 }
4548 if (size == 3 && op != NEON_3R_LOGIC) {
4549 /* 64-bit element instructions. */
4550 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4551 neon_load_reg64(cpu_V0, rn + pass);
4552 neon_load_reg64(cpu_V1, rm + pass);
4553 switch (op) {
4554 case NEON_3R_VQADD:
4555 if (u) {
4556 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
4557 } else {
4558 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
4559 }
4560 break;
4561 case NEON_3R_VQSUB:
4562 if (u) {
4563 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
4564 } else {
4565 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
4566 }
4567 break;
4568 case NEON_3R_VSHL:
4569 if (u) {
4570 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4571 } else {
4572 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4573 }
4574 break;
4575 case NEON_3R_VQSHL:
4576 if (u) {
4577 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
4578 } else {
4579 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
4580 }
4581 break;
4582 case NEON_3R_VRSHL:
4583 if (u) {
4584 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4585 } else {
4586 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4587 }
4588 break;
4589 case NEON_3R_VQRSHL:
4590 if (u) {
4591 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
4592 } else {
4593 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
4594 }
4595 break;
4596 case NEON_3R_VADD_VSUB:
4597 if (u) {
4598 tcg_gen_sub_i64(CPU_V001);
4599 } else {
4600 tcg_gen_add_i64(CPU_V001);
4601 }
4602 break;
4603 default:
4604 abort();
4605 }
4606 neon_store_reg64(cpu_V0, rd + pass);
4607 }
4608 return 0;
4609 }
4610 pairwise = 0;
4611 switch (op) {
4612 case NEON_3R_VSHL:
4613 case NEON_3R_VQSHL:
4614 case NEON_3R_VRSHL:
4615 case NEON_3R_VQRSHL:
4616 {
4617 int rtmp;
4618 /* Shift instruction operands are reversed. */
4619 rtmp = rn;
4620 rn = rm;
4621 rm = rtmp;
4622 }
4623 break;
4624 case NEON_3R_VPADD:
4625 if (u) {
4626 return 1;
4627 }
4628 /* Fall through */
4629 case NEON_3R_VPMAX:
4630 case NEON_3R_VPMIN:
4631 pairwise = 1;
4632 break;
4633 case NEON_3R_FLOAT_ARITH:
4634 pairwise = (u && size < 2); /* if VPADD (float) */
4635 break;
4636 case NEON_3R_FLOAT_MINMAX:
4637 pairwise = u; /* if VPMIN/VPMAX (float) */
4638 break;
4639 case NEON_3R_FLOAT_CMP:
4640 if (!u && size) {
4641 /* no encoding for U=0 C=1x */
4642 return 1;
4643 }
4644 break;
4645 case NEON_3R_FLOAT_ACMP:
4646 if (!u) {
4647 return 1;
4648 }
4649 break;
4650 case NEON_3R_VRECPS_VRSQRTS:
4651 if (u) {
4652 return 1;
4653 }
4654 break;
4655 case NEON_3R_VMUL:
4656 if (u && (size != 0)) {
4657 /* UNDEF on invalid size for polynomial subcase */
4658 return 1;
4659 }
4660 break;
4661 default:
4662 break;
4663 }
4664
4665 if (pairwise && q) {
4666 /* All the pairwise insns UNDEF if Q is set */
4667 return 1;
4668 }
4669
4670 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4671
4672 if (pairwise) {
4673 /* Pairwise. */
4674 if (pass < 1) {
4675 tmp = neon_load_reg(rn, 0);
4676 tmp2 = neon_load_reg(rn, 1);
4677 } else {
4678 tmp = neon_load_reg(rm, 0);
4679 tmp2 = neon_load_reg(rm, 1);
4680 }
4681 } else {
4682 /* Elementwise. */
4683 tmp = neon_load_reg(rn, pass);
4684 tmp2 = neon_load_reg(rm, pass);
4685 }
4686 switch (op) {
4687 case NEON_3R_VHADD:
4688 GEN_NEON_INTEGER_OP(hadd);
4689 break;
4690 case NEON_3R_VQADD:
4691 GEN_NEON_INTEGER_OP(qadd);
4692 break;
4693 case NEON_3R_VRHADD:
4694 GEN_NEON_INTEGER_OP(rhadd);
4695 break;
4696 case NEON_3R_LOGIC: /* Logic ops. */
4697 switch ((u << 2) | size) {
4698 case 0: /* VAND */
4699 tcg_gen_and_i32(tmp, tmp, tmp2);
4700 break;
4701 case 1: /* BIC */
4702 tcg_gen_andc_i32(tmp, tmp, tmp2);
4703 break;
4704 case 2: /* VORR */
4705 tcg_gen_or_i32(tmp, tmp, tmp2);
4706 break;
4707 case 3: /* VORN */
4708 tcg_gen_orc_i32(tmp, tmp, tmp2);
4709 break;
4710 case 4: /* VEOR */
4711 tcg_gen_xor_i32(tmp, tmp, tmp2);
4712 break;
4713 case 5: /* VBSL */
4714 tmp3 = neon_load_reg(rd, pass);
4715 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4716 tcg_temp_free_i32(tmp3);
4717 break;
4718 case 6: /* VBIT */
4719 tmp3 = neon_load_reg(rd, pass);
4720 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4721 tcg_temp_free_i32(tmp3);
4722 break;
4723 case 7: /* VBIF */
4724 tmp3 = neon_load_reg(rd, pass);
4725 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4726 tcg_temp_free_i32(tmp3);
4727 break;
4728 }
4729 break;
4730 case NEON_3R_VHSUB:
4731 GEN_NEON_INTEGER_OP(hsub);
4732 break;
4733 case NEON_3R_VQSUB:
4734 GEN_NEON_INTEGER_OP(qsub);
4735 break;
4736 case NEON_3R_VCGT:
4737 GEN_NEON_INTEGER_OP(cgt);
4738 break;
4739 case NEON_3R_VCGE:
4740 GEN_NEON_INTEGER_OP(cge);
4741 break;
4742 case NEON_3R_VSHL:
4743 GEN_NEON_INTEGER_OP(shl);
4744 break;
4745 case NEON_3R_VQSHL:
4746 GEN_NEON_INTEGER_OP(qshl);
4747 break;
4748 case NEON_3R_VRSHL:
4749 GEN_NEON_INTEGER_OP(rshl);
4750 break;
4751 case NEON_3R_VQRSHL:
4752 GEN_NEON_INTEGER_OP(qrshl);
4753 break;
4754 case NEON_3R_VMAX:
4755 GEN_NEON_INTEGER_OP(max);
4756 break;
4757 case NEON_3R_VMIN:
4758 GEN_NEON_INTEGER_OP(min);
4759 break;
4760 case NEON_3R_VABD:
4761 GEN_NEON_INTEGER_OP(abd);
4762 break;
4763 case NEON_3R_VABA:
4764 GEN_NEON_INTEGER_OP(abd);
4765 tcg_temp_free_i32(tmp2);
4766 tmp2 = neon_load_reg(rd, pass);
4767 gen_neon_add(size, tmp, tmp2);
4768 break;
4769 case NEON_3R_VADD_VSUB:
4770 if (!u) { /* VADD */
4771 gen_neon_add(size, tmp, tmp2);
4772 } else { /* VSUB */
4773 switch (size) {
4774 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4775 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4776 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4777 default: abort();
4778 }
4779 }
4780 break;
4781 case NEON_3R_VTST_VCEQ:
4782 if (!u) { /* VTST */
4783 switch (size) {
4784 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4785 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4786 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4787 default: abort();
4788 }
4789 } else { /* VCEQ */
4790 switch (size) {
4791 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4792 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4793 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4794 default: abort();
4795 }
4796 }
4797 break;
4798 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
4799 switch (size) {
4800 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4801 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4802 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4803 default: abort();
4804 }
4805 tcg_temp_free_i32(tmp2);
4806 tmp2 = neon_load_reg(rd, pass);
4807 if (u) { /* VMLS */
4808 gen_neon_rsb(size, tmp, tmp2);
4809 } else { /* VMLA */
4810 gen_neon_add(size, tmp, tmp2);
4811 }
4812 break;
4813 case NEON_3R_VMUL:
4814 if (u) { /* polynomial */
4815 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4816 } else { /* Integer */
4817 switch (size) {
4818 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4819 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4820 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4821 default: abort();
4822 }
4823 }
4824 break;
4825 case NEON_3R_VPMAX:
4826 GEN_NEON_INTEGER_OP(pmax);
4827 break;
4828 case NEON_3R_VPMIN:
4829 GEN_NEON_INTEGER_OP(pmin);
4830 break;
4831 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4832 if (!u) { /* VQDMULH */
4833 switch (size) {
4834 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4835 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
4836 default: abort();
4837 }
4838 } else { /* VQRDMULH */
4839 switch (size) {
4840 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4841 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
4842 default: abort();
4843 }
4844 }
4845 break;
4846 case NEON_3R_VPADD:
4847 switch (size) {
4848 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4849 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4850 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4851 default: abort();
4852 }
4853 break;
4854 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4855 switch ((u << 2) | size) {
4856 case 0: /* VADD */
4857 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4858 break;
4859 case 2: /* VSUB */
4860 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4861 break;
4862 case 4: /* VPADD */
4863 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4864 break;
4865 case 6: /* VABD */
4866 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4867 break;
4868 default:
4869 abort();
4870 }
4871 break;
4872 case NEON_3R_FLOAT_MULTIPLY:
4873 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4874 if (!u) {
4875 tcg_temp_free_i32(tmp2);
4876 tmp2 = neon_load_reg(rd, pass);
4877 if (size == 0) {
4878 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4879 } else {
4880 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4881 }
4882 }
4883 break;
4884 case NEON_3R_FLOAT_CMP:
4885 if (!u) {
4886 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4887 } else {
4888 if (size == 0)
4889 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4890 else
4891 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4892 }
4893 break;
4894 case NEON_3R_FLOAT_ACMP:
4895 if (size == 0)
4896 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4897 else
4898 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4899 break;
4900 case NEON_3R_FLOAT_MINMAX:
4901 if (size == 0)
4902 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4903 else
4904 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4905 break;
4906 case NEON_3R_VRECPS_VRSQRTS:
4907 if (size == 0)
4908 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4909 else
4910 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4911 break;
4912 default:
4913 abort();
4914 }
4915 tcg_temp_free_i32(tmp2);
4916
4917 /* Save the result. For elementwise operations we can put it
4918 straight into the destination register. For pairwise operations
4919 we have to be careful to avoid clobbering the source operands. */
4920 if (pairwise && rd == rm) {
4921 neon_store_scratch(pass, tmp);
4922 } else {
4923 neon_store_reg(rd, pass, tmp);
4924 }
4925
4926 } /* for pass */
4927 if (pairwise && rd == rm) {
4928 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4929 tmp = neon_load_scratch(pass);
4930 neon_store_reg(rd, pass, tmp);
4931 }
4932 }
4933 /* End of 3 register same size operations. */
4934 } else if (insn & (1 << 4)) {
4935 if ((insn & 0x00380080) != 0) {
4936 /* Two registers and shift. */
4937 op = (insn >> 8) & 0xf;
4938 if (insn & (1 << 7)) {
4939 /* 64-bit shift. */
4940 if (op > 7) {
4941 return 1;
4942 }
4943 size = 3;
4944 } else {
4945 size = 2;
4946 while ((insn & (1 << (size + 19))) == 0)
4947 size--;
4948 }
4949 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4950 /* To avoid excessive duplication of ops we implement shift
4951 by immediate using the variable shift operations. */
4952 if (op < 8) {
4953 /* Shift by immediate:
4954 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4955 if (q && ((rd | rm) & 1)) {
4956 return 1;
4957 }
4958 if (!u && (op == 4 || op == 6)) {
4959 return 1;
4960 }
4961 /* Right shifts are encoded as N - shift, where N is the
4962 element size in bits. */
4963 if (op <= 4)
4964 shift = shift - (1 << (size + 3));
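/* e.g. an 8-bit VSHR #3 arrives here with shift == 5 and becomes
 * 5 - 8 = -3; the variable-shift helpers treat negative amounts
 * as right shifts. */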
4965 if (size == 3) {
4966 count = q + 1;
4967 } else {
4968 count = q ? 4: 2;
4969 }
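/* Build an immediate with the (possibly negative) shift count placed in
 * each element lane of a 32-bit word, so every pass can hand a single
 * value to the per-lane variable-shift helpers. */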
4970 switch (size) {
4971 case 0:
4972 imm = (uint8_t) shift;
4973 imm |= imm << 8;
4974 imm |= imm << 16;
4975 break;
4976 case 1:
4977 imm = (uint16_t) shift;
4978 imm |= imm << 16;
4979 break;
4980 case 2:
4981 case 3:
4982 imm = shift;
4983 break;
4984 default:
4985 abort();
4986 }
4987
4988 for (pass = 0; pass < count; pass++) {
4989 if (size == 3) {
4990 neon_load_reg64(cpu_V0, rm + pass);
4991 tcg_gen_movi_i64(cpu_V1, imm);
4992 switch (op) {
4993 case 0: /* VSHR */
4994 case 1: /* VSRA */
4995 if (u)
4996 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4997 else
4998 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4999 break;
5000 case 2: /* VRSHR */
5001 case 3: /* VRSRA */
5002 if (u)
5003 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5004 else
5005 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5006 break;
5007 case 4: /* VSRI */
5008 case 5: /* VSHL, VSLI */
5009 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5010 break;
5011 case 6: /* VQSHLU */
5012 gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1);
5013 break;
5014 case 7: /* VQSHL */
5015 if (u) {
5016 gen_helper_neon_qshl_u64(cpu_V0,
5017 cpu_V0, cpu_V1);
5018 } else {
5019 gen_helper_neon_qshl_s64(cpu_V0,
5020 cpu_V0, cpu_V1);
5021 }
5022 break;
5023 }
5024 if (op == 1 || op == 3) {
5025 /* Accumulate. */
5026 neon_load_reg64(cpu_V1, rd + pass);
5027 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5028 } else if (op == 4 || (op == 5 && u)) {
5029 /* Insert */
5030 neon_load_reg64(cpu_V1, rd + pass);
5031 uint64_t mask;
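/* mask covers the result bits written by the shifted value; bits of
 * the old destination outside it are preserved (the top bits for
 * VSRI, the bottom bits for VSLI). */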
5032 if (shift < -63 || shift > 63) {
5033 mask = 0;
5034 } else {
5035 if (op == 4) {
5036 mask = 0xffffffffffffffffull >> -shift;
5037 } else {
5038 mask = 0xffffffffffffffffull << shift;
5039 }
5040 }
5041 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5042 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5043 }
5044 neon_store_reg64(cpu_V0, rd + pass);
5045 } else { /* size < 3 */
5046 /* Operands in T0 and T1. */
5047 tmp = neon_load_reg(rm, pass);
5048 tmp2 = tcg_temp_new_i32();
5049 tcg_gen_movi_i32(tmp2, imm);
5050 switch (op) {
5051 case 0: /* VSHR */
5052 case 1: /* VSRA */
5053 GEN_NEON_INTEGER_OP(shl);
5054 break;
5055 case 2: /* VRSHR */
5056 case 3: /* VRSRA */
5057 GEN_NEON_INTEGER_OP(rshl);
5058 break;
5059 case 4: /* VSRI */
5060 case 5: /* VSHL, VSLI */
5061 switch (size) {
5062 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5063 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5064 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5065 default: abort();
5066 }
5067 break;
5068 case 6: /* VQSHLU */
5069 switch (size) {
5070 case 0:
5071 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
5072 break;
5073 case 1:
5074 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
5075 break;
5076 case 2:
5077 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
5078 break;
5079 default:
5080 abort();
5081 }
5082 break;
5083 case 7: /* VQSHL */
5084 GEN_NEON_INTEGER_OP(qshl);
5085 break;
5086 }
5087 tcg_temp_free_i32(tmp2);
5088
5089 if (op == 1 || op == 3) {
5090 /* Accumulate. */
5091 tmp2 = neon_load_reg(rd, pass);
5092 gen_neon_add(size, tmp, tmp2);
5093 tcg_temp_free_i32(tmp2);
5094 } else if (op == 4 || (op == 5 && u)) {
5095 /* Insert */
5096 switch (size) {
5097 case 0:
5098 if (op == 4)
5099 mask = 0xff >> -shift;
5100 else
5101 mask = (uint8_t)(0xff << shift);
5102 mask |= mask << 8;
5103 mask |= mask << 16;
5104 break;
5105 case 1:
5106 if (op == 4)
5107 mask = 0xffff >> -shift;
5108 else
5109 mask = (uint16_t)(0xffff << shift);
5110 mask |= mask << 16;
5111 break;
5112 case 2:
5113 if (shift < -31 || shift > 31) {
5114 mask = 0;
5115 } else {
5116 if (op == 4)
5117 mask = 0xffffffffu >> -shift;
5118 else
5119 mask = 0xffffffffu << shift;
5120 }
5121 break;
5122 default:
5123 abort();
5124 }
5125 tmp2 = neon_load_reg(rd, pass);
5126 tcg_gen_andi_i32(tmp, tmp, mask);
5127 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5128 tcg_gen_or_i32(tmp, tmp, tmp2);
5129 tcg_temp_free_i32(tmp2);
5130 }
5131 neon_store_reg(rd, pass, tmp);
5132 }
5133 } /* for pass */
5134 } else if (op < 10) {
5135 /* Shift by immediate and narrow:
5136 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5137 int input_unsigned = (op == 8) ? !u : u;
5138 if (rm & 1) {
5139 return 1;
5140 }
5141 shift = shift - (1 << (size + 3));
5142 size++;
5143 if (size == 3) {
5144 tmp64 = tcg_const_i64(shift);
5145 neon_load_reg64(cpu_V0, rm);
5146 neon_load_reg64(cpu_V1, rm + 1);
5147 for (pass = 0; pass < 2; pass++) {
5148 TCGv_i64 in;
5149 if (pass == 0) {
5150 in = cpu_V0;
5151 } else {
5152 in = cpu_V1;
5153 }
5154 if (q) {
5155 if (input_unsigned) {
5156 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5157 } else {
5158 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5159 }
5160 } else {
5161 if (input_unsigned) {
5162 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5163 } else {
5164 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5165 }
5166 }
5167 tmp = tcg_temp_new_i32();
5168 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5169 neon_store_reg(rd, pass, tmp);
5170 } /* for pass */
5171 tcg_temp_free_i64(tmp64);
5172 } else {
5173 if (size == 1) {
5174 imm = (uint16_t)shift;
5175 imm |= imm << 16;
5176 } else {
5177 /* size == 2 */
5178 imm = (uint32_t)shift;
5179 }
5180 tmp2 = tcg_const_i32(imm);
5181 tmp4 = neon_load_reg(rm + 1, 0);
5182 tmp5 = neon_load_reg(rm + 1, 1);
5183 for (pass = 0; pass < 2; pass++) {
5184 if (pass == 0) {
5185 tmp = neon_load_reg(rm, 0);
5186 } else {
5187 tmp = tmp4;
5188 }
5189 gen_neon_shift_narrow(size, tmp, tmp2, q,
5190 input_unsigned);
5191 if (pass == 0) {
5192 tmp3 = neon_load_reg(rm, 1);
5193 } else {
5194 tmp3 = tmp5;
5195 }
5196 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5197 input_unsigned);
5198 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5199 tcg_temp_free_i32(tmp);
5200 tcg_temp_free_i32(tmp3);
5201 tmp = tcg_temp_new_i32();
5202 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5203 neon_store_reg(rd, pass, tmp);
5204 } /* for pass */
5205 tcg_temp_free_i32(tmp2);
5206 }
5207 } else if (op == 10) {
5208 /* VSHLL, VMOVL */
5209 if (q || (rd & 1)) {
5210 return 1;
5211 }
5212 tmp = neon_load_reg(rm, 0);
5213 tmp2 = neon_load_reg(rm, 1);
5214 for (pass = 0; pass < 2; pass++) {
5215 if (pass == 1)
5216 tmp = tmp2;
5217
5218 gen_neon_widen(cpu_V0, tmp, size, u);
5219
5220 if (shift != 0) {
5221 /* The shift is less than the width of the source
5222 type, so we can just shift the whole register. */
5223 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5224 /* Widen the result of shift: we need to clear
5225 * the potential overflow bits resulting from
5226 * left bits of the narrow input appearing as
5227 * right bits of the left-hand neighbour's
5228 * narrow input. */
5229 if (size < 2 || !u) {
5230 uint64_t imm64;
5231 if (size == 0) {
5232 imm = (0xffu >> (8 - shift));
5233 imm |= imm << 16;
5234 } else if (size == 1) {
5235 imm = 0xffff >> (16 - shift);
5236 } else {
5237 /* size == 2 */
5238 imm = 0xffffffff >> (32 - shift);
5239 }
5240 if (size < 2) {
5241 imm64 = imm | (((uint64_t)imm) << 32);
5242 } else {
5243 imm64 = imm;
5244 }
5245 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5246 }
5247 }
5248 neon_store_reg64(cpu_V0, rd + pass);
5249 }
5250 } else if (op >= 14) {
5251 /* VCVT fixed-point. */
5252 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5253 return 1;
5254 }
5255 /* We have already masked out the must-be-1 top bit of imm6,
5256 * hence this 32-shift where the ARM ARM has 64-imm6.
5257 */
5258 shift = 32 - shift;
5259 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5260 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5261 if (!(op & 1)) {
5262 if (u)
5263 gen_vfp_ulto(0, shift, 1);
5264 else
5265 gen_vfp_slto(0, shift, 1);
5266 } else {
5267 if (u)
5268 gen_vfp_toul(0, shift, 1);
5269 else
5270 gen_vfp_tosl(0, shift, 1);
5271 }
5272 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5273 }
5274 } else {
5275 return 1;
5276 }
5277 } else { /* (insn & 0x00380080) == 0 */
5278 int invert;
5279 if (q && (rd & 1)) {
5280 return 1;
5281 }
5282
5283 op = (insn >> 8) & 0xf;
5284 /* One register and immediate. */
5285 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5286 invert = (insn & (1 << 5)) != 0;
5287 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5288 * We choose to not special-case this and will behave as if a
5289 * valid constant encoding of 0 had been given.
5290 */
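/* Expand the 8-bit immediate according to cmode (op): shift it into the
 * required byte of a 32-bit or 16-bit lane, use the 'ones-filled' forms
 * for ops 12 and 13, replicate it across all bytes for op 14, or build a
 * VFP-style float for op 15. */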
5291 switch (op) {
5292 case 0: case 1:
5293 /* no-op */
5294 break;
5295 case 2: case 3:
5296 imm <<= 8;
5297 break;
5298 case 4: case 5:
5299 imm <<= 16;
5300 break;
5301 case 6: case 7:
5302 imm <<= 24;
5303 break;
5304 case 8: case 9:
5305 imm |= imm << 16;
5306 break;
5307 case 10: case 11:
5308 imm = (imm << 8) | (imm << 24);
5309 break;
5310 case 12:
5311 imm = (imm << 8) | 0xff;
5312 break;
5313 case 13:
5314 imm = (imm << 16) | 0xffff;
5315 break;
5316 case 14:
5317 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5318 if (invert)
5319 imm = ~imm;
5320 break;
5321 case 15:
5322 if (invert) {
5323 return 1;
5324 }
5325 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5326 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5327 break;
5328 }
5329 if (invert)
5330 imm = ~imm;
5331
5332 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5333 if (op & 1 && op < 12) {
5334 tmp = neon_load_reg(rd, pass);
5335 if (invert) {
5336 /* The immediate value has already been inverted, so
5337 BIC becomes AND. */
5338 tcg_gen_andi_i32(tmp, tmp, imm);
5339 } else {
5340 tcg_gen_ori_i32(tmp, tmp, imm);
5341 }
5342 } else {
5343 /* VMOV, VMVN. */
5344 tmp = tcg_temp_new_i32();
5345 if (op == 14 && invert) {
5346 int n;
5347 uint32_t val;
5348 val = 0;
5349 for (n = 0; n < 4; n++) {
5350 if (imm & (1 << (n + (pass & 1) * 4)))
5351 val |= 0xff << (n * 8);
5352 }
5353 tcg_gen_movi_i32(tmp, val);
5354 } else {
5355 tcg_gen_movi_i32(tmp, imm);
5356 }
5357 }
5358 neon_store_reg(rd, pass, tmp);
5359 }
5360 }
5361 } else { /* (insn & 0x00800010 == 0x00800000) */
5362 if (size != 3) {
5363 op = (insn >> 8) & 0xf;
5364 if ((insn & (1 << 6)) == 0) {
5365 /* Three registers of different lengths. */
5366 int src1_wide;
5367 int src2_wide;
5368 int prewiden;
5369 /* undefreq: bit 0 : UNDEF if size != 0
5370 * bit 1 : UNDEF if size == 0
5371 * bit 2 : UNDEF if U == 1
5372 * Note that [1:0] set implies 'always UNDEF'
5373 */
5374 int undefreq;
5375 /* prewiden, src1_wide, src2_wide, undefreq */
5376 static const int neon_3reg_wide[16][4] = {
5377 {1, 0, 0, 0}, /* VADDL */
5378 {1, 1, 0, 0}, /* VADDW */
5379 {1, 0, 0, 0}, /* VSUBL */
5380 {1, 1, 0, 0}, /* VSUBW */
5381 {0, 1, 1, 0}, /* VADDHN */
5382 {0, 0, 0, 0}, /* VABAL */
5383 {0, 1, 1, 0}, /* VSUBHN */
5384 {0, 0, 0, 0}, /* VABDL */
5385 {0, 0, 0, 0}, /* VMLAL */
5386 {0, 0, 0, 6}, /* VQDMLAL */
5387 {0, 0, 0, 0}, /* VMLSL */
5388 {0, 0, 0, 6}, /* VQDMLSL */
5389 {0, 0, 0, 0}, /* Integer VMULL */
5390 {0, 0, 0, 2}, /* VQDMULL */
5391 {0, 0, 0, 5}, /* Polynomial VMULL */
5392 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5393 };
5394
5395 prewiden = neon_3reg_wide[op][0];
5396 src1_wide = neon_3reg_wide[op][1];
5397 src2_wide = neon_3reg_wide[op][2];
5398 undefreq = neon_3reg_wide[op][3];
5399
5400 if (((undefreq & 1) && (size != 0)) ||
5401 ((undefreq & 2) && (size == 0)) ||
5402 ((undefreq & 4) && u)) {
5403 return 1;
5404 }
5405 if ((src1_wide && (rn & 1)) ||
5406 (src2_wide && (rm & 1)) ||
5407 (!src2_wide && (rd & 1))) {
5408 return 1;
5409 }
5410
5411 /* Avoid overlapping operands. Wide source operands are
5412 always aligned so will never overlap with wide
5413 destinations in problematic ways. */
5414 if (rd == rm && !src2_wide) {
5415 tmp = neon_load_reg(rm, 1);
5416 neon_store_scratch(2, tmp);
5417 } else if (rd == rn && !src1_wide) {
5418 tmp = neon_load_reg(rn, 1);
5419 neon_store_scratch(2, tmp);
5420 }
5421 TCGV_UNUSED(tmp3);
5422 for (pass = 0; pass < 2; pass++) {
5423 if (src1_wide) {
5424 neon_load_reg64(cpu_V0, rn + pass);
5425 TCGV_UNUSED(tmp);
5426 } else {
5427 if (pass == 1 && rd == rn) {
5428 tmp = neon_load_scratch(2);
5429 } else {
5430 tmp = neon_load_reg(rn, pass);
5431 }
5432 if (prewiden) {
5433 gen_neon_widen(cpu_V0, tmp, size, u);
5434 }
5435 }
5436 if (src2_wide) {
5437 neon_load_reg64(cpu_V1, rm + pass);
5438 TCGV_UNUSED(tmp2);
5439 } else {
5440 if (pass == 1 && rd == rm) {
5441 tmp2 = neon_load_scratch(2);
5442 } else {
5443 tmp2 = neon_load_reg(rm, pass);
5444 }
5445 if (prewiden) {
5446 gen_neon_widen(cpu_V1, tmp2, size, u);
5447 }
5448 }
5449 switch (op) {
5450 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5451 gen_neon_addl(size);
5452 break;
5453 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5454 gen_neon_subl(size);
5455 break;
5456 case 5: case 7: /* VABAL, VABDL */
5457 switch ((size << 1) | u) {
5458 case 0:
5459 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5460 break;
5461 case 1:
5462 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5463 break;
5464 case 2:
5465 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5466 break;
5467 case 3:
5468 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5469 break;
5470 case 4:
5471 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5472 break;
5473 case 5:
5474 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5475 break;
5476 default: abort();
5477 }
5478 tcg_temp_free_i32(tmp2);
5479 tcg_temp_free_i32(tmp);
5480 break;
5481 case 8: case 9: case 10: case 11: case 12: case 13:
5482 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5483 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5484 break;
5485 case 14: /* Polynomial VMULL */
5486 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5487 tcg_temp_free_i32(tmp2);
5488 tcg_temp_free_i32(tmp);
5489 break;
5490 default: /* 15 is RESERVED: caught earlier */
5491 abort();
5492 }
5493 if (op == 13) {
5494 /* VQDMULL */
5495 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5496 neon_store_reg64(cpu_V0, rd + pass);
5497 } else if (op == 5 || (op >= 8 && op <= 11)) {
5498 /* Accumulate. */
5499 neon_load_reg64(cpu_V1, rd + pass);
5500 switch (op) {
5501 case 10: /* VMLSL */
5502 gen_neon_negl(cpu_V0, size);
5503 /* Fall through */
5504 case 5: case 8: /* VABAL, VMLAL */
5505 gen_neon_addl(size);
5506 break;
5507 case 9: case 11: /* VQDMLAL, VQDMLSL */
5508 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5509 if (op == 11) {
5510 gen_neon_negl(cpu_V0, size);
5511 }
5512 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5513 break;
5514 default:
5515 abort();
5516 }
5517 neon_store_reg64(cpu_V0, rd + pass);
5518 } else if (op == 4 || op == 6) {
5519 /* Narrowing operation. */
5520 tmp = tcg_temp_new_i32();
5521 if (!u) {
5522 switch (size) {
5523 case 0:
5524 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5525 break;
5526 case 1:
5527 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5528 break;
5529 case 2:
5530 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5531 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5532 break;
5533 default: abort();
5534 }
5535 } else {
5536 switch (size) {
5537 case 0:
5538 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5539 break;
5540 case 1:
5541 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5542 break;
5543 case 2:
5544 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
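/* Adding 1 << 31 before the high-half shift rounds to nearest for
 * the 32-bit rounding narrow (VRADDHN/VRSUBHN). */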
5545 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5546 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5547 break;
5548 default: abort();
5549 }
5550 }
5551 if (pass == 0) {
5552 tmp3 = tmp;
5553 } else {
5554 neon_store_reg(rd, 0, tmp3);
5555 neon_store_reg(rd, 1, tmp);
5556 }
5557 } else {
5558 /* Write back the result. */
5559 neon_store_reg64(cpu_V0, rd + pass);
5560 }
5561 }
5562 } else {
5563 /* Two registers and a scalar. NB that for ops of this form
5564 * the ARM ARM labels bit 24 as Q, but it is in our variable
5565 * 'u', not 'q'.
5566 */
5567 if (size == 0) {
5568 return 1;
5569 }
5570 switch (op) {
5571 case 1: /* Float VMLA scalar */
5572 case 5: /* Floating point VMLS scalar */
5573 case 9: /* Floating point VMUL scalar */
5574 if (size == 1) {
5575 return 1;
5576 }
5577 /* fall through */
5578 case 0: /* Integer VMLA scalar */
5579 case 4: /* Integer VMLS scalar */
5580 case 8: /* Integer VMUL scalar */
5581 case 12: /* VQDMULH scalar */
5582 case 13: /* VQRDMULH scalar */
5583 if (u && ((rd | rn) & 1)) {
5584 return 1;
5585 }
5586 tmp = neon_get_scalar(size, rm);
5587 neon_store_scratch(0, tmp);
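/* Stash the scalar in a scratch slot so a fresh copy can be reloaded
 * at the start of every pass (each pass consumes its temporaries). */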
5588 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5589 tmp = neon_load_scratch(0);
5590 tmp2 = neon_load_reg(rn, pass);
5591 if (op == 12) {
5592 if (size == 1) {
5593 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
5594 } else {
5595 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
5596 }
5597 } else if (op == 13) {
5598 if (size == 1) {
5599 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
5600 } else {
5601 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
5602 }
5603 } else if (op & 1) {
5604 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5605 } else {
5606 switch (size) {
5607 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5608 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5609 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5610 default: abort();
5611 }
5612 }
5613 tcg_temp_free_i32(tmp2);
5614 if (op < 8) {
5615 /* Accumulate. */
5616 tmp2 = neon_load_reg(rd, pass);
5617 switch (op) {
5618 case 0:
5619 gen_neon_add(size, tmp, tmp2);
5620 break;
5621 case 1:
5622 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5623 break;
5624 case 4:
5625 gen_neon_rsb(size, tmp, tmp2);
5626 break;
5627 case 5:
5628 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5629 break;
5630 default:
5631 abort();
5632 }
5633 tcg_temp_free_i32(tmp2);
5634 }
5635 neon_store_reg(rd, pass, tmp);
5636 }
5637 break;
5638 case 3: /* VQDMLAL scalar */
5639 case 7: /* VQDMLSL scalar */
5640 case 11: /* VQDMULL scalar */
5641 if (u == 1) {
5642 return 1;
5643 }
5644 /* fall through */
5645 case 2: /* VMLAL scalar */
5646 case 6: /* VMLSL scalar */
5647 case 10: /* VMULL scalar */
5648 if (rd & 1) {
5649 return 1;
5650 }
5651 tmp2 = neon_get_scalar(size, rm);
5652 /* We need a copy of tmp2 because gen_neon_mull
5653 * deletes it during pass 0. */
5654 tmp4 = tcg_temp_new_i32();
5655 tcg_gen_mov_i32(tmp4, tmp2);
5656 tmp3 = neon_load_reg(rn, 1);
5657
5658 for (pass = 0; pass < 2; pass++) {
5659 if (pass == 0) {
5660 tmp = neon_load_reg(rn, 0);
5661 } else {
5662 tmp = tmp3;
5663 tmp2 = tmp4;
5664 }
5665 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5666 if (op != 11) {
5667 neon_load_reg64(cpu_V1, rd + pass);
5668 }
5669 switch (op) {
5670 case 6:
5671 gen_neon_negl(cpu_V0, size);
5672 /* Fall through */
5673 case 2:
5674 gen_neon_addl(size);
5675 break;
5676 case 3: case 7:
5677 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5678 if (op == 7) {
5679 gen_neon_negl(cpu_V0, size);
5680 }
5681 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5682 break;
5683 case 10:
5684 /* no-op */
5685 break;
5686 case 11:
5687 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5688 break;
5689 default:
5690 abort();
5691 }
5692 neon_store_reg64(cpu_V0, rd + pass);
5693 }
5694
5695
5696 break;
5697 default: /* 14 and 15 are RESERVED */
5698 return 1;
5699 }
5700 }
5701 } else { /* size == 3 */
5702 if (!u) {
5703 /* Extract. */
5704 imm = (insn >> 8) & 0xf;
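/* VEXT: imm is the byte offset into the concatenation of the source
 * registers (rn forming the low part, rm the high part) from which
 * the result vector is taken. */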
5705
5706 if (imm > 7 && !q)
5707 return 1;
5708
5709 if (q && ((rd | rn | rm) & 1)) {
5710 return 1;
5711 }
5712
5713 if (imm == 0) {
5714 neon_load_reg64(cpu_V0, rn);
5715 if (q) {
5716 neon_load_reg64(cpu_V1, rn + 1);
5717 }
5718 } else if (imm == 8) {
5719 neon_load_reg64(cpu_V0, rn + 1);
5720 if (q) {
5721 neon_load_reg64(cpu_V1, rm);
5722 }
5723 } else if (q) {
5724 tmp64 = tcg_temp_new_i64();
5725 if (imm < 8) {
5726 neon_load_reg64(cpu_V0, rn);
5727 neon_load_reg64(tmp64, rn + 1);
5728 } else {
5729 neon_load_reg64(cpu_V0, rn + 1);
5730 neon_load_reg64(tmp64, rm);
5731 }
5732 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5733 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5734 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5735 if (imm < 8) {
5736 neon_load_reg64(cpu_V1, rm);
5737 } else {
5738 neon_load_reg64(cpu_V1, rm + 1);
5739 imm -= 8;
5740 }
5741 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5742 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5743 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5744 tcg_temp_free_i64(tmp64);
5745 } else {
5746 /* BUGFIX */
5747 neon_load_reg64(cpu_V0, rn);
5748 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5749 neon_load_reg64(cpu_V1, rm);
5750 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5751 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5752 }
5753 neon_store_reg64(cpu_V0, rd);
5754 if (q) {
5755 neon_store_reg64(cpu_V1, rd + 1);
5756 }
5757 } else if ((insn & (1 << 11)) == 0) {
5758 /* Two register misc. */
5759 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5760 size = (insn >> 18) & 3;
5761 /* UNDEF for unknown op values and bad op-size combinations */
5762 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5763 return 1;
5764 }
5765 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5766 q && ((rm | rd) & 1)) {
5767 return 1;
5768 }
5769 switch (op) {
5770 case NEON_2RM_VREV64:
5771 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5772 tmp = neon_load_reg(rm, pass * 2);
5773 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5774 switch (size) {
5775 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5776 case 1: gen_swap_half(tmp); break;
5777 case 2: /* no-op */ break;
5778 default: abort();
5779 }
5780 neon_store_reg(rd, pass * 2 + 1, tmp);
5781 if (size == 2) {
5782 neon_store_reg(rd, pass * 2, tmp2);
5783 } else {
5784 switch (size) {
5785 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5786 case 1: gen_swap_half(tmp2); break;
5787 default: abort();
5788 }
5789 neon_store_reg(rd, pass * 2, tmp2);
5790 }
5791 }
5792 break;
5793 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5794 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
5795 for (pass = 0; pass < q + 1; pass++) {
5796 tmp = neon_load_reg(rm, pass * 2);
5797 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5798 tmp = neon_load_reg(rm, pass * 2 + 1);
5799 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5800 switch (size) {
5801 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5802 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5803 case 2: tcg_gen_add_i64(CPU_V001); break;
5804 default: abort();
5805 }
5806 if (op >= NEON_2RM_VPADAL) {
5807 /* Accumulate. */
5808 neon_load_reg64(cpu_V1, rd + pass);
5809 gen_neon_addl(size);
5810 }
5811 neon_store_reg64(cpu_V0, rd + pass);
5812 }
5813 break;
5814 case NEON_2RM_VTRN:
5815 if (size == 2) {
5816 int n;
5817 for (n = 0; n < (q ? 4 : 2); n += 2) {
5818 tmp = neon_load_reg(rm, n);
5819 tmp2 = neon_load_reg(rd, n + 1);
5820 neon_store_reg(rm, n, tmp2);
5821 neon_store_reg(rd, n + 1, tmp);
5822 }
5823 } else {
5824 goto elementwise;
5825 }
5826 break;
5827 case NEON_2RM_VUZP:
5828 if (gen_neon_unzip(rd, rm, size, q)) {
5829 return 1;
5830 }
5831 break;
5832 case NEON_2RM_VZIP:
5833 if (gen_neon_zip(rd, rm, size, q)) {
5834 return 1;
5835 }
5836 break;
5837 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5838 /* also VQMOVUN; op field and mnemonics don't line up */
5839 if (rm & 1) {
5840 return 1;
5841 }
5842 TCGV_UNUSED(tmp2);
5843 for (pass = 0; pass < 2; pass++) {
5844 neon_load_reg64(cpu_V0, rm + pass);
5845 tmp = tcg_temp_new_i32();
5846 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5847 tmp, cpu_V0);
5848 if (pass == 0) {
5849 tmp2 = tmp;
5850 } else {
5851 neon_store_reg(rd, 0, tmp2);
5852 neon_store_reg(rd, 1, tmp);
5853 }
5854 }
5855 break;
5856 case NEON_2RM_VSHLL:
5857 if (q || (rd & 1)) {
5858 return 1;
5859 }
5860 tmp = neon_load_reg(rm, 0);
5861 tmp2 = neon_load_reg(rm, 1);
5862 for (pass = 0; pass < 2; pass++) {
5863 if (pass == 1)
5864 tmp = tmp2;
5865 gen_neon_widen(cpu_V0, tmp, size, 1);
5866 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5867 neon_store_reg64(cpu_V0, rd + pass);
5868 }
5869 break;
5870 case NEON_2RM_VCVT_F16_F32:
5871 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5872 q || (rm & 1)) {
5873 return 1;
5874 }
5875 tmp = tcg_temp_new_i32();
5876 tmp2 = tcg_temp_new_i32();
5877 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5878 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5879 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5880 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5881 tcg_gen_shli_i32(tmp2, tmp2, 16);
5882 tcg_gen_or_i32(tmp2, tmp2, tmp);
5883 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5884 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5885 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5886 neon_store_reg(rd, 0, tmp2);
5887 tmp2 = tcg_temp_new_i32();
5888 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5889 tcg_gen_shli_i32(tmp2, tmp2, 16);
5890 tcg_gen_or_i32(tmp2, tmp2, tmp);
5891 neon_store_reg(rd, 1, tmp2);
5892 tcg_temp_free_i32(tmp);
5893 break;
5894 case NEON_2RM_VCVT_F32_F16:
5895 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5896 q || (rd & 1)) {
5897 return 1;
5898 }
5899 tmp3 = tcg_temp_new_i32();
5900 tmp = neon_load_reg(rm, 0);
5901 tmp2 = neon_load_reg(rm, 1);
5902 tcg_gen_ext16u_i32(tmp3, tmp);
5903 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5904 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5905 tcg_gen_shri_i32(tmp3, tmp, 16);
5906 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5907 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5908 tcg_temp_free_i32(tmp);
5909 tcg_gen_ext16u_i32(tmp3, tmp2);
5910 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5911 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5912 tcg_gen_shri_i32(tmp3, tmp2, 16);
5913 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5914 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5915 tcg_temp_free_i32(tmp2);
5916 tcg_temp_free_i32(tmp3);
5917 break;
5918 default:
5919 elementwise:
5920 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5921 if (neon_2rm_is_float_op(op)) {
5922 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5923 neon_reg_offset(rm, pass));
5924 TCGV_UNUSED(tmp);
5925 } else {
5926 tmp = neon_load_reg(rm, pass);
5927 }
5928 switch (op) {
5929 case NEON_2RM_VREV32:
5930 switch (size) {
5931 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5932 case 1: gen_swap_half(tmp); break;
5933 default: abort();
5934 }
5935 break;
5936 case NEON_2RM_VREV16:
5937 gen_rev16(tmp);
5938 break;
5939 case NEON_2RM_VCLS:
5940 switch (size) {
5941 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5942 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5943 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5944 default: abort();
5945 }
5946 break;
5947 case NEON_2RM_VCLZ:
5948 switch (size) {
5949 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5950 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5951 case 2: gen_helper_clz(tmp, tmp); break;
5952 default: abort();
5953 }
5954 break;
5955 case NEON_2RM_VCNT:
5956 gen_helper_neon_cnt_u8(tmp, tmp);
5957 break;
5958 case NEON_2RM_VMVN:
5959 tcg_gen_not_i32(tmp, tmp);
5960 break;
5961 case NEON_2RM_VQABS:
5962 switch (size) {
5963 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
5964 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
5965 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
5966 default: abort();
5967 }
5968 break;
5969 case NEON_2RM_VQNEG:
5970 switch (size) {
5971 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
5972 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
5973 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
5974 default: abort();
5975 }
5976 break;
5977 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
5978 tmp2 = tcg_const_i32(0);
5979 switch(size) {
5980 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5981 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5982 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5983 default: abort();
5984 }
5985 tcg_temp_free(tmp2);
5986 if (op == NEON_2RM_VCLE0) {
5987 tcg_gen_not_i32(tmp, tmp);
5988 }
5989 break;
5990 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
5991 tmp2 = tcg_const_i32(0);
5992 switch(size) {
5993 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5994 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5995 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5996 default: abort();
5997 }
5998 tcg_temp_free(tmp2);
5999 if (op == NEON_2RM_VCLT0) {
6000 tcg_gen_not_i32(tmp, tmp);
6001 }
6002 break;
6003 case NEON_2RM_VCEQ0:
6004 tmp2 = tcg_const_i32(0);
6005 switch(size) {
6006 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6007 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6008 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6009 default: abort();
6010 }
6011 tcg_temp_free(tmp2);
6012 break;
6013 case NEON_2RM_VABS:
6014 switch(size) {
6015 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6016 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6017 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6018 default: abort();
6019 }
6020 break;
6021 case NEON_2RM_VNEG:
6022 tmp2 = tcg_const_i32(0);
6023 gen_neon_rsb(size, tmp, tmp2);
6024 tcg_temp_free(tmp2);
6025 break;
6026 case NEON_2RM_VCGT0_F:
6027 tmp2 = tcg_const_i32(0);
6028 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
6029 tcg_temp_free(tmp2);
6030 break;
6031 case NEON_2RM_VCGE0_F:
6032 tmp2 = tcg_const_i32(0);
6033 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
6034 tcg_temp_free(tmp2);
6035 break;
6036 case NEON_2RM_VCEQ0_F:
6037 tmp2 = tcg_const_i32(0);
6038 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
6039 tcg_temp_free(tmp2);
6040 break;
6041 case NEON_2RM_VCLE0_F:
6042 tmp2 = tcg_const_i32(0);
6043 gen_helper_neon_cge_f32(tmp, tmp2, tmp);
6044 tcg_temp_free(tmp2);
6045 break;
6046 case NEON_2RM_VCLT0_F:
6047 tmp2 = tcg_const_i32(0);
6048 gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
6049 tcg_temp_free(tmp2);
6050 break;
6051 case NEON_2RM_VABS_F:
6052 gen_vfp_abs(0);
6053 break;
6054 case NEON_2RM_VNEG_F:
6055 gen_vfp_neg(0);
6056 break;
6057 case NEON_2RM_VSWP:
6058 tmp2 = neon_load_reg(rd, pass);
6059 neon_store_reg(rm, pass, tmp2);
6060 break;
6061 case NEON_2RM_VTRN:
6062 tmp2 = neon_load_reg(rd, pass);
6063 switch (size) {
6064 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6065 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6066 default: abort();
6067 }
6068 neon_store_reg(rm, pass, tmp2);
6069 break;
6070 case NEON_2RM_VRECPE:
6071 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6072 break;
6073 case NEON_2RM_VRSQRTE:
6074 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6075 break;
6076 case NEON_2RM_VRECPE_F:
6077 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6078 break;
6079 case NEON_2RM_VRSQRTE_F:
6080 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6081 break;
6082 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6083 gen_vfp_sito(0, 1);
6084 break;
6085 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6086 gen_vfp_uito(0, 1);
6087 break;
6088 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6089 gen_vfp_tosiz(0, 1);
6090 break;
6091 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6092 gen_vfp_touiz(0, 1);
6093 break;
6094 default:
6095 /* Reserved op values were caught by the
6096 * neon_2rm_sizes[] check earlier.
6097 */
6098 abort();
6099 }
6100 if (neon_2rm_is_float_op(op)) {
6101 tcg_gen_st_f32(cpu_F0s, cpu_env,
6102 neon_reg_offset(rd, pass));
6103 } else {
6104 neon_store_reg(rd, pass, tmp);
6105 }
6106 }
6107 break;
6108 }
6109 } else if ((insn & (1 << 10)) == 0) {
6110 /* VTBL, VTBX. */
6111 int n = ((insn >> 8) & 3) + 1;
6112 if ((rn + n) > 32) {
6113 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6114 * helper function running off the end of the register file.
6115 */
6116 return 1;
6117 }
6118 n <<= 3;
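/* Bit 6 distinguishes VTBX from VTBL: for VTBX an out-of-range index
 * leaves the destination byte unchanged, so the old value of rd is
 * passed to the helper; for VTBL it reads as zero, hence the zeroed
 * temporary in the else branches below. */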
6119 if (insn & (1 << 6)) {
6120 tmp = neon_load_reg(rd, 0);
6121 } else {
6122 tmp = tcg_temp_new_i32();
6123 tcg_gen_movi_i32(tmp, 0);
6124 }
6125 tmp2 = neon_load_reg(rm, 0);
6126 tmp4 = tcg_const_i32(rn);
6127 tmp5 = tcg_const_i32(n);
6128 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
6129 tcg_temp_free_i32(tmp);
6130 if (insn & (1 << 6)) {
6131 tmp = neon_load_reg(rd, 1);
6132 } else {
6133 tmp = tcg_temp_new_i32();
6134 tcg_gen_movi_i32(tmp, 0);
6135 }
6136 tmp3 = neon_load_reg(rm, 1);
6137 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
6138 tcg_temp_free_i32(tmp5);
6139 tcg_temp_free_i32(tmp4);
6140 neon_store_reg(rd, 0, tmp2);
6141 neon_store_reg(rd, 1, tmp3);
6142 tcg_temp_free_i32(tmp);
6143 } else if ((insn & 0x380) == 0) {
6144 /* VDUP */
6145 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6146 return 1;
6147 }
6148 if (insn & (1 << 19)) {
6149 tmp = neon_load_reg(rm, 1);
6150 } else {
6151 tmp = neon_load_reg(rm, 0);
6152 }
6153 if (insn & (1 << 16)) {
6154 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6155 } else if (insn & (1 << 17)) {
6156 if ((insn >> 18) & 1)
6157 gen_neon_dup_high16(tmp);
6158 else
6159 gen_neon_dup_low16(tmp);
6160 }
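/* tmp now holds the selected scalar replicated across a 32-bit word;
 * copy it into every pass of the destination register. */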
6161 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6162 tmp2 = tcg_temp_new_i32();
6163 tcg_gen_mov_i32(tmp2, tmp);
6164 neon_store_reg(rd, pass, tmp2);
6165 }
6166 tcg_temp_free_i32(tmp);
6167 } else {
6168 return 1;
6169 }
6170 }
6171 }
6172 return 0;
6173 }
6174
6175 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
6176 {
6177 int crn = (insn >> 16) & 0xf;
6178 int crm = insn & 0xf;
6179 int op1 = (insn >> 21) & 7;
6180 int op2 = (insn >> 5) & 7;
6181 int rt = (insn >> 12) & 0xf;
6182 TCGv tmp;
6183
6184 /* Minimal set of debug registers, since we don't support debug */
6185 if (op1 == 0 && crn == 0 && op2 == 0) {
6186 switch (crm) {
6187 case 0:
6188 /* DBGDIDR: just RAZ. In particular this means the
6189 * "debug architecture version" bits will read as
6190 * a reserved value, which should cause Linux to
6191 * not try to use the debug hardware.
6192 */
6193 tmp = tcg_const_i32(0);
6194 store_reg(s, rt, tmp);
6195 return 0;
6196 case 1:
6197 case 2:
6198 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6199 * don't implement memory mapped debug components
6200 */
6201 if (ENABLE_ARCH_7) {
6202 tmp = tcg_const_i32(0);
6203 store_reg(s, rt, tmp);
6204 return 0;
6205 }
6206 break;
6207 default:
6208 break;
6209 }
6210 }
6211
6212 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6213 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6214 /* TEECR */
6215 if (IS_USER(s))
6216 return 1;
6217 tmp = load_cpu_field(teecr);
6218 store_reg(s, rt, tmp);
6219 return 0;
6220 }
6221 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6222 /* TEEHBR */
6223 if (IS_USER(s) && (env->teecr & 1))
6224 return 1;
6225 tmp = load_cpu_field(teehbr);
6226 store_reg(s, rt, tmp);
6227 return 0;
6228 }
6229 }
6230 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
6231 op1, crn, crm, op2);
6232 return 1;
6233 }
6234
6235 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6236 {
6237 int crn = (insn >> 16) & 0xf;
6238 int crm = insn & 0xf;
6239 int op1 = (insn >> 21) & 7;
6240 int op2 = (insn >> 5) & 7;
6241 int rt = (insn >> 12) & 0xf;
6242 TCGv tmp;
6243
6244 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6245 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6246 /* TEECR */
6247 if (IS_USER(s))
6248 return 1;
6249 tmp = load_reg(s, rt);
6250 gen_helper_set_teecr(cpu_env, tmp);
6251 tcg_temp_free_i32(tmp);
6252 return 0;
6253 }
6254 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6255 /* TEEHBR */
6256 if (IS_USER(s) && (env->teecr & 1))
6257 return 1;
6258 tmp = load_reg(s, rt);
6259 store_cpu_field(tmp, teehbr);
6260 return 0;
6261 }
6262 }
6263 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6264 op1, crn, crm, op2);
6265 return 1;
6266 }
6267
6268 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6269 {
6270 int cpnum;
6271
6272 cpnum = (insn >> 8) & 0xf;
6273 if (arm_feature(env, ARM_FEATURE_XSCALE)
6274 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6275 return 1;
6276
6277 switch (cpnum) {
6278 case 0:
6279 case 1:
6280 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6281 return disas_iwmmxt_insn(env, s, insn);
6282 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6283 return disas_dsp_insn(env, s, insn);
6284 }
6285 return 1;
6286 case 10:
6287 case 11:
6288 return disas_vfp_insn (env, s, insn);
6289 case 14:
6290 /* Coprocessors 7-15 are architecturally reserved by ARM.
6291 Unfortunately Intel decided to ignore this. */
6292 if (arm_feature(env, ARM_FEATURE_XSCALE))
6293 goto board;
6294 if (insn & (1 << 20))
6295 return disas_cp14_read(env, s, insn);
6296 else
6297 return disas_cp14_write(env, s, insn);
6298 case 15:
6299 return disas_cp15_insn (env, s, insn);
6300 default:
6301 board:
6302 /* Unknown coprocessor. See if the board has hooked it. */
6303 return disas_cp_insn (env, s, insn);
6304 }
6305 }
6306
6307
6308 /* Store a 64-bit value to a register pair. Clobbers val. */
6309 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6310 {
6311 TCGv tmp;
6312 tmp = tcg_temp_new_i32();
6313 tcg_gen_trunc_i64_i32(tmp, val);
6314 store_reg(s, rlow, tmp);
6315 tmp = tcg_temp_new_i32();
6316 tcg_gen_shri_i64(val, val, 32);
6317 tcg_gen_trunc_i64_i32(tmp, val);
6318 store_reg(s, rhigh, tmp);
6319 }
6320
6321 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6322 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6323 {
6324 TCGv_i64 tmp;
6325 TCGv tmp2;
6326
6327 /* Load value and extend to 64 bits. */
6328 tmp = tcg_temp_new_i64();
6329 tmp2 = load_reg(s, rlow);
6330 tcg_gen_extu_i32_i64(tmp, tmp2);
6331 tcg_temp_free_i32(tmp2);
6332 tcg_gen_add_i64(val, val, tmp);
6333 tcg_temp_free_i64(tmp);
6334 }
6335
6336 /* load and add a 64-bit value from a register pair. */
6337 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6338 {
6339 TCGv_i64 tmp;
6340 TCGv tmpl;
6341 TCGv tmph;
6342
6343 /* Load 64-bit value rd:rn. */
6344 tmpl = load_reg(s, rlow);
6345 tmph = load_reg(s, rhigh);
6346 tmp = tcg_temp_new_i64();
6347 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6348 tcg_temp_free_i32(tmpl);
6349 tcg_temp_free_i32(tmph);
6350 tcg_gen_add_i64(val, val, tmp);
6351 tcg_temp_free_i64(tmp);
6352 }
6353
6354 /* Set N and Z flags from a 64-bit value. */
6355 static void gen_logicq_cc(TCGv_i64 val)
6356 {
6357 TCGv tmp = tcg_temp_new_i32();
6358 gen_helper_logicq_cc(tmp, val);
6359 gen_logic_CC(tmp);
6360 tcg_temp_free_i32(tmp);
6361 }
6362
6363 /* Load/Store exclusive instructions are implemented by remembering
6364 the value/address loaded, and seeing if these are the same
6365 when the store is performed. This should be sufficient to implement
6366 the architecturally mandated semantics, and avoids having to monitor
6367 regular stores.
6368
6369 In system emulation mode only one CPU will be running at once, so
6370 this sequence is effectively atomic. In user emulation mode we
6371 throw an exception and handle the atomic operation elsewhere. */
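/* Roughly, for a pair like
 *     ldrex r1, [r0]
 *     strex r2, r3, [r0]
 * gen_load_exclusive() records r0 in cpu_exclusive_addr and the loaded
 * word in cpu_exclusive_val; gen_store_exclusive() then only performs
 * the store (and sets r2 to 0) if both still match, otherwise it sets
 * r2 to 1 and leaves memory unchanged. */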
6372 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6373 TCGv addr, int size)
6374 {
6375 TCGv tmp;
6376
6377 switch (size) {
6378 case 0:
6379 tmp = gen_ld8u(addr, IS_USER(s));
6380 break;
6381 case 1:
6382 tmp = gen_ld16u(addr, IS_USER(s));
6383 break;
6384 case 2:
6385 case 3:
6386 tmp = gen_ld32(addr, IS_USER(s));
6387 break;
6388 default:
6389 abort();
6390 }
6391 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6392 store_reg(s, rt, tmp);
6393 if (size == 3) {
6394 TCGv tmp2 = tcg_temp_new_i32();
6395 tcg_gen_addi_i32(tmp2, addr, 4);
6396 tmp = gen_ld32(tmp2, IS_USER(s));
6397 tcg_temp_free_i32(tmp2);
6398 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6399 store_reg(s, rt2, tmp);
6400 }
6401 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6402 }
6403
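/* clrex simply points cpu_exclusive_addr at a value that the address
 * comparison in gen_store_exclusive() will not match, so a subsequent
 * strex fails until a new ldrex is seen. */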
6404 static void gen_clrex(DisasContext *s)
6405 {
6406 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6407 }
6408
6409 #ifdef CONFIG_USER_ONLY
6410 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6411 TCGv addr, int size)
6412 {
6413 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6414 tcg_gen_movi_i32(cpu_exclusive_info,
6415 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6416 gen_exception_insn(s, 4, EXCP_STREX);
6417 }
6418 #else
6419 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6420 TCGv addr, int size)
6421 {
6422 TCGv tmp;
6423 int done_label;
6424 int fail_label;
6425
6426 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6427 [addr] = {Rt};
6428 {Rd} = 0;
6429 } else {
6430 {Rd} = 1;
6431 } */
6432 fail_label = gen_new_label();
6433 done_label = gen_new_label();
6434 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6435 switch (size) {
6436 case 0:
6437 tmp = gen_ld8u(addr, IS_USER(s));
6438 break;
6439 case 1:
6440 tmp = gen_ld16u(addr, IS_USER(s));
6441 break;
6442 case 2:
6443 case 3:
6444 tmp = gen_ld32(addr, IS_USER(s));
6445 break;
6446 default:
6447 abort();
6448 }
6449 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6450 tcg_temp_free_i32(tmp);
6451 if (size == 3) {
6452 TCGv tmp2 = tcg_temp_new_i32();
6453 tcg_gen_addi_i32(tmp2, addr, 4);
6454 tmp = gen_ld32(tmp2, IS_USER(s));
6455 tcg_temp_free_i32(tmp2);
6456 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6457 tcg_temp_free_i32(tmp);
6458 }
6459 tmp = load_reg(s, rt);
6460 switch (size) {
6461 case 0:
6462 gen_st8(tmp, addr, IS_USER(s));
6463 break;
6464 case 1:
6465 gen_st16(tmp, addr, IS_USER(s));
6466 break;
6467 case 2:
6468 case 3:
6469 gen_st32(tmp, addr, IS_USER(s));
6470 break;
6471 default:
6472 abort();
6473 }
6474 if (size == 3) {
6475 tcg_gen_addi_i32(addr, addr, 4);
6476 tmp = load_reg(s, rt2);
6477 gen_st32(tmp, addr, IS_USER(s));
6478 }
6479 tcg_gen_movi_i32(cpu_R[rd], 0);
6480 tcg_gen_br(done_label);
6481 gen_set_label(fail_label);
6482 tcg_gen_movi_i32(cpu_R[rd], 1);
6483 gen_set_label(done_label);
6484 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6485 }
6486 #endif
6487
6488 static void disas_arm_insn(CPUState * env, DisasContext *s)
6489 {
6490 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6491 TCGv tmp;
6492 TCGv tmp2;
6493 TCGv tmp3;
6494 TCGv addr;
6495 TCGv_i64 tmp64;
6496
6497 insn = ldl_code(s->pc);
6498 s->pc += 4;
6499
6500 /* M variants do not implement ARM mode. */
6501 if (IS_M(env))
6502 goto illegal_op;
6503 cond = insn >> 28;
6504 if (cond == 0xf){
6505 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6506 * choose to UNDEF. In ARMv5 and above the space is used
6507 * for miscellaneous unconditional instructions.
6508 */
6509 ARCH(5);
6510
6511 /* Unconditional instructions. */
6512 if (((insn >> 25) & 7) == 1) {
6513 /* NEON Data processing. */
6514 if (!arm_feature(env, ARM_FEATURE_NEON))
6515 goto illegal_op;
6516
6517 if (disas_neon_data_insn(env, s, insn))
6518 goto illegal_op;
6519 return;
6520 }
6521 if ((insn & 0x0f100000) == 0x04000000) {
6522 /* NEON load/store. */
6523 if (!arm_feature(env, ARM_FEATURE_NEON))
6524 goto illegal_op;
6525
6526 if (disas_neon_ls_insn(env, s, insn))
6527 goto illegal_op;
6528 return;
6529 }
6530 if (((insn & 0x0f30f000) == 0x0510f000) ||
6531 ((insn & 0x0f30f010) == 0x0710f000)) {
6532 if ((insn & (1 << 22)) == 0) {
6533 /* PLDW; v7MP */
6534 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6535 goto illegal_op;
6536 }
6537 }
6538 /* Otherwise PLD; v5TE+ */
6539 ARCH(5TE);
6540 return;
6541 }
6542 if (((insn & 0x0f70f000) == 0x0450f000) ||
6543 ((insn & 0x0f70f010) == 0x0650f000)) {
6544 ARCH(7);
6545 return; /* PLI; V7 */
6546 }
6547 if (((insn & 0x0f700000) == 0x04100000) ||
6548 ((insn & 0x0f700010) == 0x06100000)) {
6549 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6550 goto illegal_op;
6551 }
6552 return; /* v7MP: Unallocated memory hint: must NOP */
6553 }
6554
6555 if ((insn & 0x0ffffdff) == 0x01010000) {
6556 ARCH(6);
6557 /* setend */
6558 if (insn & (1 << 9)) {
6559 /* BE8 mode not implemented. */
6560 goto illegal_op;
6561 }
6562 return;
6563 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6564 switch ((insn >> 4) & 0xf) {
6565 case 1: /* clrex */
6566 ARCH(6K);
6567 gen_clrex(s);
6568 return;
6569 case 4: /* dsb */
6570 case 5: /* dmb */
6571 case 6: /* isb */
6572 ARCH(7);
6573 /* We don't emulate caches, so these are no-ops. */
6574 return;
6575 default:
6576 goto illegal_op;
6577 }
6578 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6579 /* srs */
6580 int32_t offset;
6581 if (IS_USER(s))
6582 goto illegal_op;
6583 ARCH(6);
6584 op1 = (insn & 0x1f);
6585 addr = tcg_temp_new_i32();
6586 tmp = tcg_const_i32(op1);
6587 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6588 tcg_temp_free_i32(tmp);
6589 i = (insn >> 23) & 3;
6590 switch (i) {
6591 case 0: offset = -4; break; /* DA */
6592 case 1: offset = 0; break; /* IA */
6593 case 2: offset = -8; break; /* DB */
6594 case 3: offset = 4; break; /* IB */
6595 default: abort();
6596 }
6597 if (offset)
6598 tcg_gen_addi_i32(addr, addr, offset);
6599 tmp = load_reg(s, 14);
6600 gen_st32(tmp, addr, 0);
6601 tmp = load_cpu_field(spsr);
6602 tcg_gen_addi_i32(addr, addr, 4);
6603 gen_st32(tmp, addr, 0);
6604 if (insn & (1 << 21)) {
6605 /* Base writeback. */
6606 switch (i) {
6607 case 0: offset = -8; break;
6608 case 1: offset = 4; break;
6609 case 2: offset = -4; break;
6610 case 3: offset = 0; break;
6611 default: abort();
6612 }
6613 if (offset)
6614 tcg_gen_addi_i32(addr, addr, offset);
6615 tmp = tcg_const_i32(op1);
6616 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6617 tcg_temp_free_i32(tmp);
6618 tcg_temp_free_i32(addr);
6619 } else {
6620 tcg_temp_free_i32(addr);
6621 }
6622 return;
6623 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6624 /* rfe */
6625 int32_t offset;
6626 if (IS_USER(s))
6627 goto illegal_op;
6628 ARCH(6);
6629 rn = (insn >> 16) & 0xf;
6630 addr = load_reg(s, rn);
6631 i = (insn >> 23) & 3;
6632 switch (i) {
6633 case 0: offset = -4; break; /* DA */
6634 case 1: offset = 0; break; /* IA */
6635 case 2: offset = -8; break; /* DB */
6636 case 3: offset = 4; break; /* IB */
6637 default: abort();
6638 }
6639 if (offset)
6640 tcg_gen_addi_i32(addr, addr, offset);
6641 /* Load PC into tmp and CPSR into tmp2. */
6642 tmp = gen_ld32(addr, 0);
6643 tcg_gen_addi_i32(addr, addr, 4);
6644 tmp2 = gen_ld32(addr, 0);
6645 if (insn & (1 << 21)) {
6646 /* Base writeback. */
6647 switch (i) {
6648 case 0: offset = -8; break;
6649 case 1: offset = 4; break;
6650 case 2: offset = -4; break;
6651 case 3: offset = 0; break;
6652 default: abort();
6653 }
6654 if (offset)
6655 tcg_gen_addi_i32(addr, addr, offset);
6656 store_reg(s, rn, addr);
6657 } else {
6658 tcg_temp_free_i32(addr);
6659 }
6660 gen_rfe(s, tmp, tmp2);
6661 return;
6662 } else if ((insn & 0x0e000000) == 0x0a000000) {
6663 /* branch link and change to thumb (blx <offset>) */
6664 int32_t offset;
6665
6666 val = (uint32_t)s->pc;
6667 tmp = tcg_temp_new_i32();
6668 tcg_gen_movi_i32(tmp, val);
6669 store_reg(s, 14, tmp);
6670 /* Sign-extend the 24-bit offset */
6671 offset = (((int32_t)insn) << 8) >> 8;
6672 /* offset * 4 + bit24 * 2 + (thumb bit) */
6673 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6674 /* pipeline offset */
6675 val += 4;
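/* e.g. a BLX at address 0x1000 with imm24 = 1 and the H bit (insn bit
 * 24) set: s->pc is already 0x1004 here, so
 * val = 0x1004 + ((1 << 2) | 2 | 1) + 4 = 0x100f, i.e. target
 * 0x1000 + 8 + 4 + 2 with the Thumb bit set for gen_bx_im(). */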
6676 /* protected by the ARCH(5) check above, near the start of the uncond block */
6677 gen_bx_im(s, val);
6678 return;
6679 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6680 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6681 /* iWMMXt register transfer. */
6682 if (env->cp15.c15_cpar & (1 << 1))
6683 if (!disas_iwmmxt_insn(env, s, insn))
6684 return;
6685 }
6686 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6687 /* Coprocessor double register transfer. */
6688 ARCH(5TE);
6689 } else if ((insn & 0x0f000010) == 0x0e000010) {
6690 /* Additional coprocessor register transfer. */
6691 } else if ((insn & 0x0ff10020) == 0x01000000) {
6692 uint32_t mask;
6693 uint32_t val;
6694 /* cps (privileged) */
6695 if (IS_USER(s))
6696 return;
6697 mask = val = 0;
6698 if (insn & (1 << 19)) {
6699 if (insn & (1 << 8))
6700 mask |= CPSR_A;
6701 if (insn & (1 << 7))
6702 mask |= CPSR_I;
6703 if (insn & (1 << 6))
6704 mask |= CPSR_F;
6705 if (insn & (1 << 18))
6706 val |= mask;
6707 }
6708 if (insn & (1 << 17)) {
6709 mask |= CPSR_M;
6710 val |= (insn & 0x1f);
6711 }
6712 if (mask) {
6713 gen_set_psr_im(s, mask, 0, val);
6714 }
6715 return;
6716 }
6717 goto illegal_op;
6718 }
6719 if (cond != 0xe) {
6720 /* if not always execute, we generate a conditional jump to
6721 next instruction */
6722 s->condlabel = gen_new_label();
6723 gen_test_cc(cond ^ 1, s->condlabel);
6724 s->condjmp = 1;
6725 }
6726 if ((insn & 0x0f900000) == 0x03000000) {
6727 if ((insn & (1 << 21)) == 0) {
6728 ARCH(6T2);
6729 rd = (insn >> 12) & 0xf;
6730 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6731 if ((insn & (1 << 22)) == 0) {
6732 /* MOVW */
6733 tmp = tcg_temp_new_i32();
6734 tcg_gen_movi_i32(tmp, val);
6735 } else {
6736 /* MOVT */
6737 tmp = load_reg(s, rd);
6738 tcg_gen_ext16u_i32(tmp, tmp);
6739 tcg_gen_ori_i32(tmp, tmp, val << 16);
6740 }
6741 store_reg(s, rd, tmp);
6742 } else {
6743 if (((insn >> 12) & 0xf) != 0xf)
6744 goto illegal_op;
6745 if (((insn >> 16) & 0xf) == 0) {
6746 gen_nop_hint(s, insn & 0xff);
6747 } else {
6748 /* CPSR = immediate */
6749 val = insn & 0xff;
6750 shift = ((insn >> 8) & 0xf) * 2;
6751 if (shift)
6752 val = (val >> shift) | (val << (32 - shift));
6753 i = ((insn & (1 << 22)) != 0);
6754 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6755 goto illegal_op;
6756 }
6757 }
6758 } else if ((insn & 0x0f900000) == 0x01000000
6759 && (insn & 0x00000090) != 0x00000090) {
6760 /* miscellaneous instructions */
6761 op1 = (insn >> 21) & 3;
6762 sh = (insn >> 4) & 0xf;
6763 rm = insn & 0xf;
6764 switch (sh) {
6765 case 0x0: /* move program status register */
6766 if (op1 & 1) {
6767 /* PSR = reg */
6768 tmp = load_reg(s, rm);
6769 i = ((op1 & 2) != 0);
6770 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6771 goto illegal_op;
6772 } else {
6773 /* reg = PSR */
6774 rd = (insn >> 12) & 0xf;
6775 if (op1 & 2) {
6776 if (IS_USER(s))
6777 goto illegal_op;
6778 tmp = load_cpu_field(spsr);
6779 } else {
6780 tmp = tcg_temp_new_i32();
6781 gen_helper_cpsr_read(tmp);
6782 }
6783 store_reg(s, rd, tmp);
6784 }
6785 break;
6786 case 0x1:
6787 if (op1 == 1) {
6788 /* branch/exchange thumb (bx). */
6789 ARCH(4T);
6790 tmp = load_reg(s, rm);
6791 gen_bx(s, tmp);
6792 } else if (op1 == 3) {
6793 /* clz */
6794 ARCH(5);
6795 rd = (insn >> 12) & 0xf;
6796 tmp = load_reg(s, rm);
6797 gen_helper_clz(tmp, tmp);
6798 store_reg(s, rd, tmp);
6799 } else {
6800 goto illegal_op;
6801 }
6802 break;
6803 case 0x2:
6804 if (op1 == 1) {
6805 ARCH(5J); /* bxj */
6806 /* Trivial implementation equivalent to bx. */
6807 tmp = load_reg(s, rm);
6808 gen_bx(s, tmp);
6809 } else {
6810 goto illegal_op;
6811 }
6812 break;
6813 case 0x3:
6814 if (op1 != 1)
6815 goto illegal_op;
6816
6817 ARCH(5);
6818 /* branch link/exchange thumb (blx) */
6819 tmp = load_reg(s, rm);
6820 tmp2 = tcg_temp_new_i32();
6821 tcg_gen_movi_i32(tmp2, s->pc);
6822 store_reg(s, 14, tmp2);
6823 gen_bx(s, tmp);
6824 break;
6825 case 0x5: /* saturating add/subtract */
6826 ARCH(5TE);
6827 rd = (insn >> 12) & 0xf;
6828 rn = (insn >> 16) & 0xf;
6829 tmp = load_reg(s, rm);
6830 tmp2 = load_reg(s, rn);
6831 if (op1 & 2)
6832 gen_helper_double_saturate(tmp2, tmp2);
6833 if (op1 & 1)
6834 gen_helper_sub_saturate(tmp, tmp, tmp2);
6835 else
6836 gen_helper_add_saturate(tmp, tmp, tmp2);
6837 tcg_temp_free_i32(tmp2);
6838 store_reg(s, rd, tmp);
6839 break;
6840 case 7:
6841 /* SMC instruction (op1 == 3)
6842 and undefined instructions (op1 == 0 || op1 == 2)
6843 will trap */
6844 if (op1 != 1) {
6845 goto illegal_op;
6846 }
6847 /* bkpt */
6848 ARCH(5);
6849 gen_exception_insn(s, 4, EXCP_BKPT);
6850 break;
6851 case 0x8: /* signed multiply */
6852 case 0xa:
6853 case 0xc:
6854 case 0xe:
6855 ARCH(5TE);
6856 rs = (insn >> 8) & 0xf;
6857 rn = (insn >> 12) & 0xf;
6858 rd = (insn >> 16) & 0xf;
6859 if (op1 == 1) {
6860 /* (32 * 16) >> 16 */
6861 tmp = load_reg(s, rm);
6862 tmp2 = load_reg(s, rs);
6863 if (sh & 4)
6864 tcg_gen_sari_i32(tmp2, tmp2, 16);
6865 else
6866 gen_sxth(tmp2);
6867 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6868 tcg_gen_shri_i64(tmp64, tmp64, 16);
6869 tmp = tcg_temp_new_i32();
6870 tcg_gen_trunc_i64_i32(tmp, tmp64);
6871 tcg_temp_free_i64(tmp64);
6872 if ((sh & 2) == 0) {
6873 tmp2 = load_reg(s, rn);
6874 gen_helper_add_setq(tmp, tmp, tmp2);
6875 tcg_temp_free_i32(tmp2);
6876 }
6877 store_reg(s, rd, tmp);
6878 } else {
6879 /* 16 * 16 */
6880 tmp = load_reg(s, rm);
6881 tmp2 = load_reg(s, rs);
6882 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6883 tcg_temp_free_i32(tmp2);
6884 if (op1 == 2) {
6885 tmp64 = tcg_temp_new_i64();
6886 tcg_gen_ext_i32_i64(tmp64, tmp);
6887 tcg_temp_free_i32(tmp);
6888 gen_addq(s, tmp64, rn, rd);
6889 gen_storeq_reg(s, rn, rd, tmp64);
6890 tcg_temp_free_i64(tmp64);
6891 } else {
6892 if (op1 == 0) {
6893 tmp2 = load_reg(s, rn);
6894 gen_helper_add_setq(tmp, tmp, tmp2);
6895 tcg_temp_free_i32(tmp2);
6896 }
6897 store_reg(s, rd, tmp);
6898 }
6899 }
6900 break;
6901 default:
6902 goto illegal_op;
6903 }
6904 } else if (((insn & 0x0e000000) == 0 &&
6905 (insn & 0x00000090) != 0x90) ||
6906 ((insn & 0x0e000000) == (1 << 25))) {
6907 int set_cc, logic_cc, shiftop;
6908
6909 op1 = (insn >> 21) & 0xf;
6910 set_cc = (insn >> 20) & 1;
6911 logic_cc = table_logic_cc[op1] & set_cc;
6912
6913 /* data processing instruction */
6914 if (insn & (1 << 25)) {
6915 /* immediate operand */
6916 val = insn & 0xff;
6917 shift = ((insn >> 8) & 0xf) * 2;
6918 if (shift) {
6919 val = (val >> shift) | (val << (32 - shift));
6920 }
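/* e.g. an immediate field of 0x4ff: shift = 4 * 2 = 8, so
 * val = 0xff rotated right by 8 = 0xff000000. */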
6921 tmp2 = tcg_temp_new_i32();
6922 tcg_gen_movi_i32(tmp2, val);
6923 if (logic_cc && shift) {
6924 gen_set_CF_bit31(tmp2);
6925 }
6926 } else {
6927 /* register */
6928 rm = (insn) & 0xf;
6929 tmp2 = load_reg(s, rm);
6930 shiftop = (insn >> 5) & 3;
6931 if (!(insn & (1 << 4))) {
6932 shift = (insn >> 7) & 0x1f;
6933 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6934 } else {
6935 rs = (insn >> 8) & 0xf;
6936 tmp = load_reg(s, rs);
6937 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6938 }
6939 }
6940 if (op1 != 0x0f && op1 != 0x0d) {
6941 rn = (insn >> 16) & 0xf;
6942 tmp = load_reg(s, rn);
6943 } else {
6944 TCGV_UNUSED(tmp);
6945 }
6946 rd = (insn >> 12) & 0xf;
6947 switch(op1) {
6948 case 0x00:
6949 tcg_gen_and_i32(tmp, tmp, tmp2);
6950 if (logic_cc) {
6951 gen_logic_CC(tmp);
6952 }
6953 store_reg_bx(env, s, rd, tmp);
6954 break;
6955 case 0x01:
6956 tcg_gen_xor_i32(tmp, tmp, tmp2);
6957 if (logic_cc) {
6958 gen_logic_CC(tmp);
6959 }
6960 store_reg_bx(env, s, rd, tmp);
6961 break;
6962 case 0x02:
6963 if (set_cc && rd == 15) {
6964 /* SUBS r15, ... is used for exception return. */
6965 if (IS_USER(s)) {
6966 goto illegal_op;
6967 }
6968 gen_helper_sub_cc(tmp, tmp, tmp2);
6969 gen_exception_return(s, tmp);
6970 } else {
6971 if (set_cc) {
6972 gen_helper_sub_cc(tmp, tmp, tmp2);
6973 } else {
6974 tcg_gen_sub_i32(tmp, tmp, tmp2);
6975 }
6976 store_reg_bx(env, s, rd, tmp);
6977 }
6978 break;
6979 case 0x03:
6980 if (set_cc) {
6981 gen_helper_sub_cc(tmp, tmp2, tmp);
6982 } else {
6983 tcg_gen_sub_i32(tmp, tmp2, tmp);
6984 }
6985 store_reg_bx(env, s, rd, tmp);
6986 break;
6987 case 0x04:
6988 if (set_cc) {
6989 gen_helper_add_cc(tmp, tmp, tmp2);
6990 } else {
6991 tcg_gen_add_i32(tmp, tmp, tmp2);
6992 }
6993 store_reg_bx(env, s, rd, tmp);
6994 break;
6995 case 0x05:
6996 if (set_cc) {
6997 gen_helper_adc_cc(tmp, tmp, tmp2);
6998 } else {
6999 gen_add_carry(tmp, tmp, tmp2);
7000 }
7001 store_reg_bx(env, s, rd, tmp);
7002 break;
7003 case 0x06:
7004 if (set_cc) {
7005 gen_helper_sbc_cc(tmp, tmp, tmp2);
7006 } else {
7007 gen_sub_carry(tmp, tmp, tmp2);
7008 }
7009 store_reg_bx(env, s, rd, tmp);
7010 break;
7011 case 0x07:
7012 if (set_cc) {
7013 gen_helper_sbc_cc(tmp, tmp2, tmp);
7014 } else {
7015 gen_sub_carry(tmp, tmp2, tmp);
7016 }
7017 store_reg_bx(env, s, rd, tmp);
7018 break;
7019 case 0x08:
7020 if (set_cc) {
7021 tcg_gen_and_i32(tmp, tmp, tmp2);
7022 gen_logic_CC(tmp);
7023 }
7024 tcg_temp_free_i32(tmp);
7025 break;
7026 case 0x09:
7027 if (set_cc) {
7028 tcg_gen_xor_i32(tmp, tmp, tmp2);
7029 gen_logic_CC(tmp);
7030 }
7031 tcg_temp_free_i32(tmp);
7032 break;
7033 case 0x0a:
7034 if (set_cc) {
7035 gen_helper_sub_cc(tmp, tmp, tmp2);
7036 }
7037 tcg_temp_free_i32(tmp);
7038 break;
7039 case 0x0b:
7040 if (set_cc) {
7041 gen_helper_add_cc(tmp, tmp, tmp2);
7042 }
7043 tcg_temp_free_i32(tmp);
7044 break;
7045 case 0x0c:
7046 tcg_gen_or_i32(tmp, tmp, tmp2);
7047 if (logic_cc) {
7048 gen_logic_CC(tmp);
7049 }
7050 store_reg_bx(env, s, rd, tmp);
7051 break;
7052 case 0x0d:
7053 if (logic_cc && rd == 15) {
7054 /* MOVS r15, ... is used for exception return. */
7055 if (IS_USER(s)) {
7056 goto illegal_op;
7057 }
7058 gen_exception_return(s, tmp2);
7059 } else {
7060 if (logic_cc) {
7061 gen_logic_CC(tmp2);
7062 }
7063 store_reg_bx(env, s, rd, tmp2);
7064 }
7065 break;
7066 case 0x0e:
7067 tcg_gen_andc_i32(tmp, tmp, tmp2);
7068 if (logic_cc) {
7069 gen_logic_CC(tmp);
7070 }
7071 store_reg_bx(env, s, rd, tmp);
7072 break;
7073 default:
7074 case 0x0f:
7075 tcg_gen_not_i32(tmp2, tmp2);
7076 if (logic_cc) {
7077 gen_logic_CC(tmp2);
7078 }
7079 store_reg_bx(env, s, rd, tmp2);
7080 break;
7081 }
7082 if (op1 != 0x0f && op1 != 0x0d) {
7083 tcg_temp_free_i32(tmp2);
7084 }
7085 } else {
7086 /* other instructions */
7087 op1 = (insn >> 24) & 0xf;
7088 switch(op1) {
7089 case 0x0:
7090 case 0x1:
7091 /* multiplies, extra load/stores */
7092 sh = (insn >> 5) & 3;
7093 if (sh == 0) {
7094 if (op1 == 0x0) {
7095 rd = (insn >> 16) & 0xf;
7096 rn = (insn >> 12) & 0xf;
7097 rs = (insn >> 8) & 0xf;
7098 rm = (insn) & 0xf;
7099 op1 = (insn >> 20) & 0xf;
7100 switch (op1) {
7101 case 0: case 1: case 2: case 3: case 6:
7102 /* 32 bit mul */
7103 tmp = load_reg(s, rs);
7104 tmp2 = load_reg(s, rm);
7105 tcg_gen_mul_i32(tmp, tmp, tmp2);
7106 tcg_temp_free_i32(tmp2);
7107 if (insn & (1 << 22)) {
7108 /* Subtract (mls) */
7109 ARCH(6T2);
7110 tmp2 = load_reg(s, rn);
7111 tcg_gen_sub_i32(tmp, tmp2, tmp);
7112 tcg_temp_free_i32(tmp2);
7113 } else if (insn & (1 << 21)) {
7114 /* Add */
7115 tmp2 = load_reg(s, rn);
7116 tcg_gen_add_i32(tmp, tmp, tmp2);
7117 tcg_temp_free_i32(tmp2);
7118 }
7119 if (insn & (1 << 20))
7120 gen_logic_CC(tmp);
7121 store_reg(s, rd, tmp);
7122 break;
7123 case 4:
7124 /* 64 bit mul double accumulate (UMAAL) */
7125 ARCH(6);
7126 tmp = load_reg(s, rs);
7127 tmp2 = load_reg(s, rm);
7128 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7129 gen_addq_lo(s, tmp64, rn);
7130 gen_addq_lo(s, tmp64, rd);
7131 gen_storeq_reg(s, rn, rd, tmp64);
7132 tcg_temp_free_i64(tmp64);
7133 break;
7134 case 8: case 9: case 10: case 11:
7135 case 12: case 13: case 14: case 15:
7136 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7137 tmp = load_reg(s, rs);
7138 tmp2 = load_reg(s, rm);
7139 if (insn & (1 << 22)) {
7140 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7141 } else {
7142 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7143 }
7144 if (insn & (1 << 21)) { /* mult accumulate */
7145 gen_addq(s, tmp64, rn, rd);
7146 }
7147 if (insn & (1 << 20)) {
7148 gen_logicq_cc(tmp64);
7149 }
7150 gen_storeq_reg(s, rn, rd, tmp64);
7151 tcg_temp_free_i64(tmp64);
7152 break;
7153 default:
7154 goto illegal_op;
7155 }
7156 } else {
7157 rn = (insn >> 16) & 0xf;
7158 rd = (insn >> 12) & 0xf;
7159 if (insn & (1 << 23)) {
7160 /* load/store exclusive */
7161 op1 = (insn >> 21) & 0x3;
7162 if (op1)
7163 ARCH(6K);
7164 else
7165 ARCH(6);
7166 addr = tcg_temp_local_new_i32();
7167 load_reg_var(s, addr, rn);
7168 if (insn & (1 << 20)) {
7169 switch (op1) {
7170 case 0: /* ldrex */
7171 gen_load_exclusive(s, rd, 15, addr, 2);
7172 break;
7173 case 1: /* ldrexd */
7174 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7175 break;
7176 case 2: /* ldrexb */
7177 gen_load_exclusive(s, rd, 15, addr, 0);
7178 break;
7179 case 3: /* ldrexh */
7180 gen_load_exclusive(s, rd, 15, addr, 1);
7181 break;
7182 default:
7183 abort();
7184 }
7185 } else {
7186 rm = insn & 0xf;
7187 switch (op1) {
7188 case 0: /* strex */
7189 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7190 break;
7191 case 1: /* strexd */
7192 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7193 break;
7194 case 2: /* strexb */
7195 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7196 break;
7197 case 3: /* strexh */
7198 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7199 break;
7200 default:
7201 abort();
7202 }
7203 }
7204 tcg_temp_free(addr);
7205 } else {
7206 /* SWP instruction */
7207 rm = (insn) & 0xf;
7208
7209 /* ??? This is not really atomic. However we know
7210 we never have multiple CPUs running in parallel,
7211 so it is good enough. */
7212 addr = load_reg(s, rn);
7213 tmp = load_reg(s, rm);
7214 if (insn & (1 << 22)) {
7215 tmp2 = gen_ld8u(addr, IS_USER(s));
7216 gen_st8(tmp, addr, IS_USER(s));
7217 } else {
7218 tmp2 = gen_ld32(addr, IS_USER(s));
7219 gen_st32(tmp, addr, IS_USER(s));
7220 }
7221 tcg_temp_free_i32(addr);
7222 store_reg(s, rd, tmp2);
7223 }
7224 }
7225 } else {
7226 int address_offset;
7227 int load;
7228 /* Misc load/store */
7229 rn = (insn >> 16) & 0xf;
7230 rd = (insn >> 12) & 0xf;
7231 addr = load_reg(s, rn);
7232 if (insn & (1 << 24))
7233 gen_add_datah_offset(s, insn, 0, addr);
7234 address_offset = 0;
7235 if (insn & (1 << 20)) {
7236 /* load */
7237 switch(sh) {
7238 case 1:
7239 tmp = gen_ld16u(addr, IS_USER(s));
7240 break;
7241 case 2:
7242 tmp = gen_ld8s(addr, IS_USER(s));
7243 break;
7244 default:
7245 case 3:
7246 tmp = gen_ld16s(addr, IS_USER(s));
7247 break;
7248 }
7249 load = 1;
7250 } else if (sh & 2) {
7251 ARCH(5TE);
7252 /* doubleword */
7253 if (sh & 1) {
7254 /* store */
7255 tmp = load_reg(s, rd);
7256 gen_st32(tmp, addr, IS_USER(s));
7257 tcg_gen_addi_i32(addr, addr, 4);
7258 tmp = load_reg(s, rd + 1);
7259 gen_st32(tmp, addr, IS_USER(s));
7260 load = 0;
7261 } else {
7262 /* load */
7263 tmp = gen_ld32(addr, IS_USER(s));
7264 store_reg(s, rd, tmp);
7265 tcg_gen_addi_i32(addr, addr, 4);
7266 tmp = gen_ld32(addr, IS_USER(s));
7267 rd++;
7268 load = 1;
7269 }
7270 address_offset = -4;
7271 } else {
7272 /* store */
7273 tmp = load_reg(s, rd);
7274 gen_st16(tmp, addr, IS_USER(s));
7275 load = 0;
7276 }
7277 /* Perform base writeback before completing the load, to
7278 ensure correct behavior with overlapping index registers.
7279 ldrd with base writeback is undefined if the
7280 destination and index registers overlap. */
7281 if (!(insn & (1 << 24))) {
7282 gen_add_datah_offset(s, insn, address_offset, addr);
7283 store_reg(s, rn, addr);
7284 } else if (insn & (1 << 21)) {
7285 if (address_offset)
7286 tcg_gen_addi_i32(addr, addr, address_offset);
7287 store_reg(s, rn, addr);
7288 } else {
7289 tcg_temp_free_i32(addr);
7290 }
7291 if (load) {
7292 /* Complete the load. */
7293 store_reg(s, rd, tmp);
7294 }
7295 }
7296 break;
7297 case 0x4:
7298 case 0x5:
7299 goto do_ldst;
7300 case 0x6:
7301 case 0x7:
7302 if (insn & (1 << 4)) {
7303 ARCH(6);
7304 /* ARMv6 Media instructions. */
7305 rm = insn & 0xf;
7306 rn = (insn >> 16) & 0xf;
7307 rd = (insn >> 12) & 0xf;
7308 rs = (insn >> 8) & 0xf;
7309 switch ((insn >> 23) & 3) {
7310 case 0: /* Parallel add/subtract. */
7311 op1 = (insn >> 20) & 7;
7312 tmp = load_reg(s, rn);
7313 tmp2 = load_reg(s, rm);
7314 sh = (insn >> 5) & 7;
7315 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7316 goto illegal_op;
7317 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7318 tcg_temp_free_i32(tmp2);
7319 store_reg(s, rd, tmp);
7320 break;
7321 case 1:
7322 if ((insn & 0x00700020) == 0) {
7323 /* Halfword pack. */
7324 tmp = load_reg(s, rn);
7325 tmp2 = load_reg(s, rm);
7326 shift = (insn >> 7) & 0x1f;
7327 if (insn & (1 << 6)) {
7328 /* pkhtb */
7329 if (shift == 0)
7330 shift = 31;
7331 tcg_gen_sari_i32(tmp2, tmp2, shift);
7332 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7333 tcg_gen_ext16u_i32(tmp2, tmp2);
7334 } else {
7335 /* pkhbt */
7336 if (shift)
7337 tcg_gen_shli_i32(tmp2, tmp2, shift);
7338 tcg_gen_ext16u_i32(tmp, tmp);
7339 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7340 }
7341 tcg_gen_or_i32(tmp, tmp, tmp2);
7342 tcg_temp_free_i32(tmp2);
7343 store_reg(s, rd, tmp);
7344 } else if ((insn & 0x00200020) == 0x00200000) {
7345 /* [us]sat */
7346 tmp = load_reg(s, rm);
7347 shift = (insn >> 7) & 0x1f;
7348 if (insn & (1 << 6)) {
7349 if (shift == 0)
7350 shift = 31;
7351 tcg_gen_sari_i32(tmp, tmp, shift);
7352 } else {
7353 tcg_gen_shli_i32(tmp, tmp, shift);
7354 }
7355 sh = (insn >> 16) & 0x1f;
7356 tmp2 = tcg_const_i32(sh);
7357 if (insn & (1 << 22))
7358 gen_helper_usat(tmp, tmp, tmp2);
7359 else
7360 gen_helper_ssat(tmp, tmp, tmp2);
7361 tcg_temp_free_i32(tmp2);
7362 store_reg(s, rd, tmp);
7363 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7364 /* [us]sat16 */
7365 tmp = load_reg(s, rm);
7366 sh = (insn >> 16) & 0x1f;
7367 tmp2 = tcg_const_i32(sh);
7368 if (insn & (1 << 22))
7369 gen_helper_usat16(tmp, tmp, tmp2);
7370 else
7371 gen_helper_ssat16(tmp, tmp, tmp2);
7372 tcg_temp_free_i32(tmp2);
7373 store_reg(s, rd, tmp);
7374 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7375 /* Select bytes. */
7376 tmp = load_reg(s, rn);
7377 tmp2 = load_reg(s, rm);
7378 tmp3 = tcg_temp_new_i32();
7379 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7380 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7381 tcg_temp_free_i32(tmp3);
7382 tcg_temp_free_i32(tmp2);
7383 store_reg(s, rd, tmp);
7384 } else if ((insn & 0x000003e0) == 0x00000060) {
7385 tmp = load_reg(s, rm);
7386 shift = (insn >> 10) & 3;
7387 /* ??? In many cases it's not necessary to do a
7388 rotate; a shift is sufficient. */
7389 if (shift != 0)
7390 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7391 op1 = (insn >> 20) & 7;
7392 switch (op1) {
7393 case 0: gen_sxtb16(tmp); break;
7394 case 2: gen_sxtb(tmp); break;
7395 case 3: gen_sxth(tmp); break;
7396 case 4: gen_uxtb16(tmp); break;
7397 case 6: gen_uxtb(tmp); break;
7398 case 7: gen_uxth(tmp); break;
7399 default: goto illegal_op;
7400 }
7401 if (rn != 15) {
7402 tmp2 = load_reg(s, rn);
7403 if ((op1 & 3) == 0) {
7404 gen_add16(tmp, tmp2);
7405 } else {
7406 tcg_gen_add_i32(tmp, tmp, tmp2);
7407 tcg_temp_free_i32(tmp2);
7408 }
7409 }
7410 store_reg(s, rd, tmp);
7411 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7412 /* rev */
7413 tmp = load_reg(s, rm);
7414 if (insn & (1 << 22)) {
7415 if (insn & (1 << 7)) {
7416 gen_revsh(tmp);
7417 } else {
7418 ARCH(6T2);
7419 gen_helper_rbit(tmp, tmp);
7420 }
7421 } else {
7422 if (insn & (1 << 7))
7423 gen_rev16(tmp);
7424 else
7425 tcg_gen_bswap32_i32(tmp, tmp);
7426 }
7427 store_reg(s, rd, tmp);
7428 } else {
7429 goto illegal_op;
7430 }
7431 break;
7432 case 2: /* Multiplies (Type 3). */
7433 tmp = load_reg(s, rm);
7434 tmp2 = load_reg(s, rs);
7435 if (insn & (1 << 20)) {
7436 /* Signed multiply most significant [accumulate].
7437 (SMMUL, SMMLA, SMMLS) */
7438 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7439
7440 if (rd != 15) {
7441 tmp = load_reg(s, rd);
7442 if (insn & (1 << 6)) {
7443 tmp64 = gen_subq_msw(tmp64, tmp);
7444 } else {
7445 tmp64 = gen_addq_msw(tmp64, tmp);
7446 }
7447 }
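/* Bit 5 selects the rounding forms (SMMULR, SMMLAR, SMMLSR): adding
 * 0x80000000 before the 32-bit shift below rounds the high word to
 * nearest instead of truncating. */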
7448 if (insn & (1 << 5)) {
7449 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7450 }
7451 tcg_gen_shri_i64(tmp64, tmp64, 32);
7452 tmp = tcg_temp_new_i32();
7453 tcg_gen_trunc_i64_i32(tmp, tmp64);
7454 tcg_temp_free_i64(tmp64);
7455 store_reg(s, rn, tmp);
7456 } else {
7457 if (insn & (1 << 5))
7458 gen_swap_half(tmp2);
7459 gen_smul_dual(tmp, tmp2);
7460 if (insn & (1 << 6)) {
7461 /* This subtraction cannot overflow. */
7462 tcg_gen_sub_i32(tmp, tmp, tmp2);
7463 } else {
7464 /* This addition cannot overflow 32 bits;
7465 * however it may overflow considered as a signed
7466 * operation, in which case we must set the Q flag.
7467 */
7468 gen_helper_add_setq(tmp, tmp, tmp2);
7469 }
7470 tcg_temp_free_i32(tmp2);
7471 if (insn & (1 << 22)) {
7472 /* smlald, smlsld */
7473 tmp64 = tcg_temp_new_i64();
7474 tcg_gen_ext_i32_i64(tmp64, tmp);
7475 tcg_temp_free_i32(tmp);
7476 gen_addq(s, tmp64, rd, rn);
7477 gen_storeq_reg(s, rd, rn, tmp64);
7478 tcg_temp_free_i64(tmp64);
7479 } else {
7480 /* smuad, smusd, smlad, smlsd */
7481 if (rd != 15)
7482 {
7483 tmp2 = load_reg(s, rd);
7484 gen_helper_add_setq(tmp, tmp, tmp2);
7485 tcg_temp_free_i32(tmp2);
7486 }
7487 store_reg(s, rn, tmp);
7488 }
7489 }
7490 break;
7491 case 3:
7492 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7493 switch (op1) {
7494 case 0: /* Unsigned sum of absolute differences. */
7495 ARCH(6);
7496 tmp = load_reg(s, rm);
7497 tmp2 = load_reg(s, rs);
7498 gen_helper_usad8(tmp, tmp, tmp2);
7499 tcg_temp_free_i32(tmp2);
7500 if (rd != 15) {
7501 tmp2 = load_reg(s, rd);
7502 tcg_gen_add_i32(tmp, tmp, tmp2);
7503 tcg_temp_free_i32(tmp2);
7504 }
7505 store_reg(s, rn, tmp);
7506 break;
7507 case 0x20: case 0x24: case 0x28: case 0x2c:
7508 /* Bitfield insert/clear. */
7509 ARCH(6T2);
7510 shift = (insn >> 7) & 0x1f;
7511 i = (insn >> 16) & 0x1f;
7512 i = i + 1 - shift;
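/* shift is the lsb and i is now the field width; e.g. lsb = 4 and
 * msb = 11 give i = 8, so the mask below is (1u << 8) - 1 = 0xff,
 * inserted at bit 4. */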
7513 if (rm == 15) {
7514 tmp = tcg_temp_new_i32();
7515 tcg_gen_movi_i32(tmp, 0);
7516 } else {
7517 tmp = load_reg(s, rm);
7518 }
7519 if (i != 32) {
7520 tmp2 = load_reg(s, rd);
7521 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7522 tcg_temp_free_i32(tmp2);
7523 }
7524 store_reg(s, rd, tmp);
7525 break;
7526 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7527 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7528 ARCH(6T2);
7529 tmp = load_reg(s, rm);
7530 shift = (insn >> 7) & 0x1f;
7531 i = ((insn >> 16) & 0x1f) + 1;
7532 if (shift + i > 32)
7533 goto illegal_op;
7534 if (i < 32) {
7535 if (op1 & 0x20) {
7536 gen_ubfx(tmp, shift, (1u << i) - 1);
7537 } else {
7538 gen_sbfx(tmp, shift, i);
7539 }
7540 }
7541 store_reg(s, rd, tmp);
7542 break;
7543 default:
7544 goto illegal_op;
7545 }
7546 break;
7547 }
7548 break;
7549 }
7550 do_ldst:
7551 /* Check for undefined extension instructions
7552 * per the ARM Bible, i.e.:
7553 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7554 */
7555 sh = (0xf << 20) | (0xf << 4);
7556 if (op1 == 0x7 && ((insn & sh) == sh))
7557 {
7558 goto illegal_op;
7559 }
7560 /* load/store byte/word */
7561 rn = (insn >> 16) & 0xf;
7562 rd = (insn >> 12) & 0xf;
7563 tmp2 = load_reg(s, rn);
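/* P == 0 with W == 1 encodes the unprivileged forms (ldrt, strt,
 * ldrbt, strbt), so the access is made with user permissions even
 * from a privileged mode. */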
7564 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7565 if (insn & (1 << 24))
7566 gen_add_data_offset(s, insn, tmp2);
7567 if (insn & (1 << 20)) {
7568 /* load */
7569 if (insn & (1 << 22)) {
7570 tmp = gen_ld8u(tmp2, i);
7571 } else {
7572 tmp = gen_ld32(tmp2, i);
7573 }
7574 } else {
7575 /* store */
7576 tmp = load_reg(s, rd);
7577 if (insn & (1 << 22))
7578 gen_st8(tmp, tmp2, i);
7579 else
7580 gen_st32(tmp, tmp2, i);
7581 }
7582 if (!(insn & (1 << 24))) {
7583 gen_add_data_offset(s, insn, tmp2);
7584 store_reg(s, rn, tmp2);
7585 } else if (insn & (1 << 21)) {
7586 store_reg(s, rn, tmp2);
7587 } else {
7588 tcg_temp_free_i32(tmp2);
7589 }
7590 if (insn & (1 << 20)) {
7591 /* Complete the load. */
7592 store_reg_from_load(env, s, rd, tmp);
7593 }
7594 break;
7595 case 0x08:
7596 case 0x09:
7597 {
7598 int j, n, user, loaded_base;
7599 TCGv loaded_var;
7600 /* load/store multiple words */
7601 /* XXX: store correct base if write back */
7602 user = 0;
7603 if (insn & (1 << 22)) {
7604 if (IS_USER(s))
7605 goto illegal_op; /* only usable in supervisor mode */
7606
7607 if ((insn & (1 << 15)) == 0)
7608 user = 1;
7609 }
7610 rn = (insn >> 16) & 0xf;
7611 addr = load_reg(s, rn);
7612
7613 /* compute total size */
7614 loaded_base = 0;
7615 TCGV_UNUSED(loaded_var);
7616 n = 0;
7617 for(i=0;i<16;i++) {
7618 if (insn & (1 << i))
7619 n++;
7620 }
7621 /* XXX: test invalid n == 0 case ? */
7622 if (insn & (1 << 23)) {
7623 if (insn & (1 << 24)) {
7624 /* pre increment */
7625 tcg_gen_addi_i32(addr, addr, 4);
7626 } else {
7627 /* post increment */
7628 }
7629 } else {
7630 if (insn & (1 << 24)) {
7631 /* pre decrement */
7632 tcg_gen_addi_i32(addr, addr, -(n * 4));
7633 } else {
7634 /* post decrement */
7635 if (n != 1)
7636 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7637 }
7638 }
7639 j = 0;
7640 for(i=0;i<16;i++) {
7641 if (insn & (1 << i)) {
7642 if (insn & (1 << 20)) {
7643 /* load */
7644 tmp = gen_ld32(addr, IS_USER(s));
7645 if (user) {
7646 tmp2 = tcg_const_i32(i);
7647 gen_helper_set_user_reg(tmp2, tmp);
7648 tcg_temp_free_i32(tmp2);
7649 tcg_temp_free_i32(tmp);
7650 } else if (i == rn) {
7651 loaded_var = tmp;
7652 loaded_base = 1;
7653 } else {
7654 store_reg_from_load(env, s, i, tmp);
7655 }
7656 } else {
7657 /* store */
7658 if (i == 15) {
7659 /* special case: r15 = PC + 8 */
7660 val = (long)s->pc + 4;
7661 tmp = tcg_temp_new_i32();
7662 tcg_gen_movi_i32(tmp, val);
7663 } else if (user) {
7664 tmp = tcg_temp_new_i32();
7665 tmp2 = tcg_const_i32(i);
7666 gen_helper_get_user_reg(tmp, tmp2);
7667 tcg_temp_free_i32(tmp2);
7668 } else {
7669 tmp = load_reg(s, i);
7670 }
7671 gen_st32(tmp, addr, IS_USER(s));
7672 }
7673 j++;
7674 /* no need to add after the last transfer */
7675 if (j != n)
7676 tcg_gen_addi_i32(addr, addr, 4);
7677 }
7678 }
7679 if (insn & (1 << 21)) {
7680 /* write back */
7681 if (insn & (1 << 23)) {
7682 if (insn & (1 << 24)) {
7683 /* pre increment */
7684 } else {
7685 /* post increment */
7686 tcg_gen_addi_i32(addr, addr, 4);
7687 }
7688 } else {
7689 if (insn & (1 << 24)) {
7690 /* pre decrement */
7691 if (n != 1)
7692 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7693 } else {
7694 /* post decrement */
7695 tcg_gen_addi_i32(addr, addr, -(n * 4));
7696 }
7697 }
7698 store_reg(s, rn, addr);
7699 } else {
7700 tcg_temp_free_i32(addr);
7701 }
7702 if (loaded_base) {
7703 store_reg(s, rn, loaded_var);
7704 }
7705 if ((insn & (1 << 22)) && !user) {
7706 /* Restore CPSR from SPSR. */
7707 tmp = load_cpu_field(spsr);
7708 gen_set_cpsr(tmp, 0xffffffff);
7709 tcg_temp_free_i32(tmp);
7710 s->is_jmp = DISAS_UPDATE;
7711 }
7712 }
7713 break;
7714 case 0xa:
7715 case 0xb:
7716 {
7717 int32_t offset;
7718
7719 /* branch (and link) */
7720 val = (int32_t)s->pc;
7721 if (insn & (1 << 24)) {
7722 tmp = tcg_temp_new_i32();
7723 tcg_gen_movi_i32(tmp, val);
7724 store_reg(s, 14, tmp);
7725 }
7726 offset = (((int32_t)insn << 8) >> 8);
7727 val += (offset << 2) + 4;
7728 gen_jmp(s, val);
7729 }
7730 break;
7731 case 0xc:
7732 case 0xd:
7733 case 0xe:
7734 /* Coprocessor. */
7735 if (disas_coproc_insn(env, s, insn))
7736 goto illegal_op;
7737 break;
7738 case 0xf:
7739 /* swi */
7740 gen_set_pc_im(s->pc);
7741 s->is_jmp = DISAS_SWI;
7742 break;
7743 default:
7744 illegal_op:
7745 gen_exception_insn(s, 4, EXCP_UDEF);
7746 break;
7747 }
7748 }
7749 }
7750
7751 /* Return true if this is a Thumb-2 logical op. */
7752 static int
7753 thumb2_logic_op(int op)
7754 {
7755 return (op < 8);
7756 }
7757
7758 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7759 then set condition code flags based on the result of the operation.
7760 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7761 to the high bit of T1.
7762 Returns zero if the opcode is valid. */
7763
7764 static int
7765 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7766 {
7767 int logic_cc;
7768
7769 logic_cc = 0;
7770 switch (op) {
7771 case 0: /* and */
7772 tcg_gen_and_i32(t0, t0, t1);
7773 logic_cc = conds;
7774 break;
7775 case 1: /* bic */
7776 tcg_gen_andc_i32(t0, t0, t1);
7777 logic_cc = conds;
7778 break;
7779 case 2: /* orr */
7780 tcg_gen_or_i32(t0, t0, t1);
7781 logic_cc = conds;
7782 break;
7783 case 3: /* orn */
7784 tcg_gen_orc_i32(t0, t0, t1);
7785 logic_cc = conds;
7786 break;
7787 case 4: /* eor */
7788 tcg_gen_xor_i32(t0, t0, t1);
7789 logic_cc = conds;
7790 break;
7791 case 8: /* add */
7792 if (conds)
7793 gen_helper_add_cc(t0, t0, t1);
7794 else
7795 tcg_gen_add_i32(t0, t0, t1);
7796 break;
7797 case 10: /* adc */
7798 if (conds)
7799 gen_helper_adc_cc(t0, t0, t1);
7800 else
7801 gen_adc(t0, t1);
7802 break;
7803 case 11: /* sbc */
7804 if (conds)
7805 gen_helper_sbc_cc(t0, t0, t1);
7806 else
7807 gen_sub_carry(t0, t0, t1);
7808 break;
7809 case 13: /* sub */
7810 if (conds)
7811 gen_helper_sub_cc(t0, t0, t1);
7812 else
7813 tcg_gen_sub_i32(t0, t0, t1);
7814 break;
7815 case 14: /* rsb */
7816 if (conds)
7817 gen_helper_sub_cc(t0, t1, t0);
7818 else
7819 tcg_gen_sub_i32(t0, t1, t0);
7820 break;
7821 default: /* 5, 6, 7, 9, 12, 15. */
7822 return 1;
7823 }
7824 if (logic_cc) {
7825 gen_logic_CC(t0);
7826 if (shifter_out)
7827 gen_set_CF_bit31(t1);
7828 }
7829 return 0;
7830 }
7831
7832 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7833 is not legal. */
7834 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7835 {
7836 uint32_t insn, imm, shift, offset;
7837 uint32_t rd, rn, rm, rs;
7838 TCGv tmp;
7839 TCGv tmp2;
7840 TCGv tmp3;
7841 TCGv addr;
7842 TCGv_i64 tmp64;
7843 int op;
7844 int shiftop;
7845 int conds;
7846 int logic_cc;
7847
7848 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7849 || arm_feature (env, ARM_FEATURE_M))) {
7850 /* Thumb-1 cores may need to treat bl and blx as a pair of
7851 16-bit instructions to get correct prefetch abort behavior. */
7852 insn = insn_hw1;
7853 if ((insn & (1 << 12)) == 0) {
7854 ARCH(5);
7855 /* Second half of blx. */
7856 offset = ((insn & 0x7ff) << 1);
7857 tmp = load_reg(s, 14);
7858 tcg_gen_addi_i32(tmp, tmp, offset);
7859 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7860
7861 tmp2 = tcg_temp_new_i32();
7862 tcg_gen_movi_i32(tmp2, s->pc | 1);
7863 store_reg(s, 14, tmp2);
7864 gen_bx(s, tmp);
7865 return 0;
7866 }
7867 if (insn & (1 << 11)) {
7868 /* Second half of bl. */
7869 offset = ((insn & 0x7ff) << 1) | 1;
7870 tmp = load_reg(s, 14);
7871 tcg_gen_addi_i32(tmp, tmp, offset);
7872
7873 tmp2 = tcg_temp_new_i32();
7874 tcg_gen_movi_i32(tmp2, s->pc | 1);
7875 store_reg(s, 14, tmp2);
7876 gen_bx(s, tmp);
7877 return 0;
7878 }
7879 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7880 /* Instruction spans a page boundary. Implement it as two
7881 16-bit instructions in case the second half causes a
7882 prefetch abort. */
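/* This sign-extends hw1[10:0] into bits [22:12] of the offset; e.g.
 * hw1 = 0xf400 yields offset = -0x400000, and r14 then holds the
 * partial branch target for the second halfword to complete. */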
7883 offset = ((int32_t)insn << 21) >> 9;
7884 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7885 return 0;
7886 }
7887 /* Fall through to 32-bit decode. */
7888 }
7889
7890 insn = lduw_code(s->pc);
7891 s->pc += 2;
7892 insn |= (uint32_t)insn_hw1 << 16;
7893
7894 if ((insn & 0xf800e800) != 0xf000e800) {
7895 ARCH(6T2);
7896 }
7897
7898 rn = (insn >> 16) & 0xf;
7899 rs = (insn >> 12) & 0xf;
7900 rd = (insn >> 8) & 0xf;
7901 rm = insn & 0xf;
7902 switch ((insn >> 25) & 0xf) {
7903 case 0: case 1: case 2: case 3:
7904 /* 16-bit instructions. Should never happen. */
7905 abort();
7906 case 4:
7907 if (insn & (1 << 22)) {
7908 /* Other load/store, table branch. */
7909 if (insn & 0x01200000) {
7910 /* Load/store doubleword. */
7911 if (rn == 15) {
7912 addr = tcg_temp_new_i32();
7913 tcg_gen_movi_i32(addr, s->pc & ~3);
7914 } else {
7915 addr = load_reg(s, rn);
7916 }
7917 offset = (insn & 0xff) * 4;
7918 if ((insn & (1 << 23)) == 0)
7919 offset = -offset;
7920 if (insn & (1 << 24)) {
7921 tcg_gen_addi_i32(addr, addr, offset);
7922 offset = 0;
7923 }
7924 if (insn & (1 << 20)) {
7925 /* ldrd */
7926 tmp = gen_ld32(addr, IS_USER(s));
7927 store_reg(s, rs, tmp);
7928 tcg_gen_addi_i32(addr, addr, 4);
7929 tmp = gen_ld32(addr, IS_USER(s));
7930 store_reg(s, rd, tmp);
7931 } else {
7932 /* strd */
7933 tmp = load_reg(s, rs);
7934 gen_st32(tmp, addr, IS_USER(s));
7935 tcg_gen_addi_i32(addr, addr, 4);
7936 tmp = load_reg(s, rd);
7937 gen_st32(tmp, addr, IS_USER(s));
7938 }
7939 if (insn & (1 << 21)) {
7940 /* Base writeback. */
7941 if (rn == 15)
7942 goto illegal_op;
7943 tcg_gen_addi_i32(addr, addr, offset - 4);
7944 store_reg(s, rn, addr);
7945 } else {
7946 tcg_temp_free_i32(addr);
7947 }
7948 } else if ((insn & (1 << 23)) == 0) {
7949 /* Load/store exclusive word. */
7950 addr = tcg_temp_local_new();
7951 load_reg_var(s, addr, rn);
7952 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7953 if (insn & (1 << 20)) {
7954 gen_load_exclusive(s, rs, 15, addr, 2);
7955 } else {
7956 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7957 }
7958 tcg_temp_free(addr);
7959 } else if ((insn & (1 << 6)) == 0) {
7960 /* Table Branch. */
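/* tbb/tbh load an unsigned byte or halfword from Rn + Rm
 * (or Rn + 2 * Rm for tbh), double it and add it to the PC:
 * the table holds forward branch offsets in halfwords. */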
7961 if (rn == 15) {
7962 addr = tcg_temp_new_i32();
7963 tcg_gen_movi_i32(addr, s->pc);
7964 } else {
7965 addr = load_reg(s, rn);
7966 }
7967 tmp = load_reg(s, rm);
7968 tcg_gen_add_i32(addr, addr, tmp);
7969 if (insn & (1 << 4)) {
7970 /* tbh */
7971 tcg_gen_add_i32(addr, addr, tmp);
7972 tcg_temp_free_i32(tmp);
7973 tmp = gen_ld16u(addr, IS_USER(s));
7974 } else { /* tbb */
7975 tcg_temp_free_i32(tmp);
7976 tmp = gen_ld8u(addr, IS_USER(s));
7977 }
7978 tcg_temp_free_i32(addr);
7979 tcg_gen_shli_i32(tmp, tmp, 1);
7980 tcg_gen_addi_i32(tmp, tmp, s->pc);
7981 store_reg(s, 15, tmp);
7982 } else {
7983 /* Load/store exclusive byte/halfword/doubleword. */
7984 ARCH(7);
7985 op = (insn >> 4) & 0x3;
7986 if (op == 2) {
7987 goto illegal_op;
7988 }
7989 addr = tcg_temp_local_new();
7990 load_reg_var(s, addr, rn);
7991 if (insn & (1 << 20)) {
7992 gen_load_exclusive(s, rs, rd, addr, op);
7993 } else {
7994 gen_store_exclusive(s, rm, rs, rd, addr, op);
7995 }
7996 tcg_temp_free(addr);
7997 }
7998 } else {
7999 /* Load/store multiple, RFE, SRS. */
8000 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8001 /* Not available in user mode. */
8002 if (IS_USER(s))
8003 goto illegal_op;
8004 if (insn & (1 << 20)) {
8005 /* rfe */
8006 addr = load_reg(s, rn);
8007 if ((insn & (1 << 24)) == 0)
8008 tcg_gen_addi_i32(addr, addr, -8);
8009 /* Load PC into tmp and CPSR into tmp2. */
8010 tmp = gen_ld32(addr, 0);
8011 tcg_gen_addi_i32(addr, addr, 4);
8012 tmp2 = gen_ld32(addr, 0);
8013 if (insn & (1 << 21)) {
8014 /* Base writeback. */
8015 if (insn & (1 << 24)) {
8016 tcg_gen_addi_i32(addr, addr, 4);
8017 } else {
8018 tcg_gen_addi_i32(addr, addr, -4);
8019 }
8020 store_reg(s, rn, addr);
8021 } else {
8022 tcg_temp_free_i32(addr);
8023 }
8024 gen_rfe(s, tmp, tmp2);
8025 } else {
8026 /* srs */
8027 op = (insn & 0x1f);
8028 addr = tcg_temp_new_i32();
8029 tmp = tcg_const_i32(op);
8030 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8031 tcg_temp_free_i32(tmp);
8032 if ((insn & (1 << 24)) == 0) {
8033 tcg_gen_addi_i32(addr, addr, -8);
8034 }
8035 tmp = load_reg(s, 14);
8036 gen_st32(tmp, addr, 0);
8037 tcg_gen_addi_i32(addr, addr, 4);
8038 tmp = tcg_temp_new_i32();
8039 gen_helper_cpsr_read(tmp);
8040 gen_st32(tmp, addr, 0);
8041 if (insn & (1 << 21)) {
8042 if ((insn & (1 << 24)) == 0) {
8043 tcg_gen_addi_i32(addr, addr, -4);
8044 } else {
8045 tcg_gen_addi_i32(addr, addr, 4);
8046 }
8047 tmp = tcg_const_i32(op);
8048 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8049 tcg_temp_free_i32(tmp);
8050 } else {
8051 tcg_temp_free_i32(addr);
8052 }
8053 }
8054 } else {
8055 int i, loaded_base = 0;
8056 TCGv loaded_var;
8057 /* Load/store multiple. */
8058 addr = load_reg(s, rn);
8059 offset = 0;
8060 for (i = 0; i < 16; i++) {
8061 if (insn & (1 << i))
8062 offset += 4;
8063 }
8064 if (insn & (1 << 24)) {
8065 tcg_gen_addi_i32(addr, addr, -offset);
8066 }
8067
8068 TCGV_UNUSED(loaded_var);
8069 for (i = 0; i < 16; i++) {
8070 if ((insn & (1 << i)) == 0)
8071 continue;
8072 if (insn & (1 << 20)) {
8073 /* Load. */
8074 tmp = gen_ld32(addr, IS_USER(s));
8075 if (i == 15) {
8076 gen_bx(s, tmp);
8077 } else if (i == rn) {
8078 loaded_var = tmp;
8079 loaded_base = 1;
8080 } else {
8081 store_reg(s, i, tmp);
8082 }
8083 } else {
8084 /* Store. */
8085 tmp = load_reg(s, i);
8086 gen_st32(tmp, addr, IS_USER(s));
8087 }
8088 tcg_gen_addi_i32(addr, addr, 4);
8089 }
8090 if (loaded_base) {
8091 store_reg(s, rn, loaded_var);
8092 }
8093 if (insn & (1 << 21)) {
8094 /* Base register writeback. */
8095 if (insn & (1 << 24)) {
8096 tcg_gen_addi_i32(addr, addr, -offset);
8097 }
8098 /* Fault if writeback register is in register list. */
8099 if (insn & (1 << rn))
8100 goto illegal_op;
8101 store_reg(s, rn, addr);
8102 } else {
8103 tcg_temp_free_i32(addr);
8104 }
8105 }
8106 }
8107 break;
8108 case 5:
8109
8110 op = (insn >> 21) & 0xf;
8111 if (op == 6) {
8112 /* Halfword pack. */
8113 tmp = load_reg(s, rn);
8114 tmp2 = load_reg(s, rm);
8115 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8116 if (insn & (1 << 5)) {
8117 /* pkhtb */
8118 if (shift == 0)
8119 shift = 31;
8120 tcg_gen_sari_i32(tmp2, tmp2, shift);
8121 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8122 tcg_gen_ext16u_i32(tmp2, tmp2);
8123 } else {
8124 /* pkhbt */
8125 if (shift)
8126 tcg_gen_shli_i32(tmp2, tmp2, shift);
8127 tcg_gen_ext16u_i32(tmp, tmp);
8128 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8129 }
8130 tcg_gen_or_i32(tmp, tmp, tmp2);
8131 tcg_temp_free_i32(tmp2);
8132 store_reg(s, rd, tmp);
8133 } else {
8134 /* Data processing register constant shift. */
8135 if (rn == 15) {
8136 tmp = tcg_temp_new_i32();
8137 tcg_gen_movi_i32(tmp, 0);
8138 } else {
8139 tmp = load_reg(s, rn);
8140 }
8141 tmp2 = load_reg(s, rm);
8142
8143 shiftop = (insn >> 4) & 3;
8144 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8145 conds = (insn & (1 << 20)) != 0;
8146 logic_cc = (conds && thumb2_logic_op(op));
8147 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8148 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8149 goto illegal_op;
8150 tcg_temp_free_i32(tmp2);
8151 if (rd != 15) {
8152 store_reg(s, rd, tmp);
8153 } else {
8154 tcg_temp_free_i32(tmp);
8155 }
8156 }
8157 break;
8158 case 13: /* Misc data processing. */
8159 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8160 if (op < 4 && (insn & 0xf000) != 0xf000)
8161 goto illegal_op;
8162 switch (op) {
8163 case 0: /* Register controlled shift. */
8164 tmp = load_reg(s, rn);
8165 tmp2 = load_reg(s, rm);
8166 if ((insn & 0x70) != 0)
8167 goto illegal_op;
8168 op = (insn >> 21) & 3;
8169 logic_cc = (insn & (1 << 20)) != 0;
8170 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8171 if (logic_cc)
8172 gen_logic_CC(tmp);
8173 store_reg_bx(env, s, rd, tmp);
8174 break;
8175 case 1: /* Sign/zero extend. */
8176 tmp = load_reg(s, rm);
8177 shift = (insn >> 4) & 3;
8178 /* ??? In many cases it's not necessary to do a
8179 rotate; a shift is sufficient. */
8180 if (shift != 0)
8181 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8182 op = (insn >> 20) & 7;
8183 switch (op) {
8184 case 0: gen_sxth(tmp); break;
8185 case 1: gen_uxth(tmp); break;
8186 case 2: gen_sxtb16(tmp); break;
8187 case 3: gen_uxtb16(tmp); break;
8188 case 4: gen_sxtb(tmp); break;
8189 case 5: gen_uxtb(tmp); break;
8190 default: goto illegal_op;
8191 }
8192 if (rn != 15) {
8193 tmp2 = load_reg(s, rn);
8194 if ((op >> 1) == 1) {
8195 gen_add16(tmp, tmp2);
8196 } else {
8197 tcg_gen_add_i32(tmp, tmp, tmp2);
8198 tcg_temp_free_i32(tmp2);
8199 }
8200 }
8201 store_reg(s, rd, tmp);
8202 break;
8203 case 2: /* SIMD add/subtract. */
8204 op = (insn >> 20) & 7;
8205 shift = (insn >> 4) & 7;
8206 if ((op & 3) == 3 || (shift & 3) == 3)
8207 goto illegal_op;
8208 tmp = load_reg(s, rn);
8209 tmp2 = load_reg(s, rm);
8210 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8211 tcg_temp_free_i32(tmp2);
8212 store_reg(s, rd, tmp);
8213 break;
8214 case 3: /* Other data processing. */
8215 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8216 if (op < 4) {
8217 /* Saturating add/subtract. */
8218 tmp = load_reg(s, rn);
8219 tmp2 = load_reg(s, rm);
8220 if (op & 1)
8221 gen_helper_double_saturate(tmp, tmp);
8222 if (op & 2)
8223 gen_helper_sub_saturate(tmp, tmp2, tmp);
8224 else
8225 gen_helper_add_saturate(tmp, tmp, tmp2);
8226 tcg_temp_free_i32(tmp2);
8227 } else {
8228 tmp = load_reg(s, rn);
8229 switch (op) {
8230 case 0x0a: /* rbit */
8231 gen_helper_rbit(tmp, tmp);
8232 break;
8233 case 0x08: /* rev */
8234 tcg_gen_bswap32_i32(tmp, tmp);
8235 break;
8236 case 0x09: /* rev16 */
8237 gen_rev16(tmp);
8238 break;
8239 case 0x0b: /* revsh */
8240 gen_revsh(tmp);
8241 break;
8242 case 0x10: /* sel */
8243 tmp2 = load_reg(s, rm);
8244 tmp3 = tcg_temp_new_i32();
8245 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
8246 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8247 tcg_temp_free_i32(tmp3);
8248 tcg_temp_free_i32(tmp2);
8249 break;
8250 case 0x18: /* clz */
8251 gen_helper_clz(tmp, tmp);
8252 break;
8253 default:
8254 goto illegal_op;
8255 }
8256 }
8257 store_reg(s, rd, tmp);
8258 break;
8259 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8260 op = (insn >> 4) & 0xf;
8261 tmp = load_reg(s, rn);
8262 tmp2 = load_reg(s, rm);
8263 switch ((insn >> 20) & 7) {
8264 case 0: /* 32 x 32 -> 32 */
8265 tcg_gen_mul_i32(tmp, tmp, tmp2);
8266 tcg_temp_free_i32(tmp2);
8267 if (rs != 15) {
8268 tmp2 = load_reg(s, rs);
8269 if (op)
8270 tcg_gen_sub_i32(tmp, tmp2, tmp);
8271 else
8272 tcg_gen_add_i32(tmp, tmp, tmp2);
8273 tcg_temp_free_i32(tmp2);
8274 }
8275 break;
8276 case 1: /* 16 x 16 -> 32 */
8277 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8278 tcg_temp_free_i32(tmp2);
8279 if (rs != 15) {
8280 tmp2 = load_reg(s, rs);
8281 gen_helper_add_setq(tmp, tmp, tmp2);
8282 tcg_temp_free_i32(tmp2);
8283 }
8284 break;
8285 case 2: /* Dual multiply add. */
8286 case 4: /* Dual multiply subtract. */
8287 if (op)
8288 gen_swap_half(tmp2);
8289 gen_smul_dual(tmp, tmp2);
8290 if (insn & (1 << 22)) {
8291 /* This subtraction cannot overflow. */
8292 tcg_gen_sub_i32(tmp, tmp, tmp2);
8293 } else {
8294 /* This addition cannot overflow 32 bits;
8295 * however it may overflow considered as a signed
8296 * operation, in which case we must set the Q flag.
8297 */
8298 gen_helper_add_setq(tmp, tmp, tmp2);
8299 }
8300 tcg_temp_free_i32(tmp2);
8301 if (rs != 15)
8302 {
8303 tmp2 = load_reg(s, rs);
8304 gen_helper_add_setq(tmp, tmp, tmp2);
8305 tcg_temp_free_i32(tmp2);
8306 }
8307 break;
8308 case 3: /* 32 * 16 -> 32msb */
8309 if (op)
8310 tcg_gen_sari_i32(tmp2, tmp2, 16);
8311 else
8312 gen_sxth(tmp2);
8313 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8314 tcg_gen_shri_i64(tmp64, tmp64, 16);
8315 tmp = tcg_temp_new_i32();
8316 tcg_gen_trunc_i64_i32(tmp, tmp64);
8317 tcg_temp_free_i64(tmp64);
8318 if (rs != 15)
8319 {
8320 tmp2 = load_reg(s, rs);
8321 gen_helper_add_setq(tmp, tmp, tmp2);
8322 tcg_temp_free_i32(tmp2);
8323 }
8324 break;
8325 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8326 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8327 if (rs != 15) {
8328 tmp = load_reg(s, rs);
8329 if (insn & (1 << 20)) {
8330 tmp64 = gen_addq_msw(tmp64, tmp);
8331 } else {
8332 tmp64 = gen_subq_msw(tmp64, tmp);
8333 }
8334 }
8335 if (insn & (1 << 4)) {
8336 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8337 }
8338 tcg_gen_shri_i64(tmp64, tmp64, 32);
8339 tmp = tcg_temp_new_i32();
8340 tcg_gen_trunc_i64_i32(tmp, tmp64);
8341 tcg_temp_free_i64(tmp64);
8342 break;
8343 case 7: /* Unsigned sum of absolute differences. */
8344 gen_helper_usad8(tmp, tmp, tmp2);
8345 tcg_temp_free_i32(tmp2);
8346 if (rs != 15) {
8347 tmp2 = load_reg(s, rs);
8348 tcg_gen_add_i32(tmp, tmp, tmp2);
8349 tcg_temp_free_i32(tmp2);
8350 }
8351 break;
8352 }
8353 store_reg(s, rd, tmp);
8354 break;
8355 case 6: case 7: /* 64-bit multiply, Divide. */
8356 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8357 tmp = load_reg(s, rn);
8358 tmp2 = load_reg(s, rm);
8359 if ((op & 0x50) == 0x10) {
8360 /* sdiv, udiv */
8361 if (!arm_feature(env, ARM_FEATURE_DIV))
8362 goto illegal_op;
8363 if (op & 0x20)
8364 gen_helper_udiv(tmp, tmp, tmp2);
8365 else
8366 gen_helper_sdiv(tmp, tmp, tmp2);
8367 tcg_temp_free_i32(tmp2);
8368 store_reg(s, rd, tmp);
8369 } else if ((op & 0xe) == 0xc) {
8370 /* Dual multiply accumulate long. */
8371 if (op & 1)
8372 gen_swap_half(tmp2);
8373 gen_smul_dual(tmp, tmp2);
8374 if (op & 0x10) {
8375 tcg_gen_sub_i32(tmp, tmp, tmp2);
8376 } else {
8377 tcg_gen_add_i32(tmp, tmp, tmp2);
8378 }
8379 tcg_temp_free_i32(tmp2);
8380 /* BUGFIX */
8381 tmp64 = tcg_temp_new_i64();
8382 tcg_gen_ext_i32_i64(tmp64, tmp);
8383 tcg_temp_free_i32(tmp);
8384 gen_addq(s, tmp64, rs, rd);
8385 gen_storeq_reg(s, rs, rd, tmp64);
8386 tcg_temp_free_i64(tmp64);
8387 } else {
8388 if (op & 0x20) {
8389 /* Unsigned 64-bit multiply */
8390 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8391 } else {
8392 if (op & 8) {
8393 /* smlalxy */
8394 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8395 tcg_temp_free_i32(tmp2);
8396 tmp64 = tcg_temp_new_i64();
8397 tcg_gen_ext_i32_i64(tmp64, tmp);
8398 tcg_temp_free_i32(tmp);
8399 } else {
8400 /* Signed 64-bit multiply */
8401 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8402 }
8403 }
8404 if (op & 4) {
8405 /* umaal */
8406 gen_addq_lo(s, tmp64, rs);
8407 gen_addq_lo(s, tmp64, rd);
8408 } else if (op & 0x40) {
8409 /* 64-bit accumulate. */
8410 gen_addq(s, tmp64, rs, rd);
8411 }
8412 gen_storeq_reg(s, rs, rd, tmp64);
8413 tcg_temp_free_i64(tmp64);
8414 }
8415 break;
8416 }
8417 break;
8418 case 6: case 7: case 14: case 15:
8419 /* Coprocessor. */
8420 if (((insn >> 24) & 3) == 3) {
8421 /* Translate into the equivalent ARM encoding. */
8422 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8423 if (disas_neon_data_insn(env, s, insn))
8424 goto illegal_op;
8425 } else {
8426 if (insn & (1 << 28))
8427 goto illegal_op;
8428 if (disas_coproc_insn (env, s, insn))
8429 goto illegal_op;
8430 }
8431 break;
8432 case 8: case 9: case 10: case 11:
8433 if (insn & (1 << 15)) {
8434 /* Branches, misc control. */
8435 if (insn & 0x5000) {
8436 /* Unconditional branch. */
8437 /* signextend(hw1[10:0]) -> offset[31:12]. */
8438 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8439 /* hw2[10:0] -> offset[11:1]. */
8440 offset |= (insn & 0x7ff) << 1;
8441 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8442 offset[24:22] already have the same value because of the
8443 sign extension above. */
8444 offset ^= ((~insn) & (1 << 13)) << 10;
8445 offset ^= ((~insn) & (1 << 11)) << 11;
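                /* This is the Thumb-2 BL/BLX offset formula I1 = NOT(J1 EOR S),
                   I2 = NOT(J2 EOR S): the sign extension above seeded
                   offset[23:22] with S, so XORing with ~J1/~J2 here leaves
                   NOT(J1 EOR S) in offset[23] and NOT(J2 EOR S) in offset[22]. */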
8446
8447 if (insn & (1 << 14)) {
8448 /* Branch and link. */
8449 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8450 }
8451
8452 offset += s->pc;
8453 if (insn & (1 << 12)) {
8454 /* b/bl */
8455 gen_jmp(s, offset);
8456 } else {
8457 /* blx */
8458 offset &= ~(uint32_t)2;
8459 /* thumb2 bx, no need to check */
8460 gen_bx_im(s, offset);
8461 }
8462 } else if (((insn >> 23) & 7) == 7) {
8463 /* Misc control */
8464 if (insn & (1 << 13))
8465 goto illegal_op;
8466
8467 if (insn & (1 << 26)) {
8468 /* Secure monitor call (v6Z) */
8469 goto illegal_op; /* not implemented. */
8470 } else {
8471 op = (insn >> 20) & 7;
8472 switch (op) {
8473 case 0: /* msr cpsr. */
8474 if (IS_M(env)) {
8475 tmp = load_reg(s, rn);
8476 addr = tcg_const_i32(insn & 0xff);
8477 gen_helper_v7m_msr(cpu_env, addr, tmp);
8478 tcg_temp_free_i32(addr);
8479 tcg_temp_free_i32(tmp);
8480 gen_lookup_tb(s);
8481 break;
8482 }
8483 /* fall through */
8484 case 1: /* msr spsr. */
8485 if (IS_M(env))
8486 goto illegal_op;
8487 tmp = load_reg(s, rn);
8488 if (gen_set_psr(s,
8489 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8490 op == 1, tmp))
8491 goto illegal_op;
8492 break;
8493 case 2: /* cps, nop-hint. */
8494 if (((insn >> 8) & 7) == 0) {
8495 gen_nop_hint(s, insn & 0xff);
8496 }
8497 /* Implemented as NOP in user mode. */
8498 if (IS_USER(s))
8499 break;
8500 offset = 0;
8501 imm = 0;
8502 if (insn & (1 << 10)) {
8503 if (insn & (1 << 7))
8504 offset |= CPSR_A;
8505 if (insn & (1 << 6))
8506 offset |= CPSR_I;
8507 if (insn & (1 << 5))
8508 offset |= CPSR_F;
8509 if (insn & (1 << 9))
8510 imm = CPSR_A | CPSR_I | CPSR_F;
8511 }
8512 if (insn & (1 << 8)) {
8513 offset |= 0x1f;
8514 imm |= (insn & 0x1f);
8515 }
8516 if (offset) {
8517 gen_set_psr_im(s, offset, 0, imm);
8518 }
8519 break;
8520 case 3: /* Special control operations. */
8521 ARCH(7);
8522 op = (insn >> 4) & 0xf;
8523 switch (op) {
8524 case 2: /* clrex */
8525 gen_clrex(s);
8526 break;
8527 case 4: /* dsb */
8528 case 5: /* dmb */
8529 case 6: /* isb */
8530 /* These execute as NOPs. */
8531 break;
8532 default:
8533 goto illegal_op;
8534 }
8535 break;
8536 case 4: /* bxj */
8537 /* Trivial implementation equivalent to bx. */
8538 tmp = load_reg(s, rn);
8539 gen_bx(s, tmp);
8540 break;
8541 case 5: /* Exception return. */
8542 if (IS_USER(s)) {
8543 goto illegal_op;
8544 }
8545 if (rn != 14 || rd != 15) {
8546 goto illegal_op;
8547 }
8548 tmp = load_reg(s, rn);
8549 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8550 gen_exception_return(s, tmp);
8551 break;
8552 case 6: /* mrs cpsr. */
8553 tmp = tcg_temp_new_i32();
8554 if (IS_M(env)) {
8555 addr = tcg_const_i32(insn & 0xff);
8556 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8557 tcg_temp_free_i32(addr);
8558 } else {
8559 gen_helper_cpsr_read(tmp);
8560 }
8561 store_reg(s, rd, tmp);
8562 break;
8563 case 7: /* mrs spsr. */
8564 /* Not accessible in user mode. */
8565 if (IS_USER(s) || IS_M(env))
8566 goto illegal_op;
8567 tmp = load_cpu_field(spsr);
8568 store_reg(s, rd, tmp);
8569 break;
8570 }
8571 }
8572 } else {
8573 /* Conditional branch. */
8574 op = (insn >> 22) & 0xf;
8575 /* Generate a conditional jump to next instruction. */
8576 s->condlabel = gen_new_label();
8577 gen_test_cc(op ^ 1, s->condlabel);
8578 s->condjmp = 1;
8579
8580 /* offset[11:1] = insn[10:0] */
8581 offset = (insn & 0x7ff) << 1;
8582 /* offset[17:12] = insn[21:16]. */
8583 offset |= (insn & 0x003f0000) >> 4;
8584 /* offset[31:20] = insn[26]. */
8585 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8586 /* offset[18] = insn[13]. */
8587 offset |= (insn & (1 << 13)) << 5;
8588 /* offset[19] = insn[11]. */
8589 offset |= (insn & (1 << 11)) << 8;
8590
8591 /* jump to the offset */
8592 gen_jmp(s, s->pc + offset);
8593 }
8594 } else {
8595 /* Data processing immediate. */
8596 if (insn & (1 << 25)) {
8597 if (insn & (1 << 24)) {
8598 if (insn & (1 << 20))
8599 goto illegal_op;
8600 /* Bitfield/Saturate. */
8601 op = (insn >> 21) & 7;
8602 imm = insn & 0x1f;
8603 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
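                /* shift reassembles the split imm3:imm2 field
                   (insn[14:12]:insn[7:6]) into the 5-bit lsb/shift amount
                   used by the bitfield and saturate ops below. */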
8604 if (rn == 15) {
8605 tmp = tcg_temp_new_i32();
8606 tcg_gen_movi_i32(tmp, 0);
8607 } else {
8608 tmp = load_reg(s, rn);
8609 }
8610 switch (op) {
8611 case 2: /* Signed bitfield extract. */
8612 imm++;
8613 if (shift + imm > 32)
8614 goto illegal_op;
8615 if (imm < 32)
8616 gen_sbfx(tmp, shift, imm);
8617 break;
8618 case 6: /* Unsigned bitfield extract. */
8619 imm++;
8620 if (shift + imm > 32)
8621 goto illegal_op;
8622 if (imm < 32)
8623 gen_ubfx(tmp, shift, (1u << imm) - 1);
8624 break;
8625 case 3: /* Bitfield insert/clear. */
8626 if (imm < shift)
8627 goto illegal_op;
8628 imm = imm + 1 - shift;
8629 if (imm != 32) {
8630 tmp2 = load_reg(s, rd);
8631 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8632 tcg_temp_free_i32(tmp2);
8633 }
8634 break;
8635 case 7:
8636 goto illegal_op;
8637 default: /* Saturate. */
8638 if (shift) {
8639 if (op & 1)
8640 tcg_gen_sari_i32(tmp, tmp, shift);
8641 else
8642 tcg_gen_shli_i32(tmp, tmp, shift);
8643 }
8644 tmp2 = tcg_const_i32(imm);
8645 if (op & 4) {
8646 /* Unsigned. */
8647 if ((op & 1) && shift == 0)
8648 gen_helper_usat16(tmp, tmp, tmp2);
8649 else
8650 gen_helper_usat(tmp, tmp, tmp2);
8651 } else {
8652 /* Signed. */
8653 if ((op & 1) && shift == 0)
8654 gen_helper_ssat16(tmp, tmp, tmp2);
8655 else
8656 gen_helper_ssat(tmp, tmp, tmp2);
8657 }
8658 tcg_temp_free_i32(tmp2);
8659 break;
8660 }
8661 store_reg(s, rd, tmp);
8662 } else {
8663 imm = ((insn & 0x04000000) >> 15)
8664 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8665 if (insn & (1 << 22)) {
8666 /* 16-bit immediate. */
8667 imm |= (insn >> 4) & 0xf000;
8668 if (insn & (1 << 23)) {
8669 /* movt */
8670 tmp = load_reg(s, rd);
8671 tcg_gen_ext16u_i32(tmp, tmp);
8672 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8673 } else {
8674 /* movw */
8675 tmp = tcg_temp_new_i32();
8676 tcg_gen_movi_i32(tmp, imm);
8677 }
8678 } else {
8679 /* Add/sub 12-bit immediate. */
8680 if (rn == 15) {
8681 offset = s->pc & ~(uint32_t)3;
8682 if (insn & (1 << 23))
8683 offset -= imm;
8684 else
8685 offset += imm;
8686 tmp = tcg_temp_new_i32();
8687 tcg_gen_movi_i32(tmp, offset);
8688 } else {
8689 tmp = load_reg(s, rn);
8690 if (insn & (1 << 23))
8691 tcg_gen_subi_i32(tmp, tmp, imm);
8692 else
8693 tcg_gen_addi_i32(tmp, tmp, imm);
8694 }
8695 }
8696 store_reg(s, rd, tmp);
8697 }
8698 } else {
8699 int shifter_out = 0;
8700 /* modified 12-bit immediate. */
8701 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8702 imm = (insn & 0xff);
8703 switch (shift) {
8704 case 0: /* XY */
8705 /* Nothing to do. */
8706 break;
8707 case 1: /* 00XY00XY */
8708 imm |= imm << 16;
8709 break;
8710 case 2: /* XY00XY00 */
8711 imm |= imm << 16;
8712 imm <<= 8;
8713 break;
8714 case 3: /* XYXYXYXY */
8715 imm |= imm << 16;
8716 imm |= imm << 8;
8717 break;
8718 default: /* Rotated constant. */
8719 shift = (shift << 1) | (imm >> 7);
8720 imm |= 0x80;
8721 imm = imm << (32 - shift);
8722 shifter_out = 1;
8723 break;
8724 }
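                /* Illustrative expansions, assuming imm8 = 0xAB: case 1 gives
                   0x00AB00AB, case 2 gives 0xAB00AB00, case 3 gives 0xABABABAB.
                   The rotated-constant case is ROR(imm8 | 0x80, shift) with
                   shift in 8..31, e.g. i:imm3 = 4 with imm8 = 0x2A -> 0xAA000000. */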
8725 tmp2 = tcg_temp_new_i32();
8726 tcg_gen_movi_i32(tmp2, imm);
8727 rn = (insn >> 16) & 0xf;
8728 if (rn == 15) {
8729 tmp = tcg_temp_new_i32();
8730 tcg_gen_movi_i32(tmp, 0);
8731 } else {
8732 tmp = load_reg(s, rn);
8733 }
8734 op = (insn >> 21) & 0xf;
8735 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8736 shifter_out, tmp, tmp2))
8737 goto illegal_op;
8738 tcg_temp_free_i32(tmp2);
8739 rd = (insn >> 8) & 0xf;
8740 if (rd != 15) {
8741 store_reg(s, rd, tmp);
8742 } else {
8743 tcg_temp_free_i32(tmp);
8744 }
8745 }
8746 }
8747 break;
8748 case 12: /* Load/store single data item. */
8749 {
8750 int postinc = 0;
8751 int writeback = 0;
8752 int user;
8753 if ((insn & 0x01100000) == 0x01000000) {
8754 if (disas_neon_ls_insn(env, s, insn))
8755 goto illegal_op;
8756 break;
8757 }
8758 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
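            /* op[1:0] is the access size (insn[22:21]); op[2] (insn[24])
               selects a sign-extending load. */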
8759 if (rs == 15) {
8760 if (!(insn & (1 << 20))) {
8761 goto illegal_op;
8762 }
8763 if (op != 2) {
8764 /* Byte or halfword load space with dest == r15: memory hints.
8765 * Catch them early so we don't emit pointless addressing code.
8766 * This space is a mix of:
8767 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8768 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8769 * cores)
8770 * unallocated hints, which must be treated as NOPs
8771 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8772 * which is easiest for the decoding logic
8773 * Some space which must UNDEF
8774 */
8775 int op1 = (insn >> 23) & 3;
8776 int op2 = (insn >> 6) & 0x3f;
8777 if (op & 2) {
8778 goto illegal_op;
8779 }
8780 if (rn == 15) {
8781 /* UNPREDICTABLE or unallocated hint */
8782 return 0;
8783 }
8784 if (op1 & 1) {
8785 return 0; /* PLD* or unallocated hint */
8786 }
8787 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8788 return 0; /* PLD* or unallocated hint */
8789 }
8790 /* UNDEF space, or an UNPREDICTABLE */
8791 return 1;
8792 }
8793 }
8794 user = IS_USER(s);
8795 if (rn == 15) {
8796 addr = tcg_temp_new_i32();
8797 /* PC relative. */
8798 /* s->pc has already been incremented by 4. */
8799 imm = s->pc & 0xfffffffc;
8800 if (insn & (1 << 23))
8801 imm += insn & 0xfff;
8802 else
8803 imm -= insn & 0xfff;
8804 tcg_gen_movi_i32(addr, imm);
8805 } else {
8806 addr = load_reg(s, rn);
8807 if (insn & (1 << 23)) {
8808 /* Positive offset. */
8809 imm = insn & 0xfff;
8810 tcg_gen_addi_i32(addr, addr, imm);
8811 } else {
8812 imm = insn & 0xff;
8813 switch ((insn >> 8) & 0xf) {
8814 case 0x0: /* Shifted Register. */
8815 shift = (insn >> 4) & 0xf;
8816 if (shift > 3) {
8817 tcg_temp_free_i32(addr);
8818 goto illegal_op;
8819 }
8820 tmp = load_reg(s, rm);
8821 if (shift)
8822 tcg_gen_shli_i32(tmp, tmp, shift);
8823 tcg_gen_add_i32(addr, addr, tmp);
8824 tcg_temp_free_i32(tmp);
8825 break;
8826 case 0xc: /* Negative offset. */
8827 tcg_gen_addi_i32(addr, addr, -imm);
8828 break;
8829 case 0xe: /* User privilege. */
8830 tcg_gen_addi_i32(addr, addr, imm);
8831 user = 1;
8832 break;
8833 case 0x9: /* Post-decrement. */
8834 imm = -imm;
8835 /* Fall through. */
8836 case 0xb: /* Post-increment. */
8837 postinc = 1;
8838 writeback = 1;
8839 break;
8840 case 0xd: /* Pre-decrement. */
8841 imm = -imm;
8842 /* Fall through. */
8843 case 0xf: /* Pre-increment. */
8844 tcg_gen_addi_i32(addr, addr, imm);
8845 writeback = 1;
8846 break;
8847 default:
8848 tcg_temp_free_i32(addr);
8849 goto illegal_op;
8850 }
8851 }
8852 }
8853 if (insn & (1 << 20)) {
8854 /* Load. */
8855 switch (op) {
8856 case 0: tmp = gen_ld8u(addr, user); break;
8857 case 4: tmp = gen_ld8s(addr, user); break;
8858 case 1: tmp = gen_ld16u(addr, user); break;
8859 case 5: tmp = gen_ld16s(addr, user); break;
8860 case 2: tmp = gen_ld32(addr, user); break;
8861 default:
8862 tcg_temp_free_i32(addr);
8863 goto illegal_op;
8864 }
8865 if (rs == 15) {
8866 gen_bx(s, tmp);
8867 } else {
8868 store_reg(s, rs, tmp);
8869 }
8870 } else {
8871 /* Store. */
8872 tmp = load_reg(s, rs);
8873 switch (op) {
8874 case 0: gen_st8(tmp, addr, user); break;
8875 case 1: gen_st16(tmp, addr, user); break;
8876 case 2: gen_st32(tmp, addr, user); break;
8877 default:
8878 tcg_temp_free_i32(addr);
8879 goto illegal_op;
8880 }
8881 }
8882 if (postinc)
8883 tcg_gen_addi_i32(addr, addr, imm);
8884 if (writeback) {
8885 store_reg(s, rn, addr);
8886 } else {
8887 tcg_temp_free_i32(addr);
8888 }
8889 }
8890 break;
8891 default:
8892 goto illegal_op;
8893 }
8894 return 0;
8895 illegal_op:
8896 return 1;
8897 }
8898
8899 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8900 {
8901 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8902 int32_t offset;
8903 int i;
8904 TCGv tmp;
8905 TCGv tmp2;
8906 TCGv addr;
8907
8908 if (s->condexec_mask) {
8909 cond = s->condexec_cond;
8910 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8911 s->condlabel = gen_new_label();
8912 gen_test_cc(cond ^ 1, s->condlabel);
8913 s->condjmp = 1;
8914 }
8915 }
8916
8917 insn = lduw_code(s->pc);
8918 s->pc += 2;
8919
8920 switch (insn >> 12) {
8921 case 0: case 1:
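        /* shift by immediate (lsl/lsr/asr), or add/subtract when op == 3 */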
8922
8923 rd = insn & 7;
8924 op = (insn >> 11) & 3;
8925 if (op == 3) {
8926 /* add/subtract */
8927 rn = (insn >> 3) & 7;
8928 tmp = load_reg(s, rn);
8929 if (insn & (1 << 10)) {
8930 /* immediate */
8931 tmp2 = tcg_temp_new_i32();
8932 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8933 } else {
8934 /* reg */
8935 rm = (insn >> 6) & 7;
8936 tmp2 = load_reg(s, rm);
8937 }
8938 if (insn & (1 << 9)) {
8939 if (s->condexec_mask)
8940 tcg_gen_sub_i32(tmp, tmp, tmp2);
8941 else
8942 gen_helper_sub_cc(tmp, tmp, tmp2);
8943 } else {
8944 if (s->condexec_mask)
8945 tcg_gen_add_i32(tmp, tmp, tmp2);
8946 else
8947 gen_helper_add_cc(tmp, tmp, tmp2);
8948 }
8949 tcg_temp_free_i32(tmp2);
8950 store_reg(s, rd, tmp);
8951 } else {
8952 /* shift immediate */
8953 rm = (insn >> 3) & 7;
8954 shift = (insn >> 6) & 0x1f;
8955 tmp = load_reg(s, rm);
8956 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8957 if (!s->condexec_mask)
8958 gen_logic_CC(tmp);
8959 store_reg(s, rd, tmp);
8960 }
8961 break;
8962 case 2: case 3:
8963 /* arithmetic large immediate */
8964 op = (insn >> 11) & 3;
8965 rd = (insn >> 8) & 0x7;
8966 if (op == 0) { /* mov */
8967 tmp = tcg_temp_new_i32();
8968 tcg_gen_movi_i32(tmp, insn & 0xff);
8969 if (!s->condexec_mask)
8970 gen_logic_CC(tmp);
8971 store_reg(s, rd, tmp);
8972 } else {
8973 tmp = load_reg(s, rd);
8974 tmp2 = tcg_temp_new_i32();
8975 tcg_gen_movi_i32(tmp2, insn & 0xff);
8976 switch (op) {
8977 case 1: /* cmp */
8978 gen_helper_sub_cc(tmp, tmp, tmp2);
8979 tcg_temp_free_i32(tmp);
8980 tcg_temp_free_i32(tmp2);
8981 break;
8982 case 2: /* add */
8983 if (s->condexec_mask)
8984 tcg_gen_add_i32(tmp, tmp, tmp2);
8985 else
8986 gen_helper_add_cc(tmp, tmp, tmp2);
8987 tcg_temp_free_i32(tmp2);
8988 store_reg(s, rd, tmp);
8989 break;
8990 case 3: /* sub */
8991 if (s->condexec_mask)
8992 tcg_gen_sub_i32(tmp, tmp, tmp2);
8993 else
8994 gen_helper_sub_cc(tmp, tmp, tmp2);
8995 tcg_temp_free_i32(tmp2);
8996 store_reg(s, rd, tmp);
8997 break;
8998 }
8999 }
9000 break;
9001 case 4:
9002 if (insn & (1 << 11)) {
9003 rd = (insn >> 8) & 7;
9004 /* load pc-relative. Bit 1 of PC is ignored. */
9005 val = s->pc + 2 + ((insn & 0xff) * 4);
9006 val &= ~(uint32_t)2;
9007 addr = tcg_temp_new_i32();
9008 tcg_gen_movi_i32(addr, val);
9009 tmp = gen_ld32(addr, IS_USER(s));
9010 tcg_temp_free_i32(addr);
9011 store_reg(s, rd, tmp);
9012 break;
9013 }
9014 if (insn & (1 << 10)) {
9015 /* data processing extended or blx */
9016 rd = (insn & 7) | ((insn >> 4) & 8);
9017 rm = (insn >> 3) & 0xf;
9018 op = (insn >> 8) & 3;
9019 switch (op) {
9020 case 0: /* add */
9021 tmp = load_reg(s, rd);
9022 tmp2 = load_reg(s, rm);
9023 tcg_gen_add_i32(tmp, tmp, tmp2);
9024 tcg_temp_free_i32(tmp2);
9025 store_reg(s, rd, tmp);
9026 break;
9027 case 1: /* cmp */
9028 tmp = load_reg(s, rd);
9029 tmp2 = load_reg(s, rm);
9030 gen_helper_sub_cc(tmp, tmp, tmp2);
9031 tcg_temp_free_i32(tmp2);
9032 tcg_temp_free_i32(tmp);
9033 break;
9034 case 2: /* mov/cpy */
9035 tmp = load_reg(s, rm);
9036 store_reg(s, rd, tmp);
9037 break;
9038 case 3:/* branch [and link] exchange thumb register */
9039 tmp = load_reg(s, rm);
9040 if (insn & (1 << 7)) {
9041 ARCH(5);
9042 val = (uint32_t)s->pc | 1;
9043 tmp2 = tcg_temp_new_i32();
9044 tcg_gen_movi_i32(tmp2, val);
9045 store_reg(s, 14, tmp2);
9046 }
9047 /* already thumb, no need to check */
9048 gen_bx(s, tmp);
9049 break;
9050 }
9051 break;
9052 }
9053
9054 /* data processing register */
9055 rd = insn & 7;
9056 rm = (insn >> 3) & 7;
9057 op = (insn >> 6) & 0xf;
9058 if (op == 2 || op == 3 || op == 4 || op == 7) {
9059 /* the shift/rotate ops want the operands backwards */
9060 val = rm;
9061 rm = rd;
9062 rd = val;
9063 val = 1;
9064 } else {
9065 val = 0;
9066 }
9067
9068 if (op == 9) { /* neg */
9069 tmp = tcg_temp_new_i32();
9070 tcg_gen_movi_i32(tmp, 0);
9071 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9072 tmp = load_reg(s, rd);
9073 } else {
9074 TCGV_UNUSED(tmp);
9075 }
9076
9077 tmp2 = load_reg(s, rm);
9078 switch (op) {
9079 case 0x0: /* and */
9080 tcg_gen_and_i32(tmp, tmp, tmp2);
9081 if (!s->condexec_mask)
9082 gen_logic_CC(tmp);
9083 break;
9084 case 0x1: /* eor */
9085 tcg_gen_xor_i32(tmp, tmp, tmp2);
9086 if (!s->condexec_mask)
9087 gen_logic_CC(tmp);
9088 break;
9089 case 0x2: /* lsl */
9090 if (s->condexec_mask) {
9091 gen_helper_shl(tmp2, tmp2, tmp);
9092 } else {
9093 gen_helper_shl_cc(tmp2, tmp2, tmp);
9094 gen_logic_CC(tmp2);
9095 }
9096 break;
9097 case 0x3: /* lsr */
9098 if (s->condexec_mask) {
9099 gen_helper_shr(tmp2, tmp2, tmp);
9100 } else {
9101 gen_helper_shr_cc(tmp2, tmp2, tmp);
9102 gen_logic_CC(tmp2);
9103 }
9104 break;
9105 case 0x4: /* asr */
9106 if (s->condexec_mask) {
9107 gen_helper_sar(tmp2, tmp2, tmp);
9108 } else {
9109 gen_helper_sar_cc(tmp2, tmp2, tmp);
9110 gen_logic_CC(tmp2);
9111 }
9112 break;
9113 case 0x5: /* adc */
9114 if (s->condexec_mask)
9115 gen_adc(tmp, tmp2);
9116 else
9117 gen_helper_adc_cc(tmp, tmp, tmp2);
9118 break;
9119 case 0x6: /* sbc */
9120 if (s->condexec_mask)
9121 gen_sub_carry(tmp, tmp, tmp2);
9122 else
9123 gen_helper_sbc_cc(tmp, tmp, tmp2);
9124 break;
9125 case 0x7: /* ror */
9126 if (s->condexec_mask) {
9127 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9128 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9129 } else {
9130 gen_helper_ror_cc(tmp2, tmp2, tmp);
9131 gen_logic_CC(tmp2);
9132 }
9133 break;
9134 case 0x8: /* tst */
9135 tcg_gen_and_i32(tmp, tmp, tmp2);
9136 gen_logic_CC(tmp);
9137 rd = 16;
9138 break;
9139 case 0x9: /* neg */
9140 if (s->condexec_mask)
9141 tcg_gen_neg_i32(tmp, tmp2);
9142 else
9143 gen_helper_sub_cc(tmp, tmp, tmp2);
9144 break;
9145 case 0xa: /* cmp */
9146 gen_helper_sub_cc(tmp, tmp, tmp2);
9147 rd = 16;
9148 break;
9149 case 0xb: /* cmn */
9150 gen_helper_add_cc(tmp, tmp, tmp2);
9151 rd = 16;
9152 break;
9153 case 0xc: /* orr */
9154 tcg_gen_or_i32(tmp, tmp, tmp2);
9155 if (!s->condexec_mask)
9156 gen_logic_CC(tmp);
9157 break;
9158 case 0xd: /* mul */
9159 tcg_gen_mul_i32(tmp, tmp, tmp2);
9160 if (!s->condexec_mask)
9161 gen_logic_CC(tmp);
9162 break;
9163 case 0xe: /* bic */
9164 tcg_gen_andc_i32(tmp, tmp, tmp2);
9165 if (!s->condexec_mask)
9166 gen_logic_CC(tmp);
9167 break;
9168 case 0xf: /* mvn */
9169 tcg_gen_not_i32(tmp2, tmp2);
9170 if (!s->condexec_mask)
9171 gen_logic_CC(tmp2);
9172 val = 1;
9173 rm = rd;
9174 break;
9175 }
9176 if (rd != 16) {
9177 if (val) {
9178 store_reg(s, rm, tmp2);
9179 if (op != 0xf)
9180 tcg_temp_free_i32(tmp);
9181 } else {
9182 store_reg(s, rd, tmp);
9183 tcg_temp_free_i32(tmp2);
9184 }
9185 } else {
9186 tcg_temp_free_i32(tmp);
9187 tcg_temp_free_i32(tmp2);
9188 }
9189 break;
9190
9191 case 5:
9192 /* load/store register offset. */
9193 rd = insn & 7;
9194 rn = (insn >> 3) & 7;
9195 rm = (insn >> 6) & 7;
9196 op = (insn >> 9) & 7;
9197 addr = load_reg(s, rn);
9198 tmp = load_reg(s, rm);
9199 tcg_gen_add_i32(addr, addr, tmp);
9200 tcg_temp_free_i32(tmp);
9201
9202 if (op < 3) /* store */
9203 tmp = load_reg(s, rd);
9204
9205 switch (op) {
9206 case 0: /* str */
9207 gen_st32(tmp, addr, IS_USER(s));
9208 break;
9209 case 1: /* strh */
9210 gen_st16(tmp, addr, IS_USER(s));
9211 break;
9212 case 2: /* strb */
9213 gen_st8(tmp, addr, IS_USER(s));
9214 break;
9215 case 3: /* ldrsb */
9216 tmp = gen_ld8s(addr, IS_USER(s));
9217 break;
9218 case 4: /* ldr */
9219 tmp = gen_ld32(addr, IS_USER(s));
9220 break;
9221 case 5: /* ldrh */
9222 tmp = gen_ld16u(addr, IS_USER(s));
9223 break;
9224 case 6: /* ldrb */
9225 tmp = gen_ld8u(addr, IS_USER(s));
9226 break;
9227 case 7: /* ldrsh */
9228 tmp = gen_ld16s(addr, IS_USER(s));
9229 break;
9230 }
9231 if (op >= 3) /* load */
9232 store_reg(s, rd, tmp);
9233 tcg_temp_free_i32(addr);
9234 break;
9235
9236 case 6:
9237 /* load/store word immediate offset */
9238 rd = insn & 7;
9239 rn = (insn >> 3) & 7;
9240 addr = load_reg(s, rn);
9241 val = (insn >> 4) & 0x7c;
9242 tcg_gen_addi_i32(addr, addr, val);
9243
9244 if (insn & (1 << 11)) {
9245 /* load */
9246 tmp = gen_ld32(addr, IS_USER(s));
9247 store_reg(s, rd, tmp);
9248 } else {
9249 /* store */
9250 tmp = load_reg(s, rd);
9251 gen_st32(tmp, addr, IS_USER(s));
9252 }
9253 tcg_temp_free_i32(addr);
9254 break;
9255
9256 case 7:
9257 /* load/store byte immediate offset */
9258 rd = insn & 7;
9259 rn = (insn >> 3) & 7;
9260 addr = load_reg(s, rn);
9261 val = (insn >> 6) & 0x1f;
9262 tcg_gen_addi_i32(addr, addr, val);
9263
9264 if (insn & (1 << 11)) {
9265 /* load */
9266 tmp = gen_ld8u(addr, IS_USER(s));
9267 store_reg(s, rd, tmp);
9268 } else {
9269 /* store */
9270 tmp = load_reg(s, rd);
9271 gen_st8(tmp, addr, IS_USER(s));
9272 }
9273 tcg_temp_free_i32(addr);
9274 break;
9275
9276 case 8:
9277 /* load/store halfword immediate offset */
9278 rd = insn & 7;
9279 rn = (insn >> 3) & 7;
9280 addr = load_reg(s, rn);
9281 val = (insn >> 5) & 0x3e;
9282 tcg_gen_addi_i32(addr, addr, val);
9283
9284 if (insn & (1 << 11)) {
9285 /* load */
9286 tmp = gen_ld16u(addr, IS_USER(s));
9287 store_reg(s, rd, tmp);
9288 } else {
9289 /* store */
9290 tmp = load_reg(s, rd);
9291 gen_st16(tmp, addr, IS_USER(s));
9292 }
9293 tcg_temp_free_i32(addr);
9294 break;
9295
9296 case 9:
9297 /* load/store from stack */
9298 rd = (insn >> 8) & 7;
9299 addr = load_reg(s, 13);
9300 val = (insn & 0xff) * 4;
9301 tcg_gen_addi_i32(addr, addr, val);
9302
9303 if (insn & (1 << 11)) {
9304 /* load */
9305 tmp = gen_ld32(addr, IS_USER(s));
9306 store_reg(s, rd, tmp);
9307 } else {
9308 /* store */
9309 tmp = load_reg(s, rd);
9310 gen_st32(tmp, addr, IS_USER(s));
9311 }
9312 tcg_temp_free_i32(addr);
9313 break;
9314
9315 case 10:
9316 /* add to high reg */
9317 rd = (insn >> 8) & 7;
9318 if (insn & (1 << 11)) {
9319 /* SP */
9320 tmp = load_reg(s, 13);
9321 } else {
9322 /* PC. bit 1 is ignored. */
9323 tmp = tcg_temp_new_i32();
9324 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9325 }
9326 val = (insn & 0xff) * 4;
9327 tcg_gen_addi_i32(tmp, tmp, val);
9328 store_reg(s, rd, tmp);
9329 break;
9330
9331 case 11:
9332 /* misc */
9333 op = (insn >> 8) & 0xf;
9334 switch (op) {
9335 case 0:
9336 /* adjust stack pointer */
9337 tmp = load_reg(s, 13);
9338 val = (insn & 0x7f) * 4;
9339 if (insn & (1 << 7))
9340 val = -(int32_t)val;
9341 tcg_gen_addi_i32(tmp, tmp, val);
9342 store_reg(s, 13, tmp);
9343 break;
9344
9345 case 2: /* sign/zero extend. */
9346 ARCH(6);
9347 rd = insn & 7;
9348 rm = (insn >> 3) & 7;
9349 tmp = load_reg(s, rm);
9350 switch ((insn >> 6) & 3) {
9351 case 0: gen_sxth(tmp); break;
9352 case 1: gen_sxtb(tmp); break;
9353 case 2: gen_uxth(tmp); break;
9354 case 3: gen_uxtb(tmp); break;
9355 }
9356 store_reg(s, rd, tmp);
9357 break;
9358 case 4: case 5: case 0xc: case 0xd:
9359 /* push/pop */
9360 addr = load_reg(s, 13);
9361 if (insn & (1 << 8))
9362 offset = 4;
9363 else
9364 offset = 0;
9365 for (i = 0; i < 8; i++) {
9366 if (insn & (1 << i))
9367 offset += 4;
9368 }
9369 if ((insn & (1 << 11)) == 0) {
9370 tcg_gen_addi_i32(addr, addr, -offset);
9371 }
9372 for (i = 0; i < 8; i++) {
9373 if (insn & (1 << i)) {
9374 if (insn & (1 << 11)) {
9375 /* pop */
9376 tmp = gen_ld32(addr, IS_USER(s));
9377 store_reg(s, i, tmp);
9378 } else {
9379 /* push */
9380 tmp = load_reg(s, i);
9381 gen_st32(tmp, addr, IS_USER(s));
9382 }
9383 /* advance to the next address. */
9384 tcg_gen_addi_i32(addr, addr, 4);
9385 }
9386 }
9387 TCGV_UNUSED(tmp);
9388 if (insn & (1 << 8)) {
9389 if (insn & (1 << 11)) {
9390 /* pop pc */
9391 tmp = gen_ld32(addr, IS_USER(s));
9392 /* don't set the pc until the rest of the instruction
9393 has completed */
9394 } else {
9395 /* push lr */
9396 tmp = load_reg(s, 14);
9397 gen_st32(tmp, addr, IS_USER(s));
9398 }
9399 tcg_gen_addi_i32(addr, addr, 4);
9400 }
9401 if ((insn & (1 << 11)) == 0) {
9402 tcg_gen_addi_i32(addr, addr, -offset);
9403 }
9404 /* write back the new stack pointer */
9405 store_reg(s, 13, addr);
9406 /* set the new PC value */
9407 if ((insn & 0x0900) == 0x0900) {
9408 store_reg_from_load(env, s, 15, tmp);
9409 }
9410 break;
9411
9412 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9413 rm = insn & 7;
9414 tmp = load_reg(s, rm);
9415 s->condlabel = gen_new_label();
9416 s->condjmp = 1;
9417 if (insn & (1 << 11))
9418 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9419 else
9420 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9421 tcg_temp_free_i32(tmp);
9422 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
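            /* CB{N}Z offset is i:imm5:'0' -> offset[6:1]; it is zero-extended,
               so these branches only go forwards. */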
9423 val = (uint32_t)s->pc + 2;
9424 val += offset;
9425 gen_jmp(s, val);
9426 break;
9427
9428 case 15: /* IT, nop-hint. */
9429 if ((insn & 0xf) == 0) {
9430 gen_nop_hint(s, (insn >> 4) & 0xf);
9431 break;
9432 }
9433 /* If Then. */
9434 s->condexec_cond = (insn >> 4) & 0xe;
9435 s->condexec_mask = insn & 0x1f;
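            /* The low bit of firstcond is carried in the top bit of
               condexec_mask; the translation loop shifts the mask left after
               each insn and folds that bit back into condexec_cond, so the
               first insn inside the IT block sees the full 4-bit condition. */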
9436 /* No actual code generated for this insn, just set up state. */
9437 break;
9438
9439 case 0xe: /* bkpt */
9440 ARCH(5);
9441 gen_exception_insn(s, 2, EXCP_BKPT);
9442 break;
9443
9444 case 0xa: /* rev */
9445 ARCH(6);
9446 rn = (insn >> 3) & 0x7;
9447 rd = insn & 0x7;
9448 tmp = load_reg(s, rn);
9449 switch ((insn >> 6) & 3) {
9450 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9451 case 1: gen_rev16(tmp); break;
9452 case 3: gen_revsh(tmp); break;
9453 default: goto illegal_op;
9454 }
9455 store_reg(s, rd, tmp);
9456 break;
9457
9458 case 6: /* cps */
9459 ARCH(6);
9460 if (IS_USER(s))
9461 break;
9462 if (IS_M(env)) {
9463 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9464 /* PRIMASK */
9465 if (insn & 1) {
9466 addr = tcg_const_i32(16);
9467 gen_helper_v7m_msr(cpu_env, addr, tmp);
9468 tcg_temp_free_i32(addr);
9469 }
9470 /* FAULTMASK */
9471 if (insn & 2) {
9472 addr = tcg_const_i32(17);
9473 gen_helper_v7m_msr(cpu_env, addr, tmp);
9474 tcg_temp_free_i32(addr);
9475 }
9476 tcg_temp_free_i32(tmp);
9477 gen_lookup_tb(s);
9478 } else {
9479 if (insn & (1 << 4))
9480 shift = CPSR_A | CPSR_I | CPSR_F;
9481 else
9482 shift = 0;
9483 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9484 }
9485 break;
9486
9487 default:
9488 goto undef;
9489 }
9490 break;
9491
9492 case 12:
9493 {
9494 /* load/store multiple */
9495 TCGv loaded_var;
9496 TCGV_UNUSED(loaded_var);
9497 rn = (insn >> 8) & 0x7;
9498 addr = load_reg(s, rn);
9499 for (i = 0; i < 8; i++) {
9500 if (insn & (1 << i)) {
9501 if (insn & (1 << 11)) {
9502 /* load */
9503 tmp = gen_ld32(addr, IS_USER(s));
9504 if (i == rn) {
9505 loaded_var = tmp;
9506 } else {
9507 store_reg(s, i, tmp);
9508 }
9509 } else {
9510 /* store */
9511 tmp = load_reg(s, i);
9512 gen_st32(tmp, addr, IS_USER(s));
9513 }
9514 /* advance to the next address */
9515 tcg_gen_addi_i32(addr, addr, 4);
9516 }
9517 }
9518 if ((insn & (1 << rn)) == 0) {
9519 /* base reg not in list: base register writeback */
9520 store_reg(s, rn, addr);
9521 } else {
9522 /* base reg in list: if load, complete it now */
9523 if (insn & (1 << 11)) {
9524 store_reg(s, rn, loaded_var);
9525 }
9526 tcg_temp_free_i32(addr);
9527 }
9528 break;
9529 }
9530 case 13:
9531 /* conditional branch or swi */
9532 cond = (insn >> 8) & 0xf;
9533 if (cond == 0xe)
9534 goto undef;
9535
9536 if (cond == 0xf) {
9537 /* swi */
9538 gen_set_pc_im(s->pc);
9539 s->is_jmp = DISAS_SWI;
9540 break;
9541 }
9542 /* generate a conditional jump to next instruction */
9543 s->condlabel = gen_new_label();
9544 gen_test_cc(cond ^ 1, s->condlabel);
9545 s->condjmp = 1;
9546
9547 /* jump to the offset */
9548 val = (uint32_t)s->pc + 2;
9549 offset = ((int32_t)insn << 24) >> 24;
9550 val += offset << 1;
9551 gen_jmp(s, val);
9552 break;
9553
9554 case 14:
9555 if (insn & (1 << 11)) {
9556 if (disas_thumb2_insn(env, s, insn))
9557 goto undef32;
9558 break;
9559 }
9560 /* unconditional branch */
9561 val = (uint32_t)s->pc;
9562 offset = ((int32_t)insn << 21) >> 21;
9563 val += (offset << 1) + 2;
9564 gen_jmp(s, val);
9565 break;
9566
9567 case 15:
9568 if (disas_thumb2_insn(env, s, insn))
9569 goto undef32;
9570 break;
9571 }
9572 return;
9573 undef32:
9574 gen_exception_insn(s, 4, EXCP_UDEF);
9575 return;
9576 illegal_op:
9577 undef:
9578 gen_exception_insn(s, 2, EXCP_UDEF);
9579 }
9580
9581 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9582 basic block 'tb'. If search_pc is TRUE, also generate PC
9583 information for each intermediate instruction. */
9584 static inline void gen_intermediate_code_internal(CPUState *env,
9585 TranslationBlock *tb,
9586 int search_pc)
9587 {
9588 DisasContext dc1, *dc = &dc1;
9589 CPUBreakpoint *bp;
9590 uint16_t *gen_opc_end;
9591 int j, lj;
9592 target_ulong pc_start;
9593 uint32_t next_page_start;
9594 int num_insns;
9595 int max_insns;
9596
9597 /* generate intermediate code */
9598 pc_start = tb->pc;
9599
9600 dc->tb = tb;
9601
9602 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9603
9604 dc->is_jmp = DISAS_NEXT;
9605 dc->pc = pc_start;
9606 dc->singlestep_enabled = env->singlestep_enabled;
9607 dc->condjmp = 0;
9608 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9609 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9610 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9611 #if !defined(CONFIG_USER_ONLY)
9612 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9613 #endif
9614 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9615 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9616 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9617 cpu_F0s = tcg_temp_new_i32();
9618 cpu_F1s = tcg_temp_new_i32();
9619 cpu_F0d = tcg_temp_new_i64();
9620 cpu_F1d = tcg_temp_new_i64();
9621 cpu_V0 = cpu_F0d;
9622 cpu_V1 = cpu_F1d;
9623 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9624 cpu_M0 = tcg_temp_new_i64();
9625 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9626 lj = -1;
9627 num_insns = 0;
9628 max_insns = tb->cflags & CF_COUNT_MASK;
9629 if (max_insns == 0)
9630 max_insns = CF_COUNT_MASK;
9631
9632 gen_icount_start();
9633
9634 tcg_clear_temp_count();
9635
9636 /* A note on handling of the condexec (IT) bits:
9637 *
9638 * We want to avoid the overhead of having to write the updated condexec
9639 * bits back to the CPUState for every instruction in an IT block. So:
9640 * (1) if the condexec bits are not already zero then we write
9641 * zero back into the CPUState now. This avoids complications trying
9642 * to do it at the end of the block. (For example if we don't do this
9643 * it's hard to identify whether we can safely skip writing condexec
9644 * at the end of the TB, which we definitely want to do for the case
9645 * where a TB doesn't do anything with the IT state at all.)
9646 * (2) if we are going to leave the TB then we call gen_set_condexec()
9647 * which will write the correct value into CPUState if zero is wrong.
9648 * This is done both for leaving the TB at the end, and for leaving
9649 * it because of an exception we know will happen, which is done in
9650 * gen_exception_insn(). The latter is necessary because we need to
9651 * leave the TB with the PC/IT state just prior to execution of the
9652 * instruction which caused the exception.
9653 * (3) if we leave the TB unexpectedly (e.g. a data abort on a load)
9654 * then the CPUState will be wrong and we need to reset it.
9655 * This is handled in the same way as restoration of the
9656 * PC in these situations: we will be called again with search_pc=1
9657 * and generate a mapping of the condexec bits for each PC in
9658 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9659 * this to restore the condexec bits.
9660 *
9661 * Note that there are no instructions which can read the condexec
9662 * bits, and none which can write non-static values to them, so
9663 * we don't need to care about whether CPUState is correct in the
9664 * middle of a TB.
9665 */
9666
9667 /* Reset the conditional execution bits immediately. This avoids
9668 complications trying to do it at the end of the block. */
9669 if (dc->condexec_mask || dc->condexec_cond)
9670 {
9671 TCGv tmp = tcg_temp_new_i32();
9672 tcg_gen_movi_i32(tmp, 0);
9673 store_cpu_field(tmp, condexec_bits);
9674 }
9675 do {
9676 #ifdef CONFIG_USER_ONLY
9677 /* Intercept jump to the magic kernel page. */
9678 if (dc->pc >= 0xffff0000) {
9679 /* We always get here via a jump, so we know we are not in a
9680 conditional execution block. */
9681 gen_exception(EXCP_KERNEL_TRAP);
9682 dc->is_jmp = DISAS_UPDATE;
9683 break;
9684 }
9685 #else
9686 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9687 /* We always get here via a jump, so we know we are not in a
9688 conditional execution block. */
9689 gen_exception(EXCP_EXCEPTION_EXIT);
9690 dc->is_jmp = DISAS_UPDATE;
9691 break;
9692 }
9693 #endif
9694
9695 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9696 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9697 if (bp->pc == dc->pc) {
9698 gen_exception_insn(dc, 0, EXCP_DEBUG);
9699 /* Advance PC so that clearing the breakpoint will
9700 invalidate this TB. */
9701 dc->pc += 2;
9702 goto done_generating;
9703 break;
9704 }
9705 }
9706 }
9707 if (search_pc) {
9708 j = gen_opc_ptr - gen_opc_buf;
9709 if (lj < j) {
9710 lj++;
9711 while (lj < j)
9712 gen_opc_instr_start[lj++] = 0;
9713 }
9714 gen_opc_pc[lj] = dc->pc;
9715 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
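            /* Pack cond/mask in the same layout that restore_state_to_opc()
               (at the end of this file) writes back into env->condexec_bits. */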
9716 gen_opc_instr_start[lj] = 1;
9717 gen_opc_icount[lj] = num_insns;
9718 }
9719
9720 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9721 gen_io_start();
9722
9723 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9724 tcg_gen_debug_insn_start(dc->pc);
9725 }
9726
9727 if (dc->thumb) {
9728 disas_thumb_insn(env, dc);
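            /* Advance the IT state: fold the mask's top bit into the
               condition, shift the mask left, and clear the condition once
               the mask empties (end of the IT block). */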
9729 if (dc->condexec_mask) {
9730 dc->condexec_cond = (dc->condexec_cond & 0xe)
9731 | ((dc->condexec_mask >> 4) & 1);
9732 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9733 if (dc->condexec_mask == 0) {
9734 dc->condexec_cond = 0;
9735 }
9736 }
9737 } else {
9738 disas_arm_insn(env, dc);
9739 }
9740
9741 if (dc->condjmp && !dc->is_jmp) {
9742 gen_set_label(dc->condlabel);
9743 dc->condjmp = 0;
9744 }
9745
9746 if (tcg_check_temp_count()) {
9747 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9748 }
9749
9750 /* Translation stops when a conditional branch is encountered.
9751 * Otherwise the subsequent code could get translated several times.
9752 * Also stop translation when a page boundary is reached. This
9753 * ensures prefetch aborts occur at the right place. */
9754 num_insns++;
9755 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9756 !env->singlestep_enabled &&
9757 !singlestep &&
9758 dc->pc < next_page_start &&
9759 num_insns < max_insns);
9760
9761 if (tb->cflags & CF_LAST_IO) {
9762 if (dc->condjmp) {
9763 /* FIXME: This can theoretically happen with self-modifying
9764 code. */
9765 cpu_abort(env, "IO on conditional branch instruction");
9766 }
9767 gen_io_end();
9768 }
9769
9770 /* At this stage dc->condjmp will only be set when the skipped
9771 instruction was a conditional branch or trap, and the PC has
9772 already been written. */
9773 if (unlikely(env->singlestep_enabled)) {
9774 /* Make sure the pc is updated, and raise a debug exception. */
9775 if (dc->condjmp) {
9776 gen_set_condexec(dc);
9777 if (dc->is_jmp == DISAS_SWI) {
9778 gen_exception(EXCP_SWI);
9779 } else {
9780 gen_exception(EXCP_DEBUG);
9781 }
9782 gen_set_label(dc->condlabel);
9783 }
9784 if (dc->condjmp || !dc->is_jmp) {
9785 gen_set_pc_im(dc->pc);
9786 dc->condjmp = 0;
9787 }
9788 gen_set_condexec(dc);
9789 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9790 gen_exception(EXCP_SWI);
9791 } else {
9792 /* FIXME: Single stepping a WFI insn will not halt
9793 the CPU. */
9794 gen_exception(EXCP_DEBUG);
9795 }
9796 } else {
9797 /* While branches must always occur at the end of an IT block,
9798 there are a few other things that can cause us to terminate
9799 the TB in the middle of an IT block:
9800 - Exception generating instructions (bkpt, swi, undefined).
9801 - Page boundaries.
9802 - Hardware watchpoints.
9803 Hardware breakpoints have already been handled and skip this code.
9804 */
9805 gen_set_condexec(dc);
9806 switch (dc->is_jmp) {
9807 case DISAS_NEXT:
9808 gen_goto_tb(dc, 1, dc->pc);
9809 break;
9810 default:
9811 case DISAS_JUMP:
9812 case DISAS_UPDATE:
9813 /* indicate that the hash table must be used to find the next TB */
9814 tcg_gen_exit_tb(0);
9815 break;
9816 case DISAS_TB_JUMP:
9817 /* nothing more to generate */
9818 break;
9819 case DISAS_WFI:
9820 gen_helper_wfi();
9821 break;
9822 case DISAS_SWI:
9823 gen_exception(EXCP_SWI);
9824 break;
9825 }
9826 if (dc->condjmp) {
9827 gen_set_label(dc->condlabel);
9828 gen_set_condexec(dc);
9829 gen_goto_tb(dc, 1, dc->pc);
9830 dc->condjmp = 0;
9831 }
9832 }
9833
9834 done_generating:
9835 gen_icount_end(tb, num_insns);
9836 *gen_opc_ptr = INDEX_op_end;
9837
9838 #ifdef DEBUG_DISAS
9839 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9840 qemu_log("----------------\n");
9841 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9842 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
9843 qemu_log("\n");
9844 }
9845 #endif
9846 if (search_pc) {
9847 j = gen_opc_ptr - gen_opc_buf;
9848 lj++;
9849 while (lj <= j)
9850 gen_opc_instr_start[lj++] = 0;
9851 } else {
9852 tb->size = dc->pc - pc_start;
9853 tb->icount = num_insns;
9854 }
9855 }
9856
9857 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
9858 {
9859 gen_intermediate_code_internal(env, tb, 0);
9860 }
9861
9862 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
9863 {
9864 gen_intermediate_code_internal(env, tb, 1);
9865 }
9866
9867 static const char *cpu_mode_names[16] = {
9868 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9869 "???", "???", "???", "und", "???", "???", "???", "sys"
9870 };
9871
9872 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
9873 int flags)
9874 {
9875 int i;
9876 #if 0
9877 union {
9878 uint32_t i;
9879 float s;
9880 } s0, s1;
9881 CPU_DoubleU d;
9882 /* ??? This assumes float64 and double have the same layout.
9883 Oh well, it's only debug dumps. */
9884 union {
9885 float64 f64;
9886 double d;
9887 } d0;
9888 #endif
9889 uint32_t psr;
9890
9891 for (i = 0; i < 16; i++) {
9892 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9893 if ((i % 4) == 3)
9894 cpu_fprintf(f, "\n");
9895 else
9896 cpu_fprintf(f, " ");
9897 }
9898 psr = cpsr_read(env);
9899 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9900 psr,
9901 psr & (1 << 31) ? 'N' : '-',
9902 psr & (1 << 30) ? 'Z' : '-',
9903 psr & (1 << 29) ? 'C' : '-',
9904 psr & (1 << 28) ? 'V' : '-',
9905 psr & CPSR_T ? 'T' : 'A',
9906 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
9907
9908 #if 0
9909 for (i = 0; i < 16; i++) {
9910 d.d = env->vfp.regs[i];
9911 s0.i = d.l.lower;
9912 s1.i = d.l.upper;
9913 d0.f64 = d.d;
9914 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9915 i * 2, (int)s0.i, s0.s,
9916 i * 2 + 1, (int)s1.i, s1.s,
9917 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
9918 d0.d);
9919 }
9920 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
9921 #endif
9922 }
9923
9924 void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
9925 {
9926 env->regs[15] = gen_opc_pc[pc_pos];
9927 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
9928 }