/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

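/* Bail out of the decoder (via the caller's illegal_op label) unless the
   given architecture feature is available.  */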
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helper.h"
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}

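/* Dual signed 16x16->32 multiply: a returns the product of the low
   halfwords, b the product of the high halfwords.  */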
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Return (b << 32) + a.  Mark inputs as dead.  */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a.  Mark inputs as dead.  */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

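/* Shift by register.  The flag-setting forms go through helpers so that
   the shifter carry out is computed; the shift temporary is freed.  */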
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

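/* ARM parallel add/subtract.  op1 selects the variant (flag-setting
   signed/unsigned, saturating, halving); op2 selects the operation.
   The flag-setting forms also pass a pointer to the GE flags.  */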
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

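/* Generate a conditional branch to 'label', taken when condition code
   'cc' holds, by testing the cached NF/ZF/CF/VF flag fields.  */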
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}

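/* For each data processing op, nonzero if its flag-setting form updates
   only N and Z from the result (a logical op) rather than the full
   arithmetic flags.  */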
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a temporary
   and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above.  This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function.  */
static inline void store_reg_from_load(CPUState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

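/* Memory access helpers.  Loads return a fresh temporary; stores consume
   and free the value temporary.  'index' is the memory index (user vs.
   kernel access).  */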
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

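/* Add the offset of an ARM mode-2 (word/byte load/store) address to var:
   a 12-bit immediate or a shifted register, added or subtracted according
   to the U bit.  */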
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

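/* As gen_add_data_offset, for mode-3 (halfword/doubleword) addressing:
   a split 8-bit immediate or a plain register, with 'extra' folded in.  */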
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

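/* Return a pointer to the float_status to use: the standard FP status
   for Neon operations, the FPSCR-controlled status for VFP.  */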
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name)                                        \
static inline void gen_vfp_##name(int dp, int neon)               \
{                                                                 \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                  \
    if (dp) {                                                     \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);    \
    } else {                                                      \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);    \
    }                                                             \
    tcg_temp_free_ptr(statusptr);                                 \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name)                                        \
static inline void gen_vfp_##name(int dp, int neon)               \
{                                                                 \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                  \
    if (dp) {                                                     \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);    \
    } else {                                                      \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);    \
    }                                                             \
    tcg_temp_free_ptr(statusptr);                                 \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name)                                                 \
static inline void gen_vfp_##name(int dp, int shift, int neon)            \
{                                                                         \
    TCGv tmp_shift = tcg_const_i32(shift);                                \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                          \
    if (dp) {                                                             \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else {                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    }                                                                     \
    tcg_temp_free_i32(tmp_shift);                                         \
    tcg_temp_free_ptr(statusptr);                                         \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

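/* Access one 32-bit pass of a Neon register.  Loads allocate a new
   temporary; stores consume and free it.  */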
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)

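/* iwMMXt register file accessors: the wRn data registers are 64 bit,
   the wCx control registers 32 bit.  */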
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

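/* Compute the effective address of an iwMMXt load/store into dest,
   handling pre/post indexing and base writeback.  Returns nonzero for
   an unhandled addressing form.  */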
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

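/* Fetch an iwMMXt shift amount into dest, from wCGR0..wCGR3 or from the
   low half of a wRn register, masked to 'mask'.  Returns nonzero if the
   source register is invalid.  */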
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        /* AND-NOT needs the bitwise complement, not arithmetic negation.  */
        tcg_gen_not_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
1938 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1939 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1940 wrd = (insn >> 12) & 0xf;
1941 rd0 = (insn >> 16) & 0xf;
1942 gen_op_iwmmxt_movq_M0_wRn(rd0);
1943 switch ((insn >> 22) & 3) {
1944 case 0:
1945 if (insn & (1 << 21))
1946 gen_op_iwmmxt_unpacklsb_M0();
1947 else
1948 gen_op_iwmmxt_unpacklub_M0();
1949 break;
1950 case 1:
1951 if (insn & (1 << 21))
1952 gen_op_iwmmxt_unpacklsw_M0();
1953 else
1954 gen_op_iwmmxt_unpackluw_M0();
1955 break;
1956 case 2:
1957 if (insn & (1 << 21))
1958 gen_op_iwmmxt_unpacklsl_M0();
1959 else
1960 gen_op_iwmmxt_unpacklul_M0();
1961 break;
1962 case 3:
1963 return 1;
1964 }
1965 gen_op_iwmmxt_movq_wRn_M0(wrd);
1966 gen_op_iwmmxt_set_mup();
1967 gen_op_iwmmxt_set_cup();
1968 break;
1969 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1970 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1971 wrd = (insn >> 12) & 0xf;
1972 rd0 = (insn >> 16) & 0xf;
1973 gen_op_iwmmxt_movq_M0_wRn(rd0);
1974 switch ((insn >> 22) & 3) {
1975 case 0:
1976 if (insn & (1 << 21))
1977 gen_op_iwmmxt_unpackhsb_M0();
1978 else
1979 gen_op_iwmmxt_unpackhub_M0();
1980 break;
1981 case 1:
1982 if (insn & (1 << 21))
1983 gen_op_iwmmxt_unpackhsw_M0();
1984 else
1985 gen_op_iwmmxt_unpackhuw_M0();
1986 break;
1987 case 2:
1988 if (insn & (1 << 21))
1989 gen_op_iwmmxt_unpackhsl_M0();
1990 else
1991 gen_op_iwmmxt_unpackhul_M0();
1992 break;
1993 case 3:
1994 return 1;
1995 }
1996 gen_op_iwmmxt_movq_wRn_M0(wrd);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
1999 break;
2000 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2001 case 0x214: case 0x614: case 0xa14: case 0xe14:
2002 if (((insn >> 22) & 3) == 0)
2003 return 1;
2004 wrd = (insn >> 12) & 0xf;
2005 rd0 = (insn >> 16) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
2007 tmp = tcg_temp_new_i32();
2008 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2009 tcg_temp_free_i32(tmp);
2010 return 1;
2011 }
2012 switch ((insn >> 22) & 3) {
2013 case 1:
2014 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2015 break;
2016 case 2:
2017 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2018 break;
2019 case 3:
2020 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2021 break;
2022 }
2023 tcg_temp_free_i32(tmp);
2024 gen_op_iwmmxt_movq_wRn_M0(wrd);
2025 gen_op_iwmmxt_set_mup();
2026 gen_op_iwmmxt_set_cup();
2027 break;
2028 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2029 case 0x014: case 0x414: case 0x814: case 0xc14:
2030 if (((insn >> 22) & 3) == 0)
2031 return 1;
2032 wrd = (insn >> 12) & 0xf;
2033 rd0 = (insn >> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0);
2035 tmp = tcg_temp_new_i32();
2036 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2037 tcg_temp_free_i32(tmp);
2038 return 1;
2039 }
2040 switch ((insn >> 22) & 3) {
2041 case 1:
2042 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2043 break;
2044 case 2:
2045 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2046 break;
2047 case 3:
2048 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2049 break;
2050 }
2051 tcg_temp_free_i32(tmp);
2052 gen_op_iwmmxt_movq_wRn_M0(wrd);
2053 gen_op_iwmmxt_set_mup();
2054 gen_op_iwmmxt_set_cup();
2055 break;
2056 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2057 case 0x114: case 0x514: case 0x914: case 0xd14:
2058 if (((insn >> 22) & 3) == 0)
2059 return 1;
2060 wrd = (insn >> 12) & 0xf;
2061 rd0 = (insn >> 16) & 0xf;
2062 gen_op_iwmmxt_movq_M0_wRn(rd0);
2063 tmp = tcg_temp_new_i32();
2064 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2065 tcg_temp_free_i32(tmp);
2066 return 1;
2067 }
2068 switch ((insn >> 22) & 3) {
2069 case 1:
2070 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2071 break;
2072 case 2:
2073 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2074 break;
2075 case 3:
2076 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2077 break;
2078 }
2079 tcg_temp_free_i32(tmp);
2080 gen_op_iwmmxt_movq_wRn_M0(wrd);
2081 gen_op_iwmmxt_set_mup();
2082 gen_op_iwmmxt_set_cup();
2083 break;
2084 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2085 case 0x314: case 0x714: case 0xb14: case 0xf14:
2086 if (((insn >> 22) & 3) == 0)
2087 return 1;
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
2091 tmp = tcg_temp_new_i32();
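             /* Unlike the shifts above, which accept a count of up to 0xff,
              * the rotate amount is masked to the element width below: 0xf
              * for 16-bit, 0x1f for 32-bit and 0x3f for 64-bit elements.
              */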
2092 switch ((insn >> 22) & 3) {
2093 case 1:
2094 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2095 tcg_temp_free_i32(tmp);
2096 return 1;
2097 }
2098 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2099 break;
2100 case 2:
2101 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2102 tcg_temp_free_i32(tmp);
2103 return 1;
2104 }
2105 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2106 break;
2107 case 3:
2108 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2109 tcg_temp_free_i32(tmp);
2110 return 1;
2111 }
2112 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2113 break;
2114 }
2115 tcg_temp_free_i32(tmp);
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2118 gen_op_iwmmxt_set_cup();
2119 break;
2120 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2121 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2122 wrd = (insn >> 12) & 0xf;
2123 rd0 = (insn >> 16) & 0xf;
2124 rd1 = (insn >> 0) & 0xf;
2125 gen_op_iwmmxt_movq_M0_wRn(rd0);
2126 switch ((insn >> 22) & 3) {
2127 case 0:
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2130 else
2131 gen_op_iwmmxt_minub_M0_wRn(rd1);
2132 break;
2133 case 1:
2134 if (insn & (1 << 21))
2135 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2136 else
2137 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2138 break;
2139 case 2:
2140 if (insn & (1 << 21))
2141 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2142 else
2143 gen_op_iwmmxt_minul_M0_wRn(rd1);
2144 break;
2145 case 3:
2146 return 1;
2147 }
2148 gen_op_iwmmxt_movq_wRn_M0(wrd);
2149 gen_op_iwmmxt_set_mup();
2150 break;
2151 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2152 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2153 wrd = (insn >> 12) & 0xf;
2154 rd0 = (insn >> 16) & 0xf;
2155 rd1 = (insn >> 0) & 0xf;
2156 gen_op_iwmmxt_movq_M0_wRn(rd0);
2157 switch ((insn >> 22) & 3) {
2158 case 0:
2159 if (insn & (1 << 21))
2160 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2161 else
2162 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2163 break;
2164 case 1:
2165 if (insn & (1 << 21))
2166 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2167 else
2168 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2169 break;
2170 case 2:
2171 if (insn & (1 << 21))
2172 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2173 else
2174 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2175 break;
2176 case 3:
2177 return 1;
2178 }
2179 gen_op_iwmmxt_movq_wRn_M0(wrd);
2180 gen_op_iwmmxt_set_mup();
2181 break;
2182 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2183 case 0x402: case 0x502: case 0x602: case 0x702:
2184 wrd = (insn >> 12) & 0xf;
2185 rd0 = (insn >> 16) & 0xf;
2186 rd1 = (insn >> 0) & 0xf;
2187 gen_op_iwmmxt_movq_M0_wRn(rd0);
2188 tmp = tcg_const_i32((insn >> 20) & 3);
2189 iwmmxt_load_reg(cpu_V1, rd1);
2190 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2191 tcg_temp_free(tmp);
2192 gen_op_iwmmxt_movq_wRn_M0(wrd);
2193 gen_op_iwmmxt_set_mup();
2194 break;
2195 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2196 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2197 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2198 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2199 wrd = (insn >> 12) & 0xf;
2200 rd0 = (insn >> 16) & 0xf;
2201 rd1 = (insn >> 0) & 0xf;
2202 gen_op_iwmmxt_movq_M0_wRn(rd0);
2203 switch ((insn >> 20) & 0xf) {
2204 case 0x0:
2205 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2206 break;
2207 case 0x1:
2208 gen_op_iwmmxt_subub_M0_wRn(rd1);
2209 break;
2210 case 0x3:
2211 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2212 break;
2213 case 0x4:
2214 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2215 break;
2216 case 0x5:
2217 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2218 break;
2219 case 0x7:
2220 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2221 break;
2222 case 0x8:
2223 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2224 break;
2225 case 0x9:
2226 gen_op_iwmmxt_subul_M0_wRn(rd1);
2227 break;
2228 case 0xb:
2229 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2230 break;
2231 default:
2232 return 1;
2233 }
2234 gen_op_iwmmxt_movq_wRn_M0(wrd);
2235 gen_op_iwmmxt_set_mup();
2236 gen_op_iwmmxt_set_cup();
2237 break;
2238 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2239 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2240 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2241 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2242 wrd = (insn >> 12) & 0xf;
2243 rd0 = (insn >> 16) & 0xf;
2244 gen_op_iwmmxt_movq_M0_wRn(rd0);
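         /* The 8-bit shuffle selector is split across the encoding: bits
          * [23:20] hold the high nibble and bits [3:0] the low nibble, so
          * ((insn >> 16) & 0xf0) | (insn & 0x0f) reassembles it.
          */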
2245 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2246 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2247 tcg_temp_free(tmp);
2248 gen_op_iwmmxt_movq_wRn_M0(wrd);
2249 gen_op_iwmmxt_set_mup();
2250 gen_op_iwmmxt_set_cup();
2251 break;
2252 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2253 case 0x418: case 0x518: case 0x618: case 0x718:
2254 case 0x818: case 0x918: case 0xa18: case 0xb18:
2255 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 rd1 = (insn >> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0);
2260 switch ((insn >> 20) & 0xf) {
2261 case 0x0:
2262 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2263 break;
2264 case 0x1:
2265 gen_op_iwmmxt_addub_M0_wRn(rd1);
2266 break;
2267 case 0x3:
2268 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2269 break;
2270 case 0x4:
2271 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2272 break;
2273 case 0x5:
2274 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2275 break;
2276 case 0x7:
2277 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2278 break;
2279 case 0x8:
2280 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2281 break;
2282 case 0x9:
2283 gen_op_iwmmxt_addul_M0_wRn(rd1);
2284 break;
2285 case 0xb:
2286 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2287 break;
2288 default:
2289 return 1;
2290 }
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2296 case 0x408: case 0x508: case 0x608: case 0x708:
2297 case 0x808: case 0x908: case 0xa08: case 0xb08:
2298 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2299 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2300 return 1;
2301 wrd = (insn >> 12) & 0xf;
2302 rd0 = (insn >> 16) & 0xf;
2303 rd1 = (insn >> 0) & 0xf;
2304 gen_op_iwmmxt_movq_M0_wRn(rd0);
2305 switch ((insn >> 22) & 3) {
2306 case 1:
2307 if (insn & (1 << 21))
2308 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2309 else
2310 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2311 break;
2312 case 2:
2313 if (insn & (1 << 21))
2314 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2315 else
2316 gen_op_iwmmxt_packul_M0_wRn(rd1);
2317 break;
2318 case 3:
2319 if (insn & (1 << 21))
2320 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2321 else
2322 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2323 break;
2324 }
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 gen_op_iwmmxt_set_cup();
2328 break;
2329 case 0x201: case 0x203: case 0x205: case 0x207:
2330 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2331 case 0x211: case 0x213: case 0x215: case 0x217:
2332 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2333 wrd = (insn >> 5) & 0xf;
2334 rd0 = (insn >> 12) & 0xf;
2335 rd1 = (insn >> 0) & 0xf;
2336 if (rd0 == 0xf || rd1 == 0xf)
2337 return 1;
2338 gen_op_iwmmxt_movq_M0_wRn(wrd);
2339 tmp = load_reg(s, rd0);
2340 tmp2 = load_reg(s, rd1);
2341 switch ((insn >> 16) & 0xf) {
2342 case 0x0: /* TMIA */
2343 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2344 break;
2345 case 0x8: /* TMIAPH */
2346 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2347 break;
2348 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2349 if (insn & (1 << 16))
2350 tcg_gen_shri_i32(tmp, tmp, 16);
2351 if (insn & (1 << 17))
2352 tcg_gen_shri_i32(tmp2, tmp2, 16);
2353 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2354 break;
2355 default:
2356 tcg_temp_free_i32(tmp2);
2357 tcg_temp_free_i32(tmp);
2358 return 1;
2359 }
2360 tcg_temp_free_i32(tmp2);
2361 tcg_temp_free_i32(tmp);
2362 gen_op_iwmmxt_movq_wRn_M0(wrd);
2363 gen_op_iwmmxt_set_mup();
2364 break;
2365 default:
2366 return 1;
2367 }
2368
2369 return 0;
2370 }
2371
2372 /* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
2373    (i.e. an undefined instruction). */
2374 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2375 {
2376 int acc, rd0, rd1, rdhi, rdlo;
2377 TCGv tmp, tmp2;
2378
2379 if ((insn & 0x0ff00f10) == 0x0e200010) {
2380 /* Multiply with Internal Accumulate Format */
2381 rd0 = (insn >> 12) & 0xf;
2382 rd1 = insn & 0xf;
2383 acc = (insn >> 5) & 7;
2384
2385 if (acc != 0)
2386 return 1;
2387
2388 tmp = load_reg(s, rd0);
2389 tmp2 = load_reg(s, rd1);
2390 switch ((insn >> 16) & 0xf) {
2391 case 0x0: /* MIA */
2392 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2393 break;
2394 case 0x8: /* MIAPH */
2395 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2396 break;
2397 case 0xc: /* MIABB */
2398 case 0xd: /* MIABT */
2399 case 0xe: /* MIATB */
2400 case 0xf: /* MIATT */
2401 if (insn & (1 << 16))
2402 tcg_gen_shri_i32(tmp, tmp, 16);
2403 if (insn & (1 << 17))
2404 tcg_gen_shri_i32(tmp2, tmp2, 16);
2405 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2406 break;
2407 default:
2408 return 1;
2409 }
2410 tcg_temp_free_i32(tmp2);
2411 tcg_temp_free_i32(tmp);
2412
2413 gen_op_iwmmxt_movq_wRn_M0(acc);
2414 return 0;
2415 }
2416
2417 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2418 /* Internal Accumulator Access Format */
2419 rdhi = (insn >> 16) & 0xf;
2420 rdlo = (insn >> 12) & 0xf;
2421 acc = insn & 7;
2422
2423 if (acc != 0)
2424 return 1;
2425
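         /* The XScale internal accumulator acc0 is 40 bits wide: MRA returns
          * the low 32 bits in RdLo and the top 8 bits in RdHi, so the RdHi
          * mask below, (1 << (40 - 32)) - 1 == 0xff, clears the bits that do
          * not exist in the accumulator.
          */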
2426 if (insn & ARM_CP_RW_BIT) { /* MRA */
2427 iwmmxt_load_reg(cpu_V0, acc);
2428 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2429 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2430 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2431 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2432 } else { /* MAR */
2433 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2434 iwmmxt_store_reg(cpu_V0, acc);
2435 }
2436 return 0;
2437 }
2438
2439 return 1;
2440 }
2441
2442 /* Disassemble a system coprocessor instruction.  Return nonzero if the
2443    instruction is not defined. */
2444 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2445 {
2446 TCGv tmp, tmp2;
2447 uint32_t rd = (insn >> 12) & 0xf;
2448 uint32_t cp = (insn >> 8) & 0xf;
2449 if (IS_USER(s)) {
2450 return 1;
2451 }
2452
2453 if (insn & ARM_CP_RW_BIT) {
2454 if (!env->cp[cp].cp_read)
2455 return 1;
2456 gen_set_pc_im(s->pc);
2457 tmp = tcg_temp_new_i32();
2458 tmp2 = tcg_const_i32(insn);
2459 gen_helper_get_cp(tmp, cpu_env, tmp2);
2460 tcg_temp_free(tmp2);
2461 store_reg(s, rd, tmp);
2462 } else {
2463 if (!env->cp[cp].cp_write)
2464 return 1;
2465 gen_set_pc_im(s->pc);
2466 tmp = load_reg(s, rd);
2467 tmp2 = tcg_const_i32(insn);
2468 gen_helper_set_cp(cpu_env, tmp2, tmp);
2469 tcg_temp_free(tmp2);
2470 tcg_temp_free_i32(tmp);
2471 }
2472 return 0;
2473 }
2474
2475 static int cp15_user_ok(uint32_t insn)
2476 {
2477 int cpn = (insn >> 16) & 0xf;
2478 int cpm = insn & 0xf;
2479 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
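     /* op packs the coprocessor opcode fields into a single value: opc2
      * (insn[7:5]) lands in bits [2:0] and opc1 (insn[23:21]) in bits [5:3],
      * i.e. op == (opc1 << 3) | opc2.
      */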
2480
2481 if (cpn == 13 && cpm == 0) {
2482 /* TLS register. */
2483 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2484 return 1;
2485 }
2486 if (cpn == 7) {
2487 /* ISB, DSB, DMB. */
2488 if ((cpm == 5 && op == 4)
2489 || (cpm == 10 && (op == 4 || op == 5)))
2490 return 1;
2491 }
2492 return 0;
2493 }
2494
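 /* cp15 c13, CRm 0 holds the software thread ID registers: in ARM ARM terms,
  * op 2 is TPIDRURW (user read/write), op 3 TPIDRURO (user read-only) and
  * op 4 TPIDRPRW (privileged only), backed here by the cp15.c13_tls1..3
  * fields.
  */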
2495 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2496 {
2497 TCGv tmp;
2498 int cpn = (insn >> 16) & 0xf;
2499 int cpm = insn & 0xf;
2500 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2501
2502 if (!arm_feature(env, ARM_FEATURE_V6K))
2503 return 0;
2504
2505 if (!(cpn == 13 && cpm == 0))
2506 return 0;
2507
2508 if (insn & ARM_CP_RW_BIT) {
2509 switch (op) {
2510 case 2:
2511 tmp = load_cpu_field(cp15.c13_tls1);
2512 break;
2513 case 3:
2514 tmp = load_cpu_field(cp15.c13_tls2);
2515 break;
2516 case 4:
2517 tmp = load_cpu_field(cp15.c13_tls3);
2518 break;
2519 default:
2520 return 0;
2521 }
2522 store_reg(s, rd, tmp);
2523
2524 } else {
2525 tmp = load_reg(s, rd);
2526 switch (op) {
2527 case 2:
2528 store_cpu_field(tmp, cp15.c13_tls1);
2529 break;
2530 case 3:
2531 store_cpu_field(tmp, cp15.c13_tls2);
2532 break;
2533 case 4:
2534 store_cpu_field(tmp, cp15.c13_tls3);
2535 break;
2536 default:
2537 tcg_temp_free_i32(tmp);
2538 return 0;
2539 }
2540 }
2541 return 1;
2542 }
2543
2544 /* Disassemble a system coprocessor (cp15) instruction.  Return nonzero
2545    if the instruction is not defined. */
2546 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2547 {
2548 uint32_t rd;
2549 TCGv tmp, tmp2;
2550
2551 /* M profile cores use memory mapped registers instead of cp15. */
2552 if (arm_feature(env, ARM_FEATURE_M))
2553 return 1;
2554
2555 if ((insn & (1 << 25)) == 0) {
2556 if (insn & (1 << 20)) {
2557 /* mrrc */
2558 return 1;
2559 }
2560 /* mcrr. Used for block cache operations, so implement as no-op. */
2561 return 0;
2562 }
2563 if ((insn & (1 << 4)) == 0) {
2564 /* cdp */
2565 return 1;
2566 }
2567 if (IS_USER(s) && !cp15_user_ok(insn)) {
2568 return 1;
2569 }
2570
2571 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2572 * instructions rather than a separate instruction.
2573 */
2574 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2575 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2576 * In v7, this must NOP.
2577 */
2578 if (!arm_feature(env, ARM_FEATURE_V7)) {
2579 /* Wait for interrupt. */
2580 gen_set_pc_im(s->pc);
2581 s->is_jmp = DISAS_WFI;
2582 }
2583 return 0;
2584 }
2585
2586 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2587 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2588 * so this is slightly over-broad.
2589 */
2590 if (!arm_feature(env, ARM_FEATURE_V6)) {
2591 /* Wait for interrupt. */
2592 gen_set_pc_im(s->pc);
2593 s->is_jmp = DISAS_WFI;
2594 return 0;
2595 }
2596 /* Otherwise fall through to handle via helper function.
2597 * In particular, on v7 and some v6 cores this is one of
2598 * the VA-PA registers.
2599 */
2600 }
2601
2602 rd = (insn >> 12) & 0xf;
2603
2604 if (cp15_tls_load_store(env, s, insn, rd))
2605 return 0;
2606
2607 tmp2 = tcg_const_i32(insn);
2608 if (insn & ARM_CP_RW_BIT) {
2609 tmp = tcg_temp_new_i32();
2610 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2611         /* If the destination register is r15 then the condition codes are set. */
2612 if (rd != 15)
2613 store_reg(s, rd, tmp);
2614 else
2615 tcg_temp_free_i32(tmp);
2616 } else {
2617 tmp = load_reg(s, rd);
2618 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2619 tcg_temp_free_i32(tmp);
2620 /* Normally we would always end the TB here, but Linux
2621 * arch/arm/mach-pxa/sleep.S expects two instructions following
2622 * an MMU enable to execute from cache. Imitate this behaviour. */
2623 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2624 (insn & 0x0fff0fff) != 0x0e010f10)
2625 gen_lookup_tb(s);
2626 }
2627 tcg_temp_free_i32(tmp2);
2628 return 0;
2629 }
2630
2631 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2632 #define VFP_SREG(insn, bigbit, smallbit) \
2633 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2634 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2635 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2636 reg = (((insn) >> (bigbit)) & 0x0f) \
2637 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2638 } else { \
2639 if (insn & (1 << (smallbit))) \
2640 return 1; \
2641 reg = ((insn) >> (bigbit)) & 0x0f; \
2642 }} while (0)
2643
2644 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2645 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2646 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2647 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2648 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2649 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
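 /* Worked example of the field layout: a single-precision register number
  * is encoded as Vd:D, so VFP_SREG_D yields (insn[15:12] << 1) | insn[22],
  * while a double-precision register is D:Vd, i.e. (insn[22] << 4) |
  * insn[15:12] on VFP3, which has 32 D registers.  Pre-VFP3 cores only have
  * D0-D15, so a set "smallbit" there is an undefined encoding.
  */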
2650
2651 /* Move between integer and VFP cores. */
2652 static TCGv gen_vfp_mrs(void)
2653 {
2654 TCGv tmp = tcg_temp_new_i32();
2655 tcg_gen_mov_i32(tmp, cpu_F0s);
2656 return tmp;
2657 }
2658
2659 static void gen_vfp_msr(TCGv tmp)
2660 {
2661 tcg_gen_mov_i32(cpu_F0s, tmp);
2662 tcg_temp_free_i32(tmp);
2663 }
2664
2665 static void gen_neon_dup_u8(TCGv var, int shift)
2666 {
2667 TCGv tmp = tcg_temp_new_i32();
2668 if (shift)
2669 tcg_gen_shri_i32(var, var, shift);
2670 tcg_gen_ext8u_i32(var, var);
2671 tcg_gen_shli_i32(tmp, var, 8);
2672 tcg_gen_or_i32(var, var, tmp);
2673 tcg_gen_shli_i32(tmp, var, 16);
2674 tcg_gen_or_i32(var, var, tmp);
2675 tcg_temp_free_i32(tmp);
2676 }
2677
2678 static void gen_neon_dup_low16(TCGv var)
2679 {
2680 TCGv tmp = tcg_temp_new_i32();
2681 tcg_gen_ext16u_i32(var, var);
2682 tcg_gen_shli_i32(tmp, var, 16);
2683 tcg_gen_or_i32(var, var, tmp);
2684 tcg_temp_free_i32(tmp);
2685 }
2686
2687 static void gen_neon_dup_high16(TCGv var)
2688 {
2689 TCGv tmp = tcg_temp_new_i32();
2690 tcg_gen_andi_i32(var, var, 0xffff0000);
2691 tcg_gen_shri_i32(tmp, var, 16);
2692 tcg_gen_or_i32(var, var, tmp);
2693 tcg_temp_free_i32(tmp);
2694 }
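 /* Worked examples of the duplication helpers above: gen_neon_dup_u8 with
  * shift 0 turns 0x000000ab into 0xabababab, gen_neon_dup_low16 turns
  * 0x1234abcd into 0xabcdabcd, and gen_neon_dup_high16 turns 0x1234abcd
  * into 0x12341234.
  */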
2695
2696 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2697 {
2698     /* Load a single Neon element and replicate into a 32-bit TCG reg */
2699 TCGv tmp;
2700 switch (size) {
2701 case 0:
2702 tmp = gen_ld8u(addr, IS_USER(s));
2703 gen_neon_dup_u8(tmp, 0);
2704 break;
2705 case 1:
2706 tmp = gen_ld16u(addr, IS_USER(s));
2707 gen_neon_dup_low16(tmp);
2708 break;
2709 case 2:
2710 tmp = gen_ld32(addr, IS_USER(s));
2711 break;
2712 default: /* Avoid compiler warnings. */
2713 abort();
2714 }
2715 return tmp;
2716 }
2717
2718 /* Disassemble a VFP instruction.  Returns nonzero if an error occurred
2719    (i.e. an undefined instruction). */
2720 static int disas_vfp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2721 {
2722 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2723 int dp, veclen;
2724 TCGv addr;
2725 TCGv tmp;
2726 TCGv tmp2;
2727
2728 if (!arm_feature(env, ARM_FEATURE_VFP))
2729 return 1;
2730
2731 if (!s->vfp_enabled) {
2732 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2733 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2734 return 1;
2735 rn = (insn >> 16) & 0xf;
2736 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2737 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2738 return 1;
2739 }
2740 dp = ((insn & 0xf00) == 0xb00);
2741 switch ((insn >> 24) & 0xf) {
2742 case 0xe:
2743 if (insn & (1 << 4)) {
2744 /* single register transfer */
2745 rd = (insn >> 12) & 0xf;
2746 if (dp) {
2747 int size;
2748 int pass;
2749
2750 VFP_DREG_N(rn, insn);
2751 if (insn & 0xf)
2752 return 1;
2753 if (insn & 0x00c00060
2754 && !arm_feature(env, ARM_FEATURE_NEON))
2755 return 1;
2756
2757 pass = (insn >> 21) & 1;
2758 if (insn & (1 << 22)) {
2759 size = 0;
2760 offset = ((insn >> 5) & 3) * 8;
2761 } else if (insn & (1 << 5)) {
2762 size = 1;
2763 offset = (insn & (1 << 6)) ? 16 : 0;
2764 } else {
2765 size = 2;
2766 offset = 0;
2767 }
2768 if (insn & ARM_CP_RW_BIT) {
2769 /* vfp->arm */
2770 tmp = neon_load_reg(rn, pass);
2771 switch (size) {
2772 case 0:
2773 if (offset)
2774 tcg_gen_shri_i32(tmp, tmp, offset);
2775 if (insn & (1 << 23))
2776 gen_uxtb(tmp);
2777 else
2778 gen_sxtb(tmp);
2779 break;
2780 case 1:
2781 if (insn & (1 << 23)) {
2782 if (offset) {
2783 tcg_gen_shri_i32(tmp, tmp, 16);
2784 } else {
2785 gen_uxth(tmp);
2786 }
2787 } else {
2788 if (offset) {
2789 tcg_gen_sari_i32(tmp, tmp, 16);
2790 } else {
2791 gen_sxth(tmp);
2792 }
2793 }
2794 break;
2795 case 2:
2796 break;
2797 }
2798 store_reg(s, rd, tmp);
2799 } else {
2800 /* arm->vfp */
2801 tmp = load_reg(s, rd);
2802 if (insn & (1 << 23)) {
2803 /* VDUP */
2804 if (size == 0) {
2805 gen_neon_dup_u8(tmp, 0);
2806 } else if (size == 1) {
2807 gen_neon_dup_low16(tmp);
2808 }
2809 for (n = 0; n <= pass * 2; n++) {
2810 tmp2 = tcg_temp_new_i32();
2811 tcg_gen_mov_i32(tmp2, tmp);
2812 neon_store_reg(rn, n, tmp2);
2813 }
2814 neon_store_reg(rn, n, tmp);
2815 } else {
2816 /* VMOV */
2817 switch (size) {
2818 case 0:
2819 tmp2 = neon_load_reg(rn, pass);
2820 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2821 tcg_temp_free_i32(tmp2);
2822 break;
2823 case 1:
2824 tmp2 = neon_load_reg(rn, pass);
2825 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2826 tcg_temp_free_i32(tmp2);
2827 break;
2828 case 2:
2829 break;
2830 }
2831 neon_store_reg(rn, pass, tmp);
2832 }
2833 }
2834 } else { /* !dp */
2835 if ((insn & 0x6f) != 0x00)
2836 return 1;
2837 rn = VFP_SREG_N(insn);
2838 if (insn & ARM_CP_RW_BIT) {
2839 /* vfp->arm */
2840 if (insn & (1 << 21)) {
2841 /* system register */
2842 rn >>= 1;
2843
2844 switch (rn) {
2845 case ARM_VFP_FPSID:
2846                     /* VFP2 allows access to FPSID from userspace.
2847                        VFP3 restricts all id registers to privileged
2848                        accesses. */
2849 if (IS_USER(s)
2850 && arm_feature(env, ARM_FEATURE_VFP3))
2851 return 1;
2852 tmp = load_cpu_field(vfp.xregs[rn]);
2853 break;
2854 case ARM_VFP_FPEXC:
2855 if (IS_USER(s))
2856 return 1;
2857 tmp = load_cpu_field(vfp.xregs[rn]);
2858 break;
2859 case ARM_VFP_FPINST:
2860 case ARM_VFP_FPINST2:
2861 /* Not present in VFP3. */
2862 if (IS_USER(s)
2863 || arm_feature(env, ARM_FEATURE_VFP3))
2864 return 1;
2865 tmp = load_cpu_field(vfp.xregs[rn]);
2866 break;
2867 case ARM_VFP_FPSCR:
2868 if (rd == 15) {
2869 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2870 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2871 } else {
2872 tmp = tcg_temp_new_i32();
2873 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2874 }
2875 break;
2876 case ARM_VFP_MVFR0:
2877 case ARM_VFP_MVFR1:
2878 if (IS_USER(s)
2879 || !arm_feature(env, ARM_FEATURE_VFP3))
2880 return 1;
2881 tmp = load_cpu_field(vfp.xregs[rn]);
2882 break;
2883 default:
2884 return 1;
2885 }
2886 } else {
2887 gen_mov_F0_vreg(0, rn);
2888 tmp = gen_vfp_mrs();
2889 }
2890 if (rd == 15) {
2891 /* Set the 4 flag bits in the CPSR. */
2892 gen_set_nzcv(tmp);
2893 tcg_temp_free_i32(tmp);
2894 } else {
2895 store_reg(s, rd, tmp);
2896 }
2897 } else {
2898 /* arm->vfp */
2899 tmp = load_reg(s, rd);
2900 if (insn & (1 << 21)) {
2901 rn >>= 1;
2902 /* system register */
2903 switch (rn) {
2904 case ARM_VFP_FPSID:
2905 case ARM_VFP_MVFR0:
2906 case ARM_VFP_MVFR1:
2907 /* Writes are ignored. */
2908 break;
2909 case ARM_VFP_FPSCR:
2910 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2911 tcg_temp_free_i32(tmp);
2912 gen_lookup_tb(s);
2913 break;
2914 case ARM_VFP_FPEXC:
2915 if (IS_USER(s))
2916 return 1;
2917 /* TODO: VFP subarchitecture support.
2918 * For now, keep the EN bit only */
2919 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2920 store_cpu_field(tmp, vfp.xregs[rn]);
2921 gen_lookup_tb(s);
2922 break;
2923 case ARM_VFP_FPINST:
2924 case ARM_VFP_FPINST2:
2925 store_cpu_field(tmp, vfp.xregs[rn]);
2926 break;
2927 default:
2928 return 1;
2929 }
2930 } else {
2931 gen_vfp_msr(tmp);
2932 gen_mov_vreg_F0(0, rn);
2933 }
2934 }
2935 }
2936 } else {
2937 /* data processing */
2938 /* The opcode is in bits 23, 21, 20 and 6. */
2939 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2940 if (dp) {
2941 if (op == 15) {
2942 /* rn is opcode */
2943 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2944 } else {
2945 /* rn is register number */
2946 VFP_DREG_N(rn, insn);
2947 }
2948
2949 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2950 /* Integer or single precision destination. */
2951 rd = VFP_SREG_D(insn);
2952 } else {
2953 VFP_DREG_D(rd, insn);
2954 }
2955 if (op == 15 &&
2956 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2957 /* VCVT from int is always from S reg regardless of dp bit.
2958 * VCVT with immediate frac_bits has same format as SREG_M
2959 */
2960 rm = VFP_SREG_M(insn);
2961 } else {
2962 VFP_DREG_M(rm, insn);
2963 }
2964 } else {
2965 rn = VFP_SREG_N(insn);
2966 if (op == 15 && rn == 15) {
2967 /* Double precision destination. */
2968 VFP_DREG_D(rd, insn);
2969 } else {
2970 rd = VFP_SREG_D(insn);
2971 }
2972 /* NB that we implicitly rely on the encoding for the frac_bits
2973 * in VCVT of fixed to float being the same as that of an SREG_M
2974 */
2975 rm = VFP_SREG_M(insn);
2976 }
2977
2978 veclen = s->vec_len;
2979 if (op == 15 && rn > 3)
2980 veclen = 0;
2981
2982 /* Shut up compiler warnings. */
2983 delta_m = 0;
2984 delta_d = 0;
2985 bank_mask = 0;
2986
2987 if (veclen > 0) {
2988 if (dp)
2989 bank_mask = 0xc;
2990 else
2991 bank_mask = 0x18;
2992
2993 /* Figure out what type of vector operation this is. */
2994 if ((rd & bank_mask) == 0) {
2995 /* scalar */
2996 veclen = 0;
2997 } else {
2998 if (dp)
2999 delta_d = (s->vec_stride >> 1) + 1;
3000 else
3001 delta_d = s->vec_stride + 1;
3002
3003 if ((rm & bank_mask) == 0) {
3004 /* mixed scalar/vector */
3005 delta_m = 0;
3006 } else {
3007 /* vector */
3008 delta_m = delta_d;
3009 }
3010 }
3011 }
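             /* Short-vector summary: with a nonzero FPSCR LEN field the
              * banked registers act as vectors.  Singles form four banks of
              * eight (hence bank_mask 0x18) and doubles four banks of four
              * (bank_mask 0xc); operands in bank 0 are scalar, and delta_d /
              * delta_m are the per-iteration strides within a bank.
              */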
3012
3013 /* Load the initial operands. */
3014 if (op == 15) {
3015 switch (rn) {
3016 case 16:
3017 case 17:
3018 /* Integer source */
3019 gen_mov_F0_vreg(0, rm);
3020 break;
3021 case 8:
3022 case 9:
3023 /* Compare */
3024 gen_mov_F0_vreg(dp, rd);
3025 gen_mov_F1_vreg(dp, rm);
3026 break;
3027 case 10:
3028 case 11:
3029 /* Compare with zero */
3030 gen_mov_F0_vreg(dp, rd);
3031 gen_vfp_F1_ld0(dp);
3032 break;
3033 case 20:
3034 case 21:
3035 case 22:
3036 case 23:
3037 case 28:
3038 case 29:
3039 case 30:
3040 case 31:
3041 /* Source and destination the same. */
3042 gen_mov_F0_vreg(dp, rd);
3043 break;
3044 default:
3045 /* One source operand. */
3046 gen_mov_F0_vreg(dp, rm);
3047 break;
3048 }
3049 } else {
3050 /* Two source operands. */
3051 gen_mov_F0_vreg(dp, rn);
3052 gen_mov_F1_vreg(dp, rm);
3053 }
3054
3055 for (;;) {
3056 /* Perform the calculation. */
3057 switch (op) {
3058 case 0: /* VMLA: fd + (fn * fm) */
3059                 /* Note that the order of inputs to the add matters for NaNs */
3060 gen_vfp_F1_mul(dp);
3061 gen_mov_F0_vreg(dp, rd);
3062 gen_vfp_add(dp);
3063 break;
3064 case 1: /* VMLS: fd + -(fn * fm) */
3065 gen_vfp_mul(dp);
3066 gen_vfp_F1_neg(dp);
3067 gen_mov_F0_vreg(dp, rd);
3068 gen_vfp_add(dp);
3069 break;
3070 case 2: /* VNMLS: -fd + (fn * fm) */
3071 /* Note that it isn't valid to replace (-A + B) with (B - A)
3072                  * or similar plausible-looking simplifications
3073 * because this will give wrong results for NaNs.
3074 */
3075 gen_vfp_F1_mul(dp);
3076 gen_mov_F0_vreg(dp, rd);
3077 gen_vfp_neg(dp);
3078 gen_vfp_add(dp);
3079 break;
3080 case 3: /* VNMLA: -fd + -(fn * fm) */
3081 gen_vfp_mul(dp);
3082 gen_vfp_F1_neg(dp);
3083 gen_mov_F0_vreg(dp, rd);
3084 gen_vfp_neg(dp);
3085 gen_vfp_add(dp);
3086 break;
3087 case 4: /* mul: fn * fm */
3088 gen_vfp_mul(dp);
3089 break;
3090 case 5: /* nmul: -(fn * fm) */
3091 gen_vfp_mul(dp);
3092 gen_vfp_neg(dp);
3093 break;
3094 case 6: /* add: fn + fm */
3095 gen_vfp_add(dp);
3096 break;
3097 case 7: /* sub: fn - fm */
3098 gen_vfp_sub(dp);
3099 break;
3100 case 8: /* div: fn / fm */
3101 gen_vfp_div(dp);
3102 break;
3103 case 14: /* fconst */
3104 if (!arm_feature(env, ARM_FEATURE_VFP3))
3105 return 1;
3106
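                     /* This expands the 8-bit VFP immediate (VFPExpandImm in
                      * the ARM ARM): for imm8 = abcdefgh the sign is a, the
                      * exponent is NOT(b) followed by replicated copies of b
                      * and then cd, and efgh seeds the fraction.  E.g. imm8
                      * 0x70 encodes 1.0 and imm8 0x00 encodes 2.0.
                      */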
3107 n = (insn << 12) & 0x80000000;
3108 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3109 if (dp) {
3110 if (i & 0x40)
3111 i |= 0x3f80;
3112 else
3113 i |= 0x4000;
3114 n |= i << 16;
3115 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3116 } else {
3117 if (i & 0x40)
3118 i |= 0x780;
3119 else
3120 i |= 0x800;
3121 n |= i << 19;
3122 tcg_gen_movi_i32(cpu_F0s, n);
3123 }
3124 break;
3125 case 15: /* extension space */
3126 switch (rn) {
3127 case 0: /* cpy */
3128 /* no-op */
3129 break;
3130 case 1: /* abs */
3131 gen_vfp_abs(dp);
3132 break;
3133 case 2: /* neg */
3134 gen_vfp_neg(dp);
3135 break;
3136 case 3: /* sqrt */
3137 gen_vfp_sqrt(dp);
3138 break;
3139 case 4: /* vcvtb.f32.f16 */
3140 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3141 return 1;
3142 tmp = gen_vfp_mrs();
3143 tcg_gen_ext16u_i32(tmp, tmp);
3144 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3145 tcg_temp_free_i32(tmp);
3146 break;
3147 case 5: /* vcvtt.f32.f16 */
3148 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3149 return 1;
3150 tmp = gen_vfp_mrs();
3151 tcg_gen_shri_i32(tmp, tmp, 16);
3152 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3153 tcg_temp_free_i32(tmp);
3154 break;
3155 case 6: /* vcvtb.f16.f32 */
3156 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3157 return 1;
3158 tmp = tcg_temp_new_i32();
3159 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3160 gen_mov_F0_vreg(0, rd);
3161 tmp2 = gen_vfp_mrs();
3162 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3163 tcg_gen_or_i32(tmp, tmp, tmp2);
3164 tcg_temp_free_i32(tmp2);
3165 gen_vfp_msr(tmp);
3166 break;
3167 case 7: /* vcvtt.f16.f32 */
3168 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3169 return 1;
3170 tmp = tcg_temp_new_i32();
3171 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3172 tcg_gen_shli_i32(tmp, tmp, 16);
3173 gen_mov_F0_vreg(0, rd);
3174 tmp2 = gen_vfp_mrs();
3175 tcg_gen_ext16u_i32(tmp2, tmp2);
3176 tcg_gen_or_i32(tmp, tmp, tmp2);
3177 tcg_temp_free_i32(tmp2);
3178 gen_vfp_msr(tmp);
3179 break;
3180 case 8: /* cmp */
3181 gen_vfp_cmp(dp);
3182 break;
3183 case 9: /* cmpe */
3184 gen_vfp_cmpe(dp);
3185 break;
3186 case 10: /* cmpz */
3187 gen_vfp_cmp(dp);
3188 break;
3189 case 11: /* cmpez */
3190 gen_vfp_F1_ld0(dp);
3191 gen_vfp_cmpe(dp);
3192 break;
3193 case 15: /* single<->double conversion */
3194 if (dp)
3195 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3196 else
3197 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3198 break;
3199 case 16: /* fuito */
3200 gen_vfp_uito(dp, 0);
3201 break;
3202 case 17: /* fsito */
3203 gen_vfp_sito(dp, 0);
3204 break;
3205 case 20: /* fshto */
3206 if (!arm_feature(env, ARM_FEATURE_VFP3))
3207 return 1;
3208 gen_vfp_shto(dp, 16 - rm, 0);
3209 break;
3210 case 21: /* fslto */
3211 if (!arm_feature(env, ARM_FEATURE_VFP3))
3212 return 1;
3213 gen_vfp_slto(dp, 32 - rm, 0);
3214 break;
3215 case 22: /* fuhto */
3216 if (!arm_feature(env, ARM_FEATURE_VFP3))
3217 return 1;
3218 gen_vfp_uhto(dp, 16 - rm, 0);
3219 break;
3220 case 23: /* fulto */
3221 if (!arm_feature(env, ARM_FEATURE_VFP3))
3222 return 1;
3223 gen_vfp_ulto(dp, 32 - rm, 0);
3224 break;
3225 case 24: /* ftoui */
3226 gen_vfp_toui(dp, 0);
3227 break;
3228 case 25: /* ftouiz */
3229 gen_vfp_touiz(dp, 0);
3230 break;
3231 case 26: /* ftosi */
3232 gen_vfp_tosi(dp, 0);
3233 break;
3234 case 27: /* ftosiz */
3235 gen_vfp_tosiz(dp, 0);
3236 break;
3237 case 28: /* ftosh */
3238 if (!arm_feature(env, ARM_FEATURE_VFP3))
3239 return 1;
3240 gen_vfp_tosh(dp, 16 - rm, 0);
3241 break;
3242 case 29: /* ftosl */
3243 if (!arm_feature(env, ARM_FEATURE_VFP3))
3244 return 1;
3245 gen_vfp_tosl(dp, 32 - rm, 0);
3246 break;
3247 case 30: /* ftouh */
3248 if (!arm_feature(env, ARM_FEATURE_VFP3))
3249 return 1;
3250 gen_vfp_touh(dp, 16 - rm, 0);
3251 break;
3252 case 31: /* ftoul */
3253 if (!arm_feature(env, ARM_FEATURE_VFP3))
3254 return 1;
3255 gen_vfp_toul(dp, 32 - rm, 0);
3256 break;
3257 default: /* undefined */
3258                 printf("rn:%d\n", rn);
3259 return 1;
3260 }
3261 break;
3262 default: /* undefined */
3263             printf("op:%d\n", op);
3264 return 1;
3265 }
3266
3267 /* Write back the result. */
3268 if (op == 15 && (rn >= 8 && rn <= 11))
3269 ; /* Comparison, do nothing. */
3270 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3271 /* VCVT double to int: always integer result. */
3272 gen_mov_vreg_F0(0, rd);
3273 else if (op == 15 && rn == 15)
3274 /* conversion */
3275 gen_mov_vreg_F0(!dp, rd);
3276 else
3277 gen_mov_vreg_F0(dp, rd);
3278
3279 /* break out of the loop if we have finished */
3280 if (veclen == 0)
3281 break;
3282
3283 if (op == 15 && delta_m == 0) {
3284 /* single source one-many */
3285 while (veclen--) {
3286 rd = ((rd + delta_d) & (bank_mask - 1))
3287 | (rd & bank_mask);
3288 gen_mov_vreg_F0(dp, rd);
3289 }
3290 break;
3291 }
3292                 /* Set up the next operands. */
3293 veclen--;
3294 rd = ((rd + delta_d) & (bank_mask - 1))
3295 | (rd & bank_mask);
3296
3297 if (op == 15) {
3298 /* One source operand. */
3299 rm = ((rm + delta_m) & (bank_mask - 1))
3300 | (rm & bank_mask);
3301 gen_mov_F0_vreg(dp, rm);
3302 } else {
3303 /* Two source operands. */
3304 rn = ((rn + delta_d) & (bank_mask - 1))
3305 | (rn & bank_mask);
3306 gen_mov_F0_vreg(dp, rn);
3307 if (delta_m) {
3308 rm = ((rm + delta_m) & (bank_mask - 1))
3309 | (rm & bank_mask);
3310 gen_mov_F1_vreg(dp, rm);
3311 }
3312 }
3313 }
3314 }
3315 break;
3316 case 0xc:
3317 case 0xd:
3318 if ((insn & 0x03e00000) == 0x00400000) {
3319 /* two-register transfer */
3320 rn = (insn >> 16) & 0xf;
3321 rd = (insn >> 12) & 0xf;
3322 if (dp) {
3323 VFP_DREG_M(rm, insn);
3324 } else {
3325 rm = VFP_SREG_M(insn);
3326 }
3327
3328 if (insn & ARM_CP_RW_BIT) {
3329 /* vfp->arm */
3330 if (dp) {
3331 gen_mov_F0_vreg(0, rm * 2);
3332 tmp = gen_vfp_mrs();
3333 store_reg(s, rd, tmp);
3334 gen_mov_F0_vreg(0, rm * 2 + 1);
3335 tmp = gen_vfp_mrs();
3336 store_reg(s, rn, tmp);
3337 } else {
3338 gen_mov_F0_vreg(0, rm);
3339 tmp = gen_vfp_mrs();
3340 store_reg(s, rd, tmp);
3341 gen_mov_F0_vreg(0, rm + 1);
3342 tmp = gen_vfp_mrs();
3343 store_reg(s, rn, tmp);
3344 }
3345 } else {
3346 /* arm->vfp */
3347 if (dp) {
3348 tmp = load_reg(s, rd);
3349 gen_vfp_msr(tmp);
3350 gen_mov_vreg_F0(0, rm * 2);
3351 tmp = load_reg(s, rn);
3352 gen_vfp_msr(tmp);
3353 gen_mov_vreg_F0(0, rm * 2 + 1);
3354 } else {
3355 tmp = load_reg(s, rd);
3356 gen_vfp_msr(tmp);
3357 gen_mov_vreg_F0(0, rm);
3358 tmp = load_reg(s, rn);
3359 gen_vfp_msr(tmp);
3360 gen_mov_vreg_F0(0, rm + 1);
3361 }
3362 }
3363 } else {
3364 /* Load/store */
3365 rn = (insn >> 16) & 0xf;
3366 if (dp)
3367 VFP_DREG_D(rd, insn);
3368 else
3369 rd = VFP_SREG_D(insn);
3370 if (s->thumb && rn == 15) {
3371 addr = tcg_temp_new_i32();
3372 tcg_gen_movi_i32(addr, s->pc & ~2);
3373 } else {
3374 addr = load_reg(s, rn);
3375 }
3376 if ((insn & 0x01200000) == 0x01000000) {
3377 /* Single load/store */
3378 offset = (insn & 0xff) << 2;
3379 if ((insn & (1 << 23)) == 0)
3380 offset = -offset;
3381 tcg_gen_addi_i32(addr, addr, offset);
3382 if (insn & (1 << 20)) {
3383 gen_vfp_ld(s, dp, addr);
3384 gen_mov_vreg_F0(dp, rd);
3385 } else {
3386 gen_mov_F0_vreg(dp, rd);
3387 gen_vfp_st(s, dp, addr);
3388 }
3389 tcg_temp_free_i32(addr);
3390 } else {
3391 /* load/store multiple */
3392 if (dp)
3393 n = (insn >> 1) & 0x7f;
3394 else
3395 n = insn & 0xff;
3396
3397 if (insn & (1 << 24)) /* pre-decrement */
3398 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3399
3400 if (dp)
3401 offset = 8;
3402 else
3403 offset = 4;
3404 for (i = 0; i < n; i++) {
3405 if (insn & ARM_CP_RW_BIT) {
3406 /* load */
3407 gen_vfp_ld(s, dp, addr);
3408 gen_mov_vreg_F0(dp, rd + i);
3409 } else {
3410 /* store */
3411 gen_mov_F0_vreg(dp, rd + i);
3412 gen_vfp_st(s, dp, addr);
3413 }
3414 tcg_gen_addi_i32(addr, addr, offset);
3415 }
3416 if (insn & (1 << 21)) {
3417 /* writeback */
3418 if (insn & (1 << 24))
3419 offset = -offset * n;
3420 else if (dp && (insn & 1))
3421 offset = 4;
3422 else
3423 offset = 0;
3424
3425 if (offset != 0)
3426 tcg_gen_addi_i32(addr, addr, offset);
3427 store_reg(s, rn, addr);
3428 } else {
3429 tcg_temp_free_i32(addr);
3430 }
3431 }
3432 }
3433 break;
3434 default:
3435 /* Should never happen. */
3436 return 1;
3437 }
3438 return 0;
3439 }
3440
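 /* Direct block chaining via goto_tb is only safe when the destination lies
  * in the same guest page as this TB, because page invalidation is what
  * unlinks chained TBs; otherwise we just set the PC and return to the
  * main loop.
  */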
3441 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3442 {
3443 TranslationBlock *tb;
3444
3445 tb = s->tb;
3446 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3447 tcg_gen_goto_tb(n);
3448 gen_set_pc_im(dest);
3449 tcg_gen_exit_tb((tcg_target_long)tb + n);
3450 } else {
3451 gen_set_pc_im(dest);
3452 tcg_gen_exit_tb(0);
3453 }
3454 }
3455
3456 static inline void gen_jmp(DisasContext *s, uint32_t dest)
3457 {
3458 if (unlikely(s->singlestep_enabled)) {
3459 /* An indirect jump so that we still trigger the debug exception. */
3460 if (s->thumb)
3461 dest |= 1;
3462 gen_bx_im(s, dest);
3463 } else {
3464 gen_goto_tb(s, 0, dest);
3465 s->is_jmp = DISAS_TB_JUMP;
3466 }
3467 }
3468
3469 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3470 {
3471 if (x)
3472 tcg_gen_sari_i32(t0, t0, 16);
3473 else
3474 gen_sxth(t0);
3475 if (y)
3476 tcg_gen_sari_i32(t1, t1, 16);
3477 else
3478 gen_sxth(t1);
3479 tcg_gen_mul_i32(t0, t0, t1);
3480 }
3481
3482 /* Return the mask of PSR bits set by a MSR instruction. */
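 /* The four flag bits mirror the MSR field mask <c,x,s,f>: bit 0 selects
  * PSR[7:0] (control), bit 1 PSR[15:8] (extension), bit 2 PSR[23:16]
  * (status) and bit 3 PSR[31:24] (flags).
  */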
3483 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3484 uint32_t mask;
3485
3486 mask = 0;
3487 if (flags & (1 << 0))
3488 mask |= 0xff;
3489 if (flags & (1 << 1))
3490 mask |= 0xff00;
3491 if (flags & (1 << 2))
3492 mask |= 0xff0000;
3493 if (flags & (1 << 3))
3494 mask |= 0xff000000;
3495
3496 /* Mask out undefined bits. */
3497 mask &= ~CPSR_RESERVED;
3498 if (!arm_feature(env, ARM_FEATURE_V4T))
3499 mask &= ~CPSR_T;
3500 if (!arm_feature(env, ARM_FEATURE_V5))
3501         mask &= ~CPSR_Q; /* V5TE in reality */
3502 if (!arm_feature(env, ARM_FEATURE_V6))
3503 mask &= ~(CPSR_E | CPSR_GE);
3504 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3505 mask &= ~CPSR_IT;
3506 /* Mask out execution state bits. */
3507 if (!spsr)
3508 mask &= ~CPSR_EXEC;
3509 /* Mask out privileged bits. */
3510 if (IS_USER(s))
3511 mask &= CPSR_USER;
3512 return mask;
3513 }
3514
3515 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3516 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3517 {
3518 TCGv tmp;
3519 if (spsr) {
3520 /* ??? This is also undefined in system mode. */
3521 if (IS_USER(s))
3522 return 1;
3523
3524 tmp = load_cpu_field(spsr);
3525 tcg_gen_andi_i32(tmp, tmp, ~mask);
3526 tcg_gen_andi_i32(t0, t0, mask);
3527 tcg_gen_or_i32(tmp, tmp, t0);
3528 store_cpu_field(tmp, spsr);
3529 } else {
3530 gen_set_cpsr(t0, mask);
3531 }
3532 tcg_temp_free_i32(t0);
3533 gen_lookup_tb(s);
3534 return 0;
3535 }
3536
3537 /* Returns nonzero if access to the PSR is not permitted. */
3538 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3539 {
3540 TCGv tmp;
3541 tmp = tcg_temp_new_i32();
3542 tcg_gen_movi_i32(tmp, val);
3543 return gen_set_psr(s, mask, spsr, tmp);
3544 }
3545
3546 /* Generate an old-style exception return. Marks pc as dead. */
3547 static void gen_exception_return(DisasContext *s, TCGv pc)
3548 {
3549 TCGv tmp;
3550 store_reg(s, 15, pc);
3551 tmp = load_cpu_field(spsr);
3552 gen_set_cpsr(tmp, 0xffffffff);
3553 tcg_temp_free_i32(tmp);
3554 s->is_jmp = DISAS_UPDATE;
3555 }
3556
3557 /* Generate a v6 exception return. Marks both values as dead. */
3558 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3559 {
3560 gen_set_cpsr(cpsr, 0xffffffff);
3561 tcg_temp_free_i32(cpsr);
3562 store_reg(s, 15, pc);
3563 s->is_jmp = DISAS_UPDATE;
3564 }
3565
3566 static inline void
3567 gen_set_condexec(DisasContext *s)
3568 {
3569 if (s->condexec_mask) {
3570 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3571 TCGv tmp = tcg_temp_new_i32();
3572 tcg_gen_movi_i32(tmp, val);
3573 store_cpu_field(tmp, condexec_bits);
3574 }
3575 }
3576
3577 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3578 {
3579 gen_set_condexec(s);
3580 gen_set_pc_im(s->pc - offset);
3581 gen_exception(excp);
3582 s->is_jmp = DISAS_JUMP;
3583 }
3584
3585 static void gen_nop_hint(DisasContext *s, int val)
3586 {
3587 switch (val) {
3588 case 3: /* wfi */
3589 gen_set_pc_im(s->pc);
3590 s->is_jmp = DISAS_WFI;
3591 break;
3592 case 2: /* wfe */
3593 case 4: /* sev */
3594 /* TODO: Implement SEV and WFE. May help SMP performance. */
3595 default: /* nop */
3596 break;
3597 }
3598 }
3599
3600 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3601
3602 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3603 {
3604 switch (size) {
3605 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3606 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3607 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3608 default: abort();
3609 }
3610 }
3611
3612 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3613 {
3614 switch (size) {
3615 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3616 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3617 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3618 default: return;
3619 }
3620 }
3621
3622 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3623 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3624 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3625 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3626 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3627
3628 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3629 switch ((size << 1) | u) { \
3630 case 0: \
3631 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3632 break; \
3633 case 1: \
3634 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3635 break; \
3636 case 2: \
3637 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3638 break; \
3639 case 3: \
3640 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3641 break; \
3642 case 4: \
3643 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3644 break; \
3645 case 5: \
3646 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3647 break; \
3648 default: return 1; \
3649 }} while (0)
3650
3651 #define GEN_NEON_INTEGER_OP(name) do { \
3652 switch ((size << 1) | u) { \
3653 case 0: \
3654 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3655 break; \
3656 case 1: \
3657 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3658 break; \
3659 case 2: \
3660 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3661 break; \
3662 case 3: \
3663 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3664 break; \
3665 case 4: \
3666 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3667 break; \
3668 case 5: \
3669 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3670 break; \
3671 default: return 1; \
3672 }} while (0)
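 /* In both macros the switch index is (size << 1) | u, so element sizes
  * 0/1/2 combined with the unsigned bit select one of the six
  * s8/u8/s16/u16/s32/u32 helper variants; any other combination is an
  * invalid encoding and makes the enclosing function return 1.
  */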
3673
3674 static TCGv neon_load_scratch(int scratch)
3675 {
3676 TCGv tmp = tcg_temp_new_i32();
3677 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3678 return tmp;
3679 }
3680
3681 static void neon_store_scratch(int scratch, TCGv var)
3682 {
3683 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3684 tcg_temp_free_i32(var);
3685 }
3686
3687 static inline TCGv neon_get_scalar(int size, int reg)
3688 {
3689 TCGv tmp;
3690 if (size == 1) {
3691 tmp = neon_load_reg(reg & 7, reg >> 4);
3692 if (reg & 8) {
3693 gen_neon_dup_high16(tmp);
3694 } else {
3695 gen_neon_dup_low16(tmp);
3696 }
3697 } else {
3698 tmp = neon_load_reg(reg & 15, reg >> 4);
3699 }
3700 return tmp;
3701 }
3702
3703 static int gen_neon_unzip(int rd, int rm, int size, int q)
3704 {
3705 TCGv tmp, tmp2;
3706 if (!q && size == 2) {
3707 return 1;
3708 }
3709 tmp = tcg_const_i32(rd);
3710 tmp2 = tcg_const_i32(rm);
3711 if (q) {
3712 switch (size) {
3713 case 0:
3714 gen_helper_neon_qunzip8(tmp, tmp2);
3715 break;
3716 case 1:
3717 gen_helper_neon_qunzip16(tmp, tmp2);
3718 break;
3719 case 2:
3720 gen_helper_neon_qunzip32(tmp, tmp2);
3721 break;
3722 default:
3723 abort();
3724 }
3725 } else {
3726 switch (size) {
3727 case 0:
3728 gen_helper_neon_unzip8(tmp, tmp2);
3729 break;
3730 case 1:
3731 gen_helper_neon_unzip16(tmp, tmp2);
3732 break;
3733 default:
3734 abort();
3735 }
3736 }
3737 tcg_temp_free_i32(tmp);
3738 tcg_temp_free_i32(tmp2);
3739 return 0;
3740 }
3741
3742 static int gen_neon_zip(int rd, int rm, int size, int q)
3743 {
3744 TCGv tmp, tmp2;
3745 if (!q && size == 2) {
3746 return 1;
3747 }
3748 tmp = tcg_const_i32(rd);
3749 tmp2 = tcg_const_i32(rm);
3750 if (q) {
3751 switch (size) {
3752 case 0:
3753 gen_helper_neon_qzip8(tmp, tmp2);
3754 break;
3755 case 1:
3756 gen_helper_neon_qzip16(tmp, tmp2);
3757 break;
3758 case 2:
3759 gen_helper_neon_qzip32(tmp, tmp2);
3760 break;
3761 default:
3762 abort();
3763 }
3764 } else {
3765 switch (size) {
3766 case 0:
3767 gen_helper_neon_zip8(tmp, tmp2);
3768 break;
3769 case 1:
3770 gen_helper_neon_zip16(tmp, tmp2);
3771 break;
3772 default:
3773 abort();
3774 }
3775 }
3776 tcg_temp_free_i32(tmp);
3777 tcg_temp_free_i32(tmp2);
3778 return 0;
3779 }
3780
3781 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3782 {
3783 TCGv rd, tmp;
3784
3785 rd = tcg_temp_new_i32();
3786 tmp = tcg_temp_new_i32();
3787
3788 tcg_gen_shli_i32(rd, t0, 8);
3789 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3790 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3791 tcg_gen_or_i32(rd, rd, tmp);
3792
3793 tcg_gen_shri_i32(t1, t1, 8);
3794 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3795 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3796 tcg_gen_or_i32(t1, t1, tmp);
3797 tcg_gen_mov_i32(t0, rd);
3798
3799 tcg_temp_free_i32(tmp);
3800 tcg_temp_free_i32(rd);
3801 }
3802
3803 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3804 {
3805 TCGv rd, tmp;
3806
3807 rd = tcg_temp_new_i32();
3808 tmp = tcg_temp_new_i32();
3809
3810 tcg_gen_shli_i32(rd, t0, 16);
3811 tcg_gen_andi_i32(tmp, t1, 0xffff);
3812 tcg_gen_or_i32(rd, rd, tmp);
3813 tcg_gen_shri_i32(t1, t1, 16);
3814 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3815 tcg_gen_or_i32(t1, t1, tmp);
3816 tcg_gen_mov_i32(t0, rd);
3817
3818 tcg_temp_free_i32(tmp);
3819 tcg_temp_free_i32(rd);
3820 }
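 /* Worked example for gen_neon_trn_u16: with t0 = 0xaaaabbbb and
  * t1 = 0xccccdddd the result is t0 = 0xbbbbdddd and t1 = 0xaaaacccc,
  * i.e. the 16-bit elements of the two inputs are transposed pairwise.
  */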
3821
3822
3823 static struct {
3824 int nregs;
3825 int interleave;
3826 int spacing;
3827 } neon_ls_element_type[11] = {
3828 {4, 4, 1},
3829 {4, 4, 2},
3830 {4, 1, 1},
3831 {4, 2, 1},
3832 {3, 3, 1},
3833 {3, 3, 2},
3834 {3, 1, 1},
3835 {1, 1, 1},
3836 {2, 2, 1},
3837 {2, 2, 2},
3838 {2, 1, 1}
3839 };
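 /* The table above is indexed by the 'op' field (insn[11:8]) of a VLDn/VSTn
  * "multiple structures" instruction: nregs is how many D registers one
  * structure access touches, interleave how its elements are interleaved in
  * memory, and spacing the register-number stride between them.
  */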
3840
3841 /* Translate a NEON load/store element instruction. Return nonzero if the
3842 instruction is invalid. */
3843 static int disas_neon_ls_insn(CPUState *env, DisasContext *s, uint32_t insn)
3844 {
3845 int rd, rn, rm;
3846 int op;
3847 int nregs;
3848 int interleave;
3849 int spacing;
3850 int stride;
3851 int size;
3852 int reg;
3853 int pass;
3854 int load;
3855 int shift;
3856 int n;
3857 TCGv addr;
3858 TCGv tmp;
3859 TCGv tmp2;
3860 TCGv_i64 tmp64;
3861
3862 if (!s->vfp_enabled)
3863 return 1;
3864 VFP_DREG_D(rd, insn);
3865 rn = (insn >> 16) & 0xf;
3866 rm = insn & 0xf;
3867 load = (insn & (1 << 21)) != 0;
3868 if ((insn & (1 << 23)) == 0) {
3869 /* Load store all elements. */
3870 op = (insn >> 8) & 0xf;
3871 size = (insn >> 6) & 3;
3872 if (op > 10)
3873 return 1;
3874 /* Catch UNDEF cases for bad values of align field */
3875 switch (op & 0xc) {
3876 case 4:
3877 if (((insn >> 5) & 1) == 1) {
3878 return 1;
3879 }
3880 break;
3881 case 8:
3882 if (((insn >> 4) & 3) == 3) {
3883 return 1;
3884 }
3885 break;
3886 default:
3887 break;
3888 }
3889 nregs = neon_ls_element_type[op].nregs;
3890 interleave = neon_ls_element_type[op].interleave;
3891 spacing = neon_ls_element_type[op].spacing;
3892 if (size == 3 && (interleave | spacing) != 1)
3893 return 1;
3894 addr = tcg_temp_new_i32();
3895 load_reg_var(s, addr, rn);
3896 stride = (1 << size) * interleave;
3897 for (reg = 0; reg < nregs; reg++) {
3898 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3899 load_reg_var(s, addr, rn);
3900 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3901 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3902 load_reg_var(s, addr, rn);
3903 tcg_gen_addi_i32(addr, addr, 1 << size);
3904 }
3905 if (size == 3) {
3906 if (load) {
3907 tmp64 = gen_ld64(addr, IS_USER(s));
3908 neon_store_reg64(tmp64, rd);
3909 tcg_temp_free_i64(tmp64);
3910 } else {
3911 tmp64 = tcg_temp_new_i64();
3912 neon_load_reg64(tmp64, rd);
3913 gen_st64(tmp64, addr, IS_USER(s));
3914 }
3915 tcg_gen_addi_i32(addr, addr, stride);
3916 } else {
3917 for (pass = 0; pass < 2; pass++) {
3918 if (size == 2) {
3919 if (load) {
3920 tmp = gen_ld32(addr, IS_USER(s));
3921 neon_store_reg(rd, pass, tmp);
3922 } else {
3923 tmp = neon_load_reg(rd, pass);
3924 gen_st32(tmp, addr, IS_USER(s));
3925 }
3926 tcg_gen_addi_i32(addr, addr, stride);
3927 } else if (size == 1) {
3928 if (load) {
3929 tmp = gen_ld16u(addr, IS_USER(s));
3930 tcg_gen_addi_i32(addr, addr, stride);
3931 tmp2 = gen_ld16u(addr, IS_USER(s));
3932 tcg_gen_addi_i32(addr, addr, stride);
3933 tcg_gen_shli_i32(tmp2, tmp2, 16);
3934 tcg_gen_or_i32(tmp, tmp, tmp2);
3935 tcg_temp_free_i32(tmp2);
3936 neon_store_reg(rd, pass, tmp);
3937 } else {
3938 tmp = neon_load_reg(rd, pass);
3939 tmp2 = tcg_temp_new_i32();
3940 tcg_gen_shri_i32(tmp2, tmp, 16);
3941 gen_st16(tmp, addr, IS_USER(s));
3942 tcg_gen_addi_i32(addr, addr, stride);
3943 gen_st16(tmp2, addr, IS_USER(s));
3944 tcg_gen_addi_i32(addr, addr, stride);
3945 }
3946 } else /* size == 0 */ {
3947 if (load) {
3948 TCGV_UNUSED(tmp2);
3949 for (n = 0; n < 4; n++) {
3950 tmp = gen_ld8u(addr, IS_USER(s));
3951 tcg_gen_addi_i32(addr, addr, stride);
3952 if (n == 0) {
3953 tmp2 = tmp;
3954 } else {
3955 tcg_gen_shli_i32(tmp, tmp, n * 8);
3956 tcg_gen_or_i32(tmp2, tmp2, tmp);
3957 tcg_temp_free_i32(tmp);
3958 }
3959 }
3960 neon_store_reg(rd, pass, tmp2);
3961 } else {
3962 tmp2 = neon_load_reg(rd, pass);
3963 for (n = 0; n < 4; n++) {
3964 tmp = tcg_temp_new_i32();
3965 if (n == 0) {
3966 tcg_gen_mov_i32(tmp, tmp2);
3967 } else {
3968 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3969 }
3970 gen_st8(tmp, addr, IS_USER(s));
3971 tcg_gen_addi_i32(addr, addr, stride);
3972 }
3973 tcg_temp_free_i32(tmp2);
3974 }
3975 }
3976 }
3977 }
3978 rd += spacing;
3979 }
3980 tcg_temp_free_i32(addr);
3981 stride = nregs * 8;
3982 } else {
3983 size = (insn >> 10) & 3;
3984 if (size == 3) {
3985 /* Load single element to all lanes. */
3986 int a = (insn >> 4) & 1;
3987 if (!load) {
3988 return 1;
3989 }
3990 size = (insn >> 6) & 3;
3991 nregs = ((insn >> 8) & 3) + 1;
3992
3993 if (size == 3) {
3994 if (nregs != 4 || a == 0) {
3995 return 1;
3996 }
3997             /* For VLD4 size == 3, a == 1 means 32 bits at 16-byte alignment */
3998 size = 2;
3999 }
4000 if (nregs == 1 && a == 1 && size == 0) {
4001 return 1;
4002 }
4003 if (nregs == 3 && a == 1) {
4004 return 1;
4005 }
4006 addr = tcg_temp_new_i32();
4007 load_reg_var(s, addr, rn);
4008 if (nregs == 1) {
4009 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4010 tmp = gen_load_and_replicate(s, addr, size);
4011 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4012 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4013 if (insn & (1 << 5)) {
4014 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4015 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4016 }
4017 tcg_temp_free_i32(tmp);
4018 } else {
4019 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4020 stride = (insn & (1 << 5)) ? 2 : 1;
4021 for (reg = 0; reg < nregs; reg++) {
4022 tmp = gen_load_and_replicate(s, addr, size);
4023 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4024 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4025 tcg_temp_free_i32(tmp);
4026 tcg_gen_addi_i32(addr, addr, 1 << size);
4027 rd += stride;
4028 }
4029 }
4030 tcg_temp_free_i32(addr);
4031 stride = (1 << size) * nregs;
4032 } else {
4033 /* Single element. */
4034 int idx = (insn >> 4) & 0xf;
4035 pass = (insn >> 7) & 1;
4036 switch (size) {
4037 case 0:
4038 shift = ((insn >> 5) & 3) * 8;
4039 stride = 1;
4040 break;
4041 case 1:
4042 shift = ((insn >> 6) & 1) * 16;
4043 stride = (insn & (1 << 5)) ? 2 : 1;
4044 break;
4045 case 2:
4046 shift = 0;
4047 stride = (insn & (1 << 6)) ? 2 : 1;
4048 break;
4049 default:
4050 abort();
4051 }
4052 nregs = ((insn >> 8) & 3) + 1;
4053 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4054 switch (nregs) {
4055 case 1:
4056 if (((idx & (1 << size)) != 0) ||
4057 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4058 return 1;
4059 }
4060 break;
4061 case 3:
4062 if ((idx & 1) != 0) {
4063 return 1;
4064 }
4065 /* fall through */
4066 case 2:
4067 if (size == 2 && (idx & 2) != 0) {
4068 return 1;
4069 }
4070 break;
4071 case 4:
4072 if ((size == 2) && ((idx & 3) == 3)) {
4073 return 1;
4074 }
4075 break;
4076 default:
4077 abort();
4078 }
4079 if ((rd + stride * (nregs - 1)) > 31) {
4080 /* Attempts to write off the end of the register file
4081 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4082 * the neon_load_reg() would write off the end of the array.
4083 */
4084 return 1;
4085 }
4086 addr = tcg_temp_new_i32();
4087 load_reg_var(s, addr, rn);
4088 for (reg = 0; reg < nregs; reg++) {
4089 if (load) {
4090 switch (size) {
4091 case 0:
4092 tmp = gen_ld8u(addr, IS_USER(s));
4093 break;
4094 case 1:
4095 tmp = gen_ld16u(addr, IS_USER(s));
4096 break;
4097 case 2:
4098 tmp = gen_ld32(addr, IS_USER(s));
4099 break;
4100 default: /* Avoid compiler warnings. */
4101 abort();
4102 }
4103 if (size != 2) {
4104 tmp2 = neon_load_reg(rd, pass);
4105 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
4106 tcg_temp_free_i32(tmp2);
4107 }
4108 neon_store_reg(rd, pass, tmp);
4109 } else { /* Store */
4110 tmp = neon_load_reg(rd, pass);
4111 if (shift)
4112 tcg_gen_shri_i32(tmp, tmp, shift);
4113 switch (size) {
4114 case 0:
4115 gen_st8(tmp, addr, IS_USER(s));
4116 break;
4117 case 1:
4118 gen_st16(tmp, addr, IS_USER(s));
4119 break;
4120 case 2:
4121 gen_st32(tmp, addr, IS_USER(s));
4122 break;
4123 }
4124 }
4125 rd += stride;
4126 tcg_gen_addi_i32(addr, addr, 1 << size);
4127 }
4128 tcg_temp_free_i32(addr);
4129 stride = nregs * (1 << size);
4130 }
4131 }
4132 if (rm != 15) {
4133 TCGv base;
4134
4135 base = load_reg(s, rn);
4136 if (rm == 13) {
4137 tcg_gen_addi_i32(base, base, stride);
4138 } else {
4139 TCGv index;
4140 index = load_reg(s, rm);
4141 tcg_gen_add_i32(base, base, index);
4142 tcg_temp_free_i32(index);
4143 }
4144 store_reg(s, rn, base);
4145 }
4146 return 0;
4147 }
4148
4149 /* Bitwise select.  dest = c ? t : f.  Clobbers t and f.  */
4150 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4151 {
4152 tcg_gen_and_i32(t, t, c);
4153 tcg_gen_andc_i32(f, f, c);
4154 tcg_gen_or_i32(dest, t, f);
4155 }
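 /* Worked example of the bitwise select: dest = (t & c) | (f & ~c), so with
  * c = 0xf0, t = 0xaa and f = 0x55 the result is 0xa5: the high nibble
  * comes from t, the low nibble from f.
  */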
4156
4157 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4158 {
4159 switch (size) {
4160 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4161 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4162 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4163 default: abort();
4164 }
4165 }
4166
4167 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4168 {
4169 switch (size) {
4170 case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
4171 case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
4172 case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
4173 default: abort();
4174 }
4175 }
4176
4177 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4178 {
4179 switch (size) {
4180 case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
4181 case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
4182 case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
4183 default: abort();
4184 }
4185 }
4186
4187 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4188 {
4189 switch (size) {
4190 case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
4191 case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
4192 case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
4193 default: abort();
4194 }
4195 }
4196
4197 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4198 int q, int u)
4199 {
4200 if (q) {
4201 if (u) {
4202 switch (size) {
4203 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4204 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4205 default: abort();
4206 }
4207 } else {
4208 switch (size) {
4209 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4210 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4211 default: abort();
4212 }
4213 }
4214 } else {
4215 if (u) {
4216 switch (size) {
4217 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4218 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4219 default: abort();
4220 }
4221 } else {
4222 switch (size) {
4223 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4224 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4225 default: abort();
4226 }
4227 }
4228 }
4229 }
4230
4231 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4232 {
4233 if (u) {
4234 switch (size) {
4235 case 0: gen_helper_neon_widen_u8(dest, src); break;
4236 case 1: gen_helper_neon_widen_u16(dest, src); break;
4237 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4238 default: abort();
4239 }
4240 } else {
4241 switch (size) {
4242 case 0: gen_helper_neon_widen_s8(dest, src); break;
4243 case 1: gen_helper_neon_widen_s16(dest, src); break;
4244 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4245 default: abort();
4246 }
4247 }
4248 tcg_temp_free_i32(src);
4249 }
4250
4251 static inline void gen_neon_addl(int size)
4252 {
4253 switch (size) {
4254 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4255 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4256 case 2: tcg_gen_add_i64(CPU_V001); break;
4257 default: abort();
4258 }
4259 }
4260
4261 static inline void gen_neon_subl(int size)
4262 {
4263 switch (size) {
4264 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4265 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4266 case 2: tcg_gen_sub_i64(CPU_V001); break;
4267 default: abort();
4268 }
4269 }
4270
4271 static inline void gen_neon_negl(TCGv_i64 var, int size)
4272 {
4273 switch (size) {
4274 case 0: gen_helper_neon_negl_u16(var, var); break;
4275 case 1: gen_helper_neon_negl_u32(var, var); break;
4276 case 2: gen_helper_neon_negl_u64(var, var); break;
4277 default: abort();
4278 }
4279 }
4280
4281 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4282 {
4283 switch (size) {
4284 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4285 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
4286 default: abort();
4287 }
4288 }
4289
4290 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4291 {
4292 TCGv_i64 tmp;
4293
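    /* The case index is (size << 1) | u: e.g. a signed 16-bit multiply is
     * case 2 and an unsigned one case 3; 32-bit inputs (cases 4 and 5) use
     * the generic 64-bit multiply helpers rather than Neon-specific ones.
     */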
4294 switch ((size << 1) | u) {
4295 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4296 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4297 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4298 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4299 case 4:
4300 tmp = gen_muls_i64_i32(a, b);
4301 tcg_gen_mov_i64(dest, tmp);
4302 tcg_temp_free_i64(tmp);
4303 break;
4304 case 5:
4305 tmp = gen_mulu_i64_i32(a, b);
4306 tcg_gen_mov_i64(dest, tmp);
4307 tcg_temp_free_i64(tmp);
4308 break;
4309 default: abort();
4310 }
4311
4312 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4313 Don't forget to clean them now. */
4314 if (size < 2) {
4315 tcg_temp_free_i32(a);
4316 tcg_temp_free_i32(b);
4317 }
4318 }
4319
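/* Pick one of the four narrowing behaviours: with op set, u chooses between
 * the saturating unsigned-from-signed narrow (the VQMOVUN/VQSHRUN flavour)
 * and the plain truncating narrow; with op clear, u chooses the unsigned or
 * signed saturating narrow.
 */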
4320 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4321 {
4322 if (op) {
4323 if (u) {
4324 gen_neon_unarrow_sats(size, dest, src);
4325 } else {
4326 gen_neon_narrow(size, dest, src);
4327 }
4328 } else {
4329 if (u) {
4330 gen_neon_narrow_satu(size, dest, src);
4331 } else {
4332 gen_neon_narrow_sats(size, dest, src);
4333 }
4334 }
4335 }
4336
4337 /* Symbolic constants for op fields for Neon 3-register same-length.
4338 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4339 * table A7-9.
4340 */
4341 #define NEON_3R_VHADD 0
4342 #define NEON_3R_VQADD 1
4343 #define NEON_3R_VRHADD 2
4344 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4345 #define NEON_3R_VHSUB 4
4346 #define NEON_3R_VQSUB 5
4347 #define NEON_3R_VCGT 6
4348 #define NEON_3R_VCGE 7
4349 #define NEON_3R_VSHL 8
4350 #define NEON_3R_VQSHL 9
4351 #define NEON_3R_VRSHL 10
4352 #define NEON_3R_VQRSHL 11
4353 #define NEON_3R_VMAX 12
4354 #define NEON_3R_VMIN 13
4355 #define NEON_3R_VABD 14
4356 #define NEON_3R_VABA 15
4357 #define NEON_3R_VADD_VSUB 16
4358 #define NEON_3R_VTST_VCEQ 17
4359 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4360 #define NEON_3R_VMUL 19
4361 #define NEON_3R_VPMAX 20
4362 #define NEON_3R_VPMIN 21
4363 #define NEON_3R_VQDMULH_VQRDMULH 22
4364 #define NEON_3R_VPADD 23
4365 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4366 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4367 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4368 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4369 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4370 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4371
4372 static const uint8_t neon_3r_sizes[] = {
4373 [NEON_3R_VHADD] = 0x7,
4374 [NEON_3R_VQADD] = 0xf,
4375 [NEON_3R_VRHADD] = 0x7,
4376 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4377 [NEON_3R_VHSUB] = 0x7,
4378 [NEON_3R_VQSUB] = 0xf,
4379 [NEON_3R_VCGT] = 0x7,
4380 [NEON_3R_VCGE] = 0x7,
4381 [NEON_3R_VSHL] = 0xf,
4382 [NEON_3R_VQSHL] = 0xf,
4383 [NEON_3R_VRSHL] = 0xf,
4384 [NEON_3R_VQRSHL] = 0xf,
4385 [NEON_3R_VMAX] = 0x7,
4386 [NEON_3R_VMIN] = 0x7,
4387 [NEON_3R_VABD] = 0x7,
4388 [NEON_3R_VABA] = 0x7,
4389 [NEON_3R_VADD_VSUB] = 0xf,
4390 [NEON_3R_VTST_VCEQ] = 0x7,
4391 [NEON_3R_VML] = 0x7,
4392 [NEON_3R_VMUL] = 0x7,
4393 [NEON_3R_VPMAX] = 0x7,
4394 [NEON_3R_VPMIN] = 0x7,
4395 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4396 [NEON_3R_VPADD] = 0x7,
4397 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4398 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4399 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4400 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4401 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4402 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4403 };
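/* For example, NEON_3R_VQDMULH_VQRDMULH is 0x6 (bits 1 and 2 set), so only
 * 16-bit and 32-bit elements are accepted and a size field of 0 or 3 fails
 * the "(neon_3r_sizes[op] & (1 << size)) == 0" check in the decoder below.
 */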
4404
4405 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4406 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4407 * table A7-13.
4408 */
4409 #define NEON_2RM_VREV64 0
4410 #define NEON_2RM_VREV32 1
4411 #define NEON_2RM_VREV16 2
4412 #define NEON_2RM_VPADDL 4
4413 #define NEON_2RM_VPADDL_U 5
4414 #define NEON_2RM_VCLS 8
4415 #define NEON_2RM_VCLZ 9
4416 #define NEON_2RM_VCNT 10
4417 #define NEON_2RM_VMVN 11
4418 #define NEON_2RM_VPADAL 12
4419 #define NEON_2RM_VPADAL_U 13
4420 #define NEON_2RM_VQABS 14
4421 #define NEON_2RM_VQNEG 15
4422 #define NEON_2RM_VCGT0 16
4423 #define NEON_2RM_VCGE0 17
4424 #define NEON_2RM_VCEQ0 18
4425 #define NEON_2RM_VCLE0 19
4426 #define NEON_2RM_VCLT0 20
4427 #define NEON_2RM_VABS 22
4428 #define NEON_2RM_VNEG 23
4429 #define NEON_2RM_VCGT0_F 24
4430 #define NEON_2RM_VCGE0_F 25
4431 #define NEON_2RM_VCEQ0_F 26
4432 #define NEON_2RM_VCLE0_F 27
4433 #define NEON_2RM_VCLT0_F 28
4434 #define NEON_2RM_VABS_F 30
4435 #define NEON_2RM_VNEG_F 31
4436 #define NEON_2RM_VSWP 32
4437 #define NEON_2RM_VTRN 33
4438 #define NEON_2RM_VUZP 34
4439 #define NEON_2RM_VZIP 35
4440 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4441 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4442 #define NEON_2RM_VSHLL 38
4443 #define NEON_2RM_VCVT_F16_F32 44
4444 #define NEON_2RM_VCVT_F32_F16 46
4445 #define NEON_2RM_VRECPE 56
4446 #define NEON_2RM_VRSQRTE 57
4447 #define NEON_2RM_VRECPE_F 58
4448 #define NEON_2RM_VRSQRTE_F 59
4449 #define NEON_2RM_VCVT_FS 60
4450 #define NEON_2RM_VCVT_FU 61
4451 #define NEON_2RM_VCVT_SF 62
4452 #define NEON_2RM_VCVT_UF 63
4453
4454 static int neon_2rm_is_float_op(int op)
4455 {
4456 /* Return true if this neon 2reg-misc op is float-to-float */
4457 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4458 op >= NEON_2RM_VRECPE_F);
4459 }
4460
4461 /* Each entry in this array has bit n set if the insn allows
4462 * size value n (otherwise it will UNDEF). Since unallocated
4463 * op values will have no bits set they always UNDEF.
4464 */
4465 static const uint8_t neon_2rm_sizes[] = {
4466 [NEON_2RM_VREV64] = 0x7,
4467 [NEON_2RM_VREV32] = 0x3,
4468 [NEON_2RM_VREV16] = 0x1,
4469 [NEON_2RM_VPADDL] = 0x7,
4470 [NEON_2RM_VPADDL_U] = 0x7,
4471 [NEON_2RM_VCLS] = 0x7,
4472 [NEON_2RM_VCLZ] = 0x7,
4473 [NEON_2RM_VCNT] = 0x1,
4474 [NEON_2RM_VMVN] = 0x1,
4475 [NEON_2RM_VPADAL] = 0x7,
4476 [NEON_2RM_VPADAL_U] = 0x7,
4477 [NEON_2RM_VQABS] = 0x7,
4478 [NEON_2RM_VQNEG] = 0x7,
4479 [NEON_2RM_VCGT0] = 0x7,
4480 [NEON_2RM_VCGE0] = 0x7,
4481 [NEON_2RM_VCEQ0] = 0x7,
4482 [NEON_2RM_VCLE0] = 0x7,
4483 [NEON_2RM_VCLT0] = 0x7,
4484 [NEON_2RM_VABS] = 0x7,
4485 [NEON_2RM_VNEG] = 0x7,
4486 [NEON_2RM_VCGT0_F] = 0x4,
4487 [NEON_2RM_VCGE0_F] = 0x4,
4488 [NEON_2RM_VCEQ0_F] = 0x4,
4489 [NEON_2RM_VCLE0_F] = 0x4,
4490 [NEON_2RM_VCLT0_F] = 0x4,
4491 [NEON_2RM_VABS_F] = 0x4,
4492 [NEON_2RM_VNEG_F] = 0x4,
4493 [NEON_2RM_VSWP] = 0x1,
4494 [NEON_2RM_VTRN] = 0x7,
4495 [NEON_2RM_VUZP] = 0x7,
4496 [NEON_2RM_VZIP] = 0x7,
4497 [NEON_2RM_VMOVN] = 0x7,
4498 [NEON_2RM_VQMOVN] = 0x7,
4499 [NEON_2RM_VSHLL] = 0x7,
4500 [NEON_2RM_VCVT_F16_F32] = 0x2,
4501 [NEON_2RM_VCVT_F32_F16] = 0x2,
4502 [NEON_2RM_VRECPE] = 0x4,
4503 [NEON_2RM_VRSQRTE] = 0x4,
4504 [NEON_2RM_VRECPE_F] = 0x4,
4505 [NEON_2RM_VRSQRTE_F] = 0x4,
4506 [NEON_2RM_VCVT_FS] = 0x4,
4507 [NEON_2RM_VCVT_FU] = 0x4,
4508 [NEON_2RM_VCVT_SF] = 0x4,
4509 [NEON_2RM_VCVT_UF] = 0x4,
4510 };
4511
4512 /* Translate a NEON data processing instruction. Return nonzero if the
4513 instruction is invalid.
4514 We process data in a mixture of 32-bit and 64-bit chunks.
4515 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4516
4517 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4518 {
4519 int op;
4520 int q;
4521 int rd, rn, rm;
4522 int size;
4523 int shift;
4524 int pass;
4525 int count;
4526 int pairwise;
4527 int u;
4528 uint32_t imm, mask;
4529 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4530 TCGv_i64 tmp64;
4531
4532 if (!s->vfp_enabled)
4533 return 1;
4534 q = (insn & (1 << 6)) != 0;
4535 u = (insn >> 24) & 1;
4536 VFP_DREG_D(rd, insn);
4537 VFP_DREG_N(rn, insn);
4538 VFP_DREG_M(rm, insn);
4539 size = (insn >> 20) & 3;
4540 if ((insn & (1 << 23)) == 0) {
4541 /* Three register same length. */
4542 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4543 /* Catch invalid op and bad size combinations: UNDEF */
4544 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4545 return 1;
4546 }
4547 /* All insns of this form UNDEF for either this condition or the
4548 * superset of cases "Q==1"; we catch the latter later.
4549 */
4550 if (q && ((rd | rn | rm) & 1)) {
4551 return 1;
4552 }
4553 if (size == 3 && op != NEON_3R_LOGIC) {
4554 /* 64-bit element instructions. */
4555 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4556 neon_load_reg64(cpu_V0, rn + pass);
4557 neon_load_reg64(cpu_V1, rm + pass);
4558 switch (op) {
4559 case NEON_3R_VQADD:
4560 if (u) {
4561 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
4562 } else {
4563 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
4564 }
4565 break;
4566 case NEON_3R_VQSUB:
4567 if (u) {
4568 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
4569 } else {
4570 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
4571 }
4572 break;
4573 case NEON_3R_VSHL:
4574 if (u) {
4575 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4576 } else {
4577 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4578 }
4579 break;
4580 case NEON_3R_VQSHL:
4581 if (u) {
4582 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
4583 } else {
4584 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
4585 }
4586 break;
4587 case NEON_3R_VRSHL:
4588 if (u) {
4589 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4590 } else {
4591 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4592 }
4593 break;
4594 case NEON_3R_VQRSHL:
4595 if (u) {
4596 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
4597 } else {
4598 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
4599 }
4600 break;
4601 case NEON_3R_VADD_VSUB:
4602 if (u) {
4603 tcg_gen_sub_i64(CPU_V001);
4604 } else {
4605 tcg_gen_add_i64(CPU_V001);
4606 }
4607 break;
4608 default:
4609 abort();
4610 }
4611 neon_store_reg64(cpu_V0, rd + pass);
4612 }
4613 return 0;
4614 }
4615 pairwise = 0;
4616 switch (op) {
4617 case NEON_3R_VSHL:
4618 case NEON_3R_VQSHL:
4619 case NEON_3R_VRSHL:
4620 case NEON_3R_VQRSHL:
4621 {
4622 int rtmp;
4623 /* Shift instruction operands are reversed. */
4624 rtmp = rn;
4625 rn = rm;
4626 rm = rtmp;
4627 }
4628 break;
4629 case NEON_3R_VPADD:
4630 if (u) {
4631 return 1;
4632 }
4633 /* Fall through */
4634 case NEON_3R_VPMAX:
4635 case NEON_3R_VPMIN:
4636 pairwise = 1;
4637 break;
4638 case NEON_3R_FLOAT_ARITH:
4639 pairwise = (u && size < 2); /* if VPADD (float) */
4640 break;
4641 case NEON_3R_FLOAT_MINMAX:
4642 pairwise = u; /* if VPMIN/VPMAX (float) */
4643 break;
4644 case NEON_3R_FLOAT_CMP:
4645 if (!u && size) {
4646 /* no encoding for U=0 C=1x */
4647 return 1;
4648 }
4649 break;
4650 case NEON_3R_FLOAT_ACMP:
4651 if (!u) {
4652 return 1;
4653 }
4654 break;
4655 case NEON_3R_VRECPS_VRSQRTS:
4656 if (u) {
4657 return 1;
4658 }
4659 break;
4660 case NEON_3R_VMUL:
4661 if (u && (size != 0)) {
4662 /* UNDEF on invalid size for polynomial subcase */
4663 return 1;
4664 }
4665 break;
4666 default:
4667 break;
4668 }
4669
4670 if (pairwise && q) {
4671 /* All the pairwise insns UNDEF if Q is set */
4672 return 1;
4673 }
4674
4675 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4676
4677 if (pairwise) {
4678 /* Pairwise. */
4679 if (pass < 1) {
4680 tmp = neon_load_reg(rn, 0);
4681 tmp2 = neon_load_reg(rn, 1);
4682 } else {
4683 tmp = neon_load_reg(rm, 0);
4684 tmp2 = neon_load_reg(rm, 1);
4685 }
4686 } else {
4687 /* Elementwise. */
4688 tmp = neon_load_reg(rn, pass);
4689 tmp2 = neon_load_reg(rm, pass);
4690 }
4691 switch (op) {
4692 case NEON_3R_VHADD:
4693 GEN_NEON_INTEGER_OP(hadd);
4694 break;
4695 case NEON_3R_VQADD:
4696 GEN_NEON_INTEGER_OP(qadd);
4697 break;
4698 case NEON_3R_VRHADD:
4699 GEN_NEON_INTEGER_OP(rhadd);
4700 break;
4701 case NEON_3R_LOGIC: /* Logic ops. */
4702 switch ((u << 2) | size) {
4703 case 0: /* VAND */
4704 tcg_gen_and_i32(tmp, tmp, tmp2);
4705 break;
4706 case 1: /* BIC */
4707 tcg_gen_andc_i32(tmp, tmp, tmp2);
4708 break;
4709 case 2: /* VORR */
4710 tcg_gen_or_i32(tmp, tmp, tmp2);
4711 break;
4712 case 3: /* VORN */
4713 tcg_gen_orc_i32(tmp, tmp, tmp2);
4714 break;
4715 case 4: /* VEOR */
4716 tcg_gen_xor_i32(tmp, tmp, tmp2);
4717 break;
4718 case 5: /* VBSL */
4719 tmp3 = neon_load_reg(rd, pass);
4720 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4721 tcg_temp_free_i32(tmp3);
4722 break;
4723 case 6: /* VBIT */
4724 tmp3 = neon_load_reg(rd, pass);
4725 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4726 tcg_temp_free_i32(tmp3);
4727 break;
4728 case 7: /* VBIF */
4729 tmp3 = neon_load_reg(rd, pass);
4730 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4731 tcg_temp_free_i32(tmp3);
4732 break;
4733 }
4734 break;
4735 case NEON_3R_VHSUB:
4736 GEN_NEON_INTEGER_OP(hsub);
4737 break;
4738 case NEON_3R_VQSUB:
4739 GEN_NEON_INTEGER_OP(qsub);
4740 break;
4741 case NEON_3R_VCGT:
4742 GEN_NEON_INTEGER_OP(cgt);
4743 break;
4744 case NEON_3R_VCGE:
4745 GEN_NEON_INTEGER_OP(cge);
4746 break;
4747 case NEON_3R_VSHL:
4748 GEN_NEON_INTEGER_OP(shl);
4749 break;
4750 case NEON_3R_VQSHL:
4751 GEN_NEON_INTEGER_OP(qshl);
4752 break;
4753 case NEON_3R_VRSHL:
4754 GEN_NEON_INTEGER_OP(rshl);
4755 break;
4756 case NEON_3R_VQRSHL:
4757 GEN_NEON_INTEGER_OP(qrshl);
4758 break;
4759 case NEON_3R_VMAX:
4760 GEN_NEON_INTEGER_OP(max);
4761 break;
4762 case NEON_3R_VMIN:
4763 GEN_NEON_INTEGER_OP(min);
4764 break;
4765 case NEON_3R_VABD:
4766 GEN_NEON_INTEGER_OP(abd);
4767 break;
4768 case NEON_3R_VABA:
4769 GEN_NEON_INTEGER_OP(abd);
4770 tcg_temp_free_i32(tmp2);
4771 tmp2 = neon_load_reg(rd, pass);
4772 gen_neon_add(size, tmp, tmp2);
4773 break;
4774 case NEON_3R_VADD_VSUB:
4775 if (!u) { /* VADD */
4776 gen_neon_add(size, tmp, tmp2);
4777 } else { /* VSUB */
4778 switch (size) {
4779 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4780 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4781 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4782 default: abort();
4783 }
4784 }
4785 break;
4786 case NEON_3R_VTST_VCEQ:
4787 if (!u) { /* VTST */
4788 switch (size) {
4789 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4790 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4791 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4792 default: abort();
4793 }
4794 } else { /* VCEQ */
4795 switch (size) {
4796 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4797 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4798 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4799 default: abort();
4800 }
4801 }
4802 break;
4803 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
4804 switch (size) {
4805 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4806 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4807 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4808 default: abort();
4809 }
4810 tcg_temp_free_i32(tmp2);
4811 tmp2 = neon_load_reg(rd, pass);
4812 if (u) { /* VMLS */
4813 gen_neon_rsb(size, tmp, tmp2);
4814 } else { /* VMLA */
4815 gen_neon_add(size, tmp, tmp2);
4816 }
4817 break;
4818 case NEON_3R_VMUL:
4819 if (u) { /* polynomial */
4820 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4821 } else { /* Integer */
4822 switch (size) {
4823 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4824 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4825 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4826 default: abort();
4827 }
4828 }
4829 break;
4830 case NEON_3R_VPMAX:
4831 GEN_NEON_INTEGER_OP(pmax);
4832 break;
4833 case NEON_3R_VPMIN:
4834 GEN_NEON_INTEGER_OP(pmin);
4835 break;
4836 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4837 if (!u) { /* VQDMULH */
4838 switch (size) {
4839 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4840 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
4841 default: abort();
4842 }
4843 } else { /* VQRDMULH */
4844 switch (size) {
4845 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4846 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
4847 default: abort();
4848 }
4849 }
4850 break;
4851 case NEON_3R_VPADD:
4852 switch (size) {
4853 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4854 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4855 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4856 default: abort();
4857 }
4858 break;
4859 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4860 {
4861 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4862 switch ((u << 2) | size) {
4863 case 0: /* VADD */
4864 case 4: /* VPADD */
4865 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4866 break;
4867 case 2: /* VSUB */
4868 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
4869 break;
4870 case 6: /* VABD */
4871 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
4872 break;
4873 default:
4874 abort();
4875 }
4876 tcg_temp_free_ptr(fpstatus);
4877 break;
4878 }
4879 case NEON_3R_FLOAT_MULTIPLY:
4880 {
4881 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4882 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
4883 if (!u) {
4884 tcg_temp_free_i32(tmp2);
4885 tmp2 = neon_load_reg(rd, pass);
4886 if (size == 0) {
4887 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4888 } else {
4889 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
4890 }
4891 }
4892 tcg_temp_free_ptr(fpstatus);
4893 break;
4894 }
4895 case NEON_3R_FLOAT_CMP:
4896 {
4897 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4898 if (!u) {
4899 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
4900 } else {
4901 if (size == 0) {
4902 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4903 } else {
4904 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4905 }
4906 }
4907 tcg_temp_free_ptr(fpstatus);
4908 break;
4909 }
4910 case NEON_3R_FLOAT_ACMP:
4911 {
4912 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4913 if (size == 0) {
4914 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4915 } else {
4916 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4917 }
4918 tcg_temp_free_ptr(fpstatus);
4919 break;
4920 }
4921 case NEON_3R_FLOAT_MINMAX:
4922 {
4923 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4924 if (size == 0) {
4925 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4926 } else {
4927 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4928 }
4929 tcg_temp_free_ptr(fpstatus);
4930 break;
4931 }
4932 case NEON_3R_VRECPS_VRSQRTS:
4933 if (size == 0)
4934 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4935 else
4936 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4937 break;
4938 default:
4939 abort();
4940 }
4941 tcg_temp_free_i32(tmp2);
4942
4943 /* Save the result. For elementwise operations we can put it
4944 straight into the destination register. For pairwise operations
4945 we have to be careful to avoid clobbering the source operands. */
4946 if (pairwise && rd == rm) {
4947 neon_store_scratch(pass, tmp);
4948 } else {
4949 neon_store_reg(rd, pass, tmp);
4950 }
4951
4952 } /* for pass */
4953 if (pairwise && rd == rm) {
4954 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4955 tmp = neon_load_scratch(pass);
4956 neon_store_reg(rd, pass, tmp);
4957 }
4958 }
4959 /* End of 3 register same size operations. */
4960 } else if (insn & (1 << 4)) {
4961 if ((insn & 0x00380080) != 0) {
4962 /* Two registers and shift. */
4963 op = (insn >> 8) & 0xf;
4964 if (insn & (1 << 7)) {
4965 /* 64-bit shift. */
4966 if (op > 7) {
4967 return 1;
4968 }
4969 size = 3;
4970 } else {
4971 size = 2;
4972 while ((insn & (1 << (size + 19))) == 0)
4973 size--;
4974 }
4975 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4976 /* To avoid excessive duplication of ops we implement shift
4977 by immediate using the variable shift operations. */
4978 if (op < 8) {
4979 /* Shift by immediate:
4980 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4981 if (q && ((rd | rm) & 1)) {
4982 return 1;
4983 }
4984 if (!u && (op == 4 || op == 6)) {
4985 return 1;
4986 }
4987 /* Right shifts are encoded as N - shift, where N is the
4988 element size in bits. */
4989 if (op <= 4)
4990 shift = shift - (1 << (size + 3));
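/* e.g. for 8-bit elements (size 0) an immediate field of 5 becomes
 * shift = 5 - 8 = -3; the variable shift helpers treat a negative
 * count as a right shift, here by 3 bits.
 */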
4991 if (size == 3) {
4992 count = q + 1;
4993 } else {
4994 count = q ? 4: 2;
4995 }
4996 switch (size) {
4997 case 0:
4998 imm = (uint8_t) shift;
4999 imm |= imm << 8;
5000 imm |= imm << 16;
5001 break;
5002 case 1:
5003 imm = (uint16_t) shift;
5004 imm |= imm << 16;
5005 break;
5006 case 2:
5007 case 3:
5008 imm = shift;
5009 break;
5010 default:
5011 abort();
5012 }
5013
5014 for (pass = 0; pass < count; pass++) {
5015 if (size == 3) {
5016 neon_load_reg64(cpu_V0, rm + pass);
5017 tcg_gen_movi_i64(cpu_V1, imm);
5018 switch (op) {
5019 case 0: /* VSHR */
5020 case 1: /* VSRA */
5021 if (u)
5022 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5023 else
5024 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5025 break;
5026 case 2: /* VRSHR */
5027 case 3: /* VRSRA */
5028 if (u)
5029 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5030 else
5031 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5032 break;
5033 case 4: /* VSRI */
5034 case 5: /* VSHL, VSLI */
5035 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5036 break;
5037 case 6: /* VQSHLU */
5038 gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1);
5039 break;
5040 case 7: /* VQSHL */
5041 if (u) {
5042 gen_helper_neon_qshl_u64(cpu_V0,
5043 cpu_V0, cpu_V1);
5044 } else {
5045 gen_helper_neon_qshl_s64(cpu_V0,
5046 cpu_V0, cpu_V1);
5047 }
5048 break;
5049 }
5050 if (op == 1 || op == 3) {
5051 /* Accumulate. */
5052 neon_load_reg64(cpu_V1, rd + pass);
5053 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5054 } else if (op == 4 || (op == 5 && u)) {
5055 /* Insert */
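/* e.g. VSRI with an encoded right shift of 3 reaches here with
 * shift == -3, so mask = 0xffffffffffffffff >> 3 covers exactly the
 * bits produced by the shifted source; ~mask below preserves the top
 * 3 bits of the old destination.
 */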
5056 neon_load_reg64(cpu_V1, rd + pass);
5057 uint64_t mask;
5058 if (shift < -63 || shift > 63) {
5059 mask = 0;
5060 } else {
5061 if (op == 4) {
5062 mask = 0xffffffffffffffffull >> -shift;
5063 } else {
5064 mask = 0xffffffffffffffffull << shift;
5065 }
5066 }
5067 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5068 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5069 }
5070 neon_store_reg64(cpu_V0, rd + pass);
5071 } else { /* size < 3 */
5072 /* Operands are in tmp and tmp2. */
5073 tmp = neon_load_reg(rm, pass);
5074 tmp2 = tcg_temp_new_i32();
5075 tcg_gen_movi_i32(tmp2, imm);
5076 switch (op) {
5077 case 0: /* VSHR */
5078 case 1: /* VSRA */
5079 GEN_NEON_INTEGER_OP(shl);
5080 break;
5081 case 2: /* VRSHR */
5082 case 3: /* VRSRA */
5083 GEN_NEON_INTEGER_OP(rshl);
5084 break;
5085 case 4: /* VSRI */
5086 case 5: /* VSHL, VSLI */
5087 switch (size) {
5088 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5089 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5090 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5091 default: abort();
5092 }
5093 break;
5094 case 6: /* VQSHLU */
5095 switch (size) {
5096 case 0:
5097 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
5098 break;
5099 case 1:
5100 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
5101 break;
5102 case 2:
5103 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
5104 break;
5105 default:
5106 abort();
5107 }
5108 break;
5109 case 7: /* VQSHL */
5110 GEN_NEON_INTEGER_OP(qshl);
5111 break;
5112 }
5113 tcg_temp_free_i32(tmp2);
5114
5115 if (op == 1 || op == 3) {
5116 /* Accumulate. */
5117 tmp2 = neon_load_reg(rd, pass);
5118 gen_neon_add(size, tmp, tmp2);
5119 tcg_temp_free_i32(tmp2);
5120 } else if (op == 4 || (op == 5 && u)) {
5121 /* Insert */
5122 switch (size) {
5123 case 0:
5124 if (op == 4)
5125 mask = 0xff >> -shift;
5126 else
5127 mask = (uint8_t)(0xff << shift);
5128 mask |= mask << 8;
5129 mask |= mask << 16;
5130 break;
5131 case 1:
5132 if (op == 4)
5133 mask = 0xffff >> -shift;
5134 else
5135 mask = (uint16_t)(0xffff << shift);
5136 mask |= mask << 16;
5137 break;
5138 case 2:
5139 if (shift < -31 || shift > 31) {
5140 mask = 0;
5141 } else {
5142 if (op == 4)
5143 mask = 0xffffffffu >> -shift;
5144 else
5145 mask = 0xffffffffu << shift;
5146 }
5147 break;
5148 default:
5149 abort();
5150 }
5151 tmp2 = neon_load_reg(rd, pass);
5152 tcg_gen_andi_i32(tmp, tmp, mask);
5153 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5154 tcg_gen_or_i32(tmp, tmp, tmp2);
5155 tcg_temp_free_i32(tmp2);
5156 }
5157 neon_store_reg(rd, pass, tmp);
5158 }
5159 } /* for pass */
5160 } else if (op < 10) {
5161 /* Shift by immediate and narrow:
5162 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5163 int input_unsigned = (op == 8) ? !u : u;
5164 if (rm & 1) {
5165 return 1;
5166 }
5167 shift = shift - (1 << (size + 3));
5168 size++;
5169 if (size == 3) {
5170 tmp64 = tcg_const_i64(shift);
5171 neon_load_reg64(cpu_V0, rm);
5172 neon_load_reg64(cpu_V1, rm + 1);
5173 for (pass = 0; pass < 2; pass++) {
5174 TCGv_i64 in;
5175 if (pass == 0) {
5176 in = cpu_V0;
5177 } else {
5178 in = cpu_V1;
5179 }
5180 if (q) {
5181 if (input_unsigned) {
5182 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5183 } else {
5184 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5185 }
5186 } else {
5187 if (input_unsigned) {
5188 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5189 } else {
5190 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5191 }
5192 }
5193 tmp = tcg_temp_new_i32();
5194 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5195 neon_store_reg(rd, pass, tmp);
5196 } /* for pass */
5197 tcg_temp_free_i64(tmp64);
5198 } else {
5199 if (size == 1) {
5200 imm = (uint16_t)shift;
5201 imm |= imm << 16;
5202 } else {
5203 /* size == 2 */
5204 imm = (uint32_t)shift;
5205 }
5206 tmp2 = tcg_const_i32(imm);
5207 tmp4 = neon_load_reg(rm + 1, 0);
5208 tmp5 = neon_load_reg(rm + 1, 1);
5209 for (pass = 0; pass < 2; pass++) {
5210 if (pass == 0) {
5211 tmp = neon_load_reg(rm, 0);
5212 } else {
5213 tmp = tmp4;
5214 }
5215 gen_neon_shift_narrow(size, tmp, tmp2, q,
5216 input_unsigned);
5217 if (pass == 0) {
5218 tmp3 = neon_load_reg(rm, 1);
5219 } else {
5220 tmp3 = tmp5;
5221 }
5222 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5223 input_unsigned);
5224 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5225 tcg_temp_free_i32(tmp);
5226 tcg_temp_free_i32(tmp3);
5227 tmp = tcg_temp_new_i32();
5228 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5229 neon_store_reg(rd, pass, tmp);
5230 } /* for pass */
5231 tcg_temp_free_i32(tmp2);
5232 }
5233 } else if (op == 10) {
5234 /* VSHLL, VMOVL */
5235 if (q || (rd & 1)) {
5236 return 1;
5237 }
5238 tmp = neon_load_reg(rm, 0);
5239 tmp2 = neon_load_reg(rm, 1);
5240 for (pass = 0; pass < 2; pass++) {
5241 if (pass == 1)
5242 tmp = tmp2;
5243
5244 gen_neon_widen(cpu_V0, tmp, size, u);
5245
5246 if (shift != 0) {
5247 /* The shift is less than the width of the source
5248 type, so we can just shift the whole register. */
5249 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5250 /* Clear the potential overflow bits: after
5251 * the shift, the high bits of each narrow
5252 * input would otherwise appear as the low
5253 * bits of the neighbouring narrow lane
5254 * above it. */
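/* e.g. size == 0 with shift == 3: each 8-bit input now occupies a
 * 16-bit lane, and the mask built below is 0x0007000700070007,
 * clearing the 3 contaminated low bits of every lane.
 */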
5255 if (size < 2 || !u) {
5256 uint64_t imm64;
5257 if (size == 0) {
5258 imm = (0xffu >> (8 - shift));
5259 imm |= imm << 16;
5260 } else if (size == 1) {
5261 imm = 0xffff >> (16 - shift);
5262 } else {
5263 /* size == 2 */
5264 imm = 0xffffffff >> (32 - shift);
5265 }
5266 if (size < 2) {
5267 imm64 = imm | (((uint64_t)imm) << 32);
5268 } else {
5269 imm64 = imm;
5270 }
5271 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5272 }
5273 }
5274 neon_store_reg64(cpu_V0, rd + pass);
5275 }
5276 } else if (op >= 14) {
5277 /* VCVT fixed-point. */
5278 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5279 return 1;
5280 }
5281 /* We have already masked out the must-be-1 top bit of imm6,
5282 * hence this 32-shift where the ARM ARM has 64-imm6.
5283 */
5284 shift = 32 - shift;
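/* e.g. an original imm6 of 0b101000 (40) was masked to 8 by the
 * decode above, giving 32 - 8 = 24 fraction bits for the conversion.
 */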
5285 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5286 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5287 if (!(op & 1)) {
5288 if (u)
5289 gen_vfp_ulto(0, shift, 1);
5290 else
5291 gen_vfp_slto(0, shift, 1);
5292 } else {
5293 if (u)
5294 gen_vfp_toul(0, shift, 1);
5295 else
5296 gen_vfp_tosl(0, shift, 1);
5297 }
5298 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5299 }
5300 } else {
5301 return 1;
5302 }
5303 } else { /* (insn & 0x00380080) == 0 */
5304 int invert;
5305 if (q && (rd & 1)) {
5306 return 1;
5307 }
5308
5309 op = (insn >> 8) & 0xf;
5310 /* One register and immediate. */
5311 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5312 invert = (insn & (1 << 5)) != 0;
5313 /* Note that for op = 2,3,4,5,6,7,10,11,12,13 an imm of 0 is
5314 * UNPREDICTABLE. We choose not to special-case this and behave
5315 * as if a valid constant encoding of 0 had been given.
5316 */
5317 switch (op) {
5318 case 0: case 1:
5319 /* no-op */
5320 break;
5321 case 2: case 3:
5322 imm <<= 8;
5323 break;
5324 case 4: case 5:
5325 imm <<= 16;
5326 break;
5327 case 6: case 7:
5328 imm <<= 24;
5329 break;
5330 case 8: case 9:
5331 imm |= imm << 16;
5332 break;
5333 case 10: case 11:
5334 imm = (imm << 8) | (imm << 24);
5335 break;
5336 case 12:
5337 imm = (imm << 8) | 0xff;
5338 break;
5339 case 13:
5340 imm = (imm << 16) | 0xffff;
5341 break;
5342 case 14:
5343 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5344 if (invert)
5345 imm = ~imm;
5346 break;
5347 case 15:
5348 if (invert) {
5349 return 1;
5350 }
5351 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5352 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5353 break;
5354 }
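/* e.g. op == 12 with abcdefgh = 0x12 produces 0x000012ff (the "ones
 * below" form). For op == 14 with invert set, the pre-inversion in the
 * case above cancels the generic inversion below, leaving the raw bits
 * for the per-byte expansion in the store loop.
 */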
5355 if (invert)
5356 imm = ~imm;
5357
5358 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5359 if (op & 1 && op < 12) {
5360 tmp = neon_load_reg(rd, pass);
5361 if (invert) {
5362 /* The immediate value has already been inverted, so
5363 BIC becomes AND. */
5364 tcg_gen_andi_i32(tmp, tmp, imm);
5365 } else {
5366 tcg_gen_ori_i32(tmp, tmp, imm);
5367 }
5368 } else {
5369 /* VMOV, VMVN. */
5370 tmp = tcg_temp_new_i32();
5371 if (op == 14 && invert) {
5372 int n;
5373 uint32_t val;
5374 val = 0;
5375 for (n = 0; n < 4; n++) {
5376 if (imm & (1 << (n + (pass & 1) * 4)))
5377 val |= 0xff << (n * 8);
5378 }
5379 tcg_gen_movi_i32(tmp, val);
5380 } else {
5381 tcg_gen_movi_i32(tmp, imm);
5382 }
5383 }
5384 neon_store_reg(rd, pass, tmp);
5385 }
5386 }
5387 } else { /* (insn & 0x00800010) == 0x00800000 */
5388 if (size != 3) {
5389 op = (insn >> 8) & 0xf;
5390 if ((insn & (1 << 6)) == 0) {
5391 /* Three registers of different lengths. */
5392 int src1_wide;
5393 int src2_wide;
5394 int prewiden;
5395 /* undefreq: bit 0 : UNDEF if size != 0
5396 * bit 1 : UNDEF if size == 0
5397 * bit 2 : UNDEF if U == 1
5398 * Note that [1:0] set implies 'always UNDEF'
5399 */
5400 int undefreq;
5401 /* prewiden, src1_wide, src2_wide, undefreq */
5402 static const int neon_3reg_wide[16][4] = {
5403 {1, 0, 0, 0}, /* VADDL */
5404 {1, 1, 0, 0}, /* VADDW */
5405 {1, 0, 0, 0}, /* VSUBL */
5406 {1, 1, 0, 0}, /* VSUBW */
5407 {0, 1, 1, 0}, /* VADDHN */
5408 {0, 0, 0, 0}, /* VABAL */
5409 {0, 1, 1, 0}, /* VSUBHN */
5410 {0, 0, 0, 0}, /* VABDL */
5411 {0, 0, 0, 0}, /* VMLAL */
5412 {0, 0, 0, 6}, /* VQDMLAL */
5413 {0, 0, 0, 0}, /* VMLSL */
5414 {0, 0, 0, 6}, /* VQDMLSL */
5415 {0, 0, 0, 0}, /* Integer VMULL */
5416 {0, 0, 0, 2}, /* VQDMULL */
5417 {0, 0, 0, 5}, /* Polynomial VMULL */
5418 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5419 };
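/* e.g. the VQDMLAL entry {0, 0, 0, 6} sets undefreq bits 1 and 2, so the
 * insn UNDEFs if size == 0 or U == 1, matching the absence of 8-bit and
 * unsigned forms of that instruction.
 */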
5420
5421 prewiden = neon_3reg_wide[op][0];
5422 src1_wide = neon_3reg_wide[op][1];
5423 src2_wide = neon_3reg_wide[op][2];
5424 undefreq = neon_3reg_wide[op][3];
5425
5426 if (((undefreq & 1) && (size != 0)) ||
5427 ((undefreq & 2) && (size == 0)) ||
5428 ((undefreq & 4) && u)) {
5429 return 1;
5430 }
5431 if ((src1_wide && (rn & 1)) ||
5432 (src2_wide && (rm & 1)) ||
5433 (!src2_wide && (rd & 1))) {
5434 return 1;
5435 }
5436
5437 /* Avoid overlapping operands. Wide source operands are
5438 always aligned so will never overlap with wide
5439 destinations in problematic ways. */
5440 if (rd == rm && !src2_wide) {
5441 tmp = neon_load_reg(rm, 1);
5442 neon_store_scratch(2, tmp);
5443 } else if (rd == rn && !src1_wide) {
5444 tmp = neon_load_reg(rn, 1);
5445 neon_store_scratch(2, tmp);
5446 }
5447 TCGV_UNUSED(tmp3);
5448 for (pass = 0; pass < 2; pass++) {
5449 if (src1_wide) {
5450 neon_load_reg64(cpu_V0, rn + pass);
5451 TCGV_UNUSED(tmp);
5452 } else {
5453 if (pass == 1 && rd == rn) {
5454 tmp = neon_load_scratch(2);
5455 } else {
5456 tmp = neon_load_reg(rn, pass);
5457 }
5458 if (prewiden) {
5459 gen_neon_widen(cpu_V0, tmp, size, u);
5460 }
5461 }
5462 if (src2_wide) {
5463 neon_load_reg64(cpu_V1, rm + pass);
5464 TCGV_UNUSED(tmp2);
5465 } else {
5466 if (pass == 1 && rd == rm) {
5467 tmp2 = neon_load_scratch(2);
5468 } else {
5469 tmp2 = neon_load_reg(rm, pass);
5470 }
5471 if (prewiden) {
5472 gen_neon_widen(cpu_V1, tmp2, size, u);
5473 }
5474 }
5475 switch (op) {
5476 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5477 gen_neon_addl(size);
5478 break;
5479 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5480 gen_neon_subl(size);
5481 break;
5482 case 5: case 7: /* VABAL, VABDL */
5483 switch ((size << 1) | u) {
5484 case 0:
5485 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5486 break;
5487 case 1:
5488 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5489 break;
5490 case 2:
5491 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5492 break;
5493 case 3:
5494 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5495 break;
5496 case 4:
5497 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5498 break;
5499 case 5:
5500 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5501 break;
5502 default: abort();
5503 }
5504 tcg_temp_free_i32(tmp2);
5505 tcg_temp_free_i32(tmp);
5506 break;
5507 case 8: case 9: case 10: case 11: case 12: case 13:
5508 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5509 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5510 break;
5511 case 14: /* Polynomial VMULL */
5512 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5513 tcg_temp_free_i32(tmp2);
5514 tcg_temp_free_i32(tmp);
5515 break;
5516 default: /* 15 is RESERVED: caught earlier */
5517 abort();
5518 }
5519 if (op == 13) {
5520 /* VQDMULL */
5521 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5522 neon_store_reg64(cpu_V0, rd + pass);
5523 } else if (op == 5 || (op >= 8 && op <= 11)) {
5524 /* Accumulate. */
5525 neon_load_reg64(cpu_V1, rd + pass);
5526 switch (op) {
5527 case 10: /* VMLSL */
5528 gen_neon_negl(cpu_V0, size);
5529 /* Fall through */
5530 case 5: case 8: /* VABAL, VMLAL */
5531 gen_neon_addl(size);
5532 break;
5533 case 9: case 11: /* VQDMLAL, VQDMLSL */
5534 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5535 if (op == 11) {
5536 gen_neon_negl(cpu_V0, size);
5537 }
5538 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5539 break;
5540 default:
5541 abort();
5542 }
5543 neon_store_reg64(cpu_V0, rd + pass);
5544 } else if (op == 4 || op == 6) {
5545 /* Narrowing operation. */
5546 tmp = tcg_temp_new_i32();
5547 if (!u) {
5548 switch (size) {
5549 case 0:
5550 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5551 break;
5552 case 1:
5553 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5554 break;
5555 case 2:
5556 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5557 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5558 break;
5559 default: abort();
5560 }
5561 } else {
5562 switch (size) {
5563 case 0:
5564 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5565 break;
5566 case 1:
5567 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5568 break;
5569 case 2:
5570 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5571 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5572 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5573 break;
5574 default: abort();
5575 }
5576 }
5577 if (pass == 0) {
5578 tmp3 = tmp;
5579 } else {
5580 neon_store_reg(rd, 0, tmp3);
5581 neon_store_reg(rd, 1, tmp);
5582 }
5583 } else {
5584 /* Write back the result. */
5585 neon_store_reg64(cpu_V0, rd + pass);
5586 }
5587 }
5588 } else {
5589 /* Two registers and a scalar. NB that for ops of this form
5590 * the ARM ARM labels bit 24 as Q, but it is in our variable
5591 * 'u', not 'q'.
5592 */
5593 if (size == 0) {
5594 return 1;
5595 }
5596 switch (op) {
5597 case 1: /* Floating-point VMLA scalar */
5598 case 5: /* Floating-point VMLS scalar */
5599 case 9: /* Floating-point VMUL scalar */
5600 if (size == 1) {
5601 return 1;
5602 }
5603 /* fall through */
5604 case 0: /* Integer VMLA scalar */
5605 case 4: /* Integer VMLS scalar */
5606 case 8: /* Integer VMUL scalar */
5607 case 12: /* VQDMULH scalar */
5608 case 13: /* VQRDMULH scalar */
5609 if (u && ((rd | rn) & 1)) {
5610 return 1;
5611 }
5612 tmp = neon_get_scalar(size, rm);
5613 neon_store_scratch(0, tmp);
5614 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5615 tmp = neon_load_scratch(0);
5616 tmp2 = neon_load_reg(rn, pass);
5617 if (op == 12) {
5618 if (size == 1) {
5619 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
5620 } else {
5621 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
5622 }
5623 } else if (op == 13) {
5624 if (size == 1) {
5625 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
5626 } else {
5627 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
5628 }
5629 } else if (op & 1) {
5630 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5631 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5632 tcg_temp_free_ptr(fpstatus);
5633 } else {
5634 switch (size) {
5635 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5636 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5637 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5638 default: abort();
5639 }
5640 }
5641 tcg_temp_free_i32(tmp2);
5642 if (op < 8) {
5643 /* Accumulate. */
5644 tmp2 = neon_load_reg(rd, pass);
5645 switch (op) {
5646 case 0:
5647 gen_neon_add(size, tmp, tmp2);
5648 break;
5649 case 1:
5650 {
5651 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5652 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5653 tcg_temp_free_ptr(fpstatus);
5654 break;
5655 }
5656 case 4:
5657 gen_neon_rsb(size, tmp, tmp2);
5658 break;
5659 case 5:
5660 {
5661 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5662 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5663 tcg_temp_free_ptr(fpstatus);
5664 break;
5665 }
5666 default:
5667 abort();
5668 }
5669 tcg_temp_free_i32(tmp2);
5670 }
5671 neon_store_reg(rd, pass, tmp);
5672 }
5673 break;
5674 case 3: /* VQDMLAL scalar */
5675 case 7: /* VQDMLSL scalar */
5676 case 11: /* VQDMULL scalar */
5677 if (u == 1) {
5678 return 1;
5679 }
5680 /* fall through */
5681 case 2: /* VMLAL scalar */
5682 case 6: /* VMLSL scalar */
5683 case 10: /* VMULL scalar */
5684 if (rd & 1) {
5685 return 1;
5686 }
5687 tmp2 = neon_get_scalar(size, rm);
5688 /* We need a copy of tmp2 because gen_neon_mull
5689 * frees it during pass 0. */
5690 tmp4 = tcg_temp_new_i32();
5691 tcg_gen_mov_i32(tmp4, tmp2);
5692 tmp3 = neon_load_reg(rn, 1);
5693
5694 for (pass = 0; pass < 2; pass++) {
5695 if (pass == 0) {
5696 tmp = neon_load_reg(rn, 0);
5697 } else {
5698 tmp = tmp3;
5699 tmp2 = tmp4;
5700 }
5701 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5702 if (op != 11) {
5703 neon_load_reg64(cpu_V1, rd + pass);
5704 }
5705 switch (op) {
5706 case 6:
5707 gen_neon_negl(cpu_V0, size);
5708 /* Fall through */
5709 case 2:
5710 gen_neon_addl(size);
5711 break;
5712 case 3: case 7:
5713 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5714 if (op == 7) {
5715 gen_neon_negl(cpu_V0, size);
5716 }
5717 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5718 break;
5719 case 10:
5720 /* no-op */
5721 break;
5722 case 11:
5723 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5724 break;
5725 default:
5726 abort();
5727 }
5728 neon_store_reg64(cpu_V0, rd + pass);
5729 }
5730
5732 break;
5733 default: /* 14 and 15 are RESERVED */
5734 return 1;
5735 }
5736 }
5737 } else { /* size == 3 */
5738 if (!u) {
5739 /* Extract. */
5740 imm = (insn >> 8) & 0xf;
5741
5742 if (imm > 7 && !q)
5743 return 1;
5744
5745 if (q && ((rd | rn | rm) & 1)) {
5746 return 1;
5747 }
5748
5749 if (imm == 0) {
5750 neon_load_reg64(cpu_V0, rn);
5751 if (q) {
5752 neon_load_reg64(cpu_V1, rn + 1);
5753 }
5754 } else if (imm == 8) {
5755 neon_load_reg64(cpu_V0, rn + 1);
5756 if (q) {
5757 neon_load_reg64(cpu_V1, rm);
5758 }
5759 } else if (q) {
5760 tmp64 = tcg_temp_new_i64();
5761 if (imm < 8) {
5762 neon_load_reg64(cpu_V0, rn);
5763 neon_load_reg64(tmp64, rn + 1);
5764 } else {
5765 neon_load_reg64(cpu_V0, rn + 1);
5766 neon_load_reg64(tmp64, rm);
5767 }
5768 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5769 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5770 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5771 if (imm < 8) {
5772 neon_load_reg64(cpu_V1, rm);
5773 } else {
5774 neon_load_reg64(cpu_V1, rm + 1);
5775 imm -= 8;
5776 }
5777 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5778 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5779 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5780 tcg_temp_free_i64(tmp64);
5781 } else {
5782 /* Q == 0 VEXT: combine bytes [7:imm] of Vn with bytes [imm-1:0] of Vm. */
5783 neon_load_reg64(cpu_V0, rn);
5784 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5785 neon_load_reg64(cpu_V1, rm);
5786 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5787 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5788 }
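/* e.g. in this doubleword case an imm of 3 yields
 * (Vn >> 24) | (Vm << 40): bytes 3..7 of Vn land in the low five result
 * bytes and bytes 0..2 of Vm in the top three.
 */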
5789 neon_store_reg64(cpu_V0, rd);
5790 if (q) {
5791 neon_store_reg64(cpu_V1, rd + 1);
5792 }
5793 } else if ((insn & (1 << 11)) == 0) {
5794 /* Two register misc. */
5795 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5796 size = (insn >> 18) & 3;
5797 /* UNDEF for unknown op values and bad op-size combinations */
5798 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5799 return 1;
5800 }
5801 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5802 q && ((rm | rd) & 1)) {
5803 return 1;
5804 }
5805 switch (op) {
5806 case NEON_2RM_VREV64:
5807 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5808 tmp = neon_load_reg(rm, pass * 2);
5809 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5810 switch (size) {
5811 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5812 case 1: gen_swap_half(tmp); break;
5813 case 2: /* no-op */ break;
5814 default: abort();
5815 }
5816 neon_store_reg(rd, pass * 2 + 1, tmp);
5817 if (size == 2) {
5818 neon_store_reg(rd, pass * 2, tmp2);
5819 } else {
5820 switch (size) {
5821 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5822 case 1: gen_swap_half(tmp2); break;
5823 default: abort();
5824 }
5825 neon_store_reg(rd, pass * 2, tmp2);
5826 }
5827 }
5828 break;
5829 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5830 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
5831 for (pass = 0; pass < q + 1; pass++) {
5832 tmp = neon_load_reg(rm, pass * 2);
5833 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5834 tmp = neon_load_reg(rm, pass * 2 + 1);
5835 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5836 switch (size) {
5837 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5838 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5839 case 2: tcg_gen_add_i64(CPU_V001); break;
5840 default: abort();
5841 }
5842 if (op >= NEON_2RM_VPADAL) {
5843 /* Accumulate. */
5844 neon_load_reg64(cpu_V1, rd + pass);
5845 gen_neon_addl(size);
5846 }
5847 neon_store_reg64(cpu_V0, rd + pass);
5848 }
5849 break;
5850 case NEON_2RM_VTRN:
5851 if (size == 2) {
5852 int n;
5853 for (n = 0; n < (q ? 4 : 2); n += 2) {
5854 tmp = neon_load_reg(rm, n);
5855 tmp2 = neon_load_reg(rd, n + 1);
5856 neon_store_reg(rm, n, tmp2);
5857 neon_store_reg(rd, n + 1, tmp);
5858 }
5859 } else {
5860 goto elementwise;
5861 }
5862 break;
5863 case NEON_2RM_VUZP:
5864 if (gen_neon_unzip(rd, rm, size, q)) {
5865 return 1;
5866 }
5867 break;
5868 case NEON_2RM_VZIP:
5869 if (gen_neon_zip(rd, rm, size, q)) {
5870 return 1;
5871 }
5872 break;
5873 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5874 /* also VQMOVUN; op field and mnemonics don't line up */
5875 if (rm & 1) {
5876 return 1;
5877 }
5878 TCGV_UNUSED(tmp2);
5879 for (pass = 0; pass < 2; pass++) {
5880 neon_load_reg64(cpu_V0, rm + pass);
5881 tmp = tcg_temp_new_i32();
5882 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5883 tmp, cpu_V0);
5884 if (pass == 0) {
5885 tmp2 = tmp;
5886 } else {
5887 neon_store_reg(rd, 0, tmp2);
5888 neon_store_reg(rd, 1, tmp);
5889 }
5890 }
5891 break;
5892 case NEON_2RM_VSHLL:
5893 if (q || (rd & 1)) {
5894 return 1;
5895 }
5896 tmp = neon_load_reg(rm, 0);
5897 tmp2 = neon_load_reg(rm, 1);
5898 for (pass = 0; pass < 2; pass++) {
5899 if (pass == 1)
5900 tmp = tmp2;
5901 gen_neon_widen(cpu_V0, tmp, size, 1);
5902 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5903 neon_store_reg64(cpu_V0, rd + pass);
5904 }
5905 break;
5906 case NEON_2RM_VCVT_F16_F32:
5907 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5908 q || (rm & 1)) {
5909 return 1;
5910 }
5911 tmp = tcg_temp_new_i32();
5912 tmp2 = tcg_temp_new_i32();
5913 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5914 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5915 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5916 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5917 tcg_gen_shli_i32(tmp2, tmp2, 16);
5918 tcg_gen_or_i32(tmp2, tmp2, tmp);
5919 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5920 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5921 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5922 neon_store_reg(rd, 0, tmp2);
5923 tmp2 = tcg_temp_new_i32();
5924 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5925 tcg_gen_shli_i32(tmp2, tmp2, 16);
5926 tcg_gen_or_i32(tmp2, tmp2, tmp);
5927 neon_store_reg(rd, 1, tmp2);
5928 tcg_temp_free_i32(tmp);
5929 break;
5930 case NEON_2RM_VCVT_F32_F16:
5931 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5932 q || (rd & 1)) {
5933 return 1;
5934 }
5935 tmp3 = tcg_temp_new_i32();
5936 tmp = neon_load_reg(rm, 0);
5937 tmp2 = neon_load_reg(rm, 1);
5938 tcg_gen_ext16u_i32(tmp3, tmp);
5939 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5940 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5941 tcg_gen_shri_i32(tmp3, tmp, 16);
5942 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5943 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5944 tcg_temp_free_i32(tmp);
5945 tcg_gen_ext16u_i32(tmp3, tmp2);
5946 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5947 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5948 tcg_gen_shri_i32(tmp3, tmp2, 16);
5949 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5950 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5951 tcg_temp_free_i32(tmp2);
5952 tcg_temp_free_i32(tmp3);
5953 break;
5954 default:
5955 elementwise:
5956 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5957 if (neon_2rm_is_float_op(op)) {
5958 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5959 neon_reg_offset(rm, pass));
5960 TCGV_UNUSED(tmp);
5961 } else {
5962 tmp = neon_load_reg(rm, pass);
5963 }
5964 switch (op) {
5965 case NEON_2RM_VREV32:
5966 switch (size) {
5967 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5968 case 1: gen_swap_half(tmp); break;
5969 default: abort();
5970 }
5971 break;
5972 case NEON_2RM_VREV16:
5973 gen_rev16(tmp);
5974 break;
5975 case NEON_2RM_VCLS:
5976 switch (size) {
5977 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5978 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5979 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5980 default: abort();
5981 }
5982 break;
5983 case NEON_2RM_VCLZ:
5984 switch (size) {
5985 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5986 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5987 case 2: gen_helper_clz(tmp, tmp); break;
5988 default: abort();
5989 }
5990 break;
5991 case NEON_2RM_VCNT:
5992 gen_helper_neon_cnt_u8(tmp, tmp);
5993 break;
5994 case NEON_2RM_VMVN:
5995 tcg_gen_not_i32(tmp, tmp);
5996 break;
5997 case NEON_2RM_VQABS:
5998 switch (size) {
5999 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
6000 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
6001 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
6002 default: abort();
6003 }
6004 break;
6005 case NEON_2RM_VQNEG:
6006 switch (size) {
6007 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
6008 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
6009 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
6010 default: abort();
6011 }
6012 break;
6013 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6014 tmp2 = tcg_const_i32(0);
6015 switch(size) {
6016 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6017 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6018 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6019 default: abort();
6020 }
6021 tcg_temp_free(tmp2);
6022 if (op == NEON_2RM_VCLE0) {
6023 tcg_gen_not_i32(tmp, tmp);
6024 }
6025 break;
6026 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6027 tmp2 = tcg_const_i32(0);
6028 switch(size) {
6029 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6030 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6031 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6032 default: abort();
6033 }
6034 tcg_temp_free(tmp2);
6035 if (op == NEON_2RM_VCLT0) {
6036 tcg_gen_not_i32(tmp, tmp);
6037 }
6038 break;
6039 case NEON_2RM_VCEQ0:
6040 tmp2 = tcg_const_i32(0);
6041 switch(size) {
6042 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6043 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6044 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6045 default: abort();
6046 }
6047 tcg_temp_free(tmp2);
6048 break;
6049 case NEON_2RM_VABS:
6050 switch(size) {
6051 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6052 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6053 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6054 default: abort();
6055 }
6056 break;
6057 case NEON_2RM_VNEG:
6058 tmp2 = tcg_const_i32(0);
6059 gen_neon_rsb(size, tmp, tmp2);
6060 tcg_temp_free(tmp2);
6061 break;
6062 case NEON_2RM_VCGT0_F:
6063 {
6064 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6065 tmp2 = tcg_const_i32(0);
6066 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6067 tcg_temp_free(tmp2);
6068 tcg_temp_free_ptr(fpstatus);
6069 break;
6070 }
6071 case NEON_2RM_VCGE0_F:
6072 {
6073 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6074 tmp2 = tcg_const_i32(0);
6075 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6076 tcg_temp_free(tmp2);
6077 tcg_temp_free_ptr(fpstatus);
6078 break;
6079 }
6080 case NEON_2RM_VCEQ0_F:
6081 {
6082 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6083 tmp2 = tcg_const_i32(0);
6084 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6085 tcg_temp_free(tmp2);
6086 tcg_temp_free_ptr(fpstatus);
6087 break;
6088 }
6089 case NEON_2RM_VCLE0_F:
6090 {
6091 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6092 tmp2 = tcg_const_i32(0);
6093 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6094 tcg_temp_free(tmp2);
6095 tcg_temp_free_ptr(fpstatus);
6096 break;
6097 }
6098 case NEON_2RM_VCLT0_F:
6099 {
6100 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6101 tmp2 = tcg_const_i32(0);
6102 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6103 tcg_temp_free(tmp2);
6104 tcg_temp_free_ptr(fpstatus);
6105 break;
6106 }
6107 case NEON_2RM_VABS_F:
6108 gen_vfp_abs(0);
6109 break;
6110 case NEON_2RM_VNEG_F:
6111 gen_vfp_neg(0);
6112 break;
6113 case NEON_2RM_VSWP:
6114 tmp2 = neon_load_reg(rd, pass);
6115 neon_store_reg(rm, pass, tmp2);
6116 break;
6117 case NEON_2RM_VTRN:
6118 tmp2 = neon_load_reg(rd, pass);
6119 switch (size) {
6120 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6121 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6122 default: abort();
6123 }
6124 neon_store_reg(rm, pass, tmp2);
6125 break;
6126 case NEON_2RM_VRECPE:
6127 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6128 break;
6129 case NEON_2RM_VRSQRTE:
6130 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6131 break;
6132 case NEON_2RM_VRECPE_F:
6133 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6134 break;
6135 case NEON_2RM_VRSQRTE_F:
6136 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6137 break;
6138 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6139 gen_vfp_sito(0, 1);
6140 break;
6141 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6142 gen_vfp_uito(0, 1);
6143 break;
6144 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6145 gen_vfp_tosiz(0, 1);
6146 break;
6147 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6148 gen_vfp_touiz(0, 1);
6149 break;
6150 default:
6151 /* Reserved op values were caught by the
6152 * neon_2rm_sizes[] check earlier.
6153 */
6154 abort();
6155 }
6156 if (neon_2rm_is_float_op(op)) {
6157 tcg_gen_st_f32(cpu_F0s, cpu_env,
6158 neon_reg_offset(rd, pass));
6159 } else {
6160 neon_store_reg(rd, pass, tmp);
6161 }
6162 }
6163 break;
6164 }
6165 } else if ((insn & (1 << 10)) == 0) {
6166 /* VTBL, VTBX. */
6167 int n = ((insn >> 8) & 3) + 1;
6168 if ((rn + n) > 32) {
6169 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6170 * helper function running off the end of the register file.
6171 */
6172 return 1;
6173 }
6174 n <<= 3;
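/* n is now the table length in bytes: 8, 16, 24 or 32 for a one- to
 * four-register list.
 */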
6175 if (insn & (1 << 6)) {
6176 tmp = neon_load_reg(rd, 0);
6177 } else {
6178 tmp = tcg_temp_new_i32();
6179 tcg_gen_movi_i32(tmp, 0);
6180 }
6181 tmp2 = neon_load_reg(rm, 0);
6182 tmp4 = tcg_const_i32(rn);
6183 tmp5 = tcg_const_i32(n);
6184 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
6185 tcg_temp_free_i32(tmp);
6186 if (insn & (1 << 6)) {
6187 tmp = neon_load_reg(rd, 1);
6188 } else {
6189 tmp = tcg_temp_new_i32();
6190 tcg_gen_movi_i32(tmp, 0);
6191 }
6192 tmp3 = neon_load_reg(rm, 1);
6193 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
6194 tcg_temp_free_i32(tmp5);
6195 tcg_temp_free_i32(tmp4);
6196 neon_store_reg(rd, 0, tmp2);
6197 neon_store_reg(rd, 1, tmp3);
6198 tcg_temp_free_i32(tmp);
6199 } else if ((insn & 0x380) == 0) {
6200 /* VDUP */
6201 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6202 return 1;
6203 }
6204 if (insn & (1 << 19)) {
6205 tmp = neon_load_reg(rm, 1);
6206 } else {
6207 tmp = neon_load_reg(rm, 0);
6208 }
6209 if (insn & (1 << 16)) {
6210 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6211 } else if (insn & (1 << 17)) {
6212 if ((insn >> 18) & 1)
6213 gen_neon_dup_high16(tmp);
6214 else
6215 gen_neon_dup_low16(tmp);
6216 }
6217 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6218 tmp2 = tcg_temp_new_i32();
6219 tcg_gen_mov_i32(tmp2, tmp);
6220 neon_store_reg(rd, pass, tmp2);
6221 }
6222 tcg_temp_free_i32(tmp);
6223 } else {
6224 return 1;
6225 }
6226 }
6227 }
6228 return 0;
6229 }
6230
6231 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
6232 {
6233 int crn = (insn >> 16) & 0xf;
6234 int crm = insn & 0xf;
6235 int op1 = (insn >> 21) & 7;
6236 int op2 = (insn >> 5) & 7;
6237 int rt = (insn >> 12) & 0xf;
6238 TCGv tmp;
6239
6240 /* Minimal set of debug registers, since we don't support debug */
6241 if (op1 == 0 && crn == 0 && op2 == 0) {
6242 switch (crm) {
6243 case 0:
6244 /* DBGDIDR: just RAZ. In particular this means the
6245 * "debug architecture version" bits will read as
6246 * a reserved value, which should cause Linux to
6247 * not try to use the debug hardware.
6248 */
6249 tmp = tcg_const_i32(0);
6250 store_reg(s, rt, tmp);
6251 return 0;
6252 case 1:
6253 case 2:
6254 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6255 * don't implement memory mapped debug components
6256 */
6257 if (ENABLE_ARCH_7) {
6258 tmp = tcg_const_i32(0);
6259 store_reg(s, rt, tmp);
6260 return 0;
6261 }
6262 break;
6263 default:
6264 break;
6265 }
6266 }
6267
6268 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6269 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6270 /* TEECR */
6271 if (IS_USER(s))
6272 return 1;
6273 tmp = load_cpu_field(teecr);
6274 store_reg(s, rt, tmp);
6275 return 0;
6276 }
6277 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6278 /* TEEHBR */
6279 if (IS_USER(s) && (env->teecr & 1))
6280 return 1;
6281 tmp = load_cpu_field(teehbr);
6282 store_reg(s, rt, tmp);
6283 return 0;
6284 }
6285 }
6286 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
6287 op1, crn, crm, op2);
6288 return 1;
6289 }
6290
6291 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6292 {
6293 int crn = (insn >> 16) & 0xf;
6294 int crm = insn & 0xf;
6295 int op1 = (insn >> 21) & 7;
6296 int op2 = (insn >> 5) & 7;
6297 int rt = (insn >> 12) & 0xf;
6298 TCGv tmp;
6299
6300 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6301 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6302 /* TEECR */
6303 if (IS_USER(s))
6304 return 1;
6305 tmp = load_reg(s, rt);
6306 gen_helper_set_teecr(cpu_env, tmp);
6307 tcg_temp_free_i32(tmp);
6308 return 0;
6309 }
6310 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6311 /* TEEHBR */
6312 if (IS_USER(s) && (env->teecr & 1))
6313 return 1;
6314 tmp = load_reg(s, rt);
6315 store_cpu_field(tmp, teehbr);
6316 return 0;
6317 }
6318 }
6319 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6320 op1, crn, crm, op2);
6321 return 1;
6322 }
6323
6324 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6325 {
6326 int cpnum;
6327
6328 cpnum = (insn >> 8) & 0xf;
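/* On XScale, access to cp0..cp13 is gated by the CPAR register: a
 * clear CPAR bit makes accesses to that coprocessor UNDEF.
 */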
6329 if (arm_feature(env, ARM_FEATURE_XSCALE)
6330 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6331 return 1;
6332
6333 switch (cpnum) {
6334 case 0:
6335 case 1:
6336 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6337 return disas_iwmmxt_insn(env, s, insn);
6338 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6339 return disas_dsp_insn(env, s, insn);
6340 }
6341 return 1;
6342 case 10:
6343 case 11:
6344 return disas_vfp_insn (env, s, insn);
6345 case 14:
6346 /* Coprocessors 7-15 are architecturally reserved by ARM.
6347 Unfortunately Intel decided to ignore this. */
6348 if (arm_feature(env, ARM_FEATURE_XSCALE))
6349 goto board;
6350 if (insn & (1 << 20))
6351 return disas_cp14_read(env, s, insn);
6352 else
6353 return disas_cp14_write(env, s, insn);
6354 case 15:
6355 return disas_cp15_insn (env, s, insn);
6356 default:
6357 board:
6358 /* Unknown coprocessor. See if the board has hooked it. */
6359 return disas_cp_insn (env, s, insn);
6360 }
6361 }
6362
6363
6364 /* Store a 64-bit value to a register pair. Clobbers val. */
6365 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6366 {
6367 TCGv tmp;
6368 tmp = tcg_temp_new_i32();
6369 tcg_gen_trunc_i64_i32(tmp, val);
6370 store_reg(s, rlow, tmp);
6371 tmp = tcg_temp_new_i32();
6372 tcg_gen_shri_i64(val, val, 32);
6373 tcg_gen_trunc_i64_i32(tmp, val);
6374 store_reg(s, rhigh, tmp);
6375 }
6376
6377 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6378 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6379 {
6380 TCGv_i64 tmp;
6381 TCGv tmp2;
6382
6383 /* Load value and extend to 64 bits. */
6384 tmp = tcg_temp_new_i64();
6385 tmp2 = load_reg(s, rlow);
6386 tcg_gen_extu_i32_i64(tmp, tmp2);
6387 tcg_temp_free_i32(tmp2);
6388 tcg_gen_add_i64(val, val, tmp);
6389 tcg_temp_free_i64(tmp);
6390 }
6391
6392 /* load and add a 64-bit value from a register pair. */
6393 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6394 {
6395 TCGv_i64 tmp;
6396 TCGv tmpl;
6397 TCGv tmph;
6398
6399 /* Load 64-bit value rd:rn. */
6400 tmpl = load_reg(s, rlow);
6401 tmph = load_reg(s, rhigh);
6402 tmp = tcg_temp_new_i64();
6403 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6404 tcg_temp_free_i32(tmpl);
6405 tcg_temp_free_i32(tmph);
6406 tcg_gen_add_i64(val, val, tmp);
6407 tcg_temp_free_i64(tmp);
6408 }
6409
6410 /* Set N and Z flags from a 64-bit value. */
6411 static void gen_logicq_cc(TCGv_i64 val)
6412 {
6413 TCGv tmp = tcg_temp_new_i32();
6414 gen_helper_logicq_cc(tmp, val);
6415 gen_logic_CC(tmp);
6416 tcg_temp_free_i32(tmp);
6417 }
6418
6419 /* Load/Store exclusive instructions are implemented by remembering
6420 the value/address loaded, and seeing if these are the same
6421 when the store is performed. This should be sufficient to implement
6422 the architecturally mandated semantics, and avoids having to monitor
6423 regular stores.
6424
6425 In system emulation mode only one CPU will be running at once, so
6426 this sequence is effectively atomic. In user emulation mode we
6427 throw an exception and handle the atomic operation elsewhere. */
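/* As an illustration (not part of the translator), the guest code this
 * typically models is a load-linked/store-conditional retry loop, e.g.
 * an atomic increment:
 *
 *   1: ldrex r1, [r0]
 *      add r1, r1, #1
 *      strex r2, r1, [r0]   @ r2 = 0 on success, 1 on failure
 *      cmp r2, #0
 *      bne 1b
 *
 * The strex only succeeds while the remembered address (and, in this
 * implementation, value) still match the earlier ldrex.
 */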
6428 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6429 TCGv addr, int size)
6430 {
6431 TCGv tmp;
6432
6433 switch (size) {
6434 case 0:
6435 tmp = gen_ld8u(addr, IS_USER(s));
6436 break;
6437 case 1:
6438 tmp = gen_ld16u(addr, IS_USER(s));
6439 break;
6440 case 2:
6441 case 3:
6442 tmp = gen_ld32(addr, IS_USER(s));
6443 break;
6444 default:
6445 abort();
6446 }
6447 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6448 store_reg(s, rt, tmp);
6449 if (size == 3) {
6450 TCGv tmp2 = tcg_temp_new_i32();
6451 tcg_gen_addi_i32(tmp2, addr, 4);
6452 tmp = gen_ld32(tmp2, IS_USER(s));
6453 tcg_temp_free_i32(tmp2);
6454 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6455 store_reg(s, rt2, tmp);
6456 }
6457 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6458 }
6459
6460 static void gen_clrex(DisasContext *s)
6461 {
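/* We model "clear the monitor" by setting the remembered exclusive
 * address to -1, which no subsequent store-exclusive should match.
 */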
6462 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6463 }
6464
6465 #ifdef CONFIG_USER_ONLY
6466 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6467 TCGv addr, int size)
6468 {
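/* Record the operands for the EXCP_STREX handler: bits [3:0] hold
 * size, [7:4] rd, [11:8] rt and [15:12] rt2.
 */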
6469 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6470 tcg_gen_movi_i32(cpu_exclusive_info,
6471 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6472 gen_exception_insn(s, 4, EXCP_STREX);
6473 }
6474 #else
6475 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6476 TCGv addr, int size)
6477 {
6478 TCGv tmp;
6479 int done_label;
6480 int fail_label;
6481
6482 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6483 [addr] = {Rt};
6484 {Rd} = 0;
6485 } else {
6486 {Rd} = 1;
6487 } */
6488 fail_label = gen_new_label();
6489 done_label = gen_new_label();
6490 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6491 switch (size) {
6492 case 0:
6493 tmp = gen_ld8u(addr, IS_USER(s));
6494 break;
6495 case 1:
6496 tmp = gen_ld16u(addr, IS_USER(s));
6497 break;
6498 case 2:
6499 case 3:
6500 tmp = gen_ld32(addr, IS_USER(s));
6501 break;
6502 default:
6503 abort();
6504 }
6505 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6506 tcg_temp_free_i32(tmp);
6507 if (size == 3) {
6508 TCGv tmp2 = tcg_temp_new_i32();
6509 tcg_gen_addi_i32(tmp2, addr, 4);
6510 tmp = gen_ld32(tmp2, IS_USER(s));
6511 tcg_temp_free_i32(tmp2);
6512 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6513 tcg_temp_free_i32(tmp);
6514 }
6515 tmp = load_reg(s, rt);
6516 switch (size) {
6517 case 0:
6518 gen_st8(tmp, addr, IS_USER(s));
6519 break;
6520 case 1:
6521 gen_st16(tmp, addr, IS_USER(s));
6522 break;
6523 case 2:
6524 case 3:
6525 gen_st32(tmp, addr, IS_USER(s));
6526 break;
6527 default:
6528 abort();
6529 }
6530 if (size == 3) {
6531 tcg_gen_addi_i32(addr, addr, 4);
6532 tmp = load_reg(s, rt2);
6533 gen_st32(tmp, addr, IS_USER(s));
6534 }
6535 tcg_gen_movi_i32(cpu_R[rd], 0);
6536 tcg_gen_br(done_label);
6537 gen_set_label(fail_label);
6538 tcg_gen_movi_i32(cpu_R[rd], 1);
6539 gen_set_label(done_label);
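/* Whether the store succeeded or failed, the exclusive access is now
 * complete, so clear the monitor.
 */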
6540 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6541 }
6542 #endif
6543
6544 static void disas_arm_insn(CPUState * env, DisasContext *s)
6545 {
6546 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6547 TCGv tmp;
6548 TCGv tmp2;
6549 TCGv tmp3;
6550 TCGv addr;
6551 TCGv_i64 tmp64;
6552
6553 insn = ldl_code(s->pc);
6554 s->pc += 4;
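/* s->pc now points 4 bytes past this insn, so the architectural PC
 * value (insn address + 8) is s->pc + 4.
 */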
6555
6556 /* M variants do not implement ARM mode. */
6557 if (IS_M(env))
6558 goto illegal_op;
6559 cond = insn >> 28;
6560 if (cond == 0xf) {
6561 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6562 * choose to UNDEF. In ARMv5 and above the space is used
6563 * for miscellaneous unconditional instructions.
6564 */
6565 ARCH(5);
6566
6567 /* Unconditional instructions. */
6568 if (((insn >> 25) & 7) == 1) {
6569 /* NEON Data processing. */
6570 if (!arm_feature(env, ARM_FEATURE_NEON))
6571 goto illegal_op;
6572
6573 if (disas_neon_data_insn(env, s, insn))
6574 goto illegal_op;
6575 return;
6576 }
6577 if ((insn & 0x0f100000) == 0x04000000) {
6578 /* NEON load/store. */
6579 if (!arm_feature(env, ARM_FEATURE_NEON))
6580 goto illegal_op;
6581
6582 if (disas_neon_ls_insn(env, s, insn))
6583 goto illegal_op;
6584 return;
6585 }
6586 if (((insn & 0x0f30f000) == 0x0510f000) ||
6587 ((insn & 0x0f30f010) == 0x0710f000)) {
6588 if ((insn & (1 << 22)) == 0) {
6589 /* PLDW; v7MP */
6590 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6591 goto illegal_op;
6592 }
6593 }
6594 /* Otherwise PLD; v5TE+ */
6595 ARCH(5TE);
6596 return;
6597 }
6598 if (((insn & 0x0f70f000) == 0x0450f000) ||
6599 ((insn & 0x0f70f010) == 0x0650f000)) {
6600 ARCH(7);
6601 return; /* PLI; V7 */
6602 }
6603 if (((insn & 0x0f700000) == 0x04100000) ||
6604 ((insn & 0x0f700010) == 0x06100000)) {
6605 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6606 goto illegal_op;
6607 }
6608 return; /* v7MP: Unallocated memory hint: must NOP */
6609 }
6610
6611 if ((insn & 0x0ffffdff) == 0x01010000) {
6612 ARCH(6);
6613 /* setend */
6614 if (insn & (1 << 9)) {
6615 /* BE8 mode not implemented. */
6616 goto illegal_op;
6617 }
6618 return;
6619 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6620 switch ((insn >> 4) & 0xf) {
6621 case 1: /* clrex */
6622 ARCH(6K);
6623 gen_clrex(s);
6624 return;
6625 case 4: /* dsb */
6626 case 5: /* dmb */
6627 case 6: /* isb */
6628 ARCH(7);
6629 /* We don't emulate caches, so these are no-ops. */
6630 return;
6631 default:
6632 goto illegal_op;
6633 }
6634 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6635 /* srs */
6636 int32_t offset;
6637 if (IS_USER(s))
6638 goto illegal_op;
6639 ARCH(6);
6640 op1 = (insn & 0x1f);
6641 addr = tcg_temp_new_i32();
6642 tmp = tcg_const_i32(op1);
6643 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6644 tcg_temp_free_i32(tmp);
6645 i = (insn >> 23) & 3;
6646 switch (i) {
6647 case 0: offset = -4; break; /* DA */
6648 case 1: offset = 0; break; /* IA */
6649 case 2: offset = -8; break; /* DB */
6650 case 3: offset = 4; break; /* IB */
6651 default: abort();
6652 }
6653 if (offset)
6654 tcg_gen_addi_i32(addr, addr, offset);
6655 tmp = load_reg(s, 14);
6656 gen_st32(tmp, addr, 0);
6657 tmp = load_cpu_field(spsr);
6658 tcg_gen_addi_i32(addr, addr, 4);
6659 gen_st32(tmp, addr, 0);
6660 if (insn & (1 << 21)) {
6661 /* Base writeback. */
6662 switch (i) {
6663 case 0: offset = -8; break;
6664 case 1: offset = 4; break;
6665 case 2: offset = -4; break;
6666 case 3: offset = 0; break;
6667 default: abort();
6668 }
6669 if (offset)
6670 tcg_gen_addi_i32(addr, addr, offset);
6671 tmp = tcg_const_i32(op1);
6672 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6673 tcg_temp_free_i32(tmp);
6674 tcg_temp_free_i32(addr);
6675 } else {
6676 tcg_temp_free_i32(addr);
6677 }
6678 return;
6679 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6680 /* rfe */
6681 int32_t offset;
6682 if (IS_USER(s))
6683 goto illegal_op;
6684 ARCH(6);
6685 rn = (insn >> 16) & 0xf;
6686 addr = load_reg(s, rn);
6687 i = (insn >> 23) & 3;
6688 switch (i) {
6689 case 0: offset = -4; break; /* DA */
6690 case 1: offset = 0; break; /* IA */
6691 case 2: offset = -8; break; /* DB */
6692 case 3: offset = 4; break; /* IB */
6693 default: abort();
6694 }
6695 if (offset)
6696 tcg_gen_addi_i32(addr, addr, offset);
6697 /* Load PC into tmp and CPSR into tmp2. */
6698 tmp = gen_ld32(addr, 0);
6699 tcg_gen_addi_i32(addr, addr, 4);
6700 tmp2 = gen_ld32(addr, 0);
6701 if (insn & (1 << 21)) {
6702 /* Base writeback. */
6703 switch (i) {
6704 case 0: offset = -8; break;
6705 case 1: offset = 4; break;
6706 case 2: offset = -4; break;
6707 case 3: offset = 0; break;
6708 default: abort();
6709 }
6710 if (offset)
6711 tcg_gen_addi_i32(addr, addr, offset);
6712 store_reg(s, rn, addr);
6713 } else {
6714 tcg_temp_free_i32(addr);
6715 }
6716 gen_rfe(s, tmp, tmp2);
6717 return;
6718 } else if ((insn & 0x0e000000) == 0x0a000000) {
6719 /* branch link and change to thumb (blx <offset>) */
6720 int32_t offset;
6721
6722 val = (uint32_t)s->pc;
6723 tmp = tcg_temp_new_i32();
6724 tcg_gen_movi_i32(tmp, val);
6725 store_reg(s, 14, tmp);
6726 /* Sign-extend the 24-bit offset */
6727 offset = (((int32_t)insn) << 8) >> 8;
6728 /* offset * 4 + bit24 * 2 + (thumb bit) */
6729 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6730 /* pipeline offset */
6731 val += 4;
6732 /* protected by ARCH(5); above, near the start of uncond block */
6733 gen_bx_im(s, val);
6734 return;
6735 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6736 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6737 /* iWMMXt register transfer. */
6738 if (env->cp15.c15_cpar & (1 << 1))
6739 if (!disas_iwmmxt_insn(env, s, insn))
6740 return;
6741 }
6742 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6743 /* Coprocessor double register transfer. */
6744 ARCH(5TE);
6745 } else if ((insn & 0x0f000010) == 0x0e000010) {
6746 /* Additional coprocessor register transfer. */
6747 } else if ((insn & 0x0ff10020) == 0x01000000) {
6748 uint32_t mask;
6749 uint32_t val;
6750 /* cps (privileged) */
6751 if (IS_USER(s))
6752 return;
6753 mask = val = 0;
6754 if (insn & (1 << 19)) {
6755 if (insn & (1 << 8))
6756 mask |= CPSR_A;
6757 if (insn & (1 << 7))
6758 mask |= CPSR_I;
6759 if (insn & (1 << 6))
6760 mask |= CPSR_F;
6761 if (insn & (1 << 18))
6762 val |= mask;
6763 }
6764 if (insn & (1 << 17)) {
6765 mask |= CPSR_M;
6766 val |= (insn & 0x1f);
6767 }
6768 if (mask) {
6769 gen_set_psr_im(s, mask, 0, val);
6770 }
6771 return;
6772 }
6773 goto illegal_op;
6774 }
6775 if (cond != 0xe) {
6776 /* if not always execute, we generate a conditional jump to
6777 next instruction */
6778 s->condlabel = gen_new_label();
6779 gen_test_cc(cond ^ 1, s->condlabel);
6780 s->condjmp = 1;
6781 }
6782 if ((insn & 0x0f900000) == 0x03000000) {
6783 if ((insn & (1 << 21)) == 0) {
6784 ARCH(6T2);
6785 rd = (insn >> 12) & 0xf;
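/* The 16-bit immediate is split across insn[19:16] and insn[11:0]. */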
6786 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6787 if ((insn & (1 << 22)) == 0) {
6788 /* MOVW */
6789 tmp = tcg_temp_new_i32();
6790 tcg_gen_movi_i32(tmp, val);
6791 } else {
6792 /* MOVT */
6793 tmp = load_reg(s, rd);
6794 tcg_gen_ext16u_i32(tmp, tmp);
6795 tcg_gen_ori_i32(tmp, tmp, val << 16);
6796 }
6797 store_reg(s, rd, tmp);
6798 } else {
6799 if (((insn >> 12) & 0xf) != 0xf)
6800 goto illegal_op;
6801 if (((insn >> 16) & 0xf) == 0) {
6802 gen_nop_hint(s, insn & 0xff);
6803 } else {
6804 /* CPSR = immediate */
6805 val = insn & 0xff;
6806 shift = ((insn >> 8) & 0xf) * 2;
6807 if (shift)
6808 val = (val >> shift) | (val << (32 - shift));
6809 i = ((insn & (1 << 22)) != 0);
6810 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6811 goto illegal_op;
6812 }
6813 }
6814 } else if ((insn & 0x0f900000) == 0x01000000
6815 && (insn & 0x00000090) != 0x00000090) {
6816 /* miscellaneous instructions */
6817 op1 = (insn >> 21) & 3;
6818 sh = (insn >> 4) & 0xf;
6819 rm = insn & 0xf;
6820 switch (sh) {
6821 case 0x0: /* move program status register */
6822 if (op1 & 1) {
6823 /* PSR = reg */
6824 tmp = load_reg(s, rm);
6825 i = ((op1 & 2) != 0);
6826 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6827 goto illegal_op;
6828 } else {
6829 /* reg = PSR */
6830 rd = (insn >> 12) & 0xf;
6831 if (op1 & 2) {
6832 if (IS_USER(s))
6833 goto illegal_op;
6834 tmp = load_cpu_field(spsr);
6835 } else {
6836 tmp = tcg_temp_new_i32();
6837 gen_helper_cpsr_read(tmp);
6838 }
6839 store_reg(s, rd, tmp);
6840 }
6841 break;
6842 case 0x1:
6843 if (op1 == 1) {
6844 /* branch/exchange thumb (bx). */
6845 ARCH(4T);
6846 tmp = load_reg(s, rm);
6847 gen_bx(s, tmp);
6848 } else if (op1 == 3) {
6849 /* clz */
6850 ARCH(5);
6851 rd = (insn >> 12) & 0xf;
6852 tmp = load_reg(s, rm);
6853 gen_helper_clz(tmp, tmp);
6854 store_reg(s, rd, tmp);
6855 } else {
6856 goto illegal_op;
6857 }
6858 break;
6859 case 0x2:
6860 if (op1 == 1) {
6861 ARCH(5J); /* bxj */
6862 /* Trivial implementation equivalent to bx. */
6863 tmp = load_reg(s, rm);
6864 gen_bx(s, tmp);
6865 } else {
6866 goto illegal_op;
6867 }
6868 break;
6869 case 0x3:
6870 if (op1 != 1)
6871 goto illegal_op;
6872
6873 ARCH(5);
6874 /* branch link/exchange thumb (blx) */
6875 tmp = load_reg(s, rm);
6876 tmp2 = tcg_temp_new_i32();
6877 tcg_gen_movi_i32(tmp2, s->pc);
6878 store_reg(s, 14, tmp2);
6879 gen_bx(s, tmp);
6880 break;
6881 case 0x5: /* saturating add/subtract */
6882 ARCH(5TE);
6883 rd = (insn >> 12) & 0xf;
6884 rn = (insn >> 16) & 0xf;
6885 tmp = load_reg(s, rm);
6886 tmp2 = load_reg(s, rn);
6887 if (op1 & 2)
6888 gen_helper_double_saturate(tmp2, tmp2);
6889 if (op1 & 1)
6890 gen_helper_sub_saturate(tmp, tmp, tmp2);
6891 else
6892 gen_helper_add_saturate(tmp, tmp, tmp2);
6893 tcg_temp_free_i32(tmp2);
6894 store_reg(s, rd, tmp);
6895 break;
6896 case 7:
6897 /* SMC instruction (op1 == 3)
6898 and undefined instructions (op1 == 0 || op1 == 2)
6899 will trap */
6900 if (op1 != 1) {
6901 goto illegal_op;
6902 }
6903 /* bkpt */
6904 ARCH(5);
6905 gen_exception_insn(s, 4, EXCP_BKPT);
6906 break;
6907 case 0x8: /* signed multiply */
6908 case 0xa:
6909 case 0xc:
6910 case 0xe:
6911 ARCH(5TE);
6912 rs = (insn >> 8) & 0xf;
6913 rn = (insn >> 12) & 0xf;
6914 rd = (insn >> 16) & 0xf;
6915 if (op1 == 1) {
6916 /* (32 * 16) >> 16 */
6917 tmp = load_reg(s, rm);
6918 tmp2 = load_reg(s, rs);
6919 if (sh & 4)
6920 tcg_gen_sari_i32(tmp2, tmp2, 16);
6921 else
6922 gen_sxth(tmp2);
6923 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6924 tcg_gen_shri_i64(tmp64, tmp64, 16);
6925 tmp = tcg_temp_new_i32();
6926 tcg_gen_trunc_i64_i32(tmp, tmp64);
6927 tcg_temp_free_i64(tmp64);
6928 if ((sh & 2) == 0) {
6929 tmp2 = load_reg(s, rn);
6930 gen_helper_add_setq(tmp, tmp, tmp2);
6931 tcg_temp_free_i32(tmp2);
6932 }
6933 store_reg(s, rd, tmp);
6934 } else {
6935 /* 16 * 16 */
6936 tmp = load_reg(s, rm);
6937 tmp2 = load_reg(s, rs);
6938 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6939 tcg_temp_free_i32(tmp2);
6940 if (op1 == 2) {
6941 tmp64 = tcg_temp_new_i64();
6942 tcg_gen_ext_i32_i64(tmp64, tmp);
6943 tcg_temp_free_i32(tmp);
6944 gen_addq(s, tmp64, rn, rd);
6945 gen_storeq_reg(s, rn, rd, tmp64);
6946 tcg_temp_free_i64(tmp64);
6947 } else {
6948 if (op1 == 0) {
6949 tmp2 = load_reg(s, rn);
6950 gen_helper_add_setq(tmp, tmp, tmp2);
6951 tcg_temp_free_i32(tmp2);
6952 }
6953 store_reg(s, rd, tmp);
6954 }
6955 }
6956 break;
6957 default:
6958 goto illegal_op;
6959 }
6960 } else if (((insn & 0x0e000000) == 0 &&
6961 (insn & 0x00000090) != 0x90) ||
6962 ((insn & 0x0e000000) == (1 << 25))) {
6963 int set_cc, logic_cc, shiftop;
6964
6965 op1 = (insn >> 21) & 0xf;
6966 set_cc = (insn >> 20) & 1;
6967 logic_cc = table_logic_cc[op1] & set_cc;
6968
6969 /* data processing instruction */
6970 if (insn & (1 << 25)) {
6971 /* immediate operand */
6972 val = insn & 0xff;
6973 shift = ((insn >> 8) & 0xf) * 2;
6974 if (shift) {
6975 val = (val >> shift) | (val << (32 - shift));
6976 }
6977 tmp2 = tcg_temp_new_i32();
6978 tcg_gen_movi_i32(tmp2, val);
6979 if (logic_cc && shift) {
6980 gen_set_CF_bit31(tmp2);
6981 }
6982 } else {
6983 /* register */
6984 rm = (insn) & 0xf;
6985 tmp2 = load_reg(s, rm);
6986 shiftop = (insn >> 5) & 3;
6987 if (!(insn & (1 << 4))) {
6988 shift = (insn >> 7) & 0x1f;
6989 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6990 } else {
6991 rs = (insn >> 8) & 0xf;
6992 tmp = load_reg(s, rs);
6993 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6994 }
6995 }
6996 if (op1 != 0x0f && op1 != 0x0d) {
6997 rn = (insn >> 16) & 0xf;
6998 tmp = load_reg(s, rn);
6999 } else {
7000 TCGV_UNUSED(tmp);
7001 }
7002 rd = (insn >> 12) & 0xf;
7003 switch(op1) {
7004 case 0x00:
7005 tcg_gen_and_i32(tmp, tmp, tmp2);
7006 if (logic_cc) {
7007 gen_logic_CC(tmp);
7008 }
7009 store_reg_bx(env, s, rd, tmp);
7010 break;
7011 case 0x01:
7012 tcg_gen_xor_i32(tmp, tmp, tmp2);
7013 if (logic_cc) {
7014 gen_logic_CC(tmp);
7015 }
7016 store_reg_bx(env, s, rd, tmp);
7017 break;
7018 case 0x02:
7019 if (set_cc && rd == 15) {
7020 /* SUBS r15, ... is used for exception return. */
7021 if (IS_USER(s)) {
7022 goto illegal_op;
7023 }
7024 gen_helper_sub_cc(tmp, tmp, tmp2);
7025 gen_exception_return(s, tmp);
7026 } else {
7027 if (set_cc) {
7028 gen_helper_sub_cc(tmp, tmp, tmp2);
7029 } else {
7030 tcg_gen_sub_i32(tmp, tmp, tmp2);
7031 }
7032 store_reg_bx(env, s, rd, tmp);
7033 }
7034 break;
7035 case 0x03:
7036 if (set_cc) {
7037 gen_helper_sub_cc(tmp, tmp2, tmp);
7038 } else {
7039 tcg_gen_sub_i32(tmp, tmp2, tmp);
7040 }
7041 store_reg_bx(env, s, rd, tmp);
7042 break;
7043 case 0x04:
7044 if (set_cc) {
7045 gen_helper_add_cc(tmp, tmp, tmp2);
7046 } else {
7047 tcg_gen_add_i32(tmp, tmp, tmp2);
7048 }
7049 store_reg_bx(env, s, rd, tmp);
7050 break;
7051 case 0x05:
7052 if (set_cc) {
7053 gen_helper_adc_cc(tmp, tmp, tmp2);
7054 } else {
7055 gen_add_carry(tmp, tmp, tmp2);
7056 }
7057 store_reg_bx(env, s, rd, tmp);
7058 break;
7059 case 0x06:
7060 if (set_cc) {
7061 gen_helper_sbc_cc(tmp, tmp, tmp2);
7062 } else {
7063 gen_sub_carry(tmp, tmp, tmp2);
7064 }
7065 store_reg_bx(env, s, rd, tmp);
7066 break;
7067 case 0x07:
7068 if (set_cc) {
7069 gen_helper_sbc_cc(tmp, tmp2, tmp);
7070 } else {
7071 gen_sub_carry(tmp, tmp2, tmp);
7072 }
7073 store_reg_bx(env, s, rd, tmp);
7074 break;
7075 case 0x08:
7076 if (set_cc) {
7077 tcg_gen_and_i32(tmp, tmp, tmp2);
7078 gen_logic_CC(tmp);
7079 }
7080 tcg_temp_free_i32(tmp);
7081 break;
7082 case 0x09:
7083 if (set_cc) {
7084 tcg_gen_xor_i32(tmp, tmp, tmp2);
7085 gen_logic_CC(tmp);
7086 }
7087 tcg_temp_free_i32(tmp);
7088 break;
7089 case 0x0a:
7090 if (set_cc) {
7091 gen_helper_sub_cc(tmp, tmp, tmp2);
7092 }
7093 tcg_temp_free_i32(tmp);
7094 break;
7095 case 0x0b:
7096 if (set_cc) {
7097 gen_helper_add_cc(tmp, tmp, tmp2);
7098 }
7099 tcg_temp_free_i32(tmp);
7100 break;
7101 case 0x0c:
7102 tcg_gen_or_i32(tmp, tmp, tmp2);
7103 if (logic_cc) {
7104 gen_logic_CC(tmp);
7105 }
7106 store_reg_bx(env, s, rd, tmp);
7107 break;
7108 case 0x0d:
7109 if (logic_cc && rd == 15) {
7110 /* MOVS r15, ... is used for exception return. */
7111 if (IS_USER(s)) {
7112 goto illegal_op;
7113 }
7114 gen_exception_return(s, tmp2);
7115 } else {
7116 if (logic_cc) {
7117 gen_logic_CC(tmp2);
7118 }
7119 store_reg_bx(env, s, rd, tmp2);
7120 }
7121 break;
7122 case 0x0e:
7123 tcg_gen_andc_i32(tmp, tmp, tmp2);
7124 if (logic_cc) {
7125 gen_logic_CC(tmp);
7126 }
7127 store_reg_bx(env, s, rd, tmp);
7128 break;
7129 default:
7130 case 0x0f:
7131 tcg_gen_not_i32(tmp2, tmp2);
7132 if (logic_cc) {
7133 gen_logic_CC(tmp2);
7134 }
7135 store_reg_bx(env, s, rd, tmp2);
7136 break;
7137 }
7138 if (op1 != 0x0f && op1 != 0x0d) {
7139 tcg_temp_free_i32(tmp2);
7140 }
7141 } else {
7142 /* other instructions */
7143 op1 = (insn >> 24) & 0xf;
7144 switch(op1) {
7145 case 0x0:
7146 case 0x1:
7147 /* multiplies, extra load/stores */
7148 sh = (insn >> 5) & 3;
7149 if (sh == 0) {
7150 if (op1 == 0x0) {
7151 rd = (insn >> 16) & 0xf;
7152 rn = (insn >> 12) & 0xf;
7153 rs = (insn >> 8) & 0xf;
7154 rm = (insn) & 0xf;
7155 op1 = (insn >> 20) & 0xf;
7156 switch (op1) {
7157 case 0: case 1: case 2: case 3: case 6:
7158 /* 32 bit mul */
7159 tmp = load_reg(s, rs);
7160 tmp2 = load_reg(s, rm);
7161 tcg_gen_mul_i32(tmp, tmp, tmp2);
7162 tcg_temp_free_i32(tmp2);
7163 if (insn & (1 << 22)) {
7164 /* Subtract (mls) */
7165 ARCH(6T2);
7166 tmp2 = load_reg(s, rn);
7167 tcg_gen_sub_i32(tmp, tmp2, tmp);
7168 tcg_temp_free_i32(tmp2);
7169 } else if (insn & (1 << 21)) {
7170 /* Add */
7171 tmp2 = load_reg(s, rn);
7172 tcg_gen_add_i32(tmp, tmp, tmp2);
7173 tcg_temp_free_i32(tmp2);
7174 }
7175 if (insn & (1 << 20))
7176 gen_logic_CC(tmp);
7177 store_reg(s, rd, tmp);
7178 break;
7179 case 4:
7180 /* 64 bit mul double accumulate (UMAAL) */
7181 ARCH(6);
7182 tmp = load_reg(s, rs);
7183 tmp2 = load_reg(s, rm);
7184 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7185 gen_addq_lo(s, tmp64, rn);
7186 gen_addq_lo(s, tmp64, rd);
7187 gen_storeq_reg(s, rn, rd, tmp64);
7188 tcg_temp_free_i64(tmp64);
7189 break;
7190 case 8: case 9: case 10: case 11:
7191 case 12: case 13: case 14: case 15:
7192 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7193 tmp = load_reg(s, rs);
7194 tmp2 = load_reg(s, rm);
7195 if (insn & (1 << 22)) {
7196 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7197 } else {
7198 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7199 }
7200 if (insn & (1 << 21)) { /* mult accumulate */
7201 gen_addq(s, tmp64, rn, rd);
7202 }
7203 if (insn & (1 << 20)) {
7204 gen_logicq_cc(tmp64);
7205 }
7206 gen_storeq_reg(s, rn, rd, tmp64);
7207 tcg_temp_free_i64(tmp64);
7208 break;
7209 default:
7210 goto illegal_op;
7211 }
7212 } else {
7213 rn = (insn >> 16) & 0xf;
7214 rd = (insn >> 12) & 0xf;
7215 if (insn & (1 << 23)) {
7216 /* load/store exclusive */
7217 op1 = (insn >> 21) & 0x3;
7218 if (op1)
7219 ARCH(6K);
7220 else
7221 ARCH(6);
7222 addr = tcg_temp_local_new_i32();
7223 load_reg_var(s, addr, rn);
7224 if (insn & (1 << 20)) {
7225 switch (op1) {
7226 case 0: /* ldrex */
7227 gen_load_exclusive(s, rd, 15, addr, 2);
7228 break;
7229 case 1: /* ldrexd */
7230 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7231 break;
7232 case 2: /* ldrexb */
7233 gen_load_exclusive(s, rd, 15, addr, 0);
7234 break;
7235 case 3: /* ldrexh */
7236 gen_load_exclusive(s, rd, 15, addr, 1);
7237 break;
7238 default:
7239 abort();
7240 }
7241 } else {
7242 rm = insn & 0xf;
7243 switch (op1) {
7244 case 0: /* strex */
7245 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7246 break;
7247 case 1: /* strexd */
7248 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7249 break;
7250 case 2: /* strexb */
7251 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7252 break;
7253 case 3: /* strexh */
7254 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7255 break;
7256 default:
7257 abort();
7258 }
7259 }
7260 tcg_temp_free(addr);
7261 } else {
7262 /* SWP instruction */
7263 rm = (insn) & 0xf;
7264
7265 /* ??? This is not really atomic. However we know
7266 we never have multiple CPUs running in parallel,
7267 so it is good enough. */
7268 addr = load_reg(s, rn);
7269 tmp = load_reg(s, rm);
7270 if (insn & (1 << 22)) {
7271 tmp2 = gen_ld8u(addr, IS_USER(s));
7272 gen_st8(tmp, addr, IS_USER(s));
7273 } else {
7274 tmp2 = gen_ld32(addr, IS_USER(s));
7275 gen_st32(tmp, addr, IS_USER(s));
7276 }
7277 tcg_temp_free_i32(addr);
7278 store_reg(s, rd, tmp2);
7279 }
7280 }
7281 } else {
7282 int address_offset;
7283 int load;
7284 /* Misc load/store */
7285 rn = (insn >> 16) & 0xf;
7286 rd = (insn >> 12) & 0xf;
7287 addr = load_reg(s, rn);
7288 if (insn & (1 << 24))
7289 gen_add_datah_offset(s, insn, 0, addr);
7290 address_offset = 0;
7291 if (insn & (1 << 20)) {
7292 /* load */
7293 switch(sh) {
7294 case 1:
7295 tmp = gen_ld16u(addr, IS_USER(s));
7296 break;
7297 case 2:
7298 tmp = gen_ld8s(addr, IS_USER(s));
7299 break;
7300 default:
7301 case 3:
7302 tmp = gen_ld16s(addr, IS_USER(s));
7303 break;
7304 }
7305 load = 1;
7306 } else if (sh & 2) {
7307 ARCH(5TE);
7308 /* doubleword */
7309 if (sh & 1) {
7310 /* store */
7311 tmp = load_reg(s, rd);
7312 gen_st32(tmp, addr, IS_USER(s));
7313 tcg_gen_addi_i32(addr, addr, 4);
7314 tmp = load_reg(s, rd + 1);
7315 gen_st32(tmp, addr, IS_USER(s));
7316 load = 0;
7317 } else {
7318 /* load */
7319 tmp = gen_ld32(addr, IS_USER(s));
7320 store_reg(s, rd, tmp);
7321 tcg_gen_addi_i32(addr, addr, 4);
7322 tmp = gen_ld32(addr, IS_USER(s));
7323 rd++;
7324 load = 1;
7325 }
7326 address_offset = -4;
7327 } else {
7328 /* store */
7329 tmp = load_reg(s, rd);
7330 gen_st16(tmp, addr, IS_USER(s));
7331 load = 0;
7332 }
7333 /* Perform base writeback before storing the loaded value, to
7334 ensure correct behavior with overlapping index registers.
7335 ldrd with base writeback is undefined if the
7336 destination and index registers overlap. */
7337 if (!(insn & (1 << 24))) {
7338 gen_add_datah_offset(s, insn, address_offset, addr);
7339 store_reg(s, rn, addr);
7340 } else if (insn & (1 << 21)) {
7341 if (address_offset)
7342 tcg_gen_addi_i32(addr, addr, address_offset);
7343 store_reg(s, rn, addr);
7344 } else {
7345 tcg_temp_free_i32(addr);
7346 }
7347 if (load) {
7348 /* Complete the load. */
7349 store_reg(s, rd, tmp);
7350 }
7351 }
7352 break;
7353 case 0x4:
7354 case 0x5:
7355 goto do_ldst;
7356 case 0x6:
7357 case 0x7:
7358 if (insn & (1 << 4)) {
7359 ARCH(6);
7360 /* Armv6 Media instructions. */
7361 rm = insn & 0xf;
7362 rn = (insn >> 16) & 0xf;
7363 rd = (insn >> 12) & 0xf;
7364 rs = (insn >> 8) & 0xf;
7365 switch ((insn >> 23) & 3) {
7366 case 0: /* Parallel add/subtract. */
7367 op1 = (insn >> 20) & 7;
7368 tmp = load_reg(s, rn);
7369 tmp2 = load_reg(s, rm);
7370 sh = (insn >> 5) & 7;
7371 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7372 goto illegal_op;
7373 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7374 tcg_temp_free_i32(tmp2);
7375 store_reg(s, rd, tmp);
7376 break;
7377 case 1:
7378 if ((insn & 0x00700020) == 0) {
7379 /* Halfword pack. */
7380 tmp = load_reg(s, rn);
7381 tmp2 = load_reg(s, rm);
7382 shift = (insn >> 7) & 0x1f;
7383 if (insn & (1 << 6)) {
7384 /* pkhtb */
7385 if (shift == 0)
7386 shift = 31;
7387 tcg_gen_sari_i32(tmp2, tmp2, shift);
7388 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7389 tcg_gen_ext16u_i32(tmp2, tmp2);
7390 } else {
7391 /* pkhbt */
7392 if (shift)
7393 tcg_gen_shli_i32(tmp2, tmp2, shift);
7394 tcg_gen_ext16u_i32(tmp, tmp);
7395 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7396 }
7397 tcg_gen_or_i32(tmp, tmp, tmp2);
7398 tcg_temp_free_i32(tmp2);
7399 store_reg(s, rd, tmp);
7400 } else if ((insn & 0x00200020) == 0x00200000) {
7401 /* [us]sat */
7402 tmp = load_reg(s, rm);
7403 shift = (insn >> 7) & 0x1f;
7404 if (insn & (1 << 6)) {
7405 if (shift == 0)
7406 shift = 31;
7407 tcg_gen_sari_i32(tmp, tmp, shift);
7408 } else {
7409 tcg_gen_shli_i32(tmp, tmp, shift);
7410 }
7411 sh = (insn >> 16) & 0x1f;
7412 tmp2 = tcg_const_i32(sh);
7413 if (insn & (1 << 22))
7414 gen_helper_usat(tmp, tmp, tmp2);
7415 else
7416 gen_helper_ssat(tmp, tmp, tmp2);
7417 tcg_temp_free_i32(tmp2);
7418 store_reg(s, rd, tmp);
7419 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7420 /* [us]sat16 */
7421 tmp = load_reg(s, rm);
7422 sh = (insn >> 16) & 0x1f;
7423 tmp2 = tcg_const_i32(sh);
7424 if (insn & (1 << 22))
7425 gen_helper_usat16(tmp, tmp, tmp2);
7426 else
7427 gen_helper_ssat16(tmp, tmp, tmp2);
7428 tcg_temp_free_i32(tmp2);
7429 store_reg(s, rd, tmp);
7430 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7431 /* Select bytes. */
7432 tmp = load_reg(s, rn);
7433 tmp2 = load_reg(s, rm);
7434 tmp3 = tcg_temp_new_i32();
7435 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7436 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7437 tcg_temp_free_i32(tmp3);
7438 tcg_temp_free_i32(tmp2);
7439 store_reg(s, rd, tmp);
7440 } else if ((insn & 0x000003e0) == 0x00000060) {
7441 tmp = load_reg(s, rm);
7442 shift = (insn >> 10) & 3;
7443 /* ??? In many cases it's not necessary to do a
7444 rotate; a shift is sufficient. */
7445 if (shift != 0)
7446 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7447 op1 = (insn >> 20) & 7;
7448 switch (op1) {
7449 case 0: gen_sxtb16(tmp); break;
7450 case 2: gen_sxtb(tmp); break;
7451 case 3: gen_sxth(tmp); break;
7452 case 4: gen_uxtb16(tmp); break;
7453 case 6: gen_uxtb(tmp); break;
7454 case 7: gen_uxth(tmp); break;
7455 default: goto illegal_op;
7456 }
7457 if (rn != 15) {
7458 tmp2 = load_reg(s, rn);
7459 if ((op1 & 3) == 0) {
7460 gen_add16(tmp, tmp2);
7461 } else {
7462 tcg_gen_add_i32(tmp, tmp, tmp2);
7463 tcg_temp_free_i32(tmp2);
7464 }
7465 }
7466 store_reg(s, rd, tmp);
7467 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7468 /* rev */
7469 tmp = load_reg(s, rm);
7470 if (insn & (1 << 22)) {
7471 if (insn & (1 << 7)) {
7472 gen_revsh(tmp);
7473 } else {
7474 ARCH(6T2);
7475 gen_helper_rbit(tmp, tmp);
7476 }
7477 } else {
7478 if (insn & (1 << 7))
7479 gen_rev16(tmp);
7480 else
7481 tcg_gen_bswap32_i32(tmp, tmp);
7482 }
7483 store_reg(s, rd, tmp);
7484 } else {
7485 goto illegal_op;
7486 }
7487 break;
7488 case 2: /* Multiplies (Type 3). */
7489 tmp = load_reg(s, rm);
7490 tmp2 = load_reg(s, rs);
7491 if (insn & (1 << 20)) {
7492 /* Signed multiply most significant [accumulate].
7493 (SMMUL, SMMLA, SMMLS) */
7494 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7495
7496 if (rd != 15) {
7497 tmp = load_reg(s, rd);
7498 if (insn & (1 << 6)) {
7499 tmp64 = gen_subq_msw(tmp64, tmp);
7500 } else {
7501 tmp64 = gen_addq_msw(tmp64, tmp);
7502 }
7503 }
7504 if (insn & (1 << 5)) {
7505 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7506 }
7507 tcg_gen_shri_i64(tmp64, tmp64, 32);
7508 tmp = tcg_temp_new_i32();
7509 tcg_gen_trunc_i64_i32(tmp, tmp64);
7510 tcg_temp_free_i64(tmp64);
7511 store_reg(s, rn, tmp);
7512 } else {
7513 if (insn & (1 << 5))
7514 gen_swap_half(tmp2);
7515 gen_smul_dual(tmp, tmp2);
7516 if (insn & (1 << 6)) {
7517 /* This subtraction cannot overflow. */
7518 tcg_gen_sub_i32(tmp, tmp, tmp2);
7519 } else {
7520 /* This addition cannot overflow 32 bits;
7521 * however it may overflow when considered as a signed
7522 * operation, in which case we must set the Q flag.
7523 */
7524 gen_helper_add_setq(tmp, tmp, tmp2);
7525 }
7526 tcg_temp_free_i32(tmp2);
7527 if (insn & (1 << 22)) {
7528 /* smlald, smlsld */
7529 tmp64 = tcg_temp_new_i64();
7530 tcg_gen_ext_i32_i64(tmp64, tmp);
7531 tcg_temp_free_i32(tmp);
7532 gen_addq(s, tmp64, rd, rn);
7533 gen_storeq_reg(s, rd, rn, tmp64);
7534 tcg_temp_free_i64(tmp64);
7535 } else {
7536 /* smuad, smusd, smlad, smlsd */
7537 if (rd != 15)
7538 {
7539 tmp2 = load_reg(s, rd);
7540 gen_helper_add_setq(tmp, tmp, tmp2);
7541 tcg_temp_free_i32(tmp2);
7542 }
7543 store_reg(s, rn, tmp);
7544 }
7545 }
7546 break;
7547 case 3:
7548 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7549 switch (op1) {
7550 case 0: /* Unsigned sum of absolute differences. */
7551 ARCH(6);
7552 tmp = load_reg(s, rm);
7553 tmp2 = load_reg(s, rs);
7554 gen_helper_usad8(tmp, tmp, tmp2);
7555 tcg_temp_free_i32(tmp2);
7556 if (rd != 15) {
7557 tmp2 = load_reg(s, rd);
7558 tcg_gen_add_i32(tmp, tmp, tmp2);
7559 tcg_temp_free_i32(tmp2);
7560 }
7561 store_reg(s, rn, tmp);
7562 break;
7563 case 0x20: case 0x24: case 0x28: case 0x2c:
7564 /* Bitfield insert/clear. */
7565 ARCH(6T2);
7566 shift = (insn >> 7) & 0x1f;
7567 i = (insn >> 16) & 0x1f;
7568 i = i + 1 - shift;
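/* insn[20:16] holds the field's msb; i is now the field width.
 * A width of 32 (lsb 0, msb 31) replaces the whole register, so no
 * read-modify-write is needed in that case.
 */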
7569 if (rm == 15) {
7570 tmp = tcg_temp_new_i32();
7571 tcg_gen_movi_i32(tmp, 0);
7572 } else {
7573 tmp = load_reg(s, rm);
7574 }
7575 if (i != 32) {
7576 tmp2 = load_reg(s, rd);
7577 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7578 tcg_temp_free_i32(tmp2);
7579 }
7580 store_reg(s, rd, tmp);
7581 break;
7582 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7583 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7584 ARCH(6T2);
7585 tmp = load_reg(s, rm);
7586 shift = (insn >> 7) & 0x1f;
7587 i = ((insn >> 16) & 0x1f) + 1;
7588 if (shift + i > 32)
7589 goto illegal_op;
7590 if (i < 32) {
7591 if (op1 & 0x20) {
7592 gen_ubfx(tmp, shift, (1u << i) - 1);
7593 } else {
7594 gen_sbfx(tmp, shift, i);
7595 }
7596 }
7597 store_reg(s, rd, tmp);
7598 break;
7599 default:
7600 goto illegal_op;
7601 }
7602 break;
7603 }
7604 break;
7605 }
7606 do_ldst:
7607 /* Check for undefined extension instructions
7608 * per the ARM Bible, i.e.:
7609 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7610 */
7611 sh = (0xf << 20) | (0xf << 4);
7612 if (op1 == 0x7 && ((insn & sh) == sh))
7613 {
7614 goto illegal_op;
7615 }
7616 /* load/store byte/word */
7617 rn = (insn >> 16) & 0xf;
7618 rd = (insn >> 12) & 0xf;
7619 tmp2 = load_reg(s, rn);
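/* A post-indexed access with the W bit set (P == 0, W == 1) is
 * LDRT/STRT/LDRBT/STRBT, which is performed with user-mode
 * privileges even when executed from privileged code.
 */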
7620 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7621 if (insn & (1 << 24))
7622 gen_add_data_offset(s, insn, tmp2);
7623 if (insn & (1 << 20)) {
7624 /* load */
7625 if (insn & (1 << 22)) {
7626 tmp = gen_ld8u(tmp2, i);
7627 } else {
7628 tmp = gen_ld32(tmp2, i);
7629 }
7630 } else {
7631 /* store */
7632 tmp = load_reg(s, rd);
7633 if (insn & (1 << 22))
7634 gen_st8(tmp, tmp2, i);
7635 else
7636 gen_st32(tmp, tmp2, i);
7637 }
7638 if (!(insn & (1 << 24))) {
7639 gen_add_data_offset(s, insn, tmp2);
7640 store_reg(s, rn, tmp2);
7641 } else if (insn & (1 << 21)) {
7642 store_reg(s, rn, tmp2);
7643 } else {
7644 tcg_temp_free_i32(tmp2);
7645 }
7646 if (insn & (1 << 20)) {
7647 /* Complete the load. */
7648 store_reg_from_load(env, s, rd, tmp);
7649 }
7650 break;
7651 case 0x08:
7652 case 0x09:
7653 {
7654 int j, n, user, loaded_base;
7655 TCGv loaded_var;
7656 /* load/store multiple words */
7657 /* XXX: store correct base if write back */
7658 user = 0;
7659 if (insn & (1 << 22)) {
7660 if (IS_USER(s))
7661 goto illegal_op; /* only usable in supervisor mode */
7662
7663 if ((insn & (1 << 15)) == 0)
7664 user = 1;
7665 }
7666 rn = (insn >> 16) & 0xf;
7667 addr = load_reg(s, rn);
7668
7669 /* compute total size */
7670 loaded_base = 0;
7671 TCGV_UNUSED(loaded_var);
7672 n = 0;
7673 for(i=0;i<16;i++) {
7674 if (insn & (1 << i))
7675 n++;
7676 }
7677 /* XXX: test invalid n == 0 case ? */
7678 if (insn & (1 << 23)) {
7679 if (insn & (1 << 24)) {
7680 /* pre increment */
7681 tcg_gen_addi_i32(addr, addr, 4);
7682 } else {
7683 /* post increment */
7684 }
7685 } else {
7686 if (insn & (1 << 24)) {
7687 /* pre decrement */
7688 tcg_gen_addi_i32(addr, addr, -(n * 4));
7689 } else {
7690 /* post decrement */
7691 if (n != 1)
7692 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7693 }
7694 }
7695 j = 0;
7696 for(i=0;i<16;i++) {
7697 if (insn & (1 << i)) {
7698 if (insn & (1 << 20)) {
7699 /* load */
7700 tmp = gen_ld32(addr, IS_USER(s));
7701 if (user) {
7702 tmp2 = tcg_const_i32(i);
7703 gen_helper_set_user_reg(tmp2, tmp);
7704 tcg_temp_free_i32(tmp2);
7705 tcg_temp_free_i32(tmp);
7706 } else if (i == rn) {
7707 loaded_var = tmp;
7708 loaded_base = 1;
7709 } else {
7710 store_reg_from_load(env, s, i, tmp);
7711 }
7712 } else {
7713 /* store */
7714 if (i == 15) {
7715 /* special case: r15 = PC + 8 */
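/* (s->pc was already advanced past this insn, so +4 gives the
 * insn address + 8) */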
7716 val = (long)s->pc + 4;
7717 tmp = tcg_temp_new_i32();
7718 tcg_gen_movi_i32(tmp, val);
7719 } else if (user) {
7720 tmp = tcg_temp_new_i32();
7721 tmp2 = tcg_const_i32(i);
7722 gen_helper_get_user_reg(tmp, tmp2);
7723 tcg_temp_free_i32(tmp2);
7724 } else {
7725 tmp = load_reg(s, i);
7726 }
7727 gen_st32(tmp, addr, IS_USER(s));
7728 }
7729 j++;
7730 /* no need to add after the last transfer */
7731 if (j != n)
7732 tcg_gen_addi_i32(addr, addr, 4);
7733 }
7734 }
7735 if (insn & (1 << 21)) {
7736 /* write back */
7737 if (insn & (1 << 23)) {
7738 if (insn & (1 << 24)) {
7739 /* pre increment */
7740 } else {
7741 /* post increment */
7742 tcg_gen_addi_i32(addr, addr, 4);
7743 }
7744 } else {
7745 if (insn & (1 << 24)) {
7746 /* pre decrement */
7747 if (n != 1)
7748 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7749 } else {
7750 /* post decrement */
7751 tcg_gen_addi_i32(addr, addr, -(n * 4));
7752 }
7753 }
7754 store_reg(s, rn, addr);
7755 } else {
7756 tcg_temp_free_i32(addr);
7757 }
7758 if (loaded_base) {
7759 store_reg(s, rn, loaded_var);
7760 }
7761 if ((insn & (1 << 22)) && !user) {
7762 /* Restore CPSR from SPSR. */
7763 tmp = load_cpu_field(spsr);
7764 gen_set_cpsr(tmp, 0xffffffff);
7765 tcg_temp_free_i32(tmp);
7766 s->is_jmp = DISAS_UPDATE;
7767 }
7768 }
7769 break;
7770 case 0xa:
7771 case 0xb:
7772 {
7773 int32_t offset;
7774
7775 /* branch (and link) */
7776 val = (int32_t)s->pc;
7777 if (insn & (1 << 24)) {
7778 tmp = tcg_temp_new_i32();
7779 tcg_gen_movi_i32(tmp, val);
7780 store_reg(s, 14, tmp);
7781 }
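/* Sign-extend the 24-bit immediate and scale it by 4; the extra +4
 * below accounts for the pipeline (PC reads as insn address + 8).
 */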
7782 offset = (((int32_t)insn << 8) >> 8);
7783 val += (offset << 2) + 4;
7784 gen_jmp(s, val);
7785 }
7786 break;
7787 case 0xc:
7788 case 0xd:
7789 case 0xe:
7790 /* Coprocessor. */
7791 if (disas_coproc_insn(env, s, insn))
7792 goto illegal_op;
7793 break;
7794 case 0xf:
7795 /* swi */
7796 gen_set_pc_im(s->pc);
7797 s->is_jmp = DISAS_SWI;
7798 break;
7799 default:
7800 illegal_op:
7801 gen_exception_insn(s, 4, EXCP_UDEF);
7802 break;
7803 }
7804 }
7805 }
7806
7807 /* Return true if this is a Thumb-2 logical op. */
7808 static int
7809 thumb2_logic_op(int op)
7810 {
7811 return (op < 8);
7812 }
7813
7814 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7815 then set condition code flags based on the result of the operation.
7816 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7817 to the high bit of T1.
7818 Returns zero if the opcode is valid. */
7819
7820 static int
7821 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7822 {
7823 int logic_cc;
7824
7825 logic_cc = 0;
7826 switch (op) {
7827 case 0: /* and */
7828 tcg_gen_and_i32(t0, t0, t1);
7829 logic_cc = conds;
7830 break;
7831 case 1: /* bic */
7832 tcg_gen_andc_i32(t0, t0, t1);
7833 logic_cc = conds;
7834 break;
7835 case 2: /* orr */
7836 tcg_gen_or_i32(t0, t0, t1);
7837 logic_cc = conds;
7838 break;
7839 case 3: /* orn */
7840 tcg_gen_orc_i32(t0, t0, t1);
7841 logic_cc = conds;
7842 break;
7843 case 4: /* eor */
7844 tcg_gen_xor_i32(t0, t0, t1);
7845 logic_cc = conds;
7846 break;
7847 case 8: /* add */
7848 if (conds)
7849 gen_helper_add_cc(t0, t0, t1);
7850 else
7851 tcg_gen_add_i32(t0, t0, t1);
7852 break;
7853 case 10: /* adc */
7854 if (conds)
7855 gen_helper_adc_cc(t0, t0, t1);
7856 else
7857 gen_adc(t0, t1);
7858 break;
7859 case 11: /* sbc */
7860 if (conds)
7861 gen_helper_sbc_cc(t0, t0, t1);
7862 else
7863 gen_sub_carry(t0, t0, t1);
7864 break;
7865 case 13: /* sub */
7866 if (conds)
7867 gen_helper_sub_cc(t0, t0, t1);
7868 else
7869 tcg_gen_sub_i32(t0, t0, t1);
7870 break;
7871 case 14: /* rsb */
7872 if (conds)
7873 gen_helper_sub_cc(t0, t1, t0);
7874 else
7875 tcg_gen_sub_i32(t0, t1, t0);
7876 break;
7877 default: /* 5, 6, 7, 9, 12, 15. */
7878 return 1;
7879 }
7880 if (logic_cc) {
7881 gen_logic_CC(t0);
7882 if (shifter_out)
7883 gen_set_CF_bit31(t1);
7884 }
7885 return 0;
7886 }
7887
7888 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7889 is not legal. */
7890 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7891 {
7892 uint32_t insn, imm, shift, offset;
7893 uint32_t rd, rn, rm, rs;
7894 TCGv tmp;
7895 TCGv tmp2;
7896 TCGv tmp3;
7897 TCGv addr;
7898 TCGv_i64 tmp64;
7899 int op;
7900 int shiftop;
7901 int conds;
7902 int logic_cc;
7903
7904 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7905 || arm_feature (env, ARM_FEATURE_M))) {
7906 /* Thumb-1 cores may need to treat bl and blx as a pair of
7907 16-bit instructions to get correct prefetch abort behavior. */
7908 insn = insn_hw1;
7909 if ((insn & (1 << 12)) == 0) {
7910 ARCH(5);
7911 /* Second half of blx. */
7912 offset = ((insn & 0x7ff) << 1);
7913 tmp = load_reg(s, 14);
7914 tcg_gen_addi_i32(tmp, tmp, offset);
7915 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7916
7917 tmp2 = tcg_temp_new_i32();
7918 tcg_gen_movi_i32(tmp2, s->pc | 1);
7919 store_reg(s, 14, tmp2);
7920 gen_bx(s, tmp);
7921 return 0;
7922 }
7923 if (insn & (1 << 11)) {
7924 /* Second half of bl. */
7925 offset = ((insn & 0x7ff) << 1) | 1;
7926 tmp = load_reg(s, 14);
7927 tcg_gen_addi_i32(tmp, tmp, offset);
7928
7929 tmp2 = tcg_temp_new_i32();
7930 tcg_gen_movi_i32(tmp2, s->pc | 1);
7931 store_reg(s, 14, tmp2);
7932 gen_bx(s, tmp);
7933 return 0;
7934 }
7935 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7936 /* Instruction spans a page boundary. Implement it as two
7937 16-bit instructions in case the second half causes a
7938 prefetch abort. */
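/* This is the first half of a bl/blx pair: accumulate
 * PC + sign_extend(imm11 << 12) into LR; the second half of the
 * pair will complete the branch.
 */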
7939 offset = ((int32_t)insn << 21) >> 9;
7940 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7941 return 0;
7942 }
7943 /* Fall through to 32-bit decode. */
7944 }
7945
7946 insn = lduw_code(s->pc);
7947 s->pc += 2;
7948 insn |= (uint32_t)insn_hw1 << 16;
7949
7950 if ((insn & 0xf800e800) != 0xf000e800) {
7951 ARCH(6T2);
7952 }
7953
7954 rn = (insn >> 16) & 0xf;
7955 rs = (insn >> 12) & 0xf;
7956 rd = (insn >> 8) & 0xf;
7957 rm = insn & 0xf;
7958 switch ((insn >> 25) & 0xf) {
7959 case 0: case 1: case 2: case 3:
7960 /* 16-bit instructions. Should never happen. */
7961 abort();
7962 case 4:
7963 if (insn & (1 << 22)) {
7964 /* Other load/store, table branch. */
7965 if (insn & 0x01200000) {
7966 /* Load/store doubleword. */
7967 if (rn == 15) {
7968 addr = tcg_temp_new_i32();
7969 tcg_gen_movi_i32(addr, s->pc & ~3);
7970 } else {
7971 addr = load_reg(s, rn);
7972 }
7973 offset = (insn & 0xff) * 4;
7974 if ((insn & (1 << 23)) == 0)
7975 offset = -offset;
7976 if (insn & (1 << 24)) {
7977 tcg_gen_addi_i32(addr, addr, offset);
7978 offset = 0;
7979 }
7980 if (insn & (1 << 20)) {
7981 /* ldrd */
7982 tmp = gen_ld32(addr, IS_USER(s));
7983 store_reg(s, rs, tmp);
7984 tcg_gen_addi_i32(addr, addr, 4);
7985 tmp = gen_ld32(addr, IS_USER(s));
7986 store_reg(s, rd, tmp);
7987 } else {
7988 /* strd */
7989 tmp = load_reg(s, rs);
7990 gen_st32(tmp, addr, IS_USER(s));
7991 tcg_gen_addi_i32(addr, addr, 4);
7992 tmp = load_reg(s, rd);
7993 gen_st32(tmp, addr, IS_USER(s));
7994 }
7995 if (insn & (1 << 21)) {
7996 /* Base writeback. */
7997 if (rn == 15)
7998 goto illegal_op;
7999 tcg_gen_addi_i32(addr, addr, offset - 4);
8000 store_reg(s, rn, addr);
8001 } else {
8002 tcg_temp_free_i32(addr);
8003 }
8004 } else if ((insn & (1 << 23)) == 0) {
8005 /* Load/store exclusive word. */
8006 addr = tcg_temp_local_new();
8007 load_reg_var(s, addr, rn);
8008 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8009 if (insn & (1 << 20)) {
8010 gen_load_exclusive(s, rs, 15, addr, 2);
8011 } else {
8012 gen_store_exclusive(s, rd, rs, 15, addr, 2);
8013 }
8014 tcg_temp_free(addr);
8015 } else if ((insn & (1 << 6)) == 0) {
8016 /* Table Branch. */
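/* TBB/TBH: branch forward by twice the unsigned byte (or halfword)
 * table entry at Rn + Rm (Rn + 2 * Rm for tbh), taken relative to
 * this insn's address + 4.
 */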
8017 if (rn == 15) {
8018 addr = tcg_temp_new_i32();
8019 tcg_gen_movi_i32(addr, s->pc);
8020 } else {
8021 addr = load_reg(s, rn);
8022 }
8023 tmp = load_reg(s, rm);
8024 tcg_gen_add_i32(addr, addr, tmp);
8025 if (insn & (1 << 4)) {
8026 /* tbh */
8027 tcg_gen_add_i32(addr, addr, tmp);
8028 tcg_temp_free_i32(tmp);
8029 tmp = gen_ld16u(addr, IS_USER(s));
8030 } else { /* tbb */
8031 tcg_temp_free_i32(tmp);
8032 tmp = gen_ld8u(addr, IS_USER(s));
8033 }
8034 tcg_temp_free_i32(addr);
8035 tcg_gen_shli_i32(tmp, tmp, 1);
8036 tcg_gen_addi_i32(tmp, tmp, s->pc);
8037 store_reg(s, 15, tmp);
8038 } else {
8039 /* Load/store exclusive byte/halfword/doubleword. */
8040 ARCH(7);
8041 op = (insn >> 4) & 0x3;
8042 if (op == 2) {
8043 goto illegal_op;
8044 }
8045 addr = tcg_temp_local_new();
8046 load_reg_var(s, addr, rn);
8047 if (insn & (1 << 20)) {
8048 gen_load_exclusive(s, rs, rd, addr, op);
8049 } else {
8050 gen_store_exclusive(s, rm, rs, rd, addr, op);
8051 }
8052 tcg_temp_free(addr);
8053 }
8054 } else {
8055 /* Load/store multiple, RFE, SRS. */
8056 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8057 /* Not available in user mode. */
8058 if (IS_USER(s))
8059 goto illegal_op;
8060 if (insn & (1 << 20)) {
8061 /* rfe */
8062 addr = load_reg(s, rn);
8063 if ((insn & (1 << 24)) == 0)
8064 tcg_gen_addi_i32(addr, addr, -8);
8065 /* Load PC into tmp and CPSR into tmp2. */
8066 tmp = gen_ld32(addr, 0);
8067 tcg_gen_addi_i32(addr, addr, 4);
8068 tmp2 = gen_ld32(addr, 0);
8069 if (insn & (1 << 21)) {
8070 /* Base writeback. */
8071 if (insn & (1 << 24)) {
8072 tcg_gen_addi_i32(addr, addr, 4);
8073 } else {
8074 tcg_gen_addi_i32(addr, addr, -4);
8075 }
8076 store_reg(s, rn, addr);
8077 } else {
8078 tcg_temp_free_i32(addr);
8079 }
8080 gen_rfe(s, tmp, tmp2);
8081 } else {
8082 /* srs */
8083 op = (insn & 0x1f);
8084 addr = tcg_temp_new_i32();
8085 tmp = tcg_const_i32(op);
8086 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8087 tcg_temp_free_i32(tmp);
8088 if ((insn & (1 << 24)) == 0) {
8089 tcg_gen_addi_i32(addr, addr, -8);
8090 }
8091 tmp = load_reg(s, 14);
8092 gen_st32(tmp, addr, 0);
8093 tcg_gen_addi_i32(addr, addr, 4);
8094 tmp = tcg_temp_new_i32();
8095 gen_helper_cpsr_read(tmp);
8096 gen_st32(tmp, addr, 0);
8097 if (insn & (1 << 21)) {
8098 if ((insn & (1 << 24)) == 0) {
8099 tcg_gen_addi_i32(addr, addr, -4);
8100 } else {
8101 tcg_gen_addi_i32(addr, addr, 4);
8102 }
8103 tmp = tcg_const_i32(op);
8104 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8105 tcg_temp_free_i32(tmp);
8106 } else {
8107 tcg_temp_free_i32(addr);
8108 }
8109 }
8110 } else {
8111 int i, loaded_base = 0;
8112 TCGv loaded_var;
8113 /* Load/store multiple. */
8114 addr = load_reg(s, rn);
8115 offset = 0;
8116 for (i = 0; i < 16; i++) {
8117 if (insn & (1 << i))
8118 offset += 4;
8119 }
8120 if (insn & (1 << 24)) {
8121 tcg_gen_addi_i32(addr, addr, -offset);
8122 }
8123
8124 TCGV_UNUSED(loaded_var);
8125 for (i = 0; i < 16; i++) {
8126 if ((insn & (1 << i)) == 0)
8127 continue;
8128 if (insn & (1 << 20)) {
8129 /* Load. */
8130 tmp = gen_ld32(addr, IS_USER(s));
8131 if (i == 15) {
8132 gen_bx(s, tmp);
8133 } else if (i == rn) {
8134 loaded_var = tmp;
8135 loaded_base = 1;
8136 } else {
8137 store_reg(s, i, tmp);
8138 }
8139 } else {
8140 /* Store. */
8141 tmp = load_reg(s, i);
8142 gen_st32(tmp, addr, IS_USER(s));
8143 }
8144 tcg_gen_addi_i32(addr, addr, 4);
8145 }
8146 if (loaded_base) {
8147 store_reg(s, rn, loaded_var);
8148 }
8149 if (insn & (1 << 21)) {
8150 /* Base register writeback. */
8151 if (insn & (1 << 24)) {
8152 tcg_gen_addi_i32(addr, addr, -offset);
8153 }
8154 /* Fault if writeback register is in register list. */
8155 if (insn & (1 << rn))
8156 goto illegal_op;
8157 store_reg(s, rn, addr);
8158 } else {
8159 tcg_temp_free_i32(addr);
8160 }
8161 }
8162 }
8163 break;
8164 case 5:
8165
8166 op = (insn >> 21) & 0xf;
8167 if (op == 6) {
8168 /* Halfword pack. */
8169 tmp = load_reg(s, rn);
8170 tmp2 = load_reg(s, rm);
8171 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8172 if (insn & (1 << 5)) {
8173 /* pkhtb */
8174 if (shift == 0)
8175 shift = 31;
8176 tcg_gen_sari_i32(tmp2, tmp2, shift);
8177 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8178 tcg_gen_ext16u_i32(tmp2, tmp2);
8179 } else {
8180 /* pkhbt */
8181 if (shift)
8182 tcg_gen_shli_i32(tmp2, tmp2, shift);
8183 tcg_gen_ext16u_i32(tmp, tmp);
8184 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8185 }
8186 tcg_gen_or_i32(tmp, tmp, tmp2);
8187 tcg_temp_free_i32(tmp2);
8188 store_reg(s, rd, tmp);
8189 } else {
8190 /* Data processing register constant shift. */
8191 if (rn == 15) {
8192 tmp = tcg_temp_new_i32();
8193 tcg_gen_movi_i32(tmp, 0);
8194 } else {
8195 tmp = load_reg(s, rn);
8196 }
8197 tmp2 = load_reg(s, rm);
8198
8199 shiftop = (insn >> 4) & 3;
8200 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8201 conds = (insn & (1 << 20)) != 0;
8202 logic_cc = (conds && thumb2_logic_op(op));
8203 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8204 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8205 goto illegal_op;
8206 tcg_temp_free_i32(tmp2);
8207 if (rd != 15) {
8208 store_reg(s, rd, tmp);
8209 } else {
8210 tcg_temp_free_i32(tmp);
8211 }
8212 }
8213 break;
8214 case 13: /* Misc data processing. */
8215 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8216 if (op < 4 && (insn & 0xf000) != 0xf000)
8217 goto illegal_op;
8218 switch (op) {
8219 case 0: /* Register controlled shift. */
8220 tmp = load_reg(s, rn);
8221 tmp2 = load_reg(s, rm);
8222 if ((insn & 0x70) != 0)
8223 goto illegal_op;
8224 op = (insn >> 21) & 3;
8225 logic_cc = (insn & (1 << 20)) != 0;
8226 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8227 if (logic_cc)
8228 gen_logic_CC(tmp);
8229 store_reg_bx(env, s, rd, tmp);
8230 break;
8231 case 1: /* Sign/zero extend. */
8232 tmp = load_reg(s, rm);
8233 shift = (insn >> 4) & 3;
8234 /* ??? In many cases it's not necessary to do a
8235 rotate; a shift is sufficient. */
8236 if (shift != 0)
8237 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8238 op = (insn >> 20) & 7;
8239 switch (op) {
8240 case 0: gen_sxth(tmp); break;
8241 case 1: gen_uxth(tmp); break;
8242 case 2: gen_sxtb16(tmp); break;
8243 case 3: gen_uxtb16(tmp); break;
8244 case 4: gen_sxtb(tmp); break;
8245 case 5: gen_uxtb(tmp); break;
8246 default: goto illegal_op;
8247 }
8248 if (rn != 15) {
8249 tmp2 = load_reg(s, rn);
8250 if ((op >> 1) == 1) {
8251 gen_add16(tmp, tmp2);
8252 } else {
8253 tcg_gen_add_i32(tmp, tmp, tmp2);
8254 tcg_temp_free_i32(tmp2);
8255 }
8256 }
8257 store_reg(s, rd, tmp);
8258 break;
8259 case 2: /* SIMD add/subtract. */
8260 op = (insn >> 20) & 7;
8261 shift = (insn >> 4) & 7;
8262 if ((op & 3) == 3 || (shift & 3) == 3)
8263 goto illegal_op;
8264 tmp = load_reg(s, rn);
8265 tmp2 = load_reg(s, rm);
8266 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8267 tcg_temp_free_i32(tmp2);
8268 store_reg(s, rd, tmp);
8269 break;
8270 case 3: /* Other data processing. */
8271 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8272 if (op < 4) {
8273 /* Saturating add/subtract. */
8274 tmp = load_reg(s, rn);
8275 tmp2 = load_reg(s, rm);
8276 if (op & 1)
8277 gen_helper_double_saturate(tmp, tmp);
8278 if (op & 2)
8279 gen_helper_sub_saturate(tmp, tmp2, tmp);
8280 else
8281 gen_helper_add_saturate(tmp, tmp, tmp2);
8282 tcg_temp_free_i32(tmp2);
8283 } else {
8284 tmp = load_reg(s, rn);
8285 switch (op) {
8286 case 0x0a: /* rbit */
8287 gen_helper_rbit(tmp, tmp);
8288 break;
8289 case 0x08: /* rev */
8290 tcg_gen_bswap32_i32(tmp, tmp);
8291 break;
8292 case 0x09: /* rev16 */
8293 gen_rev16(tmp);
8294 break;
8295 case 0x0b: /* revsh */
8296 gen_revsh(tmp);
8297 break;
8298 case 0x10: /* sel */
8299 tmp2 = load_reg(s, rm);
8300 tmp3 = tcg_temp_new_i32();
8301 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
8302 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8303 tcg_temp_free_i32(tmp3);
8304 tcg_temp_free_i32(tmp2);
8305 break;
8306 case 0x18: /* clz */
8307 gen_helper_clz(tmp, tmp);
8308 break;
8309 default:
8310 goto illegal_op;
8311 }
8312 }
8313 store_reg(s, rd, tmp);
8314 break;
8315 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8316 op = (insn >> 4) & 0xf;
8317 tmp = load_reg(s, rn);
8318 tmp2 = load_reg(s, rm);
8319 switch ((insn >> 20) & 7) {
8320 case 0: /* 32 x 32 -> 32 */
8321 tcg_gen_mul_i32(tmp, tmp, tmp2);
8322 tcg_temp_free_i32(tmp2);
8323 if (rs != 15) {
8324 tmp2 = load_reg(s, rs);
8325 if (op)
8326 tcg_gen_sub_i32(tmp, tmp2, tmp);
8327 else
8328 tcg_gen_add_i32(tmp, tmp, tmp2);
8329 tcg_temp_free_i32(tmp2);
8330 }
8331 break;
8332 case 1: /* 16 x 16 -> 32 */
8333 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8334 tcg_temp_free_i32(tmp2);
8335 if (rs != 15) {
8336 tmp2 = load_reg(s, rs);
8337 gen_helper_add_setq(tmp, tmp, tmp2);
8338 tcg_temp_free_i32(tmp2);
8339 }
8340 break;
8341 case 2: /* Dual multiply add. */
8342 case 4: /* Dual multiply subtract. */
8343 if (op)
8344 gen_swap_half(tmp2);
8345 gen_smul_dual(tmp, tmp2);
8346 if (insn & (1 << 22)) {
8347 /* This subtraction cannot overflow. */
8348 tcg_gen_sub_i32(tmp, tmp, tmp2);
8349 } else {
8350 /* This addition cannot overflow 32 bits;
8351 * however it may overflow when considered as a signed
8352 * operation, in which case we must set the Q flag.
8353 */
8354 gen_helper_add_setq(tmp, tmp, tmp2);
8355 }
8356 tcg_temp_free_i32(tmp2);
8357 if (rs != 15)
8358 {
8359 tmp2 = load_reg(s, rs);
8360 gen_helper_add_setq(tmp, tmp, tmp2);
8361 tcg_temp_free_i32(tmp2);
8362 }
8363 break;
8364 case 3: /* 32 * 16 -> 32msb */
8365 if (op)
8366 tcg_gen_sari_i32(tmp2, tmp2, 16);
8367 else
8368 gen_sxth(tmp2);
8369 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8370 tcg_gen_shri_i64(tmp64, tmp64, 16);
8371 tmp = tcg_temp_new_i32();
8372 tcg_gen_trunc_i64_i32(tmp, tmp64);
8373 tcg_temp_free_i64(tmp64);
8374 if (rs != 15)
8375 {
8376 tmp2 = load_reg(s, rs);
8377 gen_helper_add_setq(tmp, tmp, tmp2);
8378 tcg_temp_free_i32(tmp2);
8379 }
8380 break;
8381 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8382 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8383 if (rs != 15) {
8384 tmp = load_reg(s, rs);
8385 if (insn & (1 << 20)) {
8386 tmp64 = gen_addq_msw(tmp64, tmp);
8387 } else {
8388 tmp64 = gen_subq_msw(tmp64, tmp);
8389 }
8390 }
8391 if (insn & (1 << 4)) {
8392 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8393 }
8394 tcg_gen_shri_i64(tmp64, tmp64, 32);
8395 tmp = tcg_temp_new_i32();
8396 tcg_gen_trunc_i64_i32(tmp, tmp64);
8397 tcg_temp_free_i64(tmp64);
8398 break;
8399 case 7: /* Unsigned sum of absolute differences. */
8400 gen_helper_usad8(tmp, tmp, tmp2);
8401 tcg_temp_free_i32(tmp2);
8402 if (rs != 15) {
8403 tmp2 = load_reg(s, rs);
8404 tcg_gen_add_i32(tmp, tmp, tmp2);
8405 tcg_temp_free_i32(tmp2);
8406 }
8407 break;
8408 }
8409 store_reg(s, rd, tmp);
8410 break;
8411 case 6: case 7: /* 64-bit multiply, Divide. */
8412 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8413 tmp = load_reg(s, rn);
8414 tmp2 = load_reg(s, rm);
8415 if ((op & 0x50) == 0x10) {
8416 /* sdiv, udiv */
8417 if (!arm_feature(env, ARM_FEATURE_DIV))
8418 goto illegal_op;
8419 if (op & 0x20)
8420 gen_helper_udiv(tmp, tmp, tmp2);
8421 else
8422 gen_helper_sdiv(tmp, tmp, tmp2);
8423 tcg_temp_free_i32(tmp2);
8424 store_reg(s, rd, tmp);
8425 } else if ((op & 0xe) == 0xc) {
8426 /* Dual multiply accumulate long. */
8427 if (op & 1)
8428 gen_swap_half(tmp2);
8429 gen_smul_dual(tmp, tmp2);
8430 if (op & 0x10) {
8431 tcg_gen_sub_i32(tmp, tmp, tmp2);
8432 } else {
8433 tcg_gen_add_i32(tmp, tmp, tmp2);
8434 }
8435 tcg_temp_free_i32(tmp2);
8436 /* BUGFIX: the 32-bit result must be sign-extended before the 64-bit accumulate. */
8437 tmp64 = tcg_temp_new_i64();
8438 tcg_gen_ext_i32_i64(tmp64, tmp);
8439 tcg_temp_free_i32(tmp);
8440 gen_addq(s, tmp64, rs, rd);
8441 gen_storeq_reg(s, rs, rd, tmp64);
8442 tcg_temp_free_i64(tmp64);
8443 } else {
8444 if (op & 0x20) {
8445 /* Unsigned 64-bit multiply */
8446 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8447 } else {
8448 if (op & 8) {
8449 /* smlalxy */
8450 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8451 tcg_temp_free_i32(tmp2);
8452 tmp64 = tcg_temp_new_i64();
8453 tcg_gen_ext_i32_i64(tmp64, tmp);
8454 tcg_temp_free_i32(tmp);
8455 } else {
8456 /* Signed 64-bit multiply */
8457 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8458 }
8459 }
8460 if (op & 4) {
8461 /* umaal */
8462 gen_addq_lo(s, tmp64, rs);
8463 gen_addq_lo(s, tmp64, rd);
8464 } else if (op & 0x40) {
8465 /* 64-bit accumulate. */
8466 gen_addq(s, tmp64, rs, rd);
8467 }
8468 gen_storeq_reg(s, rs, rd, tmp64);
8469 tcg_temp_free_i64(tmp64);
8470 }
8471 break;
8472 }
8473 break;
8474 case 6: case 7: case 14: case 15:
8475 /* Coprocessor. */
8476 if (((insn >> 24) & 3) == 3) {
8477 /* Translate into the equivalent ARM encoding. */
8478 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8479 if (disas_neon_data_insn(env, s, insn))
8480 goto illegal_op;
8481 } else {
8482 if (insn & (1 << 28))
8483 goto illegal_op;
8484 if (disas_coproc_insn(env, s, insn))
8485 goto illegal_op;
8486 }
8487 break;
8488 case 8: case 9: case 10: case 11:
8489 if (insn & (1 << 15)) {
8490 /* Branches, misc control. */
8491 if (insn & 0x5000) {
8492 /* Unconditional branch. */
8493 /* signextend(hw1[10:0]) -> offset[31:12]. */
8494 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8495 /* hw1[10:0] -> offset[11:1]. */
8496 offset |= (insn & 0x7ff) << 1;
8497 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8498 offset[24:22] already have the same value because of the
8499 sign extension above. */
8500 offset ^= ((~insn) & (1 << 13)) << 10;
8501 offset ^= ((~insn) & (1 << 11)) << 11;
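/* Worked example, added for illustration: for a T4 encoding with
 * S = 0, imm10 = 0, J1 = J2 = 0 and imm11 = 1, the shifts above
 * leave offset = 0x2, then the two XORs set offset[23] = ~(J1 ^ S)
 * and offset[22] = ~(J2 ^ S), both 1 here, giving offset =
 * 0x00c00002 == SignExtend(S:I1:I2:imm10:imm11:0).
 */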
8502
8503 if (insn & (1 << 14)) {
8504 /* Branch and link. */
8505 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8506 }
8507
8508 offset += s->pc;
8509 if (insn & (1 << 12)) {
8510 /* b/bl */
8511 gen_jmp(s, offset);
8512 } else {
8513 /* blx */
8514 offset &= ~(uint32_t)2;
8515 /* thumb2 bx, no need to check */
8516 gen_bx_im(s, offset);
8517 }
8518 } else if (((insn >> 23) & 7) == 7) {
8519 /* Misc control */
8520 if (insn & (1 << 13))
8521 goto illegal_op;
8522
8523 if (insn & (1 << 26)) {
8524 /* Secure monitor call (v6Z) */
8525 goto illegal_op; /* not implemented. */
8526 } else {
8527 op = (insn >> 20) & 7;
8528 switch (op) {
8529 case 0: /* msr cpsr. */
8530 if (IS_M(env)) {
8531 tmp = load_reg(s, rn);
8532 addr = tcg_const_i32(insn & 0xff);
8533 gen_helper_v7m_msr(cpu_env, addr, tmp);
8534 tcg_temp_free_i32(addr);
8535 tcg_temp_free_i32(tmp);
8536 gen_lookup_tb(s);
8537 break;
8538 }
8539 /* fall through */
8540 case 1: /* msr spsr. */
8541 if (IS_M(env))
8542 goto illegal_op;
8543 tmp = load_reg(s, rn);
8544 if (gen_set_psr(s,
8545 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8546 op == 1, tmp))
8547 goto illegal_op;
8548 break;
8549 case 2: /* cps, nop-hint. */
8550 if (((insn >> 8) & 7) == 0) {
8551 gen_nop_hint(s, insn & 0xff);
8552 }
8553 /* Implemented as NOP in user mode. */
8554 if (IS_USER(s))
8555 break;
8556 offset = 0;
8557 imm = 0;
8558 if (insn & (1 << 10)) {
8559 if (insn & (1 << 7))
8560 offset |= CPSR_A;
8561 if (insn & (1 << 6))
8562 offset |= CPSR_I;
8563 if (insn & (1 << 5))
8564 offset |= CPSR_F;
8565 if (insn & (1 << 9))
8566 imm = CPSR_A | CPSR_I | CPSR_F;
8567 }
8568 if (insn & (1 << 8)) {
8569 offset |= 0x1f;
8570 imm |= (insn & 0x1f);
8571 }
8572 if (offset) {
8573 gen_set_psr_im(s, offset, 0, imm);
8574 }
8575 break;
8576 case 3: /* Special control operations. */
8577 ARCH(7);
8578 op = (insn >> 4) & 0xf;
8579 switch (op) {
8580 case 2: /* clrex */
8581 gen_clrex(s);
8582 break;
8583 case 4: /* dsb */
8584 case 5: /* dmb */
8585 case 6: /* isb */
8586 /* These execute as NOPs. */
8587 break;
8588 default:
8589 goto illegal_op;
8590 }
8591 break;
8592 case 4: /* bxj */
8593 /* Trivial implementation equivalent to bx. */
8594 tmp = load_reg(s, rn);
8595 gen_bx(s, tmp);
8596 break;
8597 case 5: /* Exception return. */
8598 if (IS_USER(s)) {
8599 goto illegal_op;
8600 }
8601 if (rn != 14 || rd != 15) {
8602 goto illegal_op;
8603 }
8604 tmp = load_reg(s, rn);
8605 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8606 gen_exception_return(s, tmp);
8607 break;
8608 case 6: /* mrs cpsr. */
8609 tmp = tcg_temp_new_i32();
8610 if (IS_M(env)) {
8611 addr = tcg_const_i32(insn & 0xff);
8612 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8613 tcg_temp_free_i32(addr);
8614 } else {
8615 gen_helper_cpsr_read(tmp);
8616 }
8617 store_reg(s, rd, tmp);
8618 break;
8619 case 7: /* mrs spsr. */
8620 /* Not accessible in user mode. */
8621 if (IS_USER(s) || IS_M(env))
8622 goto illegal_op;
8623 tmp = load_cpu_field(spsr);
8624 store_reg(s, rd, tmp);
8625 break;
8626 }
8627 }
8628 } else {
8629 /* Conditional branch. */
8630 op = (insn >> 22) & 0xf;
8631 /* Generate a conditional jump to next instruction. */
8632 s->condlabel = gen_new_label();
8633 gen_test_cc(op ^ 1, s->condlabel);
8634 s->condjmp = 1;
8635
8636 /* offset[11:1] = insn[10:0] */
8637 offset = (insn & 0x7ff) << 1;
8638 /* offset[17:12] = insn[21:16]. */
8639 offset |= (insn & 0x003f0000) >> 4;
8640 /* offset[31:20] = insn[26]. */
8641 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8642 /* offset[18] = insn[13]. */
8643 offset |= (insn & (1 << 13)) << 5;
8644 /* offset[19] = insn[11]. */
8645 offset |= (insn & (1 << 11)) << 8;
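/* Worked example, added for illustration: with S = 1 (insn[26]) and
 * every other field zero, the arithmetic shift above replicates S
 * into offset[31:20], so offset = 0xfff00000 = -(1 << 20), the most
 * negative displacement SignExtend(S:J2:J1:imm6:imm11:0) can encode.
 */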
8646
8647 /* jump to the offset */
8648 gen_jmp(s, s->pc + offset);
8649 }
8650 } else {
8651 /* Data processing immediate. */
8652 if (insn & (1 << 25)) {
8653 if (insn & (1 << 24)) {
8654 if (insn & (1 << 20))
8655 goto illegal_op;
8656 /* Bitfield/Saturate. */
8657 op = (insn >> 21) & 7;
8658 imm = insn & 0x1f;
8659 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8660 if (rn == 15) {
8661 tmp = tcg_temp_new_i32();
8662 tcg_gen_movi_i32(tmp, 0);
8663 } else {
8664 tmp = load_reg(s, rn);
8665 }
8666 switch (op) {
8667 case 2: /* Signed bitfield extract. */
8668 imm++;
8669 if (shift + imm > 32)
8670 goto illegal_op;
8671 if (imm < 32)
8672 gen_sbfx(tmp, shift, imm);
8673 break;
8674 case 6: /* Unsigned bitfield extract. */
8675 imm++;
8676 if (shift + imm > 32)
8677 goto illegal_op;
8678 if (imm < 32)
8679 gen_ubfx(tmp, shift, (1u << imm) - 1);
8680 break;
8681 case 3: /* Bitfield insert/clear. */
8682 if (imm < shift)
8683 goto illegal_op;
8684 imm = imm + 1 - shift;
8685 if (imm != 32) {
8686 tmp2 = load_reg(s, rd);
8687 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8688 tcg_temp_free_i32(tmp2);
8689 }
8690 break;
8691 case 7:
8692 goto illegal_op;
8693 default: /* Saturate. */
8694 if (shift) {
8695 if (op & 1)
8696 tcg_gen_sari_i32(tmp, tmp, shift);
8697 else
8698 tcg_gen_shli_i32(tmp, tmp, shift);
8699 }
8700 tmp2 = tcg_const_i32(imm);
8701 if (op & 4) {
8702 /* Unsigned. */
8703 if ((op & 1) && shift == 0)
8704 gen_helper_usat16(tmp, tmp, tmp2);
8705 else
8706 gen_helper_usat(tmp, tmp, tmp2);
8707 } else {
8708 /* Signed. */
8709 if ((op & 1) && shift == 0)
8710 gen_helper_ssat16(tmp, tmp, tmp2);
8711 else
8712 gen_helper_ssat(tmp, tmp, tmp2);
8713 }
8714 tcg_temp_free_i32(tmp2);
8715 break;
8716 }
8717 store_reg(s, rd, tmp);
8718 } else {
8719 imm = ((insn & 0x04000000) >> 15)
8720 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8721 if (insn & (1 << 22)) {
8722 /* 16-bit immediate. */
8723 imm |= (insn >> 4) & 0xf000;
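/* Illustrative decode, added: with imm4 = 0x1 (insn[19:16]), i = 1
 * (insn[26]), imm3 = 0x2 (insn[14:12]) and imm8 = 0x34, the two
 * steps above assemble imm = imm4:i:imm3:imm8 = 0x1a34, the 16-bit
 * constant consumed by movt/movw below.
 */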
8724 if (insn & (1 << 23)) {
8725 /* movt */
8726 tmp = load_reg(s, rd);
8727 tcg_gen_ext16u_i32(tmp, tmp);
8728 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8729 } else {
8730 /* movw */
8731 tmp = tcg_temp_new_i32();
8732 tcg_gen_movi_i32(tmp, imm);
8733 }
8734 } else {
8735 /* Add/sub 12-bit immediate. */
8736 if (rn == 15) {
8737 offset = s->pc & ~(uint32_t)3;
8738 if (insn & (1 << 23))
8739 offset -= imm;
8740 else
8741 offset += imm;
8742 tmp = tcg_temp_new_i32();
8743 tcg_gen_movi_i32(tmp, offset);
8744 } else {
8745 tmp = load_reg(s, rn);
8746 if (insn & (1 << 23))
8747 tcg_gen_subi_i32(tmp, tmp, imm);
8748 else
8749 tcg_gen_addi_i32(tmp, tmp, imm);
8750 }
8751 }
8752 store_reg(s, rd, tmp);
8753 }
8754 } else {
8755 int shifter_out = 0;
8756 /* modified 12-bit immediate. */
8757 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8758 imm = (insn & 0xff);
8759 switch (shift) {
8760 case 0: /* XY */
8761 /* Nothing to do. */
8762 break;
8763 case 1: /* 00XY00XY */
8764 imm |= imm << 16;
8765 break;
8766 case 2: /* XY00XY00 */
8767 imm |= imm << 16;
8768 imm <<= 8;
8769 break;
8770 case 3: /* XYXYXYXY */
8771 imm |= imm << 16;
8772 imm |= imm << 8;
8773 break;
8774 default: /* Rotated constant. */
8775 shift = (shift << 1) | (imm >> 7);
8776 imm |= 0x80;
8777 imm = imm << (32 - shift);
8778 shifter_out = 1;
8779 break;
8780 }
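/* Worked example for the rotated-constant case, added for
 * illustration: i:imm3 = 0b0100 with imm8 = 0x60 gives shift =
 * (0b0100 << 1) | 0 = 8 and imm = 0xe0 << (32 - 8) = 0xe0000000,
 * i.e. ROR(0x80 | imm8[6:0], i:imm3:imm8[7]). The left shift alone
 * suffices because shift >= 8 here, so no set bits wrap around.
 */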
8781 tmp2 = tcg_temp_new_i32();
8782 tcg_gen_movi_i32(tmp2, imm);
8783 rn = (insn >> 16) & 0xf;
8784 if (rn == 15) {
8785 tmp = tcg_temp_new_i32();
8786 tcg_gen_movi_i32(tmp, 0);
8787 } else {
8788 tmp = load_reg(s, rn);
8789 }
8790 op = (insn >> 21) & 0xf;
8791 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8792 shifter_out, tmp, tmp2))
8793 goto illegal_op;
8794 tcg_temp_free_i32(tmp2);
8795 rd = (insn >> 8) & 0xf;
8796 if (rd != 15) {
8797 store_reg(s, rd, tmp);
8798 } else {
8799 tcg_temp_free_i32(tmp);
8800 }
8801 }
8802 }
8803 break;
8804 case 12: /* Load/store single data item. */
8805 {
8806 int postinc = 0;
8807 int writeback = 0;
8808 int user;
8809 if ((insn & 0x01100000) == 0x01000000) {
8810 if (disas_neon_ls_insn(env, s, insn))
8811 goto illegal_op;
8812 break;
8813 }
8814 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
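/* Note added for clarity: op is the access size from insn[22:21]
 * with a sign bit folded in from insn[24], giving 0/1/2 for
 * unsigned byte/halfword/word and 4/5 for signed byte/halfword,
 * which is the layout the load/store switches below rely on.
 */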
8815 if (rs == 15) {
8816 if (!(insn & (1 << 20))) {
8817 goto illegal_op;
8818 }
8819 if (op != 2) {
8820 /* Byte or halfword load space with dest == r15: memory hints.
8821 * Catch them early so we don't emit pointless addressing code.
8822 * This space is a mix of:
8823 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8824 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8825 * cores)
8826 * unallocated hints, which must be treated as NOPs
8827 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8828 * which is easiest for the decoding logic
8829 * Some space which must UNDEF
8830 */
8831 int op1 = (insn >> 23) & 3;
8832 int op2 = (insn >> 6) & 0x3f;
8833 if (op & 2) {
8834 goto illegal_op;
8835 }
8836 if (rn == 15) {
8837 /* UNPREDICTABLE or unallocated hint */
8838 return 0;
8839 }
8840 if (op1 & 1) {
8841 return 0; /* PLD* or unallocated hint */
8842 }
8843 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8844 return 0; /* PLD* or unallocated hint */
8845 }
8846 /* UNDEF space, or an UNPREDICTABLE */
8847 return 1;
8848 }
8849 }
8850 user = IS_USER(s);
8851 if (rn == 15) {
8852 addr = tcg_temp_new_i32();
8853 /* PC relative. */
8854 /* s->pc has already been incremented by 4. */
8855 imm = s->pc & 0xfffffffc;
8856 if (insn & (1 << 23))
8857 imm += insn & 0xfff;
8858 else
8859 imm -= insn & 0xfff;
8860 tcg_gen_movi_i32(addr, imm);
8861 } else {
8862 addr = load_reg(s, rn);
8863 if (insn & (1 << 23)) {
8864 /* Positive offset. */
8865 imm = insn & 0xfff;
8866 tcg_gen_addi_i32(addr, addr, imm);
8867 } else {
8868 imm = insn & 0xff;
8869 switch ((insn >> 8) & 0xf) {
8870 case 0x0: /* Shifted Register. */
8871 shift = (insn >> 4) & 0xf;
8872 if (shift > 3) {
8873 tcg_temp_free_i32(addr);
8874 goto illegal_op;
8875 }
8876 tmp = load_reg(s, rm);
8877 if (shift)
8878 tcg_gen_shli_i32(tmp, tmp, shift);
8879 tcg_gen_add_i32(addr, addr, tmp);
8880 tcg_temp_free_i32(tmp);
8881 break;
8882 case 0xc: /* Negative offset. */
8883 tcg_gen_addi_i32(addr, addr, -imm);
8884 break;
8885 case 0xe: /* User privilege. */
8886 tcg_gen_addi_i32(addr, addr, imm);
8887 user = 1;
8888 break;
8889 case 0x9: /* Post-decrement. */
8890 imm = -imm;
8891 /* Fall through. */
8892 case 0xb: /* Post-increment. */
8893 postinc = 1;
8894 writeback = 1;
8895 break;
8896 case 0xd: /* Pre-decrement. */
8897 imm = -imm;
8898 /* Fall through. */
8899 case 0xf: /* Pre-increment. */
8900 tcg_gen_addi_i32(addr, addr, imm);
8901 writeback = 1;
8902 break;
8903 default:
8904 tcg_temp_free_i32(addr);
8905 goto illegal_op;
8906 }
8907 }
8908 }
8909 if (insn & (1 << 20)) {
8910 /* Load. */
8911 switch (op) {
8912 case 0: tmp = gen_ld8u(addr, user); break;
8913 case 4: tmp = gen_ld8s(addr, user); break;
8914 case 1: tmp = gen_ld16u(addr, user); break;
8915 case 5: tmp = gen_ld16s(addr, user); break;
8916 case 2: tmp = gen_ld32(addr, user); break;
8917 default:
8918 tcg_temp_free_i32(addr);
8919 goto illegal_op;
8920 }
8921 if (rs == 15) {
8922 gen_bx(s, tmp);
8923 } else {
8924 store_reg(s, rs, tmp);
8925 }
8926 } else {
8927 /* Store. */
8928 tmp = load_reg(s, rs);
8929 switch (op) {
8930 case 0: gen_st8(tmp, addr, user); break;
8931 case 1: gen_st16(tmp, addr, user); break;
8932 case 2: gen_st32(tmp, addr, user); break;
8933 default:
8934 tcg_temp_free_i32(addr);
8935 goto illegal_op;
8936 }
8937 }
8938 if (postinc)
8939 tcg_gen_addi_i32(addr, addr, imm);
8940 if (writeback) {
8941 store_reg(s, rn, addr);
8942 } else {
8943 tcg_temp_free_i32(addr);
8944 }
8945 }
8946 break;
8947 default:
8948 goto illegal_op;
8949 }
8950 return 0;
8951 illegal_op:
8952 return 1;
8953 }
8954
8955 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8956 {
8957 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8958 int32_t offset;
8959 int i;
8960 TCGv tmp;
8961 TCGv tmp2;
8962 TCGv addr;
8963
8964 if (s->condexec_mask) {
8965 cond = s->condexec_cond;
8966 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8967 s->condlabel = gen_new_label();
8968 gen_test_cc(cond ^ 1, s->condlabel);
8969 s->condjmp = 1;
8970 }
8971 }
8972
8973 insn = lduw_code(s->pc);
8974 s->pc += 2;
8975
8976 switch (insn >> 12) {
8977 case 0: case 1:
8978
8979 rd = insn & 7;
8980 op = (insn >> 11) & 3;
8981 if (op == 3) {
8982 /* add/subtract */
8983 rn = (insn >> 3) & 7;
8984 tmp = load_reg(s, rn);
8985 if (insn & (1 << 10)) {
8986 /* immediate */
8987 tmp2 = tcg_temp_new_i32();
8988 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8989 } else {
8990 /* reg */
8991 rm = (insn >> 6) & 7;
8992 tmp2 = load_reg(s, rm);
8993 }
8994 if (insn & (1 << 9)) {
8995 if (s->condexec_mask)
8996 tcg_gen_sub_i32(tmp, tmp, tmp2);
8997 else
8998 gen_helper_sub_cc(tmp, tmp, tmp2);
8999 } else {
9000 if (s->condexec_mask)
9001 tcg_gen_add_i32(tmp, tmp, tmp2);
9002 else
9003 gen_helper_add_cc(tmp, tmp, tmp2);
9004 }
9005 tcg_temp_free_i32(tmp2);
9006 store_reg(s, rd, tmp);
9007 } else {
9008 /* shift immediate */
9009 rm = (insn >> 3) & 7;
9010 shift = (insn >> 6) & 0x1f;
9011 tmp = load_reg(s, rm);
9012 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9013 if (!s->condexec_mask)
9014 gen_logic_CC(tmp);
9015 store_reg(s, rd, tmp);
9016 }
9017 break;
9018 case 2: case 3:
9019 /* arithmetic large immediate */
9020 op = (insn >> 11) & 3;
9021 rd = (insn >> 8) & 0x7;
9022 if (op == 0) { /* mov */
9023 tmp = tcg_temp_new_i32();
9024 tcg_gen_movi_i32(tmp, insn & 0xff);
9025 if (!s->condexec_mask)
9026 gen_logic_CC(tmp);
9027 store_reg(s, rd, tmp);
9028 } else {
9029 tmp = load_reg(s, rd);
9030 tmp2 = tcg_temp_new_i32();
9031 tcg_gen_movi_i32(tmp2, insn & 0xff);
9032 switch (op) {
9033 case 1: /* cmp */
9034 gen_helper_sub_cc(tmp, tmp, tmp2);
9035 tcg_temp_free_i32(tmp);
9036 tcg_temp_free_i32(tmp2);
9037 break;
9038 case 2: /* add */
9039 if (s->condexec_mask)
9040 tcg_gen_add_i32(tmp, tmp, tmp2);
9041 else
9042 gen_helper_add_cc(tmp, tmp, tmp2);
9043 tcg_temp_free_i32(tmp2);
9044 store_reg(s, rd, tmp);
9045 break;
9046 case 3: /* sub */
9047 if (s->condexec_mask)
9048 tcg_gen_sub_i32(tmp, tmp, tmp2);
9049 else
9050 gen_helper_sub_cc(tmp, tmp, tmp2);
9051 tcg_temp_free_i32(tmp2);
9052 store_reg(s, rd, tmp);
9053 break;
9054 }
9055 }
9056 break;
9057 case 4:
9058 if (insn & (1 << 11)) {
9059 rd = (insn >> 8) & 7;
9060 /* load pc-relative. Bit 1 of PC is ignored. */
9061 val = s->pc + 2 + ((insn & 0xff) * 4);
9062 val &= ~(uint32_t)2;
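/* Worked example, added for illustration: s->pc has already been
 * advanced past this insn, so s->pc + 2 equals the architectural
 * PC. For an insn at 0x1002 with imm8 = 1, s->pc = 0x1004 and
 * val = (0x1004 + 2 + 4) & ~2 = 0x1008 = Align(PC, 4) + 4.
 */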
9063 addr = tcg_temp_new_i32();
9064 tcg_gen_movi_i32(addr, val);
9065 tmp = gen_ld32(addr, IS_USER(s));
9066 tcg_temp_free_i32(addr);
9067 store_reg(s, rd, tmp);
9068 break;
9069 }
9070 if (insn & (1 << 10)) {
9071 /* data processing extended or blx */
9072 rd = (insn & 7) | ((insn >> 4) & 8);
9073 rm = (insn >> 3) & 0xf;
9074 op = (insn >> 8) & 3;
9075 switch (op) {
9076 case 0: /* add */
9077 tmp = load_reg(s, rd);
9078 tmp2 = load_reg(s, rm);
9079 tcg_gen_add_i32(tmp, tmp, tmp2);
9080 tcg_temp_free_i32(tmp2);
9081 store_reg(s, rd, tmp);
9082 break;
9083 case 1: /* cmp */
9084 tmp = load_reg(s, rd);
9085 tmp2 = load_reg(s, rm);
9086 gen_helper_sub_cc(tmp, tmp, tmp2);
9087 tcg_temp_free_i32(tmp2);
9088 tcg_temp_free_i32(tmp);
9089 break;
9090 case 2: /* mov/cpy */
9091 tmp = load_reg(s, rm);
9092 store_reg(s, rd, tmp);
9093 break;
9094 case 3: /* branch [and link] exchange thumb register */
9095 tmp = load_reg(s, rm);
9096 if (insn & (1 << 7)) {
9097 ARCH(5);
9098 val = (uint32_t)s->pc | 1;
9099 tmp2 = tcg_temp_new_i32();
9100 tcg_gen_movi_i32(tmp2, val);
9101 store_reg(s, 14, tmp2);
9102 }
9103 /* already thumb, no need to check */
9104 gen_bx(s, tmp);
9105 break;
9106 }
9107 break;
9108 }
9109
9110 /* data processing register */
9111 rd = insn & 7;
9112 rm = (insn >> 3) & 7;
9113 op = (insn >> 6) & 0xf;
9114 if (op == 2 || op == 3 || op == 4 || op == 7) {
9115 /* the shift/rotate ops want the operands backwards */
9116 val = rm;
9117 rm = rd;
9118 rd = val;
9119 val = 1;
9120 } else {
9121 val = 0;
9122 }
9123
9124 if (op == 9) { /* neg */
9125 tmp = tcg_temp_new_i32();
9126 tcg_gen_movi_i32(tmp, 0);
9127 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9128 tmp = load_reg(s, rd);
9129 } else {
9130 TCGV_UNUSED(tmp);
9131 }
9132
9133 tmp2 = load_reg(s, rm);
9134 switch (op) {
9135 case 0x0: /* and */
9136 tcg_gen_and_i32(tmp, tmp, tmp2);
9137 if (!s->condexec_mask)
9138 gen_logic_CC(tmp);
9139 break;
9140 case 0x1: /* eor */
9141 tcg_gen_xor_i32(tmp, tmp, tmp2);
9142 if (!s->condexec_mask)
9143 gen_logic_CC(tmp);
9144 break;
9145 case 0x2: /* lsl */
9146 if (s->condexec_mask) {
9147 gen_helper_shl(tmp2, tmp2, tmp);
9148 } else {
9149 gen_helper_shl_cc(tmp2, tmp2, tmp);
9150 gen_logic_CC(tmp2);
9151 }
9152 break;
9153 case 0x3: /* lsr */
9154 if (s->condexec_mask) {
9155 gen_helper_shr(tmp2, tmp2, tmp);
9156 } else {
9157 gen_helper_shr_cc(tmp2, tmp2, tmp);
9158 gen_logic_CC(tmp2);
9159 }
9160 break;
9161 case 0x4: /* asr */
9162 if (s->condexec_mask) {
9163 gen_helper_sar(tmp2, tmp2, tmp);
9164 } else {
9165 gen_helper_sar_cc(tmp2, tmp2, tmp);
9166 gen_logic_CC(tmp2);
9167 }
9168 break;
9169 case 0x5: /* adc */
9170 if (s->condexec_mask)
9171 gen_adc(tmp, tmp2);
9172 else
9173 gen_helper_adc_cc(tmp, tmp, tmp2);
9174 break;
9175 case 0x6: /* sbc */
9176 if (s->condexec_mask)
9177 gen_sub_carry(tmp, tmp, tmp2);
9178 else
9179 gen_helper_sbc_cc(tmp, tmp, tmp2);
9180 break;
9181 case 0x7: /* ror */
9182 if (s->condexec_mask) {
9183 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9184 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9185 } else {
9186 gen_helper_ror_cc(tmp2, tmp2, tmp);
9187 gen_logic_CC(tmp2);
9188 }
9189 break;
9190 case 0x8: /* tst */
9191 tcg_gen_and_i32(tmp, tmp, tmp2);
9192 gen_logic_CC(tmp);
9193 rd = 16;
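/* Comment added for clarity: rd = 16 marks "no destination", so
 * the store-back code after this switch, which only writes
 * registers 0-15, leaves the result unwritten for tst/cmp/cmn.
 */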
9194 break;
9195 case 0x9: /* neg */
9196 if (s->condexec_mask)
9197 tcg_gen_neg_i32(tmp, tmp2);
9198 else
9199 gen_helper_sub_cc(tmp, tmp, tmp2);
9200 break;
9201 case 0xa: /* cmp */
9202 gen_helper_sub_cc(tmp, tmp, tmp2);
9203 rd = 16;
9204 break;
9205 case 0xb: /* cmn */
9206 gen_helper_add_cc(tmp, tmp, tmp2);
9207 rd = 16;
9208 break;
9209 case 0xc: /* orr */
9210 tcg_gen_or_i32(tmp, tmp, tmp2);
9211 if (!s->condexec_mask)
9212 gen_logic_CC(tmp);
9213 break;
9214 case 0xd: /* mul */
9215 tcg_gen_mul_i32(tmp, tmp, tmp2);
9216 if (!s->condexec_mask)
9217 gen_logic_CC(tmp);
9218 break;
9219 case 0xe: /* bic */
9220 tcg_gen_andc_i32(tmp, tmp, tmp2);
9221 if (!s->condexec_mask)
9222 gen_logic_CC(tmp);
9223 break;
9224 case 0xf: /* mvn */
9225 tcg_gen_not_i32(tmp2, tmp2);
9226 if (!s->condexec_mask)
9227 gen_logic_CC(tmp2);
9228 val = 1;
9229 rm = rd;
9230 break;
9231 }
9232 if (rd != 16) {
9233 if (val) {
9234 store_reg(s, rm, tmp2);
9235 if (op != 0xf)
9236 tcg_temp_free_i32(tmp);
9237 } else {
9238 store_reg(s, rd, tmp);
9239 tcg_temp_free_i32(tmp2);
9240 }
9241 } else {
9242 tcg_temp_free_i32(tmp);
9243 tcg_temp_free_i32(tmp2);
9244 }
9245 break;
9246
9247 case 5:
9248 /* load/store register offset. */
9249 rd = insn & 7;
9250 rn = (insn >> 3) & 7;
9251 rm = (insn >> 6) & 7;
9252 op = (insn >> 9) & 7;
9253 addr = load_reg(s, rn);
9254 tmp = load_reg(s, rm);
9255 tcg_gen_add_i32(addr, addr, tmp);
9256 tcg_temp_free_i32(tmp);
9257
9258 if (op < 3) /* store */
9259 tmp = load_reg(s, rd);
9260
9261 switch (op) {
9262 case 0: /* str */
9263 gen_st32(tmp, addr, IS_USER(s));
9264 break;
9265 case 1: /* strh */
9266 gen_st16(tmp, addr, IS_USER(s));
9267 break;
9268 case 2: /* strb */
9269 gen_st8(tmp, addr, IS_USER(s));
9270 break;
9271 case 3: /* ldrsb */
9272 tmp = gen_ld8s(addr, IS_USER(s));
9273 break;
9274 case 4: /* ldr */
9275 tmp = gen_ld32(addr, IS_USER(s));
9276 break;
9277 case 5: /* ldrh */
9278 tmp = gen_ld16u(addr, IS_USER(s));
9279 break;
9280 case 6: /* ldrb */
9281 tmp = gen_ld8u(addr, IS_USER(s));
9282 break;
9283 case 7: /* ldrsh */
9284 tmp = gen_ld16s(addr, IS_USER(s));
9285 break;
9286 }
9287 if (op >= 3) /* load */
9288 store_reg(s, rd, tmp);
9289 tcg_temp_free_i32(addr);
9290 break;
9291
9292 case 6:
9293 /* load/store word immediate offset */
9294 rd = insn & 7;
9295 rn = (insn >> 3) & 7;
9296 addr = load_reg(s, rn);
9297 val = (insn >> 4) & 0x7c;
9298 tcg_gen_addi_i32(addr, addr, val);
9299
9300 if (insn & (1 << 11)) {
9301 /* load */
9302 tmp = gen_ld32(addr, IS_USER(s));
9303 store_reg(s, rd, tmp);
9304 } else {
9305 /* store */
9306 tmp = load_reg(s, rd);
9307 gen_st32(tmp, addr, IS_USER(s));
9308 }
9309 tcg_temp_free_i32(addr);
9310 break;
9311
9312 case 7:
9313 /* load/store byte immediate offset */
9314 rd = insn & 7;
9315 rn = (insn >> 3) & 7;
9316 addr = load_reg(s, rn);
9317 val = (insn >> 6) & 0x1f;
9318 tcg_gen_addi_i32(addr, addr, val);
9319
9320 if (insn & (1 << 11)) {
9321 /* load */
9322 tmp = gen_ld8u(addr, IS_USER(s));
9323 store_reg(s, rd, tmp);
9324 } else {
9325 /* store */
9326 tmp = load_reg(s, rd);
9327 gen_st8(tmp, addr, IS_USER(s));
9328 }
9329 tcg_temp_free_i32(addr);
9330 break;
9331
9332 case 8:
9333 /* load/store halfword immediate offset */
9334 rd = insn & 7;
9335 rn = (insn >> 3) & 7;
9336 addr = load_reg(s, rn);
9337 val = (insn >> 5) & 0x3e;
9338 tcg_gen_addi_i32(addr, addr, val);
9339
9340 if (insn & (1 << 11)) {
9341 /* load */
9342 tmp = gen_ld16u(addr, IS_USER(s));
9343 store_reg(s, rd, tmp);
9344 } else {
9345 /* store */
9346 tmp = load_reg(s, rd);
9347 gen_st16(tmp, addr, IS_USER(s));
9348 }
9349 tcg_temp_free_i32(addr);
9350 break;
9351
9352 case 9:
9353 /* load/store from stack */
9354 rd = (insn >> 8) & 7;
9355 addr = load_reg(s, 13);
9356 val = (insn & 0xff) * 4;
9357 tcg_gen_addi_i32(addr, addr, val);
9358
9359 if (insn & (1 << 11)) {
9360 /* load */
9361 tmp = gen_ld32(addr, IS_USER(s));
9362 store_reg(s, rd, tmp);
9363 } else {
9364 /* store */
9365 tmp = load_reg(s, rd);
9366 gen_st32(tmp, addr, IS_USER(s));
9367 }
9368 tcg_temp_free_i32(addr);
9369 break;
9370
9371 case 10:
9372 /* add to high reg */
9373 rd = (insn >> 8) & 7;
9374 if (insn & (1 << 11)) {
9375 /* SP */
9376 tmp = load_reg(s, 13);
9377 } else {
9378 /* PC. bit 1 is ignored. */
9379 tmp = tcg_temp_new_i32();
9380 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9381 }
9382 val = (insn & 0xff) * 4;
9383 tcg_gen_addi_i32(tmp, tmp, val);
9384 store_reg(s, rd, tmp);
9385 break;
9386
9387 case 11:
9388 /* misc */
9389 op = (insn >> 8) & 0xf;
9390 switch (op) {
9391 case 0:
9392 /* adjust stack pointer */
9393 tmp = load_reg(s, 13);
9394 val = (insn & 0x7f) * 4;
9395 if (insn & (1 << 7))
9396 val = -(int32_t)val;
9397 tcg_gen_addi_i32(tmp, tmp, val);
9398 store_reg(s, 13, tmp);
9399 break;
9400
9401 case 2: /* sign/zero extend. */
9402 ARCH(6);
9403 rd = insn & 7;
9404 rm = (insn >> 3) & 7;
9405 tmp = load_reg(s, rm);
9406 switch ((insn >> 6) & 3) {
9407 case 0: gen_sxth(tmp); break;
9408 case 1: gen_sxtb(tmp); break;
9409 case 2: gen_uxth(tmp); break;
9410 case 3: gen_uxtb(tmp); break;
9411 }
9412 store_reg(s, rd, tmp);
9413 break;
9414 case 4: case 5: case 0xc: case 0xd:
9415 /* push/pop */
9416 addr = load_reg(s, 13);
9417 if (insn & (1 << 8))
9418 offset = 4;
9419 else
9420 offset = 0;
9421 for (i = 0; i < 8; i++) {
9422 if (insn & (1 << i))
9423 offset += 4;
9424 }
9425 if ((insn & (1 << 11)) == 0) {
9426 tcg_gen_addi_i32(addr, addr, -offset);
9427 }
9428 for (i = 0; i < 8; i++) {
9429 if (insn & (1 << i)) {
9430 if (insn & (1 << 11)) {
9431 /* pop */
9432 tmp = gen_ld32(addr, IS_USER(s));
9433 store_reg(s, i, tmp);
9434 } else {
9435 /* push */
9436 tmp = load_reg(s, i);
9437 gen_st32(tmp, addr, IS_USER(s));
9438 }
9439 /* advance to the next address. */
9440 tcg_gen_addi_i32(addr, addr, 4);
9441 }
9442 }
9443 TCGV_UNUSED(tmp);
9444 if (insn & (1 << 8)) {
9445 if (insn & (1 << 11)) {
9446 /* pop pc */
9447 tmp = gen_ld32(addr, IS_USER(s));
9448 /* don't set the pc until the rest of the instruction
9449 has completed */
9450 } else {
9451 /* push lr */
9452 tmp = load_reg(s, 14);
9453 gen_st32(tmp, addr, IS_USER(s));
9454 }
9455 tcg_gen_addi_i32(addr, addr, 4);
9456 }
9457 if ((insn & (1 << 11)) == 0) {
9458 tcg_gen_addi_i32(addr, addr, -offset);
9459 }
9460 /* write back the new stack pointer */
9461 store_reg(s, 13, addr);
9462 /* set the new PC value */
9463 if ((insn & 0x0900) == 0x0900) {
9464 store_reg_from_load(env, s, 15, tmp);
9465 }
9466 break;
9467
9468 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9469 rm = insn & 7;
9470 tmp = load_reg(s, rm);
9471 s->condlabel = gen_new_label();
9472 s->condjmp = 1;
9473 if (insn & (1 << 11))
9474 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9475 else
9476 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9477 tcg_temp_free_i32(tmp);
9478 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9479 val = (uint32_t)s->pc + 2;
9480 val += offset;
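/* Note added for clarity: offset is i:imm5:0 -- insn[9] lands in
 * bit 6 and insn[7:3] in bits [5:1] -- so cbz/cbnz can reach up to
 * 0x7e bytes (i = 1, imm5 = 0x1f) past the architectural PC.
 */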
9481 gen_jmp(s, val);
9482 break;
9483
9484 case 15: /* IT, nop-hint. */
9485 if ((insn & 0xf) == 0) {
9486 gen_nop_hint(s, (insn >> 4) & 0xf);
9487 break;
9488 }
9489 /* If Then. */
9490 s->condexec_cond = (insn >> 4) & 0xe;
9491 s->condexec_mask = insn & 0x1f;
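/* Note added for clarity: condexec_mask keeps firstcond[0] as its
 * top bit, and the translator loop rebuilds the per-insn condition
 * as (cond & 0xe) | (mask >> 4) before shifting the mask left.
 * Illustrative trace for "IT NE" (insn[4:0] = 0x18): after the IT
 * insn itself the state advances to cond = 1 (NE), mask = 0x10;
 * the next insn is tested against NE, then the mask shifts to 0
 * and the block ends.
 */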
9492 /* No actual code generated for this insn, just setup state. */
9493 break;
9494
9495 case 0xe: /* bkpt */
9496 ARCH(5);
9497 gen_exception_insn(s, 2, EXCP_BKPT);
9498 break;
9499
9500 case 0xa: /* rev */
9501 ARCH(6);
9502 rn = (insn >> 3) & 0x7;
9503 rd = insn & 0x7;
9504 tmp = load_reg(s, rn);
9505 switch ((insn >> 6) & 3) {
9506 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9507 case 1: gen_rev16(tmp); break;
9508 case 3: gen_revsh(tmp); break;
9509 default: goto illegal_op;
9510 }
9511 store_reg(s, rd, tmp);
9512 break;
9513
9514 case 6: /* cps */
9515 ARCH(6);
9516 if (IS_USER(s))
9517 break;
9518 if (IS_M(env)) {
9519 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9520 /* PRIMASK */
9521 if (insn & 1) {
9522 addr = tcg_const_i32(16);
9523 gen_helper_v7m_msr(cpu_env, addr, tmp);
9524 tcg_temp_free_i32(addr);
9525 }
9526 /* FAULTMASK */
9527 if (insn & 2) {
9528 addr = tcg_const_i32(17);
9529 gen_helper_v7m_msr(cpu_env, addr, tmp);
9530 tcg_temp_free_i32(addr);
9531 }
9532 tcg_temp_free_i32(tmp);
9533 gen_lookup_tb(s);
9534 } else {
9535 if (insn & (1 << 4))
9536 shift = CPSR_A | CPSR_I | CPSR_F;
9537 else
9538 shift = 0;
9539 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9540 }
9541 break;
9542
9543 default:
9544 goto undef;
9545 }
9546 break;
9547
9548 case 12:
9549 {
9550 /* load/store multiple */
9551 TCGv loaded_var;
9552 TCGV_UNUSED(loaded_var);
9553 rn = (insn >> 8) & 0x7;
9554 addr = load_reg(s, rn);
9555 for (i = 0; i < 8; i++) {
9556 if (insn & (1 << i)) {
9557 if (insn & (1 << 11)) {
9558 /* load */
9559 tmp = gen_ld32(addr, IS_USER(s));
9560 if (i == rn) {
9561 loaded_var = tmp;
9562 } else {
9563 store_reg(s, i, tmp);
9564 }
9565 } else {
9566 /* store */
9567 tmp = load_reg(s, i);
9568 gen_st32(tmp, addr, IS_USER(s));
9569 }
9570 /* advance to the next address */
9571 tcg_gen_addi_i32(addr, addr, 4);
9572 }
9573 }
9574 if ((insn & (1 << rn)) == 0) {
9575 /* base reg not in list: base register writeback */
9576 store_reg(s, rn, addr);
9577 } else {
9578 /* base reg in list: if load, complete it now */
9579 if (insn & (1 << 11)) {
9580 store_reg(s, rn, loaded_var);
9581 }
9582 tcg_temp_free_i32(addr);
9583 }
9584 break;
9585 }
9586 case 13:
9587 /* conditional branch or swi */
9588 cond = (insn >> 8) & 0xf;
9589 if (cond == 0xe)
9590 goto undef;
9591
9592 if (cond == 0xf) {
9593 /* swi */
9594 gen_set_pc_im(s->pc);
9595 s->is_jmp = DISAS_SWI;
9596 break;
9597 }
9598 /* generate a conditional jump to next instruction */
9599 s->condlabel = gen_new_label();
9600 gen_test_cc(cond ^ 1, s->condlabel);
9601 s->condjmp = 1;
9602
9603 /* jump to the offset */
9604 val = (uint32_t)s->pc + 2;
9605 offset = ((int32_t)insn << 24) >> 24;
9606 val += offset << 1;
9607 gen_jmp(s, val);
9608 break;
9609
9610 case 14:
9611 if (insn & (1 << 11)) {
9612 if (disas_thumb2_insn(env, s, insn))
9613 goto undef32;
9614 break;
9615 }
9616 /* unconditional branch */
9617 val = (uint32_t)s->pc;
9618 offset = ((int32_t)insn << 21) >> 21;
9619 val += (offset << 1) + 2;
9620 gen_jmp(s, val);
9621 break;
9622
9623 case 15:
9624 if (disas_thumb2_insn(env, s, insn))
9625 goto undef32;
9626 break;
9627 }
9628 return;
9629 undef32:
9630 gen_exception_insn(s, 4, EXCP_UDEF);
9631 return;
9632 illegal_op:
9633 undef:
9634 gen_exception_insn(s, 2, EXCP_UDEF);
9635 }
9636
9637 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9638 basic block 'tb'. If search_pc is TRUE, also generate PC
9639 information for each intermediate instruction. */
9640 static inline void gen_intermediate_code_internal(CPUState *env,
9641 TranslationBlock *tb,
9642 int search_pc)
9643 {
9644 DisasContext dc1, *dc = &dc1;
9645 CPUBreakpoint *bp;
9646 uint16_t *gen_opc_end;
9647 int j, lj;
9648 target_ulong pc_start;
9649 uint32_t next_page_start;
9650 int num_insns;
9651 int max_insns;
9652
9653 /* generate intermediate code */
9654 pc_start = tb->pc;
9655
9656 dc->tb = tb;
9657
9658 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9659
9660 dc->is_jmp = DISAS_NEXT;
9661 dc->pc = pc_start;
9662 dc->singlestep_enabled = env->singlestep_enabled;
9663 dc->condjmp = 0;
9664 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9665 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9666 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9667 #if !defined(CONFIG_USER_ONLY)
9668 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9669 #endif
9670 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9671 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9672 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9673 cpu_F0s = tcg_temp_new_i32();
9674 cpu_F1s = tcg_temp_new_i32();
9675 cpu_F0d = tcg_temp_new_i64();
9676 cpu_F1d = tcg_temp_new_i64();
9677 cpu_V0 = cpu_F0d;
9678 cpu_V1 = cpu_F1d;
9679 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9680 cpu_M0 = tcg_temp_new_i64();
9681 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9682 lj = -1;
9683 num_insns = 0;
9684 max_insns = tb->cflags & CF_COUNT_MASK;
9685 if (max_insns == 0)
9686 max_insns = CF_COUNT_MASK;
9687
9688 gen_icount_start();
9689
9690 tcg_clear_temp_count();
9691
9692 /* A note on handling of the condexec (IT) bits:
9693 *
9694 * We want to avoid the overhead of having to write the updated condexec
9695 * bits back to the CPUState for every instruction in an IT block. So:
9696 * (1) if the condexec bits are not already zero then we write
9697 * zero back into the CPUState now. This avoids complications trying
9698 * to do it at the end of the block. (For example if we don't do this
9699 * it's hard to identify whether we can safely skip writing condexec
9700 * at the end of the TB, which we definitely want to do for the case
9701 * where a TB doesn't do anything with the IT state at all.)
9702 * (2) if we are going to leave the TB then we call gen_set_condexec()
9703 * which will write the correct value into CPUState if zero is wrong.
9704 * This is done both for leaving the TB at the end, and for leaving
9705 * it because of an exception we know will happen, which is done in
9706 * gen_exception_insn(). The latter is necessary because we need to
9707 * leave the TB with the PC/IT state just prior to execution of the
9708 * instruction which caused the exception.
9709 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9710 * then the CPUState will be wrong and we need to reset it.
9711 * This is handled in the same way as restoration of the
9712 * PC in these situations: we will be called again with search_pc=1
9713 * and generate a mapping of the condexec bits for each PC in
9714 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9715 * this to restore the condexec bits.
9716 *
9717 * Note that there are no instructions which can read the condexec
9718 * bits, and none which can write non-static values to them, so
9719 * we don't need to care about whether CPUState is correct in the
9720 * middle of a TB.
9721 */
9722
9723 /* Reset the conditional execution bits immediately. This avoids
9724 complications trying to do it at the end of the block. */
9725 if (dc->condexec_mask || dc->condexec_cond)
9726 {
9727 TCGv tmp = tcg_temp_new_i32();
9728 tcg_gen_movi_i32(tmp, 0);
9729 store_cpu_field(tmp, condexec_bits);
9730 }
9731 do {
9732 #ifdef CONFIG_USER_ONLY
9733 /* Intercept jump to the magic kernel page. */
9734 if (dc->pc >= 0xffff0000) {
8735 /* We always get here via a jump, so we know we are not in a
9736 conditional execution block. */
9737 gen_exception(EXCP_KERNEL_TRAP);
9738 dc->is_jmp = DISAS_UPDATE;
9739 break;
9740 }
9741 #else
9742 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8743 /* We always get here via a jump, so we know we are not in a
9744 conditional execution block. */
9745 gen_exception(EXCP_EXCEPTION_EXIT);
9746 dc->is_jmp = DISAS_UPDATE;
9747 break;
9748 }
9749 #endif
9750
9751 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9752 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9753 if (bp->pc == dc->pc) {
9754 gen_exception_insn(dc, 0, EXCP_DEBUG);
9755 /* Advance PC so that clearing the breakpoint will
9756 invalidate this TB. */
9757 dc->pc += 2;
9758 goto done_generating;
9759 break;
9760 }
9761 }
9762 }
9763 if (search_pc) {
9764 j = gen_opc_ptr - gen_opc_buf;
9765 if (lj < j) {
9766 lj++;
9767 while (lj < j)
9768 gen_opc_instr_start[lj++] = 0;
9769 }
9770 gen_opc_pc[lj] = dc->pc;
9771 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
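/* Note added for clarity: this repacks the IT state into the same
 * layout ARM_TBFLAG_CONDEXEC uses (cond in the high nibble, mask in
 * the low), so restore_state_to_opc() can copy it straight into
 * env->condexec_bits.
 */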
9772 gen_opc_instr_start[lj] = 1;
9773 gen_opc_icount[lj] = num_insns;
9774 }
9775
9776 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9777 gen_io_start();
9778
9779 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9780 tcg_gen_debug_insn_start(dc->pc);
9781 }
9782
9783 if (dc->thumb) {
9784 disas_thumb_insn(env, dc);
9785 if (dc->condexec_mask) {
9786 dc->condexec_cond = (dc->condexec_cond & 0xe)
9787 | ((dc->condexec_mask >> 4) & 1);
9788 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9789 if (dc->condexec_mask == 0) {
9790 dc->condexec_cond = 0;
9791 }
9792 }
9793 } else {
9794 disas_arm_insn(env, dc);
9795 }
9796
9797 if (dc->condjmp && !dc->is_jmp) {
9798 gen_set_label(dc->condlabel);
9799 dc->condjmp = 0;
9800 }
9801
9802 if (tcg_check_temp_count()) {
9803 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9804 }
9805
9806 /* Translation stops when a conditional branch is encountered.
9807 * Otherwise the subsequent code could get translated several times.
9808 * Also stop translation when a page boundary is reached. This
9809 * ensures prefetch aborts occur at the right place. */
9810 num_insns++;
9811 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9812 !env->singlestep_enabled &&
9813 !singlestep &&
9814 dc->pc < next_page_start &&
9815 num_insns < max_insns);
9816
9817 if (tb->cflags & CF_LAST_IO) {
9818 if (dc->condjmp) {
9819 /* FIXME: This can theoretically happen with self-modifying
9820 code. */
9821 cpu_abort(env, "IO on conditional branch instruction");
9822 }
9823 gen_io_end();
9824 }
9825
9826 /* At this stage dc->condjmp will only be set when the skipped
9827 instruction was a conditional branch or trap, and the PC has
9828 already been written. */
9829 if (unlikely(env->singlestep_enabled)) {
9830 /* Make sure the pc is updated, and raise a debug exception. */
9831 if (dc->condjmp) {
9832 gen_set_condexec(dc);
9833 if (dc->is_jmp == DISAS_SWI) {
9834 gen_exception(EXCP_SWI);
9835 } else {
9836 gen_exception(EXCP_DEBUG);
9837 }
9838 gen_set_label(dc->condlabel);
9839 }
9840 if (dc->condjmp || !dc->is_jmp) {
9841 gen_set_pc_im(dc->pc);
9842 dc->condjmp = 0;
9843 }
9844 gen_set_condexec(dc);
9845 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9846 gen_exception(EXCP_SWI);
9847 } else {
9848 /* FIXME: Single stepping a WFI insn will not halt
9849 the CPU. */
9850 gen_exception(EXCP_DEBUG);
9851 }
9852 } else {
9853 /* While branches must always occur at the end of an IT block,
9854 there are a few other things that can cause us to terminate
9855 the TB in the middle of an IT block:
9856 - Exception generating instructions (bkpt, swi, undefined).
9857 - Page boundaries.
9858 - Hardware watchpoints.
9859 Hardware breakpoints have already been handled and do not reach this code.
9860 */
9861 gen_set_condexec(dc);
9862 switch (dc->is_jmp) {
9863 case DISAS_NEXT:
9864 gen_goto_tb(dc, 1, dc->pc);
9865 break;
9866 default:
9867 case DISAS_JUMP:
9868 case DISAS_UPDATE:
9869 /* indicate that the hash table must be used to find the next TB */
9870 tcg_gen_exit_tb(0);
9871 break;
9872 case DISAS_TB_JUMP:
9873 /* nothing more to generate */
9874 break;
9875 case DISAS_WFI:
9876 gen_helper_wfi();
9877 break;
9878 case DISAS_SWI:
9879 gen_exception(EXCP_SWI);
9880 break;
9881 }
9882 if (dc->condjmp) {
9883 gen_set_label(dc->condlabel);
9884 gen_set_condexec(dc);
9885 gen_goto_tb(dc, 1, dc->pc);
9886 dc->condjmp = 0;
9887 }
9888 }
9889
9890 done_generating:
9891 gen_icount_end(tb, num_insns);
9892 *gen_opc_ptr = INDEX_op_end;
9893
9894 #ifdef DEBUG_DISAS
9895 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9896 qemu_log("----------------\n");
9897 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9898 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
9899 qemu_log("\n");
9900 }
9901 #endif
9902 if (search_pc) {
9903 j = gen_opc_ptr - gen_opc_buf;
9904 lj++;
9905 while (lj <= j)
9906 gen_opc_instr_start[lj++] = 0;
9907 } else {
9908 tb->size = dc->pc - pc_start;
9909 tb->icount = num_insns;
9910 }
9911 }
9912
9913 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
9914 {
9915 gen_intermediate_code_internal(env, tb, 0);
9916 }
9917
9918 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
9919 {
9920 gen_intermediate_code_internal(env, tb, 1);
9921 }
9922
9923 static const char *cpu_mode_names[16] = {
9924 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9925 "???", "???", "???", "und", "???", "???", "???", "sys"
9926 };
9927
9928 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
9929 int flags)
9930 {
9931 int i;
9932 #if 0
9933 union {
9934 uint32_t i;
9935 float s;
9936 } s0, s1;
9937 CPU_DoubleU d;
9938 /* ??? This assumes float64 and double have the same layout.
9939 Oh well, it's only debug dumps. */
9940 union {
9941 float64 f64;
9942 double d;
9943 } d0;
9944 #endif
9945 uint32_t psr;
9946
9947 for (i = 0; i < 16; i++) {
9948 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9949 if ((i % 4) == 3)
9950 cpu_fprintf(f, "\n");
9951 else
9952 cpu_fprintf(f, " ");
9953 }
9954 psr = cpsr_read(env);
9955 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9956 psr,
9957 psr & (1 << 31) ? 'N' : '-',
9958 psr & (1 << 30) ? 'Z' : '-',
9959 psr & (1 << 29) ? 'C' : '-',
9960 psr & (1 << 28) ? 'V' : '-',
9961 psr & CPSR_T ? 'T' : 'A',
9962 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
9963
9964 #if 0
9965 for (i = 0; i < 16; i++) {
9966 d.d = env->vfp.regs[i];
9967 s0.i = d.l.lower;
9968 s1.i = d.l.upper;
9969 d0.f64 = d.d;
9970 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9971 i * 2, (int)s0.i, s0.s,
9972 i * 2 + 1, (int)s1.i, s1.s,
9973 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
9974 d0.d);
9975 }
9976 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
9977 #endif
9978 }
9979
9980 void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
9981 {
9982 env->regs[15] = gen_opc_pc[pc_pos];
9983 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
9984 }