1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "disas/disas.h"
29 #include "tcg-op.h"
30 #include "qemu/log.h"
31
32 #include "helper.h"
33 #define GEN_HELPER 1
34 #include "helper.h"
35
36 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38 /* currently all emulated v5 cores are also v5TE, so don't bother */
39 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
40 #define ENABLE_ARCH_5J 0
41 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
45 #define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
46
47 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
48
49 /* internal defines */
50 typedef struct DisasContext {
51 target_ulong pc;
52 int is_jmp;
53 /* Nonzero if this instruction has been conditionally skipped. */
54 int condjmp;
55 /* The label that will be jumped to when the instruction is skipped. */
56 int condlabel;
57 /* Thumb-2 conditional execution bits. */
58 int condexec_mask;
59 int condexec_cond;
60 struct TranslationBlock *tb;
61 int singlestep_enabled;
62 int thumb;
63 int bswap_code;
64 #if !defined(CONFIG_USER_ONLY)
65 int user;
66 #endif
67 int vfp_enabled;
68 int vec_len;
69 int vec_stride;
70 } DisasContext;
71
72 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
73
74 #if defined(CONFIG_USER_ONLY)
75 #define IS_USER(s) 1
76 #else
77 #define IS_USER(s) (s->user)
78 #endif
79
80 /* These instructions trap after executing, so defer them until after the
81 conditional execution state has been updated. */
82 #define DISAS_WFI 4
83 #define DISAS_SWI 5
84
85 static TCGv_ptr cpu_env;
86 /* We reuse the same 64-bit temporaries for efficiency. */
87 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
88 static TCGv_i32 cpu_R[16];
89 static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
90 static TCGv_i32 cpu_exclusive_addr;
91 static TCGv_i32 cpu_exclusive_val;
92 static TCGv_i32 cpu_exclusive_high;
93 #ifdef CONFIG_USER_ONLY
94 static TCGv_i32 cpu_exclusive_test;
95 static TCGv_i32 cpu_exclusive_info;
96 #endif
97
98 /* FIXME: These should be removed. */
99 static TCGv_i32 cpu_F0s, cpu_F1s;
100 static TCGv_i64 cpu_F0d, cpu_F1d;
101
102 #include "exec/gen-icount.h"
103
104 static const char *regnames[] =
105 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
106 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
107
108 /* initialize TCG globals. */
109 void arm_translate_init(void)
110 {
111 int i;
112
113 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
114
115 for (i = 0; i < 16; i++) {
116 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUARMState, regs[i]),
118 regnames[i]);
119 }
120 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
121 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
122 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
123 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
124
125 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
127 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUARMState, exclusive_val), "exclusive_val");
129 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
130 offsetof(CPUARMState, exclusive_high), "exclusive_high");
131 #ifdef CONFIG_USER_ONLY
132 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
133 offsetof(CPUARMState, exclusive_test), "exclusive_test");
134 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
135 offsetof(CPUARMState, exclusive_info), "exclusive_info");
136 #endif
137
138 #define GEN_HELPER 2
139 #include "helper.h"
140 }
141
142 static inline TCGv_i32 load_cpu_offset(int offset)
143 {
144 TCGv_i32 tmp = tcg_temp_new_i32();
145 tcg_gen_ld_i32(tmp, cpu_env, offset);
146 return tmp;
147 }
148
149 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
150
151 static inline void store_cpu_offset(TCGv_i32 var, int offset)
152 {
153 tcg_gen_st_i32(var, cpu_env, offset);
154 tcg_temp_free_i32(var);
155 }
156
157 #define store_cpu_field(var, name) \
158 store_cpu_offset(var, offsetof(CPUARMState, name))
159
160 /* Set a variable to the value of a CPU register. */
161 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
162 {
163 if (reg == 15) {
164 uint32_t addr;
165             /* normally, since we have already updated PC past this insn, we need only add one more insn width */
166 if (s->thumb)
167 addr = (long)s->pc + 2;
168 else
169 addr = (long)s->pc + 4;
170 tcg_gen_movi_i32(var, addr);
171 } else {
172 tcg_gen_mov_i32(var, cpu_R[reg]);
173 }
174 }
175
176 /* Create a new temporary and set it to the value of a CPU register. */
177 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
178 {
179 TCGv_i32 tmp = tcg_temp_new_i32();
180 load_reg_var(s, tmp, reg);
181 return tmp;
182 }
183
184 /* Set a CPU register. The source must be a temporary and will be
185 marked as dead. */
186 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
187 {
188 if (reg == 15) {
189 tcg_gen_andi_i32(var, var, ~1);
190 s->is_jmp = DISAS_JUMP;
191 }
192 tcg_gen_mov_i32(cpu_R[reg], var);
193 tcg_temp_free_i32(var);
194 }
195
196 /* Value extensions. */
197 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
198 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
199 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
200 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
201
202 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
203 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
204
205
206 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
207 {
208 TCGv_i32 tmp_mask = tcg_const_i32(mask);
209 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
210 tcg_temp_free_i32(tmp_mask);
211 }
212 /* Set NZCV flags from the high 4 bits of var. */
213 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
214
215 static void gen_exception(int excp)
216 {
217 TCGv_i32 tmp = tcg_temp_new_i32();
218 tcg_gen_movi_i32(tmp, excp);
219 gen_helper_exception(cpu_env, tmp);
220 tcg_temp_free_i32(tmp);
221 }
222
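/* Dual signed 16x16->32 multiply: on return a holds the product of the
   low halfwords and b holds the product of the high halfwords. */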
223 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
224 {
225 TCGv_i32 tmp1 = tcg_temp_new_i32();
226 TCGv_i32 tmp2 = tcg_temp_new_i32();
227 tcg_gen_ext16s_i32(tmp1, a);
228 tcg_gen_ext16s_i32(tmp2, b);
229 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
230 tcg_temp_free_i32(tmp2);
231 tcg_gen_sari_i32(a, a, 16);
232 tcg_gen_sari_i32(b, b, 16);
233 tcg_gen_mul_i32(b, b, a);
234 tcg_gen_mov_i32(a, tmp1);
235 tcg_temp_free_i32(tmp1);
236 }
237
238 /* Byteswap each halfword. */
239 static void gen_rev16(TCGv_i32 var)
240 {
241 TCGv_i32 tmp = tcg_temp_new_i32();
242 tcg_gen_shri_i32(tmp, var, 8);
243 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
244 tcg_gen_shli_i32(var, var, 8);
245 tcg_gen_andi_i32(var, var, 0xff00ff00);
246 tcg_gen_or_i32(var, var, tmp);
247 tcg_temp_free_i32(tmp);
248 }
249
250 /* Byteswap low halfword and sign extend. */
251 static void gen_revsh(TCGv_i32 var)
252 {
253 tcg_gen_ext16u_i32(var, var);
254 tcg_gen_bswap16_i32(var, var);
255 tcg_gen_ext16s_i32(var, var);
256 }
257
258 /* Unsigned bitfield extract. */
259 static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
260 {
261 if (shift)
262 tcg_gen_shri_i32(var, var, shift);
263 tcg_gen_andi_i32(var, var, mask);
264 }
265
266 /* Signed bitfield extract. */
267 static void gen_sbfx(TCGv_i32 var, int shift, int width)
268 {
269 uint32_t signbit;
270
271 if (shift)
272 tcg_gen_sari_i32(var, var, shift);
273 if (shift + width < 32) {
274 signbit = 1u << (width - 1);
275 tcg_gen_andi_i32(var, var, (1u << width) - 1);
276 tcg_gen_xori_i32(var, var, signbit);
277 tcg_gen_subi_i32(var, var, signbit);
278 }
279 }
280
281 /* Return (b << 32) + a. Mark inputs as dead. */
282 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
283 {
284 TCGv_i64 tmp64 = tcg_temp_new_i64();
285
286 tcg_gen_extu_i32_i64(tmp64, b);
287 tcg_temp_free_i32(b);
288 tcg_gen_shli_i64(tmp64, tmp64, 32);
289 tcg_gen_add_i64(a, tmp64, a);
290
291 tcg_temp_free_i64(tmp64);
292 return a;
293 }
294
295 /* Return (b << 32) - a. Mark inputs as dead. */
296 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
297 {
298 TCGv_i64 tmp64 = tcg_temp_new_i64();
299
300 tcg_gen_extu_i32_i64(tmp64, b);
301 tcg_temp_free_i32(b);
302 tcg_gen_shli_i64(tmp64, tmp64, 32);
303 tcg_gen_sub_i64(a, tmp64, a);
304
305 tcg_temp_free_i64(tmp64);
306 return a;
307 }
308
309 /* 32x32->64 unsigned multiply. Marks inputs as dead. */
310 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
311 {
312 TCGv_i32 lo = tcg_temp_new_i32();
313 TCGv_i32 hi = tcg_temp_new_i32();
314 TCGv_i64 ret;
315
316 tcg_gen_mulu2_i32(lo, hi, a, b);
317 tcg_temp_free_i32(a);
318 tcg_temp_free_i32(b);
319
320 ret = tcg_temp_new_i64();
321 tcg_gen_concat_i32_i64(ret, lo, hi);
322 tcg_temp_free_i32(lo);
323 tcg_temp_free_i32(hi);
324
325 return ret;
326 }
327
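/* 32x32->64 signed multiply. Marks inputs as dead. */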
328 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
329 {
330 TCGv_i32 lo = tcg_temp_new_i32();
331 TCGv_i32 hi = tcg_temp_new_i32();
332 TCGv_i64 ret;
333
334 tcg_gen_muls2_i32(lo, hi, a, b);
335 tcg_temp_free_i32(a);
336 tcg_temp_free_i32(b);
337
338 ret = tcg_temp_new_i64();
339 tcg_gen_concat_i32_i64(ret, lo, hi);
340 tcg_temp_free_i32(lo);
341 tcg_temp_free_i32(hi);
342
343 return ret;
344 }
345
346 /* Swap low and high halfwords. */
347 static void gen_swap_half(TCGv_i32 var)
348 {
349 TCGv_i32 tmp = tcg_temp_new_i32();
350 tcg_gen_shri_i32(tmp, var, 16);
351 tcg_gen_shli_i32(var, var, 16);
352 tcg_gen_or_i32(var, var, tmp);
353 tcg_temp_free_i32(tmp);
354 }
355
356 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
357 tmp = (t0 ^ t1) & 0x8000;
358 t0 &= ~0x8000;
359 t1 &= ~0x8000;
360 t0 = (t0 + t1) ^ tmp;
361 */
362
363 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
364 {
365 TCGv_i32 tmp = tcg_temp_new_i32();
366 tcg_gen_xor_i32(tmp, t0, t1);
367 tcg_gen_andi_i32(tmp, tmp, 0x8000);
368 tcg_gen_andi_i32(t0, t0, ~0x8000);
369 tcg_gen_andi_i32(t1, t1, ~0x8000);
370 tcg_gen_add_i32(t0, t0, t1);
371 tcg_gen_xor_i32(t0, t0, tmp);
372 tcg_temp_free_i32(tmp);
373 tcg_temp_free_i32(t1);
374 }
375
376 /* Set CF to the top bit of var. */
377 static void gen_set_CF_bit31(TCGv_i32 var)
378 {
379 tcg_gen_shri_i32(cpu_CF, var, 31);
380 }
381
382 /* Set N and Z flags from var. */
383 static inline void gen_logic_CC(TCGv_i32 var)
384 {
385 tcg_gen_mov_i32(cpu_NF, var);
386 tcg_gen_mov_i32(cpu_ZF, var);
387 }
388
389 /* T0 += T1 + CF. */
390 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
391 {
392 tcg_gen_add_i32(t0, t0, t1);
393 tcg_gen_add_i32(t0, t0, cpu_CF);
394 }
395
396 /* dest = T0 + T1 + CF. */
397 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
398 {
399 tcg_gen_add_i32(dest, t0, t1);
400 tcg_gen_add_i32(dest, dest, cpu_CF);
401 }
402
403 /* dest = T0 - T1 + CF - 1. */
404 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
405 {
406 tcg_gen_sub_i32(dest, t0, t1);
407 tcg_gen_add_i32(dest, dest, cpu_CF);
408 tcg_gen_subi_i32(dest, dest, 1);
409 }
410
411 /* dest = T0 + T1. Compute C, N, V and Z flags */
412 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
413 {
414 TCGv_i32 tmp = tcg_temp_new_i32();
415 tcg_gen_movi_i32(tmp, 0);
416 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
417 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
418 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
419 tcg_gen_xor_i32(tmp, t0, t1);
420 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
421 tcg_temp_free_i32(tmp);
422 tcg_gen_mov_i32(dest, cpu_NF);
423 }
424
425 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
426 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
427 {
428 TCGv_i32 tmp = tcg_temp_new_i32();
429 if (TCG_TARGET_HAS_add2_i32) {
430 tcg_gen_movi_i32(tmp, 0);
431 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
432 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
433 } else {
434 TCGv_i64 q0 = tcg_temp_new_i64();
435 TCGv_i64 q1 = tcg_temp_new_i64();
436 tcg_gen_extu_i32_i64(q0, t0);
437 tcg_gen_extu_i32_i64(q1, t1);
438 tcg_gen_add_i64(q0, q0, q1);
439 tcg_gen_extu_i32_i64(q1, cpu_CF);
440 tcg_gen_add_i64(q0, q0, q1);
441 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
442 tcg_temp_free_i64(q0);
443 tcg_temp_free_i64(q1);
444 }
445 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
446 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
447 tcg_gen_xor_i32(tmp, t0, t1);
448 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
449 tcg_temp_free_i32(tmp);
450 tcg_gen_mov_i32(dest, cpu_NF);
451 }
452
453 /* dest = T0 - T1. Compute C, N, V and Z flags */
454 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
455 {
456 TCGv_i32 tmp;
457 tcg_gen_sub_i32(cpu_NF, t0, t1);
458 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
459 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
460 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
461 tmp = tcg_temp_new_i32();
462 tcg_gen_xor_i32(tmp, t0, t1);
463 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
464 tcg_temp_free_i32(tmp);
465 tcg_gen_mov_i32(dest, cpu_NF);
466 }
467
468 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
469 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
470 {
471 TCGv_i32 tmp = tcg_temp_new_i32();
472 tcg_gen_not_i32(tmp, t1);
473 gen_adc_CC(dest, t0, tmp);
474 tcg_temp_free_i32(tmp);
475 }
476
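/* Variable shift by register amount, with ARM semantics: only the low byte
   of t1 is used, and shift amounts of 32 or more produce zero. */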
477 #define GEN_SHIFT(name) \
478 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
479 { \
480 TCGv_i32 tmp1, tmp2, tmp3; \
481 tmp1 = tcg_temp_new_i32(); \
482 tcg_gen_andi_i32(tmp1, t1, 0xff); \
483 tmp2 = tcg_const_i32(0); \
484 tmp3 = tcg_const_i32(0x1f); \
485 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
486 tcg_temp_free_i32(tmp3); \
487 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
488 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
489 tcg_temp_free_i32(tmp2); \
490 tcg_temp_free_i32(tmp1); \
491 }
492 GEN_SHIFT(shl)
493 GEN_SHIFT(shr)
494 #undef GEN_SHIFT
495
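/* Variable arithmetic shift right: shift amounts of 32 or more (taken from
   the low byte of t1) are clamped to 31, giving all sign bits. */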
496 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
497 {
498 TCGv_i32 tmp1, tmp2;
499 tmp1 = tcg_temp_new_i32();
500 tcg_gen_andi_i32(tmp1, t1, 0xff);
501 tmp2 = tcg_const_i32(0x1f);
502 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
503 tcg_temp_free_i32(tmp2);
504 tcg_gen_sar_i32(dest, t0, tmp1);
505 tcg_temp_free_i32(tmp1);
506 }
507
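/* dest = |src|, computed branchlessly with a movcond. */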
508 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
509 {
510 TCGv_i32 c0 = tcg_const_i32(0);
511 TCGv_i32 tmp = tcg_temp_new_i32();
512 tcg_gen_neg_i32(tmp, src);
513 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
514 tcg_temp_free_i32(c0);
515 tcg_temp_free_i32(tmp);
516 }
517
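/* Set CF to bit 'shift' of var (the carry out of an immediate shift). */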
518 static void shifter_out_im(TCGv_i32 var, int shift)
519 {
520 if (shift == 0) {
521 tcg_gen_andi_i32(cpu_CF, var, 1);
522 } else {
523 tcg_gen_shri_i32(cpu_CF, var, shift);
524 if (shift != 31) {
525 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
526 }
527 }
528 }
529
530 /* Shift by immediate. Includes special handling for shift == 0. */
531 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
532 int shift, int flags)
533 {
534 switch (shiftop) {
535 case 0: /* LSL */
536 if (shift != 0) {
537 if (flags)
538 shifter_out_im(var, 32 - shift);
539 tcg_gen_shli_i32(var, var, shift);
540 }
541 break;
542 case 1: /* LSR */
543 if (shift == 0) {
544 if (flags) {
545 tcg_gen_shri_i32(cpu_CF, var, 31);
546 }
547 tcg_gen_movi_i32(var, 0);
548 } else {
549 if (flags)
550 shifter_out_im(var, shift - 1);
551 tcg_gen_shri_i32(var, var, shift);
552 }
553 break;
554 case 2: /* ASR */
555 if (shift == 0)
556 shift = 32;
557 if (flags)
558 shifter_out_im(var, shift - 1);
559 if (shift == 32)
560 shift = 31;
561 tcg_gen_sari_i32(var, var, shift);
562 break;
563 case 3: /* ROR/RRX */
564 if (shift != 0) {
565 if (flags)
566 shifter_out_im(var, shift - 1);
567 tcg_gen_rotri_i32(var, var, shift); break;
568 } else {
569 TCGv_i32 tmp = tcg_temp_new_i32();
570 tcg_gen_shli_i32(tmp, cpu_CF, 31);
571 if (flags)
572 shifter_out_im(var, 0);
573 tcg_gen_shri_i32(var, var, 1);
574 tcg_gen_or_i32(var, var, tmp);
575 tcg_temp_free_i32(tmp);
576 }
577 }
578 }
579
580 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
581 TCGv_i32 shift, int flags)
582 {
583 if (flags) {
584 switch (shiftop) {
585 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
586 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
587 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
588 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
589 }
590 } else {
591 switch (shiftop) {
592 case 0:
593 gen_shl(var, var, shift);
594 break;
595 case 1:
596 gen_shr(var, var, shift);
597 break;
598 case 2:
599 gen_sar(var, var, shift);
600 break;
601 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
602 tcg_gen_rotr_i32(var, var, shift); break;
603 }
604 }
605 tcg_temp_free_i32(shift);
606 }
607
608 #define PAS_OP(pfx) \
609 switch (op2) { \
610 case 0: gen_pas_helper(glue(pfx,add16)); break; \
611 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
612 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
613 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
614 case 4: gen_pas_helper(glue(pfx,add8)); break; \
615 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
616 }
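/* Generate an ARM-encoded parallel add/subtract: op1 selects the variant
   (signed/unsigned GE-setting, saturating, or halving) and op2 the
   operation; the GE-setting variants take a pointer to the GE flags. */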
617 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
618 {
619 TCGv_ptr tmp;
620
621 switch (op1) {
622 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
623 case 1:
624 tmp = tcg_temp_new_ptr();
625 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
626 PAS_OP(s)
627 tcg_temp_free_ptr(tmp);
628 break;
629 case 5:
630 tmp = tcg_temp_new_ptr();
631 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
632 PAS_OP(u)
633 tcg_temp_free_ptr(tmp);
634 break;
635 #undef gen_pas_helper
636 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
637 case 2:
638 PAS_OP(q);
639 break;
640 case 3:
641 PAS_OP(sh);
642 break;
643 case 6:
644 PAS_OP(uq);
645 break;
646 case 7:
647 PAS_OP(uh);
648 break;
649 #undef gen_pas_helper
650 }
651 }
652 #undef PAS_OP
653
654 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
655 #define PAS_OP(pfx) \
656 switch (op1) { \
657 case 0: gen_pas_helper(glue(pfx,add8)); break; \
658 case 1: gen_pas_helper(glue(pfx,add16)); break; \
659 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
660 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
661 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
662 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
663 }
664 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
665 {
666 TCGv_ptr tmp;
667
668 switch (op2) {
669 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
670 case 0:
671 tmp = tcg_temp_new_ptr();
672 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
673 PAS_OP(s)
674 tcg_temp_free_ptr(tmp);
675 break;
676 case 4:
677 tmp = tcg_temp_new_ptr();
678 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
679 PAS_OP(u)
680 tcg_temp_free_ptr(tmp);
681 break;
682 #undef gen_pas_helper
683 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
684 case 1:
685 PAS_OP(q);
686 break;
687 case 2:
688 PAS_OP(sh);
689 break;
690 case 5:
691 PAS_OP(uq);
692 break;
693 case 6:
694 PAS_OP(uh);
695 break;
696 #undef gen_pas_helper
697 }
698 }
699 #undef PAS_OP
700
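/* Emit a branch to 'label' if condition code 'cc' holds, testing the
   cached NZCV flag variables. */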
701 static void gen_test_cc(int cc, int label)
702 {
703 TCGv_i32 tmp;
704 int inv;
705
706 switch (cc) {
707 case 0: /* eq: Z */
708 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
709 break;
710 case 1: /* ne: !Z */
711 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
712 break;
713 case 2: /* cs: C */
714 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
715 break;
716 case 3: /* cc: !C */
717 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
718 break;
719 case 4: /* mi: N */
720 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
721 break;
722 case 5: /* pl: !N */
723 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
724 break;
725 case 6: /* vs: V */
726 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
727 break;
728 case 7: /* vc: !V */
729 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
730 break;
731 case 8: /* hi: C && !Z */
732 inv = gen_new_label();
733 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
734 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
735 gen_set_label(inv);
736 break;
737 case 9: /* ls: !C || Z */
738 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
739 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
740 break;
741 case 10: /* ge: N == V -> N ^ V == 0 */
742 tmp = tcg_temp_new_i32();
743 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
744 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
745 tcg_temp_free_i32(tmp);
746 break;
747 case 11: /* lt: N != V -> N ^ V != 0 */
748 tmp = tcg_temp_new_i32();
749 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
750 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
751 tcg_temp_free_i32(tmp);
752 break;
753 case 12: /* gt: !Z && N == V */
754 inv = gen_new_label();
755 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
756 tmp = tcg_temp_new_i32();
757 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
758 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
759 tcg_temp_free_i32(tmp);
760 gen_set_label(inv);
761 break;
762 case 13: /* le: Z || N != V */
763 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
764 tmp = tcg_temp_new_i32();
765 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
766 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
767 tcg_temp_free_i32(tmp);
768 break;
769 default:
770 fprintf(stderr, "Bad condition code 0x%x\n", cc);
771 abort();
772 }
773 }
774
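/* Indexed by data processing opcode: 1 for the logical ops, which set
   only N and Z from their result. */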
775 static const uint8_t table_logic_cc[16] = {
776 1, /* and */
777 1, /* xor */
778 0, /* sub */
779 0, /* rsb */
780 0, /* add */
781 0, /* adc */
782 0, /* sbc */
783 0, /* rsc */
784 1, /* andl */
785 1, /* xorl */
786 0, /* cmp */
787 0, /* cmn */
788 1, /* orr */
789 1, /* mov */
790 1, /* bic */
791 1, /* mvn */
792 };
793
794 /* Set PC and Thumb state from an immediate address. */
795 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
796 {
797 TCGv_i32 tmp;
798
799 s->is_jmp = DISAS_UPDATE;
800 if (s->thumb != (addr & 1)) {
801 tmp = tcg_temp_new_i32();
802 tcg_gen_movi_i32(tmp, addr & 1);
803 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
804 tcg_temp_free_i32(tmp);
805 }
806 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
807 }
808
809 /* Set PC and Thumb state from var. var is marked as dead. */
810 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
811 {
812 s->is_jmp = DISAS_UPDATE;
813 tcg_gen_andi_i32(cpu_R[15], var, ~1);
814 tcg_gen_andi_i32(var, var, 1);
815 store_cpu_field(var, thumb);
816 }
817
818 /* Variant of store_reg which uses branch&exchange logic when storing
819 to r15 in ARM architecture v7 and above. The source must be a temporary
820 and will be marked as dead. */
821 static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
822 int reg, TCGv_i32 var)
823 {
824 if (reg == 15 && ENABLE_ARCH_7) {
825 gen_bx(s, var);
826 } else {
827 store_reg(s, reg, var);
828 }
829 }
830
831 /* Variant of store_reg which uses branch&exchange logic when storing
832 * to r15 in ARM architecture v5T and above. This is used for storing
833 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
834 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
835 static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
836 int reg, TCGv_i32 var)
837 {
838 if (reg == 15 && ENABLE_ARCH_5) {
839 gen_bx(s, var);
840 } else {
841 store_reg(s, reg, var);
842 }
843 }
844
845 static inline void gen_set_pc_im(uint32_t val)
846 {
847 tcg_gen_movi_i32(cpu_R[15], val);
848 }
849
850 /* Force a TB lookup after an instruction that changes the CPU state. */
851 static inline void gen_lookup_tb(DisasContext *s)
852 {
853 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
854 s->is_jmp = DISAS_UPDATE;
855 }
856
857 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
858 TCGv_i32 var)
859 {
860 int val, rm, shift, shiftop;
861 TCGv_i32 offset;
862
863 if (!(insn & (1 << 25))) {
864 /* immediate */
865 val = insn & 0xfff;
866 if (!(insn & (1 << 23)))
867 val = -val;
868 if (val != 0)
869 tcg_gen_addi_i32(var, var, val);
870 } else {
871 /* shift/register */
872 rm = (insn) & 0xf;
873 shift = (insn >> 7) & 0x1f;
874 shiftop = (insn >> 5) & 3;
875 offset = load_reg(s, rm);
876 gen_arm_shift_im(offset, shiftop, shift, 0);
877 if (!(insn & (1 << 23)))
878 tcg_gen_sub_i32(var, var, offset);
879 else
880 tcg_gen_add_i32(var, var, offset);
881 tcg_temp_free_i32(offset);
882 }
883 }
884
885 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
886 int extra, TCGv_i32 var)
887 {
888 int val, rm;
889 TCGv_i32 offset;
890
891 if (insn & (1 << 22)) {
892 /* immediate */
893 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
894 if (!(insn & (1 << 23)))
895 val = -val;
896 val += extra;
897 if (val != 0)
898 tcg_gen_addi_i32(var, var, val);
899 } else {
900 /* register */
901 if (extra)
902 tcg_gen_addi_i32(var, var, extra);
903 rm = (insn) & 0xf;
904 offset = load_reg(s, rm);
905 if (!(insn & (1 << 23)))
906 tcg_gen_sub_i32(var, var, offset);
907 else
908 tcg_gen_add_i32(var, var, offset);
909 tcg_temp_free_i32(offset);
910 }
911 }
912
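/* Return a pointer to the float_status to use: Neon operations use the
   standard FP status, everything else uses the normal VFP status. */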
913 static TCGv_ptr get_fpstatus_ptr(int neon)
914 {
915 TCGv_ptr statusptr = tcg_temp_new_ptr();
916 int offset;
917 if (neon) {
918 offset = offsetof(CPUARMState, vfp.standard_fp_status);
919 } else {
920 offset = offsetof(CPUARMState, vfp.fp_status);
921 }
922 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
923 return statusptr;
924 }
925
926 #define VFP_OP2(name) \
927 static inline void gen_vfp_##name(int dp) \
928 { \
929 TCGv_ptr fpst = get_fpstatus_ptr(0); \
930 if (dp) { \
931 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
932 } else { \
933 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
934 } \
935 tcg_temp_free_ptr(fpst); \
936 }
937
938 VFP_OP2(add)
939 VFP_OP2(sub)
940 VFP_OP2(mul)
941 VFP_OP2(div)
942
943 #undef VFP_OP2
944
945 static inline void gen_vfp_F1_mul(int dp)
946 {
947 /* Like gen_vfp_mul() but put result in F1 */
948 TCGv_ptr fpst = get_fpstatus_ptr(0);
949 if (dp) {
950 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
951 } else {
952 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
953 }
954 tcg_temp_free_ptr(fpst);
955 }
956
957 static inline void gen_vfp_F1_neg(int dp)
958 {
959 /* Like gen_vfp_neg() but put result in F1 */
960 if (dp) {
961 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
962 } else {
963 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
964 }
965 }
966
967 static inline void gen_vfp_abs(int dp)
968 {
969 if (dp)
970 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
971 else
972 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
973 }
974
975 static inline void gen_vfp_neg(int dp)
976 {
977 if (dp)
978 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
979 else
980 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
981 }
982
983 static inline void gen_vfp_sqrt(int dp)
984 {
985 if (dp)
986 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
987 else
988 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
989 }
990
991 static inline void gen_vfp_cmp(int dp)
992 {
993 if (dp)
994 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
995 else
996 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
997 }
998
999 static inline void gen_vfp_cmpe(int dp)
1000 {
1001 if (dp)
1002 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1003 else
1004 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1005 }
1006
1007 static inline void gen_vfp_F1_ld0(int dp)
1008 {
1009 if (dp)
1010 tcg_gen_movi_i64(cpu_F1d, 0);
1011 else
1012 tcg_gen_movi_i32(cpu_F1s, 0);
1013 }
1014
1015 #define VFP_GEN_ITOF(name) \
1016 static inline void gen_vfp_##name(int dp, int neon) \
1017 { \
1018 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1019 if (dp) { \
1020 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1021 } else { \
1022 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1023 } \
1024 tcg_temp_free_ptr(statusptr); \
1025 }
1026
1027 VFP_GEN_ITOF(uito)
1028 VFP_GEN_ITOF(sito)
1029 #undef VFP_GEN_ITOF
1030
1031 #define VFP_GEN_FTOI(name) \
1032 static inline void gen_vfp_##name(int dp, int neon) \
1033 { \
1034 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1035 if (dp) { \
1036 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1037 } else { \
1038 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1039 } \
1040 tcg_temp_free_ptr(statusptr); \
1041 }
1042
1043 VFP_GEN_FTOI(toui)
1044 VFP_GEN_FTOI(touiz)
1045 VFP_GEN_FTOI(tosi)
1046 VFP_GEN_FTOI(tosiz)
1047 #undef VFP_GEN_FTOI
1048
1049 #define VFP_GEN_FIX(name) \
1050 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1051 { \
1052 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1053 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1054 if (dp) { \
1055 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1056 } else { \
1057 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1058 } \
1059 tcg_temp_free_i32(tmp_shift); \
1060 tcg_temp_free_ptr(statusptr); \
1061 }
1062 VFP_GEN_FIX(tosh)
1063 VFP_GEN_FIX(tosl)
1064 VFP_GEN_FIX(touh)
1065 VFP_GEN_FIX(toul)
1066 VFP_GEN_FIX(shto)
1067 VFP_GEN_FIX(slto)
1068 VFP_GEN_FIX(uhto)
1069 VFP_GEN_FIX(ulto)
1070 #undef VFP_GEN_FIX
1071
1072 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1073 {
1074 if (dp)
1075 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1076 else
1077 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1078 }
1079
1080 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1081 {
1082 if (dp)
1083 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1084 else
1085 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1086 }
1087
1088 static inline long
1089 vfp_reg_offset (int dp, int reg)
1090 {
1091 if (dp)
1092 return offsetof(CPUARMState, vfp.regs[reg]);
1093 else if (reg & 1) {
1094 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1095 + offsetof(CPU_DoubleU, l.upper);
1096 } else {
1097 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1098 + offsetof(CPU_DoubleU, l.lower);
1099 }
1100 }
1101
1102 /* Return the offset of a 32-bit piece of a NEON register.
1103    Zero is the least significant end of the register. */
1104 static inline long
1105 neon_reg_offset (int reg, int n)
1106 {
1107 int sreg;
1108 sreg = reg * 2 + n;
1109 return vfp_reg_offset(0, sreg);
1110 }
1111
1112 static TCGv_i32 neon_load_reg(int reg, int pass)
1113 {
1114 TCGv_i32 tmp = tcg_temp_new_i32();
1115 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1116 return tmp;
1117 }
1118
1119 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1120 {
1121 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1122 tcg_temp_free_i32(var);
1123 }
1124
1125 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1126 {
1127 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1128 }
1129
1130 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1131 {
1132 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1133 }
1134
1135 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1136 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1137 #define tcg_gen_st_f32 tcg_gen_st_i32
1138 #define tcg_gen_st_f64 tcg_gen_st_i64
1139
1140 static inline void gen_mov_F0_vreg(int dp, int reg)
1141 {
1142 if (dp)
1143 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1144 else
1145 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1146 }
1147
1148 static inline void gen_mov_F1_vreg(int dp, int reg)
1149 {
1150 if (dp)
1151 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1152 else
1153 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1154 }
1155
1156 static inline void gen_mov_vreg_F0(int dp, int reg)
1157 {
1158 if (dp)
1159 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1160 else
1161 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1162 }
1163
1164 #define ARM_CP_RW_BIT (1 << 20)
1165
1166 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1167 {
1168 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1169 }
1170
1171 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1172 {
1173 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1174 }
1175
1176 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1177 {
1178 TCGv_i32 var = tcg_temp_new_i32();
1179 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1180 return var;
1181 }
1182
1183 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1184 {
1185 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1186 tcg_temp_free_i32(var);
1187 }
1188
1189 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1190 {
1191 iwmmxt_store_reg(cpu_M0, rn);
1192 }
1193
1194 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1195 {
1196 iwmmxt_load_reg(cpu_M0, rn);
1197 }
1198
1199 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1200 {
1201 iwmmxt_load_reg(cpu_V1, rn);
1202 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1203 }
1204
1205 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1206 {
1207 iwmmxt_load_reg(cpu_V1, rn);
1208 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1209 }
1210
1211 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1212 {
1213 iwmmxt_load_reg(cpu_V1, rn);
1214 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1215 }
1216
1217 #define IWMMXT_OP(name) \
1218 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1219 { \
1220 iwmmxt_load_reg(cpu_V1, rn); \
1221 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1222 }
1223
1224 #define IWMMXT_OP_ENV(name) \
1225 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1226 { \
1227 iwmmxt_load_reg(cpu_V1, rn); \
1228 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1229 }
1230
1231 #define IWMMXT_OP_ENV_SIZE(name) \
1232 IWMMXT_OP_ENV(name##b) \
1233 IWMMXT_OP_ENV(name##w) \
1234 IWMMXT_OP_ENV(name##l)
1235
1236 #define IWMMXT_OP_ENV1(name) \
1237 static inline void gen_op_iwmmxt_##name##_M0(void) \
1238 { \
1239 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1240 }
1241
1242 IWMMXT_OP(maddsq)
1243 IWMMXT_OP(madduq)
1244 IWMMXT_OP(sadb)
1245 IWMMXT_OP(sadw)
1246 IWMMXT_OP(mulslw)
1247 IWMMXT_OP(mulshw)
1248 IWMMXT_OP(mululw)
1249 IWMMXT_OP(muluhw)
1250 IWMMXT_OP(macsw)
1251 IWMMXT_OP(macuw)
1252
1253 IWMMXT_OP_ENV_SIZE(unpackl)
1254 IWMMXT_OP_ENV_SIZE(unpackh)
1255
1256 IWMMXT_OP_ENV1(unpacklub)
1257 IWMMXT_OP_ENV1(unpackluw)
1258 IWMMXT_OP_ENV1(unpacklul)
1259 IWMMXT_OP_ENV1(unpackhub)
1260 IWMMXT_OP_ENV1(unpackhuw)
1261 IWMMXT_OP_ENV1(unpackhul)
1262 IWMMXT_OP_ENV1(unpacklsb)
1263 IWMMXT_OP_ENV1(unpacklsw)
1264 IWMMXT_OP_ENV1(unpacklsl)
1265 IWMMXT_OP_ENV1(unpackhsb)
1266 IWMMXT_OP_ENV1(unpackhsw)
1267 IWMMXT_OP_ENV1(unpackhsl)
1268
1269 IWMMXT_OP_ENV_SIZE(cmpeq)
1270 IWMMXT_OP_ENV_SIZE(cmpgtu)
1271 IWMMXT_OP_ENV_SIZE(cmpgts)
1272
1273 IWMMXT_OP_ENV_SIZE(mins)
1274 IWMMXT_OP_ENV_SIZE(minu)
1275 IWMMXT_OP_ENV_SIZE(maxs)
1276 IWMMXT_OP_ENV_SIZE(maxu)
1277
1278 IWMMXT_OP_ENV_SIZE(subn)
1279 IWMMXT_OP_ENV_SIZE(addn)
1280 IWMMXT_OP_ENV_SIZE(subu)
1281 IWMMXT_OP_ENV_SIZE(addu)
1282 IWMMXT_OP_ENV_SIZE(subs)
1283 IWMMXT_OP_ENV_SIZE(adds)
1284
1285 IWMMXT_OP_ENV(avgb0)
1286 IWMMXT_OP_ENV(avgb1)
1287 IWMMXT_OP_ENV(avgw0)
1288 IWMMXT_OP_ENV(avgw1)
1289
1290 IWMMXT_OP(msadb)
1291
1292 IWMMXT_OP_ENV(packuw)
1293 IWMMXT_OP_ENV(packul)
1294 IWMMXT_OP_ENV(packuq)
1295 IWMMXT_OP_ENV(packsw)
1296 IWMMXT_OP_ENV(packsl)
1297 IWMMXT_OP_ENV(packsq)
1298
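/* Record in wCon that the wMMX data registers (MUP bit) or control
   registers (CUP bit) have been updated. */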
1299 static void gen_op_iwmmxt_set_mup(void)
1300 {
1301 TCGv_i32 tmp;
1302 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1303 tcg_gen_ori_i32(tmp, tmp, 2);
1304 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1305 }
1306
1307 static void gen_op_iwmmxt_set_cup(void)
1308 {
1309 TCGv_i32 tmp;
1310 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1311 tcg_gen_ori_i32(tmp, tmp, 1);
1312 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1313 }
1314
1315 static void gen_op_iwmmxt_setpsr_nz(void)
1316 {
1317 TCGv_i32 tmp = tcg_temp_new_i32();
1318 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1319 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1320 }
1321
1322 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1323 {
1324 iwmmxt_load_reg(cpu_V1, rn);
1325 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1326 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1327 }
1328
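/* Compute the address for an iwMMXt load/store, handling pre/post indexing
   and base register writeback. Returns nonzero for an invalid addressing
   mode. */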
1329 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1330 TCGv_i32 dest)
1331 {
1332 int rd;
1333 uint32_t offset;
1334 TCGv_i32 tmp;
1335
1336 rd = (insn >> 16) & 0xf;
1337 tmp = load_reg(s, rd);
1338
1339 offset = (insn & 0xff) << ((insn >> 7) & 2);
1340 if (insn & (1 << 24)) {
1341 /* Pre indexed */
1342 if (insn & (1 << 23))
1343 tcg_gen_addi_i32(tmp, tmp, offset);
1344 else
1345 tcg_gen_addi_i32(tmp, tmp, -offset);
1346 tcg_gen_mov_i32(dest, tmp);
1347 if (insn & (1 << 21))
1348 store_reg(s, rd, tmp);
1349 else
1350 tcg_temp_free_i32(tmp);
1351 } else if (insn & (1 << 21)) {
1352 /* Post indexed */
1353 tcg_gen_mov_i32(dest, tmp);
1354 if (insn & (1 << 23))
1355 tcg_gen_addi_i32(tmp, tmp, offset);
1356 else
1357 tcg_gen_addi_i32(tmp, tmp, -offset);
1358 store_reg(s, rd, tmp);
1359 } else if (!(insn & (1 << 23)))
1360 return 1;
1361 return 0;
1362 }
1363
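/* Fetch an iwMMXt shift amount, either from a wCGR control register or
   from the low bits of wRd, and mask it with 'mask'. Returns nonzero if
   the encoding is invalid. */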
1364 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1365 {
1366 int rd = (insn >> 0) & 0xf;
1367 TCGv_i32 tmp;
1368
1369 if (insn & (1 << 8)) {
1370 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1371 return 1;
1372 } else {
1373 tmp = iwmmxt_load_creg(rd);
1374 }
1375 } else {
1376 tmp = tcg_temp_new_i32();
1377 iwmmxt_load_reg(cpu_V0, rd);
1378 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1379 }
1380 tcg_gen_andi_i32(tmp, tmp, mask);
1381 tcg_gen_mov_i32(dest, tmp);
1382 tcg_temp_free_i32(tmp);
1383 return 0;
1384 }
1385
1386 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1387    (i.e. an undefined instruction). */
1388 static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
1389 {
1390 int rd, wrd;
1391 int rdhi, rdlo, rd0, rd1, i;
1392 TCGv_i32 addr;
1393 TCGv_i32 tmp, tmp2, tmp3;
1394
1395 if ((insn & 0x0e000e00) == 0x0c000000) {
1396 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1397 wrd = insn & 0xf;
1398 rdlo = (insn >> 12) & 0xf;
1399 rdhi = (insn >> 16) & 0xf;
1400 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1401 iwmmxt_load_reg(cpu_V0, wrd);
1402 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1403 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1404 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1405 } else { /* TMCRR */
1406 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1407 iwmmxt_store_reg(cpu_V0, wrd);
1408 gen_op_iwmmxt_set_mup();
1409 }
1410 return 0;
1411 }
1412
1413 wrd = (insn >> 12) & 0xf;
1414 addr = tcg_temp_new_i32();
1415 if (gen_iwmmxt_address(s, insn, addr)) {
1416 tcg_temp_free_i32(addr);
1417 return 1;
1418 }
1419 if (insn & ARM_CP_RW_BIT) {
1420 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1421 tmp = tcg_temp_new_i32();
1422 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1423 iwmmxt_store_creg(wrd, tmp);
1424 } else {
1425 i = 1;
1426 if (insn & (1 << 8)) {
1427 if (insn & (1 << 22)) { /* WLDRD */
1428 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1429 i = 0;
1430 } else { /* WLDRW wRd */
1431 tmp = tcg_temp_new_i32();
1432 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1433 }
1434 } else {
1435 tmp = tcg_temp_new_i32();
1436 if (insn & (1 << 22)) { /* WLDRH */
1437 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
1438 } else { /* WLDRB */
1439 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
1440 }
1441 }
1442 if (i) {
1443 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1444 tcg_temp_free_i32(tmp);
1445 }
1446 gen_op_iwmmxt_movq_wRn_M0(wrd);
1447 }
1448 } else {
1449 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1450 tmp = iwmmxt_load_creg(wrd);
1451 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
1452 } else {
1453 gen_op_iwmmxt_movq_M0_wRn(wrd);
1454 tmp = tcg_temp_new_i32();
1455 if (insn & (1 << 8)) {
1456 if (insn & (1 << 22)) { /* WSTRD */
1457 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1458 } else { /* WSTRW wRd */
1459 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1460 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
1461 }
1462 } else {
1463 if (insn & (1 << 22)) { /* WSTRH */
1464 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1465 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
1466 } else { /* WSTRB */
1467 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1468 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
1469 }
1470 }
1471 }
1472 tcg_temp_free_i32(tmp);
1473 }
1474 tcg_temp_free_i32(addr);
1475 return 0;
1476 }
1477
1478 if ((insn & 0x0f000000) != 0x0e000000)
1479 return 1;
1480
1481 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1482 case 0x000: /* WOR */
1483 wrd = (insn >> 12) & 0xf;
1484 rd0 = (insn >> 0) & 0xf;
1485 rd1 = (insn >> 16) & 0xf;
1486 gen_op_iwmmxt_movq_M0_wRn(rd0);
1487 gen_op_iwmmxt_orq_M0_wRn(rd1);
1488 gen_op_iwmmxt_setpsr_nz();
1489 gen_op_iwmmxt_movq_wRn_M0(wrd);
1490 gen_op_iwmmxt_set_mup();
1491 gen_op_iwmmxt_set_cup();
1492 break;
1493 case 0x011: /* TMCR */
1494 if (insn & 0xf)
1495 return 1;
1496 rd = (insn >> 12) & 0xf;
1497 wrd = (insn >> 16) & 0xf;
1498 switch (wrd) {
1499 case ARM_IWMMXT_wCID:
1500 case ARM_IWMMXT_wCASF:
1501 break;
1502 case ARM_IWMMXT_wCon:
1503 gen_op_iwmmxt_set_cup();
1504 /* Fall through. */
1505 case ARM_IWMMXT_wCSSF:
1506 tmp = iwmmxt_load_creg(wrd);
1507 tmp2 = load_reg(s, rd);
1508 tcg_gen_andc_i32(tmp, tmp, tmp2);
1509 tcg_temp_free_i32(tmp2);
1510 iwmmxt_store_creg(wrd, tmp);
1511 break;
1512 case ARM_IWMMXT_wCGR0:
1513 case ARM_IWMMXT_wCGR1:
1514 case ARM_IWMMXT_wCGR2:
1515 case ARM_IWMMXT_wCGR3:
1516 gen_op_iwmmxt_set_cup();
1517 tmp = load_reg(s, rd);
1518 iwmmxt_store_creg(wrd, tmp);
1519 break;
1520 default:
1521 return 1;
1522 }
1523 break;
1524 case 0x100: /* WXOR */
1525 wrd = (insn >> 12) & 0xf;
1526 rd0 = (insn >> 0) & 0xf;
1527 rd1 = (insn >> 16) & 0xf;
1528 gen_op_iwmmxt_movq_M0_wRn(rd0);
1529 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1530 gen_op_iwmmxt_setpsr_nz();
1531 gen_op_iwmmxt_movq_wRn_M0(wrd);
1532 gen_op_iwmmxt_set_mup();
1533 gen_op_iwmmxt_set_cup();
1534 break;
1535 case 0x111: /* TMRC */
1536 if (insn & 0xf)
1537 return 1;
1538 rd = (insn >> 12) & 0xf;
1539 wrd = (insn >> 16) & 0xf;
1540 tmp = iwmmxt_load_creg(wrd);
1541 store_reg(s, rd, tmp);
1542 break;
1543 case 0x300: /* WANDN */
1544 wrd = (insn >> 12) & 0xf;
1545 rd0 = (insn >> 0) & 0xf;
1546 rd1 = (insn >> 16) & 0xf;
1547 gen_op_iwmmxt_movq_M0_wRn(rd0);
1548 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1549 gen_op_iwmmxt_andq_M0_wRn(rd1);
1550 gen_op_iwmmxt_setpsr_nz();
1551 gen_op_iwmmxt_movq_wRn_M0(wrd);
1552 gen_op_iwmmxt_set_mup();
1553 gen_op_iwmmxt_set_cup();
1554 break;
1555 case 0x200: /* WAND */
1556 wrd = (insn >> 12) & 0xf;
1557 rd0 = (insn >> 0) & 0xf;
1558 rd1 = (insn >> 16) & 0xf;
1559 gen_op_iwmmxt_movq_M0_wRn(rd0);
1560 gen_op_iwmmxt_andq_M0_wRn(rd1);
1561 gen_op_iwmmxt_setpsr_nz();
1562 gen_op_iwmmxt_movq_wRn_M0(wrd);
1563 gen_op_iwmmxt_set_mup();
1564 gen_op_iwmmxt_set_cup();
1565 break;
1566 case 0x810: case 0xa10: /* WMADD */
1567 wrd = (insn >> 12) & 0xf;
1568 rd0 = (insn >> 0) & 0xf;
1569 rd1 = (insn >> 16) & 0xf;
1570 gen_op_iwmmxt_movq_M0_wRn(rd0);
1571 if (insn & (1 << 21))
1572 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1573 else
1574 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1575 gen_op_iwmmxt_movq_wRn_M0(wrd);
1576 gen_op_iwmmxt_set_mup();
1577 break;
1578 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1579 wrd = (insn >> 12) & 0xf;
1580 rd0 = (insn >> 16) & 0xf;
1581 rd1 = (insn >> 0) & 0xf;
1582 gen_op_iwmmxt_movq_M0_wRn(rd0);
1583 switch ((insn >> 22) & 3) {
1584 case 0:
1585 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1586 break;
1587 case 1:
1588 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1589 break;
1590 case 2:
1591 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1592 break;
1593 case 3:
1594 return 1;
1595 }
1596 gen_op_iwmmxt_movq_wRn_M0(wrd);
1597 gen_op_iwmmxt_set_mup();
1598 gen_op_iwmmxt_set_cup();
1599 break;
1600 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1601 wrd = (insn >> 12) & 0xf;
1602 rd0 = (insn >> 16) & 0xf;
1603 rd1 = (insn >> 0) & 0xf;
1604 gen_op_iwmmxt_movq_M0_wRn(rd0);
1605 switch ((insn >> 22) & 3) {
1606 case 0:
1607 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1608 break;
1609 case 1:
1610 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1611 break;
1612 case 2:
1613 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1614 break;
1615 case 3:
1616 return 1;
1617 }
1618 gen_op_iwmmxt_movq_wRn_M0(wrd);
1619 gen_op_iwmmxt_set_mup();
1620 gen_op_iwmmxt_set_cup();
1621 break;
1622 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1623 wrd = (insn >> 12) & 0xf;
1624 rd0 = (insn >> 16) & 0xf;
1625 rd1 = (insn >> 0) & 0xf;
1626 gen_op_iwmmxt_movq_M0_wRn(rd0);
1627 if (insn & (1 << 22))
1628 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1629 else
1630 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1631 if (!(insn & (1 << 20)))
1632 gen_op_iwmmxt_addl_M0_wRn(wrd);
1633 gen_op_iwmmxt_movq_wRn_M0(wrd);
1634 gen_op_iwmmxt_set_mup();
1635 break;
1636 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1637 wrd = (insn >> 12) & 0xf;
1638 rd0 = (insn >> 16) & 0xf;
1639 rd1 = (insn >> 0) & 0xf;
1640 gen_op_iwmmxt_movq_M0_wRn(rd0);
1641 if (insn & (1 << 21)) {
1642 if (insn & (1 << 20))
1643 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1644 else
1645 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1646 } else {
1647 if (insn & (1 << 20))
1648 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1649 else
1650 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1651 }
1652 gen_op_iwmmxt_movq_wRn_M0(wrd);
1653 gen_op_iwmmxt_set_mup();
1654 break;
1655 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1656 wrd = (insn >> 12) & 0xf;
1657 rd0 = (insn >> 16) & 0xf;
1658 rd1 = (insn >> 0) & 0xf;
1659 gen_op_iwmmxt_movq_M0_wRn(rd0);
1660 if (insn & (1 << 21))
1661 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1662 else
1663 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1664 if (!(insn & (1 << 20))) {
1665 iwmmxt_load_reg(cpu_V1, wrd);
1666 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1667 }
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 break;
1671 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1672 wrd = (insn >> 12) & 0xf;
1673 rd0 = (insn >> 16) & 0xf;
1674 rd1 = (insn >> 0) & 0xf;
1675 gen_op_iwmmxt_movq_M0_wRn(rd0);
1676 switch ((insn >> 22) & 3) {
1677 case 0:
1678 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1679 break;
1680 case 1:
1681 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1682 break;
1683 case 2:
1684 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1685 break;
1686 case 3:
1687 return 1;
1688 }
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 gen_op_iwmmxt_set_cup();
1692 break;
1693 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1694 wrd = (insn >> 12) & 0xf;
1695 rd0 = (insn >> 16) & 0xf;
1696 rd1 = (insn >> 0) & 0xf;
1697 gen_op_iwmmxt_movq_M0_wRn(rd0);
1698 if (insn & (1 << 22)) {
1699 if (insn & (1 << 20))
1700 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1701 else
1702 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1703 } else {
1704 if (insn & (1 << 20))
1705 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1706 else
1707 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1708 }
1709 gen_op_iwmmxt_movq_wRn_M0(wrd);
1710 gen_op_iwmmxt_set_mup();
1711 gen_op_iwmmxt_set_cup();
1712 break;
1713 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1714 wrd = (insn >> 12) & 0xf;
1715 rd0 = (insn >> 16) & 0xf;
1716 rd1 = (insn >> 0) & 0xf;
1717 gen_op_iwmmxt_movq_M0_wRn(rd0);
1718 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1719 tcg_gen_andi_i32(tmp, tmp, 7);
1720 iwmmxt_load_reg(cpu_V1, rd1);
1721 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1722 tcg_temp_free_i32(tmp);
1723 gen_op_iwmmxt_movq_wRn_M0(wrd);
1724 gen_op_iwmmxt_set_mup();
1725 break;
1726 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1727 if (((insn >> 6) & 3) == 3)
1728 return 1;
1729 rd = (insn >> 12) & 0xf;
1730 wrd = (insn >> 16) & 0xf;
1731 tmp = load_reg(s, rd);
1732 gen_op_iwmmxt_movq_M0_wRn(wrd);
1733 switch ((insn >> 6) & 3) {
1734 case 0:
1735 tmp2 = tcg_const_i32(0xff);
1736 tmp3 = tcg_const_i32((insn & 7) << 3);
1737 break;
1738 case 1:
1739 tmp2 = tcg_const_i32(0xffff);
1740 tmp3 = tcg_const_i32((insn & 3) << 4);
1741 break;
1742 case 2:
1743 tmp2 = tcg_const_i32(0xffffffff);
1744 tmp3 = tcg_const_i32((insn & 1) << 5);
1745 break;
1746 default:
1747 TCGV_UNUSED_I32(tmp2);
1748 TCGV_UNUSED_I32(tmp3);
1749 }
1750 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1751 tcg_temp_free_i32(tmp3);
1752 tcg_temp_free_i32(tmp2);
1753 tcg_temp_free_i32(tmp);
1754 gen_op_iwmmxt_movq_wRn_M0(wrd);
1755 gen_op_iwmmxt_set_mup();
1756 break;
1757 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1758 rd = (insn >> 12) & 0xf;
1759 wrd = (insn >> 16) & 0xf;
1760 if (rd == 15 || ((insn >> 22) & 3) == 3)
1761 return 1;
1762 gen_op_iwmmxt_movq_M0_wRn(wrd);
1763 tmp = tcg_temp_new_i32();
1764 switch ((insn >> 22) & 3) {
1765 case 0:
1766 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1767 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1768 if (insn & 8) {
1769 tcg_gen_ext8s_i32(tmp, tmp);
1770 } else {
1771 tcg_gen_andi_i32(tmp, tmp, 0xff);
1772 }
1773 break;
1774 case 1:
1775 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1776 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1777 if (insn & 8) {
1778 tcg_gen_ext16s_i32(tmp, tmp);
1779 } else {
1780 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1781 }
1782 break;
1783 case 2:
1784 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1785 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1786 break;
1787 }
1788 store_reg(s, rd, tmp);
1789 break;
1790 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1791 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1792 return 1;
1793 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1794 switch ((insn >> 22) & 3) {
1795 case 0:
1796 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1797 break;
1798 case 1:
1799 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1800 break;
1801 case 2:
1802 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1803 break;
1804 }
1805 tcg_gen_shli_i32(tmp, tmp, 28);
1806 gen_set_nzcv(tmp);
1807 tcg_temp_free_i32(tmp);
1808 break;
1809 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1810 if (((insn >> 6) & 3) == 3)
1811 return 1;
1812 rd = (insn >> 12) & 0xf;
1813 wrd = (insn >> 16) & 0xf;
1814 tmp = load_reg(s, rd);
1815 switch ((insn >> 6) & 3) {
1816 case 0:
1817 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1818 break;
1819 case 1:
1820 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1821 break;
1822 case 2:
1823 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1824 break;
1825 }
1826 tcg_temp_free_i32(tmp);
1827 gen_op_iwmmxt_movq_wRn_M0(wrd);
1828 gen_op_iwmmxt_set_mup();
1829 break;
1830 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1831 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1832 return 1;
1833 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1834 tmp2 = tcg_temp_new_i32();
1835 tcg_gen_mov_i32(tmp2, tmp);
1836 switch ((insn >> 22) & 3) {
1837 case 0:
1838 for (i = 0; i < 7; i ++) {
1839 tcg_gen_shli_i32(tmp2, tmp2, 4);
1840 tcg_gen_and_i32(tmp, tmp, tmp2);
1841 }
1842 break;
1843 case 1:
1844 for (i = 0; i < 3; i ++) {
1845 tcg_gen_shli_i32(tmp2, tmp2, 8);
1846 tcg_gen_and_i32(tmp, tmp, tmp2);
1847 }
1848 break;
1849 case 2:
1850 tcg_gen_shli_i32(tmp2, tmp2, 16);
1851 tcg_gen_and_i32(tmp, tmp, tmp2);
1852 break;
1853 }
1854 gen_set_nzcv(tmp);
1855 tcg_temp_free_i32(tmp2);
1856 tcg_temp_free_i32(tmp);
1857 break;
1858 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1859 wrd = (insn >> 12) & 0xf;
1860 rd0 = (insn >> 16) & 0xf;
1861 gen_op_iwmmxt_movq_M0_wRn(rd0);
1862 switch ((insn >> 22) & 3) {
1863 case 0:
1864 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1865 break;
1866 case 1:
1867 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1868 break;
1869 case 2:
1870 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1871 break;
1872 case 3:
1873 return 1;
1874 }
1875 gen_op_iwmmxt_movq_wRn_M0(wrd);
1876 gen_op_iwmmxt_set_mup();
1877 break;
1878 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1879 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1880 return 1;
1881 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1882 tmp2 = tcg_temp_new_i32();
1883 tcg_gen_mov_i32(tmp2, tmp);
1884 switch ((insn >> 22) & 3) {
1885 case 0:
1886 for (i = 0; i < 7; i ++) {
1887 tcg_gen_shli_i32(tmp2, tmp2, 4);
1888 tcg_gen_or_i32(tmp, tmp, tmp2);
1889 }
1890 break;
1891 case 1:
1892 for (i = 0; i < 3; i ++) {
1893 tcg_gen_shli_i32(tmp2, tmp2, 8);
1894 tcg_gen_or_i32(tmp, tmp, tmp2);
1895 }
1896 break;
1897 case 2:
1898 tcg_gen_shli_i32(tmp2, tmp2, 16);
1899 tcg_gen_or_i32(tmp, tmp, tmp2);
1900 break;
1901 }
1902 gen_set_nzcv(tmp);
1903 tcg_temp_free_i32(tmp2);
1904 tcg_temp_free_i32(tmp);
1905 break;
1906 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1907 rd = (insn >> 12) & 0xf;
1908 rd0 = (insn >> 16) & 0xf;
1909 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1910 return 1;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 tmp = tcg_temp_new_i32();
1913 switch ((insn >> 22) & 3) {
1914 case 0:
1915 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1916 break;
1917 case 1:
1918 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1919 break;
1920 case 2:
1921 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1922 break;
1923 }
1924 store_reg(s, rd, tmp);
1925 break;
1926 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1927 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1928 wrd = (insn >> 12) & 0xf;
1929 rd0 = (insn >> 16) & 0xf;
1930 rd1 = (insn >> 0) & 0xf;
1931 gen_op_iwmmxt_movq_M0_wRn(rd0);
1932 switch ((insn >> 22) & 3) {
1933 case 0:
1934 if (insn & (1 << 21))
1935 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1936 else
1937 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1938 break;
1939 case 1:
1940 if (insn & (1 << 21))
1941 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1942 else
1943 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1944 break;
1945 case 2:
1946 if (insn & (1 << 21))
1947 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1948 else
1949 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1950 break;
1951 case 3:
1952 return 1;
1953 }
1954 gen_op_iwmmxt_movq_wRn_M0(wrd);
1955 gen_op_iwmmxt_set_mup();
1956 gen_op_iwmmxt_set_cup();
1957 break;
1958 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1959 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1960 wrd = (insn >> 12) & 0xf;
1961 rd0 = (insn >> 16) & 0xf;
1962 gen_op_iwmmxt_movq_M0_wRn(rd0);
1963 switch ((insn >> 22) & 3) {
1964 case 0:
1965 if (insn & (1 << 21))
1966 gen_op_iwmmxt_unpacklsb_M0();
1967 else
1968 gen_op_iwmmxt_unpacklub_M0();
1969 break;
1970 case 1:
1971 if (insn & (1 << 21))
1972 gen_op_iwmmxt_unpacklsw_M0();
1973 else
1974 gen_op_iwmmxt_unpackluw_M0();
1975 break;
1976 case 2:
1977 if (insn & (1 << 21))
1978 gen_op_iwmmxt_unpacklsl_M0();
1979 else
1980 gen_op_iwmmxt_unpacklul_M0();
1981 break;
1982 case 3:
1983 return 1;
1984 }
1985 gen_op_iwmmxt_movq_wRn_M0(wrd);
1986 gen_op_iwmmxt_set_mup();
1987 gen_op_iwmmxt_set_cup();
1988 break;
1989 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1990 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1991 wrd = (insn >> 12) & 0xf;
1992 rd0 = (insn >> 16) & 0xf;
1993 gen_op_iwmmxt_movq_M0_wRn(rd0);
1994 switch ((insn >> 22) & 3) {
1995 case 0:
1996 if (insn & (1 << 21))
1997 gen_op_iwmmxt_unpackhsb_M0();
1998 else
1999 gen_op_iwmmxt_unpackhub_M0();
2000 break;
2001 case 1:
2002 if (insn & (1 << 21))
2003 gen_op_iwmmxt_unpackhsw_M0();
2004 else
2005 gen_op_iwmmxt_unpackhuw_M0();
2006 break;
2007 case 2:
2008 if (insn & (1 << 21))
2009 gen_op_iwmmxt_unpackhsl_M0();
2010 else
2011 gen_op_iwmmxt_unpackhul_M0();
2012 break;
2013 case 3:
2014 return 1;
2015 }
2016 gen_op_iwmmxt_movq_wRn_M0(wrd);
2017 gen_op_iwmmxt_set_mup();
2018 gen_op_iwmmxt_set_cup();
2019 break;
2020 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2021 case 0x214: case 0x614: case 0xa14: case 0xe14:
2022 if (((insn >> 22) & 3) == 0)
2023 return 1;
2024 wrd = (insn >> 12) & 0xf;
2025 rd0 = (insn >> 16) & 0xf;
2026 gen_op_iwmmxt_movq_M0_wRn(rd0);
2027 tmp = tcg_temp_new_i32();
2028 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2029 tcg_temp_free_i32(tmp);
2030 return 1;
2031 }
2032 switch ((insn >> 22) & 3) {
2033 case 1:
2034 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2035 break;
2036 case 2:
2037 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2038 break;
2039 case 3:
2040 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2041 break;
2042 }
2043 tcg_temp_free_i32(tmp);
2044 gen_op_iwmmxt_movq_wRn_M0(wrd);
2045 gen_op_iwmmxt_set_mup();
2046 gen_op_iwmmxt_set_cup();
2047 break;
2048 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2049 case 0x014: case 0x414: case 0x814: case 0xc14:
2050 if (((insn >> 22) & 3) == 0)
2051 return 1;
2052 wrd = (insn >> 12) & 0xf;
2053 rd0 = (insn >> 16) & 0xf;
2054 gen_op_iwmmxt_movq_M0_wRn(rd0);
2055 tmp = tcg_temp_new_i32();
2056 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2057 tcg_temp_free_i32(tmp);
2058 return 1;
2059 }
2060 switch ((insn >> 22) & 3) {
2061 case 1:
2062 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2063 break;
2064 case 2:
2065 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2066 break;
2067 case 3:
2068 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2069 break;
2070 }
2071 tcg_temp_free_i32(tmp);
2072 gen_op_iwmmxt_movq_wRn_M0(wrd);
2073 gen_op_iwmmxt_set_mup();
2074 gen_op_iwmmxt_set_cup();
2075 break;
2076 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2077 case 0x114: case 0x514: case 0x914: case 0xd14:
2078 if (((insn >> 22) & 3) == 0)
2079 return 1;
2080 wrd = (insn >> 12) & 0xf;
2081 rd0 = (insn >> 16) & 0xf;
2082 gen_op_iwmmxt_movq_M0_wRn(rd0);
2083 tmp = tcg_temp_new_i32();
2084 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2085 tcg_temp_free_i32(tmp);
2086 return 1;
2087 }
2088 switch ((insn >> 22) & 3) {
2089 case 1:
2090 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2091 break;
2092 case 2:
2093 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2094 break;
2095 case 3:
2096 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2097 break;
2098 }
2099 tcg_temp_free_i32(tmp);
2100 gen_op_iwmmxt_movq_wRn_M0(wrd);
2101 gen_op_iwmmxt_set_mup();
2102 gen_op_iwmmxt_set_cup();
2103 break;
2104 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2105 case 0x314: case 0x714: case 0xb14: case 0xf14:
2106 if (((insn >> 22) & 3) == 0)
2107 return 1;
2108 wrd = (insn >> 12) & 0xf;
2109 rd0 = (insn >> 16) & 0xf;
2110 gen_op_iwmmxt_movq_M0_wRn(rd0);
2111 tmp = tcg_temp_new_i32();
2112 switch ((insn >> 22) & 3) {
2113 case 1:
2114 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2115 tcg_temp_free_i32(tmp);
2116 return 1;
2117 }
2118 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2119 break;
2120 case 2:
2121 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2122 tcg_temp_free_i32(tmp);
2123 return 1;
2124 }
2125 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2126 break;
2127 case 3:
2128 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2129 tcg_temp_free_i32(tmp);
2130 return 1;
2131 }
2132 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2133 break;
2134 }
2135 tcg_temp_free_i32(tmp);
2136 gen_op_iwmmxt_movq_wRn_M0(wrd);
2137 gen_op_iwmmxt_set_mup();
2138 gen_op_iwmmxt_set_cup();
2139 break;
2140 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2141 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2142 wrd = (insn >> 12) & 0xf;
2143 rd0 = (insn >> 16) & 0xf;
2144 rd1 = (insn >> 0) & 0xf;
2145 gen_op_iwmmxt_movq_M0_wRn(rd0);
2146 switch ((insn >> 22) & 3) {
2147 case 0:
2148 if (insn & (1 << 21))
2149 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2150 else
2151 gen_op_iwmmxt_minub_M0_wRn(rd1);
2152 break;
2153 case 1:
2154 if (insn & (1 << 21))
2155 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2156 else
2157 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2158 break;
2159 case 2:
2160 if (insn & (1 << 21))
2161 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2162 else
2163 gen_op_iwmmxt_minul_M0_wRn(rd1);
2164 break;
2165 case 3:
2166 return 1;
2167 }
2168 gen_op_iwmmxt_movq_wRn_M0(wrd);
2169 gen_op_iwmmxt_set_mup();
2170 break;
2171 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2172 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2173 wrd = (insn >> 12) & 0xf;
2174 rd0 = (insn >> 16) & 0xf;
2175 rd1 = (insn >> 0) & 0xf;
2176 gen_op_iwmmxt_movq_M0_wRn(rd0);
2177 switch ((insn >> 22) & 3) {
2178 case 0:
2179 if (insn & (1 << 21))
2180 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2181 else
2182 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2183 break;
2184 case 1:
2185 if (insn & (1 << 21))
2186 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2187 else
2188 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2189 break;
2190 case 2:
2191 if (insn & (1 << 21))
2192 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2193 else
2194 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2195 break;
2196 case 3:
2197 return 1;
2198 }
2199 gen_op_iwmmxt_movq_wRn_M0(wrd);
2200 gen_op_iwmmxt_set_mup();
2201 break;
2202 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2203 case 0x402: case 0x502: case 0x602: case 0x702:
2204 wrd = (insn >> 12) & 0xf;
2205 rd0 = (insn >> 16) & 0xf;
2206 rd1 = (insn >> 0) & 0xf;
2207 gen_op_iwmmxt_movq_M0_wRn(rd0);
2208 tmp = tcg_const_i32((insn >> 20) & 3);
2209 iwmmxt_load_reg(cpu_V1, rd1);
2210 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2211 tcg_temp_free_i32(tmp);
2212 gen_op_iwmmxt_movq_wRn_M0(wrd);
2213 gen_op_iwmmxt_set_mup();
2214 break;
2215 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2216 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2217 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2218 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2219 wrd = (insn >> 12) & 0xf;
2220 rd0 = (insn >> 16) & 0xf;
2221 rd1 = (insn >> 0) & 0xf;
2222 gen_op_iwmmxt_movq_M0_wRn(rd0);
2223 switch ((insn >> 20) & 0xf) {
2224 case 0x0:
2225 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2226 break;
2227 case 0x1:
2228 gen_op_iwmmxt_subub_M0_wRn(rd1);
2229 break;
2230 case 0x3:
2231 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2232 break;
2233 case 0x4:
2234 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2235 break;
2236 case 0x5:
2237 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2238 break;
2239 case 0x7:
2240 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2241 break;
2242 case 0x8:
2243 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2244 break;
2245 case 0x9:
2246 gen_op_iwmmxt_subul_M0_wRn(rd1);
2247 break;
2248 case 0xb:
2249 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2250 break;
2251 default:
2252 return 1;
2253 }
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 gen_op_iwmmxt_set_cup();
2257 break;
2258 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2259 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2260 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2261 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2262 wrd = (insn >> 12) & 0xf;
2263 rd0 = (insn >> 16) & 0xf;
2264 gen_op_iwmmxt_movq_M0_wRn(rd0);
2265 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2266 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2267 tcg_temp_free_i32(tmp);
2268 gen_op_iwmmxt_movq_wRn_M0(wrd);
2269 gen_op_iwmmxt_set_mup();
2270 gen_op_iwmmxt_set_cup();
2271 break;
2272 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2273 case 0x418: case 0x518: case 0x618: case 0x718:
2274 case 0x818: case 0x918: case 0xa18: case 0xb18:
2275 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2276 wrd = (insn >> 12) & 0xf;
2277 rd0 = (insn >> 16) & 0xf;
2278 rd1 = (insn >> 0) & 0xf;
2279 gen_op_iwmmxt_movq_M0_wRn(rd0);
2280 switch ((insn >> 20) & 0xf) {
2281 case 0x0:
2282 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2283 break;
2284 case 0x1:
2285 gen_op_iwmmxt_addub_M0_wRn(rd1);
2286 break;
2287 case 0x3:
2288 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2289 break;
2290 case 0x4:
2291 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2292 break;
2293 case 0x5:
2294 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2295 break;
2296 case 0x7:
2297 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2298 break;
2299 case 0x8:
2300 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2301 break;
2302 case 0x9:
2303 gen_op_iwmmxt_addul_M0_wRn(rd1);
2304 break;
2305 case 0xb:
2306 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2307 break;
2308 default:
2309 return 1;
2310 }
2311 gen_op_iwmmxt_movq_wRn_M0(wrd);
2312 gen_op_iwmmxt_set_mup();
2313 gen_op_iwmmxt_set_cup();
2314 break;
2315 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2316 case 0x408: case 0x508: case 0x608: case 0x708:
2317 case 0x808: case 0x908: case 0xa08: case 0xb08:
2318 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2319 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2320 return 1;
2321 wrd = (insn >> 12) & 0xf;
2322 rd0 = (insn >> 16) & 0xf;
2323 rd1 = (insn >> 0) & 0xf;
2324 gen_op_iwmmxt_movq_M0_wRn(rd0);
2325 switch ((insn >> 22) & 3) {
2326 case 1:
2327 if (insn & (1 << 21))
2328 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2329 else
2330 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2331 break;
2332 case 2:
2333 if (insn & (1 << 21))
2334 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2335 else
2336 gen_op_iwmmxt_packul_M0_wRn(rd1);
2337 break;
2338 case 3:
2339 if (insn & (1 << 21))
2340 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2341 else
2342 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2343 break;
2344 }
2345 gen_op_iwmmxt_movq_wRn_M0(wrd);
2346 gen_op_iwmmxt_set_mup();
2347 gen_op_iwmmxt_set_cup();
2348 break;
2349 case 0x201: case 0x203: case 0x205: case 0x207:
2350 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2351 case 0x211: case 0x213: case 0x215: case 0x217:
2352 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2353 wrd = (insn >> 5) & 0xf;
2354 rd0 = (insn >> 12) & 0xf;
2355 rd1 = (insn >> 0) & 0xf;
2356 if (rd0 == 0xf || rd1 == 0xf)
2357 return 1;
2358 gen_op_iwmmxt_movq_M0_wRn(wrd);
2359 tmp = load_reg(s, rd0);
2360 tmp2 = load_reg(s, rd1);
2361 switch ((insn >> 16) & 0xf) {
2362 case 0x0: /* TMIA */
2363 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2364 break;
2365 case 0x8: /* TMIAPH */
2366 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2367 break;
2368 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2369 if (insn & (1 << 16))
2370 tcg_gen_shri_i32(tmp, tmp, 16);
2371 if (insn & (1 << 17))
2372 tcg_gen_shri_i32(tmp2, tmp2, 16);
2373 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2374 break;
2375 default:
2376 tcg_temp_free_i32(tmp2);
2377 tcg_temp_free_i32(tmp);
2378 return 1;
2379 }
2380 tcg_temp_free_i32(tmp2);
2381 tcg_temp_free_i32(tmp);
2382 gen_op_iwmmxt_movq_wRn_M0(wrd);
2383 gen_op_iwmmxt_set_mup();
2384 break;
2385 default:
2386 return 1;
2387 }
2388
2389 return 0;
2390 }
2391
2392 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2393 (i.e. an undefined instruction). */
2394 static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2395 {
2396 int acc, rd0, rd1, rdhi, rdlo;
2397 TCGv_i32 tmp, tmp2;
2398
2399 if ((insn & 0x0ff00f10) == 0x0e200010) {
2400 /* Multiply with Internal Accumulate Format */
2401 rd0 = (insn >> 12) & 0xf;
2402 rd1 = insn & 0xf;
2403 acc = (insn >> 5) & 7;
2404
2405 if (acc != 0)
2406 return 1;
2407
2408 tmp = load_reg(s, rd0);
2409 tmp2 = load_reg(s, rd1);
2410 switch ((insn >> 16) & 0xf) {
2411 case 0x0: /* MIA */
2412 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2413 break;
2414 case 0x8: /* MIAPH */
2415 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2416 break;
2417 case 0xc: /* MIABB */
2418 case 0xd: /* MIABT */
2419 case 0xe: /* MIATB */
2420 case 0xf: /* MIATT */
2421 if (insn & (1 << 16))
2422 tcg_gen_shri_i32(tmp, tmp, 16);
2423 if (insn & (1 << 17))
2424 tcg_gen_shri_i32(tmp2, tmp2, 16);
2425 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2426 break;
2427 default:
2428 return 1;
2429 }
2430 tcg_temp_free_i32(tmp2);
2431 tcg_temp_free_i32(tmp);
2432
2433 gen_op_iwmmxt_movq_wRn_M0(acc);
2434 return 0;
2435 }
2436
2437 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2438 /* Internal Accumulator Access Format */
2439 rdhi = (insn >> 16) & 0xf;
2440 rdlo = (insn >> 12) & 0xf;
2441 acc = insn & 7;
2442
2443 if (acc != 0)
2444 return 1;
2445
2446 if (insn & ARM_CP_RW_BIT) { /* MRA */
2447 iwmmxt_load_reg(cpu_V0, acc);
2448 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2449 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2450 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2451 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2452 } else { /* MAR */
2453 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2454 iwmmxt_store_reg(cpu_V0, acc);
2455 }
2456 return 0;
2457 }
2458
2459 return 1;
2460 }
2461
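/* Extract VFP register numbers from an instruction encoding: for
 * single-precision registers the low bit of the register number lives
 * in the "small" bit, while for double-precision registers that bit is
 * the high bit of the number (D16-D31 require VFP3, hence the feature
 * check in VFP_DREG).
 */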
2462 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2463 #define VFP_SREG(insn, bigbit, smallbit) \
2464 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2465 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2466 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2467 reg = (((insn) >> (bigbit)) & 0x0f) \
2468 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2469 } else { \
2470 if (insn & (1 << (smallbit))) \
2471 return 1; \
2472 reg = ((insn) >> (bigbit)) & 0x0f; \
2473 }} while (0)
2474
2475 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2476 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2477 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2478 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2479 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2480 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2481
2482 /* Move between integer and VFP cores. */
2483 static TCGv_i32 gen_vfp_mrs(void)
2484 {
2485 TCGv_i32 tmp = tcg_temp_new_i32();
2486 tcg_gen_mov_i32(tmp, cpu_F0s);
2487 return tmp;
2488 }
2489
2490 static void gen_vfp_msr(TCGv_i32 tmp)
2491 {
2492 tcg_gen_mov_i32(cpu_F0s, tmp);
2493 tcg_temp_free_i32(tmp);
2494 }
2495
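/* The gen_neon_dup_* helpers replicate a byte or halfword across all
 * lanes of a 32-bit value, for cases where an element has to be
 * broadcast (VDUP, scalar operands, load-and-replicate element loads).
 */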
2496 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2497 {
2498 TCGv_i32 tmp = tcg_temp_new_i32();
2499 if (shift)
2500 tcg_gen_shri_i32(var, var, shift);
2501 tcg_gen_ext8u_i32(var, var);
2502 tcg_gen_shli_i32(tmp, var, 8);
2503 tcg_gen_or_i32(var, var, tmp);
2504 tcg_gen_shli_i32(tmp, var, 16);
2505 tcg_gen_or_i32(var, var, tmp);
2506 tcg_temp_free_i32(tmp);
2507 }
2508
2509 static void gen_neon_dup_low16(TCGv_i32 var)
2510 {
2511 TCGv_i32 tmp = tcg_temp_new_i32();
2512 tcg_gen_ext16u_i32(var, var);
2513 tcg_gen_shli_i32(tmp, var, 16);
2514 tcg_gen_or_i32(var, var, tmp);
2515 tcg_temp_free_i32(tmp);
2516 }
2517
2518 static void gen_neon_dup_high16(TCGv_i32 var)
2519 {
2520 TCGv_i32 tmp = tcg_temp_new_i32();
2521 tcg_gen_andi_i32(var, var, 0xffff0000);
2522 tcg_gen_shri_i32(tmp, var, 16);
2523 tcg_gen_or_i32(var, var, tmp);
2524 tcg_temp_free_i32(tmp);
2525 }
2526
2527 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2528 {
2529 /* Load a single Neon element and replicate into a 32-bit TCG reg */
2530 TCGv_i32 tmp = tcg_temp_new_i32();
2531 switch (size) {
2532 case 0:
2533 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
2534 gen_neon_dup_u8(tmp, 0);
2535 break;
2536 case 1:
2537 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
2538 gen_neon_dup_low16(tmp);
2539 break;
2540 case 2:
2541 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
2542 break;
2543 default: /* Avoid compiler warnings. */
2544 abort();
2545 }
2546 return tmp;
2547 }
2548
2549 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2550 (i.e. an undefined instruction). */
2551 static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
2552 {
2553 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2554 int dp, veclen;
2555 TCGv_i32 addr;
2556 TCGv_i32 tmp;
2557 TCGv_i32 tmp2;
2558
2559 if (!arm_feature(env, ARM_FEATURE_VFP))
2560 return 1;
2561
2562 if (!s->vfp_enabled) {
2563 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2564 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2565 return 1;
2566 rn = (insn >> 16) & 0xf;
2567 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2568 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2569 return 1;
2570 }
2571 dp = ((insn & 0xf00) == 0xb00);
2572 switch ((insn >> 24) & 0xf) {
2573 case 0xe:
2574 if (insn & (1 << 4)) {
2575 /* single register transfer */
2576 rd = (insn >> 12) & 0xf;
2577 if (dp) {
2578 int size;
2579 int pass;
2580
2581 VFP_DREG_N(rn, insn);
2582 if (insn & 0xf)
2583 return 1;
2584 if (insn & 0x00c00060
2585 && !arm_feature(env, ARM_FEATURE_NEON))
2586 return 1;
2587
2588 pass = (insn >> 21) & 1;
2589 if (insn & (1 << 22)) {
2590 size = 0;
2591 offset = ((insn >> 5) & 3) * 8;
2592 } else if (insn & (1 << 5)) {
2593 size = 1;
2594 offset = (insn & (1 << 6)) ? 16 : 0;
2595 } else {
2596 size = 2;
2597 offset = 0;
2598 }
2599 if (insn & ARM_CP_RW_BIT) {
2600 /* vfp->arm */
2601 tmp = neon_load_reg(rn, pass);
2602 switch (size) {
2603 case 0:
2604 if (offset)
2605 tcg_gen_shri_i32(tmp, tmp, offset);
2606 if (insn & (1 << 23))
2607 gen_uxtb(tmp);
2608 else
2609 gen_sxtb(tmp);
2610 break;
2611 case 1:
2612 if (insn & (1 << 23)) {
2613 if (offset) {
2614 tcg_gen_shri_i32(tmp, tmp, 16);
2615 } else {
2616 gen_uxth(tmp);
2617 }
2618 } else {
2619 if (offset) {
2620 tcg_gen_sari_i32(tmp, tmp, 16);
2621 } else {
2622 gen_sxth(tmp);
2623 }
2624 }
2625 break;
2626 case 2:
2627 break;
2628 }
2629 store_reg(s, rd, tmp);
2630 } else {
2631 /* arm->vfp */
2632 tmp = load_reg(s, rd);
2633 if (insn & (1 << 23)) {
2634 /* VDUP */
2635 if (size == 0) {
2636 gen_neon_dup_u8(tmp, 0);
2637 } else if (size == 1) {
2638 gen_neon_dup_low16(tmp);
2639 }
2640 for (n = 0; n <= pass * 2; n++) {
2641 tmp2 = tcg_temp_new_i32();
2642 tcg_gen_mov_i32(tmp2, tmp);
2643 neon_store_reg(rn, n, tmp2);
2644 }
2645 neon_store_reg(rn, n, tmp);
2646 } else {
2647 /* VMOV */
2648 switch (size) {
2649 case 0:
2650 tmp2 = neon_load_reg(rn, pass);
2651 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
2652 tcg_temp_free_i32(tmp2);
2653 break;
2654 case 1:
2655 tmp2 = neon_load_reg(rn, pass);
2656 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
2657 tcg_temp_free_i32(tmp2);
2658 break;
2659 case 2:
2660 break;
2661 }
2662 neon_store_reg(rn, pass, tmp);
2663 }
2664 }
2665 } else { /* !dp */
2666 if ((insn & 0x6f) != 0x00)
2667 return 1;
2668 rn = VFP_SREG_N(insn);
2669 if (insn & ARM_CP_RW_BIT) {
2670 /* vfp->arm */
2671 if (insn & (1 << 21)) {
2672 /* system register */
2673 rn >>= 1;
2674
2675 switch (rn) {
2676 case ARM_VFP_FPSID:
2677 /* VFP2 allows access to FPSID from userspace.
2678 VFP3 restricts all ID registers to privileged
2679 accesses. */
2680 if (IS_USER(s)
2681 && arm_feature(env, ARM_FEATURE_VFP3))
2682 return 1;
2683 tmp = load_cpu_field(vfp.xregs[rn]);
2684 break;
2685 case ARM_VFP_FPEXC:
2686 if (IS_USER(s))
2687 return 1;
2688 tmp = load_cpu_field(vfp.xregs[rn]);
2689 break;
2690 case ARM_VFP_FPINST:
2691 case ARM_VFP_FPINST2:
2692 /* Not present in VFP3. */
2693 if (IS_USER(s)
2694 || arm_feature(env, ARM_FEATURE_VFP3))
2695 return 1;
2696 tmp = load_cpu_field(vfp.xregs[rn]);
2697 break;
2698 case ARM_VFP_FPSCR:
2699 if (rd == 15) {
2700 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2701 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2702 } else {
2703 tmp = tcg_temp_new_i32();
2704 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2705 }
2706 break;
2707 case ARM_VFP_MVFR0:
2708 case ARM_VFP_MVFR1:
2709 if (IS_USER(s)
2710 || !arm_feature(env, ARM_FEATURE_MVFR))
2711 return 1;
2712 tmp = load_cpu_field(vfp.xregs[rn]);
2713 break;
2714 default:
2715 return 1;
2716 }
2717 } else {
2718 gen_mov_F0_vreg(0, rn);
2719 tmp = gen_vfp_mrs();
2720 }
2721 if (rd == 15) {
2722 /* Set the 4 flag bits in the CPSR. */
2723 gen_set_nzcv(tmp);
2724 tcg_temp_free_i32(tmp);
2725 } else {
2726 store_reg(s, rd, tmp);
2727 }
2728 } else {
2729 /* arm->vfp */
2730 if (insn & (1 << 21)) {
2731 rn >>= 1;
2732 /* system register */
2733 switch (rn) {
2734 case ARM_VFP_FPSID:
2735 case ARM_VFP_MVFR0:
2736 case ARM_VFP_MVFR1:
2737 /* Writes are ignored. */
2738 break;
2739 case ARM_VFP_FPSCR:
2740 tmp = load_reg(s, rd);
2741 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2742 tcg_temp_free_i32(tmp);
2743 gen_lookup_tb(s);
2744 break;
2745 case ARM_VFP_FPEXC:
2746 if (IS_USER(s))
2747 return 1;
2748 /* TODO: VFP subarchitecture support.
2749 * For now, keep the EN bit only */
2750 tmp = load_reg(s, rd);
2751 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2752 store_cpu_field(tmp, vfp.xregs[rn]);
2753 gen_lookup_tb(s);
2754 break;
2755 case ARM_VFP_FPINST:
2756 case ARM_VFP_FPINST2:
2757 tmp = load_reg(s, rd);
2758 store_cpu_field(tmp, vfp.xregs[rn]);
2759 break;
2760 default:
2761 return 1;
2762 }
2763 } else {
2764 tmp = load_reg(s, rd);
2765 gen_vfp_msr(tmp);
2766 gen_mov_vreg_F0(0, rn);
2767 }
2768 }
2769 }
2770 } else {
2771 /* data processing */
2772 /* The opcode is in bits 23, 21, 20 and 6. */
2773 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2774 if (dp) {
2775 if (op == 15) {
2776 /* rn is opcode */
2777 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2778 } else {
2779 /* rn is register number */
2780 VFP_DREG_N(rn, insn);
2781 }
2782
2783 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2784 /* Integer or single precision destination. */
2785 rd = VFP_SREG_D(insn);
2786 } else {
2787 VFP_DREG_D(rd, insn);
2788 }
2789 if (op == 15 &&
2790 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2791 /* VCVT from int is always from S reg regardless of dp bit.
2792 * VCVT with immediate frac_bits has the same format as SREG_M
2793 */
2794 rm = VFP_SREG_M(insn);
2795 } else {
2796 VFP_DREG_M(rm, insn);
2797 }
2798 } else {
2799 rn = VFP_SREG_N(insn);
2800 if (op == 15 && rn == 15) {
2801 /* Double precision destination. */
2802 VFP_DREG_D(rd, insn);
2803 } else {
2804 rd = VFP_SREG_D(insn);
2805 }
2806 /* NB that we implicitly rely on the encoding for the frac_bits
2807 * in VCVT of fixed to float being the same as that of an SREG_M
2808 */
2809 rm = VFP_SREG_M(insn);
2810 }
2811
2812 veclen = s->vec_len;
2813 if (op == 15 && rn > 3)
2814 veclen = 0;
2815
2816 /* Shut up compiler warnings. */
2817 delta_m = 0;
2818 delta_d = 0;
2819 bank_mask = 0;
2820
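/* A non-zero FPSCR.LEN (cached in s->vec_len) with a destination
 * outside the scalar bank makes this a VFP short-vector operation:
 * delta_d/delta_m are the per-iteration register strides (from
 * FPSCR.STRIDE, cached in s->vec_stride) and bank_mask keeps the
 * register numbers wrapping within their bank.
 */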
2821 if (veclen > 0) {
2822 if (dp)
2823 bank_mask = 0xc;
2824 else
2825 bank_mask = 0x18;
2826
2827 /* Figure out what type of vector operation this is. */
2828 if ((rd & bank_mask) == 0) {
2829 /* scalar */
2830 veclen = 0;
2831 } else {
2832 if (dp)
2833 delta_d = (s->vec_stride >> 1) + 1;
2834 else
2835 delta_d = s->vec_stride + 1;
2836
2837 if ((rm & bank_mask) == 0) {
2838 /* mixed scalar/vector */
2839 delta_m = 0;
2840 } else {
2841 /* vector */
2842 delta_m = delta_d;
2843 }
2844 }
2845 }
2846
2847 /* Load the initial operands. */
2848 if (op == 15) {
2849 switch (rn) {
2850 case 16:
2851 case 17:
2852 /* Integer source */
2853 gen_mov_F0_vreg(0, rm);
2854 break;
2855 case 8:
2856 case 9:
2857 /* Compare */
2858 gen_mov_F0_vreg(dp, rd);
2859 gen_mov_F1_vreg(dp, rm);
2860 break;
2861 case 10:
2862 case 11:
2863 /* Compare with zero */
2864 gen_mov_F0_vreg(dp, rd);
2865 gen_vfp_F1_ld0(dp);
2866 break;
2867 case 20:
2868 case 21:
2869 case 22:
2870 case 23:
2871 case 28:
2872 case 29:
2873 case 30:
2874 case 31:
2875 /* Source and destination the same. */
2876 gen_mov_F0_vreg(dp, rd);
2877 break;
2878 case 4:
2879 case 5:
2880 case 6:
2881 case 7:
2882 /* VCVTB, VCVTT: only present with the halfprec extension,
2883 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2884 */
2885 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2886 return 1;
2887 }
2888 /* Otherwise fall through */
2889 default:
2890 /* One source operand. */
2891 gen_mov_F0_vreg(dp, rm);
2892 break;
2893 }
2894 } else {
2895 /* Two source operands. */
2896 gen_mov_F0_vreg(dp, rn);
2897 gen_mov_F1_vreg(dp, rm);
2898 }
2899
2900 for (;;) {
2901 /* Perform the calculation. */
2902 switch (op) {
2903 case 0: /* VMLA: fd + (fn * fm) */
2904 /* Note that order of inputs to the add matters for NaNs */
2905 gen_vfp_F1_mul(dp);
2906 gen_mov_F0_vreg(dp, rd);
2907 gen_vfp_add(dp);
2908 break;
2909 case 1: /* VMLS: fd + -(fn * fm) */
2910 gen_vfp_mul(dp);
2911 gen_vfp_F1_neg(dp);
2912 gen_mov_F0_vreg(dp, rd);
2913 gen_vfp_add(dp);
2914 break;
2915 case 2: /* VNMLS: -fd + (fn * fm) */
2916 /* Note that it isn't valid to replace (-A + B) with (B - A)
2917 * or similar plausible looking simplifications
2918 * because this will give wrong results for NaNs.
2919 */
2920 gen_vfp_F1_mul(dp);
2921 gen_mov_F0_vreg(dp, rd);
2922 gen_vfp_neg(dp);
2923 gen_vfp_add(dp);
2924 break;
2925 case 3: /* VNMLA: -fd + -(fn * fm) */
2926 gen_vfp_mul(dp);
2927 gen_vfp_F1_neg(dp);
2928 gen_mov_F0_vreg(dp, rd);
2929 gen_vfp_neg(dp);
2930 gen_vfp_add(dp);
2931 break;
2932 case 4: /* mul: fn * fm */
2933 gen_vfp_mul(dp);
2934 break;
2935 case 5: /* nmul: -(fn * fm) */
2936 gen_vfp_mul(dp);
2937 gen_vfp_neg(dp);
2938 break;
2939 case 6: /* add: fn + fm */
2940 gen_vfp_add(dp);
2941 break;
2942 case 7: /* sub: fn - fm */
2943 gen_vfp_sub(dp);
2944 break;
2945 case 8: /* div: fn / fm */
2946 gen_vfp_div(dp);
2947 break;
2948 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2949 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2950 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2951 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2952 /* These are fused multiply-add, and must be done as one
2953 * floating point operation with no rounding between the
2954 * multiplication and addition steps.
2955 * NB that doing the negations here as separate steps is
2956 * correct: an input NaN should come out with its sign bit
2957 * flipped if it is a negated input.
2958 */
2959 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
2960 return 1;
2961 }
2962 if (dp) {
2963 TCGv_ptr fpst;
2964 TCGv_i64 frd;
2965 if (op & 1) {
2966 /* VFNMS, VFMS */
2967 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
2968 }
2969 frd = tcg_temp_new_i64();
2970 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
2971 if (op & 2) {
2972 /* VFNMA, VFNMS */
2973 gen_helper_vfp_negd(frd, frd);
2974 }
2975 fpst = get_fpstatus_ptr(0);
2976 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
2977 cpu_F1d, frd, fpst);
2978 tcg_temp_free_ptr(fpst);
2979 tcg_temp_free_i64(frd);
2980 } else {
2981 TCGv_ptr fpst;
2982 TCGv_i32 frd;
2983 if (op & 1) {
2984 /* VFNMS, VFMS */
2985 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
2986 }
2987 frd = tcg_temp_new_i32();
2988 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
2989 if (op & 2) {
2990 gen_helper_vfp_negs(frd, frd);
2991 }
2992 fpst = get_fpstatus_ptr(0);
2993 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
2994 cpu_F1s, frd, fpst);
2995 tcg_temp_free_ptr(fpst);
2996 tcg_temp_free_i32(frd);
2997 }
2998 break;
2999 case 14: /* fconst */
3000 if (!arm_feature(env, ARM_FEATURE_VFP3))
3001 return 1;
3002
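/* Expand the 8-bit VFP "fconst" immediate into a full single- or
 * double-precision value: one bit gives the sign, one selects between
 * the two exponent ranges, and the remaining six supply the low
 * exponent and top fraction bits; everything below is zero.
 */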
3003 n = (insn << 12) & 0x80000000;
3004 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3005 if (dp) {
3006 if (i & 0x40)
3007 i |= 0x3f80;
3008 else
3009 i |= 0x4000;
3010 n |= i << 16;
3011 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3012 } else {
3013 if (i & 0x40)
3014 i |= 0x780;
3015 else
3016 i |= 0x800;
3017 n |= i << 19;
3018 tcg_gen_movi_i32(cpu_F0s, n);
3019 }
3020 break;
3021 case 15: /* extension space */
3022 switch (rn) {
3023 case 0: /* cpy */
3024 /* no-op */
3025 break;
3026 case 1: /* abs */
3027 gen_vfp_abs(dp);
3028 break;
3029 case 2: /* neg */
3030 gen_vfp_neg(dp);
3031 break;
3032 case 3: /* sqrt */
3033 gen_vfp_sqrt(dp);
3034 break;
3035 case 4: /* vcvtb.f32.f16 */
3036 tmp = gen_vfp_mrs();
3037 tcg_gen_ext16u_i32(tmp, tmp);
3038 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3039 tcg_temp_free_i32(tmp);
3040 break;
3041 case 5: /* vcvtt.f32.f16 */
3042 tmp = gen_vfp_mrs();
3043 tcg_gen_shri_i32(tmp, tmp, 16);
3044 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3045 tcg_temp_free_i32(tmp);
3046 break;
3047 case 6: /* vcvtb.f16.f32 */
3048 tmp = tcg_temp_new_i32();
3049 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3050 gen_mov_F0_vreg(0, rd);
3051 tmp2 = gen_vfp_mrs();
3052 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3053 tcg_gen_or_i32(tmp, tmp, tmp2);
3054 tcg_temp_free_i32(tmp2);
3055 gen_vfp_msr(tmp);
3056 break;
3057 case 7: /* vcvtt.f16.f32 */
3058 tmp = tcg_temp_new_i32();
3059 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3060 tcg_gen_shli_i32(tmp, tmp, 16);
3061 gen_mov_F0_vreg(0, rd);
3062 tmp2 = gen_vfp_mrs();
3063 tcg_gen_ext16u_i32(tmp2, tmp2);
3064 tcg_gen_or_i32(tmp, tmp, tmp2);
3065 tcg_temp_free_i32(tmp2);
3066 gen_vfp_msr(tmp);
3067 break;
3068 case 8: /* cmp */
3069 gen_vfp_cmp(dp);
3070 break;
3071 case 9: /* cmpe */
3072 gen_vfp_cmpe(dp);
3073 break;
3074 case 10: /* cmpz */
3075 gen_vfp_cmp(dp);
3076 break;
3077 case 11: /* cmpez */
3078 gen_vfp_F1_ld0(dp);
3079 gen_vfp_cmpe(dp);
3080 break;
3081 case 15: /* single<->double conversion */
3082 if (dp)
3083 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3084 else
3085 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3086 break;
3087 case 16: /* fuito */
3088 gen_vfp_uito(dp, 0);
3089 break;
3090 case 17: /* fsito */
3091 gen_vfp_sito(dp, 0);
3092 break;
3093 case 20: /* fshto */
3094 if (!arm_feature(env, ARM_FEATURE_VFP3))
3095 return 1;
3096 gen_vfp_shto(dp, 16 - rm, 0);
3097 break;
3098 case 21: /* fslto */
3099 if (!arm_feature(env, ARM_FEATURE_VFP3))
3100 return 1;
3101 gen_vfp_slto(dp, 32 - rm, 0);
3102 break;
3103 case 22: /* fuhto */
3104 if (!arm_feature(env, ARM_FEATURE_VFP3))
3105 return 1;
3106 gen_vfp_uhto(dp, 16 - rm, 0);
3107 break;
3108 case 23: /* fulto */
3109 if (!arm_feature(env, ARM_FEATURE_VFP3))
3110 return 1;
3111 gen_vfp_ulto(dp, 32 - rm, 0);
3112 break;
3113 case 24: /* ftoui */
3114 gen_vfp_toui(dp, 0);
3115 break;
3116 case 25: /* ftouiz */
3117 gen_vfp_touiz(dp, 0);
3118 break;
3119 case 26: /* ftosi */
3120 gen_vfp_tosi(dp, 0);
3121 break;
3122 case 27: /* ftosiz */
3123 gen_vfp_tosiz(dp, 0);
3124 break;
3125 case 28: /* ftosh */
3126 if (!arm_feature(env, ARM_FEATURE_VFP3))
3127 return 1;
3128 gen_vfp_tosh(dp, 16 - rm, 0);
3129 break;
3130 case 29: /* ftosl */
3131 if (!arm_feature(env, ARM_FEATURE_VFP3))
3132 return 1;
3133 gen_vfp_tosl(dp, 32 - rm, 0);
3134 break;
3135 case 30: /* ftouh */
3136 if (!arm_feature(env, ARM_FEATURE_VFP3))
3137 return 1;
3138 gen_vfp_touh(dp, 16 - rm, 0);
3139 break;
3140 case 31: /* ftoul */
3141 if (!arm_feature(env, ARM_FEATURE_VFP3))
3142 return 1;
3143 gen_vfp_toul(dp, 32 - rm, 0);
3144 break;
3145 default: /* undefined */
3146 return 1;
3147 }
3148 break;
3149 default: /* undefined */
3150 return 1;
3151 }
3152
3153 /* Write back the result. */
3154 if (op == 15 && (rn >= 8 && rn <= 11))
3155 ; /* Comparison, do nothing. */
3156 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3157 /* VCVT double to int: always integer result. */
3158 gen_mov_vreg_F0(0, rd);
3159 else if (op == 15 && rn == 15)
3160 /* conversion */
3161 gen_mov_vreg_F0(!dp, rd);
3162 else
3163 gen_mov_vreg_F0(dp, rd);
3164
3165 /* break out of the loop if we have finished */
3166 if (veclen == 0)
3167 break;
3168
3169 if (op == 15 && delta_m == 0) {
3170 /* single source one-many */
3171 while (veclen--) {
3172 rd = ((rd + delta_d) & (bank_mask - 1))
3173 | (rd & bank_mask);
3174 gen_mov_vreg_F0(dp, rd);
3175 }
3176 break;
3177 }
3178 /* Setup the next operands. */
3179 veclen--;
3180 rd = ((rd + delta_d) & (bank_mask - 1))
3181 | (rd & bank_mask);
3182
3183 if (op == 15) {
3184 /* One source operand. */
3185 rm = ((rm + delta_m) & (bank_mask - 1))
3186 | (rm & bank_mask);
3187 gen_mov_F0_vreg(dp, rm);
3188 } else {
3189 /* Two source operands. */
3190 rn = ((rn + delta_d) & (bank_mask - 1))
3191 | (rn & bank_mask);
3192 gen_mov_F0_vreg(dp, rn);
3193 if (delta_m) {
3194 rm = ((rm + delta_m) & (bank_mask - 1))
3195 | (rm & bank_mask);
3196 gen_mov_F1_vreg(dp, rm);
3197 }
3198 }
3199 }
3200 }
3201 break;
3202 case 0xc:
3203 case 0xd:
3204 if ((insn & 0x03e00000) == 0x00400000) {
3205 /* two-register transfer */
3206 rn = (insn >> 16) & 0xf;
3207 rd = (insn >> 12) & 0xf;
3208 if (dp) {
3209 VFP_DREG_M(rm, insn);
3210 } else {
3211 rm = VFP_SREG_M(insn);
3212 }
3213
3214 if (insn & ARM_CP_RW_BIT) {
3215 /* vfp->arm */
3216 if (dp) {
3217 gen_mov_F0_vreg(0, rm * 2);
3218 tmp = gen_vfp_mrs();
3219 store_reg(s, rd, tmp);
3220 gen_mov_F0_vreg(0, rm * 2 + 1);
3221 tmp = gen_vfp_mrs();
3222 store_reg(s, rn, tmp);
3223 } else {
3224 gen_mov_F0_vreg(0, rm);
3225 tmp = gen_vfp_mrs();
3226 store_reg(s, rd, tmp);
3227 gen_mov_F0_vreg(0, rm + 1);
3228 tmp = gen_vfp_mrs();
3229 store_reg(s, rn, tmp);
3230 }
3231 } else {
3232 /* arm->vfp */
3233 if (dp) {
3234 tmp = load_reg(s, rd);
3235 gen_vfp_msr(tmp);
3236 gen_mov_vreg_F0(0, rm * 2);
3237 tmp = load_reg(s, rn);
3238 gen_vfp_msr(tmp);
3239 gen_mov_vreg_F0(0, rm * 2 + 1);
3240 } else {
3241 tmp = load_reg(s, rd);
3242 gen_vfp_msr(tmp);
3243 gen_mov_vreg_F0(0, rm);
3244 tmp = load_reg(s, rn);
3245 gen_vfp_msr(tmp);
3246 gen_mov_vreg_F0(0, rm + 1);
3247 }
3248 }
3249 } else {
3250 /* Load/store */
3251 rn = (insn >> 16) & 0xf;
3252 if (dp)
3253 VFP_DREG_D(rd, insn);
3254 else
3255 rd = VFP_SREG_D(insn);
3256 if ((insn & 0x01200000) == 0x01000000) {
3257 /* Single load/store */
3258 offset = (insn & 0xff) << 2;
3259 if ((insn & (1 << 23)) == 0)
3260 offset = -offset;
3261 if (s->thumb && rn == 15) {
3262 /* This is actually UNPREDICTABLE */
3263 addr = tcg_temp_new_i32();
3264 tcg_gen_movi_i32(addr, s->pc & ~2);
3265 } else {
3266 addr = load_reg(s, rn);
3267 }
3268 tcg_gen_addi_i32(addr, addr, offset);
3269 if (insn & (1 << 20)) {
3270 gen_vfp_ld(s, dp, addr);
3271 gen_mov_vreg_F0(dp, rd);
3272 } else {
3273 gen_mov_F0_vreg(dp, rd);
3274 gen_vfp_st(s, dp, addr);
3275 }
3276 tcg_temp_free_i32(addr);
3277 } else {
3278 /* load/store multiple */
3279 int w = insn & (1 << 21);
3280 if (dp)
3281 n = (insn >> 1) & 0x7f;
3282 else
3283 n = insn & 0xff;
3284
3285 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3286 /* P == U, W == 1 => UNDEF */
3287 return 1;
3288 }
3289 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3290 /* UNPREDICTABLE cases for bad immediates: we choose to
3291 * UNDEF to avoid generating huge numbers of TCG ops
3292 */
3293 return 1;
3294 }
3295 if (rn == 15 && w) {
3296 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3297 return 1;
3298 }
3299
3300 if (s->thumb && rn == 15) {
3301 /* This is actually UNPREDICTABLE */
3302 addr = tcg_temp_new_i32();
3303 tcg_gen_movi_i32(addr, s->pc & ~2);
3304 } else {
3305 addr = load_reg(s, rn);
3306 }
3307 if (insn & (1 << 24)) /* pre-decrement */
3308 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3309
3310 if (dp)
3311 offset = 8;
3312 else
3313 offset = 4;
3314 for (i = 0; i < n; i++) {
3315 if (insn & ARM_CP_RW_BIT) {
3316 /* load */
3317 gen_vfp_ld(s, dp, addr);
3318 gen_mov_vreg_F0(dp, rd + i);
3319 } else {
3320 /* store */
3321 gen_mov_F0_vreg(dp, rd + i);
3322 gen_vfp_st(s, dp, addr);
3323 }
3324 tcg_gen_addi_i32(addr, addr, offset);
3325 }
3326 if (w) {
3327 /* writeback */
3328 if (insn & (1 << 24))
3329 offset = -offset * n;
3330 else if (dp && (insn & 1))
3331 offset = 4;
3332 else
3333 offset = 0;
3334
3335 if (offset != 0)
3336 tcg_gen_addi_i32(addr, addr, offset);
3337 store_reg(s, rn, addr);
3338 } else {
3339 tcg_temp_free_i32(addr);
3340 }
3341 }
3342 }
3343 break;
3344 default:
3345 /* Should never happen. */
3346 return 1;
3347 }
3348 return 0;
3349 }
3350
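/* Emit a jump to "dest": if the target lies in the same guest page as
 * this TB we can use direct block chaining (goto_tb/exit_tb with the
 * TB pointer), otherwise just update the PC and exit to the main loop.
 */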
3351 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3352 {
3353 TranslationBlock *tb;
3354
3355 tb = s->tb;
3356 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3357 tcg_gen_goto_tb(n);
3358 gen_set_pc_im(dest);
3359 tcg_gen_exit_tb((tcg_target_long)tb + n);
3360 } else {
3361 gen_set_pc_im(dest);
3362 tcg_gen_exit_tb(0);
3363 }
3364 }
3365
3366 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3367 {
3368 if (unlikely(s->singlestep_enabled)) {
3369 /* An indirect jump so that we still trigger the debug exception. */
3370 if (s->thumb)
3371 dest |= 1;
3372 gen_bx_im(s, dest);
3373 } else {
3374 gen_goto_tb(s, 0, dest);
3375 s->is_jmp = DISAS_TB_JUMP;
3376 }
3377 }
3378
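/* 16x16->32 signed multiply for the SMULxy family: x and y select the
 * top or bottom halfword of each operand (sign-extended) before the
 * multiply.
 */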
3379 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
3380 {
3381 if (x)
3382 tcg_gen_sari_i32(t0, t0, 16);
3383 else
3384 gen_sxth(t0);
3385 if (y)
3386 tcg_gen_sari_i32(t1, t1, 16);
3387 else
3388 gen_sxth(t1);
3389 tcg_gen_mul_i32(t0, t0, t1);
3390 }
3391
3392 /* Return the mask of PSR bits set by a MSR instruction. */
3393 static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
3394 uint32_t mask;
3395
3396 mask = 0;
3397 if (flags & (1 << 0))
3398 mask |= 0xff;
3399 if (flags & (1 << 1))
3400 mask |= 0xff00;
3401 if (flags & (1 << 2))
3402 mask |= 0xff0000;
3403 if (flags & (1 << 3))
3404 mask |= 0xff000000;
3405
3406 /* Mask out undefined bits. */
3407 mask &= ~CPSR_RESERVED;
3408 if (!arm_feature(env, ARM_FEATURE_V4T))
3409 mask &= ~CPSR_T;
3410 if (!arm_feature(env, ARM_FEATURE_V5))
3411 mask &= ~CPSR_Q; /* V5TE in reality */
3412 if (!arm_feature(env, ARM_FEATURE_V6))
3413 mask &= ~(CPSR_E | CPSR_GE);
3414 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3415 mask &= ~CPSR_IT;
3416 /* Mask out execution state bits. */
3417 if (!spsr)
3418 mask &= ~CPSR_EXEC;
3419 /* Mask out privileged bits. */
3420 if (IS_USER(s))
3421 mask &= CPSR_USER;
3422 return mask;
3423 }
3424
3425 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3426 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
3427 {
3428 TCGv_i32 tmp;
3429 if (spsr) {
3430 /* ??? This is also undefined in system mode. */
3431 if (IS_USER(s))
3432 return 1;
3433
3434 tmp = load_cpu_field(spsr);
3435 tcg_gen_andi_i32(tmp, tmp, ~mask);
3436 tcg_gen_andi_i32(t0, t0, mask);
3437 tcg_gen_or_i32(tmp, tmp, t0);
3438 store_cpu_field(tmp, spsr);
3439 } else {
3440 gen_set_cpsr(t0, mask);
3441 }
3442 tcg_temp_free_i32(t0);
3443 gen_lookup_tb(s);
3444 return 0;
3445 }
3446
3447 /* Returns nonzero if access to the PSR is not permitted. */
3448 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3449 {
3450 TCGv_i32 tmp;
3451 tmp = tcg_temp_new_i32();
3452 tcg_gen_movi_i32(tmp, val);
3453 return gen_set_psr(s, mask, spsr, tmp);
3454 }
3455
3456 /* Generate an old-style exception return. Marks pc as dead. */
3457 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
3458 {
3459 TCGv_i32 tmp;
3460 store_reg(s, 15, pc);
3461 tmp = load_cpu_field(spsr);
3462 gen_set_cpsr(tmp, 0xffffffff);
3463 tcg_temp_free_i32(tmp);
3464 s->is_jmp = DISAS_UPDATE;
3465 }
3466
3467 /* Generate a v6 exception return. Marks both values as dead. */
3468 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
3469 {
3470 gen_set_cpsr(cpsr, 0xffffffff);
3471 tcg_temp_free_i32(cpsr);
3472 store_reg(s, 15, pc);
3473 s->is_jmp = DISAS_UPDATE;
3474 }
3475
3476 static inline void
3477 gen_set_condexec (DisasContext *s)
3478 {
3479 if (s->condexec_mask) {
3480 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3481 TCGv_i32 tmp = tcg_temp_new_i32();
3482 tcg_gen_movi_i32(tmp, val);
3483 store_cpu_field(tmp, condexec_bits);
3484 }
3485 }
3486
3487 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3488 {
3489 gen_set_condexec(s);
3490 gen_set_pc_im(s->pc - offset);
3491 gen_exception(excp);
3492 s->is_jmp = DISAS_JUMP;
3493 }
3494
3495 static void gen_nop_hint(DisasContext *s, int val)
3496 {
3497 switch (val) {
3498 case 3: /* wfi */
3499 gen_set_pc_im(s->pc);
3500 s->is_jmp = DISAS_WFI;
3501 break;
3502 case 2: /* wfe */
3503 case 4: /* sev */
3504 case 5: /* sevl */
3505 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
3506 default: /* nop */
3507 break;
3508 }
3509 }
3510
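/* Shorthand for the common (dest, src1, src2) argument triple built
 * from the shared 64-bit temporaries.
 */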
3511 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3512
3513 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
3514 {
3515 switch (size) {
3516 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3517 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3518 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3519 default: abort();
3520 }
3521 }
3522
3523 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
3524 {
3525 switch (size) {
3526 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3527 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3528 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3529 default: return;
3530 }
3531 }
3532
3533 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3534 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3535 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3536 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3537 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3538
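/* These macros dispatch on (size << 1) | u to call the signed or
 * unsigned Neon helper for the given element size; the _ENV variant is
 * for helpers that take cpu_env (such as the saturating ops).
 * Unsupported size/u combinations make the caller return 1 (UNDEF).
 */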
3539 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3540 switch ((size << 1) | u) { \
3541 case 0: \
3542 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3543 break; \
3544 case 1: \
3545 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3546 break; \
3547 case 2: \
3548 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3549 break; \
3550 case 3: \
3551 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3552 break; \
3553 case 4: \
3554 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3555 break; \
3556 case 5: \
3557 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3558 break; \
3559 default: return 1; \
3560 }} while (0)
3561
3562 #define GEN_NEON_INTEGER_OP(name) do { \
3563 switch ((size << 1) | u) { \
3564 case 0: \
3565 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3566 break; \
3567 case 1: \
3568 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3569 break; \
3570 case 2: \
3571 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3572 break; \
3573 case 3: \
3574 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3575 break; \
3576 case 4: \
3577 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3578 break; \
3579 case 5: \
3580 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3581 break; \
3582 default: return 1; \
3583 }} while (0)
3584
3585 static TCGv_i32 neon_load_scratch(int scratch)
3586 {
3587 TCGv_i32 tmp = tcg_temp_new_i32();
3588 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3589 return tmp;
3590 }
3591
3592 static void neon_store_scratch(int scratch, TCGv_i32 var)
3593 {
3594 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3595 tcg_temp_free_i32(var);
3596 }
3597
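/* Fetch the scalar operand for Neon "by scalar" operations: for 16-bit
 * scalars the element index is folded into the register number, and
 * the chosen halfword is duplicated into both halves of the returned
 * 32-bit value.
 */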
3598 static inline TCGv_i32 neon_get_scalar(int size, int reg)
3599 {
3600 TCGv_i32 tmp;
3601 if (size == 1) {
3602 tmp = neon_load_reg(reg & 7, reg >> 4);
3603 if (reg & 8) {
3604 gen_neon_dup_high16(tmp);
3605 } else {
3606 gen_neon_dup_low16(tmp);
3607 }
3608 } else {
3609 tmp = neon_load_reg(reg & 15, reg >> 4);
3610 }
3611 return tmp;
3612 }
3613
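/* VUZP/VZIP are implemented by helpers that rewrite the affected
 * registers through cpu_env; the doubleword 32-bit-element forms are
 * not handled here and make these functions return 1.
 */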
3614 static int gen_neon_unzip(int rd, int rm, int size, int q)
3615 {
3616 TCGv_i32 tmp, tmp2;
3617 if (!q && size == 2) {
3618 return 1;
3619 }
3620 tmp = tcg_const_i32(rd);
3621 tmp2 = tcg_const_i32(rm);
3622 if (q) {
3623 switch (size) {
3624 case 0:
3625 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3626 break;
3627 case 1:
3628 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3629 break;
3630 case 2:
3631 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3632 break;
3633 default:
3634 abort();
3635 }
3636 } else {
3637 switch (size) {
3638 case 0:
3639 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3640 break;
3641 case 1:
3642 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3643 break;
3644 default:
3645 abort();
3646 }
3647 }
3648 tcg_temp_free_i32(tmp);
3649 tcg_temp_free_i32(tmp2);
3650 return 0;
3651 }
3652
3653 static int gen_neon_zip(int rd, int rm, int size, int q)
3654 {
3655 TCGv_i32 tmp, tmp2;
3656 if (!q && size == 2) {
3657 return 1;
3658 }
3659 tmp = tcg_const_i32(rd);
3660 tmp2 = tcg_const_i32(rm);
3661 if (q) {
3662 switch (size) {
3663 case 0:
3664 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3665 break;
3666 case 1:
3667 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3668 break;
3669 case 2:
3670 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3671 break;
3672 default:
3673 abort();
3674 }
3675 } else {
3676 switch (size) {
3677 case 0:
3678 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3679 break;
3680 case 1:
3681 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3682 break;
3683 default:
3684 abort();
3685 }
3686 }
3687 tcg_temp_free_i32(tmp);
3688 tcg_temp_free_i32(tmp2);
3689 return 0;
3690 }
3691
3692 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
3693 {
3694 TCGv_i32 rd, tmp;
3695
3696 rd = tcg_temp_new_i32();
3697 tmp = tcg_temp_new_i32();
3698
3699 tcg_gen_shli_i32(rd, t0, 8);
3700 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3701 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3702 tcg_gen_or_i32(rd, rd, tmp);
3703
3704 tcg_gen_shri_i32(t1, t1, 8);
3705 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3706 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3707 tcg_gen_or_i32(t1, t1, tmp);
3708 tcg_gen_mov_i32(t0, rd);
3709
3710 tcg_temp_free_i32(tmp);
3711 tcg_temp_free_i32(rd);
3712 }
3713
3714 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
3715 {
3716 TCGv_i32 rd, tmp;
3717
3718 rd = tcg_temp_new_i32();
3719 tmp = tcg_temp_new_i32();
3720
3721 tcg_gen_shli_i32(rd, t0, 16);
3722 tcg_gen_andi_i32(tmp, t1, 0xffff);
3723 tcg_gen_or_i32(rd, rd, tmp);
3724 tcg_gen_shri_i32(t1, t1, 16);
3725 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3726 tcg_gen_or_i32(t1, t1, tmp);
3727 tcg_gen_mov_i32(t0, rd);
3728
3729 tcg_temp_free_i32(tmp);
3730 tcg_temp_free_i32(rd);
3731 }
3732
3733
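/* Layout of the VLDn/VSTn "multiple structures" forms, indexed by the
 * op field: how many registers are transferred, the interleave factor
 * and the spacing between registers.
 */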
3734 static struct {
3735 int nregs;
3736 int interleave;
3737 int spacing;
3738 } neon_ls_element_type[11] = {
3739 {4, 4, 1},
3740 {4, 4, 2},
3741 {4, 1, 1},
3742 {4, 2, 1},
3743 {3, 3, 1},
3744 {3, 3, 2},
3745 {3, 1, 1},
3746 {1, 1, 1},
3747 {2, 2, 1},
3748 {2, 2, 2},
3749 {2, 1, 1}
3750 };
3751
3752 /* Translate a NEON load/store element instruction. Return nonzero if the
3753 instruction is invalid. */
3754 static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
3755 {
3756 int rd, rn, rm;
3757 int op;
3758 int nregs;
3759 int interleave;
3760 int spacing;
3761 int stride;
3762 int size;
3763 int reg;
3764 int pass;
3765 int load;
3766 int shift;
3767 int n;
3768 TCGv_i32 addr;
3769 TCGv_i32 tmp;
3770 TCGv_i32 tmp2;
3771 TCGv_i64 tmp64;
3772
3773 if (!s->vfp_enabled)
3774 return 1;
3775 VFP_DREG_D(rd, insn);
3776 rn = (insn >> 16) & 0xf;
3777 rm = insn & 0xf;
3778 load = (insn & (1 << 21)) != 0;
3779 if ((insn & (1 << 23)) == 0) {
3780 /* Load store all elements. */
3781 op = (insn >> 8) & 0xf;
3782 size = (insn >> 6) & 3;
3783 if (op > 10)
3784 return 1;
3785 /* Catch UNDEF cases for bad values of align field */
3786 switch (op & 0xc) {
3787 case 4:
3788 if (((insn >> 5) & 1) == 1) {
3789 return 1;
3790 }
3791 break;
3792 case 8:
3793 if (((insn >> 4) & 3) == 3) {
3794 return 1;
3795 }
3796 break;
3797 default:
3798 break;
3799 }
3800 nregs = neon_ls_element_type[op].nregs;
3801 interleave = neon_ls_element_type[op].interleave;
3802 spacing = neon_ls_element_type[op].spacing;
3803 if (size == 3 && (interleave | spacing) != 1)
3804 return 1;
3805 addr = tcg_temp_new_i32();
3806 load_reg_var(s, addr, rn);
3807 stride = (1 << size) * interleave;
3808 for (reg = 0; reg < nregs; reg++) {
3809 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3810 load_reg_var(s, addr, rn);
3811 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3812 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3813 load_reg_var(s, addr, rn);
3814 tcg_gen_addi_i32(addr, addr, 1 << size);
3815 }
3816 if (size == 3) {
3817 tmp64 = tcg_temp_new_i64();
3818 if (load) {
3819 tcg_gen_qemu_ld64(tmp64, addr, IS_USER(s));
3820 neon_store_reg64(tmp64, rd);
3821 } else {
3822 neon_load_reg64(tmp64, rd);
3823 tcg_gen_qemu_st64(tmp64, addr, IS_USER(s));
3824 }
3825 tcg_temp_free_i64(tmp64);
3826 tcg_gen_addi_i32(addr, addr, stride);
3827 } else {
3828 for (pass = 0; pass < 2; pass++) {
3829 if (size == 2) {
3830 if (load) {
3831 tmp = tcg_temp_new_i32();
3832 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
3833 neon_store_reg(rd, pass, tmp);
3834 } else {
3835 tmp = neon_load_reg(rd, pass);
3836 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
3837 tcg_temp_free_i32(tmp);
3838 }
3839 tcg_gen_addi_i32(addr, addr, stride);
3840 } else if (size == 1) {
3841 if (load) {
3842 tmp = tcg_temp_new_i32();
3843 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
3844 tcg_gen_addi_i32(addr, addr, stride);
3845 tmp2 = tcg_temp_new_i32();
3846 tcg_gen_qemu_ld16u(tmp2, addr, IS_USER(s));
3847 tcg_gen_addi_i32(addr, addr, stride);
3848 tcg_gen_shli_i32(tmp2, tmp2, 16);
3849 tcg_gen_or_i32(tmp, tmp, tmp2);
3850 tcg_temp_free_i32(tmp2);
3851 neon_store_reg(rd, pass, tmp);
3852 } else {
3853 tmp = neon_load_reg(rd, pass);
3854 tmp2 = tcg_temp_new_i32();
3855 tcg_gen_shri_i32(tmp2, tmp, 16);
3856 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
3857 tcg_temp_free_i32(tmp);
3858 tcg_gen_addi_i32(addr, addr, stride);
3859 tcg_gen_qemu_st16(tmp2, addr, IS_USER(s));
3860 tcg_temp_free_i32(tmp2);
3861 tcg_gen_addi_i32(addr, addr, stride);
3862 }
3863 } else /* size == 0 */ {
3864 if (load) {
3865 TCGV_UNUSED_I32(tmp2);
3866 for (n = 0; n < 4; n++) {
3867 tmp = tcg_temp_new_i32();
3868 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
3869 tcg_gen_addi_i32(addr, addr, stride);
3870 if (n == 0) {
3871 tmp2 = tmp;
3872 } else {
3873 tcg_gen_shli_i32(tmp, tmp, n * 8);
3874 tcg_gen_or_i32(tmp2, tmp2, tmp);
3875 tcg_temp_free_i32(tmp);
3876 }
3877 }
3878 neon_store_reg(rd, pass, tmp2);
3879 } else {
3880 tmp2 = neon_load_reg(rd, pass);
3881 for (n = 0; n < 4; n++) {
3882 tmp = tcg_temp_new_i32();
3883 if (n == 0) {
3884 tcg_gen_mov_i32(tmp, tmp2);
3885 } else {
3886 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3887 }
3888 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
3889 tcg_temp_free_i32(tmp);
3890 tcg_gen_addi_i32(addr, addr, stride);
3891 }
3892 tcg_temp_free_i32(tmp2);
3893 }
3894 }
3895 }
3896 }
3897 rd += spacing;
3898 }
3899 tcg_temp_free_i32(addr);
3900 stride = nregs * 8;
3901 } else {
3902 size = (insn >> 10) & 3;
3903 if (size == 3) {
3904 /* Load single element to all lanes. */
3905 int a = (insn >> 4) & 1;
3906 if (!load) {
3907 return 1;
3908 }
3909 size = (insn >> 6) & 3;
3910 nregs = ((insn >> 8) & 3) + 1;
3911
3912 if (size == 3) {
3913 if (nregs != 4 || a == 0) {
3914 return 1;
3915 }
3916 /* For VLD4, size == 3 with a == 1 means 32 bits at 16-byte alignment */
3917 size = 2;
3918 }
3919 if (nregs == 1 && a == 1 && size == 0) {
3920 return 1;
3921 }
3922 if (nregs == 3 && a == 1) {
3923 return 1;
3924 }
3925 addr = tcg_temp_new_i32();
3926 load_reg_var(s, addr, rn);
3927 if (nregs == 1) {
3928 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3929 tmp = gen_load_and_replicate(s, addr, size);
3930 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3931 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3932 if (insn & (1 << 5)) {
3933 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3934 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3935 }
3936 tcg_temp_free_i32(tmp);
3937 } else {
3938 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3939 stride = (insn & (1 << 5)) ? 2 : 1;
3940 for (reg = 0; reg < nregs; reg++) {
3941 tmp = gen_load_and_replicate(s, addr, size);
3942 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3943 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3944 tcg_temp_free_i32(tmp);
3945 tcg_gen_addi_i32(addr, addr, 1 << size);
3946 rd += stride;
3947 }
3948 }
3949 tcg_temp_free_i32(addr);
3950 stride = (1 << size) * nregs;
3951 } else {
3952 /* Single element. */
3953 int idx = (insn >> 4) & 0xf;
3954 pass = (insn >> 7) & 1;
3955 switch (size) {
3956 case 0:
3957 shift = ((insn >> 5) & 3) * 8;
3958 stride = 1;
3959 break;
3960 case 1:
3961 shift = ((insn >> 6) & 1) * 16;
3962 stride = (insn & (1 << 5)) ? 2 : 1;
3963 break;
3964 case 2:
3965 shift = 0;
3966 stride = (insn & (1 << 6)) ? 2 : 1;
3967 break;
3968 default:
3969 abort();
3970 }
3971 nregs = ((insn >> 8) & 3) + 1;
3972 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3973 switch (nregs) {
3974 case 1:
3975 if (((idx & (1 << size)) != 0) ||
3976 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
3977 return 1;
3978 }
3979 break;
3980 case 3:
3981 if ((idx & 1) != 0) {
3982 return 1;
3983 }
3984 /* fall through */
3985 case 2:
3986 if (size == 2 && (idx & 2) != 0) {
3987 return 1;
3988 }
3989 break;
3990 case 4:
3991 if ((size == 2) && ((idx & 3) == 3)) {
3992 return 1;
3993 }
3994 break;
3995 default:
3996 abort();
3997 }
3998 if ((rd + stride * (nregs - 1)) > 31) {
3999 /* Attempts to write off the end of the register file
4000 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4001 * the neon_load_reg() would write off the end of the array.
4002 */
4003 return 1;
4004 }
4005 addr = tcg_temp_new_i32();
4006 load_reg_var(s, addr, rn);
4007 for (reg = 0; reg < nregs; reg++) {
4008 if (load) {
4009 tmp = tcg_temp_new_i32();
4010 switch (size) {
4011 case 0:
4012 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
4013 break;
4014 case 1:
4015 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
4016 break;
4017 case 2:
4018 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
4019 break;
4020 default: /* Avoid compiler warnings. */
4021 abort();
4022 }
4023 if (size != 2) {
4024 tmp2 = neon_load_reg(rd, pass);
4025 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4026 shift, size ? 16 : 8);
4027 tcg_temp_free_i32(tmp2);
4028 }
4029 neon_store_reg(rd, pass, tmp);
4030 } else { /* Store */
4031 tmp = neon_load_reg(rd, pass);
4032 if (shift)
4033 tcg_gen_shri_i32(tmp, tmp, shift);
4034 switch (size) {
4035 case 0:
4036 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
4037 break;
4038 case 1:
4039 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
4040 break;
4041 case 2:
4042 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
4043 break;
4044 }
4045 tcg_temp_free_i32(tmp);
4046 }
4047 rd += stride;
4048 tcg_gen_addi_i32(addr, addr, 1 << size);
4049 }
4050 tcg_temp_free_i32(addr);
4051 stride = nregs * (1 << size);
4052 }
4053 }
4054 if (rm != 15) {
4055 TCGv_i32 base;
4056
4057 base = load_reg(s, rn);
4058 if (rm == 13) {
4059 tcg_gen_addi_i32(base, base, stride);
4060 } else {
4061 TCGv_i32 index;
4062 index = load_reg(s, rm);
4063 tcg_gen_add_i32(base, base, index);
4064 tcg_temp_free_i32(index);
4065 }
4066 store_reg(s, rn, base);
4067 }
4068 return 0;
4069 }
4070
4071 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4072 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
4073 {
4074 tcg_gen_and_i32(t, t, c);
4075 tcg_gen_andc_i32(f, f, c);
4076 tcg_gen_or_i32(dest, t, f);
4077 }
4078
4079 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
4080 {
4081 switch (size) {
4082 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4083 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4084 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4085 default: abort();
4086 }
4087 }
4088
4089 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4090 {
4091 switch (size) {
4092 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4093 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4094 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4095 default: abort();
4096 }
4097 }
4098
4099 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
4100 {
4101 switch (size) {
4102 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4103 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4104 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4105 default: abort();
4106 }
4107 }
4108
4109 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4110 {
4111 switch (size) {
4112 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4113 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4114 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4115 default: abort();
4116 }
4117 }
4118
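/* Helper for the narrowing shift operations: the q argument selects
 * the rounding form of the shift and u the unsigned form; only 16- and
 * 32-bit element sizes reach this function.
 */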
4119 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
4120 int q, int u)
4121 {
4122 if (q) {
4123 if (u) {
4124 switch (size) {
4125 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4126 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4127 default: abort();
4128 }
4129 } else {
4130 switch (size) {
4131 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4132 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4133 default: abort();
4134 }
4135 }
4136 } else {
4137 if (u) {
4138 switch (size) {
4139 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4140 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4141 default: abort();
4142 }
4143 } else {
4144 switch (size) {
4145 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4146 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4147 default: abort();
4148 }
4149 }
4150 }
4151 }
4152
4153 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
4154 {
4155 if (u) {
4156 switch (size) {
4157 case 0: gen_helper_neon_widen_u8(dest, src); break;
4158 case 1: gen_helper_neon_widen_u16(dest, src); break;
4159 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4160 default: abort();
4161 }
4162 } else {
4163 switch (size) {
4164 case 0: gen_helper_neon_widen_s8(dest, src); break;
4165 case 1: gen_helper_neon_widen_s16(dest, src); break;
4166 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4167 default: abort();
4168 }
4169 }
4170 tcg_temp_free_i32(src);
4171 }
4172
4173 static inline void gen_neon_addl(int size)
4174 {
4175 switch (size) {
4176 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4177 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4178 case 2: tcg_gen_add_i64(CPU_V001); break;
4179 default: abort();
4180 }
4181 }
4182
4183 static inline void gen_neon_subl(int size)
4184 {
4185 switch (size) {
4186 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4187 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4188 case 2: tcg_gen_sub_i64(CPU_V001); break;
4189 default: abort();
4190 }
4191 }
4192
4193 static inline void gen_neon_negl(TCGv_i64 var, int size)
4194 {
4195 switch (size) {
4196 case 0: gen_helper_neon_negl_u16(var, var); break;
4197 case 1: gen_helper_neon_negl_u32(var, var); break;
4198 case 2:
4199 tcg_gen_neg_i64(var, var);
4200 break;
4201 default: abort();
4202 }
4203 }
4204
4205 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4206 {
4207 switch (size) {
4208 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4209 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4210 default: abort();
4211 }
4212 }
4213
4214 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4215 int size, int u)
4216 {
4217 TCGv_i64 tmp;
4218
4219 switch ((size << 1) | u) {
4220 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4221 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4222 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4223 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4224 case 4:
4225 tmp = gen_muls_i64_i32(a, b);
4226 tcg_gen_mov_i64(dest, tmp);
4227 tcg_temp_free_i64(tmp);
4228 break;
4229 case 5:
4230 tmp = gen_mulu_i64_i32(a, b);
4231 tcg_gen_mov_i64(dest, tmp);
4232 tcg_temp_free_i64(tmp);
4233 break;
4234 default: abort();
4235 }
4236
4237 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4238 Don't forget to clean them now. */
4239 if (size < 2) {
4240 tcg_temp_free_i32(a);
4241 tcg_temp_free_i32(b);
4242 }
4243 }
4244
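/* Dispatch to the appropriate narrowing helper: with 'op' set this is a plain
 * narrow (or, when 'u' is also set, a signed-to-unsigned saturating narrow);
 * with 'op' clear it is a saturating narrow, unsigned if 'u' is set and
 * signed otherwise. */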
4245 static void gen_neon_narrow_op(int op, int u, int size,
4246 TCGv_i32 dest, TCGv_i64 src)
4247 {
4248 if (op) {
4249 if (u) {
4250 gen_neon_unarrow_sats(size, dest, src);
4251 } else {
4252 gen_neon_narrow(size, dest, src);
4253 }
4254 } else {
4255 if (u) {
4256 gen_neon_narrow_satu(size, dest, src);
4257 } else {
4258 gen_neon_narrow_sats(size, dest, src);
4259 }
4260 }
4261 }
4262
4263 /* Symbolic constants for op fields for Neon 3-register same-length.
4264 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4265 * table A7-9.
4266 */
4267 #define NEON_3R_VHADD 0
4268 #define NEON_3R_VQADD 1
4269 #define NEON_3R_VRHADD 2
4270 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4271 #define NEON_3R_VHSUB 4
4272 #define NEON_3R_VQSUB 5
4273 #define NEON_3R_VCGT 6
4274 #define NEON_3R_VCGE 7
4275 #define NEON_3R_VSHL 8
4276 #define NEON_3R_VQSHL 9
4277 #define NEON_3R_VRSHL 10
4278 #define NEON_3R_VQRSHL 11
4279 #define NEON_3R_VMAX 12
4280 #define NEON_3R_VMIN 13
4281 #define NEON_3R_VABD 14
4282 #define NEON_3R_VABA 15
4283 #define NEON_3R_VADD_VSUB 16
4284 #define NEON_3R_VTST_VCEQ 17
4285 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4286 #define NEON_3R_VMUL 19
4287 #define NEON_3R_VPMAX 20
4288 #define NEON_3R_VPMIN 21
4289 #define NEON_3R_VQDMULH_VQRDMULH 22
4290 #define NEON_3R_VPADD 23
4291 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4292 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4293 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4294 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4295 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4296 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4297 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4298
4299 static const uint8_t neon_3r_sizes[] = {
4300 [NEON_3R_VHADD] = 0x7,
4301 [NEON_3R_VQADD] = 0xf,
4302 [NEON_3R_VRHADD] = 0x7,
4303 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4304 [NEON_3R_VHSUB] = 0x7,
4305 [NEON_3R_VQSUB] = 0xf,
4306 [NEON_3R_VCGT] = 0x7,
4307 [NEON_3R_VCGE] = 0x7,
4308 [NEON_3R_VSHL] = 0xf,
4309 [NEON_3R_VQSHL] = 0xf,
4310 [NEON_3R_VRSHL] = 0xf,
4311 [NEON_3R_VQRSHL] = 0xf,
4312 [NEON_3R_VMAX] = 0x7,
4313 [NEON_3R_VMIN] = 0x7,
4314 [NEON_3R_VABD] = 0x7,
4315 [NEON_3R_VABA] = 0x7,
4316 [NEON_3R_VADD_VSUB] = 0xf,
4317 [NEON_3R_VTST_VCEQ] = 0x7,
4318 [NEON_3R_VML] = 0x7,
4319 [NEON_3R_VMUL] = 0x7,
4320 [NEON_3R_VPMAX] = 0x7,
4321 [NEON_3R_VPMIN] = 0x7,
4322 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4323 [NEON_3R_VPADD] = 0x7,
4324 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4325 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4326 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4327 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4328 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4329 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4330 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4331 };
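/* For example, the 0x6 entry for VQDMULH/VQRDMULH means only size 1 (16-bit)
 * and size 2 (32-bit) elements are accepted; any other size value UNDEFs via
 * the table check in disas_neon_data_insn() below. */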
4332
4333 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4334 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4335 * table A7-13.
4336 */
4337 #define NEON_2RM_VREV64 0
4338 #define NEON_2RM_VREV32 1
4339 #define NEON_2RM_VREV16 2
4340 #define NEON_2RM_VPADDL 4
4341 #define NEON_2RM_VPADDL_U 5
4342 #define NEON_2RM_VCLS 8
4343 #define NEON_2RM_VCLZ 9
4344 #define NEON_2RM_VCNT 10
4345 #define NEON_2RM_VMVN 11
4346 #define NEON_2RM_VPADAL 12
4347 #define NEON_2RM_VPADAL_U 13
4348 #define NEON_2RM_VQABS 14
4349 #define NEON_2RM_VQNEG 15
4350 #define NEON_2RM_VCGT0 16
4351 #define NEON_2RM_VCGE0 17
4352 #define NEON_2RM_VCEQ0 18
4353 #define NEON_2RM_VCLE0 19
4354 #define NEON_2RM_VCLT0 20
4355 #define NEON_2RM_VABS 22
4356 #define NEON_2RM_VNEG 23
4357 #define NEON_2RM_VCGT0_F 24
4358 #define NEON_2RM_VCGE0_F 25
4359 #define NEON_2RM_VCEQ0_F 26
4360 #define NEON_2RM_VCLE0_F 27
4361 #define NEON_2RM_VCLT0_F 28
4362 #define NEON_2RM_VABS_F 30
4363 #define NEON_2RM_VNEG_F 31
4364 #define NEON_2RM_VSWP 32
4365 #define NEON_2RM_VTRN 33
4366 #define NEON_2RM_VUZP 34
4367 #define NEON_2RM_VZIP 35
4368 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4369 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4370 #define NEON_2RM_VSHLL 38
4371 #define NEON_2RM_VCVT_F16_F32 44
4372 #define NEON_2RM_VCVT_F32_F16 46
4373 #define NEON_2RM_VRECPE 56
4374 #define NEON_2RM_VRSQRTE 57
4375 #define NEON_2RM_VRECPE_F 58
4376 #define NEON_2RM_VRSQRTE_F 59
4377 #define NEON_2RM_VCVT_FS 60
4378 #define NEON_2RM_VCVT_FU 61
4379 #define NEON_2RM_VCVT_SF 62
4380 #define NEON_2RM_VCVT_UF 63
4381
4382 static int neon_2rm_is_float_op(int op)
4383 {
4384 /* Return true if this neon 2reg-misc op is float-to-float */
4385 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4386 op >= NEON_2RM_VRECPE_F);
4387 }
4388
4389 /* Each entry in this array has bit n set if the insn allows
4390 * size value n (otherwise it will UNDEF). Since unallocated
4391 * op values will have no bits set they always UNDEF.
4392 */
4393 static const uint8_t neon_2rm_sizes[] = {
4394 [NEON_2RM_VREV64] = 0x7,
4395 [NEON_2RM_VREV32] = 0x3,
4396 [NEON_2RM_VREV16] = 0x1,
4397 [NEON_2RM_VPADDL] = 0x7,
4398 [NEON_2RM_VPADDL_U] = 0x7,
4399 [NEON_2RM_VCLS] = 0x7,
4400 [NEON_2RM_VCLZ] = 0x7,
4401 [NEON_2RM_VCNT] = 0x1,
4402 [NEON_2RM_VMVN] = 0x1,
4403 [NEON_2RM_VPADAL] = 0x7,
4404 [NEON_2RM_VPADAL_U] = 0x7,
4405 [NEON_2RM_VQABS] = 0x7,
4406 [NEON_2RM_VQNEG] = 0x7,
4407 [NEON_2RM_VCGT0] = 0x7,
4408 [NEON_2RM_VCGE0] = 0x7,
4409 [NEON_2RM_VCEQ0] = 0x7,
4410 [NEON_2RM_VCLE0] = 0x7,
4411 [NEON_2RM_VCLT0] = 0x7,
4412 [NEON_2RM_VABS] = 0x7,
4413 [NEON_2RM_VNEG] = 0x7,
4414 [NEON_2RM_VCGT0_F] = 0x4,
4415 [NEON_2RM_VCGE0_F] = 0x4,
4416 [NEON_2RM_VCEQ0_F] = 0x4,
4417 [NEON_2RM_VCLE0_F] = 0x4,
4418 [NEON_2RM_VCLT0_F] = 0x4,
4419 [NEON_2RM_VABS_F] = 0x4,
4420 [NEON_2RM_VNEG_F] = 0x4,
4421 [NEON_2RM_VSWP] = 0x1,
4422 [NEON_2RM_VTRN] = 0x7,
4423 [NEON_2RM_VUZP] = 0x7,
4424 [NEON_2RM_VZIP] = 0x7,
4425 [NEON_2RM_VMOVN] = 0x7,
4426 [NEON_2RM_VQMOVN] = 0x7,
4427 [NEON_2RM_VSHLL] = 0x7,
4428 [NEON_2RM_VCVT_F16_F32] = 0x2,
4429 [NEON_2RM_VCVT_F32_F16] = 0x2,
4430 [NEON_2RM_VRECPE] = 0x4,
4431 [NEON_2RM_VRSQRTE] = 0x4,
4432 [NEON_2RM_VRECPE_F] = 0x4,
4433 [NEON_2RM_VRSQRTE_F] = 0x4,
4434 [NEON_2RM_VCVT_FS] = 0x4,
4435 [NEON_2RM_VCVT_FU] = 0x4,
4436 [NEON_2RM_VCVT_SF] = 0x4,
4437 [NEON_2RM_VCVT_UF] = 0x4,
4438 };
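/* For example, VREV16's 0x1 entry permits only size 0 (byte elements), while
 * the float-to-float ops use 0x4, i.e. size 2 (single-precision) only. */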
4439
4440 /* Translate a NEON data processing instruction. Return nonzero if the
4441 instruction is invalid.
4442 We process data in a mixture of 32-bit and 64-bit chunks.
4443 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4444
4445 static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
4446 {
4447 int op;
4448 int q;
4449 int rd, rn, rm;
4450 int size;
4451 int shift;
4452 int pass;
4453 int count;
4454 int pairwise;
4455 int u;
4456 uint32_t imm, mask;
4457 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
4458 TCGv_i64 tmp64;
4459
4460 if (!s->vfp_enabled)
4461 return 1;
4462 q = (insn & (1 << 6)) != 0;
4463 u = (insn >> 24) & 1;
4464 VFP_DREG_D(rd, insn);
4465 VFP_DREG_N(rn, insn);
4466 VFP_DREG_M(rm, insn);
4467 size = (insn >> 20) & 3;
4468 if ((insn & (1 << 23)) == 0) {
4469 /* Three register same length. */
4470 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4471 /* Catch invalid op and bad size combinations: UNDEF */
4472 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4473 return 1;
4474 }
4475 /* All insns of this form UNDEF for either this condition or the
4476 * superset of cases "Q==1"; we catch the latter later.
4477 */
4478 if (q && ((rd | rn | rm) & 1)) {
4479 return 1;
4480 }
4481 if (size == 3 && op != NEON_3R_LOGIC) {
4482 /* 64-bit element instructions. */
4483 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4484 neon_load_reg64(cpu_V0, rn + pass);
4485 neon_load_reg64(cpu_V1, rm + pass);
4486 switch (op) {
4487 case NEON_3R_VQADD:
4488 if (u) {
4489 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4490 cpu_V0, cpu_V1);
4491 } else {
4492 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4493 cpu_V0, cpu_V1);
4494 }
4495 break;
4496 case NEON_3R_VQSUB:
4497 if (u) {
4498 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4499 cpu_V0, cpu_V1);
4500 } else {
4501 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4502 cpu_V0, cpu_V1);
4503 }
4504 break;
4505 case NEON_3R_VSHL:
4506 if (u) {
4507 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4508 } else {
4509 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4510 }
4511 break;
4512 case NEON_3R_VQSHL:
4513 if (u) {
4514 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4515 cpu_V1, cpu_V0);
4516 } else {
4517 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4518 cpu_V1, cpu_V0);
4519 }
4520 break;
4521 case NEON_3R_VRSHL:
4522 if (u) {
4523 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4524 } else {
4525 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4526 }
4527 break;
4528 case NEON_3R_VQRSHL:
4529 if (u) {
4530 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4531 cpu_V1, cpu_V0);
4532 } else {
4533 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4534 cpu_V1, cpu_V0);
4535 }
4536 break;
4537 case NEON_3R_VADD_VSUB:
4538 if (u) {
4539 tcg_gen_sub_i64(CPU_V001);
4540 } else {
4541 tcg_gen_add_i64(CPU_V001);
4542 }
4543 break;
4544 default:
4545 abort();
4546 }
4547 neon_store_reg64(cpu_V0, rd + pass);
4548 }
4549 return 0;
4550 }
4551 pairwise = 0;
4552 switch (op) {
4553 case NEON_3R_VSHL:
4554 case NEON_3R_VQSHL:
4555 case NEON_3R_VRSHL:
4556 case NEON_3R_VQRSHL:
4557 {
4558 int rtmp;
4559 /* Shift instruction operands are reversed. */
4560 rtmp = rn;
4561 rn = rm;
4562 rm = rtmp;
4563 }
4564 break;
4565 case NEON_3R_VPADD:
4566 if (u) {
4567 return 1;
4568 }
4569 /* Fall through */
4570 case NEON_3R_VPMAX:
4571 case NEON_3R_VPMIN:
4572 pairwise = 1;
4573 break;
4574 case NEON_3R_FLOAT_ARITH:
4575 pairwise = (u && size < 2); /* if VPADD (float) */
4576 break;
4577 case NEON_3R_FLOAT_MINMAX:
4578 pairwise = u; /* if VPMIN/VPMAX (float) */
4579 break;
4580 case NEON_3R_FLOAT_CMP:
4581 if (!u && size) {
4582 /* no encoding for U=0 C=1x */
4583 return 1;
4584 }
4585 break;
4586 case NEON_3R_FLOAT_ACMP:
4587 if (!u) {
4588 return 1;
4589 }
4590 break;
4591 case NEON_3R_VRECPS_VRSQRTS:
4592 if (u) {
4593 return 1;
4594 }
4595 break;
4596 case NEON_3R_VMUL:
4597 if (u && (size != 0)) {
4598 /* UNDEF on invalid size for polynomial subcase */
4599 return 1;
4600 }
4601 break;
4602 case NEON_3R_VFM:
4603 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4604 return 1;
4605 }
4606 break;
4607 default:
4608 break;
4609 }
4610
4611 if (pairwise && q) {
4612 /* All the pairwise insns UNDEF if Q is set */
4613 return 1;
4614 }
4615
4616 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4617
4618 if (pairwise) {
4619 /* Pairwise. */
4620 if (pass < 1) {
4621 tmp = neon_load_reg(rn, 0);
4622 tmp2 = neon_load_reg(rn, 1);
4623 } else {
4624 tmp = neon_load_reg(rm, 0);
4625 tmp2 = neon_load_reg(rm, 1);
4626 }
4627 } else {
4628 /* Elementwise. */
4629 tmp = neon_load_reg(rn, pass);
4630 tmp2 = neon_load_reg(rm, pass);
4631 }
4632 switch (op) {
4633 case NEON_3R_VHADD:
4634 GEN_NEON_INTEGER_OP(hadd);
4635 break;
4636 case NEON_3R_VQADD:
4637 GEN_NEON_INTEGER_OP_ENV(qadd);
4638 break;
4639 case NEON_3R_VRHADD:
4640 GEN_NEON_INTEGER_OP(rhadd);
4641 break;
4642 case NEON_3R_LOGIC: /* Logic ops. */
4643 switch ((u << 2) | size) {
4644 case 0: /* VAND */
4645 tcg_gen_and_i32(tmp, tmp, tmp2);
4646 break;
4647 case 1: /* BIC */
4648 tcg_gen_andc_i32(tmp, tmp, tmp2);
4649 break;
4650 case 2: /* VORR */
4651 tcg_gen_or_i32(tmp, tmp, tmp2);
4652 break;
4653 case 3: /* VORN */
4654 tcg_gen_orc_i32(tmp, tmp, tmp2);
4655 break;
4656 case 4: /* VEOR */
4657 tcg_gen_xor_i32(tmp, tmp, tmp2);
4658 break;
4659 case 5: /* VBSL */
4660 tmp3 = neon_load_reg(rd, pass);
4661 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4662 tcg_temp_free_i32(tmp3);
4663 break;
4664 case 6: /* VBIT */
4665 tmp3 = neon_load_reg(rd, pass);
4666 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4667 tcg_temp_free_i32(tmp3);
4668 break;
4669 case 7: /* VBIF */
4670 tmp3 = neon_load_reg(rd, pass);
4671 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4672 tcg_temp_free_i32(tmp3);
4673 break;
4674 }
4675 break;
4676 case NEON_3R_VHSUB:
4677 GEN_NEON_INTEGER_OP(hsub);
4678 break;
4679 case NEON_3R_VQSUB:
4680 GEN_NEON_INTEGER_OP_ENV(qsub);
4681 break;
4682 case NEON_3R_VCGT:
4683 GEN_NEON_INTEGER_OP(cgt);
4684 break;
4685 case NEON_3R_VCGE:
4686 GEN_NEON_INTEGER_OP(cge);
4687 break;
4688 case NEON_3R_VSHL:
4689 GEN_NEON_INTEGER_OP(shl);
4690 break;
4691 case NEON_3R_VQSHL:
4692 GEN_NEON_INTEGER_OP_ENV(qshl);
4693 break;
4694 case NEON_3R_VRSHL:
4695 GEN_NEON_INTEGER_OP(rshl);
4696 break;
4697 case NEON_3R_VQRSHL:
4698 GEN_NEON_INTEGER_OP_ENV(qrshl);
4699 break;
4700 case NEON_3R_VMAX:
4701 GEN_NEON_INTEGER_OP(max);
4702 break;
4703 case NEON_3R_VMIN:
4704 GEN_NEON_INTEGER_OP(min);
4705 break;
4706 case NEON_3R_VABD:
4707 GEN_NEON_INTEGER_OP(abd);
4708 break;
4709 case NEON_3R_VABA:
4710 GEN_NEON_INTEGER_OP(abd);
4711 tcg_temp_free_i32(tmp2);
4712 tmp2 = neon_load_reg(rd, pass);
4713 gen_neon_add(size, tmp, tmp2);
4714 break;
4715 case NEON_3R_VADD_VSUB:
4716 if (!u) { /* VADD */
4717 gen_neon_add(size, tmp, tmp2);
4718 } else { /* VSUB */
4719 switch (size) {
4720 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4721 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4722 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4723 default: abort();
4724 }
4725 }
4726 break;
4727 case NEON_3R_VTST_VCEQ:
4728 if (!u) { /* VTST */
4729 switch (size) {
4730 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4731 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4732 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4733 default: abort();
4734 }
4735 } else { /* VCEQ */
4736 switch (size) {
4737 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4738 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4739 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4740 default: abort();
4741 }
4742 }
4743 break;
4744 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
4745 switch (size) {
4746 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4747 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4748 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4749 default: abort();
4750 }
4751 tcg_temp_free_i32(tmp2);
4752 tmp2 = neon_load_reg(rd, pass);
4753 if (u) { /* VMLS */
4754 gen_neon_rsb(size, tmp, tmp2);
4755 } else { /* VMLA */
4756 gen_neon_add(size, tmp, tmp2);
4757 }
4758 break;
4759 case NEON_3R_VMUL:
4760 if (u) { /* polynomial */
4761 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4762 } else { /* Integer */
4763 switch (size) {
4764 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4765 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4766 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4767 default: abort();
4768 }
4769 }
4770 break;
4771 case NEON_3R_VPMAX:
4772 GEN_NEON_INTEGER_OP(pmax);
4773 break;
4774 case NEON_3R_VPMIN:
4775 GEN_NEON_INTEGER_OP(pmin);
4776 break;
4777 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4778 if (!u) { /* VQDMULH */
4779 switch (size) {
4780 case 1:
4781 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4782 break;
4783 case 2:
4784 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4785 break;
4786 default: abort();
4787 }
4788 } else { /* VQRDMULH */
4789 switch (size) {
4790 case 1:
4791 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4792 break;
4793 case 2:
4794 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4795 break;
4796 default: abort();
4797 }
4798 }
4799 break;
4800 case NEON_3R_VPADD:
4801 switch (size) {
4802 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4803 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4804 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4805 default: abort();
4806 }
4807 break;
4808 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4809 {
4810 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4811 switch ((u << 2) | size) {
4812 case 0: /* VADD */
4813 case 4: /* VPADD */
4814 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4815 break;
4816 case 2: /* VSUB */
4817 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
4818 break;
4819 case 6: /* VABD */
4820 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
4821 break;
4822 default:
4823 abort();
4824 }
4825 tcg_temp_free_ptr(fpstatus);
4826 break;
4827 }
4828 case NEON_3R_FLOAT_MULTIPLY:
4829 {
4830 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4831 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
4832 if (!u) {
4833 tcg_temp_free_i32(tmp2);
4834 tmp2 = neon_load_reg(rd, pass);
4835 if (size == 0) {
4836 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4837 } else {
4838 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
4839 }
4840 }
4841 tcg_temp_free_ptr(fpstatus);
4842 break;
4843 }
4844 case NEON_3R_FLOAT_CMP:
4845 {
4846 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4847 if (!u) {
4848 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
4849 } else {
4850 if (size == 0) {
4851 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4852 } else {
4853 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4854 }
4855 }
4856 tcg_temp_free_ptr(fpstatus);
4857 break;
4858 }
4859 case NEON_3R_FLOAT_ACMP:
4860 {
4861 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4862 if (size == 0) {
4863 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4864 } else {
4865 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4866 }
4867 tcg_temp_free_ptr(fpstatus);
4868 break;
4869 }
4870 case NEON_3R_FLOAT_MINMAX:
4871 {
4872 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4873 if (size == 0) {
4874 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4875 } else {
4876 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4877 }
4878 tcg_temp_free_ptr(fpstatus);
4879 break;
4880 }
4881 case NEON_3R_VRECPS_VRSQRTS:
4882 if (size == 0)
4883 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4884 else
4885 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4886 break;
4887 case NEON_3R_VFM:
4888 {
4889 /* VFMA, VFMS: fused multiply-add */
4890 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4891 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4892 if (size) {
4893 /* VFMS */
4894 gen_helper_vfp_negs(tmp, tmp);
4895 }
4896 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4897 tcg_temp_free_i32(tmp3);
4898 tcg_temp_free_ptr(fpstatus);
4899 break;
4900 }
4901 default:
4902 abort();
4903 }
4904 tcg_temp_free_i32(tmp2);
4905
4906 /* Save the result. For elementwise operations we can put it
4907 straight into the destination register. For pairwise operations
4908 we have to be careful to avoid clobbering the source operands. */
4909 if (pairwise && rd == rm) {
4910 neon_store_scratch(pass, tmp);
4911 } else {
4912 neon_store_reg(rd, pass, tmp);
4913 }
4914
4915 } /* for pass */
4916 if (pairwise && rd == rm) {
4917 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4918 tmp = neon_load_scratch(pass);
4919 neon_store_reg(rd, pass, tmp);
4920 }
4921 }
4922 /* End of 3 register same size operations. */
4923 } else if (insn & (1 << 4)) {
4924 if ((insn & 0x00380080) != 0) {
4925 /* Two registers and shift. */
4926 op = (insn >> 8) & 0xf;
4927 if (insn & (1 << 7)) {
4928 /* 64-bit shift. */
4929 if (op > 7) {
4930 return 1;
4931 }
4932 size = 3;
4933 } else {
4934 size = 2;
4935 while ((insn & (1 << (size + 19))) == 0)
4936 size--;
4937 }
4938 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4939 /* To avoid excessive duplication of ops we implement shift
4940 by immediate using the variable shift operations. */
4941 if (op < 8) {
4942 /* Shift by immediate:
4943 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4944 if (q && ((rd | rm) & 1)) {
4945 return 1;
4946 }
4947 if (!u && (op == 4 || op == 6)) {
4948 return 1;
4949 }
4950 /* Right shifts are encoded as N - shift, where N is the
4951 element size in bits. */
4952 if (op <= 4)
4953 shift = shift - (1 << (size + 3));
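/* Worked example: for 8-bit elements an encoded field of 7 becomes 7 - 8 = -1,
 * i.e. a right shift by 1, and a field of 0 becomes -8, a right shift by 8;
 * the variable shift helpers treat a negative count as a right shift. */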
4954 if (size == 3) {
4955 count = q + 1;
4956 } else {
4957 count = q ? 4: 2;
4958 }
4959 switch (size) {
4960 case 0:
4961 imm = (uint8_t) shift;
4962 imm |= imm << 8;
4963 imm |= imm << 16;
4964 break;
4965 case 1:
4966 imm = (uint16_t) shift;
4967 imm |= imm << 16;
4968 break;
4969 case 2:
4970 case 3:
4971 imm = shift;
4972 break;
4973 default:
4974 abort();
4975 }
4976
4977 for (pass = 0; pass < count; pass++) {
4978 if (size == 3) {
4979 neon_load_reg64(cpu_V0, rm + pass);
4980 tcg_gen_movi_i64(cpu_V1, imm);
4981 switch (op) {
4982 case 0: /* VSHR */
4983 case 1: /* VSRA */
4984 if (u)
4985 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4986 else
4987 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4988 break;
4989 case 2: /* VRSHR */
4990 case 3: /* VRSRA */
4991 if (u)
4992 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4993 else
4994 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4995 break;
4996 case 4: /* VSRI */
4997 case 5: /* VSHL, VSLI */
4998 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4999 break;
5000 case 6: /* VQSHLU */
5001 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5002 cpu_V0, cpu_V1);
5003 break;
5004 case 7: /* VQSHL */
5005 if (u) {
5006 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5007 cpu_V0, cpu_V1);
5008 } else {
5009 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5010 cpu_V0, cpu_V1);
5011 }
5012 break;
5013 }
5014 if (op == 1 || op == 3) {
5015 /* Accumulate. */
5016 neon_load_reg64(cpu_V1, rd + pass);
5017 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5018 } else if (op == 4 || (op == 5 && u)) {
5019 /* Insert */
5020 neon_load_reg64(cpu_V1, rd + pass);
5021 uint64_t mask;
5022 if (shift < -63 || shift > 63) {
5023 mask = 0;
5024 } else {
5025 if (op == 4) {
5026 mask = 0xffffffffffffffffull >> -shift;
5027 } else {
5028 mask = 0xffffffffffffffffull << shift;
5029 }
5030 }
5031 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5032 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5033 }
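/* Worked example for the insert above: VSRI with 64-bit elements and a shift
 * of 8 (shift == -8 here) gives a mask of the low 56 bits, so the top 8 bits
 * of the destination are preserved and the logically right-shifted source
 * fills the remainder. */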
5034 neon_store_reg64(cpu_V0, rd + pass);
5035 } else { /* size < 3 */
5036 /* Operands in tmp and tmp2. */
5037 tmp = neon_load_reg(rm, pass);
5038 tmp2 = tcg_temp_new_i32();
5039 tcg_gen_movi_i32(tmp2, imm);
5040 switch (op) {
5041 case 0: /* VSHR */
5042 case 1: /* VSRA */
5043 GEN_NEON_INTEGER_OP(shl);
5044 break;
5045 case 2: /* VRSHR */
5046 case 3: /* VRSRA */
5047 GEN_NEON_INTEGER_OP(rshl);
5048 break;
5049 case 4: /* VSRI */
5050 case 5: /* VSHL, VSLI */
5051 switch (size) {
5052 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5053 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5054 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5055 default: abort();
5056 }
5057 break;
5058 case 6: /* VQSHLU */
5059 switch (size) {
5060 case 0:
5061 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5062 tmp, tmp2);
5063 break;
5064 case 1:
5065 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5066 tmp, tmp2);
5067 break;
5068 case 2:
5069 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5070 tmp, tmp2);
5071 break;
5072 default:
5073 abort();
5074 }
5075 break;
5076 case 7: /* VQSHL */
5077 GEN_NEON_INTEGER_OP_ENV(qshl);
5078 break;
5079 }
5080 tcg_temp_free_i32(tmp2);
5081
5082 if (op == 1 || op == 3) {
5083 /* Accumulate. */
5084 tmp2 = neon_load_reg(rd, pass);
5085 gen_neon_add(size, tmp, tmp2);
5086 tcg_temp_free_i32(tmp2);
5087 } else if (op == 4 || (op == 5 && u)) {
5088 /* Insert */
5089 switch (size) {
5090 case 0:
5091 if (op == 4)
5092 mask = 0xff >> -shift;
5093 else
5094 mask = (uint8_t)(0xff << shift);
5095 mask |= mask << 8;
5096 mask |= mask << 16;
5097 break;
5098 case 1:
5099 if (op == 4)
5100 mask = 0xffff >> -shift;
5101 else
5102 mask = (uint16_t)(0xffff << shift);
5103 mask |= mask << 16;
5104 break;
5105 case 2:
5106 if (shift < -31 || shift > 31) {
5107 mask = 0;
5108 } else {
5109 if (op == 4)
5110 mask = 0xffffffffu >> -shift;
5111 else
5112 mask = 0xffffffffu << shift;
5113 }
5114 break;
5115 default:
5116 abort();
5117 }
5118 tmp2 = neon_load_reg(rd, pass);
5119 tcg_gen_andi_i32(tmp, tmp, mask);
5120 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5121 tcg_gen_or_i32(tmp, tmp, tmp2);
5122 tcg_temp_free_i32(tmp2);
5123 }
5124 neon_store_reg(rd, pass, tmp);
5125 }
5126 } /* for pass */
5127 } else if (op < 10) {
5128 /* Shift by immediate and narrow:
5129 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5130 int input_unsigned = (op == 8) ? !u : u;
5131 if (rm & 1) {
5132 return 1;
5133 }
5134 shift = shift - (1 << (size + 3));
5135 size++;
5136 if (size == 3) {
5137 tmp64 = tcg_const_i64(shift);
5138 neon_load_reg64(cpu_V0, rm);
5139 neon_load_reg64(cpu_V1, rm + 1);
5140 for (pass = 0; pass < 2; pass++) {
5141 TCGv_i64 in;
5142 if (pass == 0) {
5143 in = cpu_V0;
5144 } else {
5145 in = cpu_V1;
5146 }
5147 if (q) {
5148 if (input_unsigned) {
5149 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5150 } else {
5151 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5152 }
5153 } else {
5154 if (input_unsigned) {
5155 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5156 } else {
5157 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5158 }
5159 }
5160 tmp = tcg_temp_new_i32();
5161 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5162 neon_store_reg(rd, pass, tmp);
5163 } /* for pass */
5164 tcg_temp_free_i64(tmp64);
5165 } else {
5166 if (size == 1) {
5167 imm = (uint16_t)shift;
5168 imm |= imm << 16;
5169 } else {
5170 /* size == 2 */
5171 imm = (uint32_t)shift;
5172 }
5173 tmp2 = tcg_const_i32(imm);
5174 tmp4 = neon_load_reg(rm + 1, 0);
5175 tmp5 = neon_load_reg(rm + 1, 1);
5176 for (pass = 0; pass < 2; pass++) {
5177 if (pass == 0) {
5178 tmp = neon_load_reg(rm, 0);
5179 } else {
5180 tmp = tmp4;
5181 }
5182 gen_neon_shift_narrow(size, tmp, tmp2, q,
5183 input_unsigned);
5184 if (pass == 0) {
5185 tmp3 = neon_load_reg(rm, 1);
5186 } else {
5187 tmp3 = tmp5;
5188 }
5189 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5190 input_unsigned);
5191 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5192 tcg_temp_free_i32(tmp);
5193 tcg_temp_free_i32(tmp3);
5194 tmp = tcg_temp_new_i32();
5195 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5196 neon_store_reg(rd, pass, tmp);
5197 } /* for pass */
5198 tcg_temp_free_i32(tmp2);
5199 }
5200 } else if (op == 10) {
5201 /* VSHLL, VMOVL */
5202 if (q || (rd & 1)) {
5203 return 1;
5204 }
5205 tmp = neon_load_reg(rm, 0);
5206 tmp2 = neon_load_reg(rm, 1);
5207 for (pass = 0; pass < 2; pass++) {
5208 if (pass == 1)
5209 tmp = tmp2;
5210
5211 gen_neon_widen(cpu_V0, tmp, size, u);
5212
5213 if (shift != 0) {
5214 /* The shift is less than the width of the source
5215 type, so we can just shift the whole register. */
5216 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5217 /* Widen the result of the shift: we need to clear
5218 * the potential overflow bits resulting from
5219 * left bits of the narrow input appearing as
5220 * right bits of the left neighbour narrow
5221 * input. */
5222 if (size < 2 || !u) {
5223 uint64_t imm64;
5224 if (size == 0) {
5225 imm = (0xffu >> (8 - shift));
5226 imm |= imm << 16;
5227 } else if (size == 1) {
5228 imm = 0xffff >> (16 - shift);
5229 } else {
5230 /* size == 2 */
5231 imm = 0xffffffff >> (32 - shift);
5232 }
5233 if (size < 2) {
5234 imm64 = imm | (((uint64_t)imm) << 32);
5235 } else {
5236 imm64 = imm;
5237 }
5238 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5239 }
5240 }
5241 neon_store_reg64(cpu_V0, rd + pass);
5242 }
5243 } else if (op >= 14) {
5244 /* VCVT fixed-point. */
5245 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5246 return 1;
5247 }
5248 /* We have already masked out the must-be-1 top bit of imm6,
5249 * hence this 32-shift where the ARM ARM has 64-imm6.
5250 */
5251 shift = 32 - shift;
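/* Worked example: imm6 = 0b110000 (48) encodes 64 - 48 = 16 fraction bits;
 * with the must-be-1 top bit already masked off, shift arrives here as 16 and
 * 32 - 16 gives the same 16. */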
5252 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5253 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5254 if (!(op & 1)) {
5255 if (u)
5256 gen_vfp_ulto(0, shift, 1);
5257 else
5258 gen_vfp_slto(0, shift, 1);
5259 } else {
5260 if (u)
5261 gen_vfp_toul(0, shift, 1);
5262 else
5263 gen_vfp_tosl(0, shift, 1);
5264 }
5265 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5266 }
5267 } else {
5268 return 1;
5269 }
5270 } else { /* (insn & 0x00380080) == 0 */
5271 int invert;
5272 if (q && (rd & 1)) {
5273 return 1;
5274 }
5275
5276 op = (insn >> 8) & 0xf;
5277 /* One register and immediate. */
5278 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5279 invert = (insn & (1 << 5)) != 0;
5280 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5281 * We choose not to special-case this and will behave as if a
5282 * valid constant encoding of 0 had been given.
5283 */
5284 switch (op) {
5285 case 0: case 1:
5286 /* no-op */
5287 break;
5288 case 2: case 3:
5289 imm <<= 8;
5290 break;
5291 case 4: case 5:
5292 imm <<= 16;
5293 break;
5294 case 6: case 7:
5295 imm <<= 24;
5296 break;
5297 case 8: case 9:
5298 imm |= imm << 16;
5299 break;
5300 case 10: case 11:
5301 imm = (imm << 8) | (imm << 24);
5302 break;
5303 case 12:
5304 imm = (imm << 8) | 0xff;
5305 break;
5306 case 13:
5307 imm = (imm << 16) | 0xffff;
5308 break;
5309 case 14:
5310 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5311 if (invert)
5312 imm = ~imm;
5313 break;
5314 case 15:
5315 if (invert) {
5316 return 1;
5317 }
5318 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5319 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5320 break;
5321 }
5322 if (invert)
5323 imm = ~imm;
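/* Worked example of the expansion above: op == 12 with an 8-bit immediate of
 * 0xab produces 0x0000abff (before any inversion); the resulting 32-bit value
 * is then replicated into each pass of the destination below. */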
5324
5325 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5326 if (op & 1 && op < 12) {
5327 tmp = neon_load_reg(rd, pass);
5328 if (invert) {
5329 /* The immediate value has already been inverted, so
5330 BIC becomes AND. */
5331 tcg_gen_andi_i32(tmp, tmp, imm);
5332 } else {
5333 tcg_gen_ori_i32(tmp, tmp, imm);
5334 }
5335 } else {
5336 /* VMOV, VMVN. */
5337 tmp = tcg_temp_new_i32();
5338 if (op == 14 && invert) {
5339 int n;
5340 uint32_t val;
5341 val = 0;
5342 for (n = 0; n < 4; n++) {
5343 if (imm & (1 << (n + (pass & 1) * 4)))
5344 val |= 0xff << (n * 8);
5345 }
5346 tcg_gen_movi_i32(tmp, val);
5347 } else {
5348 tcg_gen_movi_i32(tmp, imm);
5349 }
5350 }
5351 neon_store_reg(rd, pass, tmp);
5352 }
5353 }
5354 } else { /* (insn & 0x00800010 == 0x00800000) */
5355 if (size != 3) {
5356 op = (insn >> 8) & 0xf;
5357 if ((insn & (1 << 6)) == 0) {
5358 /* Three registers of different lengths. */
5359 int src1_wide;
5360 int src2_wide;
5361 int prewiden;
5362 /* undefreq: bit 0 : UNDEF if size != 0
5363 * bit 1 : UNDEF if size == 0
5364 * bit 2 : UNDEF if U == 1
5365 * Note that [1:0] set implies 'always UNDEF'
5366 */
5367 int undefreq;
5368 /* prewiden, src1_wide, src2_wide, undefreq */
5369 static const int neon_3reg_wide[16][4] = {
5370 {1, 0, 0, 0}, /* VADDL */
5371 {1, 1, 0, 0}, /* VADDW */
5372 {1, 0, 0, 0}, /* VSUBL */
5373 {1, 1, 0, 0}, /* VSUBW */
5374 {0, 1, 1, 0}, /* VADDHN */
5375 {0, 0, 0, 0}, /* VABAL */
5376 {0, 1, 1, 0}, /* VSUBHN */
5377 {0, 0, 0, 0}, /* VABDL */
5378 {0, 0, 0, 0}, /* VMLAL */
5379 {0, 0, 0, 6}, /* VQDMLAL */
5380 {0, 0, 0, 0}, /* VMLSL */
5381 {0, 0, 0, 6}, /* VQDMLSL */
5382 {0, 0, 0, 0}, /* Integer VMULL */
5383 {0, 0, 0, 2}, /* VQDMULL */
5384 {0, 0, 0, 5}, /* Polynomial VMULL */
5385 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5386 };
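/* For example, VQDMLAL's undefreq of 6 (bits 1 and 2) means it UNDEFs when
 * size == 0 or when U == 1, matching the checks just below. */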
5387
5388 prewiden = neon_3reg_wide[op][0];
5389 src1_wide = neon_3reg_wide[op][1];
5390 src2_wide = neon_3reg_wide[op][2];
5391 undefreq = neon_3reg_wide[op][3];
5392
5393 if (((undefreq & 1) && (size != 0)) ||
5394 ((undefreq & 2) && (size == 0)) ||
5395 ((undefreq & 4) && u)) {
5396 return 1;
5397 }
5398 if ((src1_wide && (rn & 1)) ||
5399 (src2_wide && (rm & 1)) ||
5400 (!src2_wide && (rd & 1))) {
5401 return 1;
5402 }
5403
5404 /* Avoid overlapping operands. Wide source operands are
5405 always aligned so will never overlap with wide
5406 destinations in problematic ways. */
5407 if (rd == rm && !src2_wide) {
5408 tmp = neon_load_reg(rm, 1);
5409 neon_store_scratch(2, tmp);
5410 } else if (rd == rn && !src1_wide) {
5411 tmp = neon_load_reg(rn, 1);
5412 neon_store_scratch(2, tmp);
5413 }
5414 TCGV_UNUSED_I32(tmp3);
5415 for (pass = 0; pass < 2; pass++) {
5416 if (src1_wide) {
5417 neon_load_reg64(cpu_V0, rn + pass);
5418 TCGV_UNUSED_I32(tmp);
5419 } else {
5420 if (pass == 1 && rd == rn) {
5421 tmp = neon_load_scratch(2);
5422 } else {
5423 tmp = neon_load_reg(rn, pass);
5424 }
5425 if (prewiden) {
5426 gen_neon_widen(cpu_V0, tmp, size, u);
5427 }
5428 }
5429 if (src2_wide) {
5430 neon_load_reg64(cpu_V1, rm + pass);
5431 TCGV_UNUSED_I32(tmp2);
5432 } else {
5433 if (pass == 1 && rd == rm) {
5434 tmp2 = neon_load_scratch(2);
5435 } else {
5436 tmp2 = neon_load_reg(rm, pass);
5437 }
5438 if (prewiden) {
5439 gen_neon_widen(cpu_V1, tmp2, size, u);
5440 }
5441 }
5442 switch (op) {
5443 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5444 gen_neon_addl(size);
5445 break;
5446 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5447 gen_neon_subl(size);
5448 break;
5449 case 5: case 7: /* VABAL, VABDL */
5450 switch ((size << 1) | u) {
5451 case 0:
5452 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5453 break;
5454 case 1:
5455 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5456 break;
5457 case 2:
5458 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5459 break;
5460 case 3:
5461 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5462 break;
5463 case 4:
5464 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5465 break;
5466 case 5:
5467 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5468 break;
5469 default: abort();
5470 }
5471 tcg_temp_free_i32(tmp2);
5472 tcg_temp_free_i32(tmp);
5473 break;
5474 case 8: case 9: case 10: case 11: case 12: case 13:
5475 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5476 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5477 break;
5478 case 14: /* Polynomial VMULL */
5479 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5480 tcg_temp_free_i32(tmp2);
5481 tcg_temp_free_i32(tmp);
5482 break;
5483 default: /* 15 is RESERVED: caught earlier */
5484 abort();
5485 }
5486 if (op == 13) {
5487 /* VQDMULL */
5488 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5489 neon_store_reg64(cpu_V0, rd + pass);
5490 } else if (op == 5 || (op >= 8 && op <= 11)) {
5491 /* Accumulate. */
5492 neon_load_reg64(cpu_V1, rd + pass);
5493 switch (op) {
5494 case 10: /* VMLSL */
5495 gen_neon_negl(cpu_V0, size);
5496 /* Fall through */
5497 case 5: case 8: /* VABAL, VMLAL */
5498 gen_neon_addl(size);
5499 break;
5500 case 9: case 11: /* VQDMLAL, VQDMLSL */
5501 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5502 if (op == 11) {
5503 gen_neon_negl(cpu_V0, size);
5504 }
5505 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5506 break;
5507 default:
5508 abort();
5509 }
5510 neon_store_reg64(cpu_V0, rd + pass);
5511 } else if (op == 4 || op == 6) {
5512 /* Narrowing operation. */
5513 tmp = tcg_temp_new_i32();
5514 if (!u) {
5515 switch (size) {
5516 case 0:
5517 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5518 break;
5519 case 1:
5520 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5521 break;
5522 case 2:
5523 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5524 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5525 break;
5526 default: abort();
5527 }
5528 } else {
5529 switch (size) {
5530 case 0:
5531 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5532 break;
5533 case 1:
5534 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5535 break;
5536 case 2:
5537 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5538 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5539 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5540 break;
5541 default: abort();
5542 }
5543 }
5544 if (pass == 0) {
5545 tmp3 = tmp;
5546 } else {
5547 neon_store_reg(rd, 0, tmp3);
5548 neon_store_reg(rd, 1, tmp);
5549 }
5550 } else {
5551 /* Write back the result. */
5552 neon_store_reg64(cpu_V0, rd + pass);
5553 }
5554 }
5555 } else {
5556 /* Two registers and a scalar. NB that for ops of this form
5557 * the ARM ARM labels bit 24 as Q, but it is in our variable
5558 * 'u', not 'q'.
5559 */
5560 if (size == 0) {
5561 return 1;
5562 }
5563 switch (op) {
5564 case 1: /* Floating point VMLA scalar */
5565 case 5: /* Floating point VMLS scalar */
5566 case 9: /* Floating point VMUL scalar */
5567 if (size == 1) {
5568 return 1;
5569 }
5570 /* fall through */
5571 case 0: /* Integer VMLA scalar */
5572 case 4: /* Integer VMLS scalar */
5573 case 8: /* Integer VMUL scalar */
5574 case 12: /* VQDMULH scalar */
5575 case 13: /* VQRDMULH scalar */
5576 if (u && ((rd | rn) & 1)) {
5577 return 1;
5578 }
5579 tmp = neon_get_scalar(size, rm);
5580 neon_store_scratch(0, tmp);
5581 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5582 tmp = neon_load_scratch(0);
5583 tmp2 = neon_load_reg(rn, pass);
5584 if (op == 12) {
5585 if (size == 1) {
5586 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5587 } else {
5588 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5589 }
5590 } else if (op == 13) {
5591 if (size == 1) {
5592 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5593 } else {
5594 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5595 }
5596 } else if (op & 1) {
5597 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5598 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5599 tcg_temp_free_ptr(fpstatus);
5600 } else {
5601 switch (size) {
5602 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5603 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5604 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5605 default: abort();
5606 }
5607 }
5608 tcg_temp_free_i32(tmp2);
5609 if (op < 8) {
5610 /* Accumulate. */
5611 tmp2 = neon_load_reg(rd, pass);
5612 switch (op) {
5613 case 0:
5614 gen_neon_add(size, tmp, tmp2);
5615 break;
5616 case 1:
5617 {
5618 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5619 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5620 tcg_temp_free_ptr(fpstatus);
5621 break;
5622 }
5623 case 4:
5624 gen_neon_rsb(size, tmp, tmp2);
5625 break;
5626 case 5:
5627 {
5628 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5629 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5630 tcg_temp_free_ptr(fpstatus);
5631 break;
5632 }
5633 default:
5634 abort();
5635 }
5636 tcg_temp_free_i32(tmp2);
5637 }
5638 neon_store_reg(rd, pass, tmp);
5639 }
5640 break;
5641 case 3: /* VQDMLAL scalar */
5642 case 7: /* VQDMLSL scalar */
5643 case 11: /* VQDMULL scalar */
5644 if (u == 1) {
5645 return 1;
5646 }
5647 /* fall through */
5648 case 2: /* VMLAL scalar */
5649 case 6: /* VMLSL scalar */
5650 case 10: /* VMULL scalar */
5651 if (rd & 1) {
5652 return 1;
5653 }
5654 tmp2 = neon_get_scalar(size, rm);
5655 /* We need a copy of tmp2 because gen_neon_mull
5656 * deletes it during pass 0. */
5657 tmp4 = tcg_temp_new_i32();
5658 tcg_gen_mov_i32(tmp4, tmp2);
5659 tmp3 = neon_load_reg(rn, 1);
5660
5661 for (pass = 0; pass < 2; pass++) {
5662 if (pass == 0) {
5663 tmp = neon_load_reg(rn, 0);
5664 } else {
5665 tmp = tmp3;
5666 tmp2 = tmp4;
5667 }
5668 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5669 if (op != 11) {
5670 neon_load_reg64(cpu_V1, rd + pass);
5671 }
5672 switch (op) {
5673 case 6:
5674 gen_neon_negl(cpu_V0, size);
5675 /* Fall through */
5676 case 2:
5677 gen_neon_addl(size);
5678 break;
5679 case 3: case 7:
5680 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5681 if (op == 7) {
5682 gen_neon_negl(cpu_V0, size);
5683 }
5684 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5685 break;
5686 case 10:
5687 /* no-op */
5688 break;
5689 case 11:
5690 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5691 break;
5692 default:
5693 abort();
5694 }
5695 neon_store_reg64(cpu_V0, rd + pass);
5696 }
5697
5698
5699 break;
5700 default: /* 14 and 15 are RESERVED */
5701 return 1;
5702 }
5703 }
5704 } else { /* size == 3 */
5705 if (!u) {
5706 /* Extract. */
5707 imm = (insn >> 8) & 0xf;
5708
5709 if (imm > 7 && !q)
5710 return 1;
5711
5712 if (q && ((rd | rn | rm) & 1)) {
5713 return 1;
5714 }
5715
5716 if (imm == 0) {
5717 neon_load_reg64(cpu_V0, rn);
5718 if (q) {
5719 neon_load_reg64(cpu_V1, rn + 1);
5720 }
5721 } else if (imm == 8) {
5722 neon_load_reg64(cpu_V0, rn + 1);
5723 if (q) {
5724 neon_load_reg64(cpu_V1, rm);
5725 }
5726 } else if (q) {
5727 tmp64 = tcg_temp_new_i64();
5728 if (imm < 8) {
5729 neon_load_reg64(cpu_V0, rn);
5730 neon_load_reg64(tmp64, rn + 1);
5731 } else {
5732 neon_load_reg64(cpu_V0, rn + 1);
5733 neon_load_reg64(tmp64, rm);
5734 }
5735 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5736 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5737 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5738 if (imm < 8) {
5739 neon_load_reg64(cpu_V1, rm);
5740 } else {
5741 neon_load_reg64(cpu_V1, rm + 1);
5742 imm -= 8;
5743 }
5744 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5745 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5746 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5747 tcg_temp_free_i64(tmp64);
5748 } else {
5749 /* BUGFIX */
5750 neon_load_reg64(cpu_V0, rn);
5751 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5752 neon_load_reg64(cpu_V1, rm);
5753 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5754 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5755 }
5756 neon_store_reg64(cpu_V0, rd);
5757 if (q) {
5758 neon_store_reg64(cpu_V1, rd + 1);
5759 }
5760 } else if ((insn & (1 << 11)) == 0) {
5761 /* Two register misc. */
5762 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5763 size = (insn >> 18) & 3;
5764 /* UNDEF for unknown op values and bad op-size combinations */
5765 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5766 return 1;
5767 }
5768 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5769 q && ((rm | rd) & 1)) {
5770 return 1;
5771 }
5772 switch (op) {
5773 case NEON_2RM_VREV64:
5774 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5775 tmp = neon_load_reg(rm, pass * 2);
5776 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5777 switch (size) {
5778 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5779 case 1: gen_swap_half(tmp); break;
5780 case 2: /* no-op */ break;
5781 default: abort();
5782 }
5783 neon_store_reg(rd, pass * 2 + 1, tmp);
5784 if (size == 2) {
5785 neon_store_reg(rd, pass * 2, tmp2);
5786 } else {
5787 switch (size) {
5788 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5789 case 1: gen_swap_half(tmp2); break;
5790 default: abort();
5791 }
5792 neon_store_reg(rd, pass * 2, tmp2);
5793 }
5794 }
5795 break;
5796 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5797 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
5798 for (pass = 0; pass < q + 1; pass++) {
5799 tmp = neon_load_reg(rm, pass * 2);
5800 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5801 tmp = neon_load_reg(rm, pass * 2 + 1);
5802 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5803 switch (size) {
5804 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5805 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5806 case 2: tcg_gen_add_i64(CPU_V001); break;
5807 default: abort();
5808 }
5809 if (op >= NEON_2RM_VPADAL) {
5810 /* Accumulate. */
5811 neon_load_reg64(cpu_V1, rd + pass);
5812 gen_neon_addl(size);
5813 }
5814 neon_store_reg64(cpu_V0, rd + pass);
5815 }
5816 break;
5817 case NEON_2RM_VTRN:
5818 if (size == 2) {
5819 int n;
5820 for (n = 0; n < (q ? 4 : 2); n += 2) {
5821 tmp = neon_load_reg(rm, n);
5822 tmp2 = neon_load_reg(rd, n + 1);
5823 neon_store_reg(rm, n, tmp2);
5824 neon_store_reg(rd, n + 1, tmp);
5825 }
5826 } else {
5827 goto elementwise;
5828 }
5829 break;
5830 case NEON_2RM_VUZP:
5831 if (gen_neon_unzip(rd, rm, size, q)) {
5832 return 1;
5833 }
5834 break;
5835 case NEON_2RM_VZIP:
5836 if (gen_neon_zip(rd, rm, size, q)) {
5837 return 1;
5838 }
5839 break;
5840 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5841 /* also VQMOVUN; op field and mnemonics don't line up */
5842 if (rm & 1) {
5843 return 1;
5844 }
5845 TCGV_UNUSED_I32(tmp2);
5846 for (pass = 0; pass < 2; pass++) {
5847 neon_load_reg64(cpu_V0, rm + pass);
5848 tmp = tcg_temp_new_i32();
5849 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5850 tmp, cpu_V0);
5851 if (pass == 0) {
5852 tmp2 = tmp;
5853 } else {
5854 neon_store_reg(rd, 0, tmp2);
5855 neon_store_reg(rd, 1, tmp);
5856 }
5857 }
5858 break;
5859 case NEON_2RM_VSHLL:
5860 if (q || (rd & 1)) {
5861 return 1;
5862 }
5863 tmp = neon_load_reg(rm, 0);
5864 tmp2 = neon_load_reg(rm, 1);
5865 for (pass = 0; pass < 2; pass++) {
5866 if (pass == 1)
5867 tmp = tmp2;
5868 gen_neon_widen(cpu_V0, tmp, size, 1);
5869 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5870 neon_store_reg64(cpu_V0, rd + pass);
5871 }
5872 break;
5873 case NEON_2RM_VCVT_F16_F32:
5874 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5875 q || (rm & 1)) {
5876 return 1;
5877 }
5878 tmp = tcg_temp_new_i32();
5879 tmp2 = tcg_temp_new_i32();
5880 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5881 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5882 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5883 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5884 tcg_gen_shli_i32(tmp2, tmp2, 16);
5885 tcg_gen_or_i32(tmp2, tmp2, tmp);
5886 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5887 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5888 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5889 neon_store_reg(rd, 0, tmp2);
5890 tmp2 = tcg_temp_new_i32();
5891 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5892 tcg_gen_shli_i32(tmp2, tmp2, 16);
5893 tcg_gen_or_i32(tmp2, tmp2, tmp);
5894 neon_store_reg(rd, 1, tmp2);
5895 tcg_temp_free_i32(tmp);
5896 break;
5897 case NEON_2RM_VCVT_F32_F16:
5898 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5899 q || (rd & 1)) {
5900 return 1;
5901 }
5902 tmp3 = tcg_temp_new_i32();
5903 tmp = neon_load_reg(rm, 0);
5904 tmp2 = neon_load_reg(rm, 1);
5905 tcg_gen_ext16u_i32(tmp3, tmp);
5906 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5907 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5908 tcg_gen_shri_i32(tmp3, tmp, 16);
5909 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5910 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5911 tcg_temp_free_i32(tmp);
5912 tcg_gen_ext16u_i32(tmp3, tmp2);
5913 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5914 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5915 tcg_gen_shri_i32(tmp3, tmp2, 16);
5916 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5917 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5918 tcg_temp_free_i32(tmp2);
5919 tcg_temp_free_i32(tmp3);
5920 break;
5921 default:
5922 elementwise:
5923 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5924 if (neon_2rm_is_float_op(op)) {
5925 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5926 neon_reg_offset(rm, pass));
5927 TCGV_UNUSED_I32(tmp);
5928 } else {
5929 tmp = neon_load_reg(rm, pass);
5930 }
5931 switch (op) {
5932 case NEON_2RM_VREV32:
5933 switch (size) {
5934 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5935 case 1: gen_swap_half(tmp); break;
5936 default: abort();
5937 }
5938 break;
5939 case NEON_2RM_VREV16:
5940 gen_rev16(tmp);
5941 break;
5942 case NEON_2RM_VCLS:
5943 switch (size) {
5944 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5945 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5946 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5947 default: abort();
5948 }
5949 break;
5950 case NEON_2RM_VCLZ:
5951 switch (size) {
5952 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5953 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5954 case 2: gen_helper_clz(tmp, tmp); break;
5955 default: abort();
5956 }
5957 break;
5958 case NEON_2RM_VCNT:
5959 gen_helper_neon_cnt_u8(tmp, tmp);
5960 break;
5961 case NEON_2RM_VMVN:
5962 tcg_gen_not_i32(tmp, tmp);
5963 break;
5964 case NEON_2RM_VQABS:
5965 switch (size) {
5966 case 0:
5967 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
5968 break;
5969 case 1:
5970 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
5971 break;
5972 case 2:
5973 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
5974 break;
5975 default: abort();
5976 }
5977 break;
5978 case NEON_2RM_VQNEG:
5979 switch (size) {
5980 case 0:
5981 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
5982 break;
5983 case 1:
5984 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
5985 break;
5986 case 2:
5987 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
5988 break;
5989 default: abort();
5990 }
5991 break;
5992 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
5993 tmp2 = tcg_const_i32(0);
5994 switch(size) {
5995 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5996 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5997 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5998 default: abort();
5999 }
6000 tcg_temp_free_i32(tmp2);
6001 if (op == NEON_2RM_VCLE0) {
6002 tcg_gen_not_i32(tmp, tmp);
6003 }
6004 break;
6005 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6006 tmp2 = tcg_const_i32(0);
6007 switch(size) {
6008 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6009 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6010 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6011 default: abort();
6012 }
6013 tcg_temp_free_i32(tmp2);
6014 if (op == NEON_2RM_VCLT0) {
6015 tcg_gen_not_i32(tmp, tmp);
6016 }
6017 break;
6018 case NEON_2RM_VCEQ0:
6019 tmp2 = tcg_const_i32(0);
6020 switch(size) {
6021 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6022 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6023 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6024 default: abort();
6025 }
6026 tcg_temp_free_i32(tmp2);
6027 break;
6028 case NEON_2RM_VABS:
6029 switch(size) {
6030 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6031 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6032 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6033 default: abort();
6034 }
6035 break;
6036 case NEON_2RM_VNEG:
6037 tmp2 = tcg_const_i32(0);
6038 gen_neon_rsb(size, tmp, tmp2);
6039 tcg_temp_free_i32(tmp2);
6040 break;
6041 case NEON_2RM_VCGT0_F:
6042 {
6043 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6044 tmp2 = tcg_const_i32(0);
6045 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6046 tcg_temp_free_i32(tmp2);
6047 tcg_temp_free_ptr(fpstatus);
6048 break;
6049 }
6050 case NEON_2RM_VCGE0_F:
6051 {
6052 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6053 tmp2 = tcg_const_i32(0);
6054 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6055 tcg_temp_free_i32(tmp2);
6056 tcg_temp_free_ptr(fpstatus);
6057 break;
6058 }
6059 case NEON_2RM_VCEQ0_F:
6060 {
6061 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6062 tmp2 = tcg_const_i32(0);
6063 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6064 tcg_temp_free_i32(tmp2);
6065 tcg_temp_free_ptr(fpstatus);
6066 break;
6067 }
6068 case NEON_2RM_VCLE0_F:
6069 {
6070 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6071 tmp2 = tcg_const_i32(0);
6072 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6073 tcg_temp_free_i32(tmp2);
6074 tcg_temp_free_ptr(fpstatus);
6075 break;
6076 }
6077 case NEON_2RM_VCLT0_F:
6078 {
6079 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6080 tmp2 = tcg_const_i32(0);
6081 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6082 tcg_temp_free_i32(tmp2);
6083 tcg_temp_free_ptr(fpstatus);
6084 break;
6085 }
6086 case NEON_2RM_VABS_F:
6087 gen_vfp_abs(0);
6088 break;
6089 case NEON_2RM_VNEG_F:
6090 gen_vfp_neg(0);
6091 break;
6092 case NEON_2RM_VSWP:
6093 tmp2 = neon_load_reg(rd, pass);
6094 neon_store_reg(rm, pass, tmp2);
6095 break;
6096 case NEON_2RM_VTRN:
6097 tmp2 = neon_load_reg(rd, pass);
6098 switch (size) {
6099 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6100 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6101 default: abort();
6102 }
6103 neon_store_reg(rm, pass, tmp2);
6104 break;
6105 case NEON_2RM_VRECPE:
6106 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6107 break;
6108 case NEON_2RM_VRSQRTE:
6109 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6110 break;
6111 case NEON_2RM_VRECPE_F:
6112 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6113 break;
6114 case NEON_2RM_VRSQRTE_F:
6115 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6116 break;
6117 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6118 gen_vfp_sito(0, 1);
6119 break;
6120 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6121 gen_vfp_uito(0, 1);
6122 break;
6123 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6124 gen_vfp_tosiz(0, 1);
6125 break;
6126 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6127 gen_vfp_touiz(0, 1);
6128 break;
6129 default:
6130 /* Reserved op values were caught by the
6131 * neon_2rm_sizes[] check earlier.
6132 */
6133 abort();
6134 }
6135 if (neon_2rm_is_float_op(op)) {
6136 tcg_gen_st_f32(cpu_F0s, cpu_env,
6137 neon_reg_offset(rd, pass));
6138 } else {
6139 neon_store_reg(rd, pass, tmp);
6140 }
6141 }
6142 break;
6143 }
6144 } else if ((insn & (1 << 10)) == 0) {
6145 /* VTBL, VTBX. */
6146 int n = ((insn >> 8) & 3) + 1;
6147 if ((rn + n) > 32) {
6148 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6149 * helper function running off the end of the register file.
6150 */
6151 return 1;
6152 }
6153 n <<= 3;
6154 if (insn & (1 << 6)) {
6155 tmp = neon_load_reg(rd, 0);
6156 } else {
6157 tmp = tcg_temp_new_i32();
6158 tcg_gen_movi_i32(tmp, 0);
6159 }
6160 tmp2 = neon_load_reg(rm, 0);
6161 tmp4 = tcg_const_i32(rn);
6162 tmp5 = tcg_const_i32(n);
6163 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
6164 tcg_temp_free_i32(tmp);
6165 if (insn & (1 << 6)) {
6166 tmp = neon_load_reg(rd, 1);
6167 } else {
6168 tmp = tcg_temp_new_i32();
6169 tcg_gen_movi_i32(tmp, 0);
6170 }
6171 tmp3 = neon_load_reg(rm, 1);
6172 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
6173 tcg_temp_free_i32(tmp5);
6174 tcg_temp_free_i32(tmp4);
6175 neon_store_reg(rd, 0, tmp2);
6176 neon_store_reg(rd, 1, tmp3);
6177 tcg_temp_free_i32(tmp);
6178 } else if ((insn & 0x380) == 0) {
6179 /* VDUP */
6180 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6181 return 1;
6182 }
6183 if (insn & (1 << 19)) {
6184 tmp = neon_load_reg(rm, 1);
6185 } else {
6186 tmp = neon_load_reg(rm, 0);
6187 }
6188 if (insn & (1 << 16)) {
6189 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6190 } else if (insn & (1 << 17)) {
6191 if ((insn >> 18) & 1)
6192 gen_neon_dup_high16(tmp);
6193 else
6194 gen_neon_dup_low16(tmp);
6195 }
6196 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6197 tmp2 = tcg_temp_new_i32();
6198 tcg_gen_mov_i32(tmp2, tmp);
6199 neon_store_reg(rd, pass, tmp2);
6200 }
6201 tcg_temp_free_i32(tmp);
6202 } else {
6203 return 1;
6204 }
6205 }
6206 }
6207 return 0;
6208 }
6209
6210 static int disas_coproc_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
6211 {
6212 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6213 const ARMCPRegInfo *ri;
6214 ARMCPU *cpu = arm_env_get_cpu(env);
6215
6216 cpnum = (insn >> 8) & 0xf;
6217 if (arm_feature(env, ARM_FEATURE_XSCALE)
6218 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6219 return 1;
6220
6221 /* First check for coprocessor space used for actual instructions */
6222 switch (cpnum) {
6223 case 0:
6224 case 1:
6225 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6226 return disas_iwmmxt_insn(env, s, insn);
6227 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6228 return disas_dsp_insn(env, s, insn);
6229 }
6230 return 1;
6231 case 10:
6232 case 11:
6233         return disas_vfp_insn(env, s, insn);
6234 default:
6235 break;
6236 }
6237
6238 /* Otherwise treat as a generic register access */
6239 is64 = (insn & (1 << 25)) == 0;
6240 if (!is64 && ((insn & (1 << 4)) == 0)) {
6241 /* cdp */
6242 return 1;
6243 }
6244
6245 crm = insn & 0xf;
6246 if (is64) {
6247 crn = 0;
6248 opc1 = (insn >> 4) & 0xf;
6249 opc2 = 0;
6250 rt2 = (insn >> 16) & 0xf;
6251 } else {
6252 crn = (insn >> 16) & 0xf;
6253 opc1 = (insn >> 21) & 7;
6254 opc2 = (insn >> 5) & 7;
6255 rt2 = 0;
6256 }
6257 isread = (insn >> 20) & 1;
6258 rt = (insn >> 12) & 0xf;
6259
6260 ri = get_arm_cp_reginfo(cpu,
6261 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6262 if (ri) {
6263 /* Check access permissions */
6264 if (!cp_access_ok(env, ri, isread)) {
6265 return 1;
6266 }
6267
6268 /* Handle special cases first */
6269 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6270 case ARM_CP_NOP:
6271 return 0;
6272 case ARM_CP_WFI:
6273 if (isread) {
6274 return 1;
6275 }
6276 gen_set_pc_im(s->pc);
6277 s->is_jmp = DISAS_WFI;
6278 return 0;
6279 default:
6280 break;
6281 }
6282
6283 if (isread) {
6284 /* Read */
6285 if (is64) {
6286 TCGv_i64 tmp64;
6287 TCGv_i32 tmp;
6288 if (ri->type & ARM_CP_CONST) {
6289 tmp64 = tcg_const_i64(ri->resetvalue);
6290 } else if (ri->readfn) {
6291 TCGv_ptr tmpptr;
6292 gen_set_pc_im(s->pc);
6293 tmp64 = tcg_temp_new_i64();
6294 tmpptr = tcg_const_ptr(ri);
6295 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6296 tcg_temp_free_ptr(tmpptr);
6297 } else {
6298 tmp64 = tcg_temp_new_i64();
6299 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6300 }
6301 tmp = tcg_temp_new_i32();
6302 tcg_gen_trunc_i64_i32(tmp, tmp64);
6303 store_reg(s, rt, tmp);
6304 tcg_gen_shri_i64(tmp64, tmp64, 32);
6305 tmp = tcg_temp_new_i32();
6306 tcg_gen_trunc_i64_i32(tmp, tmp64);
6307 tcg_temp_free_i64(tmp64);
6308 store_reg(s, rt2, tmp);
6309 } else {
6310 TCGv_i32 tmp;
6311 if (ri->type & ARM_CP_CONST) {
6312 tmp = tcg_const_i32(ri->resetvalue);
6313 } else if (ri->readfn) {
6314 TCGv_ptr tmpptr;
6315 gen_set_pc_im(s->pc);
6316 tmp = tcg_temp_new_i32();
6317 tmpptr = tcg_const_ptr(ri);
6318 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6319 tcg_temp_free_ptr(tmpptr);
6320 } else {
6321 tmp = load_cpu_offset(ri->fieldoffset);
6322 }
6323 if (rt == 15) {
6324                     /* A destination register of r15 for 32-bit loads sets
6325 * the condition codes from the high 4 bits of the value
6326 */
6327 gen_set_nzcv(tmp);
6328 tcg_temp_free_i32(tmp);
6329 } else {
6330 store_reg(s, rt, tmp);
6331 }
6332 }
6333 } else {
6334 /* Write */
6335 if (ri->type & ARM_CP_CONST) {
6336 /* If not forbidden by access permissions, treat as WI */
6337 return 0;
6338 }
6339
6340 if (is64) {
6341 TCGv_i32 tmplo, tmphi;
6342 TCGv_i64 tmp64 = tcg_temp_new_i64();
6343 tmplo = load_reg(s, rt);
6344 tmphi = load_reg(s, rt2);
6345 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6346 tcg_temp_free_i32(tmplo);
6347 tcg_temp_free_i32(tmphi);
6348 if (ri->writefn) {
6349 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6350 gen_set_pc_im(s->pc);
6351 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6352 tcg_temp_free_ptr(tmpptr);
6353 } else {
6354 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6355 }
6356 tcg_temp_free_i64(tmp64);
6357 } else {
6358 if (ri->writefn) {
6359 TCGv_i32 tmp;
6360 TCGv_ptr tmpptr;
6361 gen_set_pc_im(s->pc);
6362 tmp = load_reg(s, rt);
6363 tmpptr = tcg_const_ptr(ri);
6364 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6365 tcg_temp_free_ptr(tmpptr);
6366 tcg_temp_free_i32(tmp);
6367 } else {
6368 TCGv_i32 tmp = load_reg(s, rt);
6369 store_cpu_offset(tmp, ri->fieldoffset);
6370 }
6371 }
6372 /* We default to ending the TB on a coprocessor register write,
6373 * but allow this to be suppressed by the register definition
6374 * (usually only necessary to work around guest bugs).
6375 */
6376 if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6377 gen_lookup_tb(s);
6378 }
6379 }
6380 return 0;
6381 }
6382
6383 return 1;
6384 }
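/* Note (a reading aid, not part of the original file): disas_coproc_insn()
 * follows the same convention as the other decoders: 0 means the coprocessor
 * access was translated, 1 means it should UNDEF.  disas_arm_insn() below
 * relies on this with "if (disas_coproc_insn(env, s, insn)) goto illegal_op;".
 */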
6385
6386
6387 /* Store a 64-bit value to a register pair. Clobbers val. */
6388 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6389 {
6390 TCGv_i32 tmp;
6391 tmp = tcg_temp_new_i32();
6392 tcg_gen_trunc_i64_i32(tmp, val);
6393 store_reg(s, rlow, tmp);
6394 tmp = tcg_temp_new_i32();
6395 tcg_gen_shri_i64(val, val, 32);
6396 tcg_gen_trunc_i64_i32(tmp, val);
6397 store_reg(s, rhigh, tmp);
6398 }
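/* Note (a reading aid, not part of the original file): gen_storeq_reg() is
 * the counterpart of gen_addq()/gen_addq_lo() below.  The long
 * multiply-accumulate cases later in this file follow the pattern
 *
 *     tmp64 = gen_mulu_i64_i32(tmp, tmp2);    // or a sign/zero extension
 *     gen_addq(s, tmp64, rlow, rhigh);        // accumulate existing rhigh:rlow
 *     gen_storeq_reg(s, rlow, rhigh, tmp64);  // write the 64-bit result back
 *     tcg_temp_free_i64(tmp64);
 */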
6399
6400 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6401 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6402 {
6403 TCGv_i64 tmp;
6404 TCGv_i32 tmp2;
6405
6406 /* Load value and extend to 64 bits. */
6407 tmp = tcg_temp_new_i64();
6408 tmp2 = load_reg(s, rlow);
6409 tcg_gen_extu_i32_i64(tmp, tmp2);
6410 tcg_temp_free_i32(tmp2);
6411 tcg_gen_add_i64(val, val, tmp);
6412 tcg_temp_free_i64(tmp);
6413 }
6414
6415 /* load and add a 64-bit value from a register pair. */
6416 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6417 {
6418 TCGv_i64 tmp;
6419 TCGv_i32 tmpl;
6420 TCGv_i32 tmph;
6421
6422 /* Load 64-bit value rd:rn. */
6423 tmpl = load_reg(s, rlow);
6424 tmph = load_reg(s, rhigh);
6425 tmp = tcg_temp_new_i64();
6426 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6427 tcg_temp_free_i32(tmpl);
6428 tcg_temp_free_i32(tmph);
6429 tcg_gen_add_i64(val, val, tmp);
6430 tcg_temp_free_i64(tmp);
6431 }
6432
6433 /* Set N and Z flags from hi|lo. */
6434 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
6435 {
6436 tcg_gen_mov_i32(cpu_NF, hi);
6437 tcg_gen_or_i32(cpu_ZF, lo, hi);
6438 }
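/* Note (a reading aid, not part of the original file): this relies on the
 * flag representation used by this translator: the N flag is bit 31 of
 * cpu_NF, and the Z flag is considered set exactly when cpu_ZF == 0.
 * Copying the high word into cpu_NF and OR-ing both words into cpu_ZF
 * therefore makes N and Z describe the full 64-bit hi:lo result.
 */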
6439
6440 /* Load/Store exclusive instructions are implemented by remembering
6441 the value/address loaded, and seeing if these are the same
6442 when the store is performed. This should be sufficient to implement
6443 the architecturally mandated semantics, and avoids having to monitor
6444 regular stores.
6445
6446 In system emulation mode only one CPU will be running at once, so
6447 this sequence is effectively atomic. In user emulation mode we
6448 throw an exception and handle the atomic operation elsewhere. */
6449 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6450 TCGv_i32 addr, int size)
6451 {
6452 TCGv_i32 tmp = tcg_temp_new_i32();
6453
6454 switch (size) {
6455 case 0:
6456 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
6457 break;
6458 case 1:
6459 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
6460 break;
6461 case 2:
6462 case 3:
6463 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
6464 break;
6465 default:
6466 abort();
6467 }
6468 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6469 store_reg(s, rt, tmp);
6470 if (size == 3) {
6471 TCGv_i32 tmp2 = tcg_temp_new_i32();
6472 tcg_gen_addi_i32(tmp2, addr, 4);
6473 tmp = tcg_temp_new_i32();
6474 tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
6475 tcg_temp_free_i32(tmp2);
6476 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6477 store_reg(s, rt2, tmp);
6478 }
6479 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6480 }
6481
6482 static void gen_clrex(DisasContext *s)
6483 {
6484 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6485 }
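/* Note (a reading aid, not part of the original file): -1 acts as the
 * "no exclusive access outstanding" marker.  Presumably this is safe because
 * a valid exclusive address is always aligned and so can never compare equal
 * to 0xffffffff in the brcond checks in gen_store_exclusive() below.
 */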
6486
6487 #ifdef CONFIG_USER_ONLY
6488 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6489 TCGv_i32 addr, int size)
6490 {
6491 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6492 tcg_gen_movi_i32(cpu_exclusive_info,
6493 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6494 gen_exception_insn(s, 4, EXCP_STREX);
6495 }
6496 #else
6497 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6498 TCGv_i32 addr, int size)
6499 {
6500 TCGv_i32 tmp;
6501 int done_label;
6502 int fail_label;
6503
6504 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6505 [addr] = {Rt};
6506 {Rd} = 0;
6507 } else {
6508 {Rd} = 1;
6509 } */
6510 fail_label = gen_new_label();
6511 done_label = gen_new_label();
6512 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6513 tmp = tcg_temp_new_i32();
6514 switch (size) {
6515 case 0:
6516 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
6517 break;
6518 case 1:
6519 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
6520 break;
6521 case 2:
6522 case 3:
6523 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
6524 break;
6525 default:
6526 abort();
6527 }
6528 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6529 tcg_temp_free_i32(tmp);
6530 if (size == 3) {
6531 TCGv_i32 tmp2 = tcg_temp_new_i32();
6532 tcg_gen_addi_i32(tmp2, addr, 4);
6533 tmp = tcg_temp_new_i32();
6534 tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
6535 tcg_temp_free_i32(tmp2);
6536 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6537 tcg_temp_free_i32(tmp);
6538 }
6539 tmp = load_reg(s, rt);
6540 switch (size) {
6541 case 0:
6542 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
6543 break;
6544 case 1:
6545 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
6546 break;
6547 case 2:
6548 case 3:
6549 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
6550 break;
6551 default:
6552 abort();
6553 }
6554 tcg_temp_free_i32(tmp);
6555 if (size == 3) {
6556 tcg_gen_addi_i32(addr, addr, 4);
6557 tmp = load_reg(s, rt2);
6558 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
6559 tcg_temp_free_i32(tmp);
6560 }
6561 tcg_gen_movi_i32(cpu_R[rd], 0);
6562 tcg_gen_br(done_label);
6563 gen_set_label(fail_label);
6564 tcg_gen_movi_i32(cpu_R[rd], 1);
6565 gen_set_label(done_label);
6566 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6567 }
6568 #endif
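/* Illustrative sketch (not part of the original file): the guest-visible
 * contract emulated by gen_load_exclusive()/gen_store_exclusive() is the
 * usual LDREX/STREX retry loop, e.g. an atomic increment:
 *
 *   retry:
 *     ldrex   r1, [r0]        @ record the address and loaded value
 *     add     r1, r1, #1
 *     strex   r2, r1, [r0]    @ r2 = 0 if [r0] still matches the recorded
 *     cmp     r2, #0          @ value, 1 if the store was refused
 *     bne     retry
 */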
6569
6570 /* gen_srs:
6571 * @env: CPUARMState
6572 * @s: DisasContext
6573 * @mode: mode field from insn (which stack to store to)
6574 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
6575 * @writeback: true if writeback bit set
6576 *
6577 * Generate code for the SRS (Store Return State) insn.
6578 */
6579 static void gen_srs(DisasContext *s,
6580 uint32_t mode, uint32_t amode, bool writeback)
6581 {
6582 int32_t offset;
6583 TCGv_i32 addr = tcg_temp_new_i32();
6584 TCGv_i32 tmp = tcg_const_i32(mode);
6585 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6586 tcg_temp_free_i32(tmp);
6587 switch (amode) {
6588 case 0: /* DA */
6589 offset = -4;
6590 break;
6591 case 1: /* IA */
6592 offset = 0;
6593 break;
6594 case 2: /* DB */
6595 offset = -8;
6596 break;
6597 case 3: /* IB */
6598 offset = 4;
6599 break;
6600 default:
6601 abort();
6602 }
6603 tcg_gen_addi_i32(addr, addr, offset);
6604 tmp = load_reg(s, 14);
6605 tcg_gen_qemu_st32(tmp, addr, 0);
6606 tcg_temp_free_i32(tmp);
6607 tmp = load_cpu_field(spsr);
6608 tcg_gen_addi_i32(addr, addr, 4);
6609 tcg_gen_qemu_st32(tmp, addr, 0);
6610 tcg_temp_free_i32(tmp);
6611 if (writeback) {
6612 switch (amode) {
6613 case 0:
6614 offset = -8;
6615 break;
6616 case 1:
6617 offset = 4;
6618 break;
6619 case 2:
6620 offset = -4;
6621 break;
6622 case 3:
6623 offset = 0;
6624 break;
6625 default:
6626 abort();
6627 }
6628 tcg_gen_addi_i32(addr, addr, offset);
6629 tmp = tcg_const_i32(mode);
6630 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6631 tcg_temp_free_i32(tmp);
6632 }
6633 tcg_temp_free_i32(addr);
6634 }
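/* Worked example (a reading aid, not part of the original file): for
 * SRSDB (amode == 2, "DB") the code above stores LR at SP_<mode> - 8 and
 * SPSR at SP_<mode> - 4; with writeback the banked SP is then left at
 * SP_<mode> - 8, i.e. pointing at the lowest word written.
 */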
6635
6636 static void disas_arm_insn(CPUARMState * env, DisasContext *s)
6637 {
6638 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6639 TCGv_i32 tmp;
6640 TCGv_i32 tmp2;
6641 TCGv_i32 tmp3;
6642 TCGv_i32 addr;
6643 TCGv_i64 tmp64;
6644
6645 insn = arm_ldl_code(env, s->pc, s->bswap_code);
6646 s->pc += 4;
6647
6648 /* M variants do not implement ARM mode. */
6649 if (IS_M(env))
6650 goto illegal_op;
6651 cond = insn >> 28;
6652 if (cond == 0xf){
6653 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6654 * choose to UNDEF. In ARMv5 and above the space is used
6655 * for miscellaneous unconditional instructions.
6656 */
6657 ARCH(5);
6658
6659 /* Unconditional instructions. */
6660 if (((insn >> 25) & 7) == 1) {
6661 /* NEON Data processing. */
6662 if (!arm_feature(env, ARM_FEATURE_NEON))
6663 goto illegal_op;
6664
6665 if (disas_neon_data_insn(env, s, insn))
6666 goto illegal_op;
6667 return;
6668 }
6669 if ((insn & 0x0f100000) == 0x04000000) {
6670 /* NEON load/store. */
6671 if (!arm_feature(env, ARM_FEATURE_NEON))
6672 goto illegal_op;
6673
6674 if (disas_neon_ls_insn(env, s, insn))
6675 goto illegal_op;
6676 return;
6677 }
6678 if (((insn & 0x0f30f000) == 0x0510f000) ||
6679 ((insn & 0x0f30f010) == 0x0710f000)) {
6680 if ((insn & (1 << 22)) == 0) {
6681 /* PLDW; v7MP */
6682 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6683 goto illegal_op;
6684 }
6685 }
6686 /* Otherwise PLD; v5TE+ */
6687 ARCH(5TE);
6688 return;
6689 }
6690 if (((insn & 0x0f70f000) == 0x0450f000) ||
6691 ((insn & 0x0f70f010) == 0x0650f000)) {
6692 ARCH(7);
6693 return; /* PLI; V7 */
6694 }
6695 if (((insn & 0x0f700000) == 0x04100000) ||
6696 ((insn & 0x0f700010) == 0x06100000)) {
6697 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6698 goto illegal_op;
6699 }
6700 return; /* v7MP: Unallocated memory hint: must NOP */
6701 }
6702
6703 if ((insn & 0x0ffffdff) == 0x01010000) {
6704 ARCH(6);
6705 /* setend */
6706 if (((insn >> 9) & 1) != s->bswap_code) {
6707 /* Dynamic endianness switching not implemented. */
6708 goto illegal_op;
6709 }
6710 return;
6711 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6712 switch ((insn >> 4) & 0xf) {
6713 case 1: /* clrex */
6714 ARCH(6K);
6715 gen_clrex(s);
6716 return;
6717 case 4: /* dsb */
6718 case 5: /* dmb */
6719 case 6: /* isb */
6720 ARCH(7);
6721 /* We don't emulate caches so these are a no-op. */
6722 return;
6723 default:
6724 goto illegal_op;
6725 }
6726 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6727 /* srs */
6728 if (IS_USER(s)) {
6729 goto illegal_op;
6730 }
6731 ARCH(6);
6732 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
6733 return;
6734 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6735 /* rfe */
6736 int32_t offset;
6737 if (IS_USER(s))
6738 goto illegal_op;
6739 ARCH(6);
6740 rn = (insn >> 16) & 0xf;
6741 addr = load_reg(s, rn);
6742 i = (insn >> 23) & 3;
6743 switch (i) {
6744 case 0: offset = -4; break; /* DA */
6745 case 1: offset = 0; break; /* IA */
6746 case 2: offset = -8; break; /* DB */
6747 case 3: offset = 4; break; /* IB */
6748 default: abort();
6749 }
6750 if (offset)
6751 tcg_gen_addi_i32(addr, addr, offset);
6752 /* Load PC into tmp and CPSR into tmp2. */
6753 tmp = tcg_temp_new_i32();
6754 tcg_gen_qemu_ld32u(tmp, addr, 0);
6755 tcg_gen_addi_i32(addr, addr, 4);
6756 tmp2 = tcg_temp_new_i32();
6757 tcg_gen_qemu_ld32u(tmp2, addr, 0);
6758 if (insn & (1 << 21)) {
6759 /* Base writeback. */
6760 switch (i) {
6761 case 0: offset = -8; break;
6762 case 1: offset = 4; break;
6763 case 2: offset = -4; break;
6764 case 3: offset = 0; break;
6765 default: abort();
6766 }
6767 if (offset)
6768 tcg_gen_addi_i32(addr, addr, offset);
6769 store_reg(s, rn, addr);
6770 } else {
6771 tcg_temp_free_i32(addr);
6772 }
6773 gen_rfe(s, tmp, tmp2);
6774 return;
6775 } else if ((insn & 0x0e000000) == 0x0a000000) {
6776 /* branch link and change to thumb (blx <offset>) */
6777 int32_t offset;
6778
6779 val = (uint32_t)s->pc;
6780 tmp = tcg_temp_new_i32();
6781 tcg_gen_movi_i32(tmp, val);
6782 store_reg(s, 14, tmp);
6783 /* Sign-extend the 24-bit offset */
6784 offset = (((int32_t)insn) << 8) >> 8;
6785 /* offset * 4 + bit24 * 2 + (thumb bit) */
6786 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6787 /* pipeline offset */
6788 val += 4;
6789 /* protected by ARCH(5); above, near the start of uncond block */
6790 gen_bx_im(s, val);
6791 return;
6792 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6793 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6794 /* iWMMXt register transfer. */
6795 if (env->cp15.c15_cpar & (1 << 1))
6796 if (!disas_iwmmxt_insn(env, s, insn))
6797 return;
6798 }
6799 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6800 /* Coprocessor double register transfer. */
6801 ARCH(5TE);
6802 } else if ((insn & 0x0f000010) == 0x0e000010) {
6803 /* Additional coprocessor register transfer. */
6804 } else if ((insn & 0x0ff10020) == 0x01000000) {
6805 uint32_t mask;
6806 uint32_t val;
6807 /* cps (privileged) */
6808 if (IS_USER(s))
6809 return;
6810 mask = val = 0;
6811 if (insn & (1 << 19)) {
6812 if (insn & (1 << 8))
6813 mask |= CPSR_A;
6814 if (insn & (1 << 7))
6815 mask |= CPSR_I;
6816 if (insn & (1 << 6))
6817 mask |= CPSR_F;
6818 if (insn & (1 << 18))
6819 val |= mask;
6820 }
6821 if (insn & (1 << 17)) {
6822 mask |= CPSR_M;
6823 val |= (insn & 0x1f);
6824 }
6825 if (mask) {
6826 gen_set_psr_im(s, mask, 0, val);
6827 }
6828 return;
6829 }
6830 goto illegal_op;
6831 }
6832 if (cond != 0xe) {
6833         /* If the condition is not "always" (0xe), generate a conditional
6834            jump to the next instruction. */
6835 s->condlabel = gen_new_label();
6836 gen_test_cc(cond ^ 1, s->condlabel);
6837 s->condjmp = 1;
6838 }
6839 if ((insn & 0x0f900000) == 0x03000000) {
6840 if ((insn & (1 << 21)) == 0) {
6841 ARCH(6T2);
6842 rd = (insn >> 12) & 0xf;
6843 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6844 if ((insn & (1 << 22)) == 0) {
6845 /* MOVW */
6846 tmp = tcg_temp_new_i32();
6847 tcg_gen_movi_i32(tmp, val);
6848 } else {
6849 /* MOVT */
6850 tmp = load_reg(s, rd);
6851 tcg_gen_ext16u_i32(tmp, tmp);
6852 tcg_gen_ori_i32(tmp, tmp, val << 16);
6853 }
6854 store_reg(s, rd, tmp);
6855 } else {
6856 if (((insn >> 12) & 0xf) != 0xf)
6857 goto illegal_op;
6858 if (((insn >> 16) & 0xf) == 0) {
6859 gen_nop_hint(s, insn & 0xff);
6860 } else {
6861 /* CPSR = immediate */
6862 val = insn & 0xff;
6863 shift = ((insn >> 8) & 0xf) * 2;
6864 if (shift)
6865 val = (val >> shift) | (val << (32 - shift));
6866 i = ((insn & (1 << 22)) != 0);
6867 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6868 goto illegal_op;
6869 }
6870 }
6871 } else if ((insn & 0x0f900000) == 0x01000000
6872 && (insn & 0x00000090) != 0x00000090) {
6873 /* miscellaneous instructions */
6874 op1 = (insn >> 21) & 3;
6875 sh = (insn >> 4) & 0xf;
6876 rm = insn & 0xf;
6877 switch (sh) {
6878 case 0x0: /* move program status register */
6879 if (op1 & 1) {
6880 /* PSR = reg */
6881 tmp = load_reg(s, rm);
6882 i = ((op1 & 2) != 0);
6883 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6884 goto illegal_op;
6885 } else {
6886 /* reg = PSR */
6887 rd = (insn >> 12) & 0xf;
6888 if (op1 & 2) {
6889 if (IS_USER(s))
6890 goto illegal_op;
6891 tmp = load_cpu_field(spsr);
6892 } else {
6893 tmp = tcg_temp_new_i32();
6894 gen_helper_cpsr_read(tmp, cpu_env);
6895 }
6896 store_reg(s, rd, tmp);
6897 }
6898 break;
6899 case 0x1:
6900 if (op1 == 1) {
6901 /* branch/exchange thumb (bx). */
6902 ARCH(4T);
6903 tmp = load_reg(s, rm);
6904 gen_bx(s, tmp);
6905 } else if (op1 == 3) {
6906 /* clz */
6907 ARCH(5);
6908 rd = (insn >> 12) & 0xf;
6909 tmp = load_reg(s, rm);
6910 gen_helper_clz(tmp, tmp);
6911 store_reg(s, rd, tmp);
6912 } else {
6913 goto illegal_op;
6914 }
6915 break;
6916 case 0x2:
6917 if (op1 == 1) {
6918 ARCH(5J); /* bxj */
6919 /* Trivial implementation equivalent to bx. */
6920 tmp = load_reg(s, rm);
6921 gen_bx(s, tmp);
6922 } else {
6923 goto illegal_op;
6924 }
6925 break;
6926 case 0x3:
6927 if (op1 != 1)
6928 goto illegal_op;
6929
6930 ARCH(5);
6931 /* branch link/exchange thumb (blx) */
6932 tmp = load_reg(s, rm);
6933 tmp2 = tcg_temp_new_i32();
6934 tcg_gen_movi_i32(tmp2, s->pc);
6935 store_reg(s, 14, tmp2);
6936 gen_bx(s, tmp);
6937 break;
6938 case 0x5: /* saturating add/subtract */
6939 ARCH(5TE);
6940 rd = (insn >> 12) & 0xf;
6941 rn = (insn >> 16) & 0xf;
6942 tmp = load_reg(s, rm);
6943 tmp2 = load_reg(s, rn);
6944 if (op1 & 2)
6945 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
6946 if (op1 & 1)
6947 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
6948 else
6949 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
6950 tcg_temp_free_i32(tmp2);
6951 store_reg(s, rd, tmp);
6952 break;
6953 case 7:
6954 /* SMC instruction (op1 == 3)
6955 and undefined instructions (op1 == 0 || op1 == 2)
6956 will trap */
6957 if (op1 != 1) {
6958 goto illegal_op;
6959 }
6960 /* bkpt */
6961 ARCH(5);
6962 gen_exception_insn(s, 4, EXCP_BKPT);
6963 break;
6964 case 0x8: /* signed multiply */
6965 case 0xa:
6966 case 0xc:
6967 case 0xe:
6968 ARCH(5TE);
6969 rs = (insn >> 8) & 0xf;
6970 rn = (insn >> 12) & 0xf;
6971 rd = (insn >> 16) & 0xf;
6972 if (op1 == 1) {
6973 /* (32 * 16) >> 16 */
6974 tmp = load_reg(s, rm);
6975 tmp2 = load_reg(s, rs);
6976 if (sh & 4)
6977 tcg_gen_sari_i32(tmp2, tmp2, 16);
6978 else
6979 gen_sxth(tmp2);
6980 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6981 tcg_gen_shri_i64(tmp64, tmp64, 16);
6982 tmp = tcg_temp_new_i32();
6983 tcg_gen_trunc_i64_i32(tmp, tmp64);
6984 tcg_temp_free_i64(tmp64);
6985 if ((sh & 2) == 0) {
6986 tmp2 = load_reg(s, rn);
6987 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
6988 tcg_temp_free_i32(tmp2);
6989 }
6990 store_reg(s, rd, tmp);
6991 } else {
6992 /* 16 * 16 */
6993 tmp = load_reg(s, rm);
6994 tmp2 = load_reg(s, rs);
6995 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6996 tcg_temp_free_i32(tmp2);
6997 if (op1 == 2) {
6998 tmp64 = tcg_temp_new_i64();
6999 tcg_gen_ext_i32_i64(tmp64, tmp);
7000 tcg_temp_free_i32(tmp);
7001 gen_addq(s, tmp64, rn, rd);
7002 gen_storeq_reg(s, rn, rd, tmp64);
7003 tcg_temp_free_i64(tmp64);
7004 } else {
7005 if (op1 == 0) {
7006 tmp2 = load_reg(s, rn);
7007 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7008 tcg_temp_free_i32(tmp2);
7009 }
7010 store_reg(s, rd, tmp);
7011 }
7012 }
7013 break;
7014 default:
7015 goto illegal_op;
7016 }
7017 } else if (((insn & 0x0e000000) == 0 &&
7018 (insn & 0x00000090) != 0x90) ||
7019 ((insn & 0x0e000000) == (1 << 25))) {
7020 int set_cc, logic_cc, shiftop;
7021
7022 op1 = (insn >> 21) & 0xf;
7023 set_cc = (insn >> 20) & 1;
7024 logic_cc = table_logic_cc[op1] & set_cc;
7025
7026 /* data processing instruction */
7027 if (insn & (1 << 25)) {
7028 /* immediate operand */
7029 val = insn & 0xff;
7030 shift = ((insn >> 8) & 0xf) * 2;
7031 if (shift) {
7032 val = (val >> shift) | (val << (32 - shift));
7033 }
7034 tmp2 = tcg_temp_new_i32();
7035 tcg_gen_movi_i32(tmp2, val);
7036 if (logic_cc && shift) {
7037 gen_set_CF_bit31(tmp2);
7038 }
7039 } else {
7040 /* register */
7041 rm = (insn) & 0xf;
7042 tmp2 = load_reg(s, rm);
7043 shiftop = (insn >> 5) & 3;
7044 if (!(insn & (1 << 4))) {
7045 shift = (insn >> 7) & 0x1f;
7046 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7047 } else {
7048 rs = (insn >> 8) & 0xf;
7049 tmp = load_reg(s, rs);
7050 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
7051 }
7052 }
7053 if (op1 != 0x0f && op1 != 0x0d) {
7054 rn = (insn >> 16) & 0xf;
7055 tmp = load_reg(s, rn);
7056 } else {
7057 TCGV_UNUSED_I32(tmp);
7058 }
7059 rd = (insn >> 12) & 0xf;
7060 switch(op1) {
7061 case 0x00:
7062 tcg_gen_and_i32(tmp, tmp, tmp2);
7063 if (logic_cc) {
7064 gen_logic_CC(tmp);
7065 }
7066 store_reg_bx(env, s, rd, tmp);
7067 break;
7068 case 0x01:
7069 tcg_gen_xor_i32(tmp, tmp, tmp2);
7070 if (logic_cc) {
7071 gen_logic_CC(tmp);
7072 }
7073 store_reg_bx(env, s, rd, tmp);
7074 break;
7075 case 0x02:
7076 if (set_cc && rd == 15) {
7077 /* SUBS r15, ... is used for exception return. */
7078 if (IS_USER(s)) {
7079 goto illegal_op;
7080 }
7081 gen_sub_CC(tmp, tmp, tmp2);
7082 gen_exception_return(s, tmp);
7083 } else {
7084 if (set_cc) {
7085 gen_sub_CC(tmp, tmp, tmp2);
7086 } else {
7087 tcg_gen_sub_i32(tmp, tmp, tmp2);
7088 }
7089 store_reg_bx(env, s, rd, tmp);
7090 }
7091 break;
7092 case 0x03:
7093 if (set_cc) {
7094 gen_sub_CC(tmp, tmp2, tmp);
7095 } else {
7096 tcg_gen_sub_i32(tmp, tmp2, tmp);
7097 }
7098 store_reg_bx(env, s, rd, tmp);
7099 break;
7100 case 0x04:
7101 if (set_cc) {
7102 gen_add_CC(tmp, tmp, tmp2);
7103 } else {
7104 tcg_gen_add_i32(tmp, tmp, tmp2);
7105 }
7106 store_reg_bx(env, s, rd, tmp);
7107 break;
7108 case 0x05:
7109 if (set_cc) {
7110 gen_adc_CC(tmp, tmp, tmp2);
7111 } else {
7112 gen_add_carry(tmp, tmp, tmp2);
7113 }
7114 store_reg_bx(env, s, rd, tmp);
7115 break;
7116 case 0x06:
7117 if (set_cc) {
7118 gen_sbc_CC(tmp, tmp, tmp2);
7119 } else {
7120 gen_sub_carry(tmp, tmp, tmp2);
7121 }
7122 store_reg_bx(env, s, rd, tmp);
7123 break;
7124 case 0x07:
7125 if (set_cc) {
7126 gen_sbc_CC(tmp, tmp2, tmp);
7127 } else {
7128 gen_sub_carry(tmp, tmp2, tmp);
7129 }
7130 store_reg_bx(env, s, rd, tmp);
7131 break;
7132 case 0x08:
7133 if (set_cc) {
7134 tcg_gen_and_i32(tmp, tmp, tmp2);
7135 gen_logic_CC(tmp);
7136 }
7137 tcg_temp_free_i32(tmp);
7138 break;
7139 case 0x09:
7140 if (set_cc) {
7141 tcg_gen_xor_i32(tmp, tmp, tmp2);
7142 gen_logic_CC(tmp);
7143 }
7144 tcg_temp_free_i32(tmp);
7145 break;
7146 case 0x0a:
7147 if (set_cc) {
7148 gen_sub_CC(tmp, tmp, tmp2);
7149 }
7150 tcg_temp_free_i32(tmp);
7151 break;
7152 case 0x0b:
7153 if (set_cc) {
7154 gen_add_CC(tmp, tmp, tmp2);
7155 }
7156 tcg_temp_free_i32(tmp);
7157 break;
7158 case 0x0c:
7159 tcg_gen_or_i32(tmp, tmp, tmp2);
7160 if (logic_cc) {
7161 gen_logic_CC(tmp);
7162 }
7163 store_reg_bx(env, s, rd, tmp);
7164 break;
7165 case 0x0d:
7166 if (logic_cc && rd == 15) {
7167 /* MOVS r15, ... is used for exception return. */
7168 if (IS_USER(s)) {
7169 goto illegal_op;
7170 }
7171 gen_exception_return(s, tmp2);
7172 } else {
7173 if (logic_cc) {
7174 gen_logic_CC(tmp2);
7175 }
7176 store_reg_bx(env, s, rd, tmp2);
7177 }
7178 break;
7179 case 0x0e:
7180 tcg_gen_andc_i32(tmp, tmp, tmp2);
7181 if (logic_cc) {
7182 gen_logic_CC(tmp);
7183 }
7184 store_reg_bx(env, s, rd, tmp);
7185 break;
7186 default:
7187 case 0x0f:
7188 tcg_gen_not_i32(tmp2, tmp2);
7189 if (logic_cc) {
7190 gen_logic_CC(tmp2);
7191 }
7192 store_reg_bx(env, s, rd, tmp2);
7193 break;
7194 }
7195 if (op1 != 0x0f && op1 != 0x0d) {
7196 tcg_temp_free_i32(tmp2);
7197 }
7198 } else {
7199 /* other instructions */
7200 op1 = (insn >> 24) & 0xf;
7201 switch(op1) {
7202 case 0x0:
7203 case 0x1:
7204 /* multiplies, extra load/stores */
7205 sh = (insn >> 5) & 3;
7206 if (sh == 0) {
7207 if (op1 == 0x0) {
7208 rd = (insn >> 16) & 0xf;
7209 rn = (insn >> 12) & 0xf;
7210 rs = (insn >> 8) & 0xf;
7211 rm = (insn) & 0xf;
7212 op1 = (insn >> 20) & 0xf;
7213 switch (op1) {
7214 case 0: case 1: case 2: case 3: case 6:
7215 /* 32 bit mul */
7216 tmp = load_reg(s, rs);
7217 tmp2 = load_reg(s, rm);
7218 tcg_gen_mul_i32(tmp, tmp, tmp2);
7219 tcg_temp_free_i32(tmp2);
7220 if (insn & (1 << 22)) {
7221 /* Subtract (mls) */
7222 ARCH(6T2);
7223 tmp2 = load_reg(s, rn);
7224 tcg_gen_sub_i32(tmp, tmp2, tmp);
7225 tcg_temp_free_i32(tmp2);
7226 } else if (insn & (1 << 21)) {
7227 /* Add */
7228 tmp2 = load_reg(s, rn);
7229 tcg_gen_add_i32(tmp, tmp, tmp2);
7230 tcg_temp_free_i32(tmp2);
7231 }
7232 if (insn & (1 << 20))
7233 gen_logic_CC(tmp);
7234 store_reg(s, rd, tmp);
7235 break;
7236 case 4:
7237 /* 64 bit mul double accumulate (UMAAL) */
7238 ARCH(6);
7239 tmp = load_reg(s, rs);
7240 tmp2 = load_reg(s, rm);
7241 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7242 gen_addq_lo(s, tmp64, rn);
7243 gen_addq_lo(s, tmp64, rd);
7244 gen_storeq_reg(s, rn, rd, tmp64);
7245 tcg_temp_free_i64(tmp64);
7246 break;
7247 case 8: case 9: case 10: case 11:
7248 case 12: case 13: case 14: case 15:
7249 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7250 tmp = load_reg(s, rs);
7251 tmp2 = load_reg(s, rm);
7252 if (insn & (1 << 22)) {
7253 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
7254 } else {
7255 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
7256 }
7257 if (insn & (1 << 21)) { /* mult accumulate */
7258 TCGv_i32 al = load_reg(s, rn);
7259 TCGv_i32 ah = load_reg(s, rd);
7260 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
7261 tcg_temp_free_i32(al);
7262 tcg_temp_free_i32(ah);
7263 }
7264 if (insn & (1 << 20)) {
7265 gen_logicq_cc(tmp, tmp2);
7266 }
7267 store_reg(s, rn, tmp);
7268 store_reg(s, rd, tmp2);
7269 break;
7270 default:
7271 goto illegal_op;
7272 }
7273 } else {
7274 rn = (insn >> 16) & 0xf;
7275 rd = (insn >> 12) & 0xf;
7276 if (insn & (1 << 23)) {
7277 /* load/store exclusive */
7278 int op2 = (insn >> 8) & 3;
7279 op1 = (insn >> 21) & 0x3;
7280
7281 switch (op2) {
7282 case 0: /* lda/stl */
7283 if (op1 == 1) {
7284 goto illegal_op;
7285 }
7286 ARCH(8);
7287 break;
7288 case 1: /* reserved */
7289 goto illegal_op;
7290 case 2: /* ldaex/stlex */
7291 ARCH(8);
7292 break;
7293 case 3: /* ldrex/strex */
7294 if (op1) {
7295 ARCH(6K);
7296 } else {
7297 ARCH(6);
7298 }
7299 break;
7300 }
7301
7302 addr = tcg_temp_local_new_i32();
7303 load_reg_var(s, addr, rn);
7304
7305 /* Since the emulation does not have barriers,
7306 the acquire/release semantics need no special
7307 handling */
7308 if (op2 == 0) {
7309 if (insn & (1 << 20)) {
7310 tmp = tcg_temp_new_i32();
7311 switch (op1) {
7312 case 0: /* lda */
7313 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
7314 break;
7315 case 2: /* ldab */
7316 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
7317 break;
7318 case 3: /* ldah */
7319 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
7320 break;
7321 default:
7322 abort();
7323 }
7324 store_reg(s, rd, tmp);
7325 } else {
7326 rm = insn & 0xf;
7327 tmp = load_reg(s, rm);
7328 switch (op1) {
7329 case 0: /* stl */
7330 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7331 break;
7332 case 2: /* stlb */
7333 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
7334 break;
7335 case 3: /* stlh */
7336 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
7337 break;
7338 default:
7339 abort();
7340 }
7341 tcg_temp_free_i32(tmp);
7342 }
7343 } else if (insn & (1 << 20)) {
7344 switch (op1) {
7345 case 0: /* ldrex */
7346 gen_load_exclusive(s, rd, 15, addr, 2);
7347 break;
7348 case 1: /* ldrexd */
7349 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7350 break;
7351 case 2: /* ldrexb */
7352 gen_load_exclusive(s, rd, 15, addr, 0);
7353 break;
7354 case 3: /* ldrexh */
7355 gen_load_exclusive(s, rd, 15, addr, 1);
7356 break;
7357 default:
7358 abort();
7359 }
7360 } else {
7361 rm = insn & 0xf;
7362 switch (op1) {
7363 case 0: /* strex */
7364 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7365 break;
7366 case 1: /* strexd */
7367 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7368 break;
7369 case 2: /* strexb */
7370 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7371 break;
7372 case 3: /* strexh */
7373 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7374 break;
7375 default:
7376 abort();
7377 }
7378 }
7379 tcg_temp_free_i32(addr);
7380 } else {
7381 /* SWP instruction */
7382 rm = (insn) & 0xf;
7383
7384 /* ??? This is not really atomic. However we know
7385 we never have multiple CPUs running in parallel,
7386 so it is good enough. */
7387 addr = load_reg(s, rn);
7388 tmp = load_reg(s, rm);
7389 tmp2 = tcg_temp_new_i32();
7390 if (insn & (1 << 22)) {
7391 tcg_gen_qemu_ld8u(tmp2, addr, IS_USER(s));
7392 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
7393 } else {
7394 tcg_gen_qemu_ld32u(tmp2, addr, IS_USER(s));
7395 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7396 }
7397 tcg_temp_free_i32(tmp);
7398 tcg_temp_free_i32(addr);
7399 store_reg(s, rd, tmp2);
7400 }
7401 }
7402 } else {
7403 int address_offset;
7404 int load;
7405 /* Misc load/store */
7406 rn = (insn >> 16) & 0xf;
7407 rd = (insn >> 12) & 0xf;
7408 addr = load_reg(s, rn);
7409 if (insn & (1 << 24))
7410 gen_add_datah_offset(s, insn, 0, addr);
7411 address_offset = 0;
7412 if (insn & (1 << 20)) {
7413 /* load */
7414 tmp = tcg_temp_new_i32();
7415 switch(sh) {
7416 case 1:
7417 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
7418 break;
7419 case 2:
7420 tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
7421 break;
7422 default:
7423 case 3:
7424 tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
7425 break;
7426 }
7427 load = 1;
7428 } else if (sh & 2) {
7429 ARCH(5TE);
7430 /* doubleword */
7431 if (sh & 1) {
7432 /* store */
7433 tmp = load_reg(s, rd);
7434 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7435 tcg_temp_free_i32(tmp);
7436 tcg_gen_addi_i32(addr, addr, 4);
7437 tmp = load_reg(s, rd + 1);
7438 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7439 tcg_temp_free_i32(tmp);
7440 load = 0;
7441 } else {
7442 /* load */
7443 tmp = tcg_temp_new_i32();
7444 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
7445 store_reg(s, rd, tmp);
7446 tcg_gen_addi_i32(addr, addr, 4);
7447 tmp = tcg_temp_new_i32();
7448 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
7449 rd++;
7450 load = 1;
7451 }
7452 address_offset = -4;
7453 } else {
7454 /* store */
7455 tmp = load_reg(s, rd);
7456 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
7457 tcg_temp_free_i32(tmp);
7458 load = 0;
7459 }
7460 /* Perform base writeback before the loaded value to
7461 ensure correct behavior with overlapping index registers.
7462                        ldrd with base writeback is undefined if the
7463 destination and index registers overlap. */
7464 if (!(insn & (1 << 24))) {
7465 gen_add_datah_offset(s, insn, address_offset, addr);
7466 store_reg(s, rn, addr);
7467 } else if (insn & (1 << 21)) {
7468 if (address_offset)
7469 tcg_gen_addi_i32(addr, addr, address_offset);
7470 store_reg(s, rn, addr);
7471 } else {
7472 tcg_temp_free_i32(addr);
7473 }
7474 if (load) {
7475 /* Complete the load. */
7476 store_reg(s, rd, tmp);
7477 }
7478 }
7479 break;
7480 case 0x4:
7481 case 0x5:
7482 goto do_ldst;
7483 case 0x6:
7484 case 0x7:
7485 if (insn & (1 << 4)) {
7486 ARCH(6);
7487                 /* ARMv6 Media instructions.  */
7488 rm = insn & 0xf;
7489 rn = (insn >> 16) & 0xf;
7490 rd = (insn >> 12) & 0xf;
7491 rs = (insn >> 8) & 0xf;
7492 switch ((insn >> 23) & 3) {
7493 case 0: /* Parallel add/subtract. */
7494 op1 = (insn >> 20) & 7;
7495 tmp = load_reg(s, rn);
7496 tmp2 = load_reg(s, rm);
7497 sh = (insn >> 5) & 7;
7498 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7499 goto illegal_op;
7500 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7501 tcg_temp_free_i32(tmp2);
7502 store_reg(s, rd, tmp);
7503 break;
7504 case 1:
7505 if ((insn & 0x00700020) == 0) {
7506 /* Halfword pack. */
7507 tmp = load_reg(s, rn);
7508 tmp2 = load_reg(s, rm);
7509 shift = (insn >> 7) & 0x1f;
7510 if (insn & (1 << 6)) {
7511 /* pkhtb */
7512 if (shift == 0)
7513 shift = 31;
7514 tcg_gen_sari_i32(tmp2, tmp2, shift);
7515 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7516 tcg_gen_ext16u_i32(tmp2, tmp2);
7517 } else {
7518 /* pkhbt */
7519 if (shift)
7520 tcg_gen_shli_i32(tmp2, tmp2, shift);
7521 tcg_gen_ext16u_i32(tmp, tmp);
7522 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7523 }
7524 tcg_gen_or_i32(tmp, tmp, tmp2);
7525 tcg_temp_free_i32(tmp2);
7526 store_reg(s, rd, tmp);
7527 } else if ((insn & 0x00200020) == 0x00200000) {
7528 /* [us]sat */
7529 tmp = load_reg(s, rm);
7530 shift = (insn >> 7) & 0x1f;
7531 if (insn & (1 << 6)) {
7532 if (shift == 0)
7533 shift = 31;
7534 tcg_gen_sari_i32(tmp, tmp, shift);
7535 } else {
7536 tcg_gen_shli_i32(tmp, tmp, shift);
7537 }
7538 sh = (insn >> 16) & 0x1f;
7539 tmp2 = tcg_const_i32(sh);
7540 if (insn & (1 << 22))
7541 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
7542 else
7543 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
7544 tcg_temp_free_i32(tmp2);
7545 store_reg(s, rd, tmp);
7546 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7547 /* [us]sat16 */
7548 tmp = load_reg(s, rm);
7549 sh = (insn >> 16) & 0x1f;
7550 tmp2 = tcg_const_i32(sh);
7551 if (insn & (1 << 22))
7552 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
7553 else
7554 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
7555 tcg_temp_free_i32(tmp2);
7556 store_reg(s, rd, tmp);
7557 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7558 /* Select bytes. */
7559 tmp = load_reg(s, rn);
7560 tmp2 = load_reg(s, rm);
7561 tmp3 = tcg_temp_new_i32();
7562 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
7563 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7564 tcg_temp_free_i32(tmp3);
7565 tcg_temp_free_i32(tmp2);
7566 store_reg(s, rd, tmp);
7567 } else if ((insn & 0x000003e0) == 0x00000060) {
7568 tmp = load_reg(s, rm);
7569 shift = (insn >> 10) & 3;
7570 /* ??? In many cases it's not necessary to do a
7571                        rotate; a shift is sufficient.  */
7572 if (shift != 0)
7573 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7574 op1 = (insn >> 20) & 7;
7575 switch (op1) {
7576 case 0: gen_sxtb16(tmp); break;
7577 case 2: gen_sxtb(tmp); break;
7578 case 3: gen_sxth(tmp); break;
7579 case 4: gen_uxtb16(tmp); break;
7580 case 6: gen_uxtb(tmp); break;
7581 case 7: gen_uxth(tmp); break;
7582 default: goto illegal_op;
7583 }
7584 if (rn != 15) {
7585 tmp2 = load_reg(s, rn);
7586 if ((op1 & 3) == 0) {
7587 gen_add16(tmp, tmp2);
7588 } else {
7589 tcg_gen_add_i32(tmp, tmp, tmp2);
7590 tcg_temp_free_i32(tmp2);
7591 }
7592 }
7593 store_reg(s, rd, tmp);
7594 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7595 /* rev */
7596 tmp = load_reg(s, rm);
7597 if (insn & (1 << 22)) {
7598 if (insn & (1 << 7)) {
7599 gen_revsh(tmp);
7600 } else {
7601 ARCH(6T2);
7602 gen_helper_rbit(tmp, tmp);
7603 }
7604 } else {
7605 if (insn & (1 << 7))
7606 gen_rev16(tmp);
7607 else
7608 tcg_gen_bswap32_i32(tmp, tmp);
7609 }
7610 store_reg(s, rd, tmp);
7611 } else {
7612 goto illegal_op;
7613 }
7614 break;
7615 case 2: /* Multiplies (Type 3). */
7616 switch ((insn >> 20) & 0x7) {
7617 case 5:
7618 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7619 /* op2 not 00x or 11x : UNDEF */
7620 goto illegal_op;
7621 }
7622 /* Signed multiply most significant [accumulate].
7623 (SMMUL, SMMLA, SMMLS) */
7624 tmp = load_reg(s, rm);
7625 tmp2 = load_reg(s, rs);
7626 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7627
7628 if (rd != 15) {
7629 tmp = load_reg(s, rd);
7630 if (insn & (1 << 6)) {
7631 tmp64 = gen_subq_msw(tmp64, tmp);
7632 } else {
7633 tmp64 = gen_addq_msw(tmp64, tmp);
7634 }
7635 }
7636 if (insn & (1 << 5)) {
7637 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7638 }
7639 tcg_gen_shri_i64(tmp64, tmp64, 32);
7640 tmp = tcg_temp_new_i32();
7641 tcg_gen_trunc_i64_i32(tmp, tmp64);
7642 tcg_temp_free_i64(tmp64);
7643 store_reg(s, rn, tmp);
7644 break;
7645 case 0:
7646 case 4:
7647 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7648 if (insn & (1 << 7)) {
7649 goto illegal_op;
7650 }
7651 tmp = load_reg(s, rm);
7652 tmp2 = load_reg(s, rs);
7653 if (insn & (1 << 5))
7654 gen_swap_half(tmp2);
7655 gen_smul_dual(tmp, tmp2);
7656 if (insn & (1 << 6)) {
7657 /* This subtraction cannot overflow. */
7658 tcg_gen_sub_i32(tmp, tmp, tmp2);
7659 } else {
7660 /* This addition cannot overflow 32 bits;
7661                              * however it may overflow when considered as a signed
7662 * operation, in which case we must set the Q flag.
7663 */
7664 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7665 }
7666 tcg_temp_free_i32(tmp2);
7667 if (insn & (1 << 22)) {
7668 /* smlald, smlsld */
7669 tmp64 = tcg_temp_new_i64();
7670 tcg_gen_ext_i32_i64(tmp64, tmp);
7671 tcg_temp_free_i32(tmp);
7672 gen_addq(s, tmp64, rd, rn);
7673 gen_storeq_reg(s, rd, rn, tmp64);
7674 tcg_temp_free_i64(tmp64);
7675 } else {
7676 /* smuad, smusd, smlad, smlsd */
7677 if (rd != 15)
7678 {
7679 tmp2 = load_reg(s, rd);
7680 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7681 tcg_temp_free_i32(tmp2);
7682 }
7683 store_reg(s, rn, tmp);
7684 }
7685 break;
7686 case 1:
7687 case 3:
7688 /* SDIV, UDIV */
7689 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7690 goto illegal_op;
7691 }
7692 if (((insn >> 5) & 7) || (rd != 15)) {
7693 goto illegal_op;
7694 }
7695 tmp = load_reg(s, rm);
7696 tmp2 = load_reg(s, rs);
7697 if (insn & (1 << 21)) {
7698 gen_helper_udiv(tmp, tmp, tmp2);
7699 } else {
7700 gen_helper_sdiv(tmp, tmp, tmp2);
7701 }
7702 tcg_temp_free_i32(tmp2);
7703 store_reg(s, rn, tmp);
7704 break;
7705 default:
7706 goto illegal_op;
7707 }
7708 break;
7709 case 3:
7710 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7711 switch (op1) {
7712 case 0: /* Unsigned sum of absolute differences. */
7713 ARCH(6);
7714 tmp = load_reg(s, rm);
7715 tmp2 = load_reg(s, rs);
7716 gen_helper_usad8(tmp, tmp, tmp2);
7717 tcg_temp_free_i32(tmp2);
7718 if (rd != 15) {
7719 tmp2 = load_reg(s, rd);
7720 tcg_gen_add_i32(tmp, tmp, tmp2);
7721 tcg_temp_free_i32(tmp2);
7722 }
7723 store_reg(s, rn, tmp);
7724 break;
7725 case 0x20: case 0x24: case 0x28: case 0x2c:
7726 /* Bitfield insert/clear. */
7727 ARCH(6T2);
7728 shift = (insn >> 7) & 0x1f;
7729 i = (insn >> 16) & 0x1f;
7730 i = i + 1 - shift;
7731 if (rm == 15) {
7732 tmp = tcg_temp_new_i32();
7733 tcg_gen_movi_i32(tmp, 0);
7734 } else {
7735 tmp = load_reg(s, rm);
7736 }
7737 if (i != 32) {
7738 tmp2 = load_reg(s, rd);
7739 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7740 tcg_temp_free_i32(tmp2);
7741 }
7742 store_reg(s, rd, tmp);
7743 break;
7744 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7745 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7746 ARCH(6T2);
7747 tmp = load_reg(s, rm);
7748 shift = (insn >> 7) & 0x1f;
7749 i = ((insn >> 16) & 0x1f) + 1;
7750 if (shift + i > 32)
7751 goto illegal_op;
7752 if (i < 32) {
7753 if (op1 & 0x20) {
7754 gen_ubfx(tmp, shift, (1u << i) - 1);
7755 } else {
7756 gen_sbfx(tmp, shift, i);
7757 }
7758 }
7759 store_reg(s, rd, tmp);
7760 break;
7761 default:
7762 goto illegal_op;
7763 }
7764 break;
7765 }
7766 break;
7767 }
7768 do_ldst:
7769 /* Check for undefined extension instructions
7770          * per the ARM Bible, i.e.:
7771 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7772 */
7773 sh = (0xf << 20) | (0xf << 4);
7774 if (op1 == 0x7 && ((insn & sh) == sh))
7775 {
7776 goto illegal_op;
7777 }
7778 /* load/store byte/word */
7779 rn = (insn >> 16) & 0xf;
7780 rd = (insn >> 12) & 0xf;
7781 tmp2 = load_reg(s, rn);
7782 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7783 if (insn & (1 << 24))
7784 gen_add_data_offset(s, insn, tmp2);
7785 if (insn & (1 << 20)) {
7786 /* load */
7787 tmp = tcg_temp_new_i32();
7788 if (insn & (1 << 22)) {
7789 tcg_gen_qemu_ld8u(tmp, tmp2, i);
7790 } else {
7791 tcg_gen_qemu_ld32u(tmp, tmp2, i);
7792 }
7793 } else {
7794 /* store */
7795 tmp = load_reg(s, rd);
7796 if (insn & (1 << 22)) {
7797 tcg_gen_qemu_st8(tmp, tmp2, i);
7798 } else {
7799 tcg_gen_qemu_st32(tmp, tmp2, i);
7800 }
7801 tcg_temp_free_i32(tmp);
7802 }
7803 if (!(insn & (1 << 24))) {
7804 gen_add_data_offset(s, insn, tmp2);
7805 store_reg(s, rn, tmp2);
7806 } else if (insn & (1 << 21)) {
7807 store_reg(s, rn, tmp2);
7808 } else {
7809 tcg_temp_free_i32(tmp2);
7810 }
7811 if (insn & (1 << 20)) {
7812 /* Complete the load. */
7813 store_reg_from_load(env, s, rd, tmp);
7814 }
7815 break;
7816 case 0x08:
7817 case 0x09:
7818 {
7819 int j, n, user, loaded_base;
7820 TCGv_i32 loaded_var;
7821 /* load/store multiple words */
7822 /* XXX: store correct base if write back */
7823 user = 0;
7824 if (insn & (1 << 22)) {
7825 if (IS_USER(s))
7826 goto illegal_op; /* only usable in supervisor mode */
7827
7828 if ((insn & (1 << 15)) == 0)
7829 user = 1;
7830 }
7831 rn = (insn >> 16) & 0xf;
7832 addr = load_reg(s, rn);
7833
7834 /* compute total size */
7835 loaded_base = 0;
7836 TCGV_UNUSED_I32(loaded_var);
7837 n = 0;
7838 for(i=0;i<16;i++) {
7839 if (insn & (1 << i))
7840 n++;
7841 }
7842 /* XXX: test invalid n == 0 case ? */
7843 if (insn & (1 << 23)) {
7844 if (insn & (1 << 24)) {
7845 /* pre increment */
7846 tcg_gen_addi_i32(addr, addr, 4);
7847 } else {
7848 /* post increment */
7849 }
7850 } else {
7851 if (insn & (1 << 24)) {
7852 /* pre decrement */
7853 tcg_gen_addi_i32(addr, addr, -(n * 4));
7854 } else {
7855 /* post decrement */
7856 if (n != 1)
7857 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7858 }
7859 }
7860 j = 0;
7861 for(i=0;i<16;i++) {
7862 if (insn & (1 << i)) {
7863 if (insn & (1 << 20)) {
7864 /* load */
7865 tmp = tcg_temp_new_i32();
7866 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
7867 if (user) {
7868 tmp2 = tcg_const_i32(i);
7869 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
7870 tcg_temp_free_i32(tmp2);
7871 tcg_temp_free_i32(tmp);
7872 } else if (i == rn) {
7873 loaded_var = tmp;
7874 loaded_base = 1;
7875 } else {
7876 store_reg_from_load(env, s, i, tmp);
7877 }
7878 } else {
7879 /* store */
7880 if (i == 15) {
7881 /* special case: r15 = PC + 8 */
7882 val = (long)s->pc + 4;
7883 tmp = tcg_temp_new_i32();
7884 tcg_gen_movi_i32(tmp, val);
7885 } else if (user) {
7886 tmp = tcg_temp_new_i32();
7887 tmp2 = tcg_const_i32(i);
7888 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
7889 tcg_temp_free_i32(tmp2);
7890 } else {
7891 tmp = load_reg(s, i);
7892 }
7893 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7894 tcg_temp_free_i32(tmp);
7895 }
7896 j++;
7897 /* no need to add after the last transfer */
7898 if (j != n)
7899 tcg_gen_addi_i32(addr, addr, 4);
7900 }
7901 }
7902 if (insn & (1 << 21)) {
7903 /* write back */
7904 if (insn & (1 << 23)) {
7905 if (insn & (1 << 24)) {
7906 /* pre increment */
7907 } else {
7908 /* post increment */
7909 tcg_gen_addi_i32(addr, addr, 4);
7910 }
7911 } else {
7912 if (insn & (1 << 24)) {
7913 /* pre decrement */
7914 if (n != 1)
7915 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7916 } else {
7917 /* post decrement */
7918 tcg_gen_addi_i32(addr, addr, -(n * 4));
7919 }
7920 }
7921 store_reg(s, rn, addr);
7922 } else {
7923 tcg_temp_free_i32(addr);
7924 }
7925 if (loaded_base) {
7926 store_reg(s, rn, loaded_var);
7927 }
7928 if ((insn & (1 << 22)) && !user) {
7929 /* Restore CPSR from SPSR. */
7930 tmp = load_cpu_field(spsr);
7931 gen_set_cpsr(tmp, 0xffffffff);
7932 tcg_temp_free_i32(tmp);
7933 s->is_jmp = DISAS_UPDATE;
7934 }
7935 }
7936 break;
7937 case 0xa:
7938 case 0xb:
7939 {
7940 int32_t offset;
7941
7942 /* branch (and link) */
7943 val = (int32_t)s->pc;
7944 if (insn & (1 << 24)) {
7945 tmp = tcg_temp_new_i32();
7946 tcg_gen_movi_i32(tmp, val);
7947 store_reg(s, 14, tmp);
7948 }
7949 offset = (((int32_t)insn << 8) >> 8);
7950 val += (offset << 2) + 4;
7951 gen_jmp(s, val);
7952 }
7953 break;
7954 case 0xc:
7955 case 0xd:
7956 case 0xe:
7957 /* Coprocessor. */
7958 if (disas_coproc_insn(env, s, insn))
7959 goto illegal_op;
7960 break;
7961 case 0xf:
7962 /* swi */
7963 gen_set_pc_im(s->pc);
7964 s->is_jmp = DISAS_SWI;
7965 break;
7966 default:
7967 illegal_op:
7968 gen_exception_insn(s, 4, EXCP_UDEF);
7969 break;
7970 }
7971 }
7972 }
7973
7974 /* Return true if this is a Thumb-2 logical op. */
7975 static int
7976 thumb2_logic_op(int op)
7977 {
7978 return (op < 8);
7979 }
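/* Note (a reading aid, not part of the original file): op values below 8 are
 * the Thumb-2 logical data-processing operations (AND, BIC, ORR, ORN, EOR;
 * 5-7 are unallocated and rejected by gen_thumb2_data_op() below).  For the
 * logical ops the carry flag is taken from the shifter output (shifter_out)
 * rather than from an arithmetic result.
 */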
7980
7981 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7982 then set condition code flags based on the result of the operation.
7983 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7984 to the high bit of T1.
7985 Returns zero if the opcode is valid. */
7986
7987 static int
7988 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
7989 TCGv_i32 t0, TCGv_i32 t1)
7990 {
7991 int logic_cc;
7992
7993 logic_cc = 0;
7994 switch (op) {
7995 case 0: /* and */
7996 tcg_gen_and_i32(t0, t0, t1);
7997 logic_cc = conds;
7998 break;
7999 case 1: /* bic */
8000 tcg_gen_andc_i32(t0, t0, t1);
8001 logic_cc = conds;
8002 break;
8003 case 2: /* orr */
8004 tcg_gen_or_i32(t0, t0, t1);
8005 logic_cc = conds;
8006 break;
8007 case 3: /* orn */
8008 tcg_gen_orc_i32(t0, t0, t1);
8009 logic_cc = conds;
8010 break;
8011 case 4: /* eor */
8012 tcg_gen_xor_i32(t0, t0, t1);
8013 logic_cc = conds;
8014 break;
8015 case 8: /* add */
8016 if (conds)
8017 gen_add_CC(t0, t0, t1);
8018 else
8019 tcg_gen_add_i32(t0, t0, t1);
8020 break;
8021 case 10: /* adc */
8022 if (conds)
8023 gen_adc_CC(t0, t0, t1);
8024 else
8025 gen_adc(t0, t1);
8026 break;
8027 case 11: /* sbc */
8028 if (conds) {
8029 gen_sbc_CC(t0, t0, t1);
8030 } else {
8031 gen_sub_carry(t0, t0, t1);
8032 }
8033 break;
8034 case 13: /* sub */
8035 if (conds)
8036 gen_sub_CC(t0, t0, t1);
8037 else
8038 tcg_gen_sub_i32(t0, t0, t1);
8039 break;
8040 case 14: /* rsb */
8041 if (conds)
8042 gen_sub_CC(t0, t1, t0);
8043 else
8044 tcg_gen_sub_i32(t0, t1, t0);
8045 break;
8046 default: /* 5, 6, 7, 9, 12, 15. */
8047 return 1;
8048 }
8049 if (logic_cc) {
8050 gen_logic_CC(t0);
8051 if (shifter_out)
8052 gen_set_CF_bit31(t1);
8053 }
8054 return 0;
8055 }
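/* Typical use (a condensed sketch, not part of the original file), mirroring
 * the "data processing register constant shift" case further below:
 *
 *     gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);     // shifter first
 *     if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))   // result in tmp
 *         goto illegal_op;
 *     tcg_temp_free_i32(tmp2);
 */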
8056
8057 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8058 is not legal. */
8059 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
8060 {
8061 uint32_t insn, imm, shift, offset;
8062 uint32_t rd, rn, rm, rs;
8063 TCGv_i32 tmp;
8064 TCGv_i32 tmp2;
8065 TCGv_i32 tmp3;
8066 TCGv_i32 addr;
8067 TCGv_i64 tmp64;
8068 int op;
8069 int shiftop;
8070 int conds;
8071 int logic_cc;
8072
8073 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8074           || arm_feature(env, ARM_FEATURE_M))) {
8075 /* Thumb-1 cores may need to treat bl and blx as a pair of
8076 16-bit instructions to get correct prefetch abort behavior. */
8077 insn = insn_hw1;
8078 if ((insn & (1 << 12)) == 0) {
8079 ARCH(5);
8080 /* Second half of blx. */
8081 offset = ((insn & 0x7ff) << 1);
8082 tmp = load_reg(s, 14);
8083 tcg_gen_addi_i32(tmp, tmp, offset);
8084 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
8085
8086 tmp2 = tcg_temp_new_i32();
8087 tcg_gen_movi_i32(tmp2, s->pc | 1);
8088 store_reg(s, 14, tmp2);
8089 gen_bx(s, tmp);
8090 return 0;
8091 }
8092 if (insn & (1 << 11)) {
8093 /* Second half of bl. */
8094 offset = ((insn & 0x7ff) << 1) | 1;
8095 tmp = load_reg(s, 14);
8096 tcg_gen_addi_i32(tmp, tmp, offset);
8097
8098 tmp2 = tcg_temp_new_i32();
8099 tcg_gen_movi_i32(tmp2, s->pc | 1);
8100 store_reg(s, 14, tmp2);
8101 gen_bx(s, tmp);
8102 return 0;
8103 }
8104 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8105 /* Instruction spans a page boundary. Implement it as two
8106                16-bit instructions in case the second half causes a
8107 prefetch abort. */
8108 offset = ((int32_t)insn << 21) >> 9;
8109 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
8110 return 0;
8111 }
8112 /* Fall through to 32-bit decode. */
8113 }
8114
8115 insn = arm_lduw_code(env, s->pc, s->bswap_code);
8116 s->pc += 2;
8117 insn |= (uint32_t)insn_hw1 << 16;
8118
8119 if ((insn & 0xf800e800) != 0xf000e800) {
8120 ARCH(6T2);
8121 }
8122
8123 rn = (insn >> 16) & 0xf;
8124 rs = (insn >> 12) & 0xf;
8125 rd = (insn >> 8) & 0xf;
8126 rm = insn & 0xf;
8127 switch ((insn >> 25) & 0xf) {
8128 case 0: case 1: case 2: case 3:
8129 /* 16-bit instructions. Should never happen. */
8130 abort();
8131 case 4:
8132 if (insn & (1 << 22)) {
8133 /* Other load/store, table branch. */
8134 if (insn & 0x01200000) {
8135 /* Load/store doubleword. */
8136 if (rn == 15) {
8137 addr = tcg_temp_new_i32();
8138 tcg_gen_movi_i32(addr, s->pc & ~3);
8139 } else {
8140 addr = load_reg(s, rn);
8141 }
8142 offset = (insn & 0xff) * 4;
8143 if ((insn & (1 << 23)) == 0)
8144 offset = -offset;
8145 if (insn & (1 << 24)) {
8146 tcg_gen_addi_i32(addr, addr, offset);
8147 offset = 0;
8148 }
8149 if (insn & (1 << 20)) {
8150 /* ldrd */
8151 tmp = tcg_temp_new_i32();
8152 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
8153 store_reg(s, rs, tmp);
8154 tcg_gen_addi_i32(addr, addr, 4);
8155 tmp = tcg_temp_new_i32();
8156 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
8157 store_reg(s, rd, tmp);
8158 } else {
8159 /* strd */
8160 tmp = load_reg(s, rs);
8161 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
8162 tcg_temp_free_i32(tmp);
8163 tcg_gen_addi_i32(addr, addr, 4);
8164 tmp = load_reg(s, rd);
8165 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
8166 tcg_temp_free_i32(tmp);
8167 }
8168 if (insn & (1 << 21)) {
8169 /* Base writeback. */
8170 if (rn == 15)
8171 goto illegal_op;
8172 tcg_gen_addi_i32(addr, addr, offset - 4);
8173 store_reg(s, rn, addr);
8174 } else {
8175 tcg_temp_free_i32(addr);
8176 }
8177 } else if ((insn & (1 << 23)) == 0) {
8178 /* Load/store exclusive word. */
8179 addr = tcg_temp_local_new_i32();
8180 load_reg_var(s, addr, rn);
8181 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8182 if (insn & (1 << 20)) {
8183 gen_load_exclusive(s, rs, 15, addr, 2);
8184 } else {
8185 gen_store_exclusive(s, rd, rs, 15, addr, 2);
8186 }
8187 tcg_temp_free_i32(addr);
8188 } else if ((insn & (7 << 5)) == 0) {
8189 /* Table Branch. */
8190 if (rn == 15) {
8191 addr = tcg_temp_new_i32();
8192 tcg_gen_movi_i32(addr, s->pc);
8193 } else {
8194 addr = load_reg(s, rn);
8195 }
8196 tmp = load_reg(s, rm);
8197 tcg_gen_add_i32(addr, addr, tmp);
8198 if (insn & (1 << 4)) {
8199 /* tbh */
8200 tcg_gen_add_i32(addr, addr, tmp);
8201 tcg_temp_free_i32(tmp);
8202 tmp = tcg_temp_new_i32();
8203 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
8204 } else { /* tbb */
8205 tcg_temp_free_i32(tmp);
8206 tmp = tcg_temp_new_i32();
8207 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
8208 }
8209 tcg_temp_free_i32(addr);
8210 tcg_gen_shli_i32(tmp, tmp, 1);
8211 tcg_gen_addi_i32(tmp, tmp, s->pc);
8212 store_reg(s, 15, tmp);
8213 } else {
8214 int op2 = (insn >> 6) & 0x3;
8215 op = (insn >> 4) & 0x3;
8216 switch (op2) {
8217 case 0:
8218 goto illegal_op;
8219 case 1:
8220 /* Load/store exclusive byte/halfword/doubleword */
8221 if (op == 2) {
8222 goto illegal_op;
8223 }
8224 ARCH(7);
8225 break;
8226 case 2:
8227 /* Load-acquire/store-release */
8228 if (op == 3) {
8229 goto illegal_op;
8230 }
8231 /* Fall through */
8232 case 3:
8233 /* Load-acquire/store-release exclusive */
8234 ARCH(8);
8235 break;
8236 }
8237 addr = tcg_temp_local_new_i32();
8238 load_reg_var(s, addr, rn);
8239 if (!(op2 & 1)) {
8240 if (insn & (1 << 20)) {
8241 tmp = tcg_temp_new_i32();
8242 switch (op) {
8243 case 0: /* ldab */
8244 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
8245 break;
8246 case 1: /* ldah */
8247 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
8248 break;
8249 case 2: /* lda */
8250 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
8251 break;
8252 default:
8253 abort();
8254 }
8255 store_reg(s, rs, tmp);
8256 } else {
8257 tmp = load_reg(s, rs);
8258 switch (op) {
8259 case 0: /* stlb */
8260 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
8261 break;
8262 case 1: /* stlh */
8263 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
8264 break;
8265 case 2: /* stl */
8266 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
8267 break;
8268 default:
8269 abort();
8270 }
8271 tcg_temp_free_i32(tmp);
8272 }
8273 } else if (insn & (1 << 20)) {
8274 gen_load_exclusive(s, rs, rd, addr, op);
8275 } else {
8276 gen_store_exclusive(s, rm, rs, rd, addr, op);
8277 }
8278 tcg_temp_free_i32(addr);
8279 }
8280 } else {
8281 /* Load/store multiple, RFE, SRS. */
8282 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8283 /* RFE, SRS: not available in user mode or on M profile */
8284 if (IS_USER(s) || IS_M(env)) {
8285 goto illegal_op;
8286 }
8287 if (insn & (1 << 20)) {
8288 /* rfe */
8289 addr = load_reg(s, rn);
8290 if ((insn & (1 << 24)) == 0)
8291 tcg_gen_addi_i32(addr, addr, -8);
8292 /* Load PC into tmp and CPSR into tmp2. */
8293 tmp = tcg_temp_new_i32();
8294 tcg_gen_qemu_ld32u(tmp, addr, 0);
8295 tcg_gen_addi_i32(addr, addr, 4);
8296 tmp2 = tcg_temp_new_i32();
8297 tcg_gen_qemu_ld32u(tmp2, addr, 0);
8298 if (insn & (1 << 21)) {
8299 /* Base writeback. */
8300 if (insn & (1 << 24)) {
8301 tcg_gen_addi_i32(addr, addr, 4);
8302 } else {
8303 tcg_gen_addi_i32(addr, addr, -4);
8304 }
8305 store_reg(s, rn, addr);
8306 } else {
8307 tcg_temp_free_i32(addr);
8308 }
8309 gen_rfe(s, tmp, tmp2);
8310 } else {
8311 /* srs */
8312 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
8313 insn & (1 << 21));
8314 }
8315 } else {
8316 int i, loaded_base = 0;
8317 TCGv_i32 loaded_var;
8318 /* Load/store multiple. */
8319 addr = load_reg(s, rn);
8320 offset = 0;
8321 for (i = 0; i < 16; i++) {
8322 if (insn & (1 << i))
8323 offset += 4;
8324 }
8325 if (insn & (1 << 24)) {
8326 tcg_gen_addi_i32(addr, addr, -offset);
8327 }
8328
8329 TCGV_UNUSED_I32(loaded_var);
8330 for (i = 0; i < 16; i++) {
8331 if ((insn & (1 << i)) == 0)
8332 continue;
8333 if (insn & (1 << 20)) {
8334 /* Load. */
8335 tmp = tcg_temp_new_i32();
8336 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
8337 if (i == 15) {
8338 gen_bx(s, tmp);
8339 } else if (i == rn) {
8340 loaded_var = tmp;
8341 loaded_base = 1;
8342 } else {
8343 store_reg(s, i, tmp);
8344 }
8345 } else {
8346 /* Store. */
8347 tmp = load_reg(s, i);
8348 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
8349 tcg_temp_free_i32(tmp);
8350 }
8351 tcg_gen_addi_i32(addr, addr, 4);
8352 }
8353 if (loaded_base) {
8354 store_reg(s, rn, loaded_var);
8355 }
8356 if (insn & (1 << 21)) {
8357 /* Base register writeback. */
8358 if (insn & (1 << 24)) {
8359 tcg_gen_addi_i32(addr, addr, -offset);
8360 }
8361 /* Fault if writeback register is in register list. */
8362 if (insn & (1 << rn))
8363 goto illegal_op;
8364 store_reg(s, rn, addr);
8365 } else {
8366 tcg_temp_free_i32(addr);
8367 }
8368 }
8369 }
8370 break;
8371 case 5:
8372
8373 op = (insn >> 21) & 0xf;
8374 if (op == 6) {
8375 /* Halfword pack. */
8376 tmp = load_reg(s, rn);
8377 tmp2 = load_reg(s, rm);
8378 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8379 if (insn & (1 << 5)) {
8380 /* pkhtb */
8381 if (shift == 0)
8382 shift = 31;
8383 tcg_gen_sari_i32(tmp2, tmp2, shift);
8384 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8385 tcg_gen_ext16u_i32(tmp2, tmp2);
8386 } else {
8387 /* pkhbt */
8388 if (shift)
8389 tcg_gen_shli_i32(tmp2, tmp2, shift);
8390 tcg_gen_ext16u_i32(tmp, tmp);
8391 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8392 }
8393 tcg_gen_or_i32(tmp, tmp, tmp2);
8394 tcg_temp_free_i32(tmp2);
8395 store_reg(s, rd, tmp);
8396 } else {
8397 /* Data processing register constant shift. */
8398 if (rn == 15) {
8399 tmp = tcg_temp_new_i32();
8400 tcg_gen_movi_i32(tmp, 0);
8401 } else {
8402 tmp = load_reg(s, rn);
8403 }
8404 tmp2 = load_reg(s, rm);
8405
8406 shiftop = (insn >> 4) & 3;
8407 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8408 conds = (insn & (1 << 20)) != 0;
8409 logic_cc = (conds && thumb2_logic_op(op));
8410 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8411 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8412 goto illegal_op;
8413 tcg_temp_free_i32(tmp2);
8414 if (rd != 15) {
8415 store_reg(s, rd, tmp);
8416 } else {
8417 tcg_temp_free_i32(tmp);
8418 }
8419 }
8420 break;
8421 case 13: /* Misc data processing. */
8422 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8423 if (op < 4 && (insn & 0xf000) != 0xf000)
8424 goto illegal_op;
8425 switch (op) {
8426 case 0: /* Register controlled shift. */
8427 tmp = load_reg(s, rn);
8428 tmp2 = load_reg(s, rm);
8429 if ((insn & 0x70) != 0)
8430 goto illegal_op;
8431 op = (insn >> 21) & 3;
8432 logic_cc = (insn & (1 << 20)) != 0;
8433 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8434 if (logic_cc)
8435 gen_logic_CC(tmp);
8436 store_reg_bx(env, s, rd, tmp);
8437 break;
8438 case 1: /* Sign/zero extend. */
8439 tmp = load_reg(s, rm);
8440 shift = (insn >> 4) & 3;
8441 /* ??? In many cases it's not necessary to do a
8442 rotate, a shift is sufficient. */
8443 if (shift != 0)
8444 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8445 op = (insn >> 20) & 7;
8446 switch (op) {
8447 case 0: gen_sxth(tmp); break;
8448 case 1: gen_uxth(tmp); break;
8449 case 2: gen_sxtb16(tmp); break;
8450 case 3: gen_uxtb16(tmp); break;
8451 case 4: gen_sxtb(tmp); break;
8452 case 5: gen_uxtb(tmp); break;
8453 default: goto illegal_op;
8454 }
8455 if (rn != 15) {
8456 tmp2 = load_reg(s, rn);
8457 if ((op >> 1) == 1) {
8458 gen_add16(tmp, tmp2);
8459 } else {
8460 tcg_gen_add_i32(tmp, tmp, tmp2);
8461 tcg_temp_free_i32(tmp2);
8462 }
8463 }
8464 store_reg(s, rd, tmp);
8465 break;
8466 case 2: /* SIMD add/subtract. */
8467 op = (insn >> 20) & 7;
8468 shift = (insn >> 4) & 7;
8469 if ((op & 3) == 3 || (shift & 3) == 3)
8470 goto illegal_op;
8471 tmp = load_reg(s, rn);
8472 tmp2 = load_reg(s, rm);
8473 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8474 tcg_temp_free_i32(tmp2);
8475 store_reg(s, rd, tmp);
8476 break;
8477 case 3: /* Other data processing. */
8478 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8479 if (op < 4) {
8480 /* Saturating add/subtract. */
8481 tmp = load_reg(s, rn);
8482 tmp2 = load_reg(s, rm);
8483 if (op & 1)
8484 gen_helper_double_saturate(tmp, cpu_env, tmp);
8485 if (op & 2)
8486 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
8487 else
8488 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8489 tcg_temp_free_i32(tmp2);
8490 } else {
8491 tmp = load_reg(s, rn);
8492 switch (op) {
8493 case 0x0a: /* rbit */
8494 gen_helper_rbit(tmp, tmp);
8495 break;
8496 case 0x08: /* rev */
8497 tcg_gen_bswap32_i32(tmp, tmp);
8498 break;
8499 case 0x09: /* rev16 */
8500 gen_rev16(tmp);
8501 break;
8502 case 0x0b: /* revsh */
8503 gen_revsh(tmp);
8504 break;
8505 case 0x10: /* sel */
8506 tmp2 = load_reg(s, rm);
8507 tmp3 = tcg_temp_new_i32();
8508 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8509 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8510 tcg_temp_free_i32(tmp3);
8511 tcg_temp_free_i32(tmp2);
8512 break;
8513 case 0x18: /* clz */
8514 gen_helper_clz(tmp, tmp);
8515 break;
8516 default:
8517 goto illegal_op;
8518 }
8519 }
8520 store_reg(s, rd, tmp);
8521 break;
8522 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8523 op = (insn >> 4) & 0xf;
8524 tmp = load_reg(s, rn);
8525 tmp2 = load_reg(s, rm);
8526 switch ((insn >> 20) & 7) {
8527 case 0: /* 32 x 32 -> 32 */
8528 tcg_gen_mul_i32(tmp, tmp, tmp2);
8529 tcg_temp_free_i32(tmp2);
8530 if (rs != 15) {
8531 tmp2 = load_reg(s, rs);
8532 if (op)
8533 tcg_gen_sub_i32(tmp, tmp2, tmp);
8534 else
8535 tcg_gen_add_i32(tmp, tmp, tmp2);
8536 tcg_temp_free_i32(tmp2);
8537 }
8538 break;
8539 case 1: /* 16 x 16 -> 32 */
8540 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8541 tcg_temp_free_i32(tmp2);
8542 if (rs != 15) {
8543 tmp2 = load_reg(s, rs);
8544 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8545 tcg_temp_free_i32(tmp2);
8546 }
8547 break;
8548 case 2: /* Dual multiply add. */
8549 case 4: /* Dual multiply subtract. */
8550 if (op)
8551 gen_swap_half(tmp2);
8552 gen_smul_dual(tmp, tmp2);
8553 if (insn & (1 << 22)) {
8554 /* This subtraction cannot overflow. */
8555 tcg_gen_sub_i32(tmp, tmp, tmp2);
8556 } else {
8557 /* This addition cannot overflow 32 bits;
8558 * however it may overflow considered as a signed
8559 * operation, in which case we must set the Q flag.
8560 */
8561 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8562 }
8563 tcg_temp_free_i32(tmp2);
8564 if (rs != 15)
8565 {
8566 tmp2 = load_reg(s, rs);
8567 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8568 tcg_temp_free_i32(tmp2);
8569 }
8570 break;
8571 case 3: /* 32 * 16 -> 32msb */
8572 if (op)
8573 tcg_gen_sari_i32(tmp2, tmp2, 16);
8574 else
8575 gen_sxth(tmp2);
8576 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8577 tcg_gen_shri_i64(tmp64, tmp64, 16);
8578 tmp = tcg_temp_new_i32();
8579 tcg_gen_trunc_i64_i32(tmp, tmp64);
8580 tcg_temp_free_i64(tmp64);
8581 if (rs != 15)
8582 {
8583 tmp2 = load_reg(s, rs);
8584 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8585 tcg_temp_free_i32(tmp2);
8586 }
8587 break;
8588 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8589 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8590 if (rs != 15) {
8591 tmp = load_reg(s, rs);
8592 if (insn & (1 << 20)) {
8593 tmp64 = gen_addq_msw(tmp64, tmp);
8594 } else {
8595 tmp64 = gen_subq_msw(tmp64, tmp);
8596 }
8597 }
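/* Bit 4 is the rounding bit (SMMULR/SMMLAR/SMMLSR): adding 0x80000000
   before taking the high word rounds the result instead of truncating. */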
8598 if (insn & (1 << 4)) {
8599 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8600 }
8601 tcg_gen_shri_i64(tmp64, tmp64, 32);
8602 tmp = tcg_temp_new_i32();
8603 tcg_gen_trunc_i64_i32(tmp, tmp64);
8604 tcg_temp_free_i64(tmp64);
8605 break;
8606 case 7: /* Unsigned sum of absolute differences. */
8607 gen_helper_usad8(tmp, tmp, tmp2);
8608 tcg_temp_free_i32(tmp2);
8609 if (rs != 15) {
8610 tmp2 = load_reg(s, rs);
8611 tcg_gen_add_i32(tmp, tmp, tmp2);
8612 tcg_temp_free_i32(tmp2);
8613 }
8614 break;
8615 }
8616 store_reg(s, rd, tmp);
8617 break;
8618 case 6: case 7: /* 64-bit multiply, Divide. */
8619 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
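/* op now holds op2 (insn[7:4]) in bits [3:0] and op1 (insn[22:20]) in
   bits [6:4], which the tests below use to pick out sdiv/udiv, the dual
   multiplies and the long multiply-accumulate forms. */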
8620 tmp = load_reg(s, rn);
8621 tmp2 = load_reg(s, rm);
8622 if ((op & 0x50) == 0x10) {
8623 /* sdiv, udiv */
8624 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
8625 goto illegal_op;
8626 }
8627 if (op & 0x20)
8628 gen_helper_udiv(tmp, tmp, tmp2);
8629 else
8630 gen_helper_sdiv(tmp, tmp, tmp2);
8631 tcg_temp_free_i32(tmp2);
8632 store_reg(s, rd, tmp);
8633 } else if ((op & 0xe) == 0xc) {
8634 /* Dual multiply accumulate long. */
8635 if (op & 1)
8636 gen_swap_half(tmp2);
8637 gen_smul_dual(tmp, tmp2);
8638 if (op & 0x10) {
8639 tcg_gen_sub_i32(tmp, tmp, tmp2);
8640 } else {
8641 tcg_gen_add_i32(tmp, tmp, tmp2);
8642 }
8643 tcg_temp_free_i32(tmp2);
8644 /* BUGFIX */
8645 tmp64 = tcg_temp_new_i64();
8646 tcg_gen_ext_i32_i64(tmp64, tmp);
8647 tcg_temp_free_i32(tmp);
8648 gen_addq(s, tmp64, rs, rd);
8649 gen_storeq_reg(s, rs, rd, tmp64);
8650 tcg_temp_free_i64(tmp64);
8651 } else {
8652 if (op & 0x20) {
8653 /* Unsigned 64-bit multiply */
8654 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8655 } else {
8656 if (op & 8) {
8657 /* smlalxy */
8658 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8659 tcg_temp_free_i32(tmp2);
8660 tmp64 = tcg_temp_new_i64();
8661 tcg_gen_ext_i32_i64(tmp64, tmp);
8662 tcg_temp_free_i32(tmp);
8663 } else {
8664 /* Signed 64-bit multiply */
8665 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8666 }
8667 }
8668 if (op & 4) {
8669 /* umaal */
8670 gen_addq_lo(s, tmp64, rs);
8671 gen_addq_lo(s, tmp64, rd);
8672 } else if (op & 0x40) {
8673 /* 64-bit accumulate. */
8674 gen_addq(s, tmp64, rs, rd);
8675 }
8676 gen_storeq_reg(s, rs, rd, tmp64);
8677 tcg_temp_free_i64(tmp64);
8678 }
8679 break;
8680 }
8681 break;
8682 case 6: case 7: case 14: case 15:
8683 /* Coprocessor. */
8684 if (((insn >> 24) & 3) == 3) {
8685 /* Translate into the equivalent ARM encoding. */
8686 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
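/* This moves the U bit from Thumb bit 28 down to ARM bit 24 and forces
   the remaining top bits so the insn matches the ARM Advanced SIMD
   data-processing encoding that disas_neon_data_insn() expects. */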
8687 if (disas_neon_data_insn(env, s, insn))
8688 goto illegal_op;
8689 } else {
8690 if (insn & (1 << 28))
8691 goto illegal_op;
8692 if (disas_coproc_insn (env, s, insn))
8693 goto illegal_op;
8694 }
8695 break;
8696 case 8: case 9: case 10: case 11:
8697 if (insn & (1 << 15)) {
8698 /* Branches, misc control. */
8699 if (insn & 0x5000) {
8700 /* Unconditional branch. */
8701 /* signextend(hw1[10:0]) -> offset[31:12]. */
8702 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8703 /* hw1[10:0] -> offset[11:1]. */
8704 offset |= (insn & 0x7ff) << 1;
8705 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8706 offset[24:22] already have the same value because of the
8707 sign extension above. */
8708 offset ^= ((~insn) & (1 << 13)) << 10;
8709 offset ^= ((~insn) & (1 << 11)) << 11;
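/* offset now equals SignExtend(S:I1:I2:imm10:imm11:'0'), with
   I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S) as in the architecture manual. */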
8710
8711 if (insn & (1 << 14)) {
8712 /* Branch and link. */
8713 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8714 }
8715
8716 offset += s->pc;
8717 if (insn & (1 << 12)) {
8718 /* b/bl */
8719 gen_jmp(s, offset);
8720 } else {
8721 /* blx */
8722 offset &= ~(uint32_t)2;
8723 /* thumb2 bx, no need to check */
8724 gen_bx_im(s, offset);
8725 }
8726 } else if (((insn >> 23) & 7) == 7) {
8727 /* Misc control */
8728 if (insn & (1 << 13))
8729 goto illegal_op;
8730
8731 if (insn & (1 << 26)) {
8732 /* Secure monitor call (v6Z) */
8733 goto illegal_op; /* not implemented. */
8734 } else {
8735 op = (insn >> 20) & 7;
8736 switch (op) {
8737 case 0: /* msr cpsr. */
8738 if (IS_M(env)) {
8739 tmp = load_reg(s, rn);
8740 addr = tcg_const_i32(insn & 0xff);
8741 gen_helper_v7m_msr(cpu_env, addr, tmp);
8742 tcg_temp_free_i32(addr);
8743 tcg_temp_free_i32(tmp);
8744 gen_lookup_tb(s);
8745 break;
8746 }
8747 /* fall through */
8748 case 1: /* msr spsr. */
8749 if (IS_M(env))
8750 goto illegal_op;
8751 tmp = load_reg(s, rn);
8752 if (gen_set_psr(s,
8753 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8754 op == 1, tmp))
8755 goto illegal_op;
8756 break;
8757 case 2: /* cps, nop-hint. */
8758 if (((insn >> 8) & 7) == 0) {
8759 gen_nop_hint(s, insn & 0xff);
8760 }
8761 /* Implemented as NOP in user mode. */
8762 if (IS_USER(s))
8763 break;
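/* Build the mask (offset) and new values (imm) for gen_set_psr_im():
   bit 10 says the A/I/F masks change, bit 9 selects CPSID (set them)
   rather than CPSIE (clear them), and bit 8 adds a mode change to
   mode[4:0]. */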
8764 offset = 0;
8765 imm = 0;
8766 if (insn & (1 << 10)) {
8767 if (insn & (1 << 7))
8768 offset |= CPSR_A;
8769 if (insn & (1 << 6))
8770 offset |= CPSR_I;
8771 if (insn & (1 << 5))
8772 offset |= CPSR_F;
8773 if (insn & (1 << 9))
8774 imm = CPSR_A | CPSR_I | CPSR_F;
8775 }
8776 if (insn & (1 << 8)) {
8777 offset |= 0x1f;
8778 imm |= (insn & 0x1f);
8779 }
8780 if (offset) {
8781 gen_set_psr_im(s, offset, 0, imm);
8782 }
8783 break;
8784 case 3: /* Special control operations. */
8785 ARCH(7);
8786 op = (insn >> 4) & 0xf;
8787 switch (op) {
8788 case 2: /* clrex */
8789 gen_clrex(s);
8790 break;
8791 case 4: /* dsb */
8792 case 5: /* dmb */
8793 case 6: /* isb */
8794 /* These execute as NOPs. */
8795 break;
8796 default:
8797 goto illegal_op;
8798 }
8799 break;
8800 case 4: /* bxj */
8801 /* Trivial implementation equivalent to bx. */
8802 tmp = load_reg(s, rn);
8803 gen_bx(s, tmp);
8804 break;
8805 case 5: /* Exception return. */
8806 if (IS_USER(s)) {
8807 goto illegal_op;
8808 }
8809 if (rn != 14 || rd != 15) {
8810 goto illegal_op;
8811 }
8812 tmp = load_reg(s, rn);
8813 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8814 gen_exception_return(s, tmp);
8815 break;
8816 case 6: /* mrs cpsr. */
8817 tmp = tcg_temp_new_i32();
8818 if (IS_M(env)) {
8819 addr = tcg_const_i32(insn & 0xff);
8820 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8821 tcg_temp_free_i32(addr);
8822 } else {
8823 gen_helper_cpsr_read(tmp, cpu_env);
8824 }
8825 store_reg(s, rd, tmp);
8826 break;
8827 case 7: /* mrs spsr. */
8828 /* Not accessible in user mode. */
8829 if (IS_USER(s) || IS_M(env))
8830 goto illegal_op;
8831 tmp = load_cpu_field(spsr);
8832 store_reg(s, rd, tmp);
8833 break;
8834 }
8835 }
8836 } else {
8837 /* Conditional branch. */
8838 op = (insn >> 22) & 0xf;
8839 /* Generate a conditional jump to next instruction. */
8840 s->condlabel = gen_new_label();
8841 gen_test_cc(op ^ 1, s->condlabel);
8842 s->condjmp = 1;
8843
8844 /* offset[11:1] = insn[10:0] */
8845 offset = (insn & 0x7ff) << 1;
8846 /* offset[17:12] = insn[21:16]. */
8847 offset |= (insn & 0x003f0000) >> 4;
8848 /* offset[31:20] = insn[26]. */
8849 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8850 /* offset[18] = insn[13]. */
8851 offset |= (insn & (1 << 13)) << 5;
8852 /* offset[19] = insn[11]. */
8853 offset |= (insn & (1 << 11)) << 8;
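/* offset now equals SignExtend(S:J2:J1:imm6:imm11:'0'), the T3
   conditional branch immediate. */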
8854
8855 /* jump to the offset */
8856 gen_jmp(s, s->pc + offset);
8857 }
8858 } else {
8859 /* Data processing immediate. */
8860 if (insn & (1 << 25)) {
8861 if (insn & (1 << 24)) {
8862 if (insn & (1 << 20))
8863 goto illegal_op;
8864 /* Bitfield/Saturate. */
8865 op = (insn >> 21) & 7;
8866 imm = insn & 0x1f;
8867 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8868 if (rn == 15) {
8869 tmp = tcg_temp_new_i32();
8870 tcg_gen_movi_i32(tmp, 0);
8871 } else {
8872 tmp = load_reg(s, rn);
8873 }
8874 switch (op) {
8875 case 2: /* Signed bitfield extract. */
8876 imm++;
8877 if (shift + imm > 32)
8878 goto illegal_op;
8879 if (imm < 32)
8880 gen_sbfx(tmp, shift, imm);
8881 break;
8882 case 6: /* Unsigned bitfield extract. */
8883 imm++;
8884 if (shift + imm > 32)
8885 goto illegal_op;
8886 if (imm < 32)
8887 gen_ubfx(tmp, shift, (1u << imm) - 1);
8888 break;
8889 case 3: /* Bitfield insert/clear. */
8890 if (imm < shift)
8891 goto illegal_op;
8892 imm = imm + 1 - shift;
8893 if (imm != 32) {
8894 tmp2 = load_reg(s, rd);
8895 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
8896 tcg_temp_free_i32(tmp2);
8897 }
8898 break;
8899 case 7:
8900 goto illegal_op;
8901 default: /* Saturate. */
8902 if (shift) {
8903 if (op & 1)
8904 tcg_gen_sari_i32(tmp, tmp, shift);
8905 else
8906 tcg_gen_shli_i32(tmp, tmp, shift);
8907 }
8908 tmp2 = tcg_const_i32(imm);
8909 if (op & 4) {
8910 /* Unsigned. */
8911 if ((op & 1) && shift == 0)
8912 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8913 else
8914 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8915 } else {
8916 /* Signed. */
8917 if ((op & 1) && shift == 0)
8918 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8919 else
8920 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8921 }
8922 tcg_temp_free_i32(tmp2);
8923 break;
8924 }
8925 store_reg(s, rd, tmp);
8926 } else {
8927 imm = ((insn & 0x04000000) >> 15)
8928 | ((insn & 0x7000) >> 4) | (insn & 0xff);
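/* imm is i:imm3:imm8; for the 16-bit immediate forms below, imm4
   (insn[19:16]) is OR'ed in as bits [15:12]. */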
8929 if (insn & (1 << 22)) {
8930 /* 16-bit immediate. */
8931 imm |= (insn >> 4) & 0xf000;
8932 if (insn & (1 << 23)) {
8933 /* movt */
8934 tmp = load_reg(s, rd);
8935 tcg_gen_ext16u_i32(tmp, tmp);
8936 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8937 } else {
8938 /* movw */
8939 tmp = tcg_temp_new_i32();
8940 tcg_gen_movi_i32(tmp, imm);
8941 }
8942 } else {
8943 /* Add/sub 12-bit immediate. */
8944 if (rn == 15) {
8945 offset = s->pc & ~(uint32_t)3;
8946 if (insn & (1 << 23))
8947 offset -= imm;
8948 else
8949 offset += imm;
8950 tmp = tcg_temp_new_i32();
8951 tcg_gen_movi_i32(tmp, offset);
8952 } else {
8953 tmp = load_reg(s, rn);
8954 if (insn & (1 << 23))
8955 tcg_gen_subi_i32(tmp, tmp, imm);
8956 else
8957 tcg_gen_addi_i32(tmp, tmp, imm);
8958 }
8959 }
8960 store_reg(s, rd, tmp);
8961 }
8962 } else {
8963 int shifter_out = 0;
8964 /* modified 12-bit immediate. */
8965 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8966 imm = (insn & 0xff);
8967 switch (shift) {
8968 case 0: /* XY */
8969 /* Nothing to do. */
8970 break;
8971 case 1: /* 00XY00XY */
8972 imm |= imm << 16;
8973 break;
8974 case 2: /* XY00XY00 */
8975 imm |= imm << 16;
8976 imm <<= 8;
8977 break;
8978 case 3: /* XYXYXYXY */
8979 imm |= imm << 16;
8980 imm |= imm << 8;
8981 break;
8982 default: /* Rotated constant. */
8983 shift = (shift << 1) | (imm >> 7);
8984 imm |= 0x80;
8985 imm = imm << (32 - shift);
8986 shifter_out = 1;
8987 break;
8988 }
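/* Thumb-2 modified immediate: i:imm3 values 0-3 replicate imm8 across
   the word; otherwise (0x80 | imm8[6:0]) is rotated right by the 5-bit
   amount i:imm3:imm8<7>, e.g. i:imm3 = 0b1000, imm8 = 0x60 yields
   0xE0 ROR 16 = 0x00E00000. */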
8989 tmp2 = tcg_temp_new_i32();
8990 tcg_gen_movi_i32(tmp2, imm);
8991 rn = (insn >> 16) & 0xf;
8992 if (rn == 15) {
8993 tmp = tcg_temp_new_i32();
8994 tcg_gen_movi_i32(tmp, 0);
8995 } else {
8996 tmp = load_reg(s, rn);
8997 }
8998 op = (insn >> 21) & 0xf;
8999 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
9000 shifter_out, tmp, tmp2))
9001 goto illegal_op;
9002 tcg_temp_free_i32(tmp2);
9003 rd = (insn >> 8) & 0xf;
9004 if (rd != 15) {
9005 store_reg(s, rd, tmp);
9006 } else {
9007 tcg_temp_free_i32(tmp);
9008 }
9009 }
9010 }
9011 break;
9012 case 12: /* Load/store single data item. */
9013 {
9014 int postinc = 0;
9015 int writeback = 0;
9016 int user;
9017 if ((insn & 0x01100000) == 0x01000000) {
9018 if (disas_neon_ls_insn(env, s, insn))
9019 goto illegal_op;
9020 break;
9021 }
9022 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9023 if (rs == 15) {
9024 if (!(insn & (1 << 20))) {
9025 goto illegal_op;
9026 }
9027 if (op != 2) {
9028 /* Byte or halfword load space with dest == r15 : memory hints.
9029 * Catch them early so we don't emit pointless addressing code.
9030 * This space is a mix of:
9031 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9032 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9033 * cores)
9034 * unallocated hints, which must be treated as NOPs
9035 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9036 * which is easiest for the decoding logic
9037 * Some space which must UNDEF
9038 */
9039 int op1 = (insn >> 23) & 3;
9040 int op2 = (insn >> 6) & 0x3f;
9041 if (op & 2) {
9042 goto illegal_op;
9043 }
9044 if (rn == 15) {
9045 /* UNPREDICTABLE, unallocated hint or
9046 * PLD/PLDW/PLI (literal)
9047 */
9048 return 0;
9049 }
9050 if (op1 & 1) {
9051 return 0; /* PLD/PLDW/PLI or unallocated hint */
9052 }
9053 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
9054 return 0; /* PLD/PLDW/PLI or unallocated hint */
9055 }
9056 /* UNDEF space, or an UNPREDICTABLE */
9057 return 1;
9058 }
9059 }
9060 user = IS_USER(s);
9061 if (rn == 15) {
9062 addr = tcg_temp_new_i32();
9063 /* PC relative. */
9064 /* s->pc has already been incremented by 4. */
9065 imm = s->pc & 0xfffffffc;
9066 if (insn & (1 << 23))
9067 imm += insn & 0xfff;
9068 else
9069 imm -= insn & 0xfff;
9070 tcg_gen_movi_i32(addr, imm);
9071 } else {
9072 addr = load_reg(s, rn);
9073 if (insn & (1 << 23)) {
9074 /* Positive offset. */
9075 imm = insn & 0xfff;
9076 tcg_gen_addi_i32(addr, addr, imm);
9077 } else {
9078 imm = insn & 0xff;
9079 switch ((insn >> 8) & 0xf) {
9080 case 0x0: /* Shifted Register. */
9081 shift = (insn >> 4) & 0xf;
9082 if (shift > 3) {
9083 tcg_temp_free_i32(addr);
9084 goto illegal_op;
9085 }
9086 tmp = load_reg(s, rm);
9087 if (shift)
9088 tcg_gen_shli_i32(tmp, tmp, shift);
9089 tcg_gen_add_i32(addr, addr, tmp);
9090 tcg_temp_free_i32(tmp);
9091 break;
9092 case 0xc: /* Negative offset. */
9093 tcg_gen_addi_i32(addr, addr, -imm);
9094 break;
9095 case 0xe: /* User privilege. */
9096 tcg_gen_addi_i32(addr, addr, imm);
9097 user = 1;
9098 break;
9099 case 0x9: /* Post-decrement. */
9100 imm = -imm;
9101 /* Fall through. */
9102 case 0xb: /* Post-increment. */
9103 postinc = 1;
9104 writeback = 1;
9105 break;
9106 case 0xd: /* Pre-decrement. */
9107 imm = -imm;
9108 /* Fall through. */
9109 case 0xf: /* Pre-increment. */
9110 tcg_gen_addi_i32(addr, addr, imm);
9111 writeback = 1;
9112 break;
9113 default:
9114 tcg_temp_free_i32(addr);
9115 goto illegal_op;
9116 }
9117 }
9118 }
9119 if (insn & (1 << 20)) {
9120 /* Load. */
9121 tmp = tcg_temp_new_i32();
9122 switch (op) {
9123 case 0:
9124 tcg_gen_qemu_ld8u(tmp, addr, user);
9125 break;
9126 case 4:
9127 tcg_gen_qemu_ld8s(tmp, addr, user);
9128 break;
9129 case 1:
9130 tcg_gen_qemu_ld16u(tmp, addr, user);
9131 break;
9132 case 5:
9133 tcg_gen_qemu_ld16s(tmp, addr, user);
9134 break;
9135 case 2:
9136 tcg_gen_qemu_ld32u(tmp, addr, user);
9137 break;
9138 default:
9139 tcg_temp_free_i32(tmp);
9140 tcg_temp_free_i32(addr);
9141 goto illegal_op;
9142 }
9143 if (rs == 15) {
9144 gen_bx(s, tmp);
9145 } else {
9146 store_reg(s, rs, tmp);
9147 }
9148 } else {
9149 /* Store. */
9150 tmp = load_reg(s, rs);
9151 switch (op) {
9152 case 0:
9153 tcg_gen_qemu_st8(tmp, addr, user);
9154 break;
9155 case 1:
9156 tcg_gen_qemu_st16(tmp, addr, user);
9157 break;
9158 case 2:
9159 tcg_gen_qemu_st32(tmp, addr, user);
9160 break;
9161 default:
9162 tcg_temp_free_i32(tmp);
9163 tcg_temp_free_i32(addr);
9164 goto illegal_op;
9165 }
9166 tcg_temp_free_i32(tmp);
9167 }
9168 if (postinc)
9169 tcg_gen_addi_i32(addr, addr, imm);
9170 if (writeback) {
9171 store_reg(s, rn, addr);
9172 } else {
9173 tcg_temp_free_i32(addr);
9174 }
9175 }
9176 break;
9177 default:
9178 goto illegal_op;
9179 }
9180 return 0;
9181 illegal_op:
9182 return 1;
9183 }
9184
9185 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
9186 {
9187 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9188 int32_t offset;
9189 int i;
9190 TCGv_i32 tmp;
9191 TCGv_i32 tmp2;
9192 TCGv_i32 addr;
9193
9194 if (s->condexec_mask) {
9195 cond = s->condexec_cond;
9196 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9197 s->condlabel = gen_new_label();
9198 gen_test_cc(cond ^ 1, s->condlabel);
9199 s->condjmp = 1;
9200 }
9201 }
9202
9203 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9204 s->pc += 2;
9205
9206 switch (insn >> 12) {
9207 case 0: case 1:
9208
9209 rd = insn & 7;
9210 op = (insn >> 11) & 3;
9211 if (op == 3) {
9212 /* add/subtract */
9213 rn = (insn >> 3) & 7;
9214 tmp = load_reg(s, rn);
9215 if (insn & (1 << 10)) {
9216 /* immediate */
9217 tmp2 = tcg_temp_new_i32();
9218 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
9219 } else {
9220 /* reg */
9221 rm = (insn >> 6) & 7;
9222 tmp2 = load_reg(s, rm);
9223 }
9224 if (insn & (1 << 9)) {
9225 if (s->condexec_mask)
9226 tcg_gen_sub_i32(tmp, tmp, tmp2);
9227 else
9228 gen_sub_CC(tmp, tmp, tmp2);
9229 } else {
9230 if (s->condexec_mask)
9231 tcg_gen_add_i32(tmp, tmp, tmp2);
9232 else
9233 gen_add_CC(tmp, tmp, tmp2);
9234 }
9235 tcg_temp_free_i32(tmp2);
9236 store_reg(s, rd, tmp);
9237 } else {
9238 /* shift immediate */
9239 rm = (insn >> 3) & 7;
9240 shift = (insn >> 6) & 0x1f;
9241 tmp = load_reg(s, rm);
9242 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9243 if (!s->condexec_mask)
9244 gen_logic_CC(tmp);
9245 store_reg(s, rd, tmp);
9246 }
9247 break;
9248 case 2: case 3:
9249 /* arithmetic large immediate */
9250 op = (insn >> 11) & 3;
9251 rd = (insn >> 8) & 0x7;
9252 if (op == 0) { /* mov */
9253 tmp = tcg_temp_new_i32();
9254 tcg_gen_movi_i32(tmp, insn & 0xff);
9255 if (!s->condexec_mask)
9256 gen_logic_CC(tmp);
9257 store_reg(s, rd, tmp);
9258 } else {
9259 tmp = load_reg(s, rd);
9260 tmp2 = tcg_temp_new_i32();
9261 tcg_gen_movi_i32(tmp2, insn & 0xff);
9262 switch (op) {
9263 case 1: /* cmp */
9264 gen_sub_CC(tmp, tmp, tmp2);
9265 tcg_temp_free_i32(tmp);
9266 tcg_temp_free_i32(tmp2);
9267 break;
9268 case 2: /* add */
9269 if (s->condexec_mask)
9270 tcg_gen_add_i32(tmp, tmp, tmp2);
9271 else
9272 gen_add_CC(tmp, tmp, tmp2);
9273 tcg_temp_free_i32(tmp2);
9274 store_reg(s, rd, tmp);
9275 break;
9276 case 3: /* sub */
9277 if (s->condexec_mask)
9278 tcg_gen_sub_i32(tmp, tmp, tmp2);
9279 else
9280 gen_sub_CC(tmp, tmp, tmp2);
9281 tcg_temp_free_i32(tmp2);
9282 store_reg(s, rd, tmp);
9283 break;
9284 }
9285 }
9286 break;
9287 case 4:
9288 if (insn & (1 << 11)) {
9289 rd = (insn >> 8) & 7;
9290 /* load pc-relative. Bit 1 of PC is ignored. */
9291 val = s->pc + 2 + ((insn & 0xff) * 4);
9292 val &= ~(uint32_t)2;
9293 addr = tcg_temp_new_i32();
9294 tcg_gen_movi_i32(addr, val);
9295 tmp = tcg_temp_new_i32();
9296 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9297 tcg_temp_free_i32(addr);
9298 store_reg(s, rd, tmp);
9299 break;
9300 }
9301 if (insn & (1 << 10)) {
9302 /* data processing extended or blx */
9303 rd = (insn & 7) | ((insn >> 4) & 8);
9304 rm = (insn >> 3) & 0xf;
9305 op = (insn >> 8) & 3;
9306 switch (op) {
9307 case 0: /* add */
9308 tmp = load_reg(s, rd);
9309 tmp2 = load_reg(s, rm);
9310 tcg_gen_add_i32(tmp, tmp, tmp2);
9311 tcg_temp_free_i32(tmp2);
9312 store_reg(s, rd, tmp);
9313 break;
9314 case 1: /* cmp */
9315 tmp = load_reg(s, rd);
9316 tmp2 = load_reg(s, rm);
9317 gen_sub_CC(tmp, tmp, tmp2);
9318 tcg_temp_free_i32(tmp2);
9319 tcg_temp_free_i32(tmp);
9320 break;
9321 case 2: /* mov/cpy */
9322 tmp = load_reg(s, rm);
9323 store_reg(s, rd, tmp);
9324 break;
9325 case 3:/* branch [and link] exchange thumb register */
9326 tmp = load_reg(s, rm);
9327 if (insn & (1 << 7)) {
9328 ARCH(5);
9329 val = (uint32_t)s->pc | 1;
9330 tmp2 = tcg_temp_new_i32();
9331 tcg_gen_movi_i32(tmp2, val);
9332 store_reg(s, 14, tmp2);
9333 }
9334 /* already thumb, no need to check */
9335 gen_bx(s, tmp);
9336 break;
9337 }
9338 break;
9339 }
9340
9341 /* data processing register */
9342 rd = insn & 7;
9343 rm = (insn >> 3) & 7;
9344 op = (insn >> 6) & 0xf;
9345 if (op == 2 || op == 3 || op == 4 || op == 7) {
9346 /* the shift/rotate ops want the operands backwards */
9347 val = rm;
9348 rm = rd;
9349 rd = val;
9350 val = 1;
9351 } else {
9352 val = 0;
9353 }
9354
9355 if (op == 9) { /* neg */
9356 tmp = tcg_temp_new_i32();
9357 tcg_gen_movi_i32(tmp, 0);
9358 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9359 tmp = load_reg(s, rd);
9360 } else {
9361 TCGV_UNUSED_I32(tmp);
9362 }
9363
9364 tmp2 = load_reg(s, rm);
9365 switch (op) {
9366 case 0x0: /* and */
9367 tcg_gen_and_i32(tmp, tmp, tmp2);
9368 if (!s->condexec_mask)
9369 gen_logic_CC(tmp);
9370 break;
9371 case 0x1: /* eor */
9372 tcg_gen_xor_i32(tmp, tmp, tmp2);
9373 if (!s->condexec_mask)
9374 gen_logic_CC(tmp);
9375 break;
9376 case 0x2: /* lsl */
9377 if (s->condexec_mask) {
9378 gen_shl(tmp2, tmp2, tmp);
9379 } else {
9380 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
9381 gen_logic_CC(tmp2);
9382 }
9383 break;
9384 case 0x3: /* lsr */
9385 if (s->condexec_mask) {
9386 gen_shr(tmp2, tmp2, tmp);
9387 } else {
9388 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
9389 gen_logic_CC(tmp2);
9390 }
9391 break;
9392 case 0x4: /* asr */
9393 if (s->condexec_mask) {
9394 gen_sar(tmp2, tmp2, tmp);
9395 } else {
9396 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
9397 gen_logic_CC(tmp2);
9398 }
9399 break;
9400 case 0x5: /* adc */
9401 if (s->condexec_mask) {
9402 gen_adc(tmp, tmp2);
9403 } else {
9404 gen_adc_CC(tmp, tmp, tmp2);
9405 }
9406 break;
9407 case 0x6: /* sbc */
9408 if (s->condexec_mask) {
9409 gen_sub_carry(tmp, tmp, tmp2);
9410 } else {
9411 gen_sbc_CC(tmp, tmp, tmp2);
9412 }
9413 break;
9414 case 0x7: /* ror */
9415 if (s->condexec_mask) {
9416 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9417 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9418 } else {
9419 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
9420 gen_logic_CC(tmp2);
9421 }
9422 break;
9423 case 0x8: /* tst */
9424 tcg_gen_and_i32(tmp, tmp, tmp2);
9425 gen_logic_CC(tmp);
9426 rd = 16;
9427 break;
9428 case 0x9: /* neg */
9429 if (s->condexec_mask)
9430 tcg_gen_neg_i32(tmp, tmp2);
9431 else
9432 gen_sub_CC(tmp, tmp, tmp2);
9433 break;
9434 case 0xa: /* cmp */
9435 gen_sub_CC(tmp, tmp, tmp2);
9436 rd = 16;
9437 break;
9438 case 0xb: /* cmn */
9439 gen_add_CC(tmp, tmp, tmp2);
9440 rd = 16;
9441 break;
9442 case 0xc: /* orr */
9443 tcg_gen_or_i32(tmp, tmp, tmp2);
9444 if (!s->condexec_mask)
9445 gen_logic_CC(tmp);
9446 break;
9447 case 0xd: /* mul */
9448 tcg_gen_mul_i32(tmp, tmp, tmp2);
9449 if (!s->condexec_mask)
9450 gen_logic_CC(tmp);
9451 break;
9452 case 0xe: /* bic */
9453 tcg_gen_andc_i32(tmp, tmp, tmp2);
9454 if (!s->condexec_mask)
9455 gen_logic_CC(tmp);
9456 break;
9457 case 0xf: /* mvn */
9458 tcg_gen_not_i32(tmp2, tmp2);
9459 if (!s->condexec_mask)
9460 gen_logic_CC(tmp2);
9461 val = 1;
9462 rm = rd;
9463 break;
9464 }
9465 if (rd != 16) {
9466 if (val) {
9467 store_reg(s, rm, tmp2);
9468 if (op != 0xf)
9469 tcg_temp_free_i32(tmp);
9470 } else {
9471 store_reg(s, rd, tmp);
9472 tcg_temp_free_i32(tmp2);
9473 }
9474 } else {
9475 tcg_temp_free_i32(tmp);
9476 tcg_temp_free_i32(tmp2);
9477 }
9478 break;
9479
9480 case 5:
9481 /* load/store register offset. */
9482 rd = insn & 7;
9483 rn = (insn >> 3) & 7;
9484 rm = (insn >> 6) & 7;
9485 op = (insn >> 9) & 7;
9486 addr = load_reg(s, rn);
9487 tmp = load_reg(s, rm);
9488 tcg_gen_add_i32(addr, addr, tmp);
9489 tcg_temp_free_i32(tmp);
9490
9491 if (op < 3) { /* store */
9492 tmp = load_reg(s, rd);
9493 } else {
9494 tmp = tcg_temp_new_i32();
9495 }
9496
9497 switch (op) {
9498 case 0: /* str */
9499 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9500 break;
9501 case 1: /* strh */
9502 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
9503 break;
9504 case 2: /* strb */
9505 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
9506 break;
9507 case 3: /* ldrsb */
9508 tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
9509 break;
9510 case 4: /* ldr */
9511 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9512 break;
9513 case 5: /* ldrh */
9514 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
9515 break;
9516 case 6: /* ldrb */
9517 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
9518 break;
9519 case 7: /* ldrsh */
9520 tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
9521 break;
9522 }
9523 if (op >= 3) { /* load */
9524 store_reg(s, rd, tmp);
9525 } else {
9526 tcg_temp_free_i32(tmp);
9527 }
9528 tcg_temp_free_i32(addr);
9529 break;
9530
9531 case 6:
9532 /* load/store word immediate offset */
9533 rd = insn & 7;
9534 rn = (insn >> 3) & 7;
9535 addr = load_reg(s, rn);
9536 val = (insn >> 4) & 0x7c;
9537 tcg_gen_addi_i32(addr, addr, val);
9538
9539 if (insn & (1 << 11)) {
9540 /* load */
9541 tmp = tcg_temp_new_i32();
9542 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9543 store_reg(s, rd, tmp);
9544 } else {
9545 /* store */
9546 tmp = load_reg(s, rd);
9547 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9548 tcg_temp_free_i32(tmp);
9549 }
9550 tcg_temp_free_i32(addr);
9551 break;
9552
9553 case 7:
9554 /* load/store byte immediate offset */
9555 rd = insn & 7;
9556 rn = (insn >> 3) & 7;
9557 addr = load_reg(s, rn);
9558 val = (insn >> 6) & 0x1f;
9559 tcg_gen_addi_i32(addr, addr, val);
9560
9561 if (insn & (1 << 11)) {
9562 /* load */
9563 tmp = tcg_temp_new_i32();
9564 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
9565 store_reg(s, rd, tmp);
9566 } else {
9567 /* store */
9568 tmp = load_reg(s, rd);
9569 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
9570 tcg_temp_free_i32(tmp);
9571 }
9572 tcg_temp_free_i32(addr);
9573 break;
9574
9575 case 8:
9576 /* load/store halfword immediate offset */
9577 rd = insn & 7;
9578 rn = (insn >> 3) & 7;
9579 addr = load_reg(s, rn);
9580 val = (insn >> 5) & 0x3e;
9581 tcg_gen_addi_i32(addr, addr, val);
9582
9583 if (insn & (1 << 11)) {
9584 /* load */
9585 tmp = tcg_temp_new_i32();
9586 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
9587 store_reg(s, rd, tmp);
9588 } else {
9589 /* store */
9590 tmp = load_reg(s, rd);
9591 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
9592 tcg_temp_free_i32(tmp);
9593 }
9594 tcg_temp_free_i32(addr);
9595 break;
9596
9597 case 9:
9598 /* load/store from stack */
9599 rd = (insn >> 8) & 7;
9600 addr = load_reg(s, 13);
9601 val = (insn & 0xff) * 4;
9602 tcg_gen_addi_i32(addr, addr, val);
9603
9604 if (insn & (1 << 11)) {
9605 /* load */
9606 tmp = tcg_temp_new_i32();
9607 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9608 store_reg(s, rd, tmp);
9609 } else {
9610 /* store */
9611 tmp = load_reg(s, rd);
9612 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9613 tcg_temp_free_i32(tmp);
9614 }
9615 tcg_temp_free_i32(addr);
9616 break;
9617
9618 case 10:
9619 /* add to high reg */
9620 rd = (insn >> 8) & 7;
9621 if (insn & (1 << 11)) {
9622 /* SP */
9623 tmp = load_reg(s, 13);
9624 } else {
9625 /* PC. bit 1 is ignored. */
9626 tmp = tcg_temp_new_i32();
9627 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9628 }
9629 val = (insn & 0xff) * 4;
9630 tcg_gen_addi_i32(tmp, tmp, val);
9631 store_reg(s, rd, tmp);
9632 break;
9633
9634 case 11:
9635 /* misc */
9636 op = (insn >> 8) & 0xf;
9637 switch (op) {
9638 case 0:
9639 /* adjust stack pointer */
9640 tmp = load_reg(s, 13);
9641 val = (insn & 0x7f) * 4;
9642 if (insn & (1 << 7))
9643 val = -(int32_t)val;
9644 tcg_gen_addi_i32(tmp, tmp, val);
9645 store_reg(s, 13, tmp);
9646 break;
9647
9648 case 2: /* sign/zero extend. */
9649 ARCH(6);
9650 rd = insn & 7;
9651 rm = (insn >> 3) & 7;
9652 tmp = load_reg(s, rm);
9653 switch ((insn >> 6) & 3) {
9654 case 0: gen_sxth(tmp); break;
9655 case 1: gen_sxtb(tmp); break;
9656 case 2: gen_uxth(tmp); break;
9657 case 3: gen_uxtb(tmp); break;
9658 }
9659 store_reg(s, rd, tmp);
9660 break;
9661 case 4: case 5: case 0xc: case 0xd:
9662 /* push/pop */
9663 addr = load_reg(s, 13);
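/* Work out the total transfer size first: 4 bytes per register in
   bits [7:0], plus 4 when bit 8 adds PC (pop) or LR (push). */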
9664 if (insn & (1 << 8))
9665 offset = 4;
9666 else
9667 offset = 0;
9668 for (i = 0; i < 8; i++) {
9669 if (insn & (1 << i))
9670 offset += 4;
9671 }
9672 if ((insn & (1 << 11)) == 0) {
9673 tcg_gen_addi_i32(addr, addr, -offset);
9674 }
9675 for (i = 0; i < 8; i++) {
9676 if (insn & (1 << i)) {
9677 if (insn & (1 << 11)) {
9678 /* pop */
9679 tmp = tcg_temp_new_i32();
9680 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9681 store_reg(s, i, tmp);
9682 } else {
9683 /* push */
9684 tmp = load_reg(s, i);
9685 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9686 tcg_temp_free_i32(tmp);
9687 }
9688 /* advance to the next address. */
9689 tcg_gen_addi_i32(addr, addr, 4);
9690 }
9691 }
9692 TCGV_UNUSED_I32(tmp);
9693 if (insn & (1 << 8)) {
9694 if (insn & (1 << 11)) {
9695 /* pop pc */
9696 tmp = tcg_temp_new_i32();
9697 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9698 /* don't set the pc until the rest of the instruction
9699 has completed */
9700 } else {
9701 /* push lr */
9702 tmp = load_reg(s, 14);
9703 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9704 tcg_temp_free_i32(tmp);
9705 }
9706 tcg_gen_addi_i32(addr, addr, 4);
9707 }
9708 if ((insn & (1 << 11)) == 0) {
9709 tcg_gen_addi_i32(addr, addr, -offset);
9710 }
9711 /* write back the new stack pointer */
9712 store_reg(s, 13, addr);
9713 /* set the new PC value */
9714 if ((insn & 0x0900) == 0x0900) {
9715 store_reg_from_load(env, s, 15, tmp);
9716 }
9717 break;
9718
9719 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9720 rm = insn & 7;
9721 tmp = load_reg(s, rm);
9722 s->condlabel = gen_new_label();
9723 s->condjmp = 1;
9724 if (insn & (1 << 11))
9725 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9726 else
9727 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9728 tcg_temp_free_i32(tmp);
9729 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
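/* CBZ/CBNZ: offset = i:imm5:'0' with imm5 = insn[7:3] and i = insn[9];
   bit 11 (tested above) selects CBNZ. */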
9730 val = (uint32_t)s->pc + 2;
9731 val += offset;
9732 gen_jmp(s, val);
9733 break;
9734
9735 case 15: /* IT, nop-hint. */
9736 if ((insn & 0xf) == 0) {
9737 gen_nop_hint(s, (insn >> 4) & 0xf);
9738 break;
9739 }
9740 /* If Then. */
9741 s->condexec_cond = (insn >> 4) & 0xe;
9742 s->condexec_mask = insn & 0x1f;
9743 /* No actual code generated for this insn, just set up state. */
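/* condexec_mask holds firstcond<0>:mask; after each insn in the block
   the translation loop in gen_intermediate_code_internal() shifts it
   left and feeds its top bit into condexec_cond, so the active
   condition follows the IT pattern. */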
9744 break;
9745
9746 case 0xe: /* bkpt */
9747 ARCH(5);
9748 gen_exception_insn(s, 2, EXCP_BKPT);
9749 break;
9750
9751 case 0xa: /* rev */
9752 ARCH(6);
9753 rn = (insn >> 3) & 0x7;
9754 rd = insn & 0x7;
9755 tmp = load_reg(s, rn);
9756 switch ((insn >> 6) & 3) {
9757 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9758 case 1: gen_rev16(tmp); break;
9759 case 3: gen_revsh(tmp); break;
9760 default: goto illegal_op;
9761 }
9762 store_reg(s, rd, tmp);
9763 break;
9764
9765 case 6:
9766 switch ((insn >> 5) & 7) {
9767 case 2:
9768 /* setend */
9769 ARCH(6);
9770 if (((insn >> 3) & 1) != s->bswap_code) {
9771 /* Dynamic endianness switching not implemented. */
9772 goto illegal_op;
9773 }
9774 break;
9775 case 3:
9776 /* cps */
9777 ARCH(6);
9778 if (IS_USER(s)) {
9779 break;
9780 }
9781 if (IS_M(env)) {
9782 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9783 /* FAULTMASK */
9784 if (insn & 1) {
9785 addr = tcg_const_i32(19);
9786 gen_helper_v7m_msr(cpu_env, addr, tmp);
9787 tcg_temp_free_i32(addr);
9788 }
9789 /* PRIMASK */
9790 if (insn & 2) {
9791 addr = tcg_const_i32(16);
9792 gen_helper_v7m_msr(cpu_env, addr, tmp);
9793 tcg_temp_free_i32(addr);
9794 }
9795 tcg_temp_free_i32(tmp);
9796 gen_lookup_tb(s);
9797 } else {
9798 if (insn & (1 << 4)) {
9799 shift = CPSR_A | CPSR_I | CPSR_F;
9800 } else {
9801 shift = 0;
9802 }
9803 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9804 }
9805 break;
9806 default:
9807 goto undef;
9808 }
9809 break;
9810
9811 default:
9812 goto undef;
9813 }
9814 break;
9815
9816 case 12:
9817 {
9818 /* load/store multiple */
9819 TCGv_i32 loaded_var;
9820 TCGV_UNUSED_I32(loaded_var);
9821 rn = (insn >> 8) & 0x7;
9822 addr = load_reg(s, rn);
9823 for (i = 0; i < 8; i++) {
9824 if (insn & (1 << i)) {
9825 if (insn & (1 << 11)) {
9826 /* load */
9827 tmp = tcg_temp_new_i32();
9828 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9829 if (i == rn) {
9830 loaded_var = tmp;
9831 } else {
9832 store_reg(s, i, tmp);
9833 }
9834 } else {
9835 /* store */
9836 tmp = load_reg(s, i);
9837 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9838 tcg_temp_free_i32(tmp);
9839 }
9840 /* advance to the next address */
9841 tcg_gen_addi_i32(addr, addr, 4);
9842 }
9843 }
9844 if ((insn & (1 << rn)) == 0) {
9845 /* base reg not in list: base register writeback */
9846 store_reg(s, rn, addr);
9847 } else {
9848 /* base reg in list: if load, complete it now */
9849 if (insn & (1 << 11)) {
9850 store_reg(s, rn, loaded_var);
9851 }
9852 tcg_temp_free_i32(addr);
9853 }
9854 break;
9855 }
9856 case 13:
9857 /* conditional branch or swi */
9858 cond = (insn >> 8) & 0xf;
9859 if (cond == 0xe)
9860 goto undef;
9861
9862 if (cond == 0xf) {
9863 /* swi */
9864 gen_set_pc_im(s->pc);
9865 s->is_jmp = DISAS_SWI;
9866 break;
9867 }
9868 /* generate a conditional jump to next instruction */
9869 s->condlabel = gen_new_label();
9870 gen_test_cc(cond ^ 1, s->condlabel);
9871 s->condjmp = 1;
9872
9873 /* jump to the offset */
9874 val = (uint32_t)s->pc + 2;
9875 offset = ((int32_t)insn << 24) >> 24;
9876 val += offset << 1;
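/* val is (insn address + 4) + SignExtend(imm8:'0'); s->pc was already
   advanced past this 16-bit insn. */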
9877 gen_jmp(s, val);
9878 break;
9879
9880 case 14:
9881 if (insn & (1 << 11)) {
9882 if (disas_thumb2_insn(env, s, insn))
9883 goto undef32;
9884 break;
9885 }
9886 /* unconditional branch */
9887 val = (uint32_t)s->pc;
9888 offset = ((int32_t)insn << 21) >> 21;
9889 val += (offset << 1) + 2;
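/* As above: target = (insn address + 4) + SignExtend(imm11:'0'). */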
9890 gen_jmp(s, val);
9891 break;
9892
9893 case 15:
9894 if (disas_thumb2_insn(env, s, insn))
9895 goto undef32;
9896 break;
9897 }
9898 return;
9899 undef32:
9900 gen_exception_insn(s, 4, EXCP_UDEF);
9901 return;
9902 illegal_op:
9903 undef:
9904 gen_exception_insn(s, 2, EXCP_UDEF);
9905 }
9906
9907 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9908 basic block 'tb'. If search_pc is TRUE, also generate PC
9909 information for each intermediate instruction. */
9910 static inline void gen_intermediate_code_internal(ARMCPU *cpu,
9911 TranslationBlock *tb,
9912 bool search_pc)
9913 {
9914 CPUARMState *env = &cpu->env;
9915 DisasContext dc1, *dc = &dc1;
9916 CPUBreakpoint *bp;
9917 uint16_t *gen_opc_end;
9918 int j, lj;
9919 target_ulong pc_start;
9920 uint32_t next_page_start;
9921 int num_insns;
9922 int max_insns;
9923
9924 /* generate intermediate code */
9925 pc_start = tb->pc;
9926
9927 dc->tb = tb;
9928
9929 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
9930
9931 dc->is_jmp = DISAS_NEXT;
9932 dc->pc = pc_start;
9933 dc->singlestep_enabled = env->singlestep_enabled;
9934 dc->condjmp = 0;
9935 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9936 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
9937 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9938 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9939 #if !defined(CONFIG_USER_ONLY)
9940 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9941 #endif
9942 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9943 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9944 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9945 cpu_F0s = tcg_temp_new_i32();
9946 cpu_F1s = tcg_temp_new_i32();
9947 cpu_F0d = tcg_temp_new_i64();
9948 cpu_F1d = tcg_temp_new_i64();
9949 cpu_V0 = cpu_F0d;
9950 cpu_V1 = cpu_F1d;
9951 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9952 cpu_M0 = tcg_temp_new_i64();
9953 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9954 lj = -1;
9955 num_insns = 0;
9956 max_insns = tb->cflags & CF_COUNT_MASK;
9957 if (max_insns == 0)
9958 max_insns = CF_COUNT_MASK;
9959
9960 gen_tb_start();
9961
9962 tcg_clear_temp_count();
9963
9964 /* A note on handling of the condexec (IT) bits:
9965 *
9966 * We want to avoid the overhead of having to write the updated condexec
9967 * bits back to the CPUARMState for every instruction in an IT block. So:
9968 * (1) if the condexec bits are not already zero then we write
9969 * zero back into the CPUARMState now. This avoids complications trying
9970 * to do it at the end of the block. (For example if we don't do this
9971 * it's hard to identify whether we can safely skip writing condexec
9972 * at the end of the TB, which we definitely want to do for the case
9973 * where a TB doesn't do anything with the IT state at all.)
9974 * (2) if we are going to leave the TB then we call gen_set_condexec()
9975 * which will write the correct value into CPUARMState if zero is wrong.
9976 * This is done both for leaving the TB at the end, and for leaving
9977 * it because of an exception we know will happen, which is done in
9978 * gen_exception_insn(). The latter is necessary because we need to
9979 * leave the TB with the PC/IT state just prior to execution of the
9980 * instruction which caused the exception.
9981 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9982 * then the CPUARMState will be wrong and we need to reset it.
9983 * This is handled in the same way as restoration of the
9984 * PC in these situations: we will be called again with search_pc=1
9985 * and generate a mapping of the condexec bits for each PC in
9986 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9987 * this to restore the condexec bits.
9988 *
9989 * Note that there are no instructions which can read the condexec
9990 * bits, and none which can write non-static values to them, so
9991 * we don't need to care about whether CPUARMState is correct in the
9992 * middle of a TB.
9993 */
9994
9995 /* Reset the conditional execution bits immediately. This avoids
9996 complications trying to do it at the end of the block. */
9997 if (dc->condexec_mask || dc->condexec_cond)
9998 {
9999 TCGv_i32 tmp = tcg_temp_new_i32();
10000 tcg_gen_movi_i32(tmp, 0);
10001 store_cpu_field(tmp, condexec_bits);
10002 }
10003 do {
10004 #ifdef CONFIG_USER_ONLY
10005 /* Intercept jump to the magic kernel page. */
10006 if (dc->pc >= 0xffff0000) {
10007 /* We always get here via a jump, so we know we are not in a
10008 conditional execution block. */
10009 gen_exception(EXCP_KERNEL_TRAP);
10010 dc->is_jmp = DISAS_UPDATE;
10011 break;
10012 }
10013 #else
10014 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10015 /* We always get here via a jump, so we know we are not in a
10016 conditional execution block. */
10017 gen_exception(EXCP_EXCEPTION_EXIT);
10018 dc->is_jmp = DISAS_UPDATE;
10019 break;
10020 }
10021 #endif
10022
10023 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
10024 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
10025 if (bp->pc == dc->pc) {
10026 gen_exception_insn(dc, 0, EXCP_DEBUG);
10027 /* Advance PC so that clearing the breakpoint will
10028 invalidate this TB. */
10029 dc->pc += 2;
10030 goto done_generating;
10031 }
10032 }
10033 }
10034 if (search_pc) {
10035 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
10036 if (lj < j) {
10037 lj++;
10038 while (lj < j)
10039 tcg_ctx.gen_opc_instr_start[lj++] = 0;
10040 }
10041 tcg_ctx.gen_opc_pc[lj] = dc->pc;
10042 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
10043 tcg_ctx.gen_opc_instr_start[lj] = 1;
10044 tcg_ctx.gen_opc_icount[lj] = num_insns;
10045 }
10046
10047 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
10048 gen_io_start();
10049
10050 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
10051 tcg_gen_debug_insn_start(dc->pc);
10052 }
10053
10054 if (dc->thumb) {
10055 disas_thumb_insn(env, dc);
10056 if (dc->condexec_mask) {
10057 dc->condexec_cond = (dc->condexec_cond & 0xe)
10058 | ((dc->condexec_mask >> 4) & 1);
10059 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10060 if (dc->condexec_mask == 0) {
10061 dc->condexec_cond = 0;
10062 }
10063 }
10064 } else {
10065 disas_arm_insn(env, dc);
10066 }
10067
10068 if (dc->condjmp && !dc->is_jmp) {
10069 gen_set_label(dc->condlabel);
10070 dc->condjmp = 0;
10071 }
10072
10073 if (tcg_check_temp_count()) {
10074 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
10075 }
10076
10077 /* Translation stops when a conditional branch is encountered.
10078 * Otherwise the subsequent code could get translated several times.
10079 * Also stop translation when a page boundary is reached. This
10080 * ensures prefetch aborts occur at the right place. */
10081 num_insns ++;
10082 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
10083 !env->singlestep_enabled &&
10084 !singlestep &&
10085 dc->pc < next_page_start &&
10086 num_insns < max_insns);
10087
10088 if (tb->cflags & CF_LAST_IO) {
10089 if (dc->condjmp) {
10090 /* FIXME: This can theoretically happen with self-modifying
10091 code. */
10092 cpu_abort(env, "IO on conditional branch instruction");
10093 }
10094 gen_io_end();
10095 }
10096
10097 /* At this stage dc->condjmp will only be set when the skipped
10098 instruction was a conditional branch or trap, and the PC has
10099 already been written. */
10100 if (unlikely(env->singlestep_enabled)) {
10101 /* Make sure the pc is updated, and raise a debug exception. */
10102 if (dc->condjmp) {
10103 gen_set_condexec(dc);
10104 if (dc->is_jmp == DISAS_SWI) {
10105 gen_exception(EXCP_SWI);
10106 } else {
10107 gen_exception(EXCP_DEBUG);
10108 }
10109 gen_set_label(dc->condlabel);
10110 }
10111 if (dc->condjmp || !dc->is_jmp) {
10112 gen_set_pc_im(dc->pc);
10113 dc->condjmp = 0;
10114 }
10115 gen_set_condexec(dc);
10116 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
10117 gen_exception(EXCP_SWI);
10118 } else {
10119 /* FIXME: Single stepping a WFI insn will not halt
10120 the CPU. */
10121 gen_exception(EXCP_DEBUG);
10122 }
10123 } else {
10124 /* While branches must always occur at the end of an IT block,
10125 there are a few other things that can cause us to terminate
10126 the TB in the middle of an IT block:
10127 - Exception generating instructions (bkpt, swi, undefined).
10128 - Page boundaries.
10129 - Hardware watchpoints.
10130 Hardware breakpoints have already been handled and skip this code.
10131 */
10132 gen_set_condexec(dc);
10133 switch(dc->is_jmp) {
10134 case DISAS_NEXT:
10135 gen_goto_tb(dc, 1, dc->pc);
10136 break;
10137 default:
10138 case DISAS_JUMP:
10139 case DISAS_UPDATE:
10140 /* indicate that the hash table must be used to find the next TB */
10141 tcg_gen_exit_tb(0);
10142 break;
10143 case DISAS_TB_JUMP:
10144 /* nothing more to generate */
10145 break;
10146 case DISAS_WFI:
10147 gen_helper_wfi(cpu_env);
10148 break;
10149 case DISAS_SWI:
10150 gen_exception(EXCP_SWI);
10151 break;
10152 }
10153 if (dc->condjmp) {
10154 gen_set_label(dc->condlabel);
10155 gen_set_condexec(dc);
10156 gen_goto_tb(dc, 1, dc->pc);
10157 dc->condjmp = 0;
10158 }
10159 }
10160
10161 done_generating:
10162 gen_tb_end(tb, num_insns);
10163 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
10164
10165 #ifdef DEBUG_DISAS
10166 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
10167 qemu_log("----------------\n");
10168 qemu_log("IN: %s\n", lookup_symbol(pc_start));
10169 log_target_disas(env, pc_start, dc->pc - pc_start,
10170 dc->thumb | (dc->bswap_code << 1));
10171 qemu_log("\n");
10172 }
10173 #endif
10174 if (search_pc) {
10175 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
10176 lj++;
10177 while (lj <= j)
10178 tcg_ctx.gen_opc_instr_start[lj++] = 0;
10179 } else {
10180 tb->size = dc->pc - pc_start;
10181 tb->icount = num_insns;
10182 }
10183 }
10184
10185 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
10186 {
10187 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
10188 }
10189
10190 void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
10191 {
10192 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
10193 }
10194
10195 static const char *cpu_mode_names[16] = {
10196 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10197 "???", "???", "???", "und", "???", "???", "???", "sys"
10198 };
10199
10200 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
10201 int flags)
10202 {
10203 ARMCPU *cpu = ARM_CPU(cs);
10204 CPUARMState *env = &cpu->env;
10205 int i;
10206 uint32_t psr;
10207
10208 for(i=0;i<16;i++) {
10209 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
10210 if ((i % 4) == 3)
10211 cpu_fprintf(f, "\n");
10212 else
10213 cpu_fprintf(f, " ");
10214 }
10215 psr = cpsr_read(env);
10216 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10217 psr,
10218 psr & (1 << 31) ? 'N' : '-',
10219 psr & (1 << 30) ? 'Z' : '-',
10220 psr & (1 << 29) ? 'C' : '-',
10221 psr & (1 << 28) ? 'V' : '-',
10222 psr & CPSR_T ? 'T' : 'A',
10223 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
10224
10225 if (flags & CPU_DUMP_FPU) {
10226 int numvfpregs = 0;
10227 if (arm_feature(env, ARM_FEATURE_VFP)) {
10228 numvfpregs += 16;
10229 }
10230 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10231 numvfpregs += 16;
10232 }
10233 for (i = 0; i < numvfpregs; i++) {
10234 uint64_t v = float64_val(env->vfp.regs[i]);
10235 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10236 i * 2, (uint32_t)v,
10237 i * 2 + 1, (uint32_t)(v >> 32),
10238 i, v);
10239 }
10240 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
10241 }
10242 }
10243
10244 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
10245 {
10246 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
10247 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
10248 }